text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# Copyright 2002 by Katharine Lindner. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Module to represent the NDB Atlas structure (a minimal subset of PDB format).
Hetero, Crystal and Chain exist to represent the NDB Atlas structure. Atlas
is a minimal subset of the PDB format. Hetero supports a 3 alphameric code.
The NDB web interface is located at http://ndbserver.rutgers.edu/NDB/index.html
"""
import copy
class CrystalError(Exception):
pass
def wrap_line(line):
    """Break *line* into 80-character pieces, one piece per output line.

    Returns the empty string for empty input; otherwise every piece
    (including the last, possibly short one) is newline-terminated.
    """
    chunks = [line[start:start + 80] for start in range(0, len(line), 80)]
    return ''.join('%s\n' % chunk for chunk in chunks)
def validate_key(key):
    """Validate a chain label.

    A valid label is a string of exactly one character. Raises
    CrystalError otherwise; returns None on success.
    """
    # isinstance is the idiomatic type check (and accepts str subclasses,
    # which the old type(key) != type('') comparison wrongly rejected).
    if not isinstance(key, str):
        raise CrystalError('chain requires a string label')
    if len(key) != 1:
        raise CrystalError('chain label should contain one letter')
class Hetero(object):
    """A PDB hetero entry: a 1-3 character alphanumeric code, stored lower-case.

    This class exists to support the PDB hetero codes; only the 3
    alphameric code is supported. The annotation is available from
    http://alpha2.bmc.uu.se/hicup/
    """
    def __init__(self, data):
        """Validate *data* and store it lower-cased. Raises CrystalError on bad input."""
        if not isinstance(data, str):
            raise CrystalError('Hetero data must be an alphameric string')
        # Check emptiness before isalnum(): ''.isalnum() is False, so the
        # dedicated "must not be empty" message used to be unreachable.
        if len(data) < 1:
            raise CrystalError('Hetero data must not be empty')
        if not data.isalnum():
            raise CrystalError('Hetero data must be an alphameric string')
        if len(data) > 3:
            raise CrystalError('Hetero data may contain up to 3 characters')
        self.data = data.lower()
    def __eq__(self, other):
        # Returning NotImplemented (instead of raising AttributeError as the
        # old code did) lets comparisons against non-Hetero objects fall back
        # to Python's default identity-based handling.
        if not isinstance(other, Hetero):
            return NotImplemented
        return self.data == other.data
    def __ne__(self, other):
        """Returns true iff self is not equal to other."""
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
    def __hash__(self):
        # Defined alongside __eq__ so instances stay usable in sets/dicts,
        # hashing on the same value equality compares on.
        return hash(self.data)
    def __repr__(self):
        return "%s" % self.data
    def __str__(self):
        return "%s" % self.data
    def __len__(self):
        return len(self.data)
class Chain(object):
    """An ordered, list-like sequence of Hetero residues.

    Can be built from a string of residue codes (separated by whitespace
    or '*'), from a list of Hetero instances, or from another Chain.
    """
    def __init__(self, residues=''):
        self.data = []
        if isinstance(residues, str):
            # '*' acts as a spacer in NDB entries; normalise it to a space.
            residues = residues.replace('*', ' ')
            residues = residues.strip()
            elements = residues.split()
            # Build a real list: on Python 3 map() returns a lazy iterator,
            # which broke len(), indexing and equality on string-built chains.
            self.data = [Hetero(element) for element in elements]
        elif isinstance(residues, list):
            for element in residues:
                if not isinstance(element, Hetero):
                    raise CrystalError('Text must be a string')
            for residue in residues:
                self.data.append(residue)
        elif isinstance(residues, Chain):
            for residue in residues:
                self.data.append(residue)
        self.validate()
    def validate(self):
        """Raise TypeError unless every stored element is a Hetero."""
        data = self.data
        for element in data:
            self.validate_element(element)
    def validate_element(self, element):
        if not isinstance(element, Hetero):
            raise TypeError
    def __str__(self):
        """Return the space-separated residue codes, wrapped at 80 columns."""
        output = ''
        for element in self.data:
            output = output + '%s ' % element
        output = output.strip()
        output = wrap_line(output)
        return output
    def __eq__(self, other):
        if len(self.data) != len(other.data):
            return 0
        # all() replaces reduce(): reduce is no longer a builtin on Python 3,
        # and with no initializer it raised TypeError on two empty chains.
        ok = all(a == b for a, b in zip(self.data, other.data))
        return ok
    def __ne__(self, other):
        """Returns true iff self is not equal to other."""
        return not self.__eq__(other)
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        if isinstance(index, int):
            return self.data[index]
        elif isinstance(index, slice):
            # Slicing returns a new Chain of the same class.
            return self.__class__(self.data[index])
        else:
            raise TypeError
    def __setitem__(self, index, value):
        if isinstance(index, int):
            try:
                self.validate_element(value)
            except TypeError:
                # Coerce a residue-code string into a Hetero.
                value = Hetero(value.lower())
            self.data[index] = value
        elif isinstance(index, slice):
            if isinstance(value, Chain):
                self.data[index] = value.data
            elif isinstance(value, type(self.data)):
                self.data[index] = value
            elif isinstance(value, str):
                # str replaces the Python-2-only basestring.
                self.data[index] = Chain(value).data
            else:
                raise TypeError
        else:
            raise TypeError
    def __delitem__(self, index):
        del self.data[index]
    def __contains__(self, item):
        try:
            self.validate_element(item)
        except TypeError:
            item = Hetero(item.lower())
        return item in self.data
    def append(self, item):
        """Append a residue (a Hetero or a residue-code string)."""
        try:
            self.validate_element(item)
        except TypeError:
            item = Hetero(item.lower())
        self.data.append(item)
    def insert(self, i, item):
        try:
            self.validate_element(item)
        except TypeError:
            item = Hetero(item.lower())
        self.data.insert(i, item)
    def remove(self, item):
        item = Hetero(item.lower())
        self.data.remove(item)
    def count(self, item):
        try:
            self.validate_element(item)
        except TypeError:
            item = Hetero(item.lower())
        return self.data.count(item)
    def index(self, item):
        try:
            self.validate_element(item)
        except TypeError:
            item = Hetero(item.lower())
        return self.data.index(item)
    def __add__(self, other):
        if isinstance(other, Chain):
            return self.__class__(self.data + other.data)
        elif isinstance(other, str):
            return self.__class__(self.data + Chain(other).data)
        else:
            raise TypeError
    def __radd__(self, other):
        if isinstance(other, Chain):
            return self.__class__(other.data + self.data)
        elif isinstance(other, str):
            return self.__class__(Chain(other).data + self.data)
        else:
            raise TypeError
    def __iadd__(self, other):
        if isinstance(other, Chain):
            self.data += other.data
        elif isinstance(other, str):
            self.data += Chain(other).data
        else:
            raise TypeError
        return self
class Crystal(object):
    """A dictionary-like collection mapping chain labels to Chain objects.

    String values are coerced into Chain instances on construction and
    assignment.
    """
    def __init__(self, data=None):
        # A mutable default argument ({}) would be shared by every instance
        # created without arguments; use None as the sentinel instead.
        if data is None:
            data = {}
        # Enforce dict storage
        if not isinstance(data, dict):
            raise CrystalError('Crystal must be a dictionary')
        self.data = data
        self.fix()
    def fix(self):
        """Coerce any string values in the mapping into Chain instances."""
        data = self.data
        for key in data:
            element = data[key]
            if isinstance(element, Chain):
                pass
            elif isinstance(element, str):
                data[key] = Chain(element)
            else:
                raise TypeError
    def __repr__(self):
        output = ''
        # sorted() works on Python 3, where dict.keys() has no .sort() method.
        for key in sorted(self.data):
            output = output + '%s : %s\n' % (key, self.data[key])
        return output
    def __str__(self):
        output = ''
        for key in sorted(self.data):
            output = output + '%s : %s\n' % (key, self.data[key])
        return output
    def tostring(self):
        # NOTE: despite the name this returns the underlying dict; kept
        # as-is for backward compatibility.
        return self.data
    def __len__(self):
        return len(self.data)
    def __getitem__(self, key):
        return self.data[key]
    def __setitem__(self, key, item):
        if isinstance(item, Chain):
            self.data[key] = item
        elif isinstance(item, str):
            self.data[key] = Chain(item)
        else:
            raise TypeError
    def __delitem__(self, key):
        del self.data[key]
    def clear(self):
        self.data.clear()
    def copy(self):
        # Shallow copy: chains are shared between the copies.
        return copy.copy(self)
    def keys(self):
        return self.data.keys()
    def items(self):
        return self.data.items()
    def values(self):
        return self.data.values()
    def __contains__(self, value):
        return value in self.data
    def has_key(self, key):
        # Legacy Python 2 dict API, kept for backward compatibility.
        return key in self.data
    def get(self, key, failobj=None):
        return self.data.get(key, failobj)
    def setdefault(self, key, failobj=None):
        if key not in self.data:
            self.data[key] = failobj
        return self.data[key]
    def popitem(self):
        return self.data.popitem()
|
bryback/quickseq
|
genescript/Bio/Crystal/__init__.py
|
Python
|
mit
| 8,385
|
[
"Biopython",
"CRYSTAL"
] |
183a558b06a1b5d99090585335af91650eeb833219fac3ee22b354653e9d2a9f
|
from .atom import * # NOQA
from .molecule import * # NOQA
from .crystal import * # NOQA
from .fragment import * # NOQA
from .kernel import * # NOQA
|
crcollins/molml
|
molml/features.py
|
Python
|
mit
| 153
|
[
"CRYSTAL"
] |
5a876d09228d479e72468040cb6d760a0126d37de79ad436cc3be5cff7d91945
|
#!/usr/bin/env python3
import espressopp
from espressopp import Real3D, Int3D
from espressopp.tools import decomp, lattice, velocities
from mpi4py import MPI
import time
import numpy as np
def generate_particles(particles_per_direction):
    """Create particle positions on a cubic lattice plus Gaussian velocities.

    Returns the coordinate arrays, the box edge lengths and the velocity
    arrays for ``particles_per_direction ** 3`` particles.
    """
    total = particles_per_direction ** 3
    x, y, z, Lx, Ly, Lz = lattice.createCubic(total, rho=0.8442, perfect=False)
    vx, vy, vz = velocities.gaussian(T=0.6, N=total, zero_momentum=True)
    return x, y, z, Lx, Ly, Lz, vx, vy, vz
def generate_system(add_particles_array):
    """Set up an espressopp system and time how long particle insertion takes.

    Parameters
    ----------
    add_particles_array : bool
        If True, insert particles through the NumPy-array API
        (``storage.addParticlesArray``); otherwise through the per-particle
        list API (``storage.addParticles``).

    Returns
    -------
    tuple
        ``(num_particles, tprep, tadd)``: particle count, seconds spent
        preparing the data, seconds spent handing it to the storage back-end.
    """
    rc = 2.5     # interaction cutoff radius
    skin = 0.3   # Verlet-list skin
    timestep = 0.005    # NOTE(review): currently unused in this function
    temperature = 1.0   # NOTE(review): currently unused in this function
    comm = MPI.COMM_WORLD
    particles_per_direction = 64
    x, y, z, Lx, Ly, Lz, vx, vy, vz = generate_particles(particles_per_direction)
    num_particles = len(x)
    density = num_particles / (Lx * Ly * Lz)  # NOTE(review): unused as well
    size = (Lx, Ly, Lz)
    system = espressopp.System()
    system.rng = espressopp.esutil.RNG()
    system.bc = espressopp.bc.OrthorhombicBC(system.rng, size)
    system.skin = skin
    # Decompose the simulation box over the available MPI ranks.
    nodeGrid = decomp.nodeGrid(comm.size, size, rc, skin)
    cellGrid = decomp.cellGrid(size, nodeGrid, rc, skin)
    system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
    if add_particles_array:
        tstart = time.time()
        props = ['id', 'type', 'mass', 'posx', 'posy', 'posz', 'vx', 'vy', 'vz']
        ids = np.arange(1,num_particles+1)
        types = np.zeros(num_particles)
        mass = np.ones(num_particles)
        # One row per particle; columns in the same order as props.
        new_particles = np.stack((ids, types, mass, x, y, z, vx, vy, vz), axis=-1)
        tprep = time.time()-tstart
        tstart = time.time()
        system.storage.addParticlesArray(new_particles, *props)
        tadd = time.time()-tstart
    else:
        tstart = time.time()
        props = ['id', 'type', 'mass', 'pos', 'v']
        new_particles = []
        for i in range(num_particles):
            new_particles.append([i + 1, 0, 1.0, Real3D(x[i], y[i], z[i]), Real3D(vx[i], vy[i], vz[i])])
        tprep = time.time()-tstart
        tstart = time.time()
        system.storage.addParticles(new_particles, *props)
        tadd = time.time()-tstart
    return num_particles, tprep, tadd
if __name__=="__main__":
    # Benchmark driver: build the same system twice to compare the two
    # particle-insertion APIs, then report timings and relative speedup.
    num_particles1, tprep1, tadd1 = generate_system(False)
    num_particles2, tprep2, tadd2 = generate_system(True)
    print("\n")
    print("Using system.storage.addParticles(...)")
    print(" prepared {} particles in {:.2f} seconds".format(num_particles1, tprep1))
    print(" added {} particles in {:.2f} seconds".format(num_particles1, tadd1))
    print()
    print("Using system.storage.addParticlesArray(...)")
    print(" prepared {} particles in {:.2f} seconds, speedup: {:.2f}".format(num_particles2, tprep2, tprep1/tprep2))
    print(" added {} particles in {:.2f} seconds, speedup: {:.2f}".format(num_particles2, tadd2, tadd1/tadd2))
|
espressopp/espressopp
|
examples/add_particles_numpy/add_particles_numpy.py
|
Python
|
gpl-3.0
| 2,889
|
[
"Gaussian"
] |
a26d56582e705503391baa49a0b09843a2f8038912398bcab387e49ce52eab14
|
#!/usr/bin/env python
"""
Solve day 1 of Advent of Code.
http://adventofcode.com/day/1
"""
# Do we go up or down based on the instruction character?
instruction_map = {'(': 1, ')': -1}

def floor_from_instructions(instructions):
    """
    Get the floor number resulting from a set of instructions.

    Unknown characters count as 0 (ignored).
    """
    # Generator expression: no need to materialise an intermediate list
    # just to feed sum().
    return sum(instruction_map.get(x, 0) for x in instructions)
def position_of_first_basement(instructions):
    """
    Find the position in the instructions (1-based) where we
    first visit the "basement" (get a -1 floor number).
    Return -1 if there is no such instruction position.
    """
    # Single pass with a running floor counter. The previous version
    # re-summed every prefix via floor_from_instructions, which was
    # O(n^2) in the length of the instructions.
    floor = 0
    for position, char in enumerate(instructions, start=1):
        floor += instruction_map.get(char, 0)
        if floor == -1:
            return position
    return -1
if __name__ == '__main__':
    # Read the full puzzle input as one string of '(' / ')' characters
    # and report both puzzle answers.
    with open('input.txt') as f:
        instructions = f.read()
    print("Part 1:", floor_from_instructions(instructions))
    print("Part 2:", position_of_first_basement(instructions))
|
mpirnat/adventofcode
|
day01/day01.py
|
Python
|
mit
| 979
|
[
"VisIt"
] |
19a326606338108d5db8cc0cb7a40875811dbc84d9cdaef2e8e99f8e913170db
|
import ast
from py14.scope import add_scope_context
from py14.context import add_variable_context
from py14.analysis import (FunctionTransformer, CalledWithTransformer,
ImportTransformer, AttributeCallTransformer,
is_void_function)
def parse(*args):
    """Join the given source lines, parse them, and annotate the AST
    with scope and variable context."""
    tree = ast.parse("\n".join(args))
    add_scope_context(tree)
    add_variable_context(tree)
    return tree
def test_is_void_for_fun_with_no_return():
    """A function body that never returns a value is classified as void."""
    tree = parse(
        "def foo(x):",
        " bar(x)",
    )
    fn = tree.body[0]
    assert is_void_function(fn)
def test_is_not_void_for_fun_with_return_value():
    """A function that returns a value is not void."""
    tree = parse(
        "def foo(x):",
        " return x",
    )
    fn = tree.body[0]
    assert not is_void_function(fn)
class TestFunctionTransformer:
    """Tests for FunctionTransformer's defined_functions bookkeeping."""
    def test_nested_functions(self):
        # NOTE(review): the indentation inside these source strings looks
        # flattened by extraction; the assertions below imply gib/mir are
        # nested inside bar — confirm against the original repository.
        source = parse(
            "def foo():",
            " def bar():",
            " def gib():",
            " pass",
            " def mir():",
            " pass",
        )
        FunctionTransformer().visit(source)
        foo = source.body[0]
        bar = foo.body[0]
        gib = bar.body[0]
        mir = bar.body[1]
        # Each node records only its directly defined functions.
        assert len(foo.defined_functions) == 1
        assert len(bar.defined_functions) == 2
        assert len(gib.defined_functions) == 0
        assert len(mir.defined_functions) == 0
    def test_functions_from_modules(self):
        source = parse("from foo import bar, baz")
        FunctionTransformer().visit(source)
        module = source
        # Both imported names count as functions defined on the module.
        assert len(module.defined_functions) == 2
class TestCalledWithTransformer:
    """Tests for CalledWithTransformer's called_with bookkeeping."""
    def test_var_called_with_later_function(self):
        source = parse(
            "x = 3",
            "bar(x)",
            "bar(foo(x))",
        )
        CalledWithTransformer().visit(source)
        # The assignment target of 'x = 3'.
        x = source.body[0].targets[0]
        # x is used as a call argument twice after its assignment.
        assert len(x.called_with) == 2
class TestAttributeCallTransformer:
    """Tests for AttributeCallTransformer's calls bookkeeping."""
    def test_call_to_attribute_registered(self):
        source = parse(
            "x = foo()",
            "x.bar()",
        )
        AttributeCallTransformer().visit(source)
        x = source.body[0].targets[0]
        # The single x.bar() attribute call is recorded on x.
        assert len(x.calls) == 1
class TestImportTransformer:
    """Tests for ImportTransformer's import bookkeeping."""
    def test_function_knows_from_where_it_is_imported(self):
        source = parse(
            "from foo import bar",
            "bar(x)",
        )
        ImportTransformer().visit(source)
        module = source
        # The alias node for 'bar' inside the ImportFrom statement.
        bar_import = source.body[0].names[0]
        assert len(module.imports) == 1
        assert isinstance(bar_import.imported_from, ast.ImportFrom)
|
lukasmartinelli/py14
|
py14/tests/test_analysis.py
|
Python
|
mit
| 2,639
|
[
"VisIt"
] |
14269df1b20ba63065684103af90116c26a737794385cc34e839b4067fbcbeca
|
# todo:
# * vtkVolumeMapper::SetCroppingRegionPlanes(xmin,xmax,ymin,ymax,zmin,zmax)
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
import vtkdevide
class VolumeRender(
    ScriptedConfigModuleMixin, ModuleBase):
    """DeVIDE module wrapping several VTK direct volume rendering back-ends.

    Takes image data plus optional opacity / colour transfer functions as
    inputs and outputs a configured vtkVolume. The mapper back-end
    (fixed-point raycast, GPU raycast, 2D/3D texture, shell splatting, or
    legacy raycast) is selectable through the module config.

    NOTE(review): this is Python 2 code (print statement in _create_tfs).
    """
    def __init__(self, module_manager):
        # initialise our base class
        ModuleBase.__init__(self, module_manager)
        # at the first config_to_logic (at the end of the ctor), this will
        # be set to 0
        self._current_rendering_type = -1
        # setup some config defaults
        self._config.rendering_type = 0
        self._config.interpolation = 1 # linear
        self._config.ambient = 0.1
        self._config.diffuse = 0.7
        self._config.specular = 0.6
        self._config.specular_power = 80
        self._config.threshold = 1250
        # this is not in the interface yet, change by introspection
        self._config.mip_colour = (0.0, 0.0, 1.0)
        # (label, config attribute, type, widget kind, tooltip[, choices])
        config_list = [
            ('Rendering type:', 'rendering_type', 'base:int', 'choice',
             'Direct volume rendering algorithm that will be used.',
             ('Raycast (fixed point)', 'GPU raycasting',
              '2D Texture', '3D Texture',
              'ShellSplatting', 'Raycast (old)')),
            ('Interpolation:', 'interpolation', 'base:int', 'choice',
             'Linear (high quality, slower) or nearest neighbour (lower '
             'quality, faster) interpolation',
             ('Nearest Neighbour', 'Linear')),
            ('Ambient:', 'ambient', 'base:float', 'text',
             'Ambient lighting term.'),
            ('Diffuse:', 'diffuse', 'base:float', 'text',
             'Diffuse lighting term.'),
            ('Specular:', 'specular', 'base:float', 'text',
             'Specular lighting term.'),
            ('Specular power:', 'specular_power', 'base:float', 'text',
             'Specular power lighting term (more focused high-lights).'),
            ('Threshold:', 'threshold', 'base:float', 'text',
             'Used to generate transfer function ONLY if none is supplied')
        ]
        ScriptedConfigModuleMixin.__init__(
            self, config_list,
            {'Module (self)' : self})
        self._input_data = None
        self._input_otf = None
        self._input_ctf = None
        self._volume_raycast_function = None
        self._create_pipeline()
        self.sync_module_logic_with_config()

    def close(self):
        # we play it safe... (the graph_editor/module_manager should have
        # disconnected us by now)
        for input_idx in range(len(self.get_input_descriptions())):
            self.set_input(input_idx, None)
        # this will take care of GUI
        ScriptedConfigModuleMixin.close(self)
        # get rid of our reference
        del self._volume_property
        del self._volume_raycast_function
        del self._volume_mapper
        del self._volume

    def get_input_descriptions(self):
        return ('input image data',
                'opacity transfer function',
                'colour transfer function')

    def set_input(self, idx, inputStream):
        # Route the stream to the matching internal slot.
        if idx == 0:
            self._input_data = inputStream
        elif idx == 1:
            self._input_otf = inputStream
        else:
            self._input_ctf = inputStream

    def get_output_descriptions(self):
        return ('vtkVolume',)

    def get_output(self, idx):
        return self._volume

    def logic_to_config(self):
        # Mirror the current VTK pipeline state back into the config.
        self._config.rendering_type = self._current_rendering_type
        self._config.interpolation = \
            self._volume_property.GetInterpolationType()
        self._config.ambient = self._volume_property.GetAmbient()
        self._config.diffuse = self._volume_property.GetDiffuse()
        self._config.specular = self._volume_property.GetSpecular()
        self._config.specular_power = \
            self._volume_property.GetSpecularPower()

    def config_to_logic(self):
        # Push lighting/interpolation settings into the volume property.
        self._volume_property.SetInterpolationType(self._config.interpolation)
        self._volume_property.SetAmbient(self._config.ambient)
        self._volume_property.SetDiffuse(self._config.diffuse)
        self._volume_property.SetSpecular(self._config.specular)
        self._volume_property.SetSpecularPower(self._config.specular_power)
        # Only rebuild the mapper when the selected back-end changed.
        if self._config.rendering_type != self._current_rendering_type:
            if self._config.rendering_type == 0:
                # raycast fixed point
                self._setup_for_fixed_point()
            elif self._config.rendering_type == 1:
                # gpu raycasting
                self._setup_for_gpu_raycasting()
            elif self._config.rendering_type == 2:
                # 2d texture
                self._setup_for_2d_texture()
            elif self._config.rendering_type == 3:
                # 3d texture
                self._setup_for_3d_texture()
            elif self._config.rendering_type == 4:
                # shell splatter
                self._setup_for_shell_splatting()
            else:
                # old raycaster (very picky about input types)
                self._setup_for_raycast()
            self._volume.SetMapper(self._volume_mapper)
            self._current_rendering_type = self._config.rendering_type

    def _setup_for_raycast(self):
        # Legacy software raycaster with a composite blend function.
        self._volume_raycast_function = \
            vtk.vtkVolumeRayCastCompositeFunction()
        self._volume_mapper = vtk.vtkVolumeRayCastMapper()
        self._volume_mapper.SetVolumeRayCastFunction(
            self._volume_raycast_function)
        module_utils.setup_vtk_object_progress(self, self._volume_mapper,
                                               'Preparing render.')

    def _setup_for_2d_texture(self):
        self._volume_mapper = vtk.vtkVolumeTextureMapper2D()
        module_utils.setup_vtk_object_progress(self, self._volume_mapper,
                                               'Preparing render.')

    def _setup_for_3d_texture(self):
        self._volume_mapper = vtk.vtkVolumeTextureMapper3D()
        module_utils.setup_vtk_object_progress(self, self._volume_mapper,
                                               'Preparing render.')

    def _setup_for_shell_splatting(self):
        self._volume_mapper = vtkdevide.vtkOpenGLVolumeShellSplatMapper()
        self._volume_mapper.SetOmegaL(0.9)
        self._volume_mapper.SetOmegaH(0.9)
        # high-quality rendermode
        self._volume_mapper.SetRenderMode(0)
        module_utils.setup_vtk_object_progress(self, self._volume_mapper,
                                               'Preparing render.')

    def _setup_for_fixed_point(self):
        """This doesn't seem to work. After processing is complete,
        it stalls on actually rendering the volume. No idea.
        """
        self._volume_mapper = vtk.vtkFixedPointVolumeRayCastMapper()
        self._volume_mapper.SetBlendModeToComposite()
        #self._volume_mapper.SetBlendModeToMaximumIntensity()
        module_utils.setup_vtk_object_progress(self, self._volume_mapper,
                                               'Preparing render.')

    def _setup_for_gpu_raycasting(self):
        """This doesn't seem to work. After processing is complete,
        it stalls on actually rendering the volume. No idea.
        """
        self._volume_mapper = vtk.vtkGPUVolumeRayCastMapper()
        self._volume_mapper.SetBlendModeToComposite()
        #self._volume_mapper.SetBlendModeToMaximumIntensity()
        module_utils.setup_vtk_object_progress(self, self._volume_mapper,
                                               'Preparing render.')

    def execute_module(self):
        # Generated transfer functions are used only as fall-backs when no
        # user-supplied ones are connected.
        otf, ctf = self._create_tfs()
        if self._input_otf is not None:
            otf = self._input_otf
        if self._input_ctf is not None:
            ctf = self._input_ctf
        self._volume_property.SetScalarOpacity(otf)
        self._volume_property.SetColor(ctf)
        self._volume_mapper.SetInput(self._input_data)
        self._volume_mapper.Update()

    def _create_tfs(self):
        # Build default opacity/colour transfer functions around the
        # configured threshold.
        otf = vtk.vtkPiecewiseFunction()
        ctf = vtk.vtkColorTransferFunction()
        otf.RemoveAllPoints()
        t = self._config.threshold
        p1 = t - t / 10.0
        p2 = t + t / 5.0
        print "MIP: %.2f - %.2f" % (p1, p2)
        otf.AddPoint(p1, 0.0)
        otf.AddPoint(p2, 1.0)
        otf.AddPoint(self._config.threshold, 1.0)
        ctf.RemoveAllPoints()
        ctf.AddHSVPoint(p1, 0.1, 0.7, 1.0)
        #ctf.AddHSVPoint(p2, *self._config.mip_colour)
        ctf.AddHSVPoint(p2, 0.65, 0.7, 1.0)
        return (otf, ctf)

    def _create_pipeline(self):
        # setup our pipeline; the mapper is chosen later in config_to_logic
        self._volume_property = vtk.vtkVolumeProperty()
        self._volume_property.ShadeOn()
        self._volume_mapper = None
        self._volume = vtk.vtkVolume()
        self._volume.SetProperty(self._volume_property)
        self._volume.SetMapper(self._volume_mapper)
|
nagyistoce/devide
|
modules/filters/VolumeRender.py
|
Python
|
bsd-3-clause
| 9,232
|
[
"VTK"
] |
b7d5bd52e8c7b25fdbee9770e191ed84f68225ac1b2c550c1201a8545b35d42c
|
import os, sys
import numpy as np
import pandas as pd
from pytrack_analysis import Node
from pytrack_analysis.cli import colorprint, flprint, prn
import pytrack_analysis.preprocessing as prp
from pkg_resources import get_distribution
__version__ = get_distribution('pytrack_analysis').version
"""
Kinematics class: loads centroid data and metadata >> processes and returns kinematic data
"""
class Kinematics(Node):
    """Loads centroid tracking data plus metadata and computes kinematic
    quantities: headings, angular/linear speeds and distances.
    """
    def __init__(self, _df, _meta, body=('body_x', 'body_y'), head=('head_x', 'head_y'), time='elapsed_time', dt='frame_dt', angle='angle', ma='major', mi='minor'):
        """
        Initializes the class. Setting up internal variables for input data; setting up logging.
        """
        Node.__init__(self, _df, _meta)
        ### data check: required dataframe columns and output stat columns
        self.keys = [body[0], body[1], head[0], head[1], time, dt, angle, ma, mi]
        self.statcols = ['session', 'day', 'daytime', 'condition', 'position', 'head_speed', 'body_speed', 'distance', 'min_dpatch', 'dcenter', 'abs_turn_rate', 'major', 'minor', 'mistracks']
        assert (all([(key in _df.keys()) for key in self.keys])), '[ERROR] Some keys not found in dataframe.'

    def get_angle(self, _data_origin, _data_tip):
        """
        Returns angular heading in degrees for given origin and tip positions.
        """
        xb, yb = np.array(_data_origin.iloc[:,0]), np.array(_data_origin.iloc[:,1])
        xh, yh = np.array(_data_tip.iloc[:,0]), np.array(_data_tip.iloc[:,1])
        dx, dy = xh-xb, yh-yb
        angle = np.arctan2(dy,dx)
        angle = np.degrees(angle)
        return angle

    def get_angular_speed(self, _data, _dt):
        """
        Returns angular turning rate (deg per time unit) for a time series of angles.
        """
        speed = np.append(0, np.diff(_data))
        speed[speed>180] -= 360.   ## correction for circularity
        speed[speed<-180] += 360.  ## correction for circularity
        speed = np.divide(speed, _dt)  ## divide by time increments
        return speed

    def get_distance(self, _data):
        """Euclidean distance of each (x, y) row from the origin."""
        dist_sq = np.square(_data.iloc[:,0]) + np.square(_data.iloc[:,1])
        return np.sqrt(dist_sq)

    def get_distance_to_patch(self, _data, _patch):
        """Euclidean distance of each (x, y) row from the patch centre."""
        xp, yp = _patch["x"], _patch["y"]
        dist_sq = np.square(_data.iloc[:, 0] - xp) + np.square(_data.iloc[:, 1] - yp)
        return np.sqrt(dist_sq)

    def get_forward_speed(self, _X):
        # Not implemented yet.
        pass

    def get_linear_speed(self, _data, _dt):
        """Frame-to-frame linear speed of an (x, y) time series."""
        x, y = _data.columns[0], _data.columns[1]
        ### take differences between frames for displacement and divide by dt
        xdiff = np.divide(np.append(0, np.diff(_data[x])), _dt)
        ydiff = np.divide(np.append(0, np.diff(_data[y])), _dt)
        ### linear speed is the squareroot of squared displacements in x and y (Pythagoras' theorem)
        speed = np.sqrt(np.square(xdiff) + np.square(ydiff))
        return speed

    def get_sideward_speed(self, _X):
        # Not implemented yet.
        pass

    def hist(self, x, bins=None):
        """Normalised histogram of column ``x`` of the kinematics output."""
        # BUG FIX: 'self' was missing from the signature, so the references
        # to self.outdf below raised NameError whenever this was called as
        # an instance method.
        counts, _ = np.histogram(self.outdf[x], bins=bins)  # arguments are passed to np.histogram
        counts = counts/np.sum(counts)  # normalize
        return counts

    def run(self, save_as=None, ret=False, VERBOSE=True):
        """
        returns kinematic data from running kinematics analysis for a session

        Parameters
        ----------
        save_as : str or None
            Directory to write the per-session CSV to (None = don't save).
        ret : bool
            If True, return the output dataframe even when it was saved.
        VERBOSE : bool
            Print progress information.
        """
        ### data from file
        # positions body and head
        bx, by = self.keys[0], self.keys[1]
        body_pos = self.df[[bx, by]]
        hx, hy = self.keys[2], self.keys[3]
        head_pos = self.df[[hx, hy]]
        # get frame duration from framerate
        #dt = 0.0333
        time = pd.DataFrame({}, index=body_pos.index)
        first = self.meta['video']['first_frame']
        ### adjusted time (starts at 0)
        time['elapsed_time'] = self.df[self.keys[4]] - self.df.loc[first,self.keys[4]]
        time['frame_dt'] = self.df[self.keys[5]]
        # orientation
        angle = self.df[self.keys[6]]
        # major and minor lengths
        makey, mikey = self.keys[7], self.keys[8]
        major = self.df[makey]
        minor = self.df[mikey]
        ###
        ### this prints out header
        if VERBOSE:
            prn(__name__)
            flprint("{0:8s} (condition: {1:3s})...".format(self.session_name, str(self.meta['fly']['metabolic'])))
        ###
        ### These are the steps for kinematic analysis
        ## STEP 1: NaN removal + interpolation
        body_pos, head_pos = prp.interpolate(body_pos, head_pos)
        ## STEP 2: Gaussian filtering
        window_len = 10  # now: 10/0.333 s #### before used (15/0.5 s)
        sigma = window_len/10.
        body_pos, head_pos = prp.gaussian_filter(body_pos, head_pos, _len=window_len, _sigma=sigma)
        ## STEP 3: Distance from patch
        distances = pd.DataFrame({}, index=body_pos.index)
        for ix, each_spot in enumerate(self.meta['food_spots']):
            distances['dpatch_'+str(ix)] = self.get_distance_to_patch(head_pos, each_spot)
        distances['min_dpatch'] = np.amin(distances, axis=1)
        distances['dcenter'] = self.get_distance(head_pos)
        ## STEP 4: Linear Speed
        speed = pd.DataFrame({}, index=body_pos.index)
        speed['displacements'] = self.get_linear_speed(body_pos, 1)
        speed['head_speed'] = self.get_linear_speed(head_pos, time['frame_dt'])
        speed['body_speed'] = self.get_linear_speed(body_pos, time['frame_dt'])
        ## STEP 5: Smoothing speed
        window_len = 36  # now: 36/1.2 s #### before used (60/2 s)
        speed['sm_head_speed'] = prp.gaussian_filter_np(speed[['head_speed']], _len=window_len, _sigma=window_len/10)
        speed['sm_body_speed'] = prp.gaussian_filter_np(speed[['body_speed']], _len=window_len, _sigma=window_len/10)
        window_len = 72  # now: 72/2.4 s #### before used (120/4 s)
        speed['smm_head_speed'] = prp.gaussian_filter_np(speed[['sm_head_speed']], _len=window_len, _sigma=window_len/10)
        speed['smm_body_speed'] = prp.gaussian_filter_np(speed[['sm_body_speed']], _len=window_len, _sigma=window_len/10)
        ## STEP 6: Angular Heading & Speed
        angular = pd.DataFrame({}, index=body_pos.index)
        angular['new_angle'] = self.get_angle(body_pos, head_pos)
        angular['old_angle'] = np.degrees(angle)
        angular['angular_speed'] = self.get_angular_speed(angular['new_angle'], time['frame_dt'])
        window_len = 36  # now: 36/1.2 s #### before used (60/2 s)
        angular['sm_angular_speed'] = prp.gaussian_filter_np(angular[['angular_speed']], _len=window_len, _sigma=window_len/10)
        ### DONE
        ### Prepare output to DataFrame or file
        ### rounding data
        time, body_pos, head_pos, distances, speed, angular = time.round(6), body_pos.round(4), head_pos.round(4), distances.round(4), speed.round(4), angular.round(3)
        listdfs = [time, body_pos, head_pos, distances, speed, angular]
        self.outdf = pd.concat(listdfs, axis=1)
        if VERBOSE: colorprint('done.', color='success')
        if save_as is not None:
            outfile = os.path.join(save_as, self.session_name+'_'+self.name+'.csv')
            self.outdf.to_csv(outfile, index_label='frame')
        if ret or save_as is None:
            return self.outdf

    def stats(self):
        """Return a one-row summary DataFrame of the session's kinematics
        (columns as in self.statcols). Requires run() to have been called
        so that self.outdf exists.
        """
        data = []
        data.append(self.session_name)
        data.append(self.meta['datetime'].date())
        data.append(self.meta['datetime'].hour)
        data.append(self.meta['condition'])
        data.append(self.meta['arena']['name'])
        data.append(self.outdf['smm_head_speed'].mean())
        data.append(self.outdf['smm_body_speed'].mean())
        data.append(np.array(self.outdf['displacements'].cumsum())[-1])
        data.append(self.outdf['min_dpatch'].mean())
        data.append(self.outdf['dcenter'].mean())
        data.append(np.abs(self.outdf['angular_speed']).mean())
        data.append(self.df['major'].mean())
        data.append(self.df['minor'].mean())
        data.append(self.meta['flags']['mistracked_frames'])
        statsdict = {}
        for i, each_col in enumerate(self.statcols):
            statsdict[each_col] = [data[i]]
        statdf = pd.DataFrame(statsdict)
        statdf = statdf.reindex(columns=['session', 'day', 'daytime', 'condition', 'position', 'head_speed', 'body_speed', 'distance', 'min_dpatch', 'dcenter', 'abs_turn_rate', 'major', 'minor', 'mistracks'])
        return statdf
|
degoldschmidt/pytrack-analysis
|
pytrack_analysis/kinematics.py
|
Python
|
gpl-3.0
| 8,362
|
[
"Gaussian"
] |
100fbe6b0196e8dc7b170f25dbc5af1c50f3d5fd0bf09ba41a22035edede5120
|
# Copyright 2013 Mark Chilenski
# This program is distributed under the terms of the GNU General Purpose License (GPL).
# Refer to http://www.gnu.org/licenses/gpl.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Provides convenient utilities for working with the classes and results from :py:mod:`gptools`.
"""
from __future__ import division
import collections
import warnings
import scipy
import scipy.optimize
import scipy.special
import scipy.stats
import numpy.random
import copy
import itertools
try:
import emcee
except ImportError:
warnings.warn(
"Could not import emcee: MCMC sampling will not be available.",
ImportWarning
)
try:
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.widgets as mplw
import matplotlib.gridspec as mplgs
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.patches as mplp
except ImportError:
warnings.warn(
"Could not import matplotlib. Plotting functions will not be available!",
ImportWarning
)
class JointPrior(object):
    """Abstract class for objects implementing joint priors over hyperparameters.

    In addition to the abstract methods defined in this template,
    implementations should also have an attribute named `bounds` which contains
    the bounds (for a prior with finite bounds) or the 95%% interval (for a
    prior which is unbounded in at least one direction).
    """
    def __init__(self, i=1.0):
        """Sets the interval that :py:attr:`bounds` should return.

        Parameters
        ----------
        i : float, optional
            The interval to return. Default is 1.0 (100%%). Another useful value
            is 95%%.
        """
        # BUG FIX: this previously assigned the literal 1.0, silently
        # discarding the caller-supplied interval.
        self.i = i

    def __call__(self, theta, hyper_deriv=None):
        """Evaluate the prior log-PDF at the given values of the hyperparameters, theta.

        Parameters
        ----------
        theta : array-like, (`num_params`,)
            The hyperparameters to evaluate the log-PDF at.
        hyper_deriv : int or None, optional
            If present, return the derivative of the log-PDF with respect to
            the variable with this index.
        """
        raise NotImplementedError("__call__ must be implemented in your own class.")

    def random_draw(self, size=None):
        """Draw random samples of the hyperparameters.

        Parameters
        ----------
        size : None, int or array-like, optional
            The number/shape of samples to draw. If None, only one sample is
            returned. Default is None.
        """
        raise NotImplementedError("random_draw must be implemented in your own class.")

    def sample_u(self, q):
        r"""Extract a sample from random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the inverse
        CDF. To facilitate efficient sampling, this function returns a *vector*
        of PPF values, one value for each variable. Basically, the idea is that,
        given a vector :math:`q` of `num_params` values each of which is
        distributed uniformly on :math:`[0, 1]`, this function will return
        corresponding samples for each variable.

        Parameters
        ----------
        q : array-like, (`num_params`,)
            Values between 0 and 1 to evaluate inverse CDF at.
        """
        raise NotImplementedError("ppf must be implemented in your own class.")

    def elementwise_cdf(self, p):
        r"""Convert a sample to random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the CDF. To
        facilitate efficient sampling, this function returns a *vector* of CDF
        values, one value for each variable. Basically, the idea is that, given
        a vector :math:`q` of `num_params` values each of which is distributed
        according to the prior, this function will return variables uniform on
        :math:`[0, 1]` corresponding to each variable. This is the inverse
        operation to :py:meth:`sample_u`.

        Parameters
        ----------
        p : array-like, (`num_params`,)
            Values to evaluate CDF at.
        """
        raise NotImplementedError("cdf must be implemented in your own class.")

    def __mul__(self, other):
        """Multiply two :py:class:`JointPrior` instances together.
        """
        return ProductJointPrior(self, other)
class CombinedBounds(object):
    """Object to support reassignment of the bounds from a combined prior.

    Presents the concatenation of `l1` and `l2` as a single mutable
    sequence: element ``i`` with ``i < len(l1)`` lives in `l1`, the rest
    live in `l2`. Works for any types of arrays.

    Parameters
    ----------
    l1 : array-like
        The first list.
    l2 : array-like
        The second list.
    """
    # TODO: This could use a lot more work!
    def __init__(self, l1, l2):
        self.l1 = l1
        self.l2 = l2

    def __getitem__(self, pos):
        """Get the item(s) at `pos`.

        `pos` can be a basic slice object. But, the method is implemented by
        turning the internal array-like objects into lists, so only the basic
        indexing capabilities supported by the list data type can be used.
        """
        return (list(self.l1) + list(self.l2))[pos]

    def __setitem__(self, pos, value):
        """Set the item at location `pos` to `value`.

        Only works for scalar indices. Negative indices are interpreted
        relative to the end of the *combined* sequence, matching list
        semantics.

        Raises
        ------
        IndexError
            If `pos` is outside the combined sequence.
        """
        # BUG FIX: normalize negative indices against the combined length.
        # Previously a negative index satisfied `pos < len(self.l1)` and so
        # incorrectly wrote to the tail of l1.
        if pos < 0:
            pos += len(self)
        if pos < 0 or pos >= len(self):
            raise IndexError("index out of range")
        if pos < len(self.l1):
            self.l1[pos] = value
        else:
            self.l2[pos - len(self.l1)] = value

    def __len__(self):
        """Get the length of the combined arrays.
        """
        return len(self.l1) + len(self.l2)

    def __invert__(self):
        """Return the elementwise inverse.
        """
        # NOTE(review): scipy.asarray is a legacy NumPy re-export removed in
        # modern SciPy -- presumably this file targets an old SciPy; confirm.
        return ~scipy.asarray(self)

    def __str__(self):
        """Get user-friendly string representation.
        """
        return str(self[:])

    def __repr__(self):
        """Get exact string representation.
        """
        return str(self) + " from CombinedBounds(" + str(self.l1) + ", " + str(self.l2) + ")"
class MaskedBounds(object):
    """Present a writable view of selected entries of an array.

    Indexing a :py:class:`MaskedBounds` at position `pos` reads or writes
    ``a[m[pos]]``, so only the entries of `a` listed in `m` are reachable.
    This supports reassignment of free parameter bounds.

    Parameters
    ----------
    a : array
        The array to be masked.
    m : array of int
        The indices in `a` which are to be accessible.
    """
    def __init__(self, a, m):
        self.a = a
        self.m = m

    def __getitem__(self, pos):
        """Return the element(s) of `a` selected by ``m[pos]``."""
        return self.a[self.m[pos]]

    def __setitem__(self, pos, value):
        """Assign `value` to the element(s) of `a` selected by ``m[pos]``."""
        self.a[self.m[pos]] = value

    def __len__(self):
        """Return the number of accessible entries."""
        return len(self.m)

    def __str__(self):
        """Return a user-friendly string representation (the visible entries)."""
        return str(self[:])

    def __repr__(self):
        """Return an exact string representation."""
        return "%s from MaskedBounds(%s, %s)" % (self, self.a, self.m)
class ProductJointPrior(JointPrior):
    """Product of two independent priors.

    The component priors are assumed independent: log-PDFs add, and samples
    for the two parameter blocks are concatenated/stacked.

    Parameters
    ----------
    p1, p2: :py:class:`JointPrior` instances
        The two priors to merge.

    Raises
    ------
    TypeError
        If either argument is not a :py:class:`JointPrior` instance.
    """
    def __init__(self, p1, p2):
        if not isinstance(p1, JointPrior) or not isinstance(p2, JointPrior):
            raise TypeError(
                "Both arguments to ProductPrior must be instances of JointPrior!"
            )
        self.p1 = p1
        self.p2 = p2

    @property
    def i(self):
        # Report the narrower (smaller) interval of the two component priors.
        return min(self.p1.i, self.p2.i)

    @i.setter
    def i(self, v):
        # BUG FIX: the original assigned the undefined name `i` here, which
        # raised NameError. The intent is to propagate the new interval to
        # both component priors.
        self.p1.i = v
        self.p2.i = v

    @property
    def bounds(self):
        # p1's bounds come first, matching the parameter ordering used by
        # __call__/sample_u/elementwise_cdf below.
        return CombinedBounds(self.p1.bounds, self.p2.bounds)

    @bounds.setter
    def bounds(self, v):
        num_p1_bounds = len(self.p1.bounds)
        self.p1.bounds = v[:num_p1_bounds]
        self.p2.bounds = v[num_p1_bounds:]

    def __call__(self, theta, hyper_deriv=None):
        """Evaluate the prior log-PDF at the given values of the hyperparameters, theta.

        The log-PDFs of the two priors are summed.

        Parameters
        ----------
        theta : array-like, (`num_params`,)
            The hyperparameters to evaluate the log-PDF at. The first
            ``len(p1.bounds)`` entries belong to `p1`, the rest to `p2`.
        hyper_deriv : int or None, optional
            If present, return the derivative of the log-PDF with respect to
            the variable with this index.
        """
        p1_num_params = len(self.p1.bounds)
        if hyper_deriv is not None:
            # Dispatch the derivative request to whichever component prior
            # owns the indexed variable.
            if hyper_deriv < p1_num_params:
                return self.p1(theta[:p1_num_params], hyper_deriv=hyper_deriv)
            else:
                return self.p2(theta[p1_num_params:], hyper_deriv=hyper_deriv - p1_num_params)
        return self.p1(theta[:p1_num_params]) + self.p2(theta[p1_num_params:])

    def sample_u(self, q):
        r"""Extract a sample from random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the inverse
        CDF. To facilitate efficient sampling, this function returns a *vector*
        of PPF values, one value for each variable. Basically, the idea is that,
        given a vector :math:`q` of `num_params` values each of which is
        distributed uniformly on :math:`[0, 1]`, this function will return
        corresponding samples for each variable.

        Parameters
        ----------
        q : array-like, (`num_params`,)
            Values between 0 and 1 to evaluate inverse CDF at.
        """
        p1_num_params = len(self.p1.bounds)
        return scipy.concatenate(
            (
                self.p1.sample_u(q[:p1_num_params]),
                self.p2.sample_u(q[p1_num_params:])
            )
        )

    def elementwise_cdf(self, p):
        r"""Convert a sample to random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the CDF. To
        facilitate efficient sampling, this function returns a *vector* of CDF
        values, one value for each variable. This is the inverse operation to
        :py:meth:`sample_u`.

        Parameters
        ----------
        p : array-like, (`num_params`,)
            Values to evaluate CDF at.
        """
        p1_num_params = len(self.p1.bounds)
        return scipy.concatenate(
            (
                self.p1.elementwise_cdf(p[:p1_num_params]),
                self.p2.elementwise_cdf(p[p1_num_params:])
            )
        )

    def random_draw(self, size=None):
        """Draw random samples of the hyperparameters.

        The outputs of the two priors are stacked vertically.

        Parameters
        ----------
        size : None, int or array-like, optional
            The number/shape of samples to draw. If None, only one sample is
            returned. Default is None.
        """
        draw_1 = self.p1.random_draw(size=size)
        draw_2 = self.p2.random_draw(size=size)
        # 1D draws (size=None) are concatenated; multi-sample draws are
        # stacked so axis 0 remains the parameter axis.
        if draw_1.ndim == 1:
            return scipy.hstack((draw_1, draw_2))
        else:
            return scipy.vstack((draw_1, draw_2))
class UniformJointPrior(JointPrior):
    """Uniform prior over the specified bounds.

    Parameters
    ----------
    bounds : list of tuples, (`num_params`,)
        The bounds for each of the random variables.
    ub : list of float, (`num_params`,), optional
        The upper bounds for each of the random variables. If present, `bounds`
        is then taken to be a list of float with the lower bounds. This gives
        :py:class:`UniformJointPrior` a similar calling fingerprint as the other
        :py:class:`JointPrior` classes.
    """
    def __init__(self, bounds, ub=None, **kwargs):
        super(UniformJointPrior, self).__init__(**kwargs)
        if ub is not None:
            try:
                # BUG FIX: wrap zip() in list(). On Python 3, zip() returns a
                # one-shot iterator, which would be silently exhausted the
                # first time self.bounds is iterated (and len() would fail).
                bounds = list(zip(bounds, ub))
            except TypeError:
                # Scalar lb/ub were passed: store a single (lb, ub) pair.
                bounds = [(bounds, ub)]
        self.bounds = bounds

    def __call__(self, theta, hyper_deriv=None):
        """Evaluate the prior log-PDF at the given values of the hyperparameters, theta.

        Parameters
        ----------
        theta : array-like, (`num_params`,)
            The hyperparameters to evaluate the log-PDF at.
        hyper_deriv : int or None, optional
            If present, return the derivative of the log-PDF with respect to
            the variable with this index (zero wherever the PDF is flat).
        """
        if hyper_deriv is not None:
            return 0.0
        ll = 0.0
        for v, b in zip(theta, self.bounds):
            if b[0] <= v and v <= b[1]:
                ll += -scipy.log(b[1] - b[0])
            else:
                # Any out-of-bounds component puts theta outside the support.
                ll = -scipy.inf
                break
        return ll

    def sample_u(self, q):
        r"""Extract a sample from random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the inverse
        CDF, elementwise: given a vector :math:`q` of `num_params` values each
        of which is distributed uniformly on :math:`[0, 1]`, this function
        returns corresponding samples for each variable.

        Parameters
        ----------
        q : array of float
            Values between 0 and 1 to evaluate inverse CDF at.

        Raises
        ------
        ValueError
            If `q` has the wrong length, is not 1D or lies outside [0, 1].
        """
        q = scipy.atleast_1d(q)
        if len(q) != len(self.bounds):
            raise ValueError("length of q must equal the number of parameters!")
        if q.ndim != 1:
            raise ValueError("q must be one-dimensional!")
        if (q < 0).any() or (q > 1).any():
            raise ValueError("q must be within [0, 1]!")
        # Linear map of [0, 1] onto each variable's [lb, ub].
        return scipy.asarray([(b[1] - b[0]) * v + b[0] for v, b in zip(q, self.bounds)])

    def elementwise_cdf(self, p):
        r"""Convert a sample to random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the CDF,
        elementwise. This is the inverse operation to :py:meth:`sample_u`.

        Parameters
        ----------
        p : array-like, (`num_params`,)
            Values to evaluate CDF at.

        Raises
        ------
        ValueError
            If `p` has the wrong length or is not one-dimensional.
        """
        p = scipy.atleast_1d(p)
        if len(p) != len(self.bounds):
            raise ValueError("length of p must equal the number of parameters!")
        if p.ndim != 1:
            raise ValueError("p must be one-dimensional!")
        c = scipy.zeros(len(self.bounds))
        for k in range(0, len(self.bounds)):
            # Clamp to [0, 1] outside the support; linear inside.
            if p[k] <= self.bounds[k][0]:
                c[k] = 0.0
            elif p[k] >= self.bounds[k][1]:
                c[k] = 1.0
            else:
                c[k] = (p[k] - self.bounds[k][0]) / (self.bounds[k][1] - self.bounds[k][0])
        return c

    def random_draw(self, size=None):
        """Draw random samples of the hyperparameters.

        Parameters
        ----------
        size : None, int or array-like, optional
            The number/shape of samples to draw. If None, only one sample is
            returned. Default is None.
        """
        return scipy.asarray([numpy.random.uniform(low=b[0], high=b[1], size=size) for b in self.bounds])
class CoreEdgeJointPrior(UniformJointPrior):
    """Prior for use with Gibbs kernel warping functions with an inequality constraint between the core and edge length scales.

    Behaves like :py:class:`UniformJointPrior`, except that the upper bound
    of parameter 2 is dynamically replaced by the value of parameter 1, so
    that draws/evaluations enforce ``theta[2] < theta[1]``.
    """
    def __call__(self, theta, hyper_deriv=None):
        """Evaluate the prior log-PDF at the given values of the hyperparameters, theta.

        Parameters
        ----------
        theta : array-like, (`num_params`,)
            The hyperparameters to evaluate the log-PDF at.
        hyper_deriv : int or None, optional
            If present, the derivative of the log-PDF with respect to the
            indexed variable is returned (zero wherever the PDF is flat).
        """
        if hyper_deriv is not None:
            return 0.0
        ll = 0
        # Replace the static upper bound of parameter 2 with theta[1], so the
        # constraint theta[2] < theta[1] is enforced (and the normalization of
        # that component depends on theta[1]).
        # NOTE(review): parameter 1 appears to be the core length scale and
        # parameter 2 the edge length scale, per the class docstring --
        # confirm against the kernel's parameter ordering.
        bounds_new = copy.copy(self.bounds)
        bounds_new[2] = (self.bounds[2][0], theta[1])
        for v, b in zip(theta, bounds_new):
            if b[0] <= v and v <= b[1]:
                ll += -scipy.log(b[1] - b[0])
            else:
                # Any component outside its (possibly dynamic) bounds puts
                # theta outside the support.
                ll = -scipy.inf
                break
        return ll

    def sample_u(self, q):
        r"""Extract a sample from random variates uniform on :math:`[0, 1]`.

        Not implemented for this class: the inter-parameter constraint means
        the elementwise PPF is not a simple box transform.

        Parameters
        ----------
        q : array of float
            Values between 0 and 1 to evaluate inverse CDF at.

        Raises
        ------
        NotImplementedError
            Always.
        """
        # TODO: Do this!
        raise NotImplementedError("Not done yet!")

    def elementwise_cdf(self, p):
        r"""Convert a sample to random variates uniform on :math:`[0, 1]`.

        Not implemented for this class: the inter-parameter constraint means
        the elementwise CDF is not a simple box transform.

        Parameters
        ----------
        p : array-like, (`num_params`,)
            Values to evaluate CDF at.

        Raises
        ------
        NotImplementedError
            Always.
        """
        # TODO: Do this!
        raise NotImplementedError("Not done yet!")

    def random_draw(self, size=None):
        """Draw random samples of the hyperparameters.

        Parameters
        ----------
        size : None, int or array-like, optional
            The number/shape of samples to draw. If None, only one sample is
            returned. Default is None.
        """
        # With size=None we still draw one sample, but remember to flatten
        # the result before returning.
        if size is None:
            size = 1
            single_val = True
        else:
            single_val = False
        # Output shape is (num_params,) + shape(size); the except branch
        # handles scalar sizes.
        out_shape = [len(self.bounds)]
        try:
            out_shape.extend(size)
        except TypeError:
            out_shape.append(size)
        out = scipy.zeros(out_shape)
        for j in range(0, len(self.bounds)):
            if j != 2:
                out[j, :] = numpy.random.uniform(low=self.bounds[j][0],
                                                 high=self.bounds[j][1],
                                                 size=size)
            else:
                # Parameter 2 is drawn conditionally on the already-drawn
                # parameter 1 (index j - 1) to enforce theta[2] < theta[1].
                out[j, :] = numpy.random.uniform(low=self.bounds[j][0],
                                                 high=out[j - 1, :],
                                                 size=size)
        if not single_val:
            return out
        else:
            return out.ravel()
class CoreMidEdgeJointPrior(UniformJointPrior):
    """Prior for use with Gibbs kernel warping functions with an inequality constraint between the core, mid and edge length scales and the core-mid and mid-edge joins.

    Behaves like :py:class:`UniformJointPrior`, except that the upper bound
    of parameter 6 is dynamically replaced by the value of parameter 7, so
    that ``theta[6] < theta[7]`` (the two join locations) is enforced. The
    commented-out code shows additional length-scale constraints that are
    currently disabled.
    """
    def __call__(self, theta, hyper_deriv=None):
        """Evaluate the prior log-PDF at the given values of the hyperparameters, theta.

        Parameters
        ----------
        theta : array-like, (`num_params`,)
            The hyperparameters to evaluate the log-PDF at.
        hyper_deriv : int or None, optional
            If present, the derivative of the log-PDF with respect to the
            indexed variable is returned (zero wherever the PDF is flat).
        """
        if hyper_deriv is not None:
            return 0.0
        ll = 0
        bounds_new = copy.copy(self.bounds)
        # Disabled constraints, kept for reference:
        # lc < lm:
        # bounds_new[1] = (self.bounds[1][0], theta[2])
        # le < lm:
        # bounds_new[3] = (self.bounds[3][0], theta[2])
        # Active constraint -- the core-mid join must lie below the mid-edge
        # join: xa < xb:
        bounds_new[6] = (self.bounds[6][0], theta[7])
        for v, b in zip(theta, bounds_new):
            if b[0] <= v and v <= b[1]:
                ll += -scipy.log(b[1] - b[0])
            else:
                # Any component outside its (possibly dynamic) bounds puts
                # theta outside the support.
                ll = -scipy.inf
                break
        return ll

    def sample_u(self, q):
        r"""Extract a sample from random variates uniform on :math:`[0, 1]`.

        Not implemented for this class: the inter-parameter constraint means
        the elementwise PPF is not a simple box transform.

        Parameters
        ----------
        q : array of float
            Values between 0 and 1 to evaluate inverse CDF at.

        Raises
        ------
        NotImplementedError
            Always.
        """
        # TODO: Do this!
        raise NotImplementedError("Not done yet!")

    def elementwise_cdf(self, p):
        r"""Convert a sample to random variates uniform on :math:`[0, 1]`.

        Not implemented for this class: the inter-parameter constraint means
        the elementwise CDF is not a simple box transform.

        Parameters
        ----------
        p : array-like, (`num_params`,)
            Values to evaluate CDF at.

        Raises
        ------
        NotImplementedError
            Always.
        """
        # TODO: Do this!
        raise NotImplementedError("Not done yet!")

    def random_draw(self, size=None):
        """Draw random samples of the hyperparameters.

        Parameters
        ----------
        size : None, int or array-like, optional
            The number/shape of samples to draw. If None, only one sample is
            returned. Default is None.
        """
        # With size=None we still draw one sample, but remember to flatten
        # the result before returning.
        if size is None:
            size = 1
            single_val = True
        else:
            single_val = False
        # Output shape is (num_params,) + shape(size); the except branch
        # handles scalar sizes.
        out_shape = [len(self.bounds)]
        try:
            out_shape.extend(size)
        except TypeError:
            out_shape.append(size)
        out = scipy.zeros(out_shape)
        # Unconstrained parameters are drawn from their static boxes.
        # sigma_f, lm, la, lb, xb:
        for j in [0, 1, 2, 3, 4, 5, 7]:
            out[j, :] = numpy.random.uniform(low=self.bounds[j][0],
                                             high=self.bounds[j][1],
                                             size=size)
        # Disabled conditional draws, kept for reference:
        # lc, le:
        # for j in [1, 3]:
        #     out[j, :] = numpy.random.uniform(low=self.bounds[j][0],
        #                                      high=out[2, :],
        #                                      size=size)
        # Parameter 6 is drawn conditionally on the already-drawn parameter 7
        # to enforce theta[6] < theta[7].
        # xa:
        out[6, :] = numpy.random.uniform(low=self.bounds[6][0],
                                         high=out[7, :],
                                         size=size)
        if not single_val:
            return out
        else:
            return out.ravel()
class IndependentJointPrior(JointPrior):
    """Joint prior for which each hyperparameter is independent.

    Parameters
    ----------
    univariate_priors : list of callables or rv_frozen, (`num_params`,)
        The univariate priors for each hyperparameter. Entries in this list
        can either be a callable that takes as an argument the entire list of
        hyperparameters or a frozen instance of a distribution from
        :py:mod:`scipy.stats`.
    **kwargs : optional keyword arguments
        Passed to the :py:class:`JointPrior` constructor.
    """
    def __init__(self, univariate_priors, **kwargs):
        # BUG FIX: the signature previously did not accept **kwargs, so the
        # super().__init__(**kwargs) call below raised NameError on every
        # construction.
        super(IndependentJointPrior, self).__init__(**kwargs)
        self.univariate_priors = univariate_priors

    def __call__(self, theta, hyper_deriv=None):
        """Evaluate the prior log-PDF at the given values of the hyperparameters, theta.

        Parameters
        ----------
        theta : array-like, (`num_params`,)
            The hyperparameters to evaluate the log-PDF at.
        hyper_deriv : int or None, optional
            Not supported by this class.

        Raises
        ------
        NotImplementedError
            If `hyper_deriv` is not None.
        """
        if hyper_deriv is not None:
            raise NotImplementedError(
                "Hyperparameter derivatives not supported for IndependentJointPrior!"
            )
        ll = 0
        for v, p in zip(theta, self.univariate_priors):
            # EAFP: a plain callable prior receives the whole theta vector; a
            # frozen scipy.stats distribution (not callable) raises TypeError
            # and we fall back to its logpdf on the single component.
            try:
                ll += p(theta)
            except TypeError:
                ll += p.logpdf(v)
        return ll

    @property
    def bounds(self):
        """The bounds of the random variable.

        Set `self.i=0.95` to return the 95% interval if this is used for setting
        bounds on optimizers/etc. where infinite bounds may not be useful.
        """
        return [p.interval(self.i) for p in self.univariate_priors]

    def sample_u(self, q):
        r"""Extract a sample from random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the inverse
        CDF, elementwise: given a vector :math:`q` of `num_params` values each
        of which is distributed uniformly on :math:`[0, 1]`, this function
        returns corresponding samples for each variable.

        Parameters
        ----------
        q : array of float
            Values between 0 and 1 to evaluate inverse CDF at.

        Raises
        ------
        ValueError
            If `q` has the wrong length, is not 1D or lies outside [0, 1].
        """
        q = scipy.atleast_1d(q)
        if len(q) != len(self.univariate_priors):
            raise ValueError("length of q must equal the number of parameters!")
        if q.ndim != 1:
            raise ValueError("q must be one-dimensional!")
        if (q < 0).any() or (q > 1).any():
            raise ValueError("q must be within [0, 1]!")
        return scipy.asarray([p.ppf(v) for v, p in zip(q, self.univariate_priors)])

    def elementwise_cdf(self, p):
        r"""Convert a sample to random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the CDF,
        elementwise. This is the inverse operation to :py:meth:`sample_u`.

        Parameters
        ----------
        p : array-like, (`num_params`,)
            Values to evaluate CDF at.

        Raises
        ------
        ValueError
            If `p` has the wrong length or is not one-dimensional.
        """
        p = scipy.atleast_1d(p)
        if len(p) != len(self.univariate_priors):
            raise ValueError("length of p must equal the number of parameters!")
        if p.ndim != 1:
            raise ValueError("p must be one-dimensional!")
        return scipy.asarray([pr.cdf(v) for v, pr in zip(p, self.univariate_priors)])

    def random_draw(self, size=None):
        """Draw random samples of the hyperparameters.

        Parameters
        ----------
        size : None, int or array-like, optional
            The number/shape of samples to draw. If None, only one sample is
            returned. Default is None.
        """
        return scipy.asarray([p.rvs(size=size) for p in self.univariate_priors])
class NormalJointPrior(JointPrior):
    """Joint prior under which each hyperparameter is independently normal.

    The means and standard deviations (the hyper-hyperparameters) are fixed
    at construction.

    Parameters
    ----------
    mu : list of float, same size as `sigma`
        Means of the hyperparameters.
    sigma : list of float
        Standard deviations of the hyperparameters.
    """
    def __init__(self, mu, sigma, **kwargs):
        super(NormalJointPrior, self).__init__(**kwargs)
        sigma = scipy.atleast_1d(scipy.asarray(sigma, dtype=float))
        mu = scipy.atleast_1d(scipy.asarray(mu, dtype=float))
        if sigma.shape != mu.shape:
            raise ValueError("sigma and mu must have the same shape!")
        if sigma.ndim != 1:
            raise ValueError("sigma and mu must both be one dimensional!")
        self.sigma = sigma
        self.mu = mu

    def __call__(self, theta, hyper_deriv=None):
        """Evaluate the prior log-PDF at the hyperparameter values `theta`.

        Parameters
        ----------
        theta : array-like, (`num_params`,)
            The hyperparameters to evaluate the log-PDF at.
        hyper_deriv : int or None, optional
            If present, return the derivative of the log-PDF with respect to
            the variable with this index instead.
        """
        if hyper_deriv is not None:
            # d/dx log N(x; m, s) = (m - x) / s**2
            return (self.mu[hyper_deriv] - theta[hyper_deriv]) / self.sigma[hyper_deriv]**2.0
        # Independence: the joint log-PDF is the sum of the marginals.
        return sum(
            scipy.stats.norm.logpdf(t, loc=m, scale=s)
            for t, s, m in zip(theta, self.sigma, self.mu)
        )

    @property
    def bounds(self):
        """The bounds of the random variable.

        Set `self.i=0.95` to return the 95% interval if this is used for setting
        bounds on optimizers/etc. where infinite bounds may not be useful.
        """
        return [
            scipy.stats.norm.interval(self.i, loc=m, scale=s)
            for s, m in zip(self.sigma, self.mu)
        ]

    def sample_u(self, q):
        r"""Evaluate the elementwise inverse CDF (PPF) at `q`.

        Given a vector :math:`q` of `num_params` values, each uniformly
        distributed on :math:`[0, 1]`, this returns the corresponding sample
        for each variable. This is the inverse operation to
        :py:meth:`elementwise_cdf`.

        Parameters
        ----------
        q : array of float
            Values between 0 and 1 to evaluate inverse CDF at.

        Raises
        ------
        ValueError
            If `q` has the wrong length, is not 1D or lies outside [0, 1].
        """
        q = scipy.atleast_1d(q)
        if len(q) != len(self.sigma):
            raise ValueError("length of q must equal the number of parameters!")
        if q.ndim != 1:
            raise ValueError("q must be one-dimensional!")
        if (q < 0).any() or (q > 1).any():
            raise ValueError("q must be within [0, 1]!")
        return scipy.asarray(
            [scipy.stats.norm.ppf(u, loc=m, scale=s) for u, s, m in zip(q, self.sigma, self.mu)]
        )

    def elementwise_cdf(self, p):
        r"""Evaluate the elementwise CDF at `p`.

        Given a vector :math:`p` of `num_params` values distributed according
        to the prior, this returns values uniform on :math:`[0, 1]`, one per
        variable. This is the inverse operation to :py:meth:`sample_u`.

        Parameters
        ----------
        p : array-like, (`num_params`,)
            Values to evaluate CDF at.

        Raises
        ------
        ValueError
            If `p` has the wrong length or is not one-dimensional.
        """
        p = scipy.atleast_1d(p)
        if len(p) != len(self.sigma):
            raise ValueError("length of p must equal the number of parameters!")
        if p.ndim != 1:
            raise ValueError("p must be one-dimensional!")
        return scipy.asarray(
            [scipy.stats.norm.cdf(v, loc=m, scale=s) for v, s, m in zip(p, self.sigma, self.mu)]
        )

    def random_draw(self, size=None):
        """Draw random samples of the hyperparameters.

        Parameters
        ----------
        size : None, int or array-like, optional
            The number/shape of samples to draw. If None, only one sample is
            returned. Default is None.
        """
        return scipy.asarray(
            [scipy.stats.norm.rvs(loc=m, scale=s, size=size) for s, m in zip(self.sigma, self.mu)]
        )
class LogNormalJointPrior(JointPrior):
    """Joint prior under which each hyperparameter is independently log-normal.

    The hyper-hyperparameters (means and standard deviations of the
    logarithms) are fixed at construction.

    Parameters
    ----------
    mu : list of float, same size as `sigma`
        Means of the logarithms of the hyperparameters.
    sigma : list of float
        Standard deviations of the logarithms of the hyperparameters.
    """
    def __init__(self, mu, sigma, **kwargs):
        super(LogNormalJointPrior, self).__init__(**kwargs)
        sigma = scipy.atleast_1d(scipy.asarray(sigma, dtype=float))
        mu = scipy.atleast_1d(scipy.asarray(mu, dtype=float))
        if sigma.shape != mu.shape:
            raise ValueError("sigma and mu must have the same shape!")
        if sigma.ndim != 1:
            raise ValueError("sigma and mu must both be one dimensional!")
        self.sigma = sigma
        # scipy.stats.lognorm is parameterized by scale = exp(mu), so store
        # the exponentiated means directly.
        self.emu = scipy.exp(mu)

    def __call__(self, theta, hyper_deriv=None):
        """Evaluate the prior log-PDF at the hyperparameter values `theta`.

        Parameters
        ----------
        theta : array-like, (`num_params`,)
            The hyperparameters to evaluate the log-PDF at.
        hyper_deriv : int or None, optional
            If present, return the derivative of the log-PDF with respect to
            the variable with this index instead.
        """
        if hyper_deriv is not None:
            # Analytic derivative of the log-normal log-PDF.
            return -1.0 / theta[hyper_deriv] * (
                1.0 + scipy.log(theta[hyper_deriv] / self.emu[hyper_deriv]) /
                self.sigma[hyper_deriv]**2.0
            )
        # Independence: the joint log-PDF is the sum of the marginals.
        return sum(
            scipy.stats.lognorm.logpdf(t, sd, loc=0, scale=em)
            for t, sd, em in zip(theta, self.sigma, self.emu)
        )

    @property
    def bounds(self):
        """The bounds of the random variable.

        Set `self.i=0.95` to return the 95% interval if this is used for setting
        bounds on optimizers/etc. where infinite bounds may not be useful.
        """
        return [
            scipy.stats.lognorm.interval(self.i, sd, loc=0, scale=em)
            for sd, em in zip(self.sigma, self.emu)
        ]

    def sample_u(self, q):
        r"""Evaluate the elementwise inverse CDF (PPF) at `q`.

        Given a vector :math:`q` of `num_params` values, each uniformly
        distributed on :math:`[0, 1]`, this returns the corresponding sample
        for each variable. This is the inverse operation to
        :py:meth:`elementwise_cdf`.

        Parameters
        ----------
        q : array of float
            Values between 0 and 1 to evaluate inverse CDF at.

        Raises
        ------
        ValueError
            If `q` has the wrong length, is not 1D or lies outside [0, 1].
        """
        q = scipy.atleast_1d(q)
        if len(q) != len(self.sigma):
            raise ValueError("length of q must equal the number of parameters!")
        if q.ndim != 1:
            raise ValueError("q must be one-dimensional!")
        if (q < 0).any() or (q > 1).any():
            raise ValueError("q must be within [0, 1]!")
        return scipy.asarray(
            [scipy.stats.lognorm.ppf(u, sd, loc=0, scale=em) for u, sd, em in zip(q, self.sigma, self.emu)]
        )

    def elementwise_cdf(self, p):
        r"""Evaluate the elementwise CDF at `p`.

        Given a vector :math:`p` of `num_params` values distributed according
        to the prior, this returns values uniform on :math:`[0, 1]`, one per
        variable. This is the inverse operation to :py:meth:`sample_u`.

        Parameters
        ----------
        p : array-like, (`num_params`,)
            Values to evaluate CDF at.

        Raises
        ------
        ValueError
            If `p` has the wrong length or is not one-dimensional.
        """
        p = scipy.atleast_1d(p)
        if len(p) != len(self.sigma):
            raise ValueError("length of p must equal the number of parameters!")
        if p.ndim != 1:
            raise ValueError("p must be one-dimensional!")
        return scipy.asarray(
            [scipy.stats.lognorm.cdf(v, sd, loc=0, scale=em) for v, sd, em in zip(p, self.sigma, self.emu)]
        )

    def random_draw(self, size=None):
        """Draw random samples of the hyperparameters.

        Parameters
        ----------
        size : None, int or array-like, optional
            The number/shape of samples to draw. If None, only one sample is
            returned. Default is None.
        """
        return scipy.asarray(
            [scipy.stats.lognorm.rvs(sd, loc=0, scale=em, size=size) for sd, em in zip(self.sigma, self.emu)]
        )
class GammaJointPrior(JointPrior):
    """Joint prior under which each hyperparameter is independently gamma-distributed.

    The shape and rate hyper-hyperparameters are fixed at construction.

    Parameters
    ----------
    a : list of float, same size as `b`
        Shape parameters.
    b : list of float
        Rate parameters.
    """
    def __init__(self, a, b, **kwargs):
        super(GammaJointPrior, self).__init__(**kwargs)
        a = scipy.atleast_1d(scipy.asarray(a, dtype=float))
        b = scipy.atleast_1d(scipy.asarray(b, dtype=float))
        if a.shape != b.shape:
            raise ValueError("a and b must have the same shape!")
        if a.ndim != 1:
            raise ValueError("a and b must both be one dimensional!")
        self.a = a
        self.b = b

    def __call__(self, theta, hyper_deriv=None):
        """Evaluate the prior log-PDF at the hyperparameter values `theta`.

        Parameters
        ----------
        theta : array-like, (`num_params`,)
            The hyperparameters to evaluate the log-PDF at.
        hyper_deriv : int or None, optional
            If present, return the derivative of the log-PDF with respect to
            the variable with this index instead.
        """
        if hyper_deriv is not None:
            # The general expression (a - 1) / theta - b is indeterminate at
            # theta == 0 when a == 1; the limiting value there is simply -b.
            if self.a[hyper_deriv] == 1.0 and theta[hyper_deriv] == 0.0:
                return -self.b[hyper_deriv]
            return (self.a[hyper_deriv] - 1.0) / theta[hyper_deriv] - self.b[hyper_deriv]
        # Independence: the joint log-PDF is the sum of the marginals.
        # scipy.stats.gamma is parameterized by scale = 1 / rate.
        return sum(
            scipy.stats.gamma.logpdf(t, av, loc=0, scale=1.0 / rv)
            for t, av, rv in zip(theta, self.a, self.b)
        )

    @property
    def bounds(self):
        """The bounds of the random variable.

        Set `self.i=0.95` to return the 95% interval if this is used for setting
        bounds on optimizers/etc. where infinite bounds may not be useful.
        """
        return [
            scipy.stats.gamma.interval(self.i, av, loc=0, scale=1.0 / rv)
            for av, rv in zip(self.a, self.b)
        ]

    def sample_u(self, q):
        r"""Evaluate the elementwise inverse CDF (PPF) at `q`.

        Given a vector :math:`q` of `num_params` values, each uniformly
        distributed on :math:`[0, 1]`, this returns the corresponding sample
        for each variable. This is the inverse operation to
        :py:meth:`elementwise_cdf`.

        Parameters
        ----------
        q : array of float
            Values between 0 and 1 to evaluate inverse CDF at.

        Raises
        ------
        ValueError
            If `q` has the wrong length, is not 1D or lies outside [0, 1].
        """
        q = scipy.atleast_1d(q)
        if len(q) != len(self.a):
            raise ValueError("length of q must equal the number of parameters!")
        if q.ndim != 1:
            raise ValueError("q must be one-dimensional!")
        if (q < 0).any() or (q > 1).any():
            raise ValueError("q must be within [0, 1]!")
        return scipy.asarray(
            [scipy.stats.gamma.ppf(u, av, loc=0, scale=1.0 / rv) for u, av, rv in zip(q, self.a, self.b)]
        )

    def elementwise_cdf(self, p):
        r"""Evaluate the elementwise CDF at `p`.

        Given a vector :math:`p` of `num_params` values distributed according
        to the prior, this returns values uniform on :math:`[0, 1]`, one per
        variable. This is the inverse operation to :py:meth:`sample_u`.

        Parameters
        ----------
        p : array-like, (`num_params`,)
            Values to evaluate CDF at.

        Raises
        ------
        ValueError
            If `p` has the wrong length or is not one-dimensional.
        """
        p = scipy.atleast_1d(p)
        if len(p) != len(self.a):
            raise ValueError("length of p must equal the number of parameters!")
        if p.ndim != 1:
            raise ValueError("p must be one-dimensional!")
        return scipy.asarray(
            [scipy.stats.gamma.cdf(v, av, loc=0, scale=1.0 / rv) for v, av, rv in zip(p, self.a, self.b)]
        )

    def random_draw(self, size=None):
        """Draw random samples of the hyperparameters.

        Parameters
        ----------
        size : None, int or array-like, optional
            The number/shape of samples to draw. If None, only one sample is
            returned. Default is None.
        """
        return scipy.asarray(
            [scipy.stats.gamma.rvs(av, loc=0, scale=1.0 / rv, size=size) for av, rv in zip(self.a, self.b)]
        )
class GammaJointPriorAlt(GammaJointPrior):
    """Joint prior for which each hyperparameter has a gamma prior with fixed hyper-hyperparameters.

    This is an alternate form that lets you specify the mode and standard
    deviation instead of the shape and rate parameters. The shape `a` and
    rate `b` are exposed as read-only properties computed from `m` and `s`.

    Parameters
    ----------
    m : list of float, same size as `s`
        Modes
    s : list of float
        Standard deviations
    i : float, optional
        Interval used by :py:attr:`bounds`. Default is 1.0.
    """
    def __init__(self, m, s, i=1.0):
        self.i = i
        m = scipy.atleast_1d(scipy.asarray(m, dtype=float))
        s = scipy.atleast_1d(scipy.asarray(s, dtype=float))
        if m.shape != s.shape:
            # BUG FIX: the messages previously referred to a nonexistent
            # parameter "mu"; this class's parameters are `m` and `s`.
            raise ValueError("m and s must have the same shape!")
        if m.ndim != 1:
            raise ValueError("m and s must both be one dimensional!")
        self.m = m
        self.s = s

    @property
    def a(self):
        # Shape parameter implied by the requested mode:
        # mode = (a - 1) / b  =>  a = 1 + b * m.
        return 1.0 + self.b * self.m

    @property
    def b(self):
        # Rate parameter solving mode = (a - 1) / b and var = a / b**2
        # simultaneously for the given mode m and standard deviation s.
        return (self.m + scipy.sqrt(self.m**2 + 4.0 * self.s**2)) / (2.0 * self.s**2)
class SortedUniformJointPrior(JointPrior):
    """Joint prior for a set of variables which must be strictly increasing but are otherwise uniformly-distributed.

    Parameters
    ----------
    num_var : int
        The number of variables represented.
    lb : float
        The lower bound for all of the variables.
    ub : float
        The upper bound for all of the variables.
    """
    def __init__(self, num_var, lb, ub, **kwargs):
        super(SortedUniformJointPrior, self).__init__(**kwargs)
        self.num_var = num_var
        self.lb = lb
        self.ub = ub

    def __call__(self, theta, hyper_deriv=None):
        """Evaluate the log-probability of the variables.

        Parameters
        ----------
        theta : array
            The parameters to find the log-probability of.
        hyper_deriv : int or None, optional
            If present, the derivative of the log-PDF with respect to the
            indexed variable is returned (zero wherever the PDF is flat).
        """
        if hyper_deriv is not None:
            return 0.0
        theta = scipy.asarray(theta)
        # BUG FIX: the sortedness test previously used .all(), which only
        # rejected vectors where *every* element differed from sorted order;
        # a partially out-of-order theta (e.g. [1, 3, 2]) was wrongly
        # accepted as in-support. Use .any() to reject any deviation.
        if (scipy.sort(theta) != theta).any() or (theta < self.lb).any() or (theta > self.ub).any():
            return -scipy.inf
        else:
            # Density of num_var sorted uniforms: num_var! / (ub - lb)**num_var.
            # NOTE(review): scipy.misc.factorial was removed in modern SciPy
            # (scipy.special.factorial is the replacement); kept as-is to
            # match the SciPy version this file targets.
            return (
                scipy.log(scipy.misc.factorial(self.num_var)) -
                self.num_var * scipy.log(self.ub - self.lb)
            )

    @property
    def bounds(self):
        # All variables share the same static box; the ordering constraint is
        # handled in __call__/sample_u/elementwise_cdf.
        return [(self.lb, self.ub)] * self.num_var

    def sample_u(self, q):
        r"""Extract a sample from random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the inverse
        CDF. To facilitate efficient sampling, this function returns a *vector*
        of PPF values, one value for each variable, based on the conditional
        marginal distributions of the order statistics.

        Parameters
        ----------
        q : array of float
            Values between 0 and 1 to evaluate inverse CDF at.

        Raises
        ------
        ValueError
            If `q` has the wrong length, is not 1D or lies outside [0, 1].
        """
        q = scipy.atleast_1d(q)
        if len(q) != self.num_var:
            raise ValueError("length of q must equal the number of parameters!")
        if q.ndim != 1:
            raise ValueError("q must be one-dimensional!")
        if (q < 0).any() or (q > 1).any():
            raise ValueError("q must be within [0, 1]!")
        # Old way, not quite correct:
        # q = scipy.sort(q)
        # return scipy.asarray([(self.ub - self.lb) * v + self.lb for v in q])
        # New way, based on conditional marginals: each component is drawn
        # from the distribution of the minimum of the remaining
        # (num_var - d) uniforms on [previous value, ub].
        out = scipy.zeros_like(q, dtype=float)
        out[0] = self.lb
        for d in range(0, len(out)):
            out[d] = (
                (1.0 - (1.0 - q[d])**(1.0 / (self.num_var - d))) *
                (self.ub - out[max(d - 1, 0)]) + out[max(d - 1, 0)]
            )
        return out

    def elementwise_cdf(self, p):
        r"""Convert a sample to random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the CDF.
        This is the inverse operation to :py:meth:`sample_u`, using the same
        conditional-marginal construction.

        Parameters
        ----------
        p : array-like, (`num_params`,)
            Values to evaluate CDF at.

        Raises
        ------
        ValueError
            If `p` has the wrong length or is not one-dimensional.
        """
        p = scipy.atleast_1d(p)
        if len(p) != len(self.bounds):
            raise ValueError("length of p must equal the number of parameters!")
        if p.ndim != 1:
            raise ValueError("p must be one-dimensional!")
        c = scipy.zeros(len(self.bounds))
        # Conditional marginal CDF of component d given component d - 1
        # (clamped to [0, 1] outside the conditional support):
        for d in range(0, len(c)):
            pdm1 = p[d - 1] if d > 0 else self.lb
            if p[d] <= pdm1:
                c[d] = 0.0
            elif p[d] >= self.ub:
                c[d] = 1.0
            else:
                c[d] = 1.0 - (1.0 - (p[d] - pdm1) / (self.ub - pdm1))**(self.num_var - d)
        return c

    def random_draw(self, size=None):
        """Draw random samples of the hyperparameters.

        Parameters
        ----------
        size : None, int or array-like, optional
            The number/shape of samples to draw. If None, only one sample is
            returned. Default is None.
        """
        if size is None:
            size = 1
            single_val = True
        else:
            single_val = False
        # Output shape is (num_var,) + shape(size); the except branch handles
        # scalar sizes.
        out_shape = [self.num_var]
        try:
            out_shape.extend(size)
        except TypeError:
            out_shape.append(size)
        # Draw i.i.d. uniforms and sort along the parameter axis to obtain
        # the order statistics.
        out = scipy.sort(
            numpy.random.uniform(
                low=self.lb,
                high=self.ub,
                size=out_shape
            ),
            axis=0
        )
        if not single_val:
            return out
        else:
            return out.ravel()
def wrap_fmin_slsqp(fun, guess, opt_kwargs=None):
    """Wrapper for :py:func:`fmin_slsqp` to allow it to be called with :py:func:`minimize`-like syntax.

    This is included to enable the code to run with :py:mod:`scipy` versions
    older than 0.11.0.

    Accepts `opt_kwargs` in the same format as used by
    :py:func:`scipy.optimize.minimize`, with the additional precondition
    that the keyword `method` has already been removed by the calling code.

    Parameters
    ----------
    fun : callable
        The function to minimize.
    guess : sequence
        The initial guess for the parameters.
    opt_kwargs : dict, optional
        Dictionary of extra keywords to pass to
        :py:func:`scipy.optimize.minimize`. Refer to that function's
        docstring for valid options. The keywords 'jac', 'hess' and 'hessp'
        are ignored. Note that if you were planning to use `jac` = True
        (i.e., optimization function returns Jacobian) and have set
        `args` = (True,) to tell :py:meth:`update_hyperparameters` to
        compute and return the Jacobian this may cause unexpected behavior.
        Default is None (no extra keywords).

    Returns
    -------
    Result : namedtuple
        :py:class:`namedtuple` that mimics the fields of the
        :py:class:`Result` object returned by
        :py:func:`scipy.optimize.minimize`. Has the following fields:

        ======= ======= ===================================================================================
        status  int     Code indicating the exit mode of the optimizer (`imode` from :py:func:`fmin_slsqp`)
        success bool    Boolean indicating whether or not the optimizer thinks a minimum was found.
        fun     float   Value of the optimized function (-1*LL).
        x       ndarray Optimal values of the hyperparameters.
        message str     String describing the exit state (`smode` from :py:func:`fmin_slsqp`)
        nit     int     Number of iterations.
        ======= ======= ===================================================================================

    Raises
    ------
    ValueError
        Invalid constraint type in `constraints`. (See documentation for :py:func:`scipy.optimize.minimize`.)
    """
    # Copy so the caller's dict is never mutated (also sidesteps the mutable
    # default argument the original `opt_kwargs={}` signature relied on):
    opt_kwargs = {} if opt_kwargs is None else dict(opt_kwargs)
    opt_kwargs.pop('method', None)
    # Translate minimize-style constraint dicts into fmin_slsqp's separate
    # equality/inequality constraint-function lists:
    eqcons = []
    ieqcons = []
    if 'constraints' in opt_kwargs:
        if isinstance(opt_kwargs['constraints'], dict):
            # Handle the case where a single constraint was passed bare:
            opt_kwargs['constraints'] = [opt_kwargs['constraints'],]
        for con in opt_kwargs.pop('constraints'):
            if con['type'] == 'eq':
                eqcons += [con['fun'],]
            elif con['type'] == 'ineq':
                ieqcons += [con['fun'],]
            else:
                raise ValueError("Invalid constraint type {:s}!".format(con['type']))
    if 'jac' in opt_kwargs:
        warnings.warn("Jacobian not supported for default solver SLSQP!",
                      RuntimeWarning)
        opt_kwargs.pop('jac')
    if 'tol' in opt_kwargs:
        # minimize calls it `tol`, fmin_slsqp calls it `acc`:
        opt_kwargs['acc'] = opt_kwargs.pop('tol')
    if 'options' in opt_kwargs:
        # Flatten the `options` sub-dict into the top-level keywords.
        # The original `dict(a.items() + b.items())` is Python-2-only: dict
        # views cannot be concatenated with `+` on Python 3.
        opt_kwargs.update(opt_kwargs.pop('options'))
    # Other keywords with less likelihood for causing failures are silently ignored:
    opt_kwargs.pop('hess', None)
    opt_kwargs.pop('hessp', None)
    opt_kwargs.pop('callback', None)
    out, fx, its, imode, smode = scipy.optimize.fmin_slsqp(
        fun,
        guess,
        full_output=True,
        eqcons=eqcons,
        ieqcons=ieqcons,
        **opt_kwargs
    )
    Result = collections.namedtuple('Result',
                                    ['status', 'success', 'fun', 'x', 'message', 'nit'])
    return Result(status=imode,
                  success=(imode == 0),
                  fun=fx,
                  x=out,
                  message=smode,
                  nit=its)
def fixed_poch(a, n):
    """Implementation of the Pochhammer symbol :math:`(a)_n` which handles negative integer arguments properly.

    Need conditional statement because scipy's impelementation of the Pochhammer
    symbol is wrong for negative integer arguments. This function uses the
    definition from
    http://functions.wolfram.com/GammaBetaErf/Pochhammer/02/

    Parameters
    ----------
    a : float
        The argument.
    n : nonnegative int
        The order.

    Raises
    ------
    ValueError
        If `n` is not a nonnegative integer.
    """
    if (int(n) != n) or (n < 0):
        raise ValueError("Parameter n must be a nonnegative int!")
    n = int(n)
    # Direct product form: (a)_n = a * (a + 1) * ... * (a + n - 1), with the
    # empty product (n == 0) equal to 1. A plain loop replaces scipy.prod,
    # which was a deprecated numpy alias removed from modern SciPy.
    p = 1.0
    for k in range(n):
        p *= a + k
    return p
def Kn2Der(nu, y, n=0):
    r"""Find the derivatives of :math:`K_\nu(y^{1/2})` with respect to `y`.

    For `n` > 0 the derivative is evaluated with Faa di Bruno's formula for
    the chain rule: the outer derivatives of :math:`K_\nu` are combined with
    incomplete Bell polynomials of the inner derivatives of :math:`y^{1/2}`.

    Parameters
    ----------
    nu : float
        The order of the modified Bessel function of the second kind.
    y : array of float
        The values to evaluate at.
    n : nonnegative int, optional
        The order of derivative to take.
    """
    n = int(n)
    y = numpy.asarray(y, dtype=float)
    sqrty = numpy.sqrt(y)
    if n == 0:
        K = scipy.special.kv(nu, sqrty)
    else:
        K = numpy.zeros_like(y)
        # j-th derivative of y**0.5 is poch(1.5 - j, j) * y**(0.5 - j); these
        # are the arguments of the incomplete Bell polynomials:
        x = numpy.asarray(
            [
                fixed_poch(1.5 - j, j) * y**(0.5 - j)
                for j in numpy.arange(1.0, n + 1.0, dtype=float)
            ]
        ).T
        # Faa di Bruno sum over the k-th outer derivatives of K_nu:
        for k in numpy.arange(1.0, n + 1.0, dtype=float):
            K += (
                scipy.special.kvp(nu, sqrty, n=int(k)) *
                incomplete_bell_poly(n, int(k), x)
            )
    return K
def yn2Kn2Der(nu, y, n=0, tol=5e-4, nterms=1, nu_step=0.001):
    r"""Computes the function :math:`y^{\nu/2} K_{\nu}(y^{1/2})` and its derivatives.

    Care has been taken to handle the conditions at :math:`y=0`.

    For `n=0`, uses a direct evaluation of the expression, replacing points
    where `y=0` with the appropriate value. For `n>0`, uses a general sum
    expression to evaluate the expression, and handles the value at `y=0` using
    a power series expansion. Where it becomes infinite, the infinities will
    have the appropriate sign for a limit approaching zero from the right.

    Uses a power series expansion around :math:`y=0` to avoid numerical issues.

    Handles integer `nu` by performing a linear interpolation between values of
    `nu` slightly above and below the requested value.

    Parameters
    ----------
    nu : float
        The order of the modified Bessel function and the exponent of `y`.
    y : array of float
        The points to evaluate the function at. These are assumed to be
        nonegative.
    n : nonnegative int, optional
        The order of derivative to take. Set to zero (the default) to get the
        value.
    tol : float, optional
        The distance from zero for which the power series is used. Default is
        5e-4.
    nterms : int, optional
        The number of terms to include in the power series. Default is 1.
    nu_step : float, optional
        The amount to vary `nu` by when handling integer values of `nu`. Default
        is 0.001.
    """
    # NOTE(review): this relies on scipy top-level numpy aliases
    # (scipy.asarray, scipy.zeros_like, scipy.arange, scipy.inf) that have
    # been removed from modern SciPy -- confirm the pinned SciPy version.
    n = int(n)
    y = scipy.asarray(y, dtype=float)
    if n == 0:
        # Direct evaluation; the y == 0 entries produce nan from kv and are
        # patched with the known limit on the next line:
        K = y**(nu / 2.0) * scipy.special.kv(nu, scipy.sqrt(y))
        K[y == 0.0] = scipy.special.gamma(nu) / 2.0**(1.0 - nu)
    else:
        # n-th derivative via the generalized product rule: binomial sum over
        # derivatives of y**(nu/2) (the poch/power factors) times derivatives
        # of K_nu(y**0.5) (delegated to Kn2Der):
        K = scipy.zeros_like(y)
        for k in scipy.arange(0.0, n + 1.0, dtype=float):
            K += (
                scipy.special.binom(n, k) * fixed_poch(1.0 + nu / 2.0 - k, k) *
                y**(nu / 2.0 - k) * Kn2Der(nu, y, n=n-k)
            )
        # Do the extra work to handle y == 0 only if we need to:
        mask = (y == 0.0)
        if (mask).any():
            if int(nu) == nu:
                # Integer nu would hit poles of gamma below; approximate by
                # averaging the values at nu -/+ nu_step instead:
                K[mask] = 0.5 * (
                    yn2Kn2Der(nu - nu_step, y[mask], n=n, tol=tol, nterms=nterms, nu_step=nu_step) +
                    yn2Kn2Der(nu + nu_step, y[mask], n=n, tol=tol, nterms=nterms, nu_step=nu_step)
                )
            else:
                if n > nu:
                    # Limit diverges; the gamma/poch prefactor sets the sign of
                    # the infinity for a limit from the right:
                    K[mask] = scipy.special.gamma(-nu) * fixed_poch(1 + nu - n, n) * scipy.inf
                else:
                    # Finite limit at y == 0:
                    K[mask] = scipy.special.gamma(nu) * scipy.special.gamma(n + 1.0) / (
                        2.0**(1.0 - nu + 2.0 * n) * fixed_poch(1.0 - nu, n) *
                        scipy.special.factorial(n)
                    )
    if tol > 0.0:
        # Replace points within tol (absolute distance) of zero with the power
        # series approximation:
        mask = (y <= tol) & (y > 0.0)
        K[mask] = 0.0
        if int(nu) == nu:
            # Same integer-nu workaround as above, applied to the series:
            K[mask] = 0.5 * (
                yn2Kn2Der(nu - nu_step, y[mask], n=n, tol=tol, nterms=nterms, nu_step=nu_step) +
                yn2Kn2Der(nu + nu_step, y[mask], n=n, tol=tol, nterms=nterms, nu_step=nu_step)
            )
        else:
            # Two power-series contributions: one analytic in y...
            for k in scipy.arange(n, n + nterms, dtype=float):
                K[mask] += (
                    scipy.special.gamma(nu) * fixed_poch(1.0 + k - n, n) * y[mask]**(k - n) / (
                        2.0**(1.0 - nu + 2 * k) * fixed_poch(1.0 - nu, k) * scipy.special.factorial(k))
                )
            # ...and one carrying the y**nu branch:
            for k in scipy.arange(0, nterms, dtype=float):
                K[mask] += (
                    scipy.special.gamma(-nu) * fixed_poch(1.0 + nu + k - n, n) *
                    y[mask]**(nu + k - n) / (
                        2.0**(1.0 + nu + 2.0 * k) * fixed_poch(1.0 + nu, k) *
                        scipy.special.factorial(k)
                    )
                )
    return K
def incomplete_bell_poly(n, k, x):
    r"""Recursive evaluation of the incomplete Bell polynomial :math:`B_{n, k}(x)`.

    Evaluates the incomplete Bell polynomial :math:`B_{n, k}(x_1, x_2, \dots, x_{n-k+1})`,
    also known as the partial Bell polynomial or the Bell polynomial of the
    second kind. This polynomial is useful in the evaluation of (the univariate)
    Faa di Bruno's formula which generalizes the chain rule to higher order
    derivatives.

    The implementation here is based on the implementation in:
    :py:func:`sympy.functions.combinatorial.numbers.bell._bell_incomplete_poly`
    Following that function's documentation, the polynomial is computed
    according to the recurrence formula:

    .. math::

        B_{n, k}(x_1, x_2, \dots, x_{n-k+1}) = \sum_{m=1}^{n-k+1}x_m\binom{n-1}{m-1}B_{n-m, k-1}(x_1, x_2, \dots, x_{n-m-k})

    | The end cases are:
    | :math:`B_{0, 0} = 1`
    | :math:`B_{n, 0} = 0` for :math:`n \ge 1`
    | :math:`B_{0, k} = 0` for :math:`k \ge 1`

    Parameters
    ----------
    n : scalar int
        The first subscript of the polynomial.
    k : scalar int
        The second subscript of the polynomial.
    x : :py:class:`Array` of floats, (`p`, `n` - `k` + 1)
        `p` sets of `n` - `k` + 1 points to use as the arguments to
        :math:`B_{n,k}`. The second dimension can be longer than
        required, in which case the extra entries are silently ignored
        (this facilitates recursion without needing to subset the array `x`).

    Returns
    -------
    result : :py:class:`Array`, (`p`,)
        Incomplete Bell polynomial evaluated at the desired values.
    """
    if n == 0 and k == 0:
        return numpy.ones(x.shape[0], dtype=float)
    elif k == 0 and n >= 1:
        return numpy.zeros(x.shape[0], dtype=float)
    elif n == 0 and k >= 1:
        return numpy.zeros(x.shape[0], dtype=float)
    else:
        # Recurrence: loop index m here corresponds to m - 1 in the formula
        # above, hence binom(n - 1, m) and the n - (m + 1) subscript:
        result = numpy.zeros(x.shape[0], dtype=float)
        for m in range(0, n - k + 1):
            result += x[:, m] * scipy.special.binom(n - 1, m) * incomplete_bell_poly(n - (m + 1), k - 1, x)
        return result
def generate_set_partition_strings(n):
    """Generate the restricted growth strings for all of the partitions of an `n`-member set.

    Uses Algorithm H from page 416 of volume 4A of Knuth's `The Art of Computer
    Programming`. Returns the partitions in lexicographical order.

    Parameters
    ----------
    n : scalar int, non-negative
        Number of (unique) elements in the set to be partitioned.

    Returns
    -------
    partitions : list of :py:class:`Array`
        List has a number of elements equal to the `n`-th Bell number (i.e.,
        the number of partitions for a set of size `n`). Each element has
        length `n`, the elements of which are the restricted growth strings
        describing the partitions of the set. The strings are returned in
        lexicographic order.
    """
    # Handle edge cases:
    if n == 0:
        return []
    elif n == 1:
        return [numpy.array([0])]
    partitions = []
    # Step 1: Initialize. a holds the current growth string, b the running
    # bounds (each a[j] may grow up to b[j]):
    a = numpy.zeros(n, dtype=int)
    b = numpy.ones(n, dtype=int)
    while True:
        # Step 2: Visit (copy, since a is mutated in place):
        partitions.append(a.copy())
        if a[-1] == b[-1]:
            # Step 4: Find j. j is the index of the first element from the end
            # for which a != b, with the exception of the last element.
            j = (a[:-1] != b[:-1]).nonzero()[0][-1]
            # Step 5: Increase a_j (or terminate):
            if j == 0:
                break
            else:
                a[j] += 1
                # Step 6: Zero out a_{j+1} to a_n and update the bounds:
                b[-1] = b[j] + (a[j] == b[j])
                a[j + 1:] = 0
                b[j + 1:-1] = b[-1]
        else:
            # Step 3: Increase a_n:
            a[-1] += 1
    return partitions
def generate_set_partitions(set_):
    """Generate all of the partitions of a set.

    This is a helper function that utilizes the restricted growth strings from
    :py:func:`generate_set_partition_strings`. The partitions are returned in
    lexicographic order.

    Parameters
    ----------
    set_ : :py:class:`Array` or other Array-like, (`m`,)
        The set to find the partitions of.

    Returns
    -------
    partitions : list of lists of :py:class:`Array`
        The number of elements in the outer list is equal to the number of
        partitions, which is the len(`m`)^th Bell number. Each of the inner lists
        corresponds to a single possible partition. The length of an inner list
        is therefore equal to the number of blocks. Each of the arrays in an
        inner list is hence a block.
    """
    set_ = numpy.asarray(set_)
    strings = generate_set_partition_strings(len(set_))
    partitions = []
    for string in strings:
        # Each distinct label in the growth string selects one block:
        blocks = []
        for block_num in numpy.unique(string):
            blocks.append(set_[string == block_num])
        partitions.append(blocks)
    return partitions
def powerset(iterable):
    """powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)

    From itertools documentation, https://docs.python.org/2/library/itertools.html.
    """
    items = list(iterable)
    subsets_by_size = (
        itertools.combinations(items, size) for size in range(len(items) + 1)
    )
    return itertools.chain.from_iterable(subsets_by_size)
def unique_rows(arr, return_index=False, return_inverse=False):
    """Returns a copy of arr with duplicate rows removed.

    From Stackoverflow "Find unique rows in numpy.array."

    Parameters
    ----------
    arr : :py:class:`Array`, (`m`, `n`)
        The array to find the unique rows of.
    return_index : bool, optional
        If True, the indices of the unique rows in the array will also be
        returned. I.e., unique = arr[idx]. Default is False (don't return
        indices).
    return_inverse: bool, optional
        If True, the indices in the unique array to reconstruct the original
        array will also be returned. I.e., arr = unique[inv]. Default is False
        (don't return inverse).

    Returns
    -------
    unique : :py:class:`Array`, (`p`, `n`) where `p` <= `m`
        The array `arr` with duplicate rows removed. When `return_index`
        and/or `return_inverse` are True, a tuple is returned instead with
        the index/inverse arrays appended in that order.
    """
    # View each row as one opaque void scalar so unique() compares whole rows:
    b = numpy.ascontiguousarray(arr).view(
        numpy.dtype((numpy.void, arr.dtype.itemsize * arr.shape[1]))
    )
    try:
        out = numpy.unique(b, return_index=True, return_inverse=return_inverse)
        dum = out[0]
        idx = out[1]
        if return_inverse:
            # ravel() guards against numpy versions where return_inverse
            # follows the (m, 1) shape of the row view instead of being flat:
            inv = numpy.asarray(out[2]).ravel()
    except TypeError:
        if return_inverse:
            raise RuntimeError(
                "Error in scipy.unique on older versions of numpy prevents "
                "return_inverse from working!"
            )
        # Handle bug in numpy 1.6.2: sort _Row wrappers and keep the first
        # row of each run of equal rows.
        rows = [_Row(row) for row in b]
        srt_idx = sorted(range(len(rows)), key=rows.__getitem__)
        rows = numpy.asarray(rows)[srt_idx]
        row_cmp = [-1]
        for k in range(1, len(srt_idx)):
            row_cmp.append(rows[k-1].__cmp__(rows[k]))
        row_cmp = numpy.asarray(row_cmp)
        transition_idxs = numpy.where(row_cmp != 0)[0]
        idx = numpy.asarray(srt_idx)[transition_idxs]
    out = arr[idx]
    # Test the combined case FIRST: with the original elif ordering the
    # (return_index and return_inverse) branch was unreachable, so callers
    # asking for both silently got only (unique, idx).
    if return_index and return_inverse:
        out = (out, idx, inv)
    elif return_index:
        out = (out, idx)
    elif return_inverse:
        out = (out, inv)
    return out
class _Row(object):
"""Helper class to compare rows of a matrix.
This is used to workaround the bug with scipy.unique in numpy 1.6.2.
Parameters
----------
row : ndarray
The row this object is to represent. Must be 1d. (Will be flattened.)
"""
def __init__(self, row):
self.row = scipy.asarray(row).flatten()
def __cmp__(self, other):
"""Compare two rows.
Parameters
----------
other : :py:class:`_Row`
The row to compare to.
Returns
-------
cmp : int
== ==================================================================
0 if the two rows have all elements equal
1 if the first non-equal element (from the right) in self is greater
-1 if the first non-equal element (from the right) in self is lesser
== ==================================================================
"""
if (self.row == other.row).all():
return 0
else:
# Get first non-equal element:
first_nonequal_idx = scipy.where(self.row != other.row)[0][0]
if self.row[first_nonequal_idx] > other.row[first_nonequal_idx]:
return 1
else:
# Other must be greater than self in this case:
return -1
# Conversion factor to get from interquartile range to standard deviation:
# for a normal distribution IQR = 2 * Phi^{-1}(0.75) * sigma ~= 1.349 * sigma,
# so dividing an IQR by this constant gives a robust estimate of sigma.
IQR_TO_STD = 2.0 * scipy.stats.norm.isf(0.25)
def compute_stats(vals, check_nan=False, robust=False, axis=1, plot_QQ=False, bins=15, name=''):
    """Compute the average statistics (mean, std dev) for the given values.

    Parameters
    ----------
    vals : array-like, (`M`, `D`)
        Values to compute the average statistics along the specified axis of.
    check_nan : bool, optional
        Whether or not to check for (and exclude) NaN's. Default is False (do
        not attempt to handle NaN's).
    robust : bool, optional
        Whether or not to use robust estimators (median for mean, IQR for
        standard deviation). Default is False (use non-robust estimators).
    axis : int, optional
        Axis to compute the statistics along. Presently only supported if
        `robust` is False. Default is 1.
    plot_QQ : bool, optional
        Whether or not a QQ plot and histogram should be drawn for each channel.
        Default is False (do not draw QQ plots).
    bins : int, optional
        Number of bins to use when plotting histogram (for plot_QQ=True).
        Default is 15
    name : str, optional
        Name to put in the title of the QQ/histogram plot.

    Returns
    -------
    mean : ndarray, (`M`,)
        Estimator for the mean of `vals`.
    std : ndarray, (`M`,)
        Estimator for the standard deviation of `vals`.

    Raises
    ------
    NotImplementedError
        If `axis` != 1 when `robust` is True.
    """
    if axis != 1 and robust:
        raise NotImplementedError("Values of axis other than 1 are not supported "
                                  "with the robust keyword at this time!")
    if robust:
        # TODO: This stuff should really be vectorized if there is something that allows it!
        if check_nan:
            # numpy.nanmedian replaces scipy.stats.nanmedian, which has been
            # removed from SciPy:
            mean = numpy.nanmedian(vals, axis=axis)
            # TODO: HANDLE AXIS PROPERLY!
            std = numpy.zeros(vals.shape[0], dtype=float)
            for k in range(0, len(vals)):
                ch = vals[k]
                ok_idxs = ~numpy.isnan(ch)
                if ok_idxs.any():
                    std[k] = (scipy.stats.scoreatpercentile(ch[ok_idxs], 75) -
                              scipy.stats.scoreatpercentile(ch[ok_idxs], 25))
                else:
                    # Leave a nan where there are no non-nan values:
                    std[k] = numpy.nan
            std /= IQR_TO_STD
        else:
            mean = numpy.median(vals, axis=axis)
            # TODO: HANDLE AXIS PROPERLY!
            std = numpy.asarray([scipy.stats.scoreatpercentile(ch, 75.0) -
                                 scipy.stats.scoreatpercentile(ch, 25.0)
                                 for ch in vals]) / IQR_TO_STD
    else:
        if check_nan:
            # numpy.nanmean/nanstd replace the removed scipy.stats versions;
            # ddof=1 preserves the bias=False (sample standard deviation)
            # default of scipy.stats.nanstd:
            mean = numpy.nanmean(vals, axis=axis)
            std = numpy.nanstd(vals, axis=axis, ddof=1)
        else:
            mean = numpy.mean(vals, axis=axis)
            std = numpy.std(vals, axis=axis)
    if plot_QQ:
        f = plt.figure()
        gs = mplgs.GridSpec(2, 2, height_ratios=[8, 1])
        a_QQ = f.add_subplot(gs[0, 0])
        a_hist = f.add_subplot(gs[0, 1])
        a_slider = f.add_subplot(gs[1, :])
        title = f.suptitle("")

        def update(val):
            """Update the index from the results to be displayed.
            """
            a_QQ.clear()
            a_hist.clear()
            # Slider values are floats; indices must be ints for both the
            # {:d} format below and array indexing:
            idx = int(slider.val)
            title.set_text("{:s}, n={:d}".format(name, idx))
            nan_idxs = numpy.isnan(vals[idx, :])
            if not nan_idxs.all():
                osm, osr = scipy.stats.probplot(vals[idx, ~nan_idxs], dist='norm', plot=None, fit=False)
                a_QQ.plot(osm, osr, 'bo', markersize=10)
                a_QQ.set_title('QQ plot')
                a_QQ.set_xlabel(r'quantiles of $\mathcal{N}(0,1)$')
                a_QQ.set_ylabel('quantiles of data')
                # density=True replaces normed=True, which was removed from
                # matplotlib's hist() in matplotlib 3.1:
                a_hist.hist(vals[idx, ~nan_idxs], bins=bins, density=True)
                locs = numpy.linspace(vals[idx, ~nan_idxs].min(), vals[idx, ~nan_idxs].max())
                a_hist.plot(locs, scipy.stats.norm.pdf(locs, loc=mean[idx], scale=std[idx]))
                a_hist.set_title('Normalized histogram and reported PDF')
                a_hist.set_xlabel('value')
                a_hist.set_ylabel('density')
            f.canvas.draw()

        def arrow_respond(slider, event):
            """Event handler for arrow key events in plot windows.

            Pass the slider object to update as a masked argument using a lambda function::

                lambda evt: arrow_respond(my_slider, evt)

            Parameters
            ----------
            slider : Slider instance associated with this handler.
            event : Event to be handled.
            """
            if event.key == 'right':
                slider.set_val(min(slider.val + 1, slider.valmax))
            elif event.key == 'left':
                slider.set_val(max(slider.val - 1, slider.valmin))

        slider = mplw.Slider(a_slider,
                             'index',
                             0,
                             len(vals) - 1,
                             valinit=0,
                             valfmt='%d')
        slider.on_changed(update)
        update(0)
        f.canvas.mpl_connect('key_press_event', lambda evt: arrow_respond(slider, evt))
    return (mean, std)
def univariate_envelope_plot(x, mean, std, ax=None, base_alpha=0.375, envelopes=[1, 3], lb=None, ub=None, expansion=10, **kwargs):
    """Make a plot of a mean curve with uncertainty envelopes.

    Parameters
    ----------
    x : array of float
        Abscissa values the curves are plotted against.
    mean : array of float
        Mean curve, plotted as a line.
    std : array of float
        Standard deviation at each point of `x`, used to build the shaded
        envelopes.
    ax : axis instance or 'gca', optional
        Axis to plot into. If None (default) a new figure/axis is created;
        if the string 'gca' the current axis is used.
    base_alpha : float, optional
        Alpha of the innermost envelope; envelope `i` is drawn with alpha
        `base_alpha / i`. Default is 0.375.
    envelopes : list of int, optional
        Multiples of `std` to shade. Default is [1, 3]. (NOTE(review):
        mutable default argument -- harmless here since it is never mutated.)
    lb, ub : float, optional
        Lower/upper clip bounds for the curves. Default is no clipping.
    expansion : float, optional
        When both `lb` and `ub` are given (and `expansion` != 1.0), the clip
        window is widened by `expansion` times the (ub - lb) range on each
        side before clipping. Default is 10.
    **kwargs : optional
        Extra keyword arguments passed to `ax.plot` for the mean curve.

    Returns
    -------
    (l, e) : tuple
        `l` is the list of line objects for the mean curve, `e` the list of
        polygons from `fill_between`, one per envelope.
    """
    if ax is None:
        f = plt.figure()
        ax = f.add_subplot(1, 1, 1)
    elif ax == 'gca':
        ax = plt.gca()
    # Copy before clipping so the caller's arrays are not modified in place:
    mean = scipy.asarray(mean, dtype=float).copy()
    std = scipy.asarray(std, dtype=float).copy()
    # Truncate the data so matplotlib doesn't die:
    if lb is not None and ub is not None and expansion != 1.0:
        expansion *= ub - lb
        ub = ub + expansion
        lb = lb - expansion
    if ub is not None:
        mean[mean > ub] = ub
    if lb is not None:
        mean[mean < lb] = lb
    l = ax.plot(x, mean, **kwargs)
    # Reuse the color matplotlib picked for the mean line on the envelopes:
    color = plt.getp(l[0], 'color')
    e = []
    for i in envelopes:
        lower = mean - i * std
        upper = mean + i * std
        # Clip the envelope edges to the same window as the mean:
        if ub is not None:
            lower[lower > ub] = ub
            upper[upper > ub] = ub
        if lb is not None:
            lower[lower < lb] = lb
            upper[upper < lb] = lb
        e.append(ax.fill_between(x, lower, upper, facecolor=color, alpha=base_alpha / i))
    return (l, e)
def summarize_sampler(sampler, weights=None, burn=0, ci=0.95, chain_mask=None, temp_idx=0):
    r"""Create summary statistics of the flattened chain of the sampler.

    The confidence regions are computed from the quantiles of the data.

    Parameters
    ----------
    sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`)
        The sampler to summarize the chains of.
    weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional
        The weight for each sample. This is useful for post-processing the
        output from MultiNest sampling, for instance.
    burn : int, optional
        The number of samples to burn from the beginning of the chain. Default
        is 0 (no burn).
    ci : float, optional
        A number between 0 and 1 indicating the confidence region to compute.
        Default is 0.95 (return upper and lower bounds of the 95% confidence
        interval).
    chain_mask : (index) array, optional
        Mask identifying the chains to keep before plotting, in case there are
        bad chains. Default is to use all chains.
    temp_idx : int, optional
        Index of the parallel-tempering temperature to summarize, used when
        `sampler` is a :py:class:`emcee.PTSampler` or a 4d array. Default is 0.
        (This name was previously referenced but never defined, which raised
        NameError for those inputs.)

    Returns
    -------
    mean : array, (num_params,)
        Mean values of each of the parameters sampled.
    ci_l : array, (num_params,)
        Lower bounds of the `ci*100%` confidence intervals.
    ci_u : array, (num_params,)
        Upper bounds of the `ci*100%` confidence intervals.
    """
    try:
        k = sampler.flatchain.shape[-1]
    except AttributeError:
        # Assumes array input is only case where there is no "flatchain" attribute.
        k = sampler.shape[-1]
    # Check the plain-array case first so array input works independently of
    # the emcee classes:
    if isinstance(sampler, numpy.ndarray):
        if sampler.ndim == 4:
            if chain_mask is None:
                chain_mask = numpy.ones(sampler.shape[1], dtype=bool)
            flat_trace = sampler[temp_idx, chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[temp_idx, chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 3:
            if chain_mask is None:
                chain_mask = numpy.ones(sampler.shape[0], dtype=bool)
            flat_trace = sampler[chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 2:
            flat_trace = sampler[burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[burn:]
                weights = weights.ravel()
    elif isinstance(sampler, emcee.EnsembleSampler):
        if chain_mask is None:
            chain_mask = numpy.ones(sampler.chain.shape[0], dtype=bool)
        flat_trace = sampler.chain[chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, emcee.PTSampler):
        if chain_mask is None:
            chain_mask = numpy.ones(sampler.nwalkers, dtype=bool)
        flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    else:
        # {!s} instead of {:s}: a type object is not a str, so the 's' format
        # spec itself would raise TypeError here.
        raise ValueError("Unknown sampler class: {!s}".format(type(sampler)))
    cibdry = 100.0 * (1.0 - ci) / 2.0
    if weights is None:
        mean = numpy.mean(flat_trace, axis=0)
        ci_l, ci_u = numpy.percentile(flat_trace, [cibdry, 100.0 - cibdry], axis=0)
    else:
        # Weighted mean, and weighted percentiles via linear interpolation of
        # the weighted empirical CDF:
        mean = weights.dot(flat_trace) / weights.sum()
        ci_l = numpy.zeros(k)
        ci_u = numpy.zeros(k)
        p = numpy.asarray([cibdry, 100.0 - cibdry])
        for i in range(0, k):
            srt = flat_trace[:, i].argsort()
            x = flat_trace[srt, i]
            w = weights[srt]
            Sn = w.cumsum()
            pn = 100.0 / Sn[-1] * (Sn - w / 2.0)
            j = numpy.digitize(p, pn) - 1
            ci_l[i], ci_u[i] = x[j] + (p - pn[j]) / (pn[j + 1] - pn[j]) * (x[j + 1] - x[j])
    return (mean, ci_l, ci_u)
def plot_sampler(
    sampler, suptitle=None, labels=None, bins=50,
    plot_samples=False, plot_hist=True, plot_chains=True,
    burn=0, chain_mask=None, temp_idx=0, weights=None, cutoff_weight=None,
    cmap='gray_r', hist_color='k', chain_alpha=0.1,
    points=None, covs=None, colors=None, ci=[0.95],
    max_hist_ticks=None, max_chain_ticks=6,
    label_chain_y=False, hide_chain_yticklabels=False, chain_ytick_pad=2.0,
    label_fontsize=None, ticklabel_fontsize=None, chain_label_fontsize=None,
    chain_ticklabel_fontsize=None, xticklabel_angle=90.0,
    bottom_sep=0.075, suptitle_space=0.1, fixed_height=None,
    fixed_width=None, l=0.1, r=0.9, t1=None, b1=None, t2=0.2, b2=0.1,
    ax_space=0.1
):
    """Plot the results of MCMC sampler (posterior and chains).
    Loosely based on triangle.py. Provides extensive options to format the plot.
    Parameters
    ----------
    sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`)
        The sampler to plot the chains/marginals of. Can also be an array of
        samples which matches the shape of the `chain` attribute that would be
        present in a :py:class:`emcee.Sampler` instance.
    suptitle : str, optional
        The figure title to place at the top. Default is no title.
    labels : list of str, optional
        The labels to use for each of the free parameters. Default is to leave
        the axes unlabeled.
    bins : int, optional
        Number of bins to use for the histograms. Default is 50.
    plot_samples : bool, optional
        If True, the samples are plotted as individual points. Default is False.
    plot_hist : bool, optional
        If True, histograms are plotted. Default is True.
    plot_chains : bool, optional
        If True, plot the sampler chains at the bottom. Default is True.
    burn : int, optional
        The number of samples to burn before making the marginal histograms.
        Default is zero (use all samples).
    chain_mask : (index) array, optional
        Mask identifying the chains to keep before plotting, in case there are
        bad chains. Default is to use all chains.
    temp_idx : int, optional
        Index of the temperature to plot when plotting a
        :py:class:`emcee.PTSampler`. Default is 0 (samples from the posterior).
    weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional
        The weight for each sample. This is useful for post-processing the
        output from MultiNest sampling, for instance. Default is to not weight
        the samples.
    cutoff_weight : float, optional
        If `weights` and `cutoff_weight` are present, points with
        `weights < cutoff_weight * weights.max()` will be excluded. Default is
        to plot all points.
    cmap : str, optional
        The colormap to use for the histograms. Default is 'gray_r'.
    hist_color : str, optional
        The color to use for the univariate histograms. Default is 'k'.
    chain_alpha : float, optional
        The transparency to use for the plots of the individual chains. Setting
        this to something low lets you better visualize what is going on.
        Default is 0.1.
    points : array, (`D`,) or (`N`, `D`), optional
        Array of point(s) to plot onto each marginal and chain. Default is None.
    covs : array, (`D`, `D`) or (`N`, `D`, `D`), optional
        Covariance matrix or array of covariance matrices to plot onto each
        marginal. If you do not want to plot a covariance matrix for a specific
        point, set its corresponding entry to `None`. Default is to not plot
        confidence ellipses for any points.
    colors : array of str, (`N`,), optional
        The colors to use for the points in `points`. Default is to use the
        standard matplotlib RGBCMYK cycle.
    ci : array, (`num_ci`,), optional
        List of confidence intervals to plot for each non-`None` entry in `covs`.
        Default is 0.95 (just plot the 95 percent confidence interval).
    max_hist_ticks : int, optional
        The maximum number of ticks for the histogram plots. Default is None
        (no limit).
    max_chain_ticks : int, optional
        The maximum number of y-axis ticks for the chain plots. Default is 6.
    label_chain_y : bool, optional
        If True, the chain plots will have y axis labels. Default is False.
    hide_chain_yticklabels : bool, optional
        If True, hide the y axis tick labels for the chain plots. Default is
        False (show y tick labels).
    chain_ytick_pad : float, optional
        The padding (in points) between the y-axis tick labels and the axis for
        the chain plots. Default is 2.0.
    label_fontsize : float, optional
        The font size (in points) to use for the axis labels. Default is
        `axes.labelsize`.
    ticklabel_fontsize : float, optional
        The font size (in points) to use for the axis tick labels. Default is
        `xtick.labelsize`.
    chain_label_fontsize : float, optional
        The font size (in points) to use for the labels of the chain axes.
        Default is `axes.labelsize`.
    chain_ticklabel_fontsize : float, optional
        The font size (in points) to use for the chain axis tick labels. Default
        is `xtick.labelsize`.
    xticklabel_angle : float, optional
        The angle to rotate the x tick labels, in degrees. Default is 90.
    bottom_sep : float, optional
        The separation (in relative figure units) between the chains and the
        marginals. Default is 0.075.
    suptitle_space : float, optional
        The amount of space (in relative figure units) to leave for a figure
        title. Default is 0.1.
    fixed_height : float, optional
        The desired figure height (in inches). Default is to automatically
        adjust based on `fixed_width` to make the subplots square.
    fixed_width : float, optional
        The desired figure width (in inches). Default is `figure.figsize[0]`.
    l : float, optional
        The location (in relative figure units) of the left margin. Default is
        0.1.
    r : float, optional
        The location (in relative figure units) of the right margin. Default is
        0.9.
    t1 : float, optional
        The location (in relative figure units) of the top of the grid of
        histograms. Overrides `suptitle_space` if present.
    b1 : float, optional
        The location (in relative figure units) of the bottom of the grid of
        histograms. Overrides `bottom_sep` if present. Defaults to 0.1 if
        `plot_chains` is False.
    t2 : float, optional
        The location (in relative figure units) of the top of the grid of chain
        plots. Default is 0.2.
    b2 : float, optional
        The location (in relative figure units) of the bottom of the grid of
        chain plots. Default is 0.1.
    ax_space : float, optional
        The `w_space` and `h_space` to use (in relative figure units). Default
        is 0.1.
    """
    masked_weights = None
    if points is not None:
        points = scipy.atleast_2d(points)
        if covs is not None and len(covs) != len(points):
            raise ValueError(
                "If covariance matrices are provided, len(covs) must equal len(points)!"
            )
        elif covs is None:
            covs = [None,] * len(points)
        if colors is None:
            c_cycle = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
            # Use the builtin next() so this works on both Python 2 and 3;
            # c_cycle.next() was Python-2-only and raised AttributeError on 3.
            colors = [next(c_cycle) for p in points]
    # Create axes:
    try:
        k = sampler.flatchain.shape[-1]
    except AttributeError:
        # Assumes array input is only case where there is no "flatchain" attribute.
        k = sampler.shape[-1]
    if labels is None:
        labels = [''] * k
    # Set up geometry:
    # plot_chains =
    # True: False:
    # +-----------+ +-----------+
    # | +-------+ | | +-------+ |
    # | | | | | | | |
    # | | | | | | | |
    # | | | | | | | |
    # | +-------+ | | +-------+ |
    # | +-------+ | +-----------+
    # | | | |
    # | +-------+ |
    # +-----------+
    # We retain support for the original suptitle_space keyword, but can
    # override with t1 as needed:
    if t1 is None:
        t1 = 1 - suptitle_space
    # We retain support for the original bottom_sep keyword, but can override
    # with b1 as needed:
    if b1 is None:
        if plot_chains:
            b1 = t2 + bottom_sep
        else:
            b1 = 0.1
    if fixed_height is None and fixed_width is None:
        # Default: use matplotlib's default width, handle remaining parameters
        # with the fixed width case below:
        fixed_width = matplotlib.rcParams['figure.figsize'][0]
    if fixed_height is None and fixed_width is not None:
        # Only width specified, compute height to yield square histograms:
        fixed_height = fixed_width * (r - l) / (t1 - b1)
    elif fixed_height is not None and fixed_width is None:
        # Only height specified, compute width to yield square histograms
        fixed_width = fixed_height * (t1 - b1) / (r - l)
    # Otherwise width and height are fixed, and we may not have square
    # histograms, at the user's discretion.
    wspace = ax_space
    hspace = ax_space
    # gs1 is the histograms, gs2 is the chains:
    f = plt.figure(figsize=(fixed_width, fixed_height))
    gs1 = mplgs.GridSpec(k, k)
    gs1.update(bottom=b1, top=t1, left=l, right=r, wspace=wspace, hspace=hspace)
    if plot_chains:
        gs2 = mplgs.GridSpec(1, k)
        gs2.update(bottom=b2, top=t2, left=l, right=r, wspace=wspace, hspace=hspace)
    axes = []
    # j is the row, i is the column.
    for j in range(0, k + int(plot_chains)):
        row = []
        for i in range(0, k):
            if i > j:
                row.append(None)
            else:
                # Share axes so panning/zooming one marginal moves its
                # neighbors; chain axes (j == k) only share x among themselves.
                sharey = row[-1] if i > 0 and i < j and j < k else None
                sharex = axes[-1][i] if j > i and j < k else \
                    (row[-1] if i > 0 and j == k else None)
                gs = gs1[j, i] if j < k else gs2[:, i]
                row.append(f.add_subplot(gs, sharey=sharey, sharex=sharex))
                if j < k and ticklabel_fontsize is not None:
                    row[-1].tick_params(labelsize=ticklabel_fontsize)
                elif j >= k and chain_ticklabel_fontsize is not None:
                    row[-1].tick_params(labelsize=chain_ticklabel_fontsize)
        axes.append(row)
    axes = scipy.asarray(axes)
    # Update axes with the data:
    if isinstance(sampler, emcee.EnsembleSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool)
        flat_trace = sampler.chain[chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, emcee.PTSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.nwalkers, dtype=bool)
        flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, scipy.ndarray):
        if sampler.ndim == 4:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[1], dtype=bool)
            flat_trace = sampler[temp_idx, chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[temp_idx, chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 3:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[0], dtype=bool)
            flat_trace = sampler[chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 2:
            flat_trace = sampler[burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[burn:]
                weights = weights.ravel()
        else:
            # Previously fell through and hit a NameError on flat_trace below;
            # fail fast with a clear message instead.
            raise ValueError("Array sampler must have 2, 3 or 4 dimensions!")
        if cutoff_weight is not None and weights is not None:
            mask = weights >= cutoff_weight * weights.max()
            flat_trace = flat_trace[mask, :]
            masked_weights = weights[mask]
        else:
            masked_weights = weights
    else:
        # Use "{}" rather than "{:s}": a non-empty format spec applied to a
        # type object raises TypeError on Python 3, masking this ValueError.
        raise ValueError("Unknown sampler class: {}".format(type(sampler)))
    # j is the row, i is the column.
    for i in range(0, k):
        # Diagonal: univariate marginal for parameter i.
        axes[i, i].clear()
        if plot_hist:
            # NOTE(review): `normed` was removed in matplotlib >= 3.1 in favor
            # of `density`; kept as-is for the matplotlib versions this module
            # targets — confirm before upgrading matplotlib.
            axes[i, i].hist(flat_trace[:, i], bins=bins, color=hist_color, weights=masked_weights, normed=True, histtype='stepfilled')
        if plot_samples:
            axes[i, i].plot(flat_trace[:, i], scipy.zeros_like(flat_trace[:, i]), ',', alpha=0.1)
        if points is not None:
            # axvline can only take a scalar x, so we have to loop:
            for p, c, cov in zip(points, colors, covs):
                axes[i, i].axvline(x=p[i], linewidth=3, color=c)
                if cov is not None:
                    # Overlay the 1D Gaussian implied by the covariance:
                    xlim = axes[i, i].get_xlim()
                    i_grid = scipy.linspace(xlim[0], xlim[1], 100)
                    axes[i, i].plot(
                        i_grid,
                        scipy.stats.norm.pdf(
                            i_grid,
                            loc=p[i],
                            scale=scipy.sqrt(cov[i, i])
                        ),
                        c,
                        linewidth=3.0
                    )
                    axes[i, i].set_xlim(xlim)
        if i == k - 1:
            axes[i, i].set_xlabel(labels[i], fontsize=label_fontsize)
            plt.setp(axes[i, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle)
        if i < k - 1:
            plt.setp(axes[i, i].get_xticklabels(), visible=False)
        plt.setp(axes[i, i].get_yticklabels(), visible=False)
        for j in range(i + 1, k):
            # Lower triangle: bivariate marginal for parameters (i, j).
            axes[j, i].clear()
            if plot_hist:
                ct, x, y, im = axes[j, i].hist2d(
                    flat_trace[:, i],
                    flat_trace[:, j],
                    bins=bins,
                    cmap=cmap,
                    weights=masked_weights
                )
            if plot_samples:
                axes[j, i].plot(flat_trace[:, i], flat_trace[:, j], ',', alpha=0.1)
            if points is not None:
                for p, c, cov in zip(points, colors, covs):
                    axes[j, i].plot(p[i], p[j], 'o', color=c)
                    if cov is not None:
                        # Draw confidence ellipses from the 2x2 sub-covariance;
                        # axis lengths come from the chi^2 quantiles of the
                        # requested confidence levels.
                        Sigma = scipy.asarray([[cov[i, i], cov[i, j]], [cov[j, i], cov[j, j]]], dtype=float)
                        lam, v = scipy.linalg.eigh(Sigma)
                        chi2 = [-scipy.log(1.0 - cival) * 2.0 for cival in ci]
                        a = [2.0 * scipy.sqrt(chi2val * lam[-1]) for chi2val in chi2]
                        b = [2.0 * scipy.sqrt(chi2val * lam[-2]) for chi2val in chi2]
                        ang = scipy.arctan2(v[1, -1], v[0, -1])
                        for aval, bval in zip(a, b):
                            ell = mplp.Ellipse(
                                [p[i], p[j]],
                                aval,
                                bval,
                                angle=scipy.degrees(ang),
                                facecolor='none',
                                edgecolor=c,
                                linewidth=3
                            )
                            axes[j, i].add_artist(ell)
            # axes[j, i].plot(points[i], points[j], 'o')
            # xmid = 0.5 * (x[1:] + x[:-1])
            # ymid = 0.5 * (y[1:] + y[:-1])
            # axes[j, i].contour(xmid, ymid, ct.T, colors='k')
            if j < k - 1:
                plt.setp(axes[j, i].get_xticklabels(), visible=False)
            if i != 0:
                plt.setp(axes[j, i].get_yticklabels(), visible=False)
            if i == 0:
                axes[j, i].set_ylabel(labels[j], fontsize=label_fontsize)
            if j == k - 1:
                axes[j, i].set_xlabel(labels[i], fontsize=label_fontsize)
                plt.setp(axes[j, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle)
        if plot_chains:
            # Bottom row: raw chains for parameter i.
            axes[-1, i].clear()
            if isinstance(sampler, emcee.EnsembleSampler):
                axes[-1, i].plot(sampler.chain[:, :, i].T, alpha=chain_alpha)
            elif isinstance(sampler, emcee.PTSampler):
                axes[-1, i].plot(sampler.chain[temp_idx, :, :, i].T, alpha=chain_alpha)
            else:
                if sampler.ndim == 4:
                    axes[-1, i].plot(sampler[temp_idx, :, :, i].T, alpha=chain_alpha)
                elif sampler.ndim == 3:
                    axes[-1, i].plot(sampler[:, :, i].T, alpha=chain_alpha)
                elif sampler.ndim == 2:
                    axes[-1, i].plot(sampler[:, i].T, alpha=chain_alpha)
                # Plot the weights on top of the chains:
                if weights is not None:
                    a_wt = axes[-1, i].twinx()
                    a_wt.plot(weights, alpha=chain_alpha, linestyle='--', color='r')
                    plt.setp(a_wt.yaxis.get_majorticklabels(), visible=False)
                    a_wt.yaxis.set_ticks_position('none')
                    # Plot the cutoff weight as a horizontal line and the first sample
                    # which is included as a vertical bar. Note that this won't be quite
                    # the right behavior if the weights are not roughly monotonic.
                    if cutoff_weight is not None:
                        a_wt.axhline(cutoff_weight * weights.max(), linestyle='-', color='r')
                        wi, = scipy.where(weights >= cutoff_weight * weights.max())
                        a_wt.axvline(wi[0], linestyle='-', color='r')
            if burn > 0:
                axes[-1, i].axvline(burn, color='r', linewidth=3)
            if points is not None:
                for p, c in zip(points, colors):
                    axes[-1, i].axhline(y=p[i], linewidth=3, color=c)
            # Reset the xlim since it seems to get messed up:
            axes[-1, i].set_xlim(left=0)
            # try:
            # [axes[-1, i].axhline(y=pt, linewidth=3) for pt in points[i]]
            # except TypeError:
            # axes[-1, i].axhline(y=points[i], linewidth=3)
            if label_chain_y:
                axes[-1, i].set_ylabel(labels[i], fontsize=chain_label_fontsize)
            axes[-1, i].set_xlabel('step', fontsize=chain_label_fontsize)
            plt.setp(axes[-1, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle)
            for tick in axes[-1, i].get_yaxis().get_major_ticks():
                tick.set_pad(chain_ytick_pad)
                # NOTE(review): _get_text1 is a private matplotlib API; this
                # forces the tick label to pick up the new pad — confirm it
                # still exists on the matplotlib version in use.
                tick.label1 = tick._get_text1()
    for i in range(0, k):
        if max_hist_ticks is not None:
            axes[k - 1, i].xaxis.set_major_locator(plt.MaxNLocator(nbins=max_hist_ticks - 1))
            axes[i, 0].yaxis.set_major_locator(plt.MaxNLocator(nbins=max_hist_ticks - 1))
        if plot_chains and max_chain_ticks is not None:
            axes[k, i].yaxis.set_major_locator(plt.MaxNLocator(nbins=max_chain_ticks - 1))
            axes[k, i].xaxis.set_major_locator(plt.MaxNLocator(nbins=max_chain_ticks - 1))
        if plot_chains and hide_chain_yticklabels:
            plt.setp(axes[k, i].get_yticklabels(), visible=False)
    if suptitle is not None:
        f.suptitle(suptitle)
    f.canvas.draw()
    return f
def plot_sampler_fingerprint(
    sampler, hyperprior, weights=None, cutoff_weight=None, nbins=None,
    labels=None, burn=0, chain_mask=None, temp_idx=0, points=None,
    plot_samples=False, sample_color='k', point_color=None, point_lw=3,
    title='', rot_x_labels=False, figsize=None
):
    """Make a plot of the sampler's "fingerprint": univariate marginal histograms for all hyperparameters.
    The hyperparameters are mapped to [0, 1] using
    :py:meth:`hyperprior.elementwise_cdf`, so this can only be used with prior
    distributions which implement this function.
    Returns the figure and axis created.
    Parameters
    ----------
    sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`)
        The sampler to plot the chains/marginals of. Can also be an array of
        samples which matches the shape of the `chain` attribute that would be
        present in a :py:class:`emcee.Sampler` instance.
    hyperprior : :py:class:`~gptools.utils.JointPrior` instance
        The joint prior distribution for the hyperparameters. Used to map the
        values to [0, 1] so that the hyperparameters can all be shown on the
        same axis.
    weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional
        The weight for each sample. This is useful for post-processing the
        output from MultiNest sampling, for instance.
    cutoff_weight : float, optional
        If `weights` and `cutoff_weight` are present, points with
        `weights < cutoff_weight * weights.max()` will be excluded. Default is
        to plot all points.
    nbins : int or array of int, (`D`,), optional
        The number of bins dividing [0, 1] to use for each histogram. If a
        single int is given, this is used for all of the hyperparameters. If an
        array of ints is given, these are the numbers of bins for each of the
        hyperparameters. The default is to determine the number of bins using
        the Freedman-Diaconis rule.
    labels : array of str, (`D`,), optional
        The labels for each hyperparameter. Default is to use empty strings.
    burn : int, optional
        The number of samples to burn before making the marginal histograms.
        Default is zero (use all samples).
    chain_mask : (index) array, optional
        Mask identifying the chains to keep before plotting, in case there are
        bad chains. Default is to use all chains.
    temp_idx : int, optional
        Index of the temperature to plot when plotting a
        :py:class:`emcee.PTSampler`. Default is 0 (samples from the posterior).
    points : array, (`D`,) or (`N`, `D`), optional
        Array of point(s) to plot as horizontal lines. Default is None.
    plot_samples : bool, optional
        If True, the samples are plotted as horizontal lines. Default is False.
    sample_color : str, optional
        The color to plot the samples in. Default is 'k', meaning black.
    point_color : str or list of str, optional
        The color to plot the individual points in. Default is to loop through
        matplotlib's default color sequence. If a list is provided, it will be
        cycled through.
    point_lw : float, optional
        Line width to use when plotting the individual points.
    title : str, optional
        Title to use for the plot.
    rot_x_labels : bool, optional
        If True, the labels for the x-axis are rotated 90 degrees. Default is
        False (do not rotate labels).
    figsize : 2-tuple, optional
        The figure size to use. Default is to use the matplotlib default.
    """
    try:
        k = sampler.flatchain.shape[-1]
    except AttributeError:
        # Assumes array input is only case where there is no "flatchain" attribute.
        k = sampler.shape[-1]
    # Process the samples:
    if isinstance(sampler, emcee.EnsembleSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool)
        flat_trace = sampler.chain[chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, emcee.PTSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.nwalkers, dtype=bool)
        flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, scipy.ndarray):
        if sampler.ndim == 4:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[1], dtype=bool)
            flat_trace = sampler[temp_idx, chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[temp_idx, chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 3:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[0], dtype=bool)
            flat_trace = sampler[chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 2:
            flat_trace = sampler[burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[burn:]
                weights = weights.ravel()
        else:
            # Previously fell through and hit a NameError on flat_trace below;
            # fail fast with a clear message instead.
            raise ValueError("Array sampler must have 2, 3 or 4 dimensions!")
        if cutoff_weight is not None and weights is not None:
            mask = weights >= cutoff_weight * weights.max()
            flat_trace = flat_trace[mask, :]
            weights = weights[mask]
    else:
        # Use "{}" rather than "{:s}": a non-empty format spec applied to a
        # type object raises TypeError on Python 3, masking this ValueError.
        raise ValueError("Unknown sampler class: {}".format(type(sampler)))
    if labels is None:
        labels = [''] * k
    # Map each sample through the prior's elementwise CDF so every parameter
    # lives on [0, 1]; u has shape (D, n_samp) after the transpose.
    u = scipy.asarray([hyperprior.elementwise_cdf(p) for p in flat_trace], dtype=float).T
    if nbins is None:
        # Freedman-Diaconis rule: bin width 2*IQR/n^(1/3), mapped to [0, 1].
        lq, uq = scipy.stats.scoreatpercentile(u, [25, 75], axis=1)
        h = 2.0 * (uq - lq) / u.shape[0]**(1.0 / 3.0)
        n = scipy.asarray(scipy.ceil(1.0 / h), dtype=int)
    else:
        try:
            iter(nbins)
            n = nbins
        except TypeError:
            n = nbins * scipy.ones(u.shape[0])
    hist = [scipy.stats.histogram(uv, numbins=nv, defaultlimits=[0, 1], weights=weights) for uv, nv in zip(u, n)]
    max_ct = max([max(h.count) for h in hist])
    min_ct = min([min(h.count) for h in hist])
    f = plt.figure(figsize=figsize)
    a = f.add_subplot(1, 1, 1)
    for i, (h, pn) in enumerate(zip(hist, labels)):
        # One grayscale column per parameter, shared count normalization:
        a.imshow(
            scipy.atleast_2d(scipy.asarray(h.count[::-1], dtype=float)).T,
            cmap='gray_r',
            interpolation='nearest',
            vmin=min_ct,
            vmax=max_ct,
            extent=(i, i + 1, 0, 1),
            aspect='auto'
        )
    if plot_samples:
        # Iterate u.T so each p is one sample's (D,) vector and i indexes the
        # parameter columns; iterating u directly (as before) used the sample
        # index as the x position, drawing far past the D-wide axis.
        for p in u.T:
            for i, uv in enumerate(p):
                a.plot([i, i + 1], [uv, uv], sample_color, alpha=0.1)
    if points is not None:
        points = scipy.atleast_2d(scipy.asarray(points, dtype=float))
        u_points = [hyperprior.elementwise_cdf(p) for p in points]
        if point_color is None:
            c_cycle = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
        else:
            c_cycle = itertools.cycle(scipy.atleast_1d(point_color))
        for p in u_points:
            # Builtin next() works on both Python 2 and 3 (c_cycle.next() was
            # Python-2-only).
            c = next(c_cycle)
            for i, uv in enumerate(p):
                a.plot([i, i + 1], [uv, uv], color=c, lw=point_lw)
    a.set_xlim(0, len(hist))
    a.set_ylim(0, 1)
    a.set_xticks(0.5 + scipy.arange(0, len(hist), dtype=float))
    a.set_xticklabels(labels)
    if rot_x_labels:
        plt.setp(a.xaxis.get_majorticklabels(), rotation=90)
    a.set_xlabel("parameter")
    a.set_ylabel("$u=F_P(p)$")
    a.set_title(title)
    return f, a
def plot_sampler_cov(
    sampler, method='corr', weights=None, cutoff_weight=None, labels=None,
    burn=0, chain_mask=None, temp_idx=0, cbar_label=None, title='',
    rot_x_labels=False, figsize=None, xlabel_on_top=True
):
    """Make a plot of the sampler's correlation or covariance matrix.
    Returns the figure and axis created.
    Parameters
    ----------
    sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`)
        The sampler to plot the chains/marginals of. Can also be an array of
        samples which matches the shape of the `chain` attribute that would be
        present in a :py:class:`emcee.Sampler` instance.
    method : {'corr', 'cov'}
        Whether to plot the correlation matrix ('corr') or the covariance matrix
        ('cov'). The covariance matrix is often not useful because different
        parameters have wildly different scales. Default is to plot the
        correlation matrix.
    weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional
        The weight for each sample. This is useful for post-processing the
        output from MultiNest sampling, for instance. Default is to not weight
        the samples.
    cutoff_weight : float, optional
        If `weights` and `cutoff_weight` are present, points with
        `weights < cutoff_weight * weights.max()` will be excluded. Default is
        to use all points.
    labels : array of str, (`D`,), optional
        The labels for each hyperparameter. Default is to use empty strings.
    burn : int, optional
        The number of samples to burn before making the marginal histograms.
        Default is zero (use all samples).
    chain_mask : (index) array, optional
        Mask identifying the chains to keep before plotting, in case there are
        bad chains. Default is to use all chains.
    temp_idx : int, optional
        Index of the temperature to plot when plotting a
        :py:class:`emcee.PTSampler`. Default is 0 (samples from the posterior).
    cbar_label : str, optional
        The label to use for the colorbar. The default is chosen based on the
        value of the `method` keyword.
    title : str, optional
        Title to use for the plot.
    rot_x_labels : bool, optional
        If True, the labels for the x-axis are rotated 90 degrees. Default is
        False (do not rotate labels).
    figsize : 2-tuple, optional
        The figure size to use. Default is to use the matplotlib default.
    xlabel_on_top : bool, optional
        If True, the x-axis labels are put on top (the way mathematicians
        present matrices). Default is True.
    """
    try:
        k = sampler.flatchain.shape[-1]
    except AttributeError:
        # Assumes array input is only case where there is no "flatchain" attribute.
        k = sampler.shape[-1]
    # Process the samples:
    if isinstance(sampler, emcee.EnsembleSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool)
        flat_trace = sampler.chain[chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, emcee.PTSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.nwalkers, dtype=bool)
        flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, scipy.ndarray):
        if sampler.ndim == 4:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[1], dtype=bool)
            flat_trace = sampler[temp_idx, chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[temp_idx, chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 3:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[0], dtype=bool)
            flat_trace = sampler[chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 2:
            flat_trace = sampler[burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[burn:]
                weights = weights.ravel()
        else:
            # Previously fell through and hit a NameError on flat_trace below;
            # fail fast with a clear message instead.
            raise ValueError("Array sampler must have 2, 3 or 4 dimensions!")
        if cutoff_weight is not None and weights is not None:
            mask = weights >= cutoff_weight * weights.max()
            flat_trace = flat_trace[mask, :]
            weights = weights[mask]
    else:
        # Use "{}" rather than "{:s}": a non-empty format spec applied to a
        # type object raises TypeError on Python 3, masking this ValueError.
        raise ValueError("Unknown sampler class: {}".format(type(sampler)))
    if labels is None:
        labels = [''] * k
    if cbar_label is None:
        cbar_label = r'$\mathrm{cov}(p_1, p_2)$' if method == 'cov' else r'$\mathrm{corr}(p_1, p_2)$'
    if weights is None:
        if method == 'corr':
            cov = scipy.corrcoef(flat_trace, rowvar=0, ddof=1)
        else:
            cov = scipy.cov(flat_trace, rowvar=0, ddof=1)
    else:
        # Weighted case: compute the weighted covariance, then normalize it to
        # a correlation matrix by hand if requested.
        cov = scipy.cov(flat_trace, rowvar=0, aweights=weights)
        if method == 'corr':
            stds = scipy.sqrt(scipy.diag(cov))
            STD_1, STD_2 = scipy.meshgrid(stds, stds)
            cov = cov / (STD_1 * STD_2)
    f_cov = plt.figure(figsize=figsize)
    a_cov = f_cov.add_subplot(1, 1, 1)
    a_cov.set_title(title)
    # Symmetric color limits so zero maps to the colormap midpoint:
    if method == 'cov':
        vmax = scipy.absolute(cov).max()
    else:
        vmax = 1.0
    cax = a_cov.pcolor(cov, cmap='seismic', vmin=-1 * vmax, vmax=vmax)
    divider = make_axes_locatable(a_cov)
    a_cb = divider.append_axes("right", size="10%", pad=0.05)
    cbar = f_cov.colorbar(cax, cax=a_cb, label=cbar_label)
    a_cov.set_xlabel('parameter')
    a_cov.set_ylabel('parameter')
    a_cov.axis('square')
    # Flip the y axis so the matrix reads top-to-bottom like printed notation:
    a_cov.invert_yaxis()
    if xlabel_on_top:
        a_cov.xaxis.tick_top()
        a_cov.xaxis.set_label_position('top')
    a_cov.set_xticks(0.5 + scipy.arange(0, flat_trace.shape[1], dtype=float))
    a_cov.set_yticks(0.5 + scipy.arange(0, flat_trace.shape[1], dtype=float))
    a_cov.set_xticklabels(labels)
    if rot_x_labels:
        plt.setp(a_cov.xaxis.get_majorticklabels(), rotation=90)
    a_cov.set_yticklabels(labels)
    a_cov.set_xlim(0, flat_trace.shape[1])
    a_cov.set_ylim(flat_trace.shape[1], 0)
    return f_cov, a_cov
|
markchil/gptools
|
gptools/utils.py
|
Python
|
gpl-3.0
| 112,260
|
[
"VisIt"
] |
20a7e6626e9a4905bd22d90c05671d3cb6e78af77c02f6ae820b2af4508e9cf3
|
#!/usr/bin/python
"""
Copyright 2016 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
# CGI login handler (Python 2): authenticates a user against tUsers, creates
# a session row in tSessions, and returns state either as cookies plus a
# redirect, or as query parameters appended to src_url.
import os
import sys
import cgi
import Cookie
import hashlib
import time
import MySQLdb
import dbSession
import dbShared
import urllib
import datetime
sys.path.append("../")
import dbInfo
cookies = Cookie.SimpleCookie()
useCookies = 1
result = ''
linkappend = ''
exactUser = ''
# Cutover date for the password-salt scheme change (see comment further down).
newhashDate = datetime.datetime(2016, 05, 16, 20, 30)
try:
    cookies.load(os.environ['HTTP_COOKIE'])
except KeyError:
    # No Cookie header in the request: fall back to passing state in the URL.
    useCookies = 0
form = cgi.FieldStorage()
src_url = form.getfirst('src_url')
sid = form.getfirst('gh_sid')
loginp = form.getfirst('loginu')
passp = form.getfirst('passu')
passc = form.getfirst('passc')
persist = form.getfirst('persist')
push_key = form.getfirst('push_key')
#sessions persist up to 30 days
duration = 2592000
#escape input to prevent sql injection
loginp = dbShared.dbInsertSafe(loginp)
sid = dbShared.dbInsertSafe(sid)
# NOTE(review): duplicate of the push_key read above; harmless but redundant.
push_key = form.getfirst('push_key')
if (loginp == None or (passp == None and passc == None)):
    result = 'no login data'
else:
    conn = dbShared.ghConn()
    cursor = conn.cursor()
    # Parameterized query; loginp was additionally escaped above.
    cursor.execute('SELECT userID, userPassword, userState, created, lastReset FROM tUsers WHERE userID=%s', (loginp,))
    row = cursor.fetchone()
    if row == None:
        result = 'bad user'
    elif not row[2] > 0:
        # userState <= 0 means the account has not been verified yet.
        result = 'unverified account'
    else:
        exactUser = row[0]
        # New hash date is when salt that goes with password to create hash was
        # changed from loginp to DB_KEY3 since loginp did not always exactly match username
        # NOTE(review): unsalted-per-user SHA-1 is a weak password hash by
        # modern standards; consider bcrypt/scrypt if this code is revisited.
        if row[3] > newhashDate or (row[4] != None and row[4] > newhashDate):
            crypt_pass = hashlib.sha1(dbInfo.DB_KEY3 + passp).hexdigest()
        else:
            crypt_pass = hashlib.sha1(loginp + passp).hexdigest()
        if passc != None:
            # already encrypted password was sent
            crypt_pass = passc
        if row[1] == crypt_pass:
            # Successful login: record it and mint a new session id.
            updatestr = 'UPDATE tUsers SET lastLogin=NOW() WHERE userID=%s'
            cursor.execute(updatestr, (loginp,))
            dbSession.verifySessionDB()
            sid = hashlib.sha1(str(time.time()) + exactUser).hexdigest()
            updatestr = 'INSERT INTO tSessions (sid, userID, expires, pushKey) VALUES (%s, %s, %s, %s)'
            cursor.execute(updatestr, (sid, exactUser, time.time() + duration, push_key))
            result = 'success'
        else:
            result = 'bad password or user name'
    cursor.close()
    conn.close()
if sid == None:
    sid = ""
if useCookies:
    # Always report the attempt outcome via cookie when cookies are available.
    cookies['loginAttempt'] = result
    if result == "success":
        # session id cookie expires when browser closes unless we are told to persist
        expiration = datetime.datetime.utcnow() + datetime.timedelta(days=30)
        cookies['gh_sid'] = sid
        if persist != None:
            cookies['gh_sid']['expires'] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S GMT")
        # userid and theme stay for up to 7 days
        expiration = datetime.datetime.now() + datetime.timedelta(days=7)
        cookies['userID'] = exactUser
        cookies['userID']['expires'] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S GMT")
        cookies['uiTheme'] = dbShared.getUserAttr(loginp, 'themeName')
        cookies['uiTheme']['expires'] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S GMT")
    print cookies
else:
    # add results to url if not using cookies
    linkappend = 'loginAttempt=' + urllib.quote(result) + '&gh_sid=' + sid
if src_url != None:
    if src_url.find('?') > -1:
        queryChar = '&'
    else:
        queryChar = '?'
    # go back where they came from
    print 'Status: 303 See Other'
    print 'Location: ' + src_url + queryChar + linkappend
    print ''
else:
    print 'Content-Type: text/html\n'
    print result + '-' + sid
|
druss316/G-Harvestor
|
html/authUser.py
|
Python
|
gpl-3.0
| 4,198
|
[
"Galaxy"
] |
3a62920e3d1c11864e7421b2160f62c0a1bad4ee5f8d0ce2cbe53c66a7ef211a
|
# $Author: patricio $
# $Revision: 285 $
# $Date: 2010-06-18 17:59:25 -0400 (Fri, 18 Jun 2010) $
# $HeadURL: file:///home/esp01/svn/code/python/branches/patricio/photpipe/lib/gaussian.py $
# $Id: gaussian.py 285 2010-06-18 21:59:25Z patricio $
#! /usr/bin/env python
'''
Name
----
gaussian
File
----
gaussian.py
Description
-----------
Routines for evaluating, estimating parameters of, and fitting Gaussians.
Package Contents
----------------
N-dimensional functions:
gaussian(x, width=1., center=0., height=None, params=None)
Evaluate the Gaussian function with given parameters at x
(n-dimensional).
fitgaussian(y, x)
Calculates a Gaussian fit to (y, x) data, returns (width,
center, height).
1-dimensional functions:
gaussianguess(y, x=None)
Crudely estimates the parameters of a Gaussian that fits the
(y, x) data.
Examples:
---------
See fitgaussian() example.
Revisions
---------
2007-09-17 0.1 jh@physics.ucf.edu Initial version 0.01, portions
adapted from http://www.scipy.org/Cookbook/FittingData.
2007-10-02 0.2 jh@physics.ucf.edu Started making N-dimensional,
put width before center in args.
2007-11-13 0.3 jh@physics.ucf.edu Made N-dimensional.
2008-12-02 0.4 nlust@physics.ucf.edu Made fit gaussian return errors, and
fixed a bug generating initial guesses
2009-10-25 0.5 jh@physics.ucf.edu Standardized all headers, fixed
an error in a fitgaussian example, added example
">>>"s and plot labels.
'''
import numpy as np
import scipy.optimize as so
import disk as d
def gaussian(x, width=1.0, center=0.0, height=None, bgpars=None):
    """
    Evaluate an N-dimensional Gaussian plus a linear background at x.

    Parameters
    ----------
    x : ndarray
        Abscissa values, arranged as the output of np.indices() (the
        first axis indexes the dimension), or a plain 1D array.
    width : scalar or array_like
        Gaussian width (sigma) per dimension; a scalar is broadcast to
        all dimensions.  Default: 1.
    center : scalar or array_like
        Gaussian center per dimension; a scalar is broadcast to all
        dimensions.  Default: 0.
    height : scalar or None
        Peak value of the Gaussian.  If None, the height that makes
        the Gaussian integrate to 1 is used.  To integrate to another
        number, leave height alone and scale the result.
    bgpars : 3-element sequence or None
        Background parameters [a, b, c] for the linearly varying level
        background = Y*a + X*b + c (2D) or x*a + c (1D).
        Default: no background ([0, 0, 0]).  (2D background not tested
        for 1D input.)

    Returns
    -------
    ndarray, shaped like x (or like x[0] if x is multidimensional):
    the Gaussian f(x) = height * exp(-0.5*((x - center)/width)**2),
    taken as the product of orthogonal 1D Gaussians, plus the
    background level.
    """
    # Build the "no background" default fresh on every call instead of
    # using a mutable default argument (shared across calls).
    if bgpars is None:
        bgpars = [0.0, 0.0, 0.0]
    ndim = np.ndim(x) - 1
    if ndim == 0:
        # 1D input: temporarily view x as (1, n) so the per-dimension
        # indexing below works; the caller's shape is restored later.
        ndim = 1
        oldshape = np.shape(x)
        x.shape = (1, x.shape[0])
    # Broadcast scalar/sequence center and width to one float per
    # dimension.  asarray + add avoids the in-place `center += zeros`
    # of the original, which list-extended (mutated) caller lists.
    center = np.asarray(center, dtype=float) + np.zeros(ndim)
    width = np.asarray(width, dtype=float) + np.zeros(ndim)
    r2pi = np.sqrt(2.0 * np.pi)
    # Default height normalizes the Gaussian to unit integral.
    # np.prod replaces np.product (deprecated, removed in numpy 2.0).
    if height is None:
        height = np.prod(1.0 / (width * r2pi))
    ponent = 0.0
    for i in np.arange(ndim):
        ponent += ((x[i] - center[i]) / width[i]) ** 2
    if 'oldshape' in locals():  # restore caller's shape (1D case)
        x.shape = oldshape
    # Linear background level:
    if ndim == 2:
        background = x[0] * bgpars[0] + x[1] * bgpars[1] + bgpars[2]
    else:  # it must be 1D:
        background = x * bgpars[0] + bgpars[2]
    return height * np.exp(-0.5 * ponent) + background
def old_gaussianguess(y, x=None, mask=None):
    """
    Crudely estimate the (width, center, height) of a Gaussian that
    fits the (y, x) data.  1D only.

    Parameters
    ----------
    y : ndarray, 1D
        Function values.
    x : ndarray, optional
        Abscissas of y, same shape as y (or shape (1,) + y.shape),
        sorted ascending (not checked).  Default: array indices.
    mask : ndarray, optional
        Same shape as y; points with mask value 0 are ignored when
        locating the peak.  Default: all points good.

    Returns
    -------
    (width, height, center) estimates as a 3-tuple (width, center,
    height); see gaussian() for their meaning.

    Notes
    -----
    The most extreme element of y (clipped away from the borders) is
    taken as the peak; the width is half the span where y exceeds 0.6
    of the peak.  Spikes in the data will give poor estimates.
    """
    if y.ndim != 1:
        # The original raised an undefined name (ArrayShapeError) with
        # Python-2-only raise syntax; ValueError is the standard choice.
        raise ValueError("y must be 1D, for now.")
    if x is None:
        x = np.indices(y.shape)[0]
    else:
        # Accept x with a leading singleton axis, e.g. shape (1, n).
        # (The original compared against the malformed tuple
        # (1, y.shape), which could never match.)
        if x.shape == (1,) + y.shape:
            oldshape = x.shape
            x.shape = y.shape
        elif x.shape != y.shape:
            raise ValueError("x must have same shape as y (and be sorted).")
    # Default mask: all points good.
    if mask is None:
        mask = np.ones(np.shape(y))
    ymax = np.amax(y * mask)
    iymax = np.argmax(y * mask)
    ymin = np.amin(y * mask)
    iymin = np.argmin(y * mask)
    # Peak is the most extreme value (handles negative Gaussians too):
    if np.abs(ymin) >= np.abs(ymax):
        icenter = iymin
    else:
        icenter = iymax
    # Keep the peak index off the array borders:
    icenter = np.clip(icenter, 1, x.size - 2)
    center = x[icenter]
    height = y[icenter]
    # Half the span where y exceeds 0.6 of the peak approximates sigma:
    gtsigma = np.where(y > (0.6 * height))
    width = (x[gtsigma[0].max()] - x[gtsigma[0].min()]) / 2.
    if 'oldshape' in locals():  # restore caller's shape if reshaped
        x.shape = oldshape
    return (width, center, height)
def gaussianguess(data, mask=None, yxguess=None):
    """
    Roughly estimate (width, center, height) of a 2D Gaussian in data.

    Parameters
    ----------
    data : 2D ndarray
        The image containing the Gaussian (e.g. a stellar PSF).
    mask : ndarray, optional
        Same shape as data; points with mask value 0 are ignored when
        locating the peak.  Default: all points good.
    yxguess : 2-element sequence, optional
        (y, x) guess of the center; rounded to the nearest pixel.
        Default: position of the masked maximum.

    Returns
    -------
    (gwidth, gcenter, gheight) : tuple
        gwidth is a rough (y, x) width in pixels, gcenter the integer
        (y, x) peak position, gheight the data value at the peak.
    """
    # Default mask: all points good.
    if mask is None:
        mask = np.ones(np.shape(data))
    # Center guess: location of the masked maximum, or the rounded
    # user-supplied position.  int() is required: np.around returns
    # floats, which are not valid array indices.
    if yxguess is None:
        gcenter = np.unravel_index(np.argmax(data * mask), np.shape(data))
    else:
        gcenter = int(np.around(yxguess[0])), int(np.around(yxguess[1]))
    # Height guess is the data value at the center position:
    gheight = data[gcenter]
    # Width guess: half the count of pixels above two sigma of the
    # values in the central column/row -- a (very) rough PSF width.
    sigma = np.array([np.std(data[:, gcenter[1]]),   # y std (central column)
                      np.std(data[gcenter[0], :])])  # x std (central row)
    gwidth = (np.sum((data * mask)[:, gcenter[1]] > 2 * sigma[0]) / 2.0,
              np.sum((data * mask)[gcenter[0], :] > 2 * sigma[1]) / 2.0)
    return (gwidth, gcenter, gheight)
def fitgaussian(y, x=None, bgpars=None, fitbg=0, guess=None,
                mask=None, weights=None, maskg=False, yxguess=None):
    """
    Fit an N-dimensional Gaussian (plus optional background) to y(x).

    Parameters
    ----------
    y : ndarray
        Function values to fit.
    x : ndarray, optional
        Abscissas of y, arranged as np.indices(y.shape); the highest
        dimension indexes the coordinate axis.  Default:
        np.indices(np.shape(y)).
    bgpars : 3-element sequence, optional
        Background parameters [a, b, c] for bg = Y*a + X*b + c.
        Default: [0, 0, median(y under medmask)].
    fitbg : int
        0: no background fitting (bg estimated as the masked median),
        1: fit a constant background (bg = c),
        2: fit a planar background (bg = a*x + b*y + c).
    guess : tuple (width, center, height), optional
        Initial guess for the optimizer; estimated with
        gaussianguess() when omitted.
    mask : ndarray, optional
        Same shape as y; points with mask value 0 are excluded from
        the minimization.  Default: all points good.
    weights : ndarray, optional
        Same shape as y; minimization weights (1/sqrt(variance) for
        scientific data).  Default: uniform.
    maskg : bool
        If True, exclude a small disk around the guessed center when
        estimating the median background level.
    yxguess : 2-element sequence, optional
        (y, x) guess of the center position.

    Returns
    -------
    p : ndarray
        Best-fit parameters: width(s), center(s), height, plus the
        fitted background parameter(s) when fitbg > 0.
    err : ndarray or None
        1-sigma uncertainties of p, or None when the covariance matrix
        could not be estimated.

    Notes
    -----
    First guesses the parameters (if no guess is provided), then calls
    a Levenberg-Marquardt optimizer (scipy.optimize.leastsq) on
    residuals() to finish the job.
    """
    if x is None:
        x = np.indices(np.shape(y))
    else:
        if (((x.ndim == 1) and (x.shape != y.shape))
                or ((x.ndim > 1) and (x.shape[1:] != y.shape))):
            raise ValueError("x must give coordinates of points in y.")
    # Default mask: all good.
    if mask is None:
        mask = np.ones(np.shape(y))
    # Default weights: no weighting.
    if weights is None:
        weights = np.ones(np.shape(y))
    # Optionally mask out a disk around the guessed center so the
    # Gaussian itself does not bias the median background estimate:
    medmask = np.copy(mask)
    if maskg and (yxguess is not None or guess is not None):
        if yxguess is not None:
            center = yxguess
        elif guess is not None:
            center = guess[1]
        medmask *= (1 - d.disk(3, center, np.shape(y)))
    # Estimate the median of the image:
    medbg = np.median(y[np.where(medmask)])
    if bgpars is None:
        bgpars = [0.0, 0.0, medbg]
    # Get a guess if not provided:
    if guess is None:
        if yxguess is None:
            guess = gaussianguess(y - medbg, mask=mask)
        else:
            guess = gaussianguess(y - medbg, mask=mask, yxguess=yxguess)
    # "Ravel" the guess into a flat parameter vector:
    gparams = np.append(guess[0], guess[1])
    gparams = np.append(gparams, guess[2])
    # Background params to fit:
    if fitbg == 0:
        bgparams = []
    elif fitbg == 1:
        bgparams = bgpars[2]
    elif fitbg == 2:
        bgparams = bgpars
    # Concatenate the sets of parameters we want to fit:
    params = np.append(gparams, bgparams)
    # Fixed extra arguments needed by residuals():
    args = (x, y, mask, weights, bgpars, fitbg)
    # The Levenberg-Marquardt fit:
    p, cov, info, mesg, success = so.leastsq(residuals, params, args,
                                             full_output=True)
    # Uncertainties from the covariance diagonal; cov is None when the
    # fit is degenerate, which np.sqrt(np.diagonal(None)) rejects.
    # Catch only those errors instead of the original bare except.
    try:
        err = np.sqrt(np.diagonal(cov))
    except (TypeError, ValueError):
        err = None
    return p, err
def residuals(params, x, data, mask, weights, bgpars, fitbg):
    """
    Weighted residuals between data and a Gaussian-plus-background
    model; the objective function for fitgaussian()'s leastsq call.

    Parameters
    ----------
    params : 1D ndarray
        Parameters being fitted: width(s), center(s), height, plus the
        background parameter(s) when fitbg > 0 (appended at the end).
    x : ndarray
        Abscissas of data (np.indices-style coordinates).
    data : ndarray
        Observed values.
    mask : ndarray
        Same shape as data; points with mask value 0 are excluded.
    weights : ndarray
        Same shape as data; multiplicative weights on the residuals
        (1/sqrt(variance) for scientific data).
    bgpars : 3-element sequence
        Background parameters [a, b, c] for bg = Y*a + X*b + c; used
        as-is when fitbg == 0, partially overridden otherwise.
    fitbg : int
        0: background fixed at bgpars.
        1: constant background is the last element of params.
        2: planar background is the last three elements of params.

    Returns
    -------
    1D ndarray of the unmasked, weighted residuals (model - data).
    """
    # Copy so that updating the fitted background never writes through
    # to the caller's bgpars (the original aliased and mutated it).
    bgparams = list(bgpars)
    if fitbg == 1:
        bgparams[2] = params[-1]  # fitted constant level
        params = params[0:-1]     # strip it from the Gaussian params
    elif fitbg == 2:
        bgparams = params[-3:]    # fitted planar background
        params = params[0:-3]
    # Split the remaining params into width, center, and height:
    data_dims = np.ndim(data)
    params_len = len(params)
    width = params[0:data_dims]
    center = params[data_dims:2 * data_dims]
    if params_len - 2 * data_dims == 1:
        height = params[2 * data_dims]
    else:
        # Height missing: gaussian() falls back to the unit-integral
        # height.  Avoid this case -- the fit then cannot scale the peak.
        height = None
    # Model evaluated at x:
    model = gaussian(x, width, center, height, bgparams).squeeze()
    # Weighted residuals, unmasked points only:
    res = (model - data) * weights
    return res[np.where(mask)]
def gaussians(x, param):
    """
    Evaluate a sum of Gaussians at x.

    Parameters
    ----------
    x : ndarray
        Abscissas, as in gaussian(); 1D or np.indices-style.
    param : sequence of per-Gaussian parameter sequences
        Each element packs [width(s), center(s)] (height defaults to
        the unit-integral value) or [width(s), center(s), height].

    Returns
    -------
    ndarray shaped like x (or x[0]): the summed Gaussians.
    """
    ndim = x.ndim - 1
    if ndim == 0:
        # 1D input: temporarily view x as (1, n) for uniform indexing.
        ndim = 1
        oldshape = x.shape
        x.shape = (1, x.shape[0])
    # The number of gaussians:
    ngauss = np.shape(param)[0]
    if ngauss == 1:
        param = [param]
    result = np.zeros(x[0].shape)
    for k in np.arange(ngauss):  # unpack each Gaussian's parameters
        pdim = len(param[k])
        if pdim % 2:  # odd length: explicit height is the last entry
            # Floor division: pdim is used as a slice index, and true
            # division would make it a float (TypeError on Python 3).
            pdim = (pdim - 1) // 2
            height = param[k][-1]
        else:         # even length: height defaults below
            pdim = pdim // 2
            height = None
        # Broadcast to one float per dimension without mutating the
        # caller's parameter lists (the original `+=` list-extended):
        width = np.asarray(param[k][:pdim], dtype=float) + np.zeros(ndim)
        center = np.asarray(param[k][pdim:2 * pdim], dtype=float) \
                 + np.zeros(ndim)
        # Default height normalizes each Gaussian to unit integral
        # (np.prod replaces the deprecated np.product):
        if height is None:
            height = np.prod(1.0 / (width * np.sqrt(2.0 * np.pi)))
        ponent = 0.0
        for i in np.arange(ndim):
            ponent += ((x[i] - center[i]) / width[i]) ** 2.0
        result += height * np.exp(-0.5 * ponent)
    if 'oldshape' in locals():  # reshape x back if necessary
        x.shape = oldshape
    return result
def fitgaussians(y, x=None, guess=None, sigma=1.0):
    """
    Fit positions and fluxes of several Gaussians in an image; one
    shared sigma is fitted for all of them (in all dimensions).

    Parameters
    ----------
    y : ndarray
        Data values.
    x : ndarray, optional
        Abscissas of y.  Default: np.indices(y.shape)[0].
    guess : 2D sequence, [[width1, center1, height1],
                          [width2, center2, height2], ...]
        Initial guess of each Gaussian's parameters; one row per
        Gaussian.  See gaussian() for their meaning.
    sigma : float
        Initial guess of the shared width.  Default: 1.0.

    Returns
    -------
    position : ndarray
        The first two fitted parameters of the first Gaussian
        (its position).
    extra : tuple
        (p, sigma, iscov, cov, info, mesg) with the per-Gaussian
        fitted parameters, the fitted shared sigma, a covariance-
        availability flag, and the raw leastsq outputs.
    """
    if x is None:
        x = np.indices(y.shape)[0]
    else:
        if (((x.ndim == 1) and (x.shape != y.shape))
                or ((x.ndim > 1) and (x.shape[1:] != y.shape))):
            raise ValueError("x must give coordinates of points in y.")
    # Flatten the guess and append the shared sigma as the last
    # fit parameter (np.append ravels its inputs):
    ngauss = np.shape(guess)[0]
    params = np.append(np.ravel(guess), sigma)
    # Minimize residuals of the fit:
    p, cov, info, mesg, success = so.leastsq(resids, params,
                                             args=(x, ngauss, y),
                                             full_output=True)
    sigma = p[-1]
    # Reshape the fitted parameters back to one row per Gaussian.
    # Floor division: reshape dimensions must be integers on Python 3.
    p = np.reshape(p[0:-1], (ngauss, len(p[0:-1]) // ngauss))
    # `cov is None`, not `cov == None`: the latter is an elementwise
    # (ambiguous-truth) comparison when cov is an array.
    iscov = 0 if cov is None else 1
    extra = (p, sigma, iscov, cov, info, mesg)
    return np.array(p[0, 0:2]), extra
def resids(param, x, ngauss, y):
    """
    Residuals for fitgaussians(): unpack the shared sigma (last
    element) and the per-Gaussian [center..., height] rows, rebuild
    full parameter rows as [sigma, sigma, row...], and return the
    flattened difference between the summed-Gaussian model and y.
    """
    sigma = param[-1]
    # One row of parameters per Gaussian; floor division keeps the
    # reshape dimensions integral on Python 3:
    param = np.reshape(param[0:-1], (ngauss, len(param[0:-1]) // ngauss))
    gss = []
    for k in np.arange(ngauss):
        # Prepend the shared width twice (same sigma in y and x):
        gauss = np.append(sigma, np.append(sigma, param[k]))
        gss = np.append(gss, gauss)
    p = np.reshape(gss, (ngauss, len(gss) // ngauss))
    return np.ravel(gaussians(x, param=p) - y)
|
zkbt/mosasaurus
|
mosasaurus/gaussian.py
|
Python
|
mit
| 25,888
|
[
"Gaussian"
] |
f4dbe33a24d5c108b1ed8519bf2b21137b0600872082655d23d093ca87b5d74d
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2002 Gary Shao
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .fontstyle import FontStyle
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
log = logging.getLogger(".paragraphstyle")
#-------------------------------------------------------------------------
#
# Paragraph alignment
#
#-------------------------------------------------------------------------
PARA_ALIGN_CENTER = 0
PARA_ALIGN_LEFT = 1
PARA_ALIGN_RIGHT = 2
PARA_ALIGN_JUSTIFY = 3
#------------------------------------------------------------------------
#
# ParagraphStyle
#
#------------------------------------------------------------------------
class ParagraphStyle:
    """
    Defines the characteristics of a paragraph. The characteristics are:
    font (a :class:`.FontStyle` instance), right margin, left margin,
    first indent, top margin, bottom margin, alignment, level, top border,
    bottom border, right border, left border, padding, and background color.
    """
    def __init__(self, source=None):
        """
        :param source: if not None, then the ParagraphStyle is created using the
                       values of the source instead of the default values.
        """
        # Copy-construct from source when given; otherwise apply defaults.
        if source:
            self.font = FontStyle(source.font)
            self.rmargin = source.rmargin
            self.lmargin = source.lmargin
            self.first_indent = source.first_indent
            self.tmargin = source.tmargin
            self.bmargin = source.bmargin
            self.align = source.align
            self.level = source.level
            self.top_border = source.top_border
            self.bottom_border = source.bottom_border
            self.right_border = source.right_border
            self.left_border = source.left_border
            self.pad = source.pad
            self.bgcolor = source.bgcolor
            self.description = source.description
            self.tabs = source.tabs
        else:
            self.font = FontStyle()
            self.rmargin = 0
            self.lmargin = 0
            self.tmargin = 0
            self.bmargin = 0
            self.first_indent = 0
            self.align = PARA_ALIGN_LEFT
            self.level = 0
            self.top_border = 0
            self.bottom_border = 0
            self.right_border = 0
            self.left_border = 0
            self.pad = 0
            self.bgcolor = (255, 255, 255)
            self.description = ""
            self.tabs = []
    def set_description(self, text):
        """
        Set the description of the paragraph
        """
        self.description = text
    def get_description(self):
        """
        Return the description of the paragraph
        """
        return self.description
    def set(self, rmargin=None, lmargin=None, first_indent=None,
            tmargin=None, bmargin=None, align=None,
            tborder=None, bborder=None, rborder=None, lborder=None,
            pad=None, bgcolor=None, font=None):
        """
        Allows the values of the object to be set.  Only the supplied
        (non-None) keyword arguments are applied.

        :param rmargin: right indent in centimeters
        :param lmargin: left indent in centimeters
        :param first_indent: first line indent in centimeters
        :param tmargin: space above paragraph in centimeters
        :param bmargin: space below paragraph in centimeters
        :param align: alignment type (PARA_ALIGN_LEFT, PARA_ALIGN_RIGHT, PARA_ALIGN_CENTER, or PARA_ALIGN_JUSTIFY)
        :param tborder: non zero indicates that a top border should be used
        :param bborder: non zero indicates that a bottom border should be used
        :param rborder: non zero indicates that a right border should be used
        :param lborder: non zero indicates that a left border should be used
        :param pad: padding in centimeters
        :param bgcolor: background color of the paragraph as an RGB tuple.
        :param font: FontStyle instance that defines the font
        """
        if font is not None:
            self.font = FontStyle(font)
        if pad is not None:
            self.set_padding(pad)
        if tborder is not None:
            self.set_top_border(tborder)
        if bborder is not None:
            self.set_bottom_border(bborder)
        if rborder is not None:
            self.set_right_border(rborder)
        if lborder is not None:
            self.set_left_border(lborder)
        if bgcolor is not None:
            self.set_background_color(bgcolor)
        if align is not None:
            self.set_alignment(align)
        if rmargin is not None:
            self.set_right_margin(rmargin)
        if lmargin is not None:
            self.set_left_margin(lmargin)
        if first_indent is not None:
            self.set_first_indent(first_indent)
        if tmargin is not None:
            self.set_top_margin(tmargin)
        if bmargin is not None:
            self.set_bottom_margin(bmargin)
    def set_header_level(self, level):
        """
        Set the header level for the paragraph. This is useful for
        numbered paragraphs. A value of 1 indicates a header level
        format of X, a value of two implies X.X, etc. A value of zero
        means no header level.
        """
        self.level = level
    def get_header_level(self):
        "Return the header level of the paragraph"
        return self.level
    def set_font(self, font):
        """
        Set the font style of the paragraph.

        :param font: :class:`.FontStyle` object containing the font definition
                     to use.
        """
        self.font = FontStyle(font)
    def get_font(self):
        "Return the :class:`.FontStyle` of the paragraph"
        return self.font
    def set_padding(self, val):
        """
        Set the paragraph padding in centimeters

        :param val: floating point value indicating the padding in centimeters
        """
        self.pad = val
    def get_padding(self):
        """Return the padding of the paragraph"""
        return self.pad
    def set_top_border(self, val):
        """
        Set the presence or absence of top border.

        :param val: True indicates a border should be used, False indicates
                    no border.
        """
        self.top_border = val
    def get_top_border(self):
        "Return 1 if a top border is specified"
        return self.top_border
    def set_bottom_border(self, val):
        """
        Set the presence or absence of bottom border.

        :param val: True indicates a border should be used, False
                    indicates no border.
        """
        self.bottom_border = val
    def get_bottom_border(self):
        "Return 1 if a bottom border is specified"
        return self.bottom_border
    def set_left_border(self, val):
        """
        Set the presence or absence of left border.

        :param val: True indicates a border should be used, False
                    indicates no border.
        """
        self.left_border = val
    def get_left_border(self):
        "Return 1 if a left border is specified"
        return self.left_border
    def set_right_border(self, val):
        """
        Set the presence or absence of right border.

        :param val: True indicates a border should be used, False
                    indicates no border.
        """
        self.right_border = val
    def get_right_border(self):
        "Return 1 if a right border is specified"
        return self.right_border
    def get_background_color(self):
        """
        Return a tuple indicating the RGB components of the background
        color
        """
        return self.bgcolor
    def set_background_color(self, color):
        """
        Set the background color of the paragraph.

        :param color: tuple representing the RGB components of a color
                      (0,0,0) to (255,255,255)
        """
        self.bgcolor = color
    def set_alignment(self, align):
        """
        Set the paragraph alignment.

        :param align: PARA_ALIGN_LEFT, PARA_ALIGN_RIGHT, PARA_ALIGN_CENTER,
                      or PARA_ALIGN_JUSTIFY
        """
        self.align = align
    def get_alignment(self):
        "Return the alignment of the paragraph"
        return self.align
    def get_alignment_text(self):
        """
        Return a text string representing the alignment, either 'left',
        'right', 'center', or 'justify'
        """
        if self.align == PARA_ALIGN_LEFT:
            return "left"
        elif self.align == PARA_ALIGN_CENTER:
            return "center"
        elif self.align == PARA_ALIGN_RIGHT:
            return "right"
        elif self.align == PARA_ALIGN_JUSTIFY:
            return "justify"
        return "unknown"
    def set_left_margin(self, value):
        "sets the left indent in centimeters"
        self.lmargin = value
    def set_right_margin(self, value):
        "sets the right indent in centimeters"
        self.rmargin = value
    def set_first_indent(self, value):
        "sets the first line indent in centimeters"
        self.first_indent = value
    def set_top_margin(self, value):
        "sets the space above paragraph in centimeters"
        self.tmargin = value
    def set_bottom_margin(self, value):
        "sets the space below paragraph in centimeters"
        self.bmargin = value
    def get_left_margin(self):
        "returns the left indent in centimeters"
        return self.lmargin
    def get_right_margin(self):
        "returns the right indent in centimeters"
        return self.rmargin
    def get_first_indent(self):
        "returns the first line indent in centimeters"
        return self.first_indent
    def get_top_margin(self):
        "returns the space above paragraph in centimeters"
        return self.tmargin
    def get_bottom_margin(self):
        "returns the space below paragraph in centimeters"
        return self.bmargin
    def set_tabs(self, tab_stops):
        "sets the tab stops (a list); replaces any existing tab stops"
        # NOTE(review): assert is stripped when Python runs with -O;
        # raising TypeError would validate unconditionally.
        assert isinstance(tab_stops, list)
        self.tabs = tab_stops
    def get_tabs(self):
        "returns the list of tab stops"
        return self.tabs
|
beernarrd/gramps
|
gramps/gen/plug/docgen/paragraphstyle.py
|
Python
|
gpl-2.0
| 11,465
|
[
"Brian"
] |
ecbe0ecf82afc653bb6108a4e02b4a3f16eb3e528cd656c339f0580bbbf5220e
|
#!/usr/bin/env python
# This is a simple volume rendering example that uses a
# vtkVolumeTextureMapper2D mapper
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the standard renderer, render window and interactor
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Create the reader for the data
reader = vtk.vtkStructuredPointsReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/ironProt.vtk")
# Create transfer mapping scalar value to opacity
opacityTransferFunction = vtk.vtkPiecewiseFunction()
opacityTransferFunction.AddPoint(20, 0.0)
opacityTransferFunction.AddPoint(255, 0.2)
# Create transfer mapping scalar value to color
colorTransferFunction = vtk.vtkColorTransferFunction()
colorTransferFunction.AddRGBPoint(0.0, 0.0, 0.0, 0.0)
colorTransferFunction.AddRGBPoint(64.0, 1.0, 0.0, 0.0)
colorTransferFunction.AddRGBPoint(128.0, 0.0, 0.0, 1.0)
colorTransferFunction.AddRGBPoint(192.0, 0.0, 1.0, 0.0)
colorTransferFunction.AddRGBPoint(255.0, 0.0, 0.2, 0.0)
# The property describes how the data will look
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(colorTransferFunction)
volumeProperty.SetScalarOpacity(opacityTransferFunction)
# The mapper knows how to render the data
volumeMapper = vtk.vtkVolumeTextureMapper2D()
volumeMapper.SetInputConnection(reader.GetOutputPort())
# The volume holds the mapper and the property and can be used to
# position/orient the volume
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)
ren.AddVolume(volume)
renWin.Render()
def CheckAbort(obj, event):
    """Observer callback: abort the in-progress render if input is pending."""
    if obj.GetEventPending() == 0:
        return
    obj.SetAbortRender(1)
# Keep the UI responsive: abort long renders when user events are queued.
renWin.AddObserver("AbortCheckEvent", CheckAbort)
iren.Initialize()
renWin.Render()
# Hand control to the interactor's event loop (blocks until window closes).
iren.Start()
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Examples/VolumeRendering/Python/SimpleTextureMap2D.py
|
Python
|
gpl-3.0
| 1,868
|
[
"VTK"
] |
25a2f0ae510a73cbd5fbc4723f1162dedbf7c98744932fb15f1b2994a4477568
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Chromium.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import re
import subprocess
import sys
# Path regexes exempted from the presubmit checks below: mirrored third-party
# trees, generated files, and build-tool templates with their own conventions.
_EXCLUDED_PATHS = (
    r"^breakpad[\\\/].*",
    r"^native_client_sdk[\\\/]src[\\\/]build_tools[\\\/]make_rules.py",
    r"^native_client_sdk[\\\/]src[\\\/]build_tools[\\\/]make_simple.py",
    r"^native_client_sdk[\\\/]src[\\\/]tools[\\\/].*.mk",
    r"^net[\\\/]tools[\\\/]spdyshark[\\\/].*",
    r"^skia[\\\/].*",
    r"^v8[\\\/].*",
    r".*MakeFile$",
    r".+_autogen\.h$",
    r".+[\\\/]pnacl_shim\.c$",
    r"^gpu[\\\/]config[\\\/].*_list_json\.cc$",
)
# Fragment of a regular expression that matches C++ and Objective-C++
# implementation files.
_IMPLEMENTATION_EXTENSIONS = r'\.(cc|cpp|cxx|mm)$'
# Regular expression that matches code only used for test binaries
# (best effort).
_TEST_CODE_EXCLUDED_PATHS = (
    r'.*[/\\](fake_|test_|mock_).+%s' % _IMPLEMENTATION_EXTENSIONS,
    r'.+_test_(base|support|util)%s' % _IMPLEMENTATION_EXTENSIONS,
    r'.+_(api|browser|perf|pixel|unit|ui)?test(_[a-z]+)?%s' %
        _IMPLEMENTATION_EXTENSIONS,
    r'.+profile_sync_service_harness%s' % _IMPLEMENTATION_EXTENSIONS,
    r'.*[/\\](test|tool(s)?)[/\\].*',
    # content_shell is used for running layout tests.
    r'content[/\\]shell[/\\].*',
    # At request of folks maintaining this folder.
    r'chrome[/\\]browser[/\\]automation[/\\].*',
)
# Warning text attached by _CheckNoProductionCodeUsingTestOnlyFunctions.
_TEST_ONLY_WARNING = (
    'You might be calling functions intended only for testing from\n'
    'production code. It is OK to ignore this warning if you know what\n'
    'you are doing, as the heuristics used to detect the situation are\n'
    'not perfect. The commit queue will not block on this warning.\n'
    'Email joi@chromium.org if you have questions.')
# Warning text attached by _CheckIncludeOrder.
_INCLUDE_ORDER_WARNING = (
    'Your #include order seems to be broken. Send mail to\n'
    'marja@chromium.org if this is not the case.')
# Banned Objective-C constructs, consumed by _CheckNoBannedFunctions.
# Each entry is (substring to search for, message lines, treat_as_error):
# when treat_as_error is False a match is reported as a warning only.
_BANNED_OBJC_FUNCTIONS = (
    (
      'addTrackingRect:',
      (
       'The use of -[NSView addTrackingRect:owner:userData:assumeInside:] is'
       'prohibited. Please use CrTrackingArea instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      False,
    ),
    (
      'NSTrackingArea',
      (
       'The use of NSTrackingAreas is prohibited. Please use CrTrackingArea',
       'instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      False,
    ),
    (
      'convertPointFromBase:',
      (
       'The use of -[NSView convertPointFromBase:] is almost certainly wrong.',
       'Please use |convertPoint:(point) fromView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertPointToBase:',
      (
       'The use of -[NSView convertPointToBase:] is almost certainly wrong.',
       'Please use |convertPoint:(point) toView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertRectFromBase:',
      (
       'The use of -[NSView convertRectFromBase:] is almost certainly wrong.',
       'Please use |convertRect:(point) fromView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertRectToBase:',
      (
       'The use of -[NSView convertRectToBase:] is almost certainly wrong.',
       'Please use |convertRect:(point) toView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertSizeFromBase:',
      (
       'The use of -[NSView convertSizeFromBase:] is almost certainly wrong.',
       'Please use |convertSize:(point) fromView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertSizeToBase:',
      (
       'The use of -[NSView convertSizeToBase:] is almost certainly wrong.',
       'Please use |convertSize:(point) toView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
)
# Banned C++ constructs, consumed by _CheckNoBannedFunctions. Each entry is
# (substring to search for, message lines, treat_as_error, excluded_paths):
# excluded_paths holds regexes for files exempt from that particular ban.
_BANNED_CPP_FUNCTIONS = (
    # Make sure that gtest's FRIEND_TEST() macro is not used; the
    # FRIEND_TEST_ALL_PREFIXES() macro from base/gtest_prod_util.h should be
    # used instead since that allows for FLAKY_ and DISABLED_ prefixes.
    (
      'FRIEND_TEST(',
      (
       'Chromium code should not use gtest\'s FRIEND_TEST() macro. Include',
       'base/gtest_prod_util.h and use FRIEND_TEST_ALL_PREFIXES() instead.',
      ),
      False,
      (),
    ),
    (
      'ScopedAllowIO',
      (
       'New code should not use ScopedAllowIO. Post a task to the blocking',
       'pool or the FILE thread instead.',
      ),
      True,
      (
        r"^content[\\\/]shell[\\\/]browser[\\\/]shell_browser_main\.cc$",
        r"^content[\\\/]shell[\\\/]browser[\\\/]shell_message_filter\.cc$",
        r"^net[\\\/]disk_cache[\\\/]cache_util\.cc$",
      ),
    ),
    (
      'SkRefPtr',
      (
        'The use of SkRefPtr is prohibited. ',
        'Please use skia::RefPtr instead.'
      ),
      True,
      (),
    ),
    (
      'SkAutoRef',
      (
        'The indirect use of SkRefPtr via SkAutoRef is prohibited. ',
        'Please use skia::RefPtr instead.'
      ),
      True,
      (),
    ),
    (
      'SkAutoTUnref',
      (
        'The use of SkAutoTUnref is dangerous because it implicitly ',
        'converts to a raw pointer. Please use skia::RefPtr instead.'
      ),
      True,
      (),
    ),
    (
      'SkAutoUnref',
      (
        'The indirect use of SkAutoTUnref through SkAutoUnref is dangerous ',
        'because it implicitly converts to a raw pointer. ',
        'Please use skia::RefPtr instead.'
      ),
      True,
      (),
    ),
)
# OS_* macro names allowed in "#if defined(OS_...)" checks; anything else is
# flagged by _CheckForInvalidOSMacros.
_VALID_OS_MACROS = (
    # Please keep sorted.
    'OS_ANDROID',
    'OS_BSD',
    'OS_CAT',       # For testing.
    'OS_CHROMEOS',
    'OS_FREEBSD',
    'OS_IOS',
    'OS_LINUX',
    'OS_MACOSX',
    'OS_NACL',
    'OS_OPENBSD',
    'OS_POSIX',
    'OS_SOLARIS',
    'OS_WIN',
)
def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
  """Attempts to prevent use of functions intended only for testing in
  non-testing code. For now this is just a best-effort implementation
  that ignores header files and may have some false positives. A
  better implementation would probably need a proper C++ parser.
  """
  # We only scan .cc files and the like, as the declaration of
  # for-testing functions in header files are hard to distinguish from
  # calls to such functions without a proper C++ parser.
  file_inclusion_pattern = r'.+%s' % _IMPLEMENTATION_EXTENSIONS

  base_function_pattern = r'ForTest(ing)?|for_test(ing)?'
  inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
  comment_pattern = input_api.re.compile(r'//.*%s' % base_function_pattern)
  exclusion_pattern = input_api.re.compile(
      r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
          base_function_pattern, base_function_pattern))

  def FilterFile(affected_file):
    # Restrict to implementation files not already exempt from checks.
    black_list = (_EXCLUDED_PATHS +
                  _TEST_CODE_EXCLUDED_PATHS +
                  input_api.DEFAULT_BLACK_LIST)
    return input_api.FilterSourceFile(
        affected_file,
        white_list=(file_inclusion_pattern, ),
        black_list=black_list)

  problems = []
  for f in input_api.AffectedSourceFiles(FilterFile):
    local_path = f.LocalPath()
    # BUG FIX: enumerate from 1 so reported positions match editor line
    # numbers; the previous hand-rolled counter started at 0 and was
    # incremented only after reporting, so every report was off by one.
    for line_number, line in enumerate(
        input_api.ReadFile(f).splitlines(), 1):
      if (inclusion_pattern.search(line) and
          not comment_pattern.search(line) and
          not exclusion_pattern.search(line)):
        problems.append(
            '%s:%d\n %s' % (local_path, line_number, line.strip()))

  if problems:
    return [output_api.PresubmitPromptOrNotify(_TEST_ONLY_WARNING, problems)]
  else:
    return []
def _CheckNoIOStreamInHeaders(input_api, output_api):
"""Checks to make sure no .h files include <iostream>."""
files = []
pattern = input_api.re.compile(r'^#include\s*<iostream>',
input_api.re.MULTILINE)
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if not f.LocalPath().endswith('.h'):
continue
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if len(files):
return [ output_api.PresubmitError(
'Do not #include <iostream> in header files, since it inserts static '
'initialization into every file including the header. Instead, '
'#include <ostream>. See http://crbug.com/94794',
files) ]
return []
def _CheckNoUNIT_TESTInSourceFiles(input_api, output_api):
"""Checks to make sure no source files use UNIT_TEST"""
problems = []
for f in input_api.AffectedFiles():
if (not f.LocalPath().endswith(('.cc', '.mm'))):
continue
for line_num, line in f.ChangedContents():
if 'UNIT_TEST' in line:
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning('UNIT_TEST is only for headers.\n' +
'\n'.join(problems))]
def _CheckNoNewWStrings(input_api, output_api):
"""Checks to make sure we don't introduce use of wstrings."""
problems = []
for f in input_api.AffectedFiles():
if (not f.LocalPath().endswith(('.cc', '.h')) or
f.LocalPath().endswith(('test.cc', '_win.cc', '_win.h'))):
continue
allowWString = False
for line_num, line in f.ChangedContents():
if 'presubmit: allow wstring' in line:
allowWString = True
elif not allowWString and 'wstring' in line:
problems.append(' %s:%d' % (f.LocalPath(), line_num))
allowWString = False
else:
allowWString = False
if not problems:
return []
return [output_api.PresubmitPromptWarning('New code should not use wstrings.'
' If you are calling a cross-platform API that accepts a wstring, '
'fix the API.\n' +
'\n'.join(problems))]
def _CheckNoDEPSGIT(input_api, output_api):
"""Make sure .DEPS.git is never modified manually."""
if any(f.LocalPath().endswith('.DEPS.git') for f in
input_api.AffectedFiles()):
return [output_api.PresubmitError(
'Never commit changes to .DEPS.git. This file is maintained by an\n'
'automated system based on what\'s in DEPS and your changes will be\n'
'overwritten.\n'
'See http://code.google.com/p/chromium/wiki/UsingNewGit#Rolling_DEPS\n'
'for more information')]
return []
def _CheckNoBannedFunctions(input_api, output_api):
"""Make sure that banned functions are not used."""
warnings = []
errors = []
file_filter = lambda f: f.LocalPath().endswith(('.mm', '.m', '.h'))
for f in input_api.AffectedFiles(file_filter=file_filter):
for line_num, line in f.ChangedContents():
for func_name, message, error in _BANNED_OBJC_FUNCTIONS:
if func_name in line:
problems = warnings;
if error:
problems = errors;
problems.append(' %s:%d:' % (f.LocalPath(), line_num))
for message_line in message:
problems.append(' %s' % message_line)
file_filter = lambda f: f.LocalPath().endswith(('.cc', '.mm', '.h'))
for f in input_api.AffectedFiles(file_filter=file_filter):
for line_num, line in f.ChangedContents():
for func_name, message, error, excluded_paths in _BANNED_CPP_FUNCTIONS:
def IsBlacklisted(affected_file, blacklist):
local_path = affected_file.LocalPath()
for item in blacklist:
if input_api.re.match(item, local_path):
return True
return False
if IsBlacklisted(f, excluded_paths):
continue
if func_name in line:
problems = warnings;
if error:
problems = errors;
problems.append(' %s:%d:' % (f.LocalPath(), line_num))
for message_line in message:
problems.append(' %s' % message_line)
result = []
if (warnings):
result.append(output_api.PresubmitPromptWarning(
'Banned functions were used.\n' + '\n'.join(warnings)))
if (errors):
result.append(output_api.PresubmitError(
'Banned functions were used.\n' + '\n'.join(errors)))
return result
def _CheckNoPragmaOnce(input_api, output_api):
  """Make sure that #pragma once is not used in header files.

  (The previous docstring, "Make sure that banned functions are not used.",
  was copy-pasted from _CheckNoBannedFunctions; Chromium headers use include
  guards instead of #pragma once — see the style-guide link below.)
  """
  files = []
  pattern = input_api.re.compile(r'^#pragma\s+once',
                                 input_api.re.MULTILINE)
  for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
    if not f.LocalPath().endswith('.h'):
      continue
    contents = input_api.ReadFile(f)
    if pattern.search(contents):
      files.append(f)
  if files:
    return [output_api.PresubmitError(
        'Do not use #pragma once in header files.\n'
        'See http://www.chromium.org/developers/coding-style#TOC-File-headers',
        files)]
  return []
def _CheckNoTrinaryTrueFalse(input_api, output_api):
"""Checks to make sure we don't introduce use of foo ? true : false."""
problems = []
pattern = input_api.re.compile(r'\?\s*(true|false)\s*:\s*(true|false)')
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith(('.cc', '.h', '.inl', '.m', '.mm')):
continue
for line_num, line in f.ChangedContents():
if pattern.match(line):
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning(
'Please consider avoiding the "? true : false" pattern if possible.\n' +
'\n'.join(problems))]
def _CheckUnwantedDependencies(input_api, output_api):
  """Runs checkdeps on #include statements added in this
  change. Breaking - rules is an error, breaking ! rules is a
  warning.
  """
  # We need to wait until we have an input_api object and use this
  # roundabout construct to import checkdeps because this file is
  # eval-ed and thus doesn't have __file__.
  original_sys_path = sys.path
  try:
    sys.path = sys.path + [input_api.os_path.join(
        input_api.PresubmitLocalPath(), 'tools', 'checkdeps')]
    import checkdeps
    from cpp_checker import CppChecker
    from rules import Rule
  finally:
    # Restore sys.path to what it was before.
    sys.path = original_sys_path
  # Gather (path, changed-lines) pairs for every affected C++ file; only the
  # lines touched by this change are fed to checkdeps.
  added_includes = []
  for f in input_api.AffectedFiles():
    if not CppChecker.IsCppFile(f.LocalPath()):
      continue
    changed_lines = [line for line_num, line in f.ChangedContents()]
    added_includes.append([f.LocalPath(), changed_lines])
  deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath())
  # Split violations by rule type: DISALLOW ('-') is an error, anything
  # else (temporarily-allowed '!') is a warning.
  error_descriptions = []
  warning_descriptions = []
  for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
      added_includes):
    description_with_path = '%s\n %s' % (path, rule_description)
    if rule_type == Rule.DISALLOW:
      error_descriptions.append(description_with_path)
    else:
      warning_descriptions.append(description_with_path)
  results = []
  if error_descriptions:
    results.append(output_api.PresubmitError(
        'You added one or more #includes that violate checkdeps rules.',
        error_descriptions))
  if warning_descriptions:
    results.append(output_api.PresubmitPromptOrNotify(
        'You added one or more #includes of files that are temporarily\n'
        'allowed but being removed. Can you avoid introducing the\n'
        '#include? See relevant DEPS file(s) for details and contacts.',
        warning_descriptions))
  return results
def _CheckFilePermissions(input_api, output_api):
  """Check that all files have their permissions properly set.

  Runs tools/checkperms/checkperms.py over the affected files and turns any
  output from it into a presubmit error.
  """
  args = [sys.executable, 'tools/checkperms/checkperms.py', '--root',
          input_api.change.RepositoryRoot()]
  for f in input_api.AffectedFiles():
    args += ['--file', f.LocalPath()]
  # BUG FIX: capture stdout explicitly. Without stdout=subprocess.PIPE,
  # communicate() returns (None, None), so `errors` was always falsy and
  # checkperms.py failures were silently dropped.
  errors, _ = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()
  results = []
  if errors:
    results.append(output_api.PresubmitError('checkperms.py failed.',
                                             errors))
  return results
def _CheckNoAuraWindowPropertyHInHeaders(input_api, output_api):
"""Makes sure we don't include ui/aura/window_property.h
in header files.
"""
pattern = input_api.re.compile(r'^#include\s*"ui/aura/window_property.h"')
errors = []
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith('.h'):
continue
for line_num, line in f.ChangedContents():
if pattern.match(line):
errors.append(' %s:%d' % (f.LocalPath(), line_num))
results = []
if errors:
results.append(output_api.PresubmitError(
'Header files should not include ui/aura/window_property.h', errors))
return results
def _CheckIncludeOrderForScope(scope, input_api, file_path, changed_linenums):
"""Checks that the lines in scope occur in the right order.
1. C system files in alphabetical order
2. C++ system files in alphabetical order
3. Project's .h files
"""
c_system_include_pattern = input_api.re.compile(r'\s*#include <.*\.h>')
cpp_system_include_pattern = input_api.re.compile(r'\s*#include <.*>')
custom_include_pattern = input_api.re.compile(r'\s*#include ".*')
C_SYSTEM_INCLUDES, CPP_SYSTEM_INCLUDES, CUSTOM_INCLUDES = range(3)
state = C_SYSTEM_INCLUDES
previous_line = ''
previous_line_num = 0
problem_linenums = []
for line_num, line in scope:
if c_system_include_pattern.match(line):
if state != C_SYSTEM_INCLUDES:
problem_linenums.append((line_num, previous_line_num))
elif previous_line and previous_line > line:
problem_linenums.append((line_num, previous_line_num))
elif cpp_system_include_pattern.match(line):
if state == C_SYSTEM_INCLUDES:
state = CPP_SYSTEM_INCLUDES
elif state == CUSTOM_INCLUDES:
problem_linenums.append((line_num, previous_line_num))
elif previous_line and previous_line > line:
problem_linenums.append((line_num, previous_line_num))
elif custom_include_pattern.match(line):
if state != CUSTOM_INCLUDES:
state = CUSTOM_INCLUDES
elif previous_line and previous_line > line:
problem_linenums.append((line_num, previous_line_num))
else:
problem_linenums.append(line_num)
previous_line = line
previous_line_num = line_num
warnings = []
for (line_num, previous_line_num) in problem_linenums:
if line_num in changed_linenums or previous_line_num in changed_linenums:
warnings.append(' %s:%d' % (file_path, line_num))
return warnings
def _CheckIncludeOrderInFile(input_api, f, changed_linenums):
  """Checks the #include order for the given file f."""
  system_include_pattern = input_api.re.compile(r'\s*#include \<.*')
  # Exclude the following includes from the check:
  # 1) #include <.../...>, e.g., <sys/...> includes often need to appear in a
  # specific order.
  # 2) <atlbase.h>, "build/build_config.h"
  excluded_include_pattern = input_api.re.compile(
      r'\s*#include (\<.*/.*|\<atlbase\.h\>|"build/build_config.h")')
  custom_include_pattern = input_api.re.compile(r'\s*#include "(?P<FILE>.*)"')
  if_pattern = input_api.re.compile(
      r'\s*#\s*(if|elif|else|endif|define|undef).*')
  # Some files need specialized order of includes; exclude such files from this
  # check.
  uncheckable_includes_pattern = input_api.re.compile(
      r'\s*#include '
      '("ipc/.*macros\.h"|<windows\.h>|".*gl.*autogen.h")\s*')
  contents = f.NewContents()
  warnings = []
  line_num = 0
  # Handle the special first include. If the first include file is
  # some/path/file.h, the corresponding including file can be some/path/file.cc,
  # some/other/path/file.cc, some/path/file_platform.cc, some/path/file-suffix.h
  # etc. It's also possible that no special first include exists.
  for line in contents:
    line_num += 1
    if system_include_pattern.match(line):
      # No special first include -> process the line again along with normal
      # includes.
      line_num -= 1
      break
    match = custom_include_pattern.match(line)
    if match:
      match_dict = match.groupdict()
      header_basename = input_api.os_path.basename(
          match_dict['FILE']).replace('.h', '')
      if header_basename not in input_api.os_path.basename(f.LocalPath()):
        # No special first include -> process the line again along with normal
        # includes.
        line_num -= 1
      break
  # Split into scopes: Each region between #if and #endif is its own scope.
  # NOTE: files containing any "uncheckable" include bail out entirely and
  # report nothing.
  scopes = []
  current_scope = []
  for line in contents[line_num:]:
    line_num += 1
    if uncheckable_includes_pattern.match(line):
      return []
    if if_pattern.match(line):
      scopes.append(current_scope)
      current_scope = []
    elif ((system_include_pattern.match(line) or
           custom_include_pattern.match(line)) and
          not excluded_include_pattern.match(line)):
      current_scope.append((line_num, line))
  scopes.append(current_scope)
  # Each scope is ordered independently.
  for scope in scopes:
    warnings.extend(_CheckIncludeOrderForScope(scope, input_api, f.LocalPath(),
                                               changed_linenums))
  return warnings
def _CheckIncludeOrder(input_api, output_api):
"""Checks that the #include order is correct.
1. The corresponding header for source files.
2. C system files in alphabetical order
3. C++ system files in alphabetical order
4. Project's .h files in alphabetical order
Each region separated by #if, #elif, #else, #endif, #define and #undef follows
these rules separately.
"""
warnings = []
for f in input_api.AffectedFiles():
if f.LocalPath().endswith(('.cc', '.h')):
changed_linenums = set(line_num for line_num, _ in f.ChangedContents())
warnings.extend(_CheckIncludeOrderInFile(input_api, f, changed_linenums))
results = []
if warnings:
results.append(output_api.PresubmitPromptOrNotify(_INCLUDE_ORDER_WARNING,
warnings))
return results
def _CheckForVersionControlConflictsInFile(input_api, f):
pattern = input_api.re.compile('^(?:<<<<<<<|>>>>>>>) |^=======$')
errors = []
for line_num, line in f.ChangedContents():
if pattern.match(line):
errors.append(' %s:%d %s' % (f.LocalPath(), line_num, line))
return errors
def _CheckForVersionControlConflicts(input_api, output_api):
"""Usually this is not intentional and will cause a compile failure."""
errors = []
for f in input_api.AffectedFiles():
errors.extend(_CheckForVersionControlConflictsInFile(input_api, f))
results = []
if errors:
results.append(output_api.PresubmitError(
'Version control conflict markers found, please resolve.', errors))
return results
def _CheckHardcodedGoogleHostsInLowerLayers(input_api, output_api):
def FilterFile(affected_file):
"""Filter function for use with input_api.AffectedSourceFiles,
below. This filters out everything except non-test files from
top-level directories that generally speaking should not hard-code
service URLs (e.g. src/android_webview/, src/content/ and others).
"""
return input_api.FilterSourceFile(
affected_file,
white_list=(r'^(android_webview|base|content|net)[\\\/].*', ),
black_list=(_EXCLUDED_PATHS +
_TEST_CODE_EXCLUDED_PATHS +
input_api.DEFAULT_BLACK_LIST))
base_pattern = '"[^"]*google\.com[^"]*"'
comment_pattern = input_api.re.compile('//.*%s' % base_pattern)
pattern = input_api.re.compile(base_pattern)
problems = [] # items are (filename, line_number, line)
for f in input_api.AffectedSourceFiles(FilterFile):
for line_num, line in f.ChangedContents():
if not comment_pattern.search(line) and pattern.search(line):
problems.append((f.LocalPath(), line_num, line))
if problems:
return [output_api.PresubmitPromptOrNotify(
'Most layers below src/chrome/ should not hardcode service URLs.\n'
'Are you sure this is correct? (Contact: joi@chromium.org)',
[' %s:%d: %s' % (
problem[0], problem[1], problem[2]) for problem in problems])]
else:
return []
def _CheckNoAbbreviationInPngFileName(input_api, output_api):
"""Makes sure there are no abbreviations in the name of PNG files.
"""
pattern = input_api.re.compile(r'.*_[a-z]_.*\.png$|.*_[a-z]\.png$')
errors = []
for f in input_api.AffectedFiles(include_deletes=False):
if pattern.match(f.LocalPath()):
errors.append(' %s' % f.LocalPath())
results = []
if errors:
results.append(output_api.PresubmitError(
'The name of PNG files should not have abbreviations. \n'
'Use _hover.png, _center.png, instead of _h.png, _c.png.\n'
'Contact oshima@chromium.org if you have questions.', errors))
return results
def _DepsFilesToCheck(re, changed_lines):
"""Helper method for _CheckAddedDepsHaveTargetApprovals. Returns
a set of DEPS entries that we should look up."""
results = set()
# This pattern grabs the path without basename in the first
# parentheses, and the basename (if present) in the second. It
# relies on the simple heuristic that if there is a basename it will
# be a header file ending in ".h".
pattern = re.compile(
r"""['"]\+([^'"]+?)(/[a-zA-Z0-9_]+\.h)?['"].*""")
for changed_line in changed_lines:
m = pattern.match(changed_line)
if m:
path = m.group(1)
if not (path.startswith('grit/') or path == 'grit'):
results.add('%s/DEPS' % m.group(1))
return results
def _CheckAddedDepsHaveTargetApprovals(input_api, output_api):
  """When a dependency prefixed with + is added to a DEPS file, we
  want to make sure that the change is reviewed by an OWNER of the
  target file or directory, to avoid layering violations from being
  introduced. This check verifies that this happens.
  """
  # Collect all changed lines of every DEPS file in the change.
  changed_lines = set()
  for f in input_api.AffectedFiles():
    filename = input_api.os_path.basename(f.LocalPath())
    if filename == 'DEPS':
      changed_lines |= set(line.strip()
                           for line_num, line
                           in f.ChangedContents())
  if not changed_lines:
    return []
  virtual_depended_on_files = _DepsFilesToCheck(input_api.re, changed_lines)
  if not virtual_depended_on_files:
    return []
  # At commit time missing approvals are hard errors; at upload time they
  # are only notifications.
  if input_api.is_committing:
    if input_api.tbr:
      return [output_api.PresubmitNotifyResult(
          '--tbr was specified, skipping OWNERS check for DEPS additions')]
    if not input_api.change.issue:
      return [output_api.PresubmitError(
          "DEPS approval by OWNERS check failed: this change has "
          "no Rietveld issue number, so we can't check it for approvals.")]
    output = output_api.PresubmitError
  else:
    output = output_api.PresubmitNotifyResult
  owners_db = input_api.owners_db
  owner_email, reviewers = input_api.canned_checks._RietveldOwnerAndReviewers(
      input_api,
      owners_db.email_regexp,
      approval_needed=input_api.is_committing)
  # The CL author counts as an implicit approver of their own directories.
  owner_email = owner_email or input_api.change.author_email
  reviewers_plus_owner = set(reviewers)
  if owner_email:
    reviewers_plus_owner.add(owner_email)
  missing_files = owners_db.files_not_covered_by(virtual_depended_on_files,
                                                 reviewers_plus_owner)
  # Convert back from 'path/DEPS' entries to the '+path' spelling used in
  # the DEPS file for the report.
  unapproved_dependencies = ["'+%s'," % path[:-len('/DEPS')]
                             for path in missing_files]
  if unapproved_dependencies:
    output_list = [
        output('Missing LGTM from OWNERS of directories added to DEPS:\n %s' %
               '\n '.join(sorted(unapproved_dependencies)))]
    if not input_api.is_committing:
      suggested_owners = owners_db.reviewers_for(missing_files, owner_email)
      output_list.append(output(
          'Suggested missing target path OWNERS:\n %s' %
          '\n '.join(suggested_owners or [])))
    return output_list
  return []
def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  results = []
  # Project-wide canned checks first, then the local checks defined above.
  results.extend(input_api.canned_checks.PanProjectChecks(
      input_api, output_api, excluded_paths=_EXCLUDED_PATHS))
  results.extend(_CheckAuthorizedAuthor(input_api, output_api))
  results.extend(
      _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
  results.extend(_CheckNoIOStreamInHeaders(input_api, output_api))
  results.extend(_CheckNoUNIT_TESTInSourceFiles(input_api, output_api))
  results.extend(_CheckNoNewWStrings(input_api, output_api))
  results.extend(_CheckNoDEPSGIT(input_api, output_api))
  results.extend(_CheckNoBannedFunctions(input_api, output_api))
  results.extend(_CheckNoPragmaOnce(input_api, output_api))
  results.extend(_CheckNoTrinaryTrueFalse(input_api, output_api))
  results.extend(_CheckUnwantedDependencies(input_api, output_api))
  results.extend(_CheckFilePermissions(input_api, output_api))
  results.extend(_CheckNoAuraWindowPropertyHInHeaders(input_api, output_api))
  results.extend(_CheckIncludeOrder(input_api, output_api))
  results.extend(_CheckForVersionControlConflicts(input_api, output_api))
  results.extend(_CheckPatchFiles(input_api, output_api))
  results.extend(_CheckHardcodedGoogleHostsInLowerLayers(input_api, output_api))
  results.extend(_CheckNoAbbreviationInPngFileName(input_api, output_api))
  results.extend(_CheckForInvalidOSMacros(input_api, output_api))
  results.extend(_CheckAddedDepsHaveTargetApprovals(input_api, output_api))
  # Tabs are only forbidden in .grd files here.
  results.extend(
      input_api.canned_checks.CheckChangeHasNoTabs(
          input_api,
          output_api,
          source_file_filter=lambda x: x.LocalPath().endswith('.grd')))
  # When this script itself changes, run its unit tests.
  if any('PRESUBMIT.py' == f.LocalPath() for f in input_api.AffectedFiles()):
    results.extend(input_api.canned_checks.RunUnitTestsInDirectory(
        input_api, output_api,
        input_api.PresubmitLocalPath(),
        whitelist=[r'^PRESUBMIT_test\.py$']))
  return results
def _CheckSubversionConfig(input_api, output_api):
  """Verifies the subversion config file is correctly setup.

  Checks that autoprops are enabled, returns an error otherwise.
  """
  join = input_api.os_path.join
  if input_api.platform == 'win32':
    appdata = input_api.environ.get('APPDATA', '')
    if not appdata:
      return [output_api.PresubmitError('%APPDATA% is not configured.')]
    path = join(appdata, 'Subversion', 'config')
  else:
    home = input_api.environ.get('HOME', '')
    if not home:
      return [output_api.PresubmitError('$HOME is not configured.')]
    path = join(home, '.subversion', 'config')
  error_msg = (
      'Please look at http://dev.chromium.org/developers/coding-style to\n'
      'configure your subversion configuration file. This enables automatic\n'
      'properties to simplify the project maintenance.\n'
      'Pro-tip: just download and install\n'
      'http://src.chromium.org/viewvc/chrome/trunk/tools/build/slave/config\n')
  try:
    # FIX: read under a context manager so the config file handle is closed
    # deterministically; the previous `open(path, 'r').read()` leaked it.
    with open(path, 'r') as config_file:
      lines = config_file.read().splitlines()
    # Make sure auto-props is enabled and check for 2 Chromium standard
    # auto-prop.
    if (not '*.cc = svn:eol-style=LF' in lines or
        not '*.pdf = svn:mime-type=application/pdf' in lines or
        not 'enable-auto-props = yes' in lines):
      return [
          output_api.PresubmitNotifyResult(
              'It looks like you have not configured your subversion config '
              'file or it is not up-to-date.\n' + error_msg)
      ]
  except (OSError, IOError):
    return [
        output_api.PresubmitNotifyResult(
            'Can\'t find your subversion config file.\n' + error_msg)
    ]
  return []
def _CheckAuthorizedAuthor(input_api, output_api):
  """For non-googler/chromites committers, verify the author's email address is
  in AUTHORS.
  """
  # TODO(maruel): Add it to input_api?
  import fnmatch

  author = input_api.change.author_email
  if not author:
    input_api.logging.info('No author, skipping AUTHOR check')
    return []
  authors_path = input_api.os_path.join(
      input_api.PresubmitLocalPath(), 'AUTHORS')
  # FIX: read the AUTHORS file under a context manager; the previous
  # generator-based version never closed the file handle.
  with open(authors_path) as authors_file:
    valid_authors = [
        match.group(1).lower()
        for match in (input_api.re.match(r'[^#]+\s+\<(.+?)\>\s*$', line)
                      for line in authors_file)
        if match]
  # AUTHORS entries may be fnmatch patterns (e.g. '*@chromium.org').
  if not any(fnmatch.fnmatch(author.lower(), valid) for valid in valid_authors):
    input_api.logging.info('Valid authors are %s', ', '.join(valid_authors))
    return [output_api.PresubmitPromptWarning(
        ('%s is not in AUTHORS file. If you are a new contributor, please visit'
         '\n'
         'http://www.chromium.org/developers/contributing-code and read the '
         '"Legal" section\n'
         'If you are a chromite, verify the contributor signed the CLA.') %
        author)]
  return []
def _CheckPatchFiles(input_api, output_api):
problems = [f.LocalPath() for f in input_api.AffectedFiles()
if f.LocalPath().endswith(('.orig', '.rej'))]
if problems:
return [output_api.PresubmitError(
"Don't commit .rej and .orig files.", problems)]
else:
return []
def _DidYouMeanOSMacro(bad_macro):
try:
return {'A': 'OS_ANDROID',
'B': 'OS_BSD',
'C': 'OS_CHROMEOS',
'F': 'OS_FREEBSD',
'L': 'OS_LINUX',
'M': 'OS_MACOSX',
'N': 'OS_NACL',
'O': 'OS_OPENBSD',
'P': 'OS_POSIX',
'S': 'OS_SOLARIS',
'W': 'OS_WIN'}[bad_macro[3].upper()]
except KeyError:
return ''
def _CheckForInvalidOSMacrosInFile(input_api, f):
"""Check for sensible looking, totally invalid OS macros."""
preprocessor_statement = input_api.re.compile(r'^\s*#')
os_macro = input_api.re.compile(r'defined\((OS_[^)]+)\)')
results = []
for lnum, line in f.ChangedContents():
if preprocessor_statement.search(line):
for match in os_macro.finditer(line):
if not match.group(1) in _VALID_OS_MACROS:
good = _DidYouMeanOSMacro(match.group(1))
did_you_mean = ' (did you mean %s?)' % good if good else ''
results.append(' %s:%d %s%s' % (f.LocalPath(),
lnum,
match.group(1),
did_you_mean))
return results
def _CheckForInvalidOSMacros(input_api, output_api):
"""Check all affected files for invalid OS macros."""
bad_macros = []
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith(('.py', '.js', '.html', '.css')):
bad_macros.extend(_CheckForInvalidOSMacrosInFile(input_api, f))
if not bad_macros:
return []
return [output_api.PresubmitError(
'Possibly invalid OS macro[s] found. Please fix your code\n'
'or add your macro to src/PRESUBMIT.py.', bad_macros)]
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point at upload time; delegates to the shared checks."""
  return list(_CommonChecks(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point at commit time: shared checks plus commit-only
  gates (tree status, try jobs, BUG/description fields, svn config)."""
  results = []
  results.extend(_CommonChecks(input_api, output_api))
  # TODO(thestig) temporarily disabled, doesn't work in third_party/
  #results.extend(input_api.canned_checks.CheckSvnModifiedDirectories(
  #    input_api, output_api, sources))
  # Make sure the tree is 'open'.
  results.extend(input_api.canned_checks.CheckTreeIsOpen(
      input_api,
      output_api,
      json_url='http://chromium-status.appspot.com/current?format=json'))
  # Require try-job results from the core platforms before committing.
  results.extend(input_api.canned_checks.CheckRietveldTryJobExecution(input_api,
      output_api, 'http://codereview.chromium.org',
      ('win_rel', 'linux_rel', 'mac_rel, win:compile'),
      'tryserver@chromium.org'))
  results.extend(input_api.canned_checks.CheckChangeHasBugField(
      input_api, output_api))
  results.extend(input_api.canned_checks.CheckChangeHasDescription(
      input_api, output_api))
  results.extend(_CheckSubversionConfig(input_api, output_api))
  return results
def GetPreferredTrySlaves(project, change):
  """Pick try bots based on which files the change touches."""
  files = change.LocalPaths()

  # Nothing to try for an empty change or one touching only OWNERS files.
  if not files or all(re.search(r'[\\/]OWNERS$', f) for f in files):
    return []

  def _every_file_matches(pattern):
    return all(re.search(pattern, f) for f in files)

  def _any_file_matches(pattern):
    return any(re.search(pattern, f) for f in files)

  # Purely platform-specific changes get a reduced, platform-specific set.
  if _every_file_matches('\.(m|mm)$|(^|[/_])mac[/_.]'):
    return ['mac_rel', 'mac:compile']
  if _every_file_matches('(^|[/_])win[/_.]'):
    return ['win_rel', 'win7_aura', 'win:compile']
  if _every_file_matches('(^|[/_])android[/_.]'):
    return ['android_aosp', 'android_dbg', 'android_clang_dbg']
  if _every_file_matches('^native_client_sdk'):
    return ['linux_nacl_sdk', 'win_nacl_sdk', 'mac_nacl_sdk']
  if _every_file_matches('[/_]ios[/_.]'):
    return ['ios_rel_device', 'ios_dbg_simulator']

  # Default cross-platform bot set.
  trybots = [
      'android_clang_dbg',
      'android_dbg',
      'ios_dbg_simulator',
      'ios_rel_device',
      'linux_asan',
      'linux_aura',
      'linux_chromeos',
      'linux_clang:compile',
      'linux_rel',
      'mac_rel',
      'mac:compile',
      'win7_aura',
      'win_rel',
      'win:compile',
      'win_x64_rel:compile',
  ]

  # Match things like path/aura/file.cc and path/file_aura.cc.
  # Same for chromeos.
  if _any_file_matches('[/_](aura|chromeos)'):
    trybots += ['linux_chromeos_clang:compile', 'linux_chromeos_asan']

  # The AOSP bot doesn't build the chrome/ layer, so ignore any changes to it
  # unless they're .gyp(i) files as changes to those files can break the gyp
  # step on that bot.
  if (not _every_file_matches('^chrome') or
      _any_file_matches('\.gypi?$')):
    trybots += ['android_aosp']
  return trybots
|
mogoweb/chromium-crosswalk
|
PRESUBMIT.py
|
Python
|
bsd-3-clause
| 38,018
|
[
"VisIt"
] |
f8cc476fe8b0ce003f5a9199b7321488f12621c5714e835db4104a73f03c3276
|
from __future__ import print_function
"""
INSTRUCTIONS
This tests requires AT LEAST the following set
export FBUTILS_APP_ID=xxxxxxxxx
export FBUTILS_APP_SECRET=xxxxxxxxxx
export FBUTILS_APP_SECRETPROOF=1
export FBUTILS_APP_SCOPE=email
export FBUTILS_APP_DOMAIN=xxxxxxxxxx
export FBUTILS_REDIRECT_URI_OAUTH_CODE=https://myapp.example.com/oauth?response_type=code'
"""
# stdlib
import os
import pdb
import pprint
# pypi
from six.moves import input as _input
# local
import facebook_utils
from facebook_utils.utils import parse_environ
# ==============================================================================
# Environment variables that must be set for this interactive test to run
# (see the module docstring for example values).
REQUIRED_ENV = [
    "FBUTILS_APP_ID",
    "FBUTILS_APP_SECRET",
    "FBUTILS_APP_SECRETPROOF",
    "FBUTILS_APP_DOMAIN",
    "FBUTILS_APP_SCOPE",
    "FBUTILS_REDIRECT_URI_OAUTH_CODE",
]
# presumably maps the FBUTILS_* variables onto lowercased keys such as
# "app_id" (see usage below); verify against facebook_utils.utils.parse_environ
FB_UTILS_ENV = parse_environ(requires=REQUIRED_ENV)
# ------------------------------------------------------------------------------
def new_fb_object():
    """Build a ``FacebookHub`` configured from the FBUTILS_* environment.

    ``debug_error=True`` so API failures raise with full detail during this
    interactive test.
    """
    return facebook_utils.FacebookHub(
        app_id=FB_UTILS_ENV["app_id"],
        app_secret=FB_UTILS_ENV["app_secret"],
        app_secretproof=FB_UTILS_ENV["app_secretproof"],
        app_scope=FB_UTILS_ENV["app_scope"],
        oauth_code_redirect_uri=FB_UTILS_ENV["oauth_code_redirect_uri"],
        debug_error=True,
    )
def _get_code(_hub):
    """Interactively obtain an OAuth ``code`` from the user.

    Prints the Facebook approval-dialog URL, then reads back the ``code``
    query parameter the user copies from the redirect URL.
    """
    print(
        "Visit the following url to approve. You will be redirected back to the `FBUTILS_REDIRECT_URI_OAUTH_CODE` URI >>> "
    )
    print(_hub.oauth_code__url_dialog())
    raw = _input("""What is the `code` query param in the url? >>> """)
    # Trim whitespace and drop any URL fragment pasted along with the code.
    return raw.strip().split("#")[0]
#
# STEP 1 - generate a dialog url
#
hub = new_fb_object()

# this one is a bit extended. not always needed
if True:
    # STEP 2 - exchange the approval code for an access token.
    print(("*" * 40))
    _code = _get_code(hub)
    print("fbutils will now try to exchange the code for an access token.")
    print(">>> fbutils will access the facebook graph api:")
    print(
        hub.oauth_code__url_access_token(
            submitted_code=_code,
            redirect_uri=FB_UTILS_ENV["oauth_code_redirect_uri"],
            scope=FB_UTILS_ENV["app_scope"],
        )
    )
    access_token = hub.oauth_code__get_access_token(submitted_code=_code)
    print("- " * 20)
    print("Success!")
    print("!!! The access token is: `%s`" % access_token)

    # STEP 3 - same exchange, but keep the full API response as well.
    print(("*" * 40))
    print(
        "let's do this again, but use another API tool that will save the full response."
    )
    _code = _get_code(hub)
    print(
        hub.oauth_code__url_access_token(
            submitted_code=_code,
            redirect_uri=FB_UTILS_ENV["oauth_code_redirect_uri"],
            scope=FB_UTILS_ENV["app_scope"],
        )
    )
    (access_token, response) = hub.oauth_code__get_access_token(
        submitted_code=_code, keep_response=True
    )
    print("- " * 20)
    print("Success!")
    print("!!! The access token is: `%s`" % access_token)
    print("!!! The response is: %s" % pprint.pformat(response))

    # STEP 4 - fetch access token and profile in a single call.
    print(("*" * 40))
    print("now let's try to get the Profile & Token at once.")
    _code = _get_code(hub)
    (access_token, profile) = hub.oauth_code__get_access_token_and_profile(
        submitted_code=_code
    )
    print("- " * 20)
    print("Success!")
    print("!!! The access token is: `%s`" % access_token)
    print(">>> The profile is: %s" % pprint.pformat(profile))

    # STEP 5 - fetch the profile alone with the current token.
    print(("*" * 40))
    print("now let's grab the profile alone.")
    url_me = hub.graph__url_me_for_access_token(access_token)
    fb_data = hub.api_proxy(url=url_me, expected_format="json.load")
    print("- " * 20)
    print("Success!")
    print(">>>", fb_data)

    # STEP 6 - issue a batched Graph API request.
    print(("*" * 40))
    print("now let's grab a batch.")
    FB_LIMIT_LINKS = 1
    FB_LIMIT_HOME = 1
    FB_FIELDS = "id,from,message,comments,created_time,link,caption,description"
    url_multi = """https://graph.facebook.com"""
    fb_post_data = {
        "access_token": access_token,
        "batch": [
            {"method": "GET", "relative_url": "/me/permissions"},
            {
                "method": "GET",
                "relative_url": "/me/feed",
                "limit": FB_LIMIT_LINKS,
                "fields": FB_FIELDS,
            },
            # {"method": "GET", 'relative_url': "/me/links", 'limit': FB_LIMIT_LINKS, 'fields': FB_FIELDS, },
            # {"method": "GET", 'relative_url': "/me/home", 'limit': FB_LIMIT_HOME, 'fields': FB_FIELDS, },
        ],
    }
    fb_data = hub.api_proxy(
        url=url_multi, expected_format="json.load", post_data=fb_post_data
    )
    print("- " * 20)
    print("Success!")
    pprint.pprint(fb_data)
|
jvanasco/facebook_utils
|
test_interactive.py
|
Python
|
bsd-3-clause
| 4,520
|
[
"VisIt"
] |
2e5060ddd96c55d4c38ae1138f528039f3c5923553876ce83153f8655f9d65c0
|
# -*- coding: utf-8 -*-
'''
@author: Gabriele Girelli
@contact: gigi.ga90@gmail.com
@description: statistic operations library.
'''
# DEPENDENCIES =================================================================
import math
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from pygpseq import const
from pygpseq.tools import vector as vt
# FUNCTIONS ====================================================================
def angle_between_points( p0, c, p1 ):
    """Return the angle (in degrees) at vertex c of the triangle p0-c-p1.

    Uses the law of cosines.
    From http://phrogz.net/angle-between-three-points
    """
    p0, c, p1 = np.array(p0), np.array(c), np.array(p1)

    # Side lengths of the triangle.
    side_a = np.sqrt(np.sum((p0 - c)**2))
    side_b = np.sqrt(np.sum((p1 - c)**2))
    side_c = np.sqrt(np.sum((p0 - p1)**2))

    # Law of cosines solved for the angle at c, converted to degrees.
    angle_rad = math.acos(
        (side_a**2 + side_b**2 - side_c**2) / (2 * side_a * side_b))
    return angle_rad / math.pi * 180
def binned_mode(x, nbins):
    """Identify binned mode.

    Args:
        x (np.array): dataset.
        nbins (int): number of bins.

    Returns:
        int: the most occupied bin in the provided dataset.
    """
    if len(x) == 0:
        return np.nan

    # Bin the data over [0, max(x)].
    breaks = np.linspace(0, max(x), nbins)
    assigned_bins = np.digitize(x, breaks)

    # Per-bin occurrence counts as (bin, count) pairs.
    occ = vt.uniquec(assigned_bins)

    # Rank pairs by decreasing count.
    counts = np.array([pair[1] for pair in occ])
    order = np.argsort(counts).tolist()
    order.reverse()
    occ = [occ[i] for i in order]

    # Break value of the most occupied bin.
    return breaks[occ[0][0] - 1]
def binned_profile(x, y, nbins = None):
    """Produce an approximation of sparse data by binning it.

    Args:
        x (numeric): x coordinates.
        y (numeric): y coordinates.
        nbins (int): curve precision (opt, def: 200).

    Returns:
        np.array: structured array with one row per bin break, holding the
        break value and per-bin summary statistics of y (NaN for empty bins).
    """
    if None == nbins:
        nbins = 200

    # Check format
    y = np.array(y)

    # Bin breaks
    breaks = np.linspace(0, max(x), nbins)

    # Assign data to the bins
    # NOTE(review): np.digitize labels in-range values 1..nbins, while the
    # loop below iterates bin ids 0..max-1; bin id 0 only holds x < 0 and the
    # highest occupied bin id is never visited -- confirm this is intended.
    assigned_bins = np.digitize(x, breaks)

    # Get mean and median for every bin
    data = np.zeros((len(breaks),),
        dtype = [('breaks', 'f'), ('mean', 'f'), ('median', 'f'), ('std', 'f'),
        ('mode', 'f'), ('max', 'f'), ('mean_raw', 'f'), ('median_raw', 'f'),
        ('std_raw', 'f'), ('mode_raw', 'f'), ('max_raw', 'f'), ('n', 'f')])
    for bin_id in range(assigned_bins.max()):
        where = np.where(assigned_bins == bin_id)
        data['breaks'][bin_id] = breaks[bin_id]
        if 0 != where[0].shape[0]:
            # Occupied bin: summary statistics of the y values that fell in.
            data['mean'][bin_id] = np.mean(y[where])
            data['median'][bin_id] = np.median(y[where])
            data['mode'][bin_id] = binned_mode(y[where], nbins)
            data['std'][bin_id] = np.std(y[where])
            data['max'][bin_id] = np.max(y[where])
            data['n'][bin_id] = len(y[where])
        else:
            # Empty bin: mark statistics as missing.
            data['mean'][bin_id] = np.nan
            data['median'][bin_id] = np.nan
            data['mode'][bin_id] = np.nan
            data['std'][bin_id] = np.nan
            data['max'][bin_id] = np.nan
            data['n'][bin_id] = 0

    # Output
    return(data)
def calc_density(data, **kwargs):
    """
    Calculate the Gaussian KDE of the provided data series.

    Args:
        data (sequence): data series to estimate the density of.
        sigma_density (float): standard deviation used for covariance
            calculation (opt, def: 0.1).
        nbins (int): #steps for the density curve calculation (opt, def: 1000).

    Returns:
        dict: density curve (x, y) and function (f).
    """
    # Default values
    sigma_density = kwargs.get('sigma_density', .1)
    nbins = kwargs.get('nbins', 1000)

    # If only one nucleus was found the density degenerates to an indicator
    # function at that single value.
    if 1 == len(data):
        # Bind the single value as a default argument instead of building the
        # lambda's source with eval() -- same behavior, no string execution.
        f = np.vectorize(lambda x, _v=data[0]: 1 if x == _v else 0)
        return({
            'x' : np.array([data[0]]),
            'y' : np.array([1]),
            'f' : f
        })

    # Prepare density function
    density = stats.gaussian_kde(data)

    # Re-compute covariance with the requested bandwidth factor.
    density.covariance_factor = lambda : sigma_density
    density._compute_covariance()

    # Output: density sampled on an even grid over the data range.
    out = {}
    out['x'] = np.linspace(min(data), max(data), nbins)
    out['f'] = density
    out['y'] = density(out['x'])
    return(out)
def calc_theta(a, b):
    """Calculate rotation angle based on a (opposite) and b (adjacent) sides.

    Return:
        float: theta in rad, signed by the quadrant of (a, b).
    """
    hyp = np.sqrt(a**2 + b**2)
    base = np.arccos(a / hyp)
    # Positive angle only in the lower half-plane with a non-zero opposite
    # side (b < 0, a != 0); every other quadrant/edge case is negated.
    if b < 0 and a != 0:
        return base
    return -base
def centered_coords_3d(img, dot_coords = None):
    """
    Extract coordinates from a binary image, centered on their centroid.

    Args:
        img (nd.array): binary image.
        dot_coords (tuple): optional (z, x, y) coordinates shifted by the
            same centroid.

    Returns:
        tuple: (x, y, z, xd, yd, zd); the last three are None when no
        dot_coords were provided.
    """
    z, x, y = np.nonzero(img)

    # Centroid of the nonzero voxels, computed before any shifting.
    cx, cy, cz = np.mean(x), np.mean(y), np.mean(z)

    if dot_coords is not None:
        zd, xd, yd = dot_coords
        return((x - cx, y - cy, z - cz, xd - cx, yd - cy, zd - cz))
    return((x - cx, y - cy, z - cz, None, None, None))
def extract_3ev(coords):
    """
    Extract 3 major eigen vectors.

    Args:
        coords (nd.array): coordinates table with one point per row.

    Returns:
        tuple: major 3 eigen vectors. Coordinate order matches input columns.
    """
    # Eigen-decompose the covariance matrix; rank by decreasing eigenvalue.
    evals, evecs = np.linalg.eig(np.cov(coords))
    order = np.argsort(evals)[::-1]

    # Group the components of the top-3 eigenvectors per coordinate axis.
    av = [evecs[0, order[k]] for k in range(3)]
    bv = [evecs[1, order[k]] for k in range(3)]
    cv = [evecs[2, order[k]] for k in range(3)]
    return((av, bv, cv))
def get_fwhm(xs, ys):
    """Calculate FWHM of highest peak in a curve.

    Args:
        xs (np.array): x-coordinates of the curve.
        ys (np.array): y-coordinates of the curve.

    Returns:
        list: FWHM interval x-coords of highest peak. None on length
        mismatch; [x - 1, x + 1] when the curve has a single point.
    """
    # assumes xs/ys are np.arrays (range-based indexing below) -- TODO confirm

    # CHECK PARAMS =============================================================

    # Must have the same length
    if len(xs) != len(ys):
        return(None)
    if 1 == len(ys):
        return([xs[0] - 1, xs[0] + 1])

    # GET FWHM =================================================================

    # Identify highest peak
    xmaxi = ys.tolist().index(max(ys))

    # Get FWHM range [left] ----------------------------------------------------

    if 0 != xmaxi:
        # Get absolute difference to HM value
        x1 = abs(ys[range(xmaxi)] - max(ys) / 2)

        # Get threshold based on average distance of consecutive points
        # NOTE(review): despite the comment above, this is the *maximum*
        # consecutive-point distance, not the average -- confirm intended.
        if 1 == len(x1):
            thr = x1[0]
        else:
            thr = np.max(abs(np.diff(x1)))

        # Select values close to the HM (based on threshold and abs difference)
        selected = [i for i in range(len(x1)) if x1[i] <= thr]

        # Select left boundary
        if 0 == len(selected):
            # No point within threshold: fall back to the closest point.
            x1 = xs[range(xmaxi)][x1.tolist().index(min(x1))]
        else:
            x1 = xs[range(xmaxi)][max(selected)]
    else:
        # Peak is on the first point: interval starts at the curve edge.
        x1 = min(xs)

    # Get FWHM range [right] ---------------------------------------------------

    if len(xs) != (xmaxi + 1):
        # Get absolute difference to HM value
        x2 = abs(ys[range(xmaxi + 1, len(ys))] - max(ys) / 2)

        # Get threshold based on average distance of consecutive points
        if 1 == len(x2):
            thr = x2[0]
        else:
            thr = np.max(abs(np.diff(x2)))

        # Select values close to the HM (based on threshold and abs difference)
        selected = [i for i in range(len(x2)) if x2[i] <= thr]

        # Select right boundary
        if 0 == len(selected):
            x2 = xs[range(xmaxi + 1, len(xs))][x2.tolist().index(min(x2))]
        else:
            x2 = xs[range(xmaxi + 1, len(xs))][min(selected)]
    else:
        # Peak is on the last point: interval ends at the curve edge.
        x2 = max(xs)

    # Output
    return([x1, x2])
def get_intercepts(ys, xs = None):
    """Given a series of y coordinates, identify the x-axis intercept.

    Args:
        ys (numeric): y coordinates series.
        xs (numeric): x coordinates series (optional).

    Returns:
        int: index of x-axis intercepting y (if xs == None).
        numeric: x coordinates interpolated point of intersection with x-axis.
    """

    # Return no intercept if no data was provided
    if 0 == len(ys):
        return([])

    # Will contain the intercepts (pairs of) indexes
    out = []

    # Reformat coordinate series
    ys = np.array(ys)

    # Count ys
    nys = ys.shape[0]

    # Checking matching ys/xs size
    if not type(None) == type(xs):
        xs = np.array(xs)
        if ys.shape[0] != xs.shape[0]:
            # Mismatched lengths: warn and fall back to index-based output.
            print(ys.shape[0])
            print(xs.shape[0])
            print('Discarded provided X coordinates.')
            xs = None

    # Perfect intersections ----------------------------------------------------

    # Identify zero-holding cells
    bys = ys == 0
    if 0 != bys.sum():
        # Save zero-holding cells index
        out.extend([i for i in range(nys) if bys[i]])

    # Interpolate intersections ------------------------------------------------

    # Identify positive/negative ys (convert to boolean)
    # Exact zeros become NaN so the pairwise sums below never count them as
    # sign changes (they were already collected as perfect intersections).
    bys = np.zeros(ys.shape)
    bys[ys == 0] = np.nan
    bys[ys > 0] = 1
    bys[ys < 0] = -1

    # Identify intercepts by summing previous boolean cell value
    # A sum of 0 means two consecutive cells have opposite signs (crossing).
    bys = np.array([bys[i] + bys[i+1] for i in range(bys.shape[0] - 1)])
    bysi = [i for i in range(len(bys)) if (bys == 0)[i]]

    if type(None) == type(xs):
        # Interpolate index of intercepts
        out.extend([i + .5 for i in bysi])
    else:
        # Interpolate x of intercepts
        out = [xs[i] for i in out]
        out.extend([(xs[i] + xs[i+1]) / 2.0 for i in bysi])

    # Output
    return(out)
def get_norm_pdf(mu, sigma, x):
    """Normal distribution N(mu, sigma) probability value at x.

    Args:
        mu (float): normal distribution mean.
        sigma (float): normal distribution standard deviation.
        x (float): x-coordinate.

    Returns:
        float: N(mu, sigma) probability value at x.
    """
    variance = sigma**2
    normalization = 1 / np.sqrt(2 * variance * np.pi)
    return normalization * np.exp(-(x - mu)**2 / (2 * variance))
def get_outliers(x, non = None, fig = None, close = None):
    """Identifies the outliers in a data set via boxplot whiskers.

    Args:
        x (np.array): data set.
        non (bool): whether to return outliers or non-outliers.
        fig (plt.figure): for boxplot purposes (a new figure when None).
        close (bool): whether to close the figure before return.

    Returns:
        list: (non-)outlier indexes.
    """
    # Defaults
    if None == non:
        non = False
    if None == fig:
        fig = plt.figure()
    ax = fig.gca()
    if None == close:
        close = False

    # If no dataset is provided
    if 0 == len(x):
        return([])

    # Identify outliers through boxplot
    bp = ax.boxplot(x)

    # Close figure
    if close:
        # Fixed: was plt.close(tmp_fig), which raised NameError because
        # tmp_fig is undefined; close the figure actually used.
        plt.close(fig)

    # Retrieve outlier values from the boxplot fliers (both axes).
    outliers = []
    outliers.extend(bp['fliers'][0].get_data()[0].tolist())
    outliers.extend(bp['fliers'][0].get_data()[1].tolist())

    # Unique outliers
    outliers = set(outliers)

    # Map outlier values back to their indexes in x.
    if not non:
        return([i for i in range(len(x)) if x[i] in outliers])
    else:
        return([i for i in range(len(x)) if not x[i] in outliers])
def r_to_size(r_interval, size_type):
    """Convert radius interval to size (Area/Volume) interval.

    Args:
        r_interval (tuple[float]): radius interval.
        size_type (int): segmentation type (according to pygpseq.const).

    Returns:
        tuple(float): size (volume of area) interval.
    """
    radii = np.array(r_interval)
    if const.SEG_3D == size_type:
        # Sphere volume: 4/3 * pi * r^3
        return((4 / float(3)) * np.pi * np.power(radii, 3))
    # Circle area: pi * r^2
    return(np.pi * np.square(radii))
def rotate3d(coords, theta, axis):
    """
    Rotate coordinates around a cartesian axis.

    Args:
        coords (nd.array): coordinate table.
        theta (float): rotation angle.
        axis (int): rotation axis, axis order matches coordinate columns.

    Returns:
        tuple: rotated coordinates (empty tuple for an unrecognized axis).
    """
    # Unrecognized axis: bail out before building any matrix.
    if axis < 0 or axis > 2:
        return()

    sin_t = np.sin(theta)
    cos_t = np.cos(theta)
    if 0 == axis:    # X axis rotation
        rotation_mat = np.matrix([
            [1, 0, 0],
            [0, cos_t, -sin_t],
            [0, sin_t, cos_t]
        ])
    elif 1 == axis:  # Y axis rotation
        rotation_mat = np.matrix([
            [cos_t, 0, sin_t],
            [0, 1, 0],
            [-sin_t, 0, cos_t]
        ])
    elif 2 == axis:  # Z axis rotation
        rotation_mat = np.matrix([
            [cos_t, -sin_t, 0],
            [sin_t, cos_t, 0],
            [0, 0, 1]
        ])

    # Apply the rotation and split the result back into per-axis arrays.
    a, b, c = (rotation_mat * coords).A
    return((a, b, c))
def round_unicode(n, nsig):
    """Round operation on unicode number in scientific notation.

    Args:
        n (unicode): number in scientific notation.
        nsig (int): number of significant digits.

    Returns:
        unicode: rounded unicode number in scientific notation.
    """
    # Convert unicode to string, normalizing the unicode minus sign (U+2212)
    n = str(n.replace(u'\u2212', '-'))

    # Split on the exponent
    if 'e' in n:
        n = n.split('e')
    else:
        n = [n]

    # Round the float part
    n[0] = str(round(float(n[0]), nsig))

    # Re-join with the exponent and return.
    # Fixed: the previous `unicode(...)` call raised NameError on Python 3;
    # joining on a u'' literal yields unicode on Python 2 and str on
    # Python 3, preserving the documented return type on both.
    return(u'e'.join(n))
def smooth_gaussian(x, y, sigma_smooth = None, nbins = None):
    """Smoothen a curve with a Gaussian-weighted moving average.

    Args:
        x (numeric): x coordinates.
        y (numeric): y coordinates.
        sigma_smooth (float): smoothing factor (opt, def: 0.1).
        nbins (int): curve precision (opt, def: 500).

    Returns:
        np.array: smoothened curve sampled on an even grid over [0, max(x)].
    """
    # SET PARAMS ===============================================================
    nbins = 500 if nbins is None else nbins
    sigma_smooth = .1 if sigma_smooth is None else sigma_smooth

    # SMOOTHEN =================================================================

    # Evenly sampled domain
    grid = np.linspace(0, max(x), nbins)

    # Gaussian-weighted moving average, skipping NaN y values.
    weight_sum = np.zeros(len(grid))
    value_sum = np.zeros(len(grid))
    for i in range(len(x)):
        if np.isnan(y[i]):
            continue
        weights = get_norm_pdf(x[i], sigma_smooth, grid)
        weight_sum += weights
        value_sum += weights * y[i]

    # Output
    return value_sum / weight_sum
def smooth_sparse_gaussian(x, y, nbins = None, sigma_smooth = None,
    rescale_sigma = None, **kwargs):
    """Produce a smooth approximation of sparse data.
    Basically a smoothened binned distribution.

    Args:
        x (float): x coordinates.
        y (float): y coordinates.
        nbins (int): curve precision (opt, def: 200).
        sigma_smooth (float): smoothing factor (opt, def: 0.01).
        rescale_sigma (bool): whether to multiply sigma_smooth to max(x).

    Returns:
        dict: various metrics profiles (mean, median, mode, std).
    """
    # Defaults
    nbins = 200 if nbins is None else nbins
    sigma_smooth = .01 if sigma_smooth is None else sigma_smooth
    rescale_sigma = True if rescale_sigma is None else rescale_sigma

    # Express sigma relative to the data range.
    if rescale_sigma:
        sigma_smooth *= max(x)

    # Bin data
    binned = binned_profile(x, y, nbins)

    # Prepare output
    out = {
        'x' : binned['breaks'].tolist(),
        'n' : binned['n'].tolist()
    }

    # Keep the raw binned profile and add its smoothened counterpart.
    for metric in ('mean', 'median', 'mode', 'std', 'max'):
        out[metric + '_raw'] = binned[metric]
        out[metric] = smooth_gaussian(binned['breaks'],
            binned[metric], sigma_smooth, nbins)

    # Output
    return out
def wilcox_sets(df, groupkey, setkey):
    """Perform Wilcoxon-Mann-Whitney U test on the provided list of sets.

    Args:
        df (pandas.DataFrame): data frame with distributions to be compared.
        groupkey (string): df column of set labels.
        setkey (string): df column with distributions to be compared.

    Returns:
        np.array: dataset with WMW U-test p-value of compared distribution.

    Examples:
        >>> dd = [('condition', 'S100'), ('y', 'float')]
        >>> p = np.array([('c1', 1), ('c2', 2)], dtype = dd)
        >>> p = pd.DataFrame(p)
        >>> print(wilcox_sets(p, 'condition', 'y'))
        [('y', 'c2', 'c1', 1.0, '')]
    """
    # Identify sets
    set_names = [c for c in set(df[groupkey])]
    n_sets = len(set_names)
    grouped = df.groupby(groupkey)

    # Initialize output: one row per unordered pair of sets.
    dtype_definition = [('field', 'S100'), ('i', 'S100'), ('j', 'S100'),
        ('p', 'float'), ('sig', 'S5')]
    # Fixed: integer division -- a float shape raises TypeError on Python 3.
    p_vals = np.zeros((n_sets * (n_sets - 1) // 2,),
        dtype = dtype_definition)

    # Cycle counter
    c = 0
    for i in range(n_sets):
        for j in range(i + 1, n_sets):
            # Run Wilcoxon-Mann-Whitney U test
            p = stats.mannwhitneyu(
                grouped.get_group(set_names[i])[setkey],
                grouped.get_group(set_names[j])[setkey],
                alternative = 'two-sided'
            ).pvalue

            # Significance string
            sig = ''
            if p <= .0001:
                sig = '***'
            elif p <= .001:
                sig = '**'
            elif p <= .01:
                sig = '*'
            elif p <= .05:
                sig = '.'

            # Append result
            p_vals[c] = np.array((setkey, set_names[i], set_names[j], p, sig),
                dtype = dtype_definition)

            # Increase cycle counter
            c += 1

    # Output
    return(p_vals)
# END ==========================================================================
################################################################################
|
ggirelli/gpseq-img-py
|
pygpseq/tools/stat.py
|
Python
|
mit
| 18,120
|
[
"Gaussian"
] |
53f9eb33649c143a79e8acab0b1033078df2d309d9a2d07bce744ddb8cccb919
|
"""
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
#-----------------------------------------------
# Set these parameters for adjoint flagging....
# These module-level values are read by setadjoint() below.

# location of output from computing adjoint:
adjoint_output = os.path.abspath('adjoint/_output')
print('Will flag using adjoint solution from %s' % adjoint_output)

# Time period of interest:
t1 = 5.5
t2 = 6.

# Determining type of adjoint flagging:
# taking inner product with forward solution or Richardson error:
flag_forward_adjoint = True
flag_richardson_adjoint = False

# tolerance for adjoint flagging:
adjoint_flag_tolerance = 0.002 # suggested if using forward solution
#adjoint_flag_tolerance = 1e-3 # suggested if using Richardson error
#-----------------------------------------------
#------------------------------
def setrun(claw_pkg='amrclaw'):
#------------------------------

    """
    Define the parameters used for running Clawpack.

    INPUT:
        claw_pkg expected to be "amrclaw" for this setrun.

    OUTPUT:
        rundata - object of class ClawRunData
    """

    from clawpack.clawutil import data

    assert claw_pkg.lower() == 'amrclaw', "Expected claw_pkg = 'amrclaw'"

    num_dim = 2
    rundata = data.ClawRunData(claw_pkg, num_dim)

    #------------------------------------------------------------------
    # Problem-specific parameters to be written to setprob.data:
    #------------------------------------------------------------------

    probdata = rundata.new_UserData(name='probdata',fname='setprob.data')
    probdata.add_param('rho', 1., 'density of medium')
    probdata.add_param('bulk', 4., 'bulk modulus')

    #------------------------------------------------------------------
    # Standard Clawpack parameters to be written to claw.data:
    # (or to amrclaw.data for AMR)
    #------------------------------------------------------------------

    clawdata = rundata.clawdata # initialized when rundata instantiated

    # Set single grid parameters first.
    # See below for AMR parameters.

    # ---------------
    # Spatial domain:
    # ---------------

    # Number of space dimensions:
    clawdata.num_dim = num_dim

    # Lower and upper edge of computational domain:
    clawdata.lower[0] = -4.000000e+00 # xlower
    clawdata.upper[0] = 8.000000e+00 # xupper
    clawdata.lower[1] = -1.000000e+00 # ylower
    clawdata.upper[1] = 11.000000e+00 # yupper

    # Number of grid cells:
    clawdata.num_cells[0] = 50 # mx
    clawdata.num_cells[1] = 50 # my

    # ---------------
    # Size of system:
    # ---------------

    # Number of equations in the system:
    clawdata.num_eqn = 3

    # Number of auxiliary variables in the aux array (initialized in setaux)
    # see setadjoint

    # Index of aux array corresponding to capacity function, if there is one:
    clawdata.capa_index = 0

    # -------------
    # Initial time:
    # -------------

    clawdata.t0 = 0.000000

    # Restart from checkpoint file of a previous run?
    # If restarting, t0 above should be from original run, and the
    # restart_file 'fort.chkNNNNN' specified below should be in
    # the OUTDIR indicated in Makefile.

    clawdata.restart = False # True to restart from prior results
    clawdata.restart_file = 'fort.chk00006' # File to use for restart data

    # -------------
    # Output times:
    #--------------

    # Specify at what times the results should be written to fort.q files.
    # Note that the time integration stops after the final output time.

    clawdata.output_style = 1

    if clawdata.output_style==1:
        # Output ntimes frames at equally spaced times up to tfinal:
        # Can specify num_output_times = 0 for no output
        clawdata.num_output_times = 20
        clawdata.tfinal = 6.0
        clawdata.output_t0 = True # output at initial (or restart) time?
    elif clawdata.output_style == 2:
        # Specify a list or numpy array of output times:
        # Include t0 if you want output at the initial time.
        clawdata.output_times = [0., 0.1]
    elif clawdata.output_style == 3:
        # Output every step_interval timesteps over total_steps timesteps:
        clawdata.output_step_interval = 2
        clawdata.total_steps = 4
        clawdata.output_t0 = True # output at initial (or restart) time?

    clawdata.output_format = 'ascii' # 'ascii', 'binary', 'netcdf'

    clawdata.output_q_components = 'all' # could be list such as [True,True]
    clawdata.output_aux_components = 'all' # could be list
    clawdata.output_aux_onlyonce = False # output aux arrays only at t0

    # ---------------------------------------------------
    # Verbosity of messages to screen during integration:
    # ---------------------------------------------------

    # The current t, dt, and cfl will be printed every time step
    # at AMR levels <= verbosity. Set verbosity = 0 for no printing.
    # (E.g. verbosity == 2 means print only on levels 1 and 2.)
    clawdata.verbosity = 0

    # --------------
    # Time stepping:
    # --------------

    # if dt_variable==True: variable time steps used based on cfl_desired,
    # if dt_variable==False: fixed time steps dt = dt_initial always used.
    clawdata.dt_variable = True

    # Initial time step for variable dt.
    # (If dt_variable==0 then dt=dt_initial for all steps)
    clawdata.dt_initial = 1.00000e-03

    # Max time step to be allowed if variable dt used:
    clawdata.dt_max = 1.000000e+99

    # Desired Courant number if variable dt used
    clawdata.cfl_desired = 0.900000
    # max Courant number to allow without retaking step with a smaller dt:
    clawdata.cfl_max = 1.000000

    # Maximum number of time steps to allow between output times:
    clawdata.steps_max = 50000

    # ------------------
    # Method to be used:
    # ------------------

    # Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
    clawdata.order = 2

    # Use dimensional splitting? (not yet available for AMR)
    clawdata.dimensional_split = 'unsplit'

    # For unsplit method, transverse_waves can be
    # 0 or 'none' ==> donor cell (only normal solver used)
    # 1 or 'increment' ==> corner transport of waves
    # 2 or 'all' ==> corner transport of 2nd order corrections too
    clawdata.transverse_waves = 2

    # Number of waves in the Riemann solution:
    clawdata.num_waves = 2

    # List of limiters to use for each wave family:
    # Required: len(limiter) == num_waves
    # Some options:
    # 0 or 'none' ==> no limiter (Lax-Wendroff)
    # 1 or 'minmod' ==> minmod
    # 2 or 'superbee' ==> superbee
    # 3 or 'vanleer' ==> van Leer
    # 4 or 'mc' ==> MC limiter
    clawdata.limiter = ['vanleer','vanleer']

    clawdata.use_fwaves = False # True ==> use f-wave version of algorithms

    # Source terms splitting:
    # src_split == 0 or 'none' ==> no source term (src routine never called)
    # src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
    # src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
    clawdata.source_split = 0

    # --------------------
    # Boundary conditions:
    # --------------------

    # Number of ghost cells (usually 2)
    clawdata.num_ghost = 2

    # Choice of BCs at xlower and xupper:
    # 0 or 'user' => user specified (must modify bcNamr.f to use this option)
    # 1 or 'extrap' => extrapolation (non-reflecting outflow)
    # 2 or 'periodic' => periodic (must specify this at both boundaries)
    # 3 or 'wall' => solid wall for systems where q(2) is normal velocity
    clawdata.bc_lower[0] = 'extrap' # at xlower
    clawdata.bc_upper[0] = 'wall' # at xupper
    clawdata.bc_lower[1] = 'wall' # at ylower
    clawdata.bc_upper[1] = 'extrap' # at yupper

    # ---------------
    # Gauges:
    # ---------------
    rundata.gaugedata.gauges = []
    # for gauges append lines of the form [gaugeno, x, y, t1, t2]
    rundata.gaugedata.gauges.append([0, 3.5, 0.5, 1.22, 2.85])
    #rundata.gaugedata.gauges.append([1, 3.6, 0.5, 2.7, 2.85])

    # --------------
    # Checkpointing:
    # --------------

    # Specify when checkpoint files should be created that can be
    # used to restart a computation.

    clawdata.checkpt_style = 0

    if clawdata.checkpt_style == 0:
        # Do not checkpoint at all
        pass
    elif clawdata.checkpt_style == 1:
        # Checkpoint only at tfinal.
        pass
    elif clawdata.checkpt_style == 2:
        # Specify a list of checkpoint times.
        clawdata.checkpt_times = [0.1,0.15]
    elif clawdata.checkpt_style == 3:
        # Checkpoint every checkpt_interval timesteps (on Level 1)
        # and at the final time.
        clawdata.checkpt_interval = 5

    # ---------------
    # AMR parameters:
    # ---------------

    amrdata = rundata.amrdata

    # max number of refinement levels:
    amrdata.amr_levels_max = 4

    # List of refinement ratios at each level (length at least amr_level_max-1)
    amrdata.refinement_ratios_x = [2,2,2,2,2,2,2,2,2]
    amrdata.refinement_ratios_y = [2,2,2,2,2,2,2,2,2]
    amrdata.refinement_ratios_t = [2,2,2,2,2,2,2,2,2]

    # Specify type of each aux variable in clawdata.auxtype.
    # This must be a list of length num_aux, each element of which is one of:
    # 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
    # need 1 value, set in setadjoint

    # Flag for refinement based on Richardson error estimater:
    amrdata.flag_richardson = False

    # Flag for refinement using routine flag2refine:
    amrdata.flag2refine = False
    # see setadjoint to set tolerance for adjoint flagging

    # steps to take on each level L between regriddings of level L+1:
    amrdata.regrid_interval = 2

    # width of buffer zone around flagged points:
    # (typically the same as regrid_interval so waves don't escape):
    amrdata.regrid_buffer_width = 2

    # clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
    # (closer to 1.0 => more small grids may be needed to cover flagged cells)
    amrdata.clustering_cutoff = 0.7

    # print info about each regridding up to this level:
    amrdata.verbosity_regrid = 0

    # ---------------
    # Regions:
    # ---------------
    rundata.regiondata.regions = []
    # to specify regions of refinement append lines of the form
    # [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]

    #------------------------------------------------------------------
    # Adjoint specific data:
    #------------------------------------------------------------------
    # Also overrides the refinement flags and num_aux set above.
    rundata = setadjoint(rundata)

    # ----- For developers -----
    # Toggle debugging print statements:
    amrdata.dprint = False # print domain flags
    amrdata.eprint = False # print err est flags
    amrdata.edebug = False # even more err est flags
    amrdata.gprint = False # grid bisection/clustering
    amrdata.nprint = False # proper nesting output
    amrdata.pprint = False # proj. of tagged points
    amrdata.rprint = False # print regridding summary
    amrdata.sprint = False # space/memory output
    amrdata.tprint = False # time step reporting each level
    amrdata.uprint = False # update/upbnd reporting

    return rundata
# end of function setrun
# ----------------------
#-------------------
def setadjoint(rundata):
#-------------------

    """
    Setting up adjoint variables and
    reading in all of the checkpointed Adjoint files
    """

    import glob

    # Set these parameters at top of this file:
    # adjoint_flag_tolerance, t1, t2, adjoint_output
    # Then you don't need to modify this function...

    # flag and tolerance for adjoint flagging:
    if flag_forward_adjoint == True:
        # setting up taking inner product with forward solution
        rundata.amrdata.flag2refine = True
        rundata.amrdata.flag2refine_tol = adjoint_flag_tolerance
    elif flag_richardson_adjoint == True:
        # setting up taking inner product with Richardson error
        rundata.amrdata.flag_richardson = True
        rundata.amrdata.flag_richardson_tol = adjoint_flag_tolerance
    else:
        print("No refinement flag set!")

    rundata.clawdata.num_aux = 1 # 1 required for adjoint flagging
    rundata.amrdata.aux_type = ['center']

    adjointdata = rundata.new_UserData(name='adjointdata',fname='adjoint.data')
    adjointdata.add_param('adjoint_output',adjoint_output,'adjoint_output')
    adjointdata.add_param('t1',t1,'t1, start time of interest')
    adjointdata.add_param('t2',t2,'t2, final time of interest')

    # Collect the adjoint binary checkpoint files in sorted order.
    files = glob.glob(os.path.join(adjoint_output,"fort.b*"))
    files.sort()

    if (len(files) == 0):
        print("No binary files found for adjoint output!")

    adjointdata.add_param('numadjoints', len(files), 'Number of adjoint checkpoint files.')
    adjointdata.add_param('innerprod_index', 1, 'Index for innerproduct data in aux array.')

    counter = 1
    for fname in files:
        # Fixed: the file handle was previously opened and never closed;
        # use a context manager. The header's last token is the checkpoint
        # time (currently unused -- kept for parity with the original read).
        with open(fname) as f:
            time = f.readline().split()[-1]
        adjointdata.add_param('file' + str(counter), fname, 'Binary file' + str(counter))
        counter = counter + 1

    return rundata
# end of function setadjoint
# ----------------------
if __name__ == '__main__':
    # Build the run-time parameters and write every *.data file.
    import sys
    run_config = setrun(*sys.argv[1:])
    run_config.write()
|
clawpack/adjoint
|
examples/acoustics_2d_radial_mixed/setrun.py
|
Python
|
bsd-2-clause
| 14,033
|
[
"NetCDF"
] |
c1f9bcaebbbb6fd9fcc26117b64d4109872b8316bc4fe946e18338d7b0a62281
|
"""
Results for test_glm.py.
Hard-coded from R or Stata. Note that some of the remaining discrepancy vs.
Stata may be because Stata uses ML by default unless you specifically ask for
IRLS.
"""
import os
import numpy as np
import pandas as pd
from statsmodels.api import add_constant
from statsmodels.genmod.tests.results import glm_test_resids
# Test Precisions
# Decimal-place tolerances for the GLM result comparisons; presumably
# consumed as the `decimal=` argument of numpy.testing assertions in
# test_glm.py -- confirm there.  Looser values accommodate the
# IRLS-vs-ML discrepancies against Stata noted in the module docstring.
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_0 = 0
class Longley(object):
    """
    Longley used for TestGlmGaussian
    Results are from Stata and R.
    """
    def __init__(self):
        # One row per observation; the same value is repeated five times
        # per row (presumably one column per residual type compared by
        # the tests -- confirm in test_glm.py).
        self.resids = np.array([
            [267.34002976, 267.34002976, 267.34002976,
             267.34002976, 267.34002976],
            [-94.0139424, -94.0139424, -94.0139424, -94.0139424,
             -94.0139424],
            [46.28716776, 46.28716776, 46.28716776, 46.28716776,
             46.28716776],
            [-410.11462193, -410.11462193, -410.11462193, -410.11462193,
             -410.11462193],
            [309.71459076, 309.71459076, 309.71459076, 309.71459076,
             309.71459076],
            [-249.31121533, -249.31121533, -249.31121533, -249.31121533,
             -249.31121533],
            [-164.0489564, -164.0489564, -164.0489564, -164.0489564,
             -164.0489564],
            [-13.18035687, -13.18035687, -13.18035687, -13.18035687,
             -13.18035687],
            [14.3047726, 14.3047726, 14.3047726, 14.3047726,
             14.3047726],
            [455.39409455, 455.39409455, 455.39409455, 455.39409455,
             455.39409455],
            [-17.26892711, -17.26892711, -17.26892711, -17.26892711,
             -17.26892711],
            [-39.05504252, -39.05504252, -39.05504252, -39.05504252,
             -39.05504252],
            [-155.5499736, -155.5499736, -155.5499736, -155.5499736,
             -155.5499736],
            [-85.67130804, -85.67130804, -85.67130804, -85.67130804,
             -85.67130804],
            [341.93151396, 341.93151396, 341.93151396, 341.93151396,
             341.93151396],
            [-206.75782519, -206.75782519, -206.75782519, -206.75782519,
             -206.75782519]])
        # Coefficient estimates and their standard errors.
        self.params = np.array([
            1.50618723e+01, -3.58191793e-02,
            -2.02022980e+00, -1.03322687e+00, -5.11041057e-02,
            1.82915146e+03, -3.48225863e+06])
        self.bse = np.array([
            8.49149258e+01, 3.34910078e-02, 4.88399682e-01,
            2.14274163e-01, 2.26073200e-01,
            4.55478499e+02, 8.90420384e+05])
        self.aic_R = 235.23486961695903  # R adds 2 for dof to AIC
        self.aic_Stata = 14.57717943930524  # stata divides by nobs
        self.deviance = 836424.0555058046  # from R
        self.scale = 92936.006167311629
        self.llf = -109.61743480847952
        # taken from R (Rpy bug noted in the original); previously this
        # attribute was assigned twice with the same value -- the
        # duplicate assignment has been removed.
        self.null_deviance = 185008826
        self.bic_Stata = 836399.1760177979  # no bic in R?
        self.df_model = 6
        self.df_resid = 9
        # TODO: taken from Stata; not available in sm yet
        self.chi2 = 1981.711859508729
        # self.pearson_chi2 = 836424.1293162981  # from Stata (?)
        self.fittedvalues = np.array([
            60055.659970240202, 61216.013942398131,
            60124.71283224225, 61597.114621930756, 62911.285409240052,
            63888.31121532945, 65153.048956395127, 63774.180356866214,
            66004.695227399934, 67401.605905447621,
            68186.268927114084, 66552.055042522494,
            68810.549973595422, 69649.67130804155, 68989.068486039061,
            70757.757825193927])
class GaussianLog(object):
    """
    Uses generated data. These results are from R and Stata.

    Named for a Gaussian family with a log link (presumably -- confirm
    against its use in test_glm.py).  Attributes are the hard-coded
    reference values the tests compare against.
    """
    def __init__(self):
        # One row per observation; five residual columns (the first,
        # second, fourth and fifth are identical; the third differs --
        # presumably a different residual type; confirm in test_glm.py).
        self.resids = np.array([
            [3.20800000e-04, 3.20800000e-04,
             8.72100000e-04, 3.20800000e-04, 3.20800000e-04],
            [8.12100000e-04, 8.12100000e-04, 2.16350000e-03,
             8.12100000e-04, 8.12100000e-04],
            [-2.94800000e-04, -2.94800000e-04, -7.69700000e-04,
             -2.94800000e-04, -2.94800000e-04],
            [1.40190000e-03, 1.40190000e-03, 3.58560000e-03,
             1.40190000e-03, 1.40190000e-03],
            [-2.30910000e-03, -2.30910000e-03, -5.78490000e-03,
             -2.30910000e-03, -2.30910000e-03],
            [1.10380000e-03, 1.10380000e-03, 2.70820000e-03,
             1.10380000e-03, 1.10380000e-03],
            [-5.14000000e-06, -5.14000000e-06, -1.23000000e-05,
             -5.14000000e-06, -5.14000000e-06],
            [-1.65500000e-04, -1.65500000e-04, -3.89200000e-04,
             -1.65500000e-04, -1.65500000e-04],
            [-7.55400000e-04, -7.55400000e-04, -1.73870000e-03,
             -7.55400000e-04, -7.55400000e-04],
            [-1.39800000e-04, -1.39800000e-04, -3.14800000e-04,
             -1.39800000e-04, -1.39800000e-04],
            [-7.17000000e-04, -7.17000000e-04, -1.58000000e-03,
             -7.17000000e-04, -7.17000000e-04],
            [-1.12200000e-04, -1.12200000e-04, -2.41900000e-04,
             -1.12200000e-04, -1.12200000e-04],
            [3.22100000e-04, 3.22100000e-04, 6.79000000e-04,
             3.22100000e-04, 3.22100000e-04],
            [-3.78000000e-05, -3.78000000e-05, -7.79000000e-05,
             -3.78000000e-05, -3.78000000e-05],
            [5.54500000e-04, 5.54500000e-04, 1.11730000e-03,
             5.54500000e-04, 5.54500000e-04],
            [3.38400000e-04, 3.38400000e-04, 6.66300000e-04,
             3.38400000e-04, 3.38400000e-04],
            [9.72000000e-05, 9.72000000e-05, 1.87000000e-04,
             9.72000000e-05, 9.72000000e-05],
            [-7.92900000e-04, -7.92900000e-04, -1.49070000e-03,
             -7.92900000e-04, -7.92900000e-04],
            [3.33000000e-04, 3.33000000e-04, 6.11500000e-04,
             3.33000000e-04, 3.33000000e-04],
            [-8.35300000e-04, -8.35300000e-04, -1.49790000e-03,
             -8.35300000e-04, -8.35300000e-04],
            [-3.99700000e-04, -3.99700000e-04, -6.99800000e-04,
             -3.99700000e-04, -3.99700000e-04],
            [1.41300000e-04, 1.41300000e-04, 2.41500000e-04,
             1.41300000e-04, 1.41300000e-04],
            [-8.50700000e-04, -8.50700000e-04, -1.41920000e-03,
             -8.50700000e-04, -8.50700000e-04],
            [1.43000000e-06, 1.43000000e-06, 2.33000000e-06,
             1.43000000e-06, 1.43000000e-06],
            [-9.12000000e-05, -9.12000000e-05, -1.44900000e-04,
             -9.12000000e-05, -9.12000000e-05],
            [6.75500000e-04, 6.75500000e-04, 1.04650000e-03,
             6.75500000e-04, 6.75500000e-04],
            [3.97900000e-04, 3.97900000e-04, 6.01100000e-04,
             3.97900000e-04, 3.97900000e-04],
            [1.07000000e-05, 1.07000000e-05, 1.57000000e-05,
             1.07000000e-05, 1.07000000e-05],
            [-8.15200000e-04, -8.15200000e-04, -1.17060000e-03,
             -8.15200000e-04, -8.15200000e-04],
            [-8.46400000e-04, -8.46400000e-04, -1.18460000e-03,
             -8.46400000e-04, -8.46400000e-04],
            [9.91200000e-04, 9.91200000e-04, 1.35180000e-03,
             9.91200000e-04, 9.91200000e-04],
            [-5.07400000e-04, -5.07400000e-04, -6.74200000e-04,
             -5.07400000e-04, -5.07400000e-04],
            [1.08520000e-03, 1.08520000e-03, 1.40450000e-03,
             1.08520000e-03, 1.08520000e-03],
            [9.56100000e-04, 9.56100000e-04, 1.20500000e-03,
             9.56100000e-04, 9.56100000e-04],
            [1.87500000e-03, 1.87500000e-03, 2.30090000e-03,
             1.87500000e-03, 1.87500000e-03],
            [-1.93920000e-03, -1.93920000e-03, -2.31650000e-03,
             -1.93920000e-03, -1.93920000e-03],
            [8.16000000e-04, 8.16000000e-04, 9.48700000e-04,
             8.16000000e-04, 8.16000000e-04],
            [1.01520000e-03, 1.01520000e-03, 1.14860000e-03,
             1.01520000e-03, 1.01520000e-03],
            [1.04150000e-03, 1.04150000e-03, 1.14640000e-03,
             1.04150000e-03, 1.04150000e-03],
            [-3.88200000e-04, -3.88200000e-04, -4.15600000e-04,
             -3.88200000e-04, -3.88200000e-04],
            [9.95900000e-04, 9.95900000e-04, 1.03690000e-03,
             9.95900000e-04, 9.95900000e-04],
            [-6.82800000e-04, -6.82800000e-04, -6.91200000e-04,
             -6.82800000e-04, -6.82800000e-04],
            [-8.11400000e-04, -8.11400000e-04, -7.98500000e-04,
             -8.11400000e-04, -8.11400000e-04],
            [-1.79050000e-03, -1.79050000e-03, -1.71250000e-03,
             -1.79050000e-03, -1.79050000e-03],
            [6.10000000e-04, 6.10000000e-04, 5.66900000e-04,
             6.10000000e-04, 6.10000000e-04],
            [2.52600000e-04, 2.52600000e-04, 2.28100000e-04,
             2.52600000e-04, 2.52600000e-04],
            [-8.62500000e-04, -8.62500000e-04, -7.56400000e-04,
             -8.62500000e-04, -8.62500000e-04],
            [-3.47300000e-04, -3.47300000e-04, -2.95800000e-04,
             -3.47300000e-04, -3.47300000e-04],
            [-7.79000000e-05, -7.79000000e-05, -6.44000000e-05,
             -7.79000000e-05, -7.79000000e-05],
            [6.72000000e-04, 6.72000000e-04, 5.39400000e-04,
             6.72000000e-04, 6.72000000e-04],
            [-3.72100000e-04, -3.72100000e-04, -2.89900000e-04,
             -3.72100000e-04, -3.72100000e-04],
            [-1.22900000e-04, -1.22900000e-04, -9.29000000e-05,
             -1.22900000e-04, -1.22900000e-04],
            [-1.63470000e-03, -1.63470000e-03, -1.19900000e-03,
             -1.63470000e-03, -1.63470000e-03],
            [2.64400000e-04, 2.64400000e-04, 1.88100000e-04,
             2.64400000e-04, 2.64400000e-04],
            [1.79230000e-03, 1.79230000e-03, 1.23650000e-03,
             1.79230000e-03, 1.79230000e-03],
            [-1.40500000e-04, -1.40500000e-04, -9.40000000e-05,
             -1.40500000e-04, -1.40500000e-04],
            [-2.98500000e-04, -2.98500000e-04, -1.93600000e-04,
             -2.98500000e-04, -2.98500000e-04],
            [-9.33100000e-04, -9.33100000e-04, -5.86400000e-04,
             -9.33100000e-04, -9.33100000e-04],
            [9.11200000e-04, 9.11200000e-04, 5.54900000e-04,
             9.11200000e-04, 9.11200000e-04],
            [-1.31840000e-03, -1.31840000e-03, -7.77900000e-04,
             -1.31840000e-03, -1.31840000e-03],
            [-1.30200000e-04, -1.30200000e-04, -7.44000000e-05,
             -1.30200000e-04, -1.30200000e-04],
            [9.09300000e-04, 9.09300000e-04, 5.03200000e-04,
             9.09300000e-04, 9.09300000e-04],
            [-2.39500000e-04, -2.39500000e-04, -1.28300000e-04,
             -2.39500000e-04, -2.39500000e-04],
            [7.15300000e-04, 7.15300000e-04, 3.71000000e-04,
             7.15300000e-04, 7.15300000e-04],
            [5.45000000e-05, 5.45000000e-05, 2.73000000e-05,
             5.45000000e-05, 5.45000000e-05],
            [2.85310000e-03, 2.85310000e-03, 1.38600000e-03,
             2.85310000e-03, 2.85310000e-03],
            [4.63400000e-04, 4.63400000e-04, 2.17800000e-04,
             4.63400000e-04, 4.63400000e-04],
            [2.80900000e-04, 2.80900000e-04, 1.27700000e-04,
             2.80900000e-04, 2.80900000e-04],
            [5.42000000e-05, 5.42000000e-05, 2.38000000e-05,
             5.42000000e-05, 5.42000000e-05],
            [-3.62300000e-04, -3.62300000e-04, -1.54000000e-04,
             -3.62300000e-04, -3.62300000e-04],
            [-1.11900000e-03, -1.11900000e-03, -4.59800000e-04,
             -1.11900000e-03, -1.11900000e-03],
            [1.28900000e-03, 1.28900000e-03, 5.11900000e-04,
             1.28900000e-03, 1.28900000e-03],
            [-1.40820000e-03, -1.40820000e-03, -5.40400000e-04,
             -1.40820000e-03, -1.40820000e-03],
            [-1.69300000e-04, -1.69300000e-04, -6.28000000e-05,
             -1.69300000e-04, -1.69300000e-04],
            [-1.03620000e-03, -1.03620000e-03, -3.71000000e-04,
             -1.03620000e-03, -1.03620000e-03],
            [1.49150000e-03, 1.49150000e-03, 5.15800000e-04,
             1.49150000e-03, 1.49150000e-03],
            [-7.22000000e-05, -7.22000000e-05, -2.41000000e-05,
             -7.22000000e-05, -7.22000000e-05],
            [5.49000000e-04, 5.49000000e-04, 1.76900000e-04,
             5.49000000e-04, 5.49000000e-04],
            [-2.12320000e-03, -2.12320000e-03, -6.60400000e-04,
             -2.12320000e-03, -2.12320000e-03],
            [7.84000000e-06, 7.84000000e-06, 2.35000000e-06,
             7.84000000e-06, 7.84000000e-06],
            [1.15580000e-03, 1.15580000e-03, 3.34700000e-04,
             1.15580000e-03, 1.15580000e-03],
            [4.83400000e-04, 4.83400000e-04, 1.35000000e-04,
             4.83400000e-04, 4.83400000e-04],
            [-5.26100000e-04, -5.26100000e-04, -1.41700000e-04,
             -5.26100000e-04, -5.26100000e-04],
            [-1.75100000e-04, -1.75100000e-04, -4.55000000e-05,
             -1.75100000e-04, -1.75100000e-04],
            [-1.84600000e-03, -1.84600000e-03, -4.62100000e-04,
             -1.84600000e-03, -1.84600000e-03],
            [2.07200000e-04, 2.07200000e-04, 5.00000000e-05,
             2.07200000e-04, 2.07200000e-04],
            [-8.54700000e-04, -8.54700000e-04, -1.98700000e-04,
             -8.54700000e-04, -8.54700000e-04],
            [-9.20000000e-05, -9.20000000e-05, -2.06000000e-05,
             -9.20000000e-05, -9.20000000e-05],
            [5.35700000e-04, 5.35700000e-04, 1.15600000e-04,
             5.35700000e-04, 5.35700000e-04],
            [-7.67300000e-04, -7.67300000e-04, -1.59400000e-04,
             -7.67300000e-04, -7.67300000e-04],
            [-1.79710000e-03, -1.79710000e-03, -3.59500000e-04,
             -1.79710000e-03, -1.79710000e-03],
            [1.10910000e-03, 1.10910000e-03, 2.13500000e-04,
             1.10910000e-03, 1.10910000e-03],
            [-5.53800000e-04, -5.53800000e-04, -1.02600000e-04,
             -5.53800000e-04, -5.53800000e-04],
            [7.48000000e-04, 7.48000000e-04, 1.33400000e-04,
             7.48000000e-04, 7.48000000e-04],
            [4.23000000e-04, 4.23000000e-04, 7.26000000e-05,
             4.23000000e-04, 4.23000000e-04],
            [-3.16400000e-04, -3.16400000e-04, -5.22000000e-05,
             -3.16400000e-04, -3.16400000e-04],
            [-6.63200000e-04, -6.63200000e-04, -1.05200000e-04,
             -6.63200000e-04, -6.63200000e-04],
            [1.33540000e-03, 1.33540000e-03, 2.03700000e-04,
             1.33540000e-03, 1.33540000e-03],
            [-7.81200000e-04, -7.81200000e-04, -1.14600000e-04,
             -7.81200000e-04, -7.81200000e-04],
            [1.67880000e-03, 1.67880000e-03, 2.36600000e-04,
             1.67880000e-03, 1.67880000e-03]])
        self.null_deviance = 56.691617808182208
        # Coefficient estimates and their standard errors.
        self.params = np.array([
            9.99964386e-01, -1.99896965e-02, -1.00027232e-04])
        self.bse = np.array([1.42119293e-04, 1.20276468e-05, 1.87347682e-07])
        self.aic_R = -1103.8187213072656  # adds 2 for dof for scale
        self.aic_Stata = -11.05818072104212  # divides by nobs for e(aic)
        self.deviance = 8.68876986288542e-05
        self.scale = 8.9574946938163984e-07  # from R but e(phi) in Stata
        self.llf = 555.9093606536328
        self.bic_Stata = -446.7014211525822
        self.df_model = 2
        self.df_resid = 97
        self.chi2 = 33207648.86501769  # from Stata not in sm
        # Fitted means for the 100 generated observations.
        self.fittedvalues = np.array([
            2.7181850213327747, 2.664122305869506,
            2.6106125414084405, 2.5576658143523567, 2.5052916730829535,
            2.4534991313100165, 2.4022966718815781, 2.3516922510411282,
            2.3016933031175575, 2.2523067456332542, 2.2035389848154616,
            2.1553959214958001, 2.107882957382607, 2.0610050016905817,
            2.0147664781120667, 1.969171332114154, 1.9242230385457144,
            1.8799246095383746, 1.8362786026854092, 1.7932871294825108,
            1.7509518640143886, 1.7092740518711942, 1.6682545192788105,
            1.6278936824271399, 1.5881915569806042, 1.5491477677552221,
            1.5107615585467538, 1.4730318020945796, 1.4359570101661721,
            1.3995353437472129, 1.3637646233226499, 1.3286423392342188,
            1.2941656621002184, 1.2603314532836074, 1.2271362753947765,
            1.1945764028156565, 1.162647832232141, 1.1313462931621328,
            1.1006672584668622, 1.0706059548334832, 1.0411573732173065,
            1.0123162792324054, 0.98407722347970683, 0.95643455180206194,
            0.92938241545618494, 0.90291478119174029, 0.87702544122826565,
            0.85170802312101246, 0.82695599950720078, 0.80276269772458597,
            0.77912130929465073, 0.75602489926313921, 0.73346641539106316,
            0.71143869718971686, 0.68993448479364294, 0.66894642766589496,
            0.64846709313034534, 0.62848897472617915, 0.60900450038011367,
            0.5900060403922629, 0.57148591523195513, 0.55343640314018494,
            0.5358497475357491, 0.51871816422248385, 0.50203384839536769,
            0.48578898144361343, 0.46997573754920047, 0.45458629007964013,
            0.4396128177740814, 0.42504751072218311, 0.41088257613548018,
            0.39711024391126759, 0.38372277198930843, 0.37071245150195081,
            0.35807161171849949, 0.34579262478494655, 0.33386791026040569,
            0.32228993945183393, 0.31105123954884056, 0.30014439756060574,
            0.28956206405712448, 0.27929695671718968, 0.26934186368570684,
            0.25968964674310463, 0.25033324428976694, 0.24126567414856051,
            0.23248003618867552, 0.22396951477412205, 0.21572738104035141,
            0.20774699500257574, 0.20002180749946474, 0.19254536197598673,
            0.18531129610924435, 0.17831334328122878, 0.17154533390247831,
            0.16500119659068577, 0.15867495920834204, 0.15256074976354628,
            0.14665279717814039, 0.14094543192735109])
class GaussianInverse(object):
    """
    This test uses generated data. Results are from R and Stata.

    Named for a Gaussian family with an inverse link (presumably --
    confirm against its use in test_glm.py).  Attributes are the
    hard-coded reference values the tests compare against.
    """
    def __init__(self):
        # One row per observation; five residual columns (all equal
        # except the third -- presumably a different residual type;
        # confirm in test_glm.py).
        self.resids = np.array([
            [-5.15300000e-04, -5.15300000e-04,
             5.14800000e-04, -5.15300000e-04, -5.15300000e-04],
            [-2.12500000e-04, -2.12500000e-04, 2.03700000e-04,
             -2.12500000e-04, -2.12500000e-04],
            [-1.71400000e-04, -1.71400000e-04, 1.57200000e-04,
             -1.71400000e-04, -1.71400000e-04],
            [1.94020000e-03, 1.94020000e-03, -1.69710000e-03,
             1.94020000e-03, 1.94020000e-03],
            [-6.81100000e-04, -6.81100000e-04, 5.66900000e-04,
             -6.81100000e-04, -6.81100000e-04],
            [1.21370000e-03, 1.21370000e-03, -9.58800000e-04,
             1.21370000e-03, 1.21370000e-03],
            [-1.51090000e-03, -1.51090000e-03, 1.13070000e-03,
             -1.51090000e-03, -1.51090000e-03],
            [3.21500000e-04, 3.21500000e-04, -2.27400000e-04,
             3.21500000e-04, 3.21500000e-04],
            [-3.18500000e-04, -3.18500000e-04, 2.12600000e-04,
             -3.18500000e-04, -3.18500000e-04],
            [3.75600000e-04, 3.75600000e-04, -2.36300000e-04,
             3.75600000e-04, 3.75600000e-04],
            [4.82300000e-04, 4.82300000e-04, -2.85500000e-04,
             4.82300000e-04, 4.82300000e-04],
            [-1.41870000e-03, -1.41870000e-03, 7.89300000e-04,
             -1.41870000e-03, -1.41870000e-03],
            [6.75000000e-05, 6.75000000e-05, -3.52000000e-05,
             6.75000000e-05, 6.75000000e-05],
            [4.06300000e-04, 4.06300000e-04, -1.99100000e-04,
             4.06300000e-04, 4.06300000e-04],
            [-3.61500000e-04, -3.61500000e-04, 1.66000000e-04,
             -3.61500000e-04, -3.61500000e-04],
            [-2.97400000e-04, -2.97400000e-04, 1.28000000e-04,
             -2.97400000e-04, -2.97400000e-04],
            [-9.32700000e-04, -9.32700000e-04, 3.75800000e-04,
             -9.32700000e-04, -9.32700000e-04],
            [1.16270000e-03, 1.16270000e-03, -4.38500000e-04,
             1.16270000e-03, 1.16270000e-03],
            [6.77900000e-04, 6.77900000e-04, -2.39200000e-04,
             6.77900000e-04, 6.77900000e-04],
            [-1.29330000e-03, -1.29330000e-03, 4.27000000e-04,
             -1.29330000e-03, -1.29330000e-03],
            [2.24500000e-04, 2.24500000e-04, -6.94000000e-05,
             2.24500000e-04, 2.24500000e-04],
            [1.05510000e-03, 1.05510000e-03, -3.04900000e-04,
             1.05510000e-03, 1.05510000e-03],
            [2.50400000e-04, 2.50400000e-04, -6.77000000e-05,
             2.50400000e-04, 2.50400000e-04],
            [4.08600000e-04, 4.08600000e-04, -1.03400000e-04,
             4.08600000e-04, 4.08600000e-04],
            [-1.67610000e-03, -1.67610000e-03, 3.96800000e-04,
             -1.67610000e-03, -1.67610000e-03],
            [7.47600000e-04, 7.47600000e-04, -1.65700000e-04,
             7.47600000e-04, 7.47600000e-04],
            [2.08200000e-04, 2.08200000e-04, -4.32000000e-05,
             2.08200000e-04, 2.08200000e-04],
            [-8.00800000e-04, -8.00800000e-04, 1.55700000e-04,
             -8.00800000e-04, -8.00800000e-04],
            [5.81200000e-04, 5.81200000e-04, -1.05900000e-04,
             5.81200000e-04, 5.81200000e-04],
            [1.00980000e-03, 1.00980000e-03, -1.72400000e-04,
             1.00980000e-03, 1.00980000e-03],
            [2.77400000e-04, 2.77400000e-04, -4.44000000e-05,
             2.77400000e-04, 2.77400000e-04],
            [-5.02800000e-04, -5.02800000e-04, 7.55000000e-05,
             -5.02800000e-04, -5.02800000e-04],
            [2.69800000e-04, 2.69800000e-04, -3.80000000e-05,
             2.69800000e-04, 2.69800000e-04],
            [2.01300000e-04, 2.01300000e-04, -2.67000000e-05,
             2.01300000e-04, 2.01300000e-04],
            [-1.19690000e-03, -1.19690000e-03, 1.48900000e-04,
             -1.19690000e-03, -1.19690000e-03],
            [-6.94200000e-04, -6.94200000e-04, 8.12000000e-05,
             -6.94200000e-04, -6.94200000e-04],
            [5.65500000e-04, 5.65500000e-04, -6.22000000e-05,
             5.65500000e-04, 5.65500000e-04],
            [4.93100000e-04, 4.93100000e-04, -5.10000000e-05,
             4.93100000e-04, 4.93100000e-04],
            [3.25000000e-04, 3.25000000e-04, -3.17000000e-05,
             3.25000000e-04, 3.25000000e-04],
            [-7.70200000e-04, -7.70200000e-04, 7.07000000e-05,
             -7.70200000e-04, -7.70200000e-04],
            [2.58000000e-05, 2.58000000e-05, -2.23000000e-06,
             2.58000000e-05, 2.58000000e-05],
            [-1.52800000e-04, -1.52800000e-04, 1.25000000e-05,
             -1.52800000e-04, -1.52800000e-04],
            [4.52000000e-05, 4.52000000e-05, -3.48000000e-06,
             4.52000000e-05, 4.52000000e-05],
            [-6.83900000e-04, -6.83900000e-04, 4.97000000e-05,
             -6.83900000e-04, -6.83900000e-04],
            [-7.77600000e-04, -7.77600000e-04, 5.34000000e-05,
             -7.77600000e-04, -7.77600000e-04],
            [1.03170000e-03, 1.03170000e-03, -6.70000000e-05,
             1.03170000e-03, 1.03170000e-03],
            [1.20000000e-03, 1.20000000e-03, -7.37000000e-05,
             1.20000000e-03, 1.20000000e-03],
            [-7.71600000e-04, -7.71600000e-04, 4.48000000e-05,
             -7.71600000e-04, -7.71600000e-04],
            [-3.37000000e-04, -3.37000000e-04, 1.85000000e-05,
             -3.37000000e-04, -3.37000000e-04],
            [1.19880000e-03, 1.19880000e-03, -6.25000000e-05,
             1.19880000e-03, 1.19880000e-03],
            [-1.54610000e-03, -1.54610000e-03, 7.64000000e-05,
             -1.54610000e-03, -1.54610000e-03],
            [9.11600000e-04, 9.11600000e-04, -4.27000000e-05,
             9.11600000e-04, 9.11600000e-04],
            [-4.70800000e-04, -4.70800000e-04, 2.09000000e-05,
             -4.70800000e-04, -4.70800000e-04],
            [-1.21550000e-03, -1.21550000e-03, 5.13000000e-05,
             -1.21550000e-03, -1.21550000e-03],
            [1.09160000e-03, 1.09160000e-03, -4.37000000e-05,
             1.09160000e-03, 1.09160000e-03],
            [-2.72000000e-04, -2.72000000e-04, 1.04000000e-05,
             -2.72000000e-04, -2.72000000e-04],
            [-7.84500000e-04, -7.84500000e-04, 2.84000000e-05,
             -7.84500000e-04, -7.84500000e-04],
            [1.53330000e-03, 1.53330000e-03, -5.28000000e-05,
             1.53330000e-03, 1.53330000e-03],
            [-1.84450000e-03, -1.84450000e-03, 6.05000000e-05,
             -1.84450000e-03, -1.84450000e-03],
            [1.68550000e-03, 1.68550000e-03, -5.26000000e-05,
             1.68550000e-03, 1.68550000e-03],
            [-3.06100000e-04, -3.06100000e-04, 9.10000000e-06,
             -3.06100000e-04, -3.06100000e-04],
            [1.00950000e-03, 1.00950000e-03, -2.86000000e-05,
             1.00950000e-03, 1.00950000e-03],
            [5.22000000e-04, 5.22000000e-04, -1.41000000e-05,
             5.22000000e-04, 5.22000000e-04],
            [-2.18000000e-05, -2.18000000e-05, 5.62000000e-07,
             -2.18000000e-05, -2.18000000e-05],
            [-7.80600000e-04, -7.80600000e-04, 1.92000000e-05,
             -7.80600000e-04, -7.80600000e-04],
            [6.81400000e-04, 6.81400000e-04, -1.60000000e-05,
             6.81400000e-04, 6.81400000e-04],
            [-1.43800000e-04, -1.43800000e-04, 3.23000000e-06,
             -1.43800000e-04, -1.43800000e-04],
            [7.76000000e-04, 7.76000000e-04, -1.66000000e-05,
             7.76000000e-04, 7.76000000e-04],
            [2.54900000e-04, 2.54900000e-04, -5.22000000e-06,
             2.54900000e-04, 2.54900000e-04],
            [5.77500000e-04, 5.77500000e-04, -1.13000000e-05,
             5.77500000e-04, 5.77500000e-04],
            [7.58100000e-04, 7.58100000e-04, -1.42000000e-05,
             7.58100000e-04, 7.58100000e-04],
            [-8.31000000e-04, -8.31000000e-04, 1.49000000e-05,
             -8.31000000e-04, -8.31000000e-04],
            [-2.10340000e-03, -2.10340000e-03, 3.62000000e-05,
             -2.10340000e-03, -2.10340000e-03],
            [-8.89900000e-04, -8.89900000e-04, 1.47000000e-05,
             -8.89900000e-04, -8.89900000e-04],
            [1.08570000e-03, 1.08570000e-03, -1.71000000e-05,
             1.08570000e-03, 1.08570000e-03],
            [-1.88600000e-04, -1.88600000e-04, 2.86000000e-06,
             -1.88600000e-04, -1.88600000e-04],
            [9.10000000e-05, 9.10000000e-05, -1.32000000e-06,
             9.10000000e-05, 9.10000000e-05],
            [1.07700000e-03, 1.07700000e-03, -1.50000000e-05,
             1.07700000e-03, 1.07700000e-03],
            [9.04100000e-04, 9.04100000e-04, -1.21000000e-05,
             9.04100000e-04, 9.04100000e-04],
            [-2.20000000e-04, -2.20000000e-04, 2.83000000e-06,
             -2.20000000e-04, -2.20000000e-04],
            [-1.64030000e-03, -1.64030000e-03, 2.02000000e-05,
             -1.64030000e-03, -1.64030000e-03],
            [2.20600000e-04, 2.20600000e-04, -2.62000000e-06,
             2.20600000e-04, 2.20600000e-04],
            [-2.78300000e-04, -2.78300000e-04, 3.17000000e-06,
             -2.78300000e-04, -2.78300000e-04],
            [-4.93000000e-04, -4.93000000e-04, 5.40000000e-06,
             -4.93000000e-04, -4.93000000e-04],
            [-1.85000000e-04, -1.85000000e-04, 1.95000000e-06,
             -1.85000000e-04, -1.85000000e-04],
            [-7.64000000e-04, -7.64000000e-04, 7.75000000e-06,
             -7.64000000e-04, -7.64000000e-04],
            [7.79600000e-04, 7.79600000e-04, -7.61000000e-06,
             7.79600000e-04, 7.79600000e-04],
            [2.88400000e-04, 2.88400000e-04, -2.71000000e-06,
             2.88400000e-04, 2.88400000e-04],
            [1.09370000e-03, 1.09370000e-03, -9.91000000e-06,
             1.09370000e-03, 1.09370000e-03],
            [3.07000000e-04, 3.07000000e-04, -2.68000000e-06,
             3.07000000e-04, 3.07000000e-04],
            [-8.76000000e-04, -8.76000000e-04, 7.37000000e-06,
             -8.76000000e-04, -8.76000000e-04],
            [-1.85300000e-04, -1.85300000e-04, 1.50000000e-06,
             -1.85300000e-04, -1.85300000e-04],
            [3.24700000e-04, 3.24700000e-04, -2.54000000e-06,
             3.24700000e-04, 3.24700000e-04],
            [4.59600000e-04, 4.59600000e-04, -3.47000000e-06,
             4.59600000e-04, 4.59600000e-04],
            [-2.73300000e-04, -2.73300000e-04, 1.99000000e-06,
             -2.73300000e-04, -2.73300000e-04],
            [1.32180000e-03, 1.32180000e-03, -9.29000000e-06,
             1.32180000e-03, 1.32180000e-03],
            [-1.32620000e-03, -1.32620000e-03, 9.00000000e-06,
             -1.32620000e-03, -1.32620000e-03],
            [9.62000000e-05, 9.62000000e-05, -6.31000000e-07,
             9.62000000e-05, 9.62000000e-05],
            [-6.04400000e-04, -6.04400000e-04, 3.83000000e-06,
             -6.04400000e-04, -6.04400000e-04],
            [-6.66300000e-04, -6.66300000e-04, 4.08000000e-06,
             -6.66300000e-04, -6.66300000e-04]])
        self.null_deviance = 6.8088354977561  # from R, Rpy bug
        # Coefficient estimates and their standard errors.
        self.params = np.array([1.00045997, 0.01991666, 0.00100126])
        self.bse = np.array([4.55214070e-04, 7.00529313e-05, 1.84478509e-06])
        self.aic_R = -1123.1528237643774
        self.aic_Stata = -11.25152876811373
        self.deviance = 7.1612915365488368e-05
        self.scale = 7.3827747608449547e-07
        self.llf = 565.57641188218872
        self.bic_Stata = -446.7014364279675
        self.df_model = 2
        self.df_resid = 97
        self.chi2 = 2704006.698904491
        # Fitted means for the 100 generated observations.
        self.fittedvalues = np.array([
            0.99954024, 0.97906956, 0.95758077, 0.93526008, 0.91228657,
            0.88882978, 0.8650479, 0.84108646, 0.81707757, 0.79313958,
            0.76937709, 0.74588129, 0.72273051, 0.69999099, 0.67771773,
            0.65595543, 0.63473944, 0.61409675, 0.59404691, 0.57460297,
            0.55577231, 0.53755742, 0.51995663, 0.50296478, 0.48657379,
            0.47077316, 0.4555505, 0.44089187, 0.42678213, 0.41320529,
            0.40014475, 0.38758348, 0.37550428, 0.36388987, 0.35272306,
            0.34198684, 0.33166446, 0.32173953, 0.31219604, 0.30301842,
            0.29419156, 0.28570085, 0.27753216, 0.26967189, 0.26210695,
            0.25482476, 0.24781324, 0.2410608, 0.23455636, 0.22828931,
            0.22224947, 0.21642715, 0.21081306, 0.20539835, 0.20017455,
            0.19513359, 0.19026777, 0.18556972, 0.18103243, 0.17664922,
            0.1724137, 0.16831977, 0.16436164, 0.16053377, 0.15683086,
            0.15324789, 0.14978003, 0.1464227, 0.14317153, 0.14002232,
            0.13697109, 0.13401403, 0.1311475, 0.12836802, 0.12567228,
            0.1230571, 0.12051944, 0.11805642, 0.11566526, 0.1133433,
            0.11108802, 0.10889699, 0.10676788, 0.10469847, 0.10268664,
            0.10073034, 0.09882763, 0.09697663, 0.09517555, 0.09342267,
            0.09171634, 0.09005498, 0.08843707, 0.08686116, 0.08532585,
            0.08382979, 0.0823717, 0.08095035, 0.07956453, 0.07821311])
class Star98(object):
    """
    Star98 class used with TestGlmBinomial

    Attributes are hard-coded reference values (from R and Stata, per
    the per-attribute comments) for a binomial GLM fit on the Star98
    dataset.
    """
    def __init__(self):
        # NOTE: params and bse are plain tuples here, unlike the
        # np.array attributes of the Gaussian result classes.
        self.params = (
            -0.0168150366, 0.0099254766, -0.0187242148,
            -0.0142385609, 0.2544871730, 0.2406936644, 0.0804086739,
            -1.9521605027, -0.3340864748, -0.1690221685, 0.0049167021,
            -0.0035799644, -0.0140765648, -0.0040049918, -0.0039063958,
            0.0917143006, 0.0489898381, 0.0080407389, 0.0002220095,
            -0.0022492486, 2.9588779262)
        self.bse = (
            4.339467e-04, 6.013714e-04, 7.435499e-04, 4.338655e-04,
            2.994576e-02, 5.713824e-02, 1.392359e-02, 3.168109e-01,
            6.126411e-02, 3.270139e-02, 1.253877e-03, 2.254633e-04,
            1.904573e-03, 4.739838e-04, 9.623650e-04, 1.450923e-02,
            7.451666e-03, 1.499497e-03, 2.988794e-05, 3.489838e-04,
            1.546712e+00)
        self.null_deviance = 34345.3688931
        self.df_null = 302
        self.deviance = 4078.76541772
        self.df_resid = 282
        self.df_model = 20
        self.aic_R = 6039.22511799
        self.aic_Stata = 19.93143846737438
        self.bic_Stata = 2467.493504191302
        self.llf = -2998.61255899391  # from R
        self.llf_Stata = -2998.612927807218
        self.scale = 1.
        self.pearson_chi2 = 4051.921614
        # Residuals are large enough that they live in a separate module.
        self.resids = glm_test_resids.star98_resids
        # Fitted probabilities for the 303 observations.
        self.fittedvalues = np.array([
            0.5833118, 0.75144661, 0.50058272, 0.68534524, 0.32251021,
            0.68693601, 0.33299827, 0.65624766, 0.49851481, 0.506736,
            0.23954874, 0.86631452, 0.46432936, 0.44171873, 0.66797935,
            0.73988491, 0.51966014, 0.42442446, 0.5649369, 0.59251634,
            0.34798337, 0.56415024, 0.49974355, 0.3565539, 0.20752309,
            0.18269097, 0.44932642, 0.48025128, 0.59965277, 0.58848671,
            0.36264203, 0.33333196, 0.74253352, 0.5081886, 0.53421878,
            0.56291445, 0.60205239, 0.29174423, 0.2954348, 0.32220414,
            0.47977903, 0.23687535, 0.11776464, 0.1557423, 0.27854799,
            0.22699533, 0.1819439, 0.32554433, 0.22681989, 0.15785389,
            0.15268609, 0.61094772, 0.20743222, 0.51649059, 0.46502006,
            0.41031788, 0.59523288, 0.65733285, 0.27835336, 0.2371213,
            0.25137045, 0.23953942, 0.27854519, 0.39652413, 0.27023163,
            0.61411863, 0.2212025, 0.42005842, 0.55940397, 0.35413774,
            0.45724563, 0.57399437, 0.2168918, 0.58308738, 0.17181104,
            0.49873249, 0.22832683, 0.14846056, 0.5028073, 0.24513863,
            0.48202096, 0.52823155, 0.5086262, 0.46295993, 0.57869402,
            0.78363217, 0.21144435, 0.2298366, 0.17954825, 0.32232586,
            0.8343015, 0.56217006, 0.47367315, 0.52535649, 0.60350746,
            0.43210701, 0.44712008, 0.35858239, 0.2521347, 0.19787004,
            0.63256553, 0.51386532, 0.64997027, 0.13402072, 0.81756174,
            0.74543642, 0.30825852, 0.23988707, 0.17273125, 0.27880599,
            0.17395893, 0.32052828, 0.80467697, 0.18726218, 0.23842081,
            0.19020381, 0.85835388, 0.58703615, 0.72415106, 0.64433695,
            0.68766653, 0.32923663, 0.16352185, 0.38868816, 0.44980444,
            0.74810044, 0.42973792, 0.53762581, 0.72714996, 0.61229484,
            0.30267667, 0.24713253, 0.65086008, 0.48957265, 0.54955545,
            0.5697156, 0.36406211, 0.48906545, 0.45919413, 0.4930565,
            0.39785555, 0.5078719, 0.30159626, 0.28524393, 0.34687707,
            0.22522042, 0.52947159, 0.29277287, 0.8585002, 0.60800389,
            0.75830521, 0.35648175, 0.69508796, 0.45518355, 0.21567675,
            0.39682985, 0.49042948, 0.47615798, 0.60588234, 0.62910299,
            0.46005639, 0.71755165, 0.48852156, 0.47940661, 0.60128813,
            0.16589699, 0.68512861, 0.46305199, 0.68832227, 0.7006721,
            0.56564937, 0.51753941, 0.54261733, 0.56072214, 0.34545715,
            0.30226104, 0.3572956, 0.40996287, 0.33517519, 0.36248407,
            0.33937041, 0.34140691, 0.2627528, 0.29955161, 0.38581683,
            0.24840026, 0.15414272, 0.40415991, 0.53936252, 0.52111887,
            0.28060168, 0.45600958, 0.51110589, 0.43757523, 0.46891953,
            0.39425249, 0.5834369, 0.55817308, 0.32051259, 0.43567448,
            0.34134195, 0.43016545, 0.4885413, 0.28478325, 0.2650776,
            0.46784606, 0.46265983, 0.42655938, 0.18972234, 0.60448491,
            0.211896, 0.37886032, 0.50727577, 0.39782309, 0.50427121,
            0.35882898, 0.39596807, 0.49160806, 0.35618002, 0.6819922,
            0.36871093, 0.43079679, 0.67985516, 0.41270595, 0.68952767,
            0.52587734, 0.32042126, 0.39120123, 0.56870985, 0.32962349,
            0.32168989, 0.54076251, 0.4592907, 0.48480182, 0.4408386,
            0.431178, 0.47078232, 0.55911605, 0.30331618, 0.50310393,
            0.65036038, 0.45078895, 0.62354291, 0.56435463, 0.50034281,
            0.52693538, 0.57217285, 0.49221472, 0.40707122, 0.44226533,
            0.3475959, 0.54746396, 0.86385832, 0.48402233, 0.54313657,
            0.61586824, 0.27097185, 0.69717808, 0.52156974, 0.50401189,
            0.56724181, 0.6577178, 0.42732047, 0.44808396, 0.65435634,
            0.54766225, 0.38160648, 0.49890847, 0.50879037, 0.5875452,
            0.45101593, 0.5709704, 0.3175516, 0.39813159, 0.28305688,
            0.40521062, 0.30120578, 0.26400428, 0.44205496, 0.40545798,
            0.39366599, 0.55288196, 0.14104184, 0.17550155, 0.1949095,
            0.40255144, 0.21016822, 0.09712017, 0.63151487, 0.25885514,
            0.57323748, 0.61836898, 0.43268601, 0.67008878, 0.75801989,
            0.50353406, 0.64222315, 0.29925757, 0.32592036, 0.39634977,
            0.39582747, 0.41037006, 0.34174944])
class Lbw(object):
    '''
    Reference results for a binomial GLM (canonical logit link) fit to the
    Stata low-birth-weight (LBW) data.

    The LBW data can be found here

    https://www.stata-press.com/data/r9/rmain.html
    '''
    def __init__(self):
        # data set up for data not in datasets
        filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                "stata_lbw_glm.csv")
        data = pd.read_csv(filename)
        # Expand `race` into indicator columns (race_1/race_2/race_3 style).
        dummies = pd.get_dummies(data.race, prefix="race", drop_first=False)
        # BUG FIX: `axis` is keyword-only in pandas >= 2.0; the original
        # positional call `pd.concat([data, dummies], 1)` raises TypeError
        # there.  The keyword form works on all pandas versions.
        data = pd.concat([data, dummies], axis=1)
        self.endog = data.low
        design = data[["age", "lwt", "race_black", "race_other", "smoke",
                       "ptl", "ht", "ui"]]
        self.exog = add_constant(design, prepend=False)
        # Results for Canonical Logit Link
        # Coefficient estimates (constant last, matching prepend=False).
        self.params = (
            -.02710031, -.01515082, 1.26264728,
            .86207916, .92334482, .54183656, 1.83251780,
            .75851348, .46122388)
        # Standard errors of the estimates.
        self.bse = (
            0.036449917, 0.006925765, 0.526405169,
            0.439146744, 0.400820976, 0.346246857, 0.691623875,
            0.459373871, 1.204574885)
        self.aic_R = 219.447991133
        self.aic_Stata = 1.161100482182551
        self.deviance = 201.4479911325021
        self.scale = 1
        self.llf = -100.7239955662511
        self.chi2 = 25.65329337867037    # from Stata not used by sm
        self.null_deviance = 234.671996193219
        self.bic_Stata = -742.0664715782335
        self.df_resid = 180
        self.df_model = 8
        self.df_null = 188
        self.pearson_chi2 = 182.023342493558
        # Reference residuals live in the shared glm_test_resids module.
        self.resids = glm_test_resids.lbw_resids
        # Fitted probabilities for all 189 observations.
        self.fittedvalues = np.array([
            0.31217507, 0.12793027, 0.32119762, 0.48442686, 0.50853393,
            0.24517662, 0.12755193, 0.33226988, 0.22013309, 0.26268069,
            0.34729955, 0.18782188, 0.75404181, 0.54723527, 0.35016393,
            0.35016393, 0.45824406, 0.25336683, 0.43087357, 0.23284101,
            0.20146616, 0.24315597, 0.02725586, 0.22207692, 0.39800383,
            0.05584178, 0.28403447, 0.06931188, 0.35371946, 0.3896279,
            0.3896279, 0.47812002, 0.60043853, 0.07144772, 0.29995988,
            0.17910031, 0.22773411, 0.22691015, 0.06221253, 0.2384528,
            0.32633864, 0.05131047, 0.2954536, 0.07364416, 0.57241299,
            0.57241299, 0.08272435, 0.23298882, 0.12658158, 0.58967487,
            0.46989562, 0.22455631, 0.2348285, 0.29571887, 0.28212464,
            0.31499013, 0.68340511, 0.14090647, 0.31448425, 0.28082972,
            0.28082972, 0.24918728, 0.27018297, 0.08175784, 0.64808999,
            0.38252574, 0.25550797, 0.09113411, 0.40736693, 0.32644055,
            0.54367425, 0.29606968, 0.47028421, 0.39972155, 0.25079125,
            0.09678472, 0.08807264, 0.27467837, 0.5675742, 0.045619,
            0.10719293, 0.04826292, 0.23934092, 0.24179618, 0.23802197,
            0.49196179, 0.31379451, 0.10605469, 0.04047396, 0.11620849,
            0.09937016, 0.21822964, 0.29770265, 0.83912829, 0.25079125,
            0.08548557, 0.06550308, 0.2046457, 0.2046457, 0.08110349,
            0.13519643, 0.47862055, 0.38891913, 0.1383964, 0.26176764,
            0.31594589, 0.11418612, 0.06324112, 0.28468594, 0.21663702,
            0.03827107, 0.27237604, 0.20246694, 0.19042999, 0.15019447,
            0.18759474, 0.12308435, 0.19700616, 0.11564002, 0.36595033,
            0.07765727, 0.14119063, 0.13584627, 0.11012759, 0.10102472,
            0.10002166, 0.07439288, 0.27919958, 0.12491598, 0.06774594,
            0.72513764, 0.17714986, 0.67373352, 0.80679436, 0.52908941,
            0.15695938, 0.49722003, 0.41970014, 0.62375224, 0.53695622,
            0.25474238, 0.79135707, 0.2503871, 0.25352337, 0.33474211,
            0.19308929, 0.24658944, 0.25495092, 0.30867144, 0.41240259,
            0.59412526, 0.16811226, 0.48282791, 0.36566756, 0.09279325,
            0.75337353, 0.57128885, 0.52974123, 0.44548504, 0.77748843,
            0.3224082, 0.40054277, 0.29522468, 0.19673553, 0.73781774,
            0.57680312, 0.44545573, 0.30242355, 0.38720223, 0.16632904,
            0.30804092, 0.56385194, 0.60012179, 0.48324821, 0.24636345,
            0.26153216, 0.2348285, 0.29023669, 0.41011454, 0.36472083,
            0.65922069, 0.30476903, 0.09986775, 0.70658332, 0.30713075,
            0.36096386, 0.54962701, 0.71996086, 0.6633756])
class Scotvote(object):
    """
    Reference results for a Gamma-family GLM on the Scottish vote data.

    Used with TestGlmGamma.  All values are hard-coded from external
    runs (R and Stata), as noted per attribute.
    """
    def __init__(self):
        # Coefficient estimates.
        self.params = (
            4.961768e-05, 2.034423e-03, -7.181429e-05, 1.118520e-04,
            -1.467515e-07, -5.186831e-04, -2.42717498e-06, -1.776527e-02)
        # Standard errors of the estimates.
        self.bse = (
            1.621577e-05, 5.320802e-04, 2.711664e-05, 4.057691e-05,
            1.236569e-07, 2.402534e-04, 7.460253e-07, 1.147922e-02)
        self.null_deviance = 0.536072
        self.df_null = 31
        self.deviance = 0.087388516417
        self.df_resid = 24
        self.df_model = 7
        # Information criteria differ between R and Stata conventions,
        # so both reference values are kept.
        self.aic_R = 182.947045954721
        self.aic_Stata = 10.72212
        self.bic_Stata = -83.09027
        self.llf = -163.5539382  # from Stata, same as ours with scale = 1
        # self.llf = -82.47352  # Very close to ours as is
        self.scale = 0.003584283
        self.pearson_chi2 = .0860228056
        # Reference residuals live in the shared glm_test_resids module.
        self.resids = glm_test_resids.scotvote_resids
        # Fitted values for all 32 observations.
        self.fittedvalues = np.array([
            57.80431482, 53.2733447, 50.56347993, 58.33003783,
            70.46562169, 56.88801284, 66.81878401, 66.03410393,
            57.92937473, 63.23216907, 53.9914785, 61.28993391,
            64.81036393, 63.47546816, 60.69696114, 74.83508176,
            56.56991106, 72.01804172, 64.35676519, 52.02445881,
            64.24933079, 71.15070332, 45.73479688, 54.93318588,
            66.98031261, 52.02479973, 56.18413736, 58.12267471,
            67.37947398, 60.49162862, 73.82609217, 69.61515621])
class Cancer(object):
    '''
    Base class that loads the cancer survival data shared by the
    CancerLog / CancerIdentity result classes (endog = studytime,
    exog = age + drug dummies + constant).

    The Cancer data can be found here

    https://www.stata-press.com/data/r10/rmain.html
    '''
    def __init__(self):
        filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                "stata_cancer_glm.csv")
        # BUG FIX: the original passed a bare open(filename, 'rb') and never
        # closed it, leaking the file handle.  A context manager closes it
        # deterministically; np.recfromcsv is kept so column-name handling
        # is unchanged.
        with open(filename, 'rb') as fd:
            data = np.recfromcsv(fd)
        self.endog = data.studytime
        # Encode the drug variable as dummies, dropping the first level
        # as the reference category.
        dummies = pd.get_dummies(pd.Series(data.drug, dtype="category"),
                                 drop_first=True)
        design = np.column_stack((data.age, dummies)).astype(float)
        self.exog = add_constant(design, prepend=False)
class CancerLog(Cancer):
    """
    Reference results for the cancer data with a log link.

    Used with TestGlmGammaLog.  Data loading (endog/exog) is inherited
    from Cancer; everything below is hard-coded reference output.
    """
    def __init__(self):
        super(CancerLog, self).__init__()
        # 48 x 5 array of reference residuals.  The meaning of the five
        # columns is defined by the consuming test, not here —
        # presumably the usual GLM residual types; TODO confirm.
        self.resids = np.array([
            [-8.52598100e-01, -1.45739100e+00, -3.92408100e+01,
             -1.41526900e+00, -5.78417200e+00],
            [-8.23683800e-01, -1.35040200e+00, -2.64957500e+01,
             -1.31777000e+00, -4.67162900e+00],
            [-7.30450400e-01, -1.07754600e+00, -4.02136400e+01,
             -1.06208800e+00, -5.41978500e+00],
            [-7.04471600e-01, -1.01441500e+00, -7.25951500e+01,
             -1.00172900e+00, -7.15130900e+00],
            [-5.28668000e-01, -6.68617300e-01, -3.80758100e+01,
             -6.65304600e-01, -4.48658700e+00],
            [-2.28658500e-01, -2.48859700e-01, -6.14913600e+00,
             -2.48707200e-01, -1.18577100e+00],
            [-1.93939400e-01, -2.08119900e-01, -7.46226500e+00,
             -2.08031700e-01, -1.20300800e+00],
            [-3.55635700e-01, -4.09525000e-01, -2.14132500e+01,
             -4.08815100e-01, -2.75958600e+00],
            [-5.73360000e-02, -5.84700000e-02, -4.12946200e+00,
             -5.84681000e-02, -4.86586900e-01],
            [3.09828000e-02, 3.06685000e-02, 1.86551100e+00,
             3.06682000e-02, 2.40413800e-01],
            [-2.11924300e-01, -2.29071300e-01, -2.18386100e+01,
             -2.28953000e-01, -2.15130900e+00],
            [-3.10989000e-01, -3.50739300e-01, -4.19249500e+01,
             -3.50300400e-01, -3.61084500e+00],
            [-9.22250000e-03, -9.25100000e-03, -1.13679700e+00,
             -9.25100000e-03, -1.02392100e-01],
            [2.39402500e-01, 2.22589700e-01, 1.88577300e+01,
             2.22493500e-01, 2.12475600e+00],
            [3.35166000e-02, 3.31493000e-02, 4.51842400e+00,
             3.31489000e-02, 3.89155400e-01],
            [8.49829400e-01, 6.85180200e-01, 3.57627500e+01,
             6.82689900e-01, 5.51291500e+00],
            [4.12934200e-01, 3.66785200e-01, 4.65392600e+01,
             3.66370400e-01, 4.38379500e+00],
            [4.64148400e-01, 4.07123200e-01, 6.25726500e+01,
             4.06561900e-01, 5.38915500e+00],
            [1.71104600e+00, 1.19474800e+00, 1.12676500e+02,
             1.18311900e+00, 1.38850500e+01],
            [1.26571800e+00, 9.46389000e-01, 1.30431000e+02,
             9.40244600e-01, 1.28486900e+01],
            [-3.48532600e-01, -3.99988300e-01, -2.95638100e+01,
             -3.99328600e-01, -3.20997700e+00],
            [-4.04340300e-01, -4.76960100e-01, -4.10254300e+01,
             -4.75818000e-01, -4.07286500e+00],
            [-4.92057900e-01, -6.08818300e-01, -9.34509600e+01,
             -6.06357200e-01, -6.78109700e+00],
            [-4.02876400e-01, -4.74878400e-01, -9.15226200e+01,
             -4.73751900e-01, -6.07225700e+00],
            [-5.15056700e-01, -6.46013300e-01, -2.19014600e+02,
             -6.43043500e-01, -1.06209700e+01],
            [-8.70423000e-02, -8.97043000e-02, -1.26361400e+01,
             -8.96975000e-02, -1.04875100e+00],
            [1.28362300e-01, 1.23247800e-01, 1.70383300e+01,
             1.23231000e-01, 1.47887800e+00],
            [-2.39271900e-01, -2.61562100e-01, -9.30283300e+01,
             -2.61384400e-01, -4.71795100e+00],
            [7.37246500e-01, 6.08186000e-01, 6.25359600e+01,
             6.06409700e-01, 6.79002300e+00],
            [-3.64110000e-02, -3.68626000e-02, -1.41565300e+01,
             -3.68621000e-02, -7.17951200e-01],
            [2.68833000e-01, 2.47933100e-01, 6.67934100e+01,
             2.47801000e-01, 4.23748400e+00],
            [5.96389600e-01, 5.07237700e-01, 1.13265500e+02,
             5.06180100e-01, 8.21890300e+00],
            [1.98218000e-02, 1.96923000e-02, 1.00820900e+01,
             1.96923000e-02, 4.47040700e-01],
            [7.74936000e-01, 6.34305300e-01, 2.51883900e+02,
             6.32303700e-01, 1.39711800e+01],
            [-7.63925100e-01, -1.16591700e+00, -4.93461700e+02,
             -1.14588000e+00, -1.94156600e+01],
            [-6.23771700e-01, -8.41174800e-01, -4.40679600e+02,
             -8.34266300e-01, -1.65796100e+01],
            [-1.63272900e-01, -1.73115100e-01, -6.73975900e+01,
             -1.73064800e-01, -3.31725800e+00],
            [-4.28562500e-01, -5.11932900e-01, -4.73787800e+02,
             -5.10507400e-01, -1.42494800e+01],
            [8.00693000e-02, 7.80269000e-02, 3.95353400e+01,
             7.80226000e-02, 1.77920500e+00],
            [-2.13674400e-01, -2.31127400e-01, -2.15987000e+02,
             -2.31005700e-01, -6.79344600e+00],
            [-1.63544000e-02, -1.64444000e-02, -1.05642100e+01,
             -1.64444000e-02, -4.15657600e-01],
            [2.04900500e-01, 1.92372100e-01, 1.10651300e+02,
             1.92309400e-01, 4.76156600e+00],
            [-1.94758900e-01, -2.09067700e-01, -2.35484100e+02,
             -2.08978200e-01, -6.77219400e+00],
            [3.16727400e-01, 2.88367800e-01, 1.87065600e+02,
             2.88162100e-01, 7.69732400e+00],
            [6.24234900e-01, 5.27632500e-01, 2.57678500e+02,
             5.26448400e-01, 1.26827400e+01],
            [8.30241100e-01, 6.72002100e-01, 2.86513700e+02,
             6.69644800e-01, 1.54232100e+01],
            [6.55140000e-03, 6.53710000e-03, 7.92130700e+00,
             6.53710000e-03, 2.27805800e-01],
            [3.41595200e-01, 3.08985000e-01, 2.88667600e+02,
             3.08733300e-01, 9.93012900e+00]])

        self.null_deviance = 27.92207137420696  # From R (bug in rpy)
        # Coefficient estimates and their standard errors.
        self.params = np.array([
            -0.04477778, 0.57437126, 1.05210726, 4.64604002])
        self.bse = np.array([0.0147328, 0.19694727, 0.19772507, 0.83534671])
        self.aic_R = 331.89022395372069
        self.aic_Stata = 7.403608467857651
        self.deviance = 16.174635536991005
        self.scale = 0.31805268736385695
        # self.llf = -160.94511197686035  # From R
        self.llf = -173.6866032285836  # from Stata
        self.bic_Stata = -154.1582089453923  # from Stata
        self.df_model = 3
        self.df_resid = 44
        self.chi2 = 36.77821448266359  # from Stata not in sm
        # Fitted values for all 48 observations.
        self.fittedvalues = np.array([
            6.78419193, 5.67167253, 7.41979002, 10.15123371,
            8.48656317, 5.18582263, 6.20304079, 7.75958258,
            8.48656317, 7.75958258, 10.15123371, 11.61071755,
            11.10228357, 8.87520908, 11.61071755, 6.48711178,
            10.61611394, 11.61071755, 8.11493609, 10.15123371,
            9.21009116, 10.07296716, 13.78112366, 15.07225103,
            20.62079147, 12.04881666, 11.5211983, 19.71780584,
            9.21009116, 19.71780584, 15.76249142, 13.78112366,
            22.55271436, 18.02872842, 25.41575239, 26.579678,
            20.31745227, 33.24937131, 22.22095589, 31.79337946,
            25.41575239, 23.23857437, 34.77204095, 24.30279515,
            20.31745227, 18.57700761, 34.77204095, 29.06987768])
class CancerIdentity(Cancer):
    """
    Reference results for the cancer data with an identity link.

    Used with TestGlmGammaIdentity.  Data loading (endog/exog) is
    inherited from Cancer; everything below is hard-coded reference
    output.
    """
    def __init__(self):
        super(CancerIdentity, self).__init__()
        # 48 x 5 array of reference residuals (same values as
        # CancerLog.resids).  The meaning of the five columns is defined
        # by the consuming test, not here — TODO confirm.
        self.resids = np.array([
            [-8.52598100e-01, -1.45739100e+00, -3.92408100e+01,
             -1.41526900e+00, -5.78417200e+00],
            [-8.23683800e-01, -1.35040200e+00, -2.64957500e+01,
             -1.31777000e+00, -4.67162900e+00],
            [-7.30450400e-01, -1.07754600e+00, -4.02136400e+01,
             -1.06208800e+00, -5.41978500e+00],
            [-7.04471600e-01, -1.01441500e+00, -7.25951500e+01,
             -1.00172900e+00, -7.15130900e+00],
            [-5.28668000e-01, -6.68617300e-01, -3.80758100e+01,
             -6.65304600e-01, -4.48658700e+00],
            [-2.28658500e-01, -2.48859700e-01, -6.14913600e+00,
             -2.48707200e-01, -1.18577100e+00],
            [-1.93939400e-01, -2.08119900e-01, -7.46226500e+00,
             -2.08031700e-01, -1.20300800e+00],
            [-3.55635700e-01, -4.09525000e-01, -2.14132500e+01,
             -4.08815100e-01, -2.75958600e+00],
            [-5.73360000e-02, -5.84700000e-02, -4.12946200e+00,
             -5.84681000e-02, -4.86586900e-01],
            [3.09828000e-02, 3.06685000e-02, 1.86551100e+00,
             3.06682000e-02, 2.40413800e-01],
            [-2.11924300e-01, -2.29071300e-01, -2.18386100e+01,
             -2.28953000e-01, -2.15130900e+00],
            [-3.10989000e-01, -3.50739300e-01, -4.19249500e+01,
             -3.50300400e-01, -3.61084500e+00],
            [-9.22250000e-03, -9.25100000e-03, -1.13679700e+00,
             -9.25100000e-03, -1.02392100e-01],
            [2.39402500e-01, 2.22589700e-01, 1.88577300e+01,
             2.22493500e-01, 2.12475600e+00],
            [3.35166000e-02, 3.31493000e-02, 4.51842400e+00,
             3.31489000e-02, 3.89155400e-01],
            [8.49829400e-01, 6.85180200e-01, 3.57627500e+01,
             6.82689900e-01, 5.51291500e+00],
            [4.12934200e-01, 3.66785200e-01, 4.65392600e+01,
             3.66370400e-01, 4.38379500e+00],
            [4.64148400e-01, 4.07123200e-01, 6.25726500e+01,
             4.06561900e-01, 5.38915500e+00],
            [1.71104600e+00, 1.19474800e+00, 1.12676500e+02,
             1.18311900e+00, 1.38850500e+01],
            [1.26571800e+00, 9.46389000e-01, 1.30431000e+02,
             9.40244600e-01, 1.28486900e+01],
            [-3.48532600e-01, -3.99988300e-01, -2.95638100e+01,
             -3.99328600e-01, -3.20997700e+00],
            [-4.04340300e-01, -4.76960100e-01, -4.10254300e+01,
             -4.75818000e-01, -4.07286500e+00],
            [-4.92057900e-01, -6.08818300e-01, -9.34509600e+01,
             -6.06357200e-01, -6.78109700e+00],
            [-4.02876400e-01, -4.74878400e-01, -9.15226200e+01,
             -4.73751900e-01, -6.07225700e+00],
            [-5.15056700e-01, -6.46013300e-01, -2.19014600e+02,
             -6.43043500e-01, -1.06209700e+01],
            [-8.70423000e-02, -8.97043000e-02, -1.26361400e+01,
             -8.96975000e-02, -1.04875100e+00],
            [1.28362300e-01, 1.23247800e-01, 1.70383300e+01,
             1.23231000e-01, 1.47887800e+00],
            [-2.39271900e-01, -2.61562100e-01, -9.30283300e+01,
             -2.61384400e-01, -4.71795100e+00],
            [7.37246500e-01, 6.08186000e-01, 6.25359600e+01,
             6.06409700e-01, 6.79002300e+00],
            [-3.64110000e-02, -3.68626000e-02, -1.41565300e+01,
             -3.68621000e-02, -7.17951200e-01],
            [2.68833000e-01, 2.47933100e-01, 6.67934100e+01,
             2.47801000e-01, 4.23748400e+00],
            [5.96389600e-01, 5.07237700e-01, 1.13265500e+02,
             5.06180100e-01, 8.21890300e+00],
            [1.98218000e-02, 1.96923000e-02, 1.00820900e+01,
             1.96923000e-02, 4.47040700e-01],
            [7.74936000e-01, 6.34305300e-01, 2.51883900e+02,
             6.32303700e-01, 1.39711800e+01],
            [-7.63925100e-01, -1.16591700e+00, -4.93461700e+02,
             -1.14588000e+00, -1.94156600e+01],
            [-6.23771700e-01, -8.41174800e-01, -4.40679600e+02,
             -8.34266300e-01, -1.65796100e+01],
            [-1.63272900e-01, -1.73115100e-01, -6.73975900e+01,
             -1.73064800e-01, -3.31725800e+00],
            [-4.28562500e-01, -5.11932900e-01, -4.73787800e+02,
             -5.10507400e-01, -1.42494800e+01],
            [8.00693000e-02, 7.80269000e-02, 3.95353400e+01,
             7.80226000e-02, 1.77920500e+00],
            [-2.13674400e-01, -2.31127400e-01, -2.15987000e+02,
             -2.31005700e-01, -6.79344600e+00],
            [-1.63544000e-02, -1.64444000e-02, -1.05642100e+01,
             -1.64444000e-02, -4.15657600e-01],
            [2.04900500e-01, 1.92372100e-01, 1.10651300e+02,
             1.92309400e-01, 4.76156600e+00],
            [-1.94758900e-01, -2.09067700e-01, -2.35484100e+02,
             -2.08978200e-01, -6.77219400e+00],
            [3.16727400e-01, 2.88367800e-01, 1.87065600e+02,
             2.88162100e-01, 7.69732400e+00],
            [6.24234900e-01, 5.27632500e-01, 2.57678500e+02,
             5.26448400e-01, 1.26827400e+01],
            [8.30241100e-01, 6.72002100e-01, 2.86513700e+02,
             6.69644800e-01, 1.54232100e+01],
            [6.55140000e-03, 6.53710000e-03, 7.92130700e+00,
             6.53710000e-03, 2.27805800e-01],
            [3.41595200e-01, 3.08985000e-01, 2.88667600e+02,
             3.08733300e-01, 9.93012900e+00]])

        # Coefficient estimates and their standard errors.
        self.params = np.array([
            -0.5369833, 6.47296332, 16.20336802, 38.96617431])
        self.bse = np.array([
            0.13341238, 2.1349966, 3.87411875, 8.19235553])
        self.aic_R = 328.39209118952965
        # TODO: the below will fail
        self.aic_Stata = 7.381090276021671
        self.deviance = 15.093762327607557
        self.scale = 0.29512089119443752
        self.null_deviance = 27.92207137420696  # from R bug in RPy
        # NOTE: our scale is Stata's dispers_p (pearson?)
        # TODO: if scale is analogous to Stata's dispersion, then this might
        # be where the discrepancies come from?
        # self.llf = -159.19604559476483  # From R
        self.llf = -173.1461666245201  # From Stata
        self.bic_Stata = -155.2390821535193
        self.df_model = 3
        self.df_resid = 44
        self.chi2 = 51.56632068622578
        # Fitted values for all 48 observations.
        self.fittedvalues = np.array([
            6.21019277, 4.06225956,
            7.28415938, 11.04304251,
            8.89510929, 2.98829295, 5.13622616, 7.82114268,
            8.89510929, 7.82114268, 11.04304251, 12.65399242,
            12.11700911, 9.43209259, 12.65399242, 5.67320947,
            11.58002581, 12.65399242, 8.35812599, 11.04304251,
            9.46125627, 10.53522287, 14.294106, 15.36807261,
            19.12695574, 12.68315609, 12.14617279, 18.58997243,
            9.46125627, 18.58997243, 15.90505591, 14.294106,
            20.20092234, 17.51600582, 25.63546061, 26.17244391,
            22.95054409, 28.85736043, 24.0245107, 28.32037713,
            25.63546061, 24.561494, 29.39434374, 25.09847731,
            22.95054409, 21.87657748, 29.39434374, 27.24641052])
class Cpunish(object):
    '''
    Reference results for the capital-punishment (cpunish) data.

    The following are from the R script in models.datasets.cpunish

    Slightly different than published results, but should be correct
    Probably due to rounding in cleaning?
    '''
    def __init__(self):
        # Coefficient estimates.
        self.params = (
            2.611017e-04, 7.781801e-02, -9.493111e-02, 2.969349e-01,
            2.301183e+00, -1.872207e+01, -6.801480e+00)
        # Standard errors of the estimates.
        self.bse = (
            5.187132e-05, 7.940193e-02, 2.291926e-02, 4.375164e-01,
            4.283826e-01, 4.283961e+00, 4.146850e+00)
        self.null_deviance = 136.57281747225
        self.df_null = 16
        self.deviance = 18.591641759528944
        self.df_resid = 10
        self.df_model = 6
        self.aic_R = 77.8546573896503  # same as Stata
        self.aic_Stata = 4.579685683305706
        self.bic_Stata = -9.740492454486446
        self.chi2 = 128.8021169250578  # from Stata not in sm
        self.llf = -31.92732869482515
        self.scale = 1
        self.pearson_chi2 = 24.75374835
        # Reference residuals live in the shared glm_test_resids module.
        self.resids = glm_test_resids.cpunish_resids
        # Fitted values for all 17 observations.
        self.fittedvalues = np.array([
            35.2263655, 8.1965744, 1.3118966,
            3.6862982, 2.0823003, 1.0650316, 1.9260424, 2.4171405,
            1.8473219, 2.8643241, 3.1211989, 3.3382067, 2.5269969,
            0.8972542, 0.9793332, 0.5346209, 1.9790936])
class Cpunish_offset(Cpunish):
    '''
    Same model as Cpunish but with offset of 100. Many things do not change.
    '''
    def __init__(self):
        # Inherit every reference value from Cpunish, then override only
        # the coefficient estimates and standard errors, which differ
        # under the offset.
        super(Cpunish_offset, self).__init__()
        self.params = (
            -1.140665e+01, 2.611017e-04, 7.781801e-02,
            -9.493111e-02, 2.969349e-01, 2.301183e+00,
            -1.872207e+01)
        self.bse = (
            4.147e+00, 5.187e-05, 7.940e-02, 2.292e-02,
            4.375e-01, 4.284e-01, 4.284e+00)
class InvGauss(object):
'''
Usef
Data was generated by Hardin and Hilbe using Stata.
Note only the first 5000 observations are used because
the models code currently uses np.eye.
'''
# FIXME: do something with the commented-out code below
# np.random.seed(54321)
# x1 = np.abs(stats.norm.ppf((np.random.random(5000))))
# x2 = np.abs(stats.norm.ppf((np.random.random(5000))))
# X = np.column_stack((x1, x2))
# X = add_constant(X)
# params = np.array([.5, -.25, 1])
# eta = np.dot(X, params)
# mu = 1/np.sqrt(eta)
# sigma = .5
# This is not correct. Errors need to be normally distributed
# But Y needs to be Inverse Gaussian, so we could build it up
# by throwing out data?
# Refs:
# * Lai (2009) Generating inverse Gaussian random variates by
# approximation
# * Atkinson (1982) The simulation of generalized inverse gaussian
# and hyperbolic random variables seems to be the canonical ref
# Y = np.dot(X, params) + np.random.wald(mu, sigma, 1000)
# model = GLM(Y, X, family=models.family.InverseGaussian(link=\
# models.family.links.identity()))
def __init__(self):
# set up data #
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"inv_gaussian.csv")
with open(filename, 'r') as fd:
data = np.genfromtxt(fd, delimiter=",", dtype=float)[1:]
self.endog = data[:5000, 0]
self.exog = data[:5000, 1:]
self.exog = add_constant(self.exog, prepend=False)
# Results
# NOTE: loglikelihood difference in R vs. Stata vs. Models
# is the same situation as gamma
self.params = (0.4519770, -0.2508288, 1.0359574)
self.bse = (0.03148291, 0.02237211, 0.03429943)
self.null_deviance = 1520.673165475461
self.df_null = 4999
self.deviance = 1423.943980407997
self.df_resid = 4997
self.df_model = 2
self.aic_R = 5059.41911646446
self.aic_Stata = 1.552280060977946
self.bic_Stata = -41136.47039418921
self.llf = -3877.700354 # Stata is same as ours with scale set to 1
# self.llf = -2525.70955823223 # from R, close to ours
self.scale = 0.2867266359127567
self.pearson_chi2 = 1432.771536
self.resids = glm_test_resids.invgauss_resids
self.fittedvalues = np.array([
1.0404339, 0.96831526, 0.81265833, 0.9958362, 1.05433442,
1.09866137, 0.95548191, 1.38082105, 0.98942888, 0.96521958,
1.02684056, 0.91412576, 0.91492102, 0.92639676, 0.96763425,
0.80250852, 0.85281816, 0.90962261, 0.95550299, 0.86386815,
0.94760134, 0.94269533, 0.98960509, 0.84787252, 0.78949111,
0.76873582, 0.98933453, 0.95105574, 0.8489395, 0.88962971,
0.84856357, 0.88567313, 0.84505405, 0.84626147, 0.77250421,
0.90175601, 1.15436378, 0.98375558, 0.83539542, 0.82845381,
0.90703971, 0.85546165, 0.96707286, 0.84127197, 0.82096543,
1.1311227, 0.87617029, 0.91194419, 1.05125511, 0.95330314,
0.75556148, 0.82573228, 0.80424982, 0.83800144, 0.8203644,
0.84423807, 0.98348433, 0.93165089, 0.83968706, 0.79256287,
1.0302839, 0.90982028, 0.99471562, 0.70931825, 0.85471721,
1.02668021, 1.11308301, 0.80497105, 1.02708486, 1.07671424,
0.821108, 0.86373486, 0.99104964, 1.06840593, 0.94947784,
0.80982122, 0.95778065, 1.0254212, 1.03480946, 0.83942363,
1.17194944, 0.91772559, 0.92368795, 1.10410916, 1.12558875,
1.11290791, 0.87816503, 1.04299294, 0.89631173, 1.02093004,
0.86331723, 1.13134858, 1.01807861, 0.98441692, 0.72567667,
1.42760495, 0.78987436, 0.72734482, 0.81750166, 0.86451854,
0.90564264, 0.81022323, 0.98720325, 0.98263709, 0.99364823,
0.7264445, 0.81632452, 0.7627845, 1.10726938, 0.79195664,
0.86836774, 1.01558149, 0.82673675, 0.99529548, 0.97155636,
0.980696, 0.85460503, 1.00460782, 0.77395244, 0.81229831,
0.94078297, 1.05910564, 0.95921954, 0.97841172, 0.93093166,
0.93009865, 0.89888111, 1.18714408, 0.98964763, 1.03388898,
1.67554215, 0.82998876, 1.34100687, 0.86766346, 0.96392316,
0.91371033, 0.76589296, 0.92329051, 0.82560326, 0.96758148,
0.8412995, 1.02550678, 0.74911108, 0.8751611, 1.01389312,
0.87865556, 1.24095868, 0.90678261, 0.85973204, 1.05617845,
0.94163038, 0.88087351, 0.95699844, 0.86083491, 0.89669384,
0.78646825, 1.0014202, 0.82399199, 1.05313139, 1.06458324,
0.88501766, 1.19043294, 0.8458026, 1.00231535, 0.72464305,
0.94790753, 0.7829744, 1.1953009, 0.85574035, 0.95433052,
0.96341484, 0.91362908, 0.94097713, 0.87273804, 0.81126399,
0.72715262, 0.85526116, 0.76015834, 0.8403826, 0.9831501,
1.17104665, 0.78862494, 1.01054909, 0.91511601, 1.0990797,
0.91352124, 1.13671162, 0.98793866, 1.0300545, 1.04490115,
0.85778231, 0.94824343, 1.14510618, 0.81305136, 0.88085051,
0.94743792, 0.94875465, 0.96206997, 0.94493612, 0.93547218,
1.09212018, 0.86934651, 0.90532353, 1.07066001, 1.26197714,
0.93858662, 0.9685039, 0.7946546, 1.03052031, 0.75395899,
0.87527062, 0.82156476, 0.949774, 1.01000235, 0.82613526,
1.0224591, 0.91529149, 0.91608832, 1.09418385, 0.8228272,
1.06337472, 1.05533176, 0.93513063, 1.00055806, 0.95474743,
0.91329368, 0.88711836, 0.95584926, 0.9825458, 0.74954073,
0.96964967, 0.88779583, 0.95321846, 0.95390055, 0.95369029,
0.94326714, 1.31881201, 0.71512263, 0.84526602, 0.92323824,
1.01993108, 0.85155992, 0.81416851, 0.98749128, 1.00034192,
0.98763473, 1.05974138, 1.05912658, 0.89772172, 0.97905626,
1.1534306, 0.92304181, 1.16450278, 0.7142307, 0.99846981,
0.79861247, 0.73939835, 0.93776385, 1.0072242, 0.89159707,
1.05514263, 1.05254569, 0.81005146, 0.95179784, 1.00278795,
1.04910398, 0.88427798, 0.74394266, 0.92941178, 0.83622845,
0.84064958, 0.93426956, 1.03619314, 1.22439347, 0.73510451,
0.82997071, 0.90828036, 0.80866989, 1.34078212, 0.85079169,
0.88346039, 0.76871666, 0.96763454, 0.66936914, 0.94175741,
0.97127617, 1.00844382, 0.83449557, 0.88095564, 1.17711652,
1.0547188, 1.04525593, 0.93817487, 0.77978294, 1.36143199,
1.16127997, 1.03792952, 1.03151637, 0.83837387, 0.94326066,
1.0054787, 0.99656841, 1.05575689, 0.97641643, 0.85108163,
0.82631589, 0.77407305, 0.90566132, 0.91308164, 0.95560906,
1.04523011, 1.03773723, 0.97378685, 0.83999133, 1.06926871,
1.01073982, 0.9804959, 1.06473061, 1.25315673, 0.969175,
0.63443508, 0.84574684, 1.06031239, 0.93834605, 1.01784925,
0.93488249, 0.80240225, 0.88757274, 0.9224097, 0.99158962,
0.87412592, 0.76418199, 0.78044069, 1.03117412, 0.82042521,
1.10272129, 1.09673757, 0.89626935, 1.01678612, 0.84911824,
0.95821431, 0.99169558, 0.86853864, 0.92172772, 0.94046199,
0.89750517, 1.09599258, 0.92387291, 1.07770118, 0.98831383,
0.86352396, 0.83079533, 0.94431185, 1.12424626, 1.02553104,
0.8357513, 0.97019669, 0.76816092, 1.34011343, 0.86489527,
0.82156358, 1.25529129, 0.86820218, 0.96970237, 0.85850546,
0.97429559, 0.84826078, 1.02498396, 0.72478517, 0.993497,
0.76918521, 0.91079198, 0.80988325, 0.75431095, 1.02918073,
0.88884197, 0.82625507, 0.78564563, 0.91505355, 0.88896863,
0.85882361, 0.81538316, 0.67656235, 0.8564822, 0.82473022,
0.92928331, 0.98068415, 0.82605685, 1.0150412, 1.00631678,
0.92405101, 0.88909552, 0.94873568, 0.87657342, 0.8280683,
0.77596382, 0.96598811, 0.78922426, 0.87637606, 0.98698735,
0.92207026, 0.71487846, 1.03845478, 0.70749745, 1.08603388,
0.92697779, 0.86470448, 0.70119494, 1.00596847, 0.91426549,
1.05318838, 0.79621712, 0.96169742, 0.88053405, 0.98963934,
0.94152997, 0.88413591, 0.75035344, 0.86007123, 0.83713514,
0.91234911, 0.79562744, 0.84099675, 1.0334279, 1.00272243,
0.95359383, 0.84292969, 0.94234155, 0.90190899, 0.97302022,
1.1009829, 1.0148975, 0.99082987, 0.75916515, 0.9204784,
0.94477378, 1.01108683, 1.00038149, 0.9259798, 1.19400436,
0.80191877, 0.79565851, 0.81865924, 0.79003506, 0.8995508,
0.73137983, 0.88336018, 0.7855268, 1.04478073, 0.90857981,
1.16076951, 0.76096486, 0.90004113, 0.83819665, 0.95295365,
1.09911441, 0.78498197, 0.95094991, 0.94333419, 0.95131688,
0.82961049, 1.08001761, 1.06426458, 0.94291798, 1.04381938,
0.90380364, 0.74060138, 0.98701862, 0.72250236, 0.86125293,
0.76488061, 0.9858051, 0.98099677, 0.96849209, 0.90053351,
0.88469597, 0.80688516, 1.06396217, 1.02446023, 0.911863,
0.98837746, 0.91102987, 0.92810392, 1.13526335, 1.00419541,
1.00866175, 0.74352261, 0.91051641, 0.81868428, 0.93538014,
0.87822651, 0.93278572, 1.0356074, 1.25158731, 0.98372647,
0.81335741, 1.06441863, 0.80305786, 0.95201148, 0.90283451,
1.17319519, 0.8984894, 0.88911288, 0.91474736, 0.94512294,
0.92956283, 0.86682085, 1.08937227, 0.94825713, 0.9787145,
1.16747163, 0.80863682, 0.98314119, 0.91052823, 0.80913225,
0.78503169, 0.78751737, 1.08932193, 0.86859845, 0.96847458,
0.93468839, 1.10769915, 1.1769249, 0.84916138, 1.00556408,
0.84508585, 0.92617942, 0.93985886, 1.17303268, 0.81172495,
0.93482682, 1.04082486, 1.03209348, 0.97220394, 0.90274672,
0.93686291, 0.91116431, 1.14814563, 0.83279158, 0.95853283,
1.0261179, 0.95779432, 0.86995883, 0.78164915, 0.89946906,
0.9194465, 0.97919367, 0.92719039, 0.89063569, 0.80847805,
0.81192101, 0.75044535, 0.86819023, 1.03420014, 0.8899434,
0.94899544, 0.9860773, 1.10047297, 1.00243849, 0.82153972,
1.14289945, 0.8604684, 0.87187524, 1.00415032, 0.78460709,
0.86319884, 0.92818335, 1.08892111, 1.06841003, 1.00735918,
1.20775251, 0.72613554, 1.25768191, 1.08573511, 0.89671127,
0.91259535, 1.01414208, 0.87422903, 0.82720677, 0.9568079,
1.00450416, 0.91043845, 0.84095709, 1.08010574, 0.69848293,
0.90769214, 0.94713501, 1.14808251, 1.0605676, 1.21734482,
0.78578521, 1.01516235, 0.94330326, 0.98363817, 0.99650084,
0.74280796, 0.96227123, 0.95741454, 1.00980406, 0.93468092,
1.10098591, 1.18175828, 0.8553791, 0.81713219, 0.82912143,
0.87599518, 1.15006511, 1.03151163, 0.8751847, 1.15701331,
0.73394166, 0.91426368, 0.96953458, 1.13901709, 0.83028721,
1.15742641, 0.9395442, 0.98118552, 0.89585426, 0.74147117,
0.8902096, 1.00212097, 0.97665858, 0.92624514, 0.98006601,
0.9507215, 1.00889825, 1.2406772, 0.88768719, 0.76587533,
1.0081044, 0.89608494, 1.00083526, 0.85594415, 0.76425576,
1.0286636, 1.13570272, 0.82020405, 0.81961271, 1.04586579,
1.26560245, 0.89721521, 1.19324037, 0.948205, 0.79414261,
0.85157002, 0.95155101, 0.91969239, 0.87699126, 1.03452982,
0.97093572, 1.14355781, 0.85088592, 0.79032079, 0.84521733,
0.99547581, 0.87593455, 0.8776799, 1.05531013, 0.94557017,
0.91538439, 0.79679863, 1.03398557, 0.88379021, 0.98850319,
1.05833423, 0.90055078, 0.92267584, 0.76273738, 0.98222632,
0.86392524, 0.78242646, 1.19417739, 0.89159895, 0.97565002,
0.85818308, 0.85334266, 1.85008011, 0.87199282, 0.77873231,
0.78036174, 0.96023918, 0.91574121, 0.89217979, 1.16421151,
1.29817786, 1.18683283, 0.96096225, 0.89964569, 1.00401442,
0.80758845, 0.89458758, 0.7994919, 0.85889356, 0.73147252,
0.7777221, 0.9148438, 0.72388117, 0.91134001, 1.0892724,
1.01736424, 0.86503014, 0.77344917, 1.04515616, 1.06677211,
0.93421936, 0.8821777, 0.91860774, 0.96381507, 0.70913689,
0.82354748, 1.12416046, 0.85989778, 0.90588737, 1.22832895,
0.65955579, 0.93828405, 0.88946418, 0.92152859, 0.83168025,
0.93346887, 0.96456078, 0.9039245, 1.03598695, 0.78405559,
1.21739525, 0.79019383, 0.84034646, 1.00273203, 0.96356393,
0.948103, 0.90279217, 1.0187839, 0.91630508, 1.15965854,
0.84203423, 0.98803156, 0.91604459, 0.90986512, 0.93384826,
0.76687038, 0.96251902, 0.80648134, 0.77336547, 0.85720164,
0.9351947, 0.88004728, 0.91083961, 1.06225829, 0.90230812,
0.72383932, 0.8343425, 0.8850996, 1.19037918, 0.93595522,
0.85061223, 0.84330949, 0.82397482, 0.92075047, 0.86129584,
0.99296756, 0.84912251, 0.8569699, 0.75252201, 0.80591772,
1.03902954, 1.04379139, 0.87360195, 0.97452318, 0.93240609,
0.85406409, 1.11717394, 0.95758536, 0.82772817, 0.67947416,
0.85957788, 0.93731268, 0.90349227, 0.79464185, 0.99148637,
0.8461071, 0.95399991, 1.04320664, 0.87290871, 0.96780849,
0.99467159, 0.96421545, 0.80174643, 0.86475812, 0.74421362,
0.85230296, 0.89891758, 0.77589592, 0.98331957, 0.87387233,
0.92023388, 1.03037742, 0.83796515, 1.0296667, 0.85891747,
1.02239978, 0.90958406, 1.09731875, 0.8032638, 0.84482057,
0.8233118, 0.86184709, 0.93105929, 0.99443502, 0.77442109,
0.98367982, 0.95786272, 0.81183444, 1.0526009, 0.86993018,
0.985886, 0.92016756, 1.00847155, 1.2309469, 0.97732206,
0.83074957, 0.87406987, 0.95268492, 0.94189139, 0.87056443,
1.0135018, 0.93051004, 1.5170931, 0.80948763, 0.83737473,
1.05461331, 0.97501633, 1.01449333, 0.79760056, 1.05756482,
0.97300884, 0.92674035, 0.8933763, 0.91624084, 1.13127607,
0.88115305, 0.9351562, 0.91430431, 1.11668229, 1.10000526,
0.88171963, 0.74914744, 0.94610698, 1.13841497, 0.90551414,
0.89773592, 1.01696097, 0.85096063, 0.80935471, 0.68458106,
1.2718979, 0.93550219, 0.96071403, 0.75434294, 0.95112257,
1.16233368, 0.73664915, 1.02195777, 1.07487625, 0.8937445,
0.78006023, 0.89588994, 1.16354892, 1.02629448, 0.89208642,
1.02088244, 0.85385355, 0.88586061, 0.94571704, 0.89710576,
0.95191525, 0.99819848, 0.97117841, 1.13899808, 0.88414949,
0.90938883, 1.02937917, 0.92936684, 0.87323594, 0.8384819,
0.87766945, 1.05869911, 0.91028734, 0.969953, 1.11036647,
0.94996802, 1.01305483, 1.03697568, 0.9750155, 1.04537837,
0.9314676, 0.86589798, 1.17446667, 1.02564533, 0.82088708,
0.96481845, 0.86148642, 0.79174298, 1.18029919, 0.82132544,
0.92193776, 1.03669516, 0.96637464, 0.83725933, 0.88776321,
1.08395861, 0.91255709, 0.96884738, 0.89840008, 0.91168146,
0.99652569, 0.95693101, 0.83144932, 0.99886503, 1.02819927,
0.95273533, 0.95959945, 1.08515986, 0.70269432, 0.79529303,
0.93355669, 0.92597539, 1.0745695, 0.87949758, 0.86133964,
0.95653873, 1.09161425, 0.91402143, 1.13895454, 0.89384443,
1.16281703, 0.8427015, 0.7657266, 0.92724079, 0.95383649,
0.86820891, 0.78942366, 1.11752711, 0.97902686, 0.87425286,
0.83944794, 1.12576718, 0.9196059, 0.89844835, 1.10874172,
1.00396783, 0.9072041, 1.63580253, 0.98327489, 0.68564426,
1.01007087, 0.92746473, 1.01328833, 0.99584546, 0.86381679,
1.0082541, 0.85414132, 0.87620981, 1.22461203, 1.03935516,
0.86457326, 0.95165828, 0.84762138, 0.83080254, 0.84715241,
0.80323344, 1.09282941, 1.00902453, 1.02834261, 1.09810743,
0.86560231, 1.31568763, 1.03754782, 0.81298745, 1.14500629,
0.87364384, 0.89928367, 0.96118471, 0.83321743, 0.90590461,
0.98739499, 0.79408399, 1.18513754, 1.05619307, 0.99920088,
1.04347259, 1.07689022, 1.24916765, 0.74246274, 0.90949597,
0.87077335, 0.81233276, 1.05403934, 0.98333063, 0.77689527,
0.93181907, 0.98853585, 0.80700332, 0.89570662, 0.97102475,
0.69178123, 0.72950409, 0.89661719, 0.84821737, 0.8724469,
0.96453177, 0.9690018, 0.87132764, 0.91711564, 1.79521288,
0.75894855, 0.90733112, 0.86565687, 0.90433268, 0.83412618,
1.26779628, 1.06999114, 0.73181364, 0.90334838, 0.86634581,
0.76999285, 1.55403008, 0.74712547, 0.84702579, 0.72396203,
0.82292773, 0.73633208, 0.90524618, 0.9954355, 0.85076517,
0.96097585, 1.21655611, 0.77658146, 0.81026686, 1.07540173,
0.94219623, 0.97472554, 0.72422803, 0.85055855, 0.85905477,
1.17391419, 0.87644114, 1.03573284, 1.16647944, 0.87810532,
0.89134419, 0.83531593, 0.93448128, 1.04967869, 1.00110843,
0.936784, 1.00143426, 0.79714807, 0.82656251, 0.95057309,
0.93821813, 0.93469098, 0.99825205, 0.95384714, 1.07063008,
0.97603699, 0.816668, 0.98286184, 0.86061483, 0.88166732,
0.93730982, 0.77633837, 0.87671549, 0.99192439, 0.86452825,
0.95880282, 0.7098419, 1.12717149, 1.16707939, 0.84854333,
0.87486963, 0.9255293, 1.06534197, 0.9888494, 1.09931069,
1.21859221, 0.97489537, 0.82508579, 1.14868922, 0.98076133,
0.85524084, 0.69042079, 0.93012936, 0.96908499, 0.94284892,
0.80114327, 0.919846, 0.95753354, 1.04536666, 0.77109284,
0.99942571, 0.79004323, 0.91820045, 0.97665489, 0.64689716,
0.89444405, 0.96106598, 0.74196857, 0.92905294, 0.70500318,
0.95074586, 0.98518665, 1.0794044, 1.00364488, 0.96710486,
0.92429638, 0.94383006, 1.12554253, 0.95199191, 0.87380738,
0.72183594, 0.94453761, 0.98663804, 0.68247366, 1.02761427,
0.93255355, 0.85264705, 1.00341417, 1.07765999, 0.97396039,
0.90770805, 0.82750901, 0.73824542, 1.24491161, 0.83152629,
0.78656996, 0.99062838, 0.98276905, 0.98291014, 1.12795903,
0.98742704, 0.9579893, 0.80451701, 0.87198344, 1.24746127,
0.95839155, 1.11708725, 0.97113877, 0.7721646, 0.95781621,
0.67069168, 1.05509376, 0.96071852, 0.99768666, 0.83008521,
0.9156695, 0.86314088, 1.23081412, 1.14723685, 0.8007289,
0.81590842, 1.31857558, 0.7753396, 1.11091566, 1.03560198,
1.01837739, 0.94882818, 0.82551111, 0.93188019, 0.99532255,
0.93848495, 0.77764975, 0.85192319, 0.79913938, 0.99495229,
0.96122733, 1.13845155, 0.95846389, 0.8891543, 0.97979531,
0.87167192, 0.88119611, 0.79655111, 0.9298217, 0.96399321,
1.02005428, 1.06936503, 0.86948022, 1.02560548, 0.9149464,
0.83797207, 0.86175383, 0.92455994, 0.89218435, 0.81546463,
0.98488771, 0.92784833, 0.87895608, 0.93366386, 1.17487238,
0.79088952, 0.9237694, 0.76389869, 0.931953, 0.76272078,
1.00304977, 0.86612561, 0.87870143, 0.93808276, 1.12489343,
1.00668791, 0.88027101, 0.88845209, 0.88574216, 0.84284514,
0.96594357, 0.94363002, 0.78245367, 0.92941326, 0.99622557,
0.83812683, 0.77901691, 0.9588432, 0.82057415, 0.95178868,
1.01904651, 0.97598844, 0.99369336, 1.12041918, 1.19432836,
0.91709572, 0.94645855, 0.93656587, 0.68754669, 0.80869784,
0.86704186, 0.83033797, 0.71892193, 0.97549489, 1.12150683,
0.76214802, 1.08564181, 0.84677802, 0.68080207, 1.03577057,
1.07937239, 0.6773357, 1.0279076, 0.89945816, 0.97765439,
0.91322633, 0.92490964, 0.92693575, 1.12297137, 0.81825246,
0.87598377, 1.11873032, 0.83472799, 1.21424495, 1.02318444,
1.01563195, 1.05663193, 0.82533918, 0.88766496, 0.95906474,
0.90738779, 0.93509534, 1.06658145, 1.00231797, 1.3131534,
0.88839464, 1.081006, 0.866936, 0.89030904, 0.91197562,
0.73449761, 0.95767806, 1.03407868, 0.79812826, 1.10555445,
0.85610722, 0.87420881, 1.04251375, 1.14286242, 1.00025972,
0.83742693, 1.11116502, 0.97424809, 0.92059325, 0.93958773,
0.80386755, 0.6881267, 0.88620708, 1.01715536, 1.12403581,
0.91078992, 0.81101399, 1.17271429, 1.09980447, 0.86063042,
0.80805811, 0.87988444, 0.97398188, 0.91808966, 0.90676805,
0.80042891, 0.84060789, 0.9710147, 1.00012669, 1.04805667,
0.66912164, 0.96111694, 0.86948596, 0.9056999, 1.01489333,
1.27876763, 0.873881, 0.98276702, 0.95553234, 0.82877996,
0.79697623, 0.77015376, 0.8234212, 1.13394959, 0.96244655,
1.06516156, 0.82743856, 1.02931842, 0.78093489, 1.01322256,
1.00348929, 0.9408142, 1.06495299, 0.8599522, 0.81640723,
0.81505589, 1.02506487, 0.91148383, 1.11134309, 0.83992234,
0.82982074, 0.9721429, 0.98897262, 1.01815004, 0.87838456,
0.80573592, 1.103707, 0.97326218, 1.08921236, 1.2638062,
0.83142563, 1.16028769, 0.86701564, 1.15610014, 0.98303722,
0.87138463, 0.75281511, 1.07715535, 0.91526065, 1.08769832,
0.83598308, 1.03580956, 0.9390066, 0.78544378, 1.03635836,
0.7974467, 0.99273331, 0.89639711, 0.9250066, 1.14323824,
0.9783478, 1.15460639, 0.94265587, 1.09317654, 0.78585439,
0.99523323, 0.95104776, 0.85582572, 0.96100168, 0.9131529,
0.86496966, 0.72414589, 1.05142704, 0.85570039, 0.98217968,
0.99031168, 1.01867086, 0.96781667, 0.98581487, 1.00415938,
1.0339337, 1.13987579, 1.14205543, 0.83393745, 0.96348647,
0.91895164, 0.77055293, 1.0053723, 0.93168993, 1.00332386,
1.04195993, 1.11933891, 0.87439883, 0.87156457, 0.96050419,
0.72718399, 1.13546762, 0.89614816, 0.85081037, 0.8831463,
0.76370482, 0.99582951, 1.01844155, 1.08611311, 1.15832217,
1.17551069, 0.97057262, 0.95163548, 0.98310701, 0.65874788,
0.9655409, 0.85675853, 1.34637286, 0.93779619, 1.0005791,
0.88104966, 1.14530829, 0.93687034, 1.01472112, 1.62464726,
0.84652357, 0.84639676, 0.87513324, 0.94837881, 0.85425129,
0.89820401, 0.94906277, 0.97796792, 0.98969445, 0.8036801,
1.03936478, 0.95898918, 0.82919938, 1.29609354, 0.97833841,
0.86862799, 0.88040491, 0.8741178, 0.80617278, 0.95983882,
0.9752235, 0.84292828, 0.9327284, 0.93297136, 1.06255543,
0.88756716, 1.13601403, 0.72311518, 0.95250034, 0.95369843,
1.02562728, 0.74354691, 0.78463923, 0.88720818, 1.07763289,
0.94502062, 0.81170329, 0.96516347, 0.76884811, 0.84169312,
0.83752837, 1.1487847, 1.04311868, 0.78128663, 0.74604211,
0.96488513, 1.1722513, 0.91661948, 1.06642815, 0.92185781,
0.93289001, 0.65208625, 0.75734648, 0.99580571, 1.21871511,
0.96316283, 1.06093093, 0.7914337, 0.90494572, 0.79235327,
0.90771769, 0.91355145, 0.98754767, 0.88938619, 0.89503537,
0.82764566, 0.77267065, 0.81520031, 0.90423926, 0.94289609,
0.88678376, 1.03209085, 0.81319963, 0.91600997, 0.81608666,
0.72429125, 0.95585073, 1.14039309, 1.00326452, 0.99629944,
0.95647901, 0.8927127, 0.96558599, 0.86305195, 1.0366906,
0.90494731, 0.95148458, 1.11229696, 1.17059748, 0.74867876,
0.99621909, 0.94246499, 0.82403515, 0.92144961, 0.93209989,
0.9705427, 0.97915309, 0.92431525, 0.7589944, 0.75208652,
0.89375154, 0.78820016, 1.24061454, 1.08031776, 0.88364539,
0.86909794, 0.98635253, 0.97620372, 1.24278282, 1.01146474,
0.93726261, 0.94411536, 1.08344492, 0.75389972, 1.09979822,
0.84271329, 1.16616317, 0.88177625, 0.8451345, 0.91355741,
0.99833789, 0.86172172, 0.87076203, 0.83743078, 0.99771528,
1.0469295, 0.87952668, 1.04362453, 0.96350831, 0.95744466,
0.84284283, 0.8773066, 0.85984544, 1.00589365, 0.88069101,
1.02331332, 1.06616241, 0.78475212, 1.02296979, 0.81480926,
1.09008244, 0.71435844, 0.79655626, 1.09824162, 0.87785428,
1.18020492, 0.99852432, 0.79028362, 0.80081103, 1.10940685,
1.08752313, 0.90673214, 0.84978348, 0.69466992, 0.77497046,
0.83074014, 0.87865947, 0.78890395, 0.7925195, 0.99749611,
0.91430636, 0.87863864, 0.95392862, 0.91430684, 0.97358575,
0.87999755, 0.88234274, 0.71682337, 1.09723693, 0.71907671,
0.97487202, 0.71792963, 0.88374828, 0.73386811, 0.9315647,
1.05020628, 0.99128682, 0.71831173, 1.07119604, 1.02028122,
1.04696848, 0.93335813, 1.04275931, 0.72181913, 0.8837163,
0.90283411, 0.96642474, 0.89851984, 0.8397063, 0.91185676,
1.00573193, 0.88430729, 0.7738957, 1.07361285, 0.92617819,
0.64251751, 1.05229257, 0.73378537, 1.08270418, 0.99490809,
1.13634433, 1.11979997, 1.03383516, 1.00661234, 1.05778729,
1.05977357, 1.13779694, 0.91237075, 1.04866775, 0.9163203,
0.93152436, 0.83607634, 1.13426049, 1.26438419, 0.93515536,
0.92181847, 0.86558905, 1.01985742, 1.44095931, 0.92256398,
0.83369288, 0.93369164, 0.8243758, 0.98278708, 0.80512458,
1.02092014, 0.73575074, 1.2214659, 0.85391033, 0.97617313,
0.82054292, 1.04792993, 0.93961791, 1.01145014, 0.89301558,
0.93167504, 0.88221321, 1.23543354, 0.97023998, 1.00197517,
0.85394662, 0.89426495, 0.81344186, 1.08242456, 0.76253284,
1.00642867, 0.76685541, 1.01487961, 0.84028343, 0.87979545,
0.92796937, 0.99796437, 1.28844084, 1.02827514, 1.03663144,
0.83164521, 0.95644234, 0.77797914, 0.96748275, 1.09139879,
0.84329253, 0.9539873, 0.80094065, 1.13771172, 0.91557533,
0.93370323, 0.79977904, 1.02721929, 1.16292026, 0.92976802,
0.85806865, 0.97824974, 1.02721582, 0.82773004, 0.9297126,
0.93769842, 1.14995068, 1.02895292, 0.90307101, 0.85918303,
1.14903979, 1.0344768, 0.7502627, 1.27452448, 1.12150928,
0.87274005, 1.09807041, 0.98634666, 1.03086907, 0.94743667,
0.91145542, 1.04395791, 0.83396016, 0.94783374, 0.96693806,
0.88864359, 0.93400675, 1.08563936, 0.78599906, 0.92142347,
1.15487344, 1.19946426, 0.92729226, 0.83333347, 0.90837637,
0.89191831, 1.0581614, 0.85162688, 1.10081699, 0.98295351,
0.86684217, 1.00867408, 0.95966205, 0.73170785, 1.3207658,
0.87988622, 0.82869937, 0.9620586, 0.71668579, 1.04105616,
0.71415591, 1.30198958, 0.81934393, 0.86731955, 0.99773712,
0.99943609, 0.87678188, 1.01650692, 0.73917494, 0.92077402,
0.98322263, 0.90623212, 0.88261034, 1.12798871, 0.84698889,
0.85312827, 0.91214965, 0.8778361, 0.99621569, 0.94155734,
0.66441342, 0.85925635, 0.98064691, 0.97107172, 0.96438785,
0.95670408, 0.87601389, 0.9388234, 0.91165254, 1.14769638,
0.99856344, 0.84391431, 0.94850194, 0.93754548, 0.86398937,
0.95090327, 1.07959765, 1.16684297, 0.82354834, 0.93165852,
0.91422292, 1.14872038, 0.87050113, 0.92322683, 1.04111597,
0.87780005, 0.94602618, 1.10071675, 0.88412438, 0.91286998,
0.9045216, 0.91750005, 0.98647095, 1.10986959, 0.98912028,
1.01565645, 0.93891294, 0.97696431, 0.91186476, 0.77363533,
1.00075969, 0.89608139, 0.99828964, 0.87239569, 0.87540604,
0.76152791, 0.82501538, 0.91656546, 0.74389243, 1.07923575,
1.00241137, 1.05628365, 1.04407879, 0.90048788, 1.1134027,
0.89745966, 0.96534, 0.71151925, 0.91798511, 0.7337992,
0.83636115, 0.75279928, 0.95570185, 0.89073922, 0.90307955,
0.8030445, 0.84374939, 0.89769981, 0.99002578, 1.01849373,
0.92436541, 0.79675699, 1.03910383, 1.07487895, 0.8906169,
0.97729004, 0.97284392, 0.76338988, 0.82756432, 1.12289431,
0.9582901, 0.97160038, 0.90141331, 0.83271234, 1.16065947,
0.90605662, 1.13389282, 0.8557889, 0.77149889, 0.9462268,
0.95908887, 1.03399986, 0.92795031, 0.73529029, 0.93630494,
0.96730298, 1.05490026, 0.93313995, 0.96980639, 0.9177592,
0.95483326, 0.85262905, 0.95170479, 0.9601628, 0.94878173,
0.87627934, 1.00561764, 0.83441231, 0.90890643, 0.97177858,
1.26394809, 0.80773622, 0.72205262, 0.87692143, 1.01842034,
0.98128171, 1.10776014, 0.94400422, 0.92697961, 0.79523284,
0.8609763, 0.96303262, 1.17190075, 1.01259271, 1.04973619,
0.94837034, 0.86592734, 0.85908444, 1.14914962, 0.98113587,
1.03070712, 0.89916573, 0.90618114, 0.93223156, 0.96031901,
0.94162334, 0.98908438, 0.95170104, 0.95056422, 0.81782932,
0.81770133, 1.32039255, 1.28822384, 0.82916292, 1.01626284,
0.97537737, 0.83235746, 0.78645733, 0.77916206, 0.93591612,
0.8469273, 0.74309279, 0.91331015, 1.11240033, 1.41018987,
0.95320314, 0.95807535, 0.89382722, 0.9259679, 0.92570222,
0.84567759, 0.82332966, 0.98371126, 1.00248628, 0.72107053,
1.09687436, 0.78399705, 0.85224803, 0.92151262, 0.85618586,
0.88485527, 0.954487, 0.86659146, 1.12800711, 0.93019359,
0.91388385, 0.95298992, 0.96834137, 0.90256791, 1.01222062,
0.84883116, 1.01234642, 0.91135106, 0.83362478, 0.94928359,
0.82247066, 0.7671973, 0.85663382, 0.88838144, 0.92491567,
0.88698604, 0.87485584, 1.08494606, 0.96431031, 1.06243095,
1.14062212, 1.02081623, 0.72229471, 0.82390737, 0.86599633,
0.95284398, 0.87238315, 1.02818071, 0.98462575, 0.81992808,
1.01207538, 1.0081178, 0.88458825, 1.01726135, 0.97708359,
0.79820777, 1.06081843, 0.97028599, 0.95203124, 1.00482088,
0.71764193, 0.88115767, 0.90628038, 0.97304174, 0.77015983,
1.06109546, 0.89575454, 0.94824633, 0.93822134, 0.98048549,
0.812265, 0.95744328, 0.79087999, 1.0222571, 0.89100453,
1.03590214, 0.92699983, 0.86840126, 0.99455198, 0.87912973,
0.93506231, 0.80706147, 0.89931563, 0.7861299, 0.89253527,
0.90052785, 0.82420191, 0.97042004, 1.03249619, 0.92354267,
0.80482118, 0.9007601, 0.80123508, 0.82285143, 0.88105118,
1.03519622, 0.8620259, 0.96447485, 0.80399664, 1.00324939,
0.96317193, 0.83260244, 0.98561657, 0.88445103, 0.70777743,
0.81608832, 0.98073402, 1.1206105, 0.69903403, 0.84353026,
0.9064964, 0.97055276, 0.82747966, 0.85400205, 1.01205886,
0.85324973, 0.90899616, 0.92797575, 0.94646632, 0.89358892,
0.7981183, 0.96559671, 0.88352248, 1.09804477, 0.79152196,
1.1054838, 0.93272283, 0.96165854, 0.8899703, 0.8792494,
0.74563326, 0.85371604, 0.87760912, 0.87184716, 0.92049887,
0.99459292, 0.93699011, 0.90492494, 1.12981885, 1.10621082,
0.91391466, 1.05207781, 1.13395097, 0.87022945, 0.93165871,
0.89083332, 0.99584874, 0.98626911, 1.13885184, 1.17350384,
0.93294232, 0.79602714, 0.93670114, 1.09726582, 1.05378961,
0.9457279, 1.03257053, 1.11349021, 0.80111296, 0.96415105,
0.99447221, 0.75745769, 0.77537636, 0.83860967, 0.90122484,
0.78850128, 1.19877642, 0.91190085, 0.80851919, 0.79484738,
0.93093657, 0.87619908, 1.22781715, 0.89734952, 0.8678127,
0.76177975, 0.82089769, 0.89288915, 1.01603179, 0.95279916,
0.84037366, 0.99962719, 0.84298093, 0.77234882, 0.99876963,
1.01856707, 1.2133211, 0.73822878, 0.83465671, 1.08879938,
0.8878534, 1.24133317, 0.89264527, 0.83938655, 1.03853109,
0.9842176, 0.94257497, 0.98282054, 0.90632313, 0.75810741,
1.02540204, 0.86648513, 0.98430307, 0.84561701, 1.13483974,
1.12446434, 1.00220923, 1.23248603, 0.98999724, 0.81980761,
0.91334393, 0.92831557, 1.16798373, 0.8888053, 0.9319632,
0.89206108, 0.86764558, 0.69337981, 0.9021983, 1.09931186,
1.15290804, 0.62304114, 1.1205393, 1.27030677, 1.12718725,
0.93002501, 0.83367301, 0.96589068, 0.86578968, 0.79204086,
0.85124905, 0.89121046, 0.96406141, 0.99249204, 0.93363878,
1.11258502, 0.92020983, 1.16020824, 0.99075915, 0.73994574,
0.9335638, 0.97410789, 1.00029038, 1.43611904, 0.93089581,
0.94758878, 0.84808364, 0.92192819, 1.0249259, 0.69529827,
0.94629021, 0.7330735, 1.07902207, 0.93022729, 0.77375973,
0.95019291, 0.92333668, 0.81483081, 0.78044978, 0.85101115,
0.88859716, 0.88720344, 0.89291167, 1.10372601, 0.91132273,
1.04156844, 0.94867703, 0.83546241, 0.84227545, 0.97043199,
0.73281541, 0.74512501, 0.9128489, 0.99223543, 0.7319106,
0.93065507, 1.07907995, 0.86895295, 0.84344015, 0.89394039,
0.88802964, 1.00580322, 1.04286883, 0.82233574, 1.0279258,
0.97550628, 1.03867605, 1.10231813, 0.9642628, 0.91684874,
1.11066089, 0.99439688, 0.88595489, 0.88725073, 0.78921585,
0.80397616, 0.71088468, 0.98316478, 0.72820659, 0.96964036,
1.03825415, 1.01438989, 1.02763769, 1.29949298, 1.06450406,
0.86198627, 0.85588074, 0.90445183, 1.01268187, 0.87927487,
0.9263951, 0.93582126, 0.88738294, 1.20707424, 0.92887657,
0.97891062, 0.92893689, 0.84846424, 0.96287008, 0.99565057,
0.93483385, 1.21357183, 0.82369562, 0.65144728, 1.11249654,
0.7785981, 0.88248898, 0.8953217, 0.95884666, 0.77538093,
0.82272417, 0.91073072, 1.17185169, 0.99645708, 0.88693463,
0.90293325, 0.93368474, 0.87575633, 1.01924242, 0.80011545,
0.99762674, 0.75834671, 0.91952152, 0.86754419, 0.81073894,
0.8880299, 0.74868718, 0.99979109, 0.90652154, 0.92463566,
0.93894041, 0.92370595, 0.88766357, 1.04614978, 1.77193759,
0.85480724, 0.85208602, 0.96154559, 0.95832935, 0.84210613,
0.9604567, 0.88597666, 1.0010723, 0.91890105, 1.10529207,
0.91123688, 0.88466788, 1.09759195, 0.8946647, 0.78066485,
1.04376296, 1.02951755, 0.88455241, 0.99284282, 0.82423576,
0.80612213, 0.80915541, 0.9482253, 0.8887192, 0.86163309,
0.891385, 0.84850622, 1.03353375, 1.09248204, 1.05337218,
0.85927317, 0.89167858, 1.04868715, 0.92933249, 1.1177299,
0.99846776, 0.82418972, 0.86041965, 0.88015748, 0.89785813,
0.85997945, 0.97102367, 0.86679181, 1.00848475, 0.9091588,
0.92565039, 0.84019067, 0.86978485, 1.21977681, 1.14920817,
1.05177219, 0.84202905, 0.85356083, 1.01379321, 0.93364219,
1.01999942, 0.85906744, 0.98178266, 0.87218886, 0.93983742,
0.79713053, 1.01123331, 0.86551625, 0.81983929, 0.86782985,
0.86735664, 1.43316935, 0.8490094, 0.99909103, 0.85715326,
0.89452366, 1.08380518, 0.74686847, 1.62233058, 0.81046611,
0.83563461, 0.96925792, 0.82863186, 0.87147202, 0.92609558,
0.8879082, 0.93933353, 0.90043906, 0.81677055, 0.78016427,
0.68871014, 0.83329967, 0.81570171, 0.89780443, 0.81337668,
1.00772749, 0.96220158, 0.90035459, 1.06031906, 0.85832752,
0.93636203, 0.96336629, 0.94686138, 0.98499419, 0.87223701,
0.96079992, 0.81302793, 0.99287479, 0.99369685, 1.21897038,
0.94547481, 0.80785132, 1.02033902, 0.93270741, 0.90386512,
1.05290969, 1.08873223, 0.81226537, 0.87185463, 0.96283379,
0.95065022, 1.07603824, 1.22279786, 0.83749284, 0.93504869,
0.93554565, 0.95255889, 0.96665227, 0.92370811, 0.76627742,
1.14267254, 0.98268052, 1.10017739, 0.79569048, 0.86494449,
1.17939799, 0.80655859, 0.76799971, 1.0018905, 0.83051793,
1.37419036, 1.10424623, 0.93729691, 0.99655914, 0.94900303,
1.157402, 0.93397459, 0.8133195, 0.8592273, 1.024661,
0.83708977, 1.06537435, 0.93561942, 1.00402051, 0.68981047,
0.92807172, 0.72192097, 1.232419, 0.97080757, 0.90350598,
0.95122672, 1.04663207, 0.79080723, 0.8421381, 1.01956925,
0.93307897, 0.88011784, 0.78674974, 0.97537097, 0.7582792,
0.85704507, 0.97683858, 0.7739793, 0.96245444, 0.99506991,
0.76853035, 0.90875698, 0.97951121, 0.93350388, 1.16380858,
0.8154485, 1.16902243, 0.98644779, 0.969998, 0.73120517,
1.19059456, 0.85953661, 0.99193867, 0.88144929, 0.99254885,
1.02956121, 0.90689455, 0.89494433, 0.85625065, 0.86227273,
0.99830845, 0.97635222, 0.83420327, 1.02359646, 0.93694813,
0.88462353, 0.97040788, 1.02543309, 0.91904348, 1.2527365,
0.82235812, 0.92026753, 0.93935859, 0.88919482, 1.00405208,
1.06835782, 1.34738363, 0.97831176, 0.92053317, 1.09692339,
0.86156677, 1.02455351, 1.25572326, 0.89721167, 0.95787106,
0.85059479, 0.92044416, 0.99210399, 0.94334232, 0.76604642,
0.8239008, 0.70790815, 1.06013034, 1.12729012, 0.88584074,
0.91995677, 0.82002708, 0.91612106, 0.86556894, 0.88014564,
0.95764757, 0.96559535, 0.97882426, 0.70725389, 0.9273384,
0.86511581, 0.85436928, 1.26804081, 1.02018914, 0.95359667,
0.89336753, 0.91851577, 0.78166458, 1.02673106, 1.01340992,
1.34916703, 0.77389899, 1.12009884, 0.94523179, 0.87991868,
0.82919239, 0.98198121, 0.83653977, 0.91748611, 1.0642761,
0.86964263, 0.86304793, 1.11500797, 0.7234409, 1.00464282,
1.01835251, 0.73389264, 0.88471293, 0.85754755, 1.05383962,
0.73121546, 0.85445808, 0.768308, 0.81396206, 1.01261272,
0.76696225, 1.01770784, 0.76742866, 0.98390583, 0.96277488,
0.87998292, 0.85264282, 1.12704234, 0.79612317, 0.92206712,
1.09846877, 0.99874997, 0.87707457, 1.03404785, 1.00726392,
0.91613763, 0.74242708, 0.80247702, 0.90702146, 0.81638055,
0.78507729, 1.00066404, 0.84687328, 0.76488847, 0.89697089,
0.82524207, 0.84940145, 1.022041, 0.75856559, 1.15434195,
1.09781849, 0.93256477, 0.96021119, 1.00796782, 0.88193493,
0.87902107, 0.82245196, 1.04739362, 1.133521, 0.82969043,
1.01007529, 1.07135903, 0.981338, 0.86178089, 0.77930618,
0.82512349, 1.2017057, 1.30452154, 1.12652148, 1.03670177,
0.90631643, 0.74222362, 0.84452965, 0.86366363, 0.79192948,
1.10288297, 0.9554774, 1.00912465, 0.95545229, 0.93584303,
0.91604017, 0.91681165, 0.76792072, 1.66615421, 0.99044246,
1.05068209, 0.88197497, 0.91153792, 0.82702508, 0.95182748,
1.05320356, 0.8466656, 1.01676717, 0.65881123, 1.02589358,
1.03902555, 1.00199915, 1.03022137, 0.93427176, 0.94600332,
0.94594696, 0.86465228, 0.91241272, 0.72232997, 0.93380167,
1.1960032, 0.87463367, 0.78428202, 0.88088, 0.97202961,
0.99425528, 0.89567214, 0.84908979, 0.81004889, 0.85484368,
0.68478631, 0.96563032, 0.78298607, 0.71894276, 0.88632131,
0.8885966, 0.99235811, 0.84002222, 0.91265424, 0.91999157,
0.89786651, 1.18062511, 0.92378385, 0.82501238, 1.09009807,
0.96787582, 1.12456979, 0.86339677, 0.8786218, 0.89865768,
1.02943564, 0.98886502, 0.97135566, 0.95914954, 1.05080931,
0.76554446, 0.80142172, 0.99661393, 1.14749469, 0.93695459,
0.95769957, 1.00811373, 1.00352699, 0.98747546, 0.99436785,
1.10256609, 0.84366101, 0.85931876, 0.90745126, 1.04928733,
0.84499693, 1.14018589, 1.2337188, 0.90516077, 0.84991869,
0.72984467, 0.9729476, 0.97483938, 0.88626286, 1.02838695,
0.89750089, 0.80324802, 1.40726294, 0.91149383, 0.86837826,
1.21798148, 0.96459285, 0.71897535, 0.76230781, 0.88042964,
0.8205186, 1.0517869, 0.74269565, 0.98278109, 1.1454159,
1.03806052, 0.75238659, 0.94224089, 0.94931526, 1.24018529,
0.99048689, 0.88108251, 0.81008694, 0.95443294, 0.99975781,
0.83336879, 0.74422074, 0.87934792, 0.81994499, 0.98684546,
0.82176924, 0.91652824, 0.77571479, 0.77039071, 0.9951089,
0.92896121, 0.96234268, 1.00295341, 1.01455466, 0.75014075,
0.95568202, 0.80995874, 1.24671334, 0.89480962, 0.81300194,
0.76967074, 0.92514927, 0.89610963, 0.97441759, 1.19354494,
0.87041262, 0.97344039, 0.88983828, 0.91614149, 0.85782814,
0.78403196, 0.96665254, 0.91000054, 0.78641804, 0.96920714,
0.89670528, 0.79247817, 1.04189638, 0.86777037, 1.18686087,
0.79506403, 0.92389297, 0.76211023, 0.93617759, 0.91879446,
0.8207635, 0.78984486, 0.93005953, 0.78743101, 0.9814347,
0.94882561, 0.9577075, 0.81121566, 1.01025446, 0.90587214,
0.94842798, 0.8811194, 1.01942816, 0.94698308, 0.92603676,
0.86119014, 0.97543551, 0.84730649, 0.77552262, 0.97536054,
0.96944817, 0.8736804, 0.86809673, 0.98134953, 1.16303105,
0.81534447, 1.35930512, 0.83221293, 0.94136243, 0.76926289,
1.05844282, 0.87783288, 0.78921971, 0.84360428, 0.78722128,
1.00022607, 0.96779519, 0.95891975, 0.91900001, 1.07307813,
1.03713093, 0.96257742, 0.90363152, 0.88729834, 0.91929215,
1.00508255, 0.80838454, 0.92165553, 0.94513005, 0.95429071,
0.80829571, 0.79531708, 1.01317347, 0.75337253, 0.85965134,
0.77014567, 0.77680991, 0.77158741, 0.88882588, 0.91466414,
0.82815897, 0.80251251, 1.04901425, 1.03386161, 1.3267075,
1.12457236, 0.8267327, 0.89313417, 0.85992512, 0.93482733,
0.83456348, 0.87991138, 0.8110149, 0.77913188, 0.89391799,
0.73646974, 0.87038816, 0.99533506, 0.90744083, 0.98175496,
1.17458551, 0.86718975, 0.93125366, 0.76131575, 0.90419708,
0.95122171, 0.97531776, 1.05955142, 0.94714906, 0.79360281,
1.02765349, 0.85192628, 0.84680852, 0.85470655, 0.94950982,
0.75868699, 0.89731933, 1.00736877, 1.05171121, 0.73336848,
0.97323586, 0.9848978, 1.27418684, 0.83954394, 0.73979357,
1.06785996, 0.97832832, 0.7903268, 0.76600605, 0.94906446,
0.81383465, 0.83620612, 1.00573379, 0.86359645, 0.9962139,
0.98779432, 1.13793814, 1.02764992, 0.9070168, 0.81340349,
0.94807089, 0.90499083, 0.83805736, 0.99623054, 0.91875275,
0.95603557, 0.93156095, 0.83858677, 1.03667466, 1.01436655,
0.85551979, 0.76227045, 0.84743986, 0.88487423, 0.93800365,
0.8984666, 0.92600404, 0.89230381, 1.34625848, 1.10026015,
0.9314026, 0.82450724, 1.0299575, 0.98494286, 1.07564492,
0.96565301, 0.89677015, 1.15236174, 0.85476951, 1.00169288,
0.90520725, 1.06235248, 1.04267637, 0.8311949, 0.82017897,
0.81635968, 0.97246582, 0.84554172, 0.85409644, 1.18006461,
0.96488389, 0.69228637, 0.97812108, 0.91764623, 0.86250551,
0.91067775, 1.04692847, 0.94594707, 1.04351374, 0.9861303,
0.92192581, 0.835444, 0.84362223, 1.13770705, 0.8075574,
1.02260109, 1.13786456, 0.80862839, 0.89291687, 0.90278047,
1.11613951, 1.29900454, 1.5622857, 0.70999772, 0.99692653,
0.89109939, 0.77506441, 0.86054356, 0.99498141, 0.84222293,
0.95213508, 0.91438286, 0.89305591, 0.9716793, 0.88609491,
1.00275797, 0.90086022, 0.75336995, 1.1572679, 0.75952094,
0.89203313, 0.82115965, 0.81459913, 1.02943406, 0.67063452,
1.08707079, 0.92139483, 0.89855103, 0.89910955, 1.07169531,
0.93684641, 0.84893365, 1.08659966, 1.43385982, 0.94788914,
0.95277539, 0.94709274, 1.08412066, 0.90274516, 0.85147284,
0.89327944, 0.92176174, 0.83820774, 0.90981839, 0.82303984,
0.95189716, 0.95154905, 0.73628819, 1.18956148, 1.20224654,
0.97666968, 1.08057375, 0.90369444, 0.98589538, 0.81426873,
0.75127684, 0.93200745, 0.833666, 0.79532088, 0.91965037,
0.99540522, 0.75449668, 0.85698312, 0.79328453, 0.94667443,
0.7637764, 0.77203985, 0.73841377, 0.98587851, 1.34642268,
0.78002774, 1.04356217, 1.02266882, 1.08936378, 0.9794388,
1.07623423, 0.78069571, 1.12194495, 0.8072132, 0.91672662,
1.36102062, 0.86933509, 1.15282756, 1.06219505, 0.80295502,
1.00999033, 0.69418333, 0.93678452, 1.13002256, 0.91465628,
0.73558316, 1.1302073, 0.85856238, 0.89450543, 1.11812369,
0.75891878, 0.66859534, 0.97445338, 0.82210227, 0.76292085,
0.79289499, 1.04380135, 0.95586226, 0.87480096, 0.81244036,
0.86097575, 0.84111811, 0.85369732, 0.99160655, 0.90911501,
0.81315845, 0.74037745, 1.04369233, 1.03535223, 1.18886682,
0.87092491, 0.93562683, 0.92555142, 0.95268616, 0.9653025,
0.93447525, 0.9043932, 1.25701034, 1.10354218, 0.96588129,
0.94717991, 0.97010307, 0.78264501, 0.80991731, 0.98540974,
0.83174886, 0.66966351, 1.01747376, 1.21553117, 0.80527296,
1.06556826, 1.00870321, 1.03316522, 0.88994006, 0.89092714,
0.94119254, 0.83930854, 1.01500087, 1.03581272, 0.97608081,
1.11919255, 1.16586474, 0.85064102, 1.06070274, 1.00679658,
0.75848826, 0.97969353, 0.94834777, 1.64970724, 0.82448941,
1.02236919, 0.95252025, 0.98638842, 0.89094895, 0.95522527,
0.91533774, 0.83716951, 0.92612154, 0.8662328, 0.9675949,
0.96758398, 0.84309291, 0.95071171, 1.0165785, 0.96628063,
1.00096151, 0.83175371, 0.79063043, 0.97371271, 0.76009001,
1.02409279, 0.97232166, 0.8480577, 0.8982739, 0.9959743,
0.96604729, 0.8681602, 0.99850841, 0.96162481, 1.01259965,
0.98580061, 0.82751273, 0.90469122, 0.98254028, 0.78012425,
0.87023012, 0.96830515, 0.9415831, 0.8591063, 0.82961507,
0.89166083, 0.88509907, 0.95987837, 1.12356244, 0.71406404,
0.99047619, 0.93735587, 0.80540831, 1.0024624, 0.95179491,
0.83602101, 0.90343297, 0.90510417, 0.96477126, 0.79995299,
0.93123762, 0.73763362, 1.0619498, 0.80929865, 0.86110233,
0.84552556, 0.9943, 0.97085623, 0.75751174, 0.9201456,
1.02268858, 0.9642899, 0.79078558, 1.03160502, 0.85200219,
1.02246639, 1.08771483, 0.81997868, 0.82499763, 0.92767703,
1.06700018, 0.7882174, 0.7789828, 0.89096139, 0.73155973,
1.01717651, 0.91889525, 0.93256065, 0.84716063, 1.00965969,
0.74505112, 0.80104245, 0.76003901, 0.96662605, 0.96594583,
1.04571121, 0.97700878, 0.85461917, 0.9150222, 0.89110471,
1.11183096, 0.98143747, 1.02346975, 0.9059266, 1.00771483,
0.96336096, 0.93783898, 0.90545613, 1.10404183, 0.75297691,
0.92548654, 0.79889783, 0.88177552, 0.93896814, 0.87309811,
0.80691061, 0.89725699, 1.16586955, 0.98948281, 0.94524894,
0.86085608, 0.76716851, 0.85362573, 1.09936882, 0.9328761,
0.74819673, 0.94331186, 0.81077304, 0.88610499, 1.01452015,
0.91513953, 0.92846128, 0.93539081, 0.8946682, 0.9270336,
0.96673629, 0.9897488, 1.11891899, 0.87551585, 0.85854576,
1.13458763, 1.11450768, 0.79887951, 1.091154, 1.04180374,
0.79252573, 0.90484245, 0.94221016, 0.95721137, 0.86776103,
0.97167404, 0.83404166, 0.94634038, 0.98907413, 0.92321459,
1.03547804, 0.79660212, 0.94870239, 0.70027204, 0.79841059,
0.92563393, 1.4385341, 0.8331731, 0.844816, 0.97851389,
1.24048695, 0.83765698, 0.83600835, 1.13901283, 1.05994936,
0.84292427, 0.86759056, 0.9272156, 0.77375499, 0.99972839,
0.95570976, 0.97879539, 0.95528351, 0.84555495, 0.95296134,
0.87469056, 0.78862024, 0.793795, 0.8516853, 0.92816818,
1.02492208, 0.8037345, 0.95481283, 0.75138828, 0.72110948,
1.36815666, 0.9661646, 0.81651816, 0.87764538, 0.97397297,
0.99845266, 0.77433798, 0.9266279, 1.92493013, 1.07588789,
0.90412593, 1.03165475, 1.00826548, 0.75500744, 0.87198881,
0.86871262, 0.97854606, 0.80954477, 0.84130266, 0.89674826,
1.43926644, 0.74873088, 1.01894282, 0.93606154, 1.08241489,
0.76626357, 0.97434747, 0.82824599, 1.00267494, 0.97168761,
1.06433173, 1.22741978, 1.46998419, 0.9521923, 0.98276685,
0.92422781, 1.14241216, 1.13339577, 1.05586816, 1.04923068,
0.83364505, 0.98007268, 0.94322393, 0.84310173, 1.03481955,
1.18281181, 0.79807678, 0.840274, 1.00344058, 1.09442855,
0.88033836, 0.86189964, 1.1395012, 1.18808865, 0.78667714,
1.09323293, 0.81511099, 0.95830848, 0.99637275, 0.9146258,
0.96358155, 0.79048719, 0.80395604, 1.00828722, 0.92872342,
0.98789363, 0.96720252, 0.80541021, 0.73697557, 0.86692999,
0.86795696, 1.1516694, 0.95911714, 1.13981603, 1.02002866,
0.90808456, 0.94208296, 0.93691739, 0.87653118, 0.72824225,
0.78177906, 1.2139146, 0.83405505, 0.91764545, 0.83318595,
0.77930256, 0.86499397, 0.95599882, 0.73850016, 0.9630604,
0.97913407, 1.1790714, 0.94994057, 1.04379512, 0.80815459,
1.16560205, 0.97486893, 1.02780804, 1.10633754, 0.78679252,
0.94643528, 1.19999119, 0.98621069, 0.8899674, 0.89235261,
0.8728921, 0.77089094, 0.8492628, 0.86905159, 0.90741875,
0.81065291, 0.91208596, 1.04616696, 1.24291958, 0.98628605,
0.99751975, 0.83249612, 0.96343385, 0.77862866, 0.72381238,
1.17384381, 1.06013687, 0.73460652, 1.09554763, 0.82015886,
0.90862905, 0.89037104, 0.7866143, 0.8570287, 0.75061334,
0.94950855, 0.8091383, 1.04055212, 0.96679573, 0.78338675,
0.75968533, 1.00495071, 0.6491633, 1.02802735, 1.00725883,
0.89333988, 0.87539291, 0.99374251, 1.10241119, 1.14935785,
0.9369769, 0.84772646, 1.05024743, 0.97411124, 0.76972352,
0.92161017, 0.88689841, 0.78598549, 0.93400036, 1.14699647,
0.98636563, 0.93051079, 1.00131515, 0.82749213, 0.96665447,
0.84457933, 0.95172036, 0.86372572, 0.97034285, 0.99877807,
0.8724721, 0.86281118, 0.96253742, 1.13485439, 1.03410559,
0.83113167, 1.02644607, 1.0669284, 0.947969, 1.13373538,
0.85495039, 1.15829218, 0.72662405, 0.81755747, 0.78381403,
0.84360371, 1.10945791, 0.80215303, 0.8861351, 0.97484684,
1.02996282, 0.86219328, 0.95675062, 1.10753315, 0.92496918,
0.79323289, 0.76891191, 0.93106762, 0.94523682, 0.9534338,
0.8954424, 0.81732651, 1.00443776, 0.96178195, 0.89727229,
0.88917552, 0.88660003, 0.941933, 1.03900381, 0.75262915,
0.94265862, 0.84472046, 1.09834757, 0.81516259, 0.90865634,
0.9582531, 0.99819053, 0.8815072, 0.92425525, 0.79085083,
0.98173446, 0.95199169, 0.71653726, 1.11863725, 0.97855807,
0.87873181, 1.37925403, 0.8085008, 1.40027689, 0.79367826,
0.82070449, 0.87039383, 0.95896081, 0.75617612, 1.3196712,
0.9335008, 0.9461447, 1.0838461, 0.83347962, 0.69558254,
0.92358528, 0.99423247, 0.94884494, 0.75094955, 0.90429063,
1.13740548, 0.89354463, 1.13094104, 1.7373979, 0.87808028,
0.72820621, 1.02995089, 0.80134468, 0.97511989, 0.93823103,
0.98097787, 0.73179813, 0.93764192, 1.04399599, 0.95644709,
0.80476939, 0.87463727, 0.83220517, 0.76978546, 0.97056432,
1.1693819, 1.0368387, 0.98606478, 1.03538075, 0.88253058,
0.91105775, 0.93745618, 0.80272442, 0.77045021, 0.8482449,
1.04505306, 0.90427753, 0.706451, 1.02687396, 0.82931474,
1.24255717, 0.91343217, 0.8692726, 0.98422894, 0.82142068,
0.86854354, 0.77715916, 0.94490329, 0.97686366, 1.05198512,
0.888989, 1.09252847, 0.8034292, 1.04727187, 0.87246831,
0.89474556, 1.06031526, 0.93056174, 0.7747956, 0.87772054,
1.1183045, 0.78938083, 0.82019511, 0.82553273, 1.04324276,
0.7676436, 0.68914756, 0.88400598, 0.79611901, 0.77011016,
0.76727015, 0.84523666, 1.09972447, 1.03942974, 1.07322466,
1.01079248, 1.03469338, 0.90450148, 0.87367007, 0.88432601,
0.85312482, 0.7328442, 1.12256832, 0.8837547, 0.81023384,
0.87068285, 0.94466637, 1.13236695, 0.95958423, 0.8099625,
1.07509372, 1.03306035, 0.99385633, 1.06433672, 1.07385915,
0.92709455, 1.03502217, 0.88961476, 0.8307198, 0.98819038,
1.09916368, 0.8919766, 0.90349117, 0.97554616, 0.98376763,
0.89285893, 0.99941071, 1.16078972, 0.66336693, 1.16389515,
1.10395069, 1.20381952, 0.98928899, 1.17155389, 0.81707565,
0.82903836, 0.95892646, 0.8437454, 0.79017432, 0.81562954,
0.65169124, 0.87950793, 0.9017879, 0.82160564, 0.87079127,
0.88100146, 1.00783979, 0.84102603, 1.16817499, 0.97697533,
0.89115235, 0.77254376, 0.7679024, 0.97093775, 1.13881665,
0.90348632, 1.14654277, 1.08625707, 0.98787902, 1.49057495,
0.99639001, 0.97623973, 0.74807856, 0.76656108, 0.79095998,
1.04583503, 0.95124469, 0.90228738, 1.03129265, 1.02663212,
0.67704952, 0.95335397, 1.01726294, 0.78765385, 0.91140255,
1.04097119, 0.71881619, 1.14572601, 0.79708798, 1.07104057,
0.95925248, 0.72556831, 0.92256392, 1.08702165, 0.95977251,
0.99670254, 0.95276505, 1.15268752, 0.68215678, 1.05573208,
0.89672437, 0.89396611, 1.01814905, 0.81969778, 0.74390457,
1.20909881, 0.82388701, 1.00574083, 1.01348114, 1.01492015,
0.94759788, 0.99758684, 1.19912008, 0.92749943, 1.16660441,
0.97646538, 0.8189475, 0.97464158, 1.01050799, 0.94368665,
0.70995047, 0.94469581, 1.02534612, 1.3513094, 0.88081968,
1.00576693, 0.9695495, 1.0549135, 1.29993316, 0.91050559,
0.95543198, 1.02161725, 0.76895773, 1.03685293, 0.88201449,
0.90345561, 1.02793048, 1.00267831, 0.84653161, 0.9217411,
0.94666576, 0.94946561, 0.77482488, 0.94358305, 0.89779666,
1.01462131, 1.05829923, 1.13217729, 1.12260175, 0.89810828,
0.96305689, 0.90466377, 0.8091617, 0.93070824, 1.03997521,
1.04076373, 0.95858477, 0.94382748, 0.7585222, 1.22890096,
0.97300529, 0.87424719, 0.90435141, 0.91894865, 0.97819677,
0.80300175, 1.03729016, 1.19305569, 0.81633791, 0.7930351,
0.8141721, 0.86764479, 0.89207142, 0.89691482, 0.86243171,
0.91184679, 0.94284352, 1.01357831, 1.03806277, 0.92000143,
0.91018767, 0.90555137, 0.89089532, 1.3530331, 0.96933587,
0.82350429, 0.71549154, 1.13399156, 0.87838533, 0.99177078,
0.93296992, 1.43078263, 0.90278792, 0.85789581, 0.93531789,
0.84948314, 0.95778101, 0.80962713, 0.88865859, 1.15297165,
0.85695093, 0.88601982, 0.96665296, 0.9320964, 1.04193558,
1.006005, 0.78939639, 0.79344784, 0.87012624, 0.8532022,
0.93351167, 0.91705323, 0.74384626, 0.84219843, 0.78265573,
1.07759963, 1.0236098, 1.00202257, 1.18687122, 1.00869294,
0.8809502, 0.76397598, 0.81845324, 0.97439912, 1.10466318,
1.10678275, 0.96692316, 0.84120323, 1.13151276, 0.72574077,
0.82457571, 0.8179266, 1.01118196, 0.84303742, 0.86255339,
1.03927791, 0.82302701, 1.03586066, 0.75785864, 0.9186558,
0.97139449, 0.92424514, 1.00415659, 1.08544681, 0.80940032,
0.9073428, 0.83621672, 1.04027879, 0.79447936, 0.94829305,
1.16176292, 1.11185195, 0.88652664, 0.98676451, 0.89310091,
0.72272527, 0.79963233, 0.94651986, 0.91540761, 1.0498236,
0.84938647, 1.15539602, 1.03118991, 0.86565049, 0.77764016,
0.77866522, 0.78008955, 0.89062575, 0.81285464, 0.92554114,
1.08747324, 0.84338687, 0.76746516, 0.99205474, 0.86649541,
0.97586166, 0.9721711, 1.14895298, 1.04659345, 1.0605085,
1.06392238, 1.08286448, 0.93612266, 0.82545354, 0.84305431,
0.83650404, 1.11073704, 0.91760695, 0.83281572, 0.84244131,
1.05843708, 0.94695861, 0.95469608, 0.96038612, 0.81373042,
0.94943303, 1.00824522, 0.86416102, 0.87121008, 1.04208739,
0.81171276, 1.12798927, 0.99122576, 0.80626996, 1.07103151,
0.99809277, 1.08490135, 0.9441509, 0.98766371, 1.33205139,
0.92145678, 0.88112784, 0.9297591, 1.17549838, 0.8481953,
0.96359948, 0.98478935, 0.77028684, 0.86408555, 0.92863805,
0.94593549, 0.78705212, 1.1923026, 0.9983487, 0.99152533,
0.95313678, 1.01847515, 1.05728959, 0.88009142, 1.00351951,
1.00549552, 0.81671365, 0.90545602, 0.77895202, 0.82217088,
0.94838645, 0.85928327, 0.90729044, 0.92975916, 0.91946285,
0.80537364, 1.11885357, 0.84691232, 0.85356231, 0.85102988,
1.06499659, 1.0242127, 0.91245632, 0.83131215, 0.72151085,
0.9295769, 0.89549018, 0.87914839, 0.93541175, 0.97319188,
0.791944, 1.08008186, 0.79549907, 0.90967683, 0.80506028,
1.1206821, 0.91258859, 1.24855319, 0.96112955, 1.14305514,
0.79327927, 0.84209204, 0.94494251, 0.89573237, 1.0571304,
0.94504292, 0.84446547, 0.92060829, 0.82347072, 0.86280426,
0.85516098, 0.78649432, 0.89522516, 0.94529795, 0.90322825,
0.9616288, 0.77439126, 1.0130917, 0.84021262, 0.97337238,
0.93206526, 0.93809914, 0.87626441, 0.92706652, 0.86819358,
0.74060652, 0.84046045, 0.94130171, 0.92537388, 0.80485074,
0.81633347, 0.76401825, 0.81300784, 0.8052467, 1.27234895,
0.92674704, 1.12106762, 0.91743016, 0.94694287, 0.87309918,
0.99163895, 0.83777703, 0.89713459, 0.88208343, 0.90205904,
0.9708827, 0.94965009, 0.81446019, 0.89512677, 0.97025135,
1.02314481, 0.88399736, 1.01059963, 0.86193889, 0.94621507,
0.97334837, 0.90122433, 0.71015398, 1.17491792, 1.13869784,
1.03908735, 0.85480742, 0.98971408, 1.04147459, 0.85170846,
0.94861439, 0.7778831, 0.73445723, 0.89587488, 0.88627975,
0.98253057, 0.86159356, 1.06559385, 0.90852704, 0.86562284,
0.92122779, 0.98233847, 0.94989946, 0.97171474, 0.92428639,
1.03712828, 0.88170861, 0.86802004, 0.79670394, 0.85606075,
1.09636421, 0.85048902, 0.99393971, 1.10510884, 0.80515088,
0.95559246, 0.96803475, 0.98115871, 0.94603995, 0.8654312,
0.90759845, 0.9010954, 0.77979965, 0.83322032, 0.8485444,
0.89217626, 0.78817966, 1.03815705, 0.84076982, 0.93362471,
1.06173045, 0.82612852, 0.8336989, 0.93943901, 0.91775212,
1.00501856, 1.04269442, 0.93195426, 0.78377288, 1.03372915,
0.8415154, 1.02888978, 0.93202174, 0.78683383, 0.85106996,
0.9724203, 0.93409182, 0.97876305, 1.17153649, 0.9434591,
0.81361398, 1.09554602, 1.48193137, 0.96349931, 0.93586569,
1.0210303, 0.88980694, 0.88890459, 1.05330284, 1.09511186,
0.91202441, 0.78753378, 0.98074421, 1.04268892, 1.14265114,
0.86482628, 0.87233851, 1.18915875, 0.82556032, 0.87461473,
1.08396187, 0.69206719, 0.88113605, 0.96951674, 0.89248729,
0.909926, 0.82966779, 0.8261611, 0.9551228, 0.79879533,
1.09416042, 1.01020839, 1.04133795, 1.09654304, 0.84060693,
1.02612223, 1.00177693, 0.90510435, 1.2091018, 1.03290288,
0.80529305, 0.74332311, 1.04728164, 1.04647891, 0.83707027,
0.81648396, 1.07180239, 0.7926372, 0.99855278, 1.16851397,
0.94566149, 0.75612408, 0.94975744, 0.92924923, 1.03215206,
0.82394984, 0.84142091, 0.88028348, 1.11036047, 0.82451341,
0.83694112, 0.84207459, 0.94095384, 1.00173733, 1.10241786,
0.86609134, 0.86859604, 1.1211537, 0.84188088, 0.89023025,
0.99062899, 0.96828743, 0.80106184, 0.86745454, 0.99013196,
0.91838615, 0.86400837, 0.95679525, 0.78893711, 1.03753175,
0.97177648, 0.88685941, 0.9441012, 0.69289996, 0.84219432,
1.01050959, 0.83578317, 0.79907595, 1.21281139, 0.91613925,
1.00202544, 0.95293036, 0.84583258, 0.84574886, 0.76470341,
1.23606485, 1.10063291, 0.93852084, 0.97201415, 0.68523403,
0.94560108, 0.81903039, 1.14332074, 0.80914367, 1.46398921,
0.85155227, 1.41106313, 0.85740937, 0.91107708, 0.9003576,
0.94132363, 0.85710825, 0.74805485, 1.2521402, 0.95307547,
0.94274593, 0.86732331, 0.83850172, 0.96835288, 1.09443821,
0.68532627, 0.84736457, 1.06989165, 0.81424504, 1.02942437,
0.80255995, 0.89258275, 0.93560962, 1.04192911, 1.13498644,
1.24409985, 0.93295415, 1.08360355, 1.16468059, 0.81482388,
0.92387137, 1.07508578, 0.86564567, 1.0142773, 0.86143907,
0.91214944, 0.9757589, 0.90588817, 0.74168224, 0.91222552,
0.96119617, 0.95431519, 0.78080736, 1.0327991, 1.05112022,
0.92761155, 1.0183631, 0.73188757, 0.85617225, 0.93341155,
0.95106173, 0.9481304, 0.92996766, 1.08092599, 0.96485228,
0.97964284, 0.94224551, 1.00654477, 1.01367565, 0.89785325,
0.80725703, 0.7495798, 0.78240339, 1.04479122, 0.88200252,
1.0664992, 1.05951775, 0.82508097, 0.81201381, 0.81860218,
1.07561763, 1.02830358, 0.87348993, 1.0081337, 0.87470565,
1.45597242, 0.77540871, 0.8036279, 0.80514427, 0.92688461,
0.88152328, 1.56288788, 0.87251203, 0.92808414, 1.03548911,
0.65226699, 0.81243827, 1.03103554, 1.11995602, 0.78956176,
0.96734427, 0.91600861, 0.8246106, 1.09390498, 0.98187349,
0.8919928, 0.98746862, 0.96298125, 0.93854424, 0.83060031,
0.74692856, 0.99757209, 0.78888849, 1.17517182, 1.06657933,
1.1244446, 0.93608433, 0.88898472, 0.96823218, 0.87496056,
0.81776683, 0.98863687, 0.82962648, 1.02395766, 0.99622674,
1.07138771, 0.86669915, 0.98172208, 0.8787271, 0.86125353,
0.79554881, 0.93382729, 1.00706175, 1.08386454, 0.69664542,
0.77316657, 0.79978147, 0.80764736, 0.9969375, 0.83554928,
0.91017317, 0.95323454, 1.29872357, 1.08851275, 1.01673108,
0.79536208, 0.84878371, 0.95165619, 0.87733936, 0.86319684,
0.96758495, 0.87763237, 0.95094713, 1.00143077, 1.0596993,
1.27278299, 0.82281481, 0.89765404, 0.94538181, 0.88161857,
0.77679456, 0.84274277, 0.89864342, 0.98705162, 0.95456512,
0.92712401, 0.77427128, 1.03292269, 0.87034158, 1.24316113,
0.98278702, 1.17325118, 1.18863971, 0.88678137, 0.90389731,
1.01740421, 0.80228624, 0.97742223, 0.82741518, 0.8359407,
0.7177401, 1.02297899, 0.81896048, 0.77127181, 0.83328601,
0.96939523, 0.94073198, 0.90356023, 1.12355064, 1.12811114,
0.92403138, 1.05423548, 0.70827734, 0.95891358, 0.89898027,
1.02318421, 0.93775375, 0.8245529, 0.80604304, 0.77555283,
0.92112699, 0.85662169, 0.92725859, 0.93599147, 0.78971931,
0.8337306, 0.93775212, 0.91025099, 0.75308822, 0.95391173,
0.96840576, 0.8394416, 0.89087015, 0.73703219, 0.97812386,
0.8787356, 0.93985266, 0.96406021, 0.88666152, 0.89242745,
0.97900374, 0.85697634, 0.8795755, 0.78581812, 0.87138735,
0.74602994, 0.96158936, 0.84529806, 0.85333232, 1.06116542,
1.05929382, 1.09720986, 1.28959453, 0.91541148, 0.87657407,
1.06514793, 0.8668096, 1.07325125, 0.85009534, 0.95542191,
0.86977409, 0.96249874, 0.97715908, 0.89360331, 0.98859647,
0.67560717, 0.90213348, 1.12051182, 0.99684949, 0.9863559,
1.32246221, 0.84632664, 0.89707447, 1.00486846, 0.90843649,
1.02399424, 0.97899017, 0.95693977, 0.8384806, 0.93927435,
0.79153251, 1.08694094, 1.01785553, 0.99674552, 0.898566,
0.94116882, 0.95224977, 0.99859129, 0.81125029, 0.85985586,
1.14418875, 0.96306241, 1.31398561, 0.77961419, 1.01958366,
0.9575668, 0.771084, 1.04473363, 1.01569517, 1.04560744,
0.9648178, 0.93466398, 1.09313672, 0.90349389, 1.00193114,
0.79991514, 0.91102351, 0.9795356, 0.89285193, 1.04898573,
0.93031782, 0.95087069, 1.15644699, 0.91155375, 0.93005986,
0.70098757, 0.82751625, 0.85462106, 1.34969332, 0.93382692,
1.05558387, 1.25417819, 1.0546501, 1.05217032, 0.86031346,
1.00864463, 0.73592482, 1.01899722, 1.00462831, 0.96882832,
0.81334751, 1.05102745, 0.82288113, 1.05798623, 0.77971966,
1.38584414, 1.0248193, 0.78951056, 0.76171823, 0.78407227,
1.14808104, 0.97890501, 0.99870905, 0.96006489, 0.78442704,
0.99315422, 0.83653213, 0.95210661, 0.97233777, 0.78140495,
0.95996216, 0.76318841, 0.82333311, 0.87123204, 0.79531258,
0.82681452, 1.00492217, 0.93549261, 1.00240153, 1.02086339,
1.00424549, 0.87437775, 0.84675564, 0.98014462, 0.77262117,
1.02620976, 0.91162462, 1.0275041, 1.1475431, 0.78167746,
0.86273856, 0.84499552, 0.99712362, 0.9694771, 0.94523806,
0.8450763, 0.93068519, 1.29362523, 1.0249628, 1.05522183,
1.13433408, 1.06981137, 0.85666419, 0.98203234, 0.75867592,
0.8844762, 0.89708521, 0.75482121, 0.80137918, 0.90412883,
0.88815714, 1.11497471, 0.77441965, 0.93853353, 0.8962444,
0.83055142, 0.99776183, 0.92581583, 0.78783745, 0.90934299,
0.81136457, 0.99000726, 0.9669203, 1.2890399, 1.01923088,
1.11076459, 1.01331706, 1.02470946, 0.92950448, 1.10298478,
1.03723287, 1.09129035, 0.95138186, 0.85764624, 0.86606803,
0.8141785, 1.0129293, 0.93267714, 0.95663734, 1.01940702,
0.8072268, 1.0707215, 0.90482063, 1.01546955, 0.84018308,
0.95938216, 0.96454054, 0.93114659, 1.09705112, 0.88720628,
0.81067916, 0.82667413, 0.89494027, 0.9173495, 0.73326273,
1.00209461, 0.9560545, 1.09126364, 0.95709908, 0.81314274,
0.8274943, 1.37605062, 0.99097917, 1.02221806, 0.90277482,
1.01611791, 0.79663017, 1.16686882, 1.19669266, 0.88366356,
0.77661102, 0.73467145, 1.15438391, 0.91439204, 0.78280849,
1.07238853, 1.03588797, 1.0438292, 0.75935005, 0.76200114,
0.81603429, 0.74402367, 1.1171573, 0.90227791, 0.94762351,
0.92462278, 0.8847803, 1.1343863, 0.8662186, 1.00410699,
1.05008842, 0.94783969, 0.89555844, 0.98278045, 0.80396855,
1.00483139, 0.82540491, 0.83284354, 0.93132265, 0.91191039,
0.95753995, 1.18260689, 0.84124197, 0.87429189, 0.67617592,
0.89495946, 0.92898357, 1.10528183, 1.06994417, 0.82259834,
0.74746328, 0.99070832, 1.07386274, 0.84007203, 0.89720099,
0.9670094, 1.02728082, 0.78001838, 0.97709347, 0.90602469,
1.49985196, 0.80256976, 1.05905677, 0.98298874, 0.94679703,
0.94305923, 0.98720786, 0.82091251, 0.91644161, 0.79576881,
0.98942172, 0.92974761, 0.99307545, 0.86959859, 0.88549807,
1.09246144, 0.87265047, 1.01449921, 0.74353851, 0.95029192,
0.94385304, 0.84779449, 1.00690543, 0.79727923, 0.92285822,
0.83164749, 1.06508941, 1.09757529, 0.9059649, 0.9146043,
0.74474669, 0.71306438, 0.77989422, 0.84965464, 0.9424323,
0.82492634, 0.85076686, 1.01110574, 1.01445751, 0.87929754,
0.8773275, 0.72314196, 0.92285502, 1.18173931, 0.86460799,
0.91795108, 1.16580482, 0.79880497, 0.72734786, 0.97579653,
0.76967834, 0.97543732, 1.04996964, 1.16439594, 1.08656546,
1.15644902, 0.98333436, 1.24374723, 0.95810117, 0.8488915,
1.06288523, 0.99055893, 0.75517736, 0.95856183, 0.85574796,
1.00426506, 1.25275675, 0.92735225, 0.83351314, 0.90216604,
0.87996386, 1.13312875, 1.00891523, 0.76513657, 0.85659621,
0.91142459, 1.05893495, 0.92253051, 0.87153684, 1.03190013,
0.92160845, 1.01768282, 0.80590054, 1.05172907, 0.92758177,
0.86902046, 0.93927127, 0.80389584, 0.96016014, 0.9720314,
0.93255573, 0.85792534, 0.97826842, 0.80506149, 0.97170364,
1.08397772, 1.01866333, 1.18898045, 1.02855427, 0.94848891,
0.94336541, 0.93119013, 0.92907817, 1.11806635, 0.88409637,
0.88809707, 1.06735612, 0.98447974, 0.88816438, 1.00099784,
0.92443453, 1.00325146, 0.86977836, 0.84621801, 0.92361073,
0.85573903, 0.77309241, 0.86717528, 1.19892035, 1.07497019,
1.02178857, 0.8718756, 0.90646803, 0.92912096, 1.04538692,
0.95245707, 0.99698525, 0.94583199, 0.92537599, 0.86720487,
0.89927054, 0.86111792, 0.94401208, 1.01130191, 1.03759681,
0.8177749, 1.07784373, 0.79823294, 1.00839713, 1.39409602,
0.87146241, 1.21218822, 0.84895926, 1.01742432, 0.8044077,
0.78632084, 1.07751744, 1.13147508, 0.90268302, 0.90024653,
0.92072578, 0.87763264, 1.00736787, 0.90978808, 0.90895492,
0.90766826, 0.98956566, 0.92075658, 0.77613105, 0.93815569,
0.95455546, 1.00607757, 0.82187828, 0.94197599, 0.867015,
0.90709762, 0.75604815, 0.91312261, 0.9286002, 0.74623204,
0.87368702, 0.83879278, 0.92224793, 0.81676402, 0.90355168,
0.92762955, 0.91784037, 0.82273304, 0.75947806, 0.92687078,
0.87971276, 1.15037445, 0.86707445, 0.8611453, 0.91921763,
1.07088129, 1.05150864, 1.02162325, 0.90305964, 0.99912687,
0.87693204, 0.6186911, 0.95526533, 1.15975655, 1.00061222,
0.74608861, 0.954568, 0.84965574, 0.79177899, 0.9741051,
1.0119514, 0.79147502, 0.81367071, 0.87757421, 1.01270813,
0.86044808, 0.9689615, 0.9577413, 0.79480242, 0.76073002,
0.83131288, 0.96379259, 0.84679732, 0.82508685, 0.89977283,
0.86766439, 1.12231836, 0.93058445, 1.04584181, 0.88838751,
0.96615893, 0.98731619, 1.05517799, 1.02860493, 0.98881473,
0.85210319, 0.91497438, 0.9275787, 0.97456134, 0.9011687,
0.69417417, 0.89661214, 0.79038577, 1.08118303, 1.0509366,
0.97813138, 0.85714945, 0.97330329, 0.83611871, 0.99772489,
0.83591193, 0.75592677, 0.85392601, 1.02734573, 0.72404609,
0.83534547, 0.91630472, 0.88463459, 1.12044562, 1.10991104,
0.96047701, 1.12342573, 0.72046647, 0.96852239, 0.89605698,
0.98310243, 0.92300659, 0.87794646, 0.83109321, 1.43297752,
0.80609029, 0.8692251, 0.90254649, 0.81647796, 1.07521371,
1.03942973, 0.96156488, 1.25225334, 1.0265727, 0.9518054,
0.87765718, 1.15552582, 0.79577766, 0.66849239, 0.87236017,
1.03437641, 0.98567811, 0.78463682, 1.09573491, 0.89858959,
0.94056747, 1.16075317, 1.06296054, 0.85844006, 0.95475376,
0.67038747, 0.7924646, 0.94009167, 0.88282093, 0.97711174,
0.9209607, 1.03230176, 0.99981312, 1.12345314, 1.11705968,
1.02453864, 0.91724212, 0.98337942, 0.89195196, 0.83800177,
0.95044243, 0.76543521, 0.8613025, 0.83907753, 0.69333275,
0.84411739, 0.68621941, 0.9847701, 1.13328481, 1.1432074,
0.97156328, 0.86464461, 0.74258211, 0.97319505, 1.11453917,
0.87344741, 0.91382664, 1.01635943, 1.38708812, 0.81377942,
1.3828856, 0.74476285, 0.86657537, 1.1216954, 0.91008346,
0.800862, 0.98356936, 0.92409916, 1.13970543, 0.97547004,
0.99385865, 1.16476579, 0.78678084, 1.003947, 0.81491463,
1.19724322, 0.9173622, 0.93274116, 0.80047839, 0.86798029,
0.9433708, 0.82376832, 1.01726905, 0.81914971, 0.73290844])
class Medpar1(object):
    '''
    The medpar1 data can be found here.
    https://www.stata-press.com/data/hh2/medpar1
    '''
    def __init__(self):
        # Load the raw CSV that ships alongside this module.
        here = os.path.dirname(os.path.abspath(__file__))
        records = pd.read_csv(os.path.join(here, "stata_medpar1_glm.csv")).to_records()
        # Length of stay is the response variable.
        self.endog = records.los
        # One-hot encode admission type, dropping the first level as reference.
        admit_dummies = pd.get_dummies(records.admitype, prefix="race", drop_first=True)
        covariates = np.column_stack((records.codes, admit_dummies)).astype(float)
        # Append the intercept column last (Stata convention).
        self.exog = add_constant(covariates, prepend=False)
class InvGaussLog(Medpar1):
"""
InvGaussLog is used with TestGlmInvgaussLog
"""
def __init__(self):
super(InvGaussLog, self).__init__()
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"medparlogresids.csv")
self.resids = pd.read_csv(filename, sep=',', header=None).values
self.null_deviance = 335.1539777981053 # from R, Rpy bug
self.params = np.array([0.09927544, -0.19161722, 1.05712336])
self.bse = np.array([0.00600728, 0.02632126, 0.04915765])
self.aic_R = 18545.836421595981
self.aic_Stata = 6.619000588187141
self.deviance = 304.27188306012789
self.scale = 0.10240599519220173
# self.llf = -9268.9182107979905 # from R
self.llf = -12162.72308108797 # from Stata, big rounding diff with R
self.bic_Stata = -29849.51723280784
self.chi2 = 398.5465213008323 # from Stata not in sm
self.df_model = 2
self.df_resid = 3673
self.fittedvalues = np.array([
7.03292237, 7.03292237, 7.03292237, 7.03292237, 5.76642001,
7.03292237, 7.03292237, 6.36826384, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 5.76642001, 7.03292237,
5.22145448, 7.03292237, 5.22145448, 4.72799187, 4.72799187,
7.03292237, 7.03292237, 6.36826384, 7.03292237, 5.76642001,
7.03292237, 4.28116479, 7.03292237, 7.03292237, 7.03292237,
5.76642001, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 3.87656588, 7.03292237, 7.03292237, 4.28116479,
7.03292237, 7.03292237, 4.72799187, 7.03292237, 7.03292237,
7.03292237, 5.22145448, 6.36826384, 6.36826384, 4.28116479,
4.72799187, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
5.22145448, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 5.76642001, 6.36826384,
6.36826384, 5.22145448, 7.03292237, 7.03292237, 7.03292237,
5.76642001, 7.03292237, 7.03292237, 3.87656588, 5.76642001,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
5.22145448, 5.22145448, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 7.03292237, 4.72799187, 7.03292237, 6.36826384,
7.03292237, 6.36826384, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 5.22145448, 6.36826384, 5.22145448,
7.03292237, 7.03292237, 4.72799187, 5.76642001, 7.03292237,
4.72799187, 6.36826384, 3.87656588, 7.03292237, 7.03292237,
5.22145448, 5.22145448, 6.36826384, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 4.28116479,
7.03292237, 6.36826384, 6.36826384, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
6.36826384, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 5.76642001,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 5.76642001,
7.03292237, 7.03292237, 6.36826384, 3.87656588, 7.03292237,
7.03292237, 5.22145448, 7.03292237, 5.76642001, 4.28116479,
5.76642001, 6.36826384, 6.36826384, 7.03292237, 7.03292237,
5.76642001, 7.03292237, 7.03292237, 4.28116479, 7.03292237,
6.36826384, 7.03292237, 6.36826384, 7.03292237, 5.22145448,
7.03292237, 4.28116479, 4.72799187, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 6.36826384,
7.03292237, 4.28116479, 5.22145448, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 4.28116479, 7.03292237, 7.03292237,
7.03292237, 6.36826384, 5.76642001, 7.03292237, 7.03292237,
7.03292237, 4.72799187, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
6.36826384, 7.03292237, 6.36826384, 4.28116479, 5.76642001,
5.22145448, 6.36826384, 6.36826384, 7.03292237, 7.03292237,
7.03292237, 6.36826384, 7.03292237, 7.03292237, 6.36826384,
5.76642001, 7.03292237, 5.22145448, 5.76642001, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
4.28116479, 7.03292237, 5.22145448, 7.03292237, 6.36826384,
5.76642001, 4.28116479, 4.28116479, 7.03292237, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 4.28116479,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 5.22145448,
7.03292237, 7.03292237, 7.03292237, 5.22145448, 7.03292237,
5.76642001, 7.03292237, 4.72799187, 4.28116479, 6.36826384,
5.76642001, 7.03292237, 7.03292237, 7.03292237, 5.22145448,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
3.87656588, 4.72799187, 7.03292237, 7.03292237, 7.03292237,
4.72799187, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 6.36826384, 3.87656588, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 4.28116479, 7.03292237, 6.36826384,
7.03292237, 5.22145448, 5.22145448, 6.36826384, 7.03292237,
6.36826384, 6.36826384, 7.03292237, 4.28116479, 7.03292237,
7.03292237, 7.03292237, 5.22145448, 6.36826384, 7.03292237,
3.87656588, 6.36826384, 5.22145448, 5.76642001, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 4.28116479, 7.03292237,
5.22145448, 7.03292237, 6.36826384, 5.22145448, 4.72799187,
7.03292237, 7.03292237, 7.03292237, 4.72799187, 6.36826384,
7.03292237, 6.36826384, 5.76642001, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 5.22145448, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 3.87656588, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 4.72799187, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 6.36826384, 7.03292237, 7.03292237,
6.36826384, 7.03292237, 6.36826384, 7.03292237, 5.22145448,
6.36826384, 7.03292237, 6.36826384, 7.03292237, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 3.87656588,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 6.36826384, 7.03292237, 7.03292237, 6.36826384,
5.76642001, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
5.76642001, 7.03292237, 6.36826384, 7.03292237, 7.03292237,
7.03292237, 3.87656588, 7.03292237, 6.36826384, 6.36826384,
4.72799187, 5.76642001, 6.36826384, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 3.87656588, 5.22145448, 4.72799187,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 4.72799187, 6.36826384,
7.03292237, 7.03292237, 5.76642001, 7.03292237, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 5.76642001, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 6.36826384,
7.03292237, 5.22145448, 5.76642001, 7.03292237, 5.76642001,
6.36826384, 5.76642001, 5.76642001, 7.03292237, 5.76642001,
7.03292237, 7.03292237, 7.03292237, 4.72799187, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
6.36826384, 7.03292237, 6.36826384, 7.03292237, 4.72799187,
7.03292237, 7.03292237, 4.28116479, 6.36826384, 3.87656588,
7.03292237, 3.5102043, 7.03292237, 7.03292237, 5.76642001,
5.22145448, 7.03292237, 5.76642001, 4.28116479, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 5.76642001, 4.72799187,
7.03292237, 6.36826384, 7.03292237, 5.22145448, 7.03292237,
4.72799187, 7.03292237, 7.03292237, 7.03292237, 5.22145448,
5.22145448, 4.72799187, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
4.28116479, 7.03292237, 7.03292237, 7.03292237, 5.76642001,
6.36826384, 7.03292237, 7.03292237, 5.76642001, 7.03292237,
7.03292237, 6.36826384, 4.72799187, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 4.72799187, 5.76642001, 7.03292237, 5.76642001,
6.36826384, 7.03292237, 7.03292237, 7.03292237, 4.72799187,
7.03292237, 7.03292237, 7.03292237, 5.76642001, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
6.36826384, 7.03292237, 7.03292237, 5.76642001, 6.36826384,
4.72799187, 7.03292237, 7.03292237, 7.03292237, 5.76642001,
7.03292237, 6.36826384, 5.22145448, 5.76642001, 4.72799187,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 5.22145448, 7.03292237, 7.03292237,
6.36826384, 7.03292237, 7.03292237, 5.76642001, 7.03292237,
7.03292237, 7.03292237, 6.36826384, 7.03292237, 5.22145448,
7.03292237, 7.03292237, 7.03292237, 5.76642001, 7.03292237,
6.36826384, 6.36826384, 7.03292237, 5.76642001, 7.03292237,
6.36826384, 7.03292237, 7.03292237, 7.03292237, 4.72799187,
5.22145448, 7.03292237, 3.87656588, 5.76642001, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 6.36826384, 7.03292237, 7.03292237,
4.72799187, 7.03292237, 6.36826384, 7.03292237, 4.28116479,
7.03292237, 7.03292237, 5.76642001, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
4.72799187, 6.36826384, 3.87656588, 7.03292237, 7.03292237,
6.36826384, 4.72799187, 4.28116479, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 3.87656588, 7.03292237, 7.03292237, 7.03292237,
3.87656588, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 6.36826384, 7.03292237, 7.03292237, 3.87656588,
7.03292237, 4.72799187, 5.22145448, 5.22145448, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 6.36826384, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 5.22145448, 5.22145448, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 4.72799187, 6.36826384, 5.76642001,
5.76642001, 6.36826384, 7.03292237, 7.03292237, 7.03292237,
6.36826384, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 4.72799187, 7.03292237, 5.76642001, 7.03292237,
7.03292237, 7.03292237, 4.72799187, 4.28116479, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 5.76642001,
7.03292237, 5.76642001, 7.03292237, 7.03292237, 7.03292237,
5.22145448, 7.03292237, 7.03292237, 7.03292237, 5.22145448,
6.36826384, 7.03292237, 7.03292237, 6.36826384, 6.36826384,
7.03292237, 7.03292237, 5.76642001, 7.03292237, 5.22145448,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 4.72799187, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 4.28116479,
7.03292237, 6.36826384, 7.03292237, 5.76642001, 4.28116479,
5.76642001, 7.03292237, 3.87656588, 7.03292237, 7.03292237,
7.03292237, 3.5102043, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 5.76642001, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 5.76642001, 5.76642001, 5.76642001, 7.03292237,
7.03292237, 6.36826384, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 5.76642001, 7.03292237, 4.28116479, 6.36826384,
5.76642001, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 5.22145448,
7.03292237, 7.03292237, 7.03292237, 5.76642001, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 5.76642001, 7.03292237, 7.03292237, 7.03292237,
6.36826384, 6.36826384, 7.03292237, 7.03292237, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 7.03292237, 5.22145448, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 3.5102043, 7.03292237, 7.03292237,
7.03292237, 3.87656588, 6.36826384, 5.76642001, 7.03292237,
7.03292237, 6.36826384, 4.72799187, 7.03292237, 7.03292237,
5.76642001, 7.03292237, 3.87656588, 5.22145448, 6.36826384,
4.28116479, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 6.36826384,
7.03292237, 5.22145448, 6.36826384, 6.36826384, 7.03292237,
6.36826384, 7.03292237, 7.03292237, 7.03292237, 5.22145448,
7.03292237, 6.36826384, 7.03292237, 7.03292237, 4.72799187,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
6.36826384, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 3.5102043, 7.03292237, 5.22145448,
5.22145448, 7.03292237, 6.36826384, 7.03292237, 4.72799187,
7.03292237, 7.03292237, 7.03292237, 4.72799187, 7.03292237,
5.76642001, 7.03292237, 3.87656588, 7.03292237, 5.22145448,
3.87656588, 4.72799187, 6.36826384, 5.76642001, 7.03292237,
6.36826384, 7.03292237, 4.28116479, 5.76642001, 7.03292237,
7.03292237, 7.03292237, 5.22145448, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 4.28116479, 6.36826384, 7.03292237, 7.03292237,
7.03292237, 5.76642001, 6.36826384, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
3.5102043, 4.72799187, 7.03292237, 4.28116479, 7.03292237,
4.72799187, 7.03292237, 5.22145448, 5.76642001, 5.76642001,
3.87656588, 5.76642001, 5.22145448, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 5.22145448, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 5.22145448, 7.03292237,
7.03292237, 7.03292237, 5.22145448, 7.03292237, 7.03292237,
6.36826384, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 4.28116479,
4.72799187, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 6.36826384, 7.03292237, 7.03292237, 6.36826384,
6.36826384, 5.76642001, 7.03292237, 5.76642001, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 5.76642001, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
5.22145448, 7.03292237, 7.03292237, 5.76642001, 6.36826384,
5.76642001, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
4.72799187, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 6.36826384, 5.76642001, 6.36826384, 4.72799187,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 4.72799187,
7.03292237, 6.36826384, 7.03292237, 5.22145448, 7.03292237,
7.03292237, 6.36826384, 7.03292237, 5.76642001, 6.36826384,
5.76642001, 7.03292237, 7.03292237, 7.03292237, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
4.72799187, 7.03292237, 5.22145448, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 4.72799187, 6.36826384, 7.03292237,
7.03292237, 6.36826384, 7.03292237, 5.76642001, 5.22145448,
7.03292237, 7.03292237, 7.03292237, 5.22145448, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 4.28116479,
5.76642001, 7.03292237, 4.28116479, 7.03292237, 6.36826384,
7.03292237, 7.03292237, 4.28116479, 7.03292237, 7.03292237,
6.36826384, 3.87656588, 3.5102043, 6.36826384, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 5.76642001,
7.03292237, 4.72799187, 5.76642001, 7.03292237, 7.03292237,
3.87656588, 7.03292237, 7.03292237, 7.03292237, 4.28116479,
7.03292237, 7.03292237, 7.03292237, 5.76642001, 5.76642001,
7.03292237, 6.36826384, 5.76642001, 7.03292237, 6.36826384,
5.76642001, 7.03292237, 5.76642001, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
4.28116479, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 4.72799187, 5.76642001, 6.36826384, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 5.76642001, 4.28116479,
7.03292237, 5.76642001, 4.72799187, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 5.76642001,
6.36826384, 6.36826384, 7.03292237, 7.03292237, 6.36826384,
3.87656588, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
3.5102043, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 6.36826384, 7.03292237, 7.03292237,
7.03292237, 4.72799187, 7.03292237, 6.36826384, 4.72799187,
4.72799187, 7.03292237, 5.76642001, 7.03292237, 7.03292237,
7.03292237, 6.36826384, 4.28116479, 7.03292237, 7.03292237,
7.03292237, 5.76642001, 7.03292237, 7.03292237, 7.03292237,
4.72799187, 7.03292237, 7.03292237, 6.36826384, 5.22145448,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 5.22145448, 7.03292237, 7.03292237, 6.36826384,
7.03292237, 7.03292237, 5.22145448, 7.03292237, 6.36826384,
6.36826384, 7.03292237, 6.36826384, 7.03292237, 7.03292237,
7.03292237, 6.36826384, 6.36826384, 7.03292237, 4.72799187,
4.28116479, 4.72799187, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 4.28116479,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
6.36826384, 4.28116479, 4.28116479, 7.03292237, 5.22145448,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 6.36826384, 7.03292237, 7.03292237, 7.03292237,
5.76642001, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 4.72799187, 7.03292237,
3.87656588, 7.03292237, 4.72799187, 7.03292237, 7.03292237,
7.03292237, 5.22145448, 7.03292237, 4.28116479, 7.03292237,
7.03292237, 4.72799187, 5.22145448, 7.03292237, 7.03292237,
7.03292237, 6.36826384, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 6.36826384, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 5.76642001, 5.22145448,
7.03292237, 7.03292237, 3.87656588, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 4.72799187, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
4.28116479, 7.03292237, 7.03292237, 7.03292237, 5.76642001,
7.03292237, 5.22145448, 4.72799187, 7.03292237, 7.03292237,
7.03292237, 6.36826384, 5.76642001, 7.03292237, 5.76642001,
7.03292237, 4.28116479, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 5.76642001, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 5.76642001,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 5.76642001, 7.03292237, 3.87656588,
6.36826384, 5.76642001, 7.03292237, 4.28116479, 7.03292237,
5.76642001, 5.22145448, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 6.36826384, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
5.76642001, 7.03292237, 7.03292237, 7.03292237, 3.5102043,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 4.28116479, 4.72799187, 6.36826384, 7.03292237,
7.03292237, 4.28116479, 5.76642001, 7.03292237, 7.03292237,
7.03292237, 4.28116479, 7.03292237, 7.03292237, 5.22145448,
6.36826384, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 4.72799187,
7.03292237, 5.22145448, 6.36826384, 7.03292237, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 5.22145448,
7.03292237, 7.03292237, 5.22145448, 7.03292237, 6.36826384,
7.03292237, 7.03292237, 5.76642001, 7.03292237, 7.03292237,
3.5102043, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 5.76642001, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 6.36826384,
4.72799187, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
6.36826384, 7.03292237, 6.36826384, 4.72799187, 5.22145448,
5.76642001, 7.03292237, 6.36826384, 6.36826384, 7.03292237,
6.36826384, 7.03292237, 5.22145448, 4.72799187, 5.76642001,
6.36826384, 7.03292237, 7.03292237, 5.76642001, 5.22145448,
7.03292237, 6.36826384, 3.87656588, 6.36826384, 7.03292237,
5.76642001, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 3.5102043, 7.03292237, 7.03292237, 7.03292237,
5.22145448, 7.03292237, 6.36826384, 7.03292237, 6.36826384,
7.03292237, 6.36826384, 5.22145448, 7.03292237, 7.03292237,
7.03292237, 6.36826384, 6.36826384, 7.03292237, 7.03292237,
6.36826384, 4.72799187, 7.03292237, 5.22145448, 7.03292237,
4.72799187, 7.03292237, 4.28116479, 7.03292237, 7.03292237,
6.36826384, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
4.28116479, 6.36826384, 7.03292237, 3.87656588, 7.03292237,
7.03292237, 7.03292237, 6.36826384, 7.03292237, 7.03292237,
7.03292237, 6.36826384, 7.03292237, 5.22145448, 7.03292237,
7.03292237, 5.76642001, 6.36826384, 7.03292237, 4.72799187,
7.03292237, 7.03292237, 5.22145448, 7.03292237, 3.5102043,
6.36826384, 6.36826384, 7.03292237, 6.36826384, 7.03292237,
5.22145448, 6.36826384, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 5.76642001, 4.28116479, 7.03292237, 7.03292237,
4.72799187, 4.72799187, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 5.76642001, 7.03292237, 5.76642001,
4.28116479, 7.03292237, 4.28116479, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 3.5102043, 7.03292237, 5.22145448,
7.03292237, 6.36826384, 7.03292237, 6.36826384, 7.03292237,
4.72799187, 7.03292237, 7.03292237, 4.72799187, 3.5102043,
3.17846635, 3.87656588, 5.22145448, 6.36826384, 7.03292237,
4.28116479, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
6.36826384, 7.03292237, 7.03292237, 5.76642001, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 4.72799187, 7.03292237,
7.03292237, 7.03292237, 5.76642001, 7.03292237, 3.5102043,
7.03292237, 7.03292237, 5.22145448, 6.36826384, 3.87656588,
4.72799187, 7.03292237, 7.03292237, 3.87656588, 7.03292237,
6.36826384, 7.03292237, 6.36826384, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 4.72799187, 7.03292237,
7.03292237, 6.36826384, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 5.76642001, 7.03292237, 4.28116479, 7.03292237,
7.03292237, 7.03292237, 4.72799187, 6.36826384, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
6.36826384, 4.72799187, 6.36826384, 7.03292237, 7.03292237,
5.22145448, 7.03292237, 5.76642001, 7.03292237, 7.03292237,
7.03292237, 5.76642001, 7.03292237, 6.36826384, 6.36826384,
7.03292237, 7.03292237, 6.36826384, 7.03292237, 5.22145448,
7.03292237, 5.22145448, 5.22145448, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
4.72799187, 4.28116479, 7.03292237, 6.36826384, 7.03292237,
5.76642001, 7.03292237, 7.03292237, 7.03292237, 4.72799187,
7.03292237, 5.76642001, 7.03292237, 4.72799187, 7.03292237,
7.03292237, 4.72799187, 5.76642001, 6.36826384, 7.03292237,
4.28116479, 6.36826384, 7.03292237, 6.36826384, 5.76642001,
7.03292237, 4.28116479, 5.22145448, 4.72799187, 7.03292237,
7.03292237, 6.36826384, 5.22145448, 7.03292237, 5.76642001,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 4.28116479, 7.03292237,
6.36826384, 5.22145448, 5.76642001, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 6.36826384, 7.03292237, 7.03292237,
5.22145448, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 4.28116479,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 6.36826384, 7.03292237, 7.03292237, 7.03292237,
6.36826384, 7.03292237, 4.72799187, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 5.22145448, 6.36826384, 7.03292237,
5.76642001, 5.76642001, 7.03292237, 7.03292237, 7.03292237,
4.28116479, 7.03292237, 5.76642001, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 7.03292237, 3.87656588, 6.36826384, 6.36826384,
5.22145448, 7.03292237, 5.22145448, 7.03292237, 7.03292237,
7.03292237, 4.28116479, 7.03292237, 3.87656588, 7.03292237,
7.03292237, 5.22145448, 6.36826384, 4.72799187, 7.03292237,
7.03292237, 7.03292237, 6.36826384, 7.03292237, 5.76642001,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 5.22145448,
4.28116479, 7.03292237, 6.36826384, 7.03292237, 7.03292237,
5.76642001, 5.22145448, 5.76642001, 7.03292237, 4.28116479,
7.03292237, 7.03292237, 4.72799187, 6.36826384, 7.03292237,
4.72799187, 5.76642001, 7.03292237, 7.03292237, 6.36826384,
6.36826384, 5.76642001, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 4.72799187, 7.03292237, 6.36826384,
7.03292237, 4.72799187, 4.72799187, 5.76642001, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 4.72799187,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
5.76642001, 7.03292237, 4.72799187, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
6.36826384, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
3.5102043, 6.36826384, 5.22145448, 7.03292237, 5.22145448,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
4.72799187, 7.03292237, 6.36826384, 7.03292237, 7.03292237,
7.03292237, 6.36826384, 7.03292237, 7.03292237, 7.03292237,
6.36826384, 4.72799187, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 6.36826384, 7.03292237, 5.76642001, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 5.22145448, 4.72799187,
7.03292237, 7.03292237, 7.03292237, 4.28116479, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 3.87656588, 7.03292237,
5.22145448, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
3.5102043, 7.03292237, 6.36826384, 7.03292237, 7.03292237,
7.03292237, 4.72799187, 7.03292237, 7.03292237, 4.28116479,
6.36826384, 7.03292237, 5.22145448, 7.03292237, 7.03292237,
5.76642001, 7.03292237, 7.03292237, 7.03292237, 5.76642001,
4.72799187, 7.03292237, 4.72799187, 6.36826384, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 6.36826384, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 3.87656588, 5.22145448, 7.03292237, 7.03292237,
6.36826384, 4.28116479, 7.03292237, 5.76642001, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 3.87656588, 6.36826384, 7.03292237,
7.03292237, 5.76642001, 7.03292237, 5.22145448, 7.03292237,
5.76642001, 4.72799187, 7.03292237, 7.03292237, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 5.76642001,
5.22145448, 7.03292237, 5.76642001, 6.36826384, 4.28116479,
7.03292237, 4.72799187, 3.87656588, 5.22145448, 7.03292237,
6.36826384, 5.76642001, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 6.36826384, 5.76642001, 6.36826384, 7.03292237,
5.76642001, 7.03292237, 5.76642001, 5.22145448, 3.87656588,
5.76642001, 6.36826384, 7.03292237, 5.22145448, 6.36826384,
5.22145448, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
6.36826384, 7.03292237, 5.76642001, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 4.72799187, 5.76642001, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 4.72799187,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 5.22145448, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 3.5102043,
3.87656588, 7.03292237, 4.72799187, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 3.87656588,
5.22145448, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 5.76642001, 7.03292237, 7.03292237, 7.03292237,
4.28116479, 7.03292237, 4.72799187, 4.72799187, 7.03292237,
6.36826384, 5.76642001, 7.03292237, 4.28116479, 7.03292237,
7.03292237, 7.03292237, 5.76642001, 7.03292237, 7.03292237,
5.76642001, 5.22145448, 7.03292237, 4.72799187, 7.03292237,
4.28116479, 5.76642001, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 4.28116479, 7.03292237,
7.03292237, 7.03292237, 5.22145448, 5.22145448, 7.03292237,
7.03292237, 7.03292237, 5.76642001, 6.36826384, 7.03292237,
7.03292237, 5.22145448, 7.03292237, 7.03292237, 5.76642001,
5.22145448, 7.03292237, 7.03292237, 7.03292237, 3.87656588,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 5.76642001, 7.03292237, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 4.28116479, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 3.5102043,
7.03292237, 7.03292237, 7.03292237, 5.76642001, 4.28116479,
5.22145448, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 5.76642001, 6.36826384, 7.03292237,
5.22145448, 5.76642001, 5.76642001, 7.03292237, 7.03292237,
5.22145448, 7.03292237, 7.03292237, 5.22145448, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 5.22145448,
6.36826384, 5.22145448, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 5.22145448, 7.03292237, 5.76642001, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 4.72799187, 7.03292237,
7.03292237, 7.03292237, 6.36826384, 4.72799187, 5.22145448,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
6.36826384, 7.03292237, 7.03292237, 5.76642001, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 7.03292237,
4.72799187, 3.87656588, 7.03292237, 7.03292237, 4.72799187,
7.03292237, 7.03292237, 6.36826384, 7.03292237, 5.22145448,
7.03292237, 7.03292237, 7.03292237, 3.87656588, 5.76642001,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 5.76642001,
5.22145448, 7.03292237, 6.36826384, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 5.76642001,
5.76642001, 7.03292237, 5.76642001, 3.87656588, 6.36826384,
7.03292237, 7.03292237, 7.03292237, 6.36826384, 5.76642001,
5.22145448, 7.03292237, 5.22145448, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 5.22145448, 4.72799187,
7.03292237, 6.36826384, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 7.03292237, 7.03292237, 5.22145448, 6.36826384,
7.03292237, 7.03292237, 3.17846635, 5.76642001, 7.03292237,
3.5102043, 7.03292237, 7.03292237, 7.03292237, 3.87656588,
7.03292237, 6.36826384, 6.36826384, 7.03292237, 5.22145448,
7.03292237, 7.03292237, 7.03292237, 7.03292237, 7.03292237,
7.03292237, 4.28116479, 6.36826384, 7.03292237, 6.36826384,
4.72799187, 7.03292237, 7.03292237, 5.22145448, 4.28116479,
7.03292237, 6.36826384, 7.03292237, 4.72799187, 5.76642001,
6.36826384, 5.22145448, 7.03292237, 7.03292237, 7.03292237,
6.36826384, 7.03292237, 7.03292237, 3.87656588, 7.03292237,
4.72799187, 7.03292237, 3.53462742, 4.76088805, 5.25778406,
4.31095206, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.25778406, 5.25778406, 5.80654132, 5.80654132,
3.90353806, 5.25778406, 4.31095206, 5.80654132, 5.25778406,
3.53462742, 2.89810483, 5.80654132, 5.25778406, 5.80654132,
2.89810483, 5.80654132, 5.25778406, 3.53462742, 4.76088805,
5.80654132, 3.20058132, 5.80654132, 5.80654132, 4.76088805,
5.80654132, 3.53462742, 3.53462742, 5.80654132, 5.80654132,
5.80654132, 4.76088805, 5.80654132, 4.76088805, 3.90353806,
5.80654132, 3.53462742, 5.80654132, 2.6242144, 3.20058132,
5.80654132, 5.80654132, 3.90353806, 3.20058132, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
2.89810483, 5.80654132, 5.80654132, 3.90353806, 3.53462742,
4.31095206, 5.80654132, 5.80654132, 4.76088805, 5.80654132,
3.53462742, 5.80654132, 4.76088805, 2.89810483, 5.25778406,
4.31095206, 5.80654132, 4.31095206, 5.80654132, 5.80654132,
4.76088805, 4.31095206, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 4.76088805, 5.80654132, 5.25778406,
5.25778406, 5.80654132, 5.80654132, 3.53462742, 5.80654132,
3.53462742, 5.80654132, 4.31095206, 5.80654132, 5.80654132,
5.25778406, 5.80654132, 3.20058132, 5.80654132, 5.80654132,
3.20058132, 3.90353806, 5.80654132, 5.80654132, 5.25778406,
3.53462742, 3.20058132, 5.80654132, 4.31095206, 5.80654132,
5.80654132, 5.80654132, 3.20058132, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 4.31095206, 5.80654132, 3.90353806,
5.80654132, 4.31095206, 4.31095206, 5.80654132, 4.76088805,
3.90353806, 3.90353806, 4.76088805, 3.90353806, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.25778406, 3.53462742, 5.80654132, 3.53462742,
5.80654132, 5.80654132, 5.80654132, 2.89810483, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 4.76088805, 4.76088805,
5.80654132, 2.89810483, 5.80654132, 4.76088805, 5.80654132,
5.80654132, 4.31095206, 3.20058132, 5.80654132, 4.76088805,
5.80654132, 2.89810483, 2.89810483, 5.25778406, 3.90353806,
5.80654132, 5.80654132, 5.25778406, 5.80654132, 5.80654132,
3.90353806, 5.80654132, 5.25778406, 4.76088805, 5.80654132,
2.89810483, 5.25778406, 5.80654132, 5.80654132, 4.31095206,
5.25778406, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
2.89810483, 5.80654132, 3.53462742, 3.90353806, 5.25778406,
5.80654132, 3.20058132, 2.89810483, 5.80654132, 4.31095206,
5.80654132, 3.53462742, 5.25778406, 4.76088805, 5.80654132,
3.53462742, 3.90353806, 5.80654132, 3.20058132, 5.80654132,
5.80654132, 3.53462742, 5.25778406, 4.76088805, 4.76088805,
5.80654132, 5.80654132, 2.89810483, 3.20058132, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.25778406, 5.25778406,
5.80654132, 5.80654132, 4.76088805, 5.80654132, 4.31095206,
5.25778406, 5.80654132, 4.31095206, 4.31095206, 5.80654132,
5.80654132, 3.53462742, 4.76088805, 3.53462742, 4.76088805,
4.31095206, 5.80654132, 3.90353806, 5.80654132, 4.76088805,
5.80654132, 5.80654132, 5.80654132, 4.31095206, 3.90353806,
5.80654132, 4.76088805, 4.76088805, 3.53462742, 5.80654132,
5.80654132, 5.25778406, 3.53462742, 3.20058132, 3.53462742,
3.90353806, 5.80654132, 4.31095206, 4.76088805, 5.80654132,
5.80654132, 5.80654132, 3.90353806, 4.76088805, 2.89810483,
5.80654132, 5.80654132, 5.80654132, 4.76088805, 5.25778406,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 3.90353806, 5.25778406, 4.76088805,
5.80654132, 4.76088805, 3.90353806, 5.80654132, 5.80654132,
4.76088805, 5.80654132, 5.25778406, 5.80654132, 2.89810483,
5.80654132, 5.25778406, 3.90353806, 3.90353806, 5.80654132,
5.25778406, 3.53462742, 5.80654132, 4.76088805, 5.25778406,
5.80654132, 3.90353806, 4.31095206, 5.80654132, 5.25778406,
3.90353806, 3.53462742, 5.25778406, 2.89810483, 5.80654132,
3.53462742, 4.76088805, 4.31095206, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 3.90353806, 5.80654132,
4.31095206, 5.80654132, 5.80654132, 5.25778406, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.25778406, 5.25778406,
5.80654132, 5.25778406, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.25778406, 4.31095206, 5.80654132, 5.25778406,
5.80654132, 5.25778406, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 4.31095206, 5.25778406, 3.53462742, 2.89810483,
5.80654132, 5.80654132, 3.20058132, 5.80654132, 4.31095206,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 3.90353806,
3.90353806, 3.90353806, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 4.76088805, 3.20058132, 4.31095206, 5.80654132,
3.90353806, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 3.90353806, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 3.90353806, 5.80654132, 3.90353806, 3.53462742,
5.80654132, 4.76088805, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
4.76088805, 5.25778406, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 5.25778406,
3.53462742, 5.25778406, 5.80654132, 3.53462742, 5.80654132,
3.90353806, 5.80654132, 5.80654132, 5.80654132, 3.90353806,
3.20058132, 5.80654132, 5.80654132, 3.90353806, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 3.53462742, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 3.53462742, 5.25778406, 3.90353806,
5.80654132, 4.76088805, 4.76088805, 3.90353806, 5.80654132,
5.80654132, 4.31095206, 2.89810483, 5.80654132, 5.80654132,
3.90353806, 5.80654132, 3.53462742, 3.90353806, 5.80654132,
5.80654132, 4.76088805, 5.80654132, 4.31095206, 5.25778406,
5.25778406, 3.20058132, 3.53462742, 5.80654132, 4.31095206,
5.80654132, 4.76088805, 3.90353806, 4.76088805, 4.76088805,
5.80654132, 5.80654132, 5.25778406, 3.90353806, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 3.53462742, 4.31095206, 3.90353806, 4.76088805,
4.31095206, 3.53462742, 3.90353806, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 3.20058132, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 3.90353806, 4.76088805,
5.25778406, 3.53462742, 3.20058132, 5.80654132, 3.90353806,
5.80654132, 3.53462742, 5.80654132, 5.80654132, 3.90353806,
5.80654132, 3.90353806, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 4.76088805, 3.90353806, 4.76088805, 5.25778406,
2.89810483, 5.80654132, 4.31095206, 5.80654132, 4.76088805,
5.80654132, 5.25778406, 5.80654132, 5.80654132, 5.80654132,
3.53462742, 2.89810483, 5.80654132, 5.80654132, 5.80654132,
3.90353806, 4.76088805, 5.80654132, 5.25778406, 4.76088805,
5.25778406, 5.80654132, 5.80654132, 5.25778406, 5.80654132,
5.80654132, 5.80654132, 2.89810483, 5.25778406, 5.80654132,
5.80654132, 4.76088805, 4.76088805, 5.25778406, 5.80654132,
5.80654132, 4.31095206, 3.20058132, 3.53462742, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 5.25778406,
5.80654132, 5.80654132, 3.90353806, 4.76088805, 5.80654132,
3.53462742, 5.80654132, 5.25778406, 2.89810483, 5.80654132,
5.25778406, 5.80654132, 5.80654132, 5.80654132, 5.25778406,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 4.31095206, 5.80654132, 3.20058132, 5.80654132,
5.25778406, 4.76088805, 5.25778406, 5.80654132, 4.76088805,
5.80654132, 3.90353806, 4.31095206, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 5.25778406, 5.80654132, 3.90353806,
4.76088805, 3.90353806, 5.80654132, 3.53462742, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 3.53462742, 5.80654132,
4.76088805, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 3.90353806,
2.6242144, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
4.76088805, 5.80654132, 3.53462742, 5.80654132, 5.80654132,
3.90353806, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 3.20058132, 3.20058132, 5.80654132,
5.80654132, 5.80654132, 3.90353806, 5.80654132, 5.25778406,
4.31095206, 5.25778406, 4.31095206, 4.31095206, 4.76088805,
5.80654132, 4.76088805, 5.80654132, 3.53462742, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 3.20058132,
5.80654132, 3.90353806, 5.80654132, 4.76088805, 5.80654132,
3.90353806, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.25778406, 5.80654132, 4.31095206, 5.25778406,
4.31095206, 5.80654132, 3.90353806, 5.80654132, 3.53462742,
5.25778406, 5.80654132, 5.80654132, 4.31095206, 3.90353806,
3.53462742, 5.80654132, 5.80654132, 5.80654132, 4.31095206,
5.80654132, 5.80654132, 5.25778406, 4.76088805, 4.31095206,
3.20058132, 5.80654132, 3.53462742, 3.20058132, 5.80654132,
5.80654132, 3.20058132, 3.20058132, 5.80654132, 4.31095206,
4.31095206, 5.80654132, 5.80654132, 3.90353806, 3.90353806,
3.53462742, 5.80654132, 3.90353806, 3.53462742, 5.80654132,
3.90353806, 5.25778406, 5.80654132, 3.53462742, 5.80654132,
5.25778406, 5.80654132, 4.31095206, 3.90353806, 5.80654132,
5.80654132, 4.31095206, 5.25778406, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 5.25778406,
3.20058132, 5.25778406, 2.89810483, 3.90353806, 5.80654132,
3.53462742, 5.80654132, 5.25778406, 5.80654132, 2.89810483,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 3.20058132,
5.80654132, 5.25778406, 3.53462742, 4.31095206, 4.76088805,
3.90353806, 5.80654132, 5.80654132, 5.25778406, 3.90353806,
4.76088805, 4.31095206, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 3.90353806, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 5.25778406,
3.53462742, 5.80654132, 5.80654132, 5.25778406, 5.80654132,
3.20058132, 5.80654132, 4.76088805, 5.80654132, 4.76088805,
5.80654132, 5.25778406, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.25778406, 2.89810483, 5.80654132, 5.80654132,
2.89810483, 3.53462742, 5.80654132, 5.80654132, 2.89810483,
4.31095206, 3.53462742, 4.31095206, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 4.31095206,
4.76088805, 5.25778406, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.25778406, 3.90353806, 5.80654132, 5.25778406,
5.80654132, 2.89810483, 2.89810483, 5.80654132, 3.53462742,
5.80654132, 3.53462742, 5.80654132, 4.31095206, 2.89810483,
5.80654132, 5.80654132, 2.89810483, 4.76088805, 5.80654132,
5.80654132, 3.20058132, 5.80654132, 3.90353806, 5.80654132,
5.80654132, 3.20058132, 3.90353806, 4.76088805, 4.76088805,
5.80654132, 3.90353806, 4.31095206, 5.80654132, 4.31095206,
5.80654132, 3.20058132, 4.31095206, 4.76088805, 3.53462742,
5.80654132, 5.80654132, 3.53462742, 3.53462742, 3.53462742,
5.80654132, 5.80654132, 3.90353806, 3.90353806, 3.20058132,
5.80654132, 5.80654132, 2.89810483, 3.90353806, 5.80654132,
2.89810483, 3.53462742, 3.53462742, 4.31095206, 5.80654132,
3.53462742, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 5.25778406, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 4.76088805, 5.80654132, 5.80654132, 4.76088805,
5.80654132, 5.80654132, 4.76088805, 4.76088805, 5.80654132,
5.25778406, 4.31095206, 5.80654132, 4.76088805, 3.90353806,
4.31095206, 5.80654132, 2.89810483, 4.31095206, 5.25778406,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 3.20058132,
5.25778406, 5.80654132, 4.76088805, 5.80654132, 4.31095206,
5.80654132, 5.80654132, 4.76088805, 4.31095206, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 4.31095206,
4.31095206, 3.20058132, 4.76088805, 5.80654132, 3.20058132,
3.20058132, 5.80654132, 3.90353806, 5.25778406, 3.20058132,
4.76088805, 3.20058132, 3.53462742, 4.76088805, 5.80654132,
5.80654132, 4.31095206, 4.76088805, 5.80654132, 4.31095206,
5.80654132, 4.76088805, 4.31095206, 2.89810483, 5.80654132,
5.80654132, 5.80654132, 4.76088805, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 4.76088805, 5.25778406, 4.31095206,
5.80654132, 3.90353806, 3.53462742, 4.76088805, 5.80654132,
4.31095206, 5.80654132, 5.80654132, 3.20058132, 5.80654132,
5.25778406, 5.80654132, 5.80654132, 5.80654132, 3.53462742,
2.6242144, 5.80654132, 5.80654132, 3.53462742, 5.25778406,
3.90353806, 5.80654132, 2.89810483, 5.80654132, 3.90353806,
5.80654132, 5.80654132, 3.90353806, 2.89810483, 5.80654132,
4.76088805, 4.31095206, 5.80654132, 5.25778406, 5.80654132,
5.80654132, 4.31095206, 5.80654132, 5.80654132, 5.80654132,
3.90353806, 4.76088805, 5.80654132, 4.76088805, 5.80654132,
4.76088805, 3.53462742, 3.90353806, 5.80654132, 5.80654132,
5.80654132, 5.25778406, 5.80654132, 5.80654132, 5.25778406,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
3.53462742, 3.53462742, 3.90353806, 5.80654132, 4.31095206,
3.53462742, 5.80654132, 4.76088805, 4.76088805, 3.20058132,
3.90353806, 5.80654132, 5.25778406, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 4.31095206, 5.25778406, 4.31095206,
5.80654132, 3.20058132, 5.80654132, 4.31095206, 4.31095206,
4.76088805, 5.80654132, 4.76088805, 4.31095206, 5.80654132,
5.25778406, 3.53462742, 3.53462742, 5.25778406, 5.80654132,
3.90353806, 5.25778406, 4.31095206, 4.31095206, 3.53462742,
5.80654132, 3.90353806, 5.80654132, 5.80654132, 4.76088805,
5.25778406, 3.20058132, 3.90353806, 5.80654132, 5.25778406,
5.80654132, 5.80654132, 5.25778406, 5.80654132, 4.31095206,
5.25778406, 4.76088805, 5.80654132, 5.80654132, 5.25778406,
3.53462742, 5.80654132, 5.80654132, 5.80654132, 5.25778406,
5.25778406, 5.80654132, 3.20058132, 5.80654132, 5.80654132,
3.53462742, 5.80654132, 5.80654132, 5.80654132, 4.31095206,
5.80654132, 4.76088805, 5.80654132, 5.80654132, 5.80654132,
3.90353806, 4.31095206, 5.25778406, 5.80654132, 3.53462742,
3.90353806, 5.25778406, 4.31095206, 5.80654132, 5.25778406,
5.25778406, 2.89810483, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 5.25778406, 5.80654132, 4.76088805,
5.80654132, 5.80654132, 5.80654132, 4.31095206, 5.80654132,
3.20058132, 3.90353806, 5.80654132, 5.80654132, 5.25778406,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 5.80654132, 2.6242144, 5.80654132, 3.90353806,
5.25778406, 4.76088805, 5.80654132, 5.80654132, 3.90353806,
5.80654132, 3.53462742, 2.89810483, 5.80654132, 3.53462742,
2.89810483, 4.76088805, 5.80654132, 5.80654132, 5.80654132,
4.31095206, 5.80654132, 4.76088805, 3.90353806, 2.89810483,
4.76088805, 5.80654132, 2.6242144, 3.53462742, 4.31095206,
5.25778406, 5.25778406, 3.20058132, 4.31095206, 4.31095206,
3.20058132, 4.31095206, 5.25778406, 4.31095206, 5.25778406,
3.90353806, 4.31095206, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 3.90353806, 5.80654132, 5.80654132, 5.80654132,
4.31095206, 5.80654132, 5.80654132, 5.80654132, 3.90353806,
5.25778406, 3.90353806, 4.31095206, 4.76088805, 3.90353806,
5.80654132, 5.80654132, 5.80654132, 2.89810483, 5.80654132,
5.80654132, 5.80654132, 5.80654132, 5.80654132, 5.80654132,
5.80654132, 3.90353806, 3.20058132, 5.25778406, 4.76088805,
5.25778406])
class InvGaussIdentity(Medpar1):
"""
Accuracy is different for R vs Stata ML vs Stata IRLS, we are close.
"""
def __init__(self):
super(InvGaussIdentity, self).__init__()
self.params = np.array([0.44538838, -1.05872706, 2.83947966])
self.bse = np.array([0.02586783, 0.13830023, 0.20834864])
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"igaussident_resids.csv")
self.resids = pd.read_csv(filename, sep=',', header=None).values
self.null_deviance = 335.1539777981053 # from R, Rpy bug
self.df_null = 3675
self.deviance = 305.33661191013988
self.df_resid = 3673
self.df_model = 2
self.aic_R = 18558.677276882016
self.aic_Stata = 6.619290231464371
self.bic_Stata = -29848.45250412075
self.llf_stata = -12163.25544543151
self.chi2 = 567.1229375785638 # in Stata not sm
# self.llf = -9275.3386384410078 # from R
self.llf = -12163.25545 # from Stata, big diff with R
self.scale = 0.10115387793455666
self.pearson_chi2 = 371.5346609292967 # deviance_p in Stata
self.fittedvalues = np.array([
6.84797506, 6.84797506, 6.84797506, 6.84797506, 5.9571983,
6.84797506, 6.84797506, 6.40258668, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 5.9571983, 6.84797506,
5.51180993, 6.84797506, 5.51180993, 5.06642155, 5.06642155,
6.84797506, 6.84797506, 6.40258668, 6.84797506, 5.9571983,
6.84797506, 4.62103317, 6.84797506, 6.84797506, 6.84797506,
5.9571983, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 4.17564479, 6.84797506, 6.84797506, 4.62103317,
6.84797506, 6.84797506, 5.06642155, 6.84797506, 6.84797506,
6.84797506, 5.51180993, 6.40258668, 6.40258668, 4.62103317,
5.06642155, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
5.51180993, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 5.9571983, 6.40258668,
6.40258668, 5.51180993, 6.84797506, 6.84797506, 6.84797506,
5.9571983, 6.84797506, 6.84797506, 4.17564479, 5.9571983,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
5.51180993, 5.51180993, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.84797506, 5.06642155, 6.84797506, 6.40258668,
6.84797506, 6.40258668, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 5.51180993, 6.40258668, 5.51180993,
6.84797506, 6.84797506, 5.06642155, 5.9571983, 6.84797506,
5.06642155, 6.40258668, 4.17564479, 6.84797506, 6.84797506,
5.51180993, 5.51180993, 6.40258668, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 4.62103317,
6.84797506, 6.40258668, 6.40258668, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.40258668, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 5.9571983,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 5.9571983,
6.84797506, 6.84797506, 6.40258668, 4.17564479, 6.84797506,
6.84797506, 5.51180993, 6.84797506, 5.9571983, 4.62103317,
5.9571983, 6.40258668, 6.40258668, 6.84797506, 6.84797506,
5.9571983, 6.84797506, 6.84797506, 4.62103317, 6.84797506,
6.40258668, 6.84797506, 6.40258668, 6.84797506, 5.51180993,
6.84797506, 4.62103317, 5.06642155, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.40258668,
6.84797506, 4.62103317, 5.51180993, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 4.62103317, 6.84797506, 6.84797506,
6.84797506, 6.40258668, 5.9571983, 6.84797506, 6.84797506,
6.84797506, 5.06642155, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.40258668, 6.84797506, 6.40258668, 4.62103317, 5.9571983,
5.51180993, 6.40258668, 6.40258668, 6.84797506, 6.84797506,
6.84797506, 6.40258668, 6.84797506, 6.84797506, 6.40258668,
5.9571983, 6.84797506, 5.51180993, 5.9571983, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
4.62103317, 6.84797506, 5.51180993, 6.84797506, 6.40258668,
5.9571983, 4.62103317, 4.62103317, 6.84797506, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 4.62103317,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 5.51180993,
6.84797506, 6.84797506, 6.84797506, 5.51180993, 6.84797506,
5.9571983, 6.84797506, 5.06642155, 4.62103317, 6.40258668,
5.9571983, 6.84797506, 6.84797506, 6.84797506, 5.51180993,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
4.17564479, 5.06642155, 6.84797506, 6.84797506, 6.84797506,
5.06642155, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.40258668, 4.17564479, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 4.62103317, 6.84797506, 6.40258668,
6.84797506, 5.51180993, 5.51180993, 6.40258668, 6.84797506,
6.40258668, 6.40258668, 6.84797506, 4.62103317, 6.84797506,
6.84797506, 6.84797506, 5.51180993, 6.40258668, 6.84797506,
4.17564479, 6.40258668, 5.51180993, 5.9571983, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 4.62103317, 6.84797506,
5.51180993, 6.84797506, 6.40258668, 5.51180993, 5.06642155,
6.84797506, 6.84797506, 6.84797506, 5.06642155, 6.40258668,
6.84797506, 6.40258668, 5.9571983, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 5.51180993, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 4.17564479, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 5.06642155, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.40258668, 6.84797506, 6.84797506,
6.40258668, 6.84797506, 6.40258668, 6.84797506, 5.51180993,
6.40258668, 6.84797506, 6.40258668, 6.84797506, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 4.17564479,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.40258668, 6.84797506, 6.84797506, 6.40258668,
5.9571983, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
5.9571983, 6.84797506, 6.40258668, 6.84797506, 6.84797506,
6.84797506, 4.17564479, 6.84797506, 6.40258668, 6.40258668,
5.06642155, 5.9571983, 6.40258668, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 4.17564479, 5.51180993, 5.06642155,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 5.06642155, 6.40258668,
6.84797506, 6.84797506, 5.9571983, 6.84797506, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 5.9571983, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.40258668,
6.84797506, 5.51180993, 5.9571983, 6.84797506, 5.9571983,
6.40258668, 5.9571983, 5.9571983, 6.84797506, 5.9571983,
6.84797506, 6.84797506, 6.84797506, 5.06642155, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.40258668, 6.84797506, 6.40258668, 6.84797506, 5.06642155,
6.84797506, 6.84797506, 4.62103317, 6.40258668, 4.17564479,
6.84797506, 3.73025641, 6.84797506, 6.84797506, 5.9571983,
5.51180993, 6.84797506, 5.9571983, 4.62103317, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 5.9571983, 5.06642155,
6.84797506, 6.40258668, 6.84797506, 5.51180993, 6.84797506,
5.06642155, 6.84797506, 6.84797506, 6.84797506, 5.51180993,
5.51180993, 5.06642155, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
4.62103317, 6.84797506, 6.84797506, 6.84797506, 5.9571983,
6.40258668, 6.84797506, 6.84797506, 5.9571983, 6.84797506,
6.84797506, 6.40258668, 5.06642155, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 5.06642155, 5.9571983, 6.84797506, 5.9571983,
6.40258668, 6.84797506, 6.84797506, 6.84797506, 5.06642155,
6.84797506, 6.84797506, 6.84797506, 5.9571983, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.40258668, 6.84797506, 6.84797506, 5.9571983, 6.40258668,
5.06642155, 6.84797506, 6.84797506, 6.84797506, 5.9571983,
6.84797506, 6.40258668, 5.51180993, 5.9571983, 5.06642155,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 5.51180993, 6.84797506, 6.84797506,
6.40258668, 6.84797506, 6.84797506, 5.9571983, 6.84797506,
6.84797506, 6.84797506, 6.40258668, 6.84797506, 5.51180993,
6.84797506, 6.84797506, 6.84797506, 5.9571983, 6.84797506,
6.40258668, 6.40258668, 6.84797506, 5.9571983, 6.84797506,
6.40258668, 6.84797506, 6.84797506, 6.84797506, 5.06642155,
5.51180993, 6.84797506, 4.17564479, 5.9571983, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.40258668, 6.84797506, 6.84797506,
5.06642155, 6.84797506, 6.40258668, 6.84797506, 4.62103317,
6.84797506, 6.84797506, 5.9571983, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
5.06642155, 6.40258668, 4.17564479, 6.84797506, 6.84797506,
6.40258668, 5.06642155, 4.62103317, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 4.17564479, 6.84797506, 6.84797506, 6.84797506,
4.17564479, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.40258668, 6.84797506, 6.84797506, 4.17564479,
6.84797506, 5.06642155, 5.51180993, 5.51180993, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.40258668, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 5.51180993, 5.51180993, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 5.06642155, 6.40258668, 5.9571983,
5.9571983, 6.40258668, 6.84797506, 6.84797506, 6.84797506,
6.40258668, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 5.06642155, 6.84797506, 5.9571983, 6.84797506,
6.84797506, 6.84797506, 5.06642155, 4.62103317, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 5.9571983,
6.84797506, 5.9571983, 6.84797506, 6.84797506, 6.84797506,
5.51180993, 6.84797506, 6.84797506, 6.84797506, 5.51180993,
6.40258668, 6.84797506, 6.84797506, 6.40258668, 6.40258668,
6.84797506, 6.84797506, 5.9571983, 6.84797506, 5.51180993,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 5.06642155, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 4.62103317,
6.84797506, 6.40258668, 6.84797506, 5.9571983, 4.62103317,
5.9571983, 6.84797506, 4.17564479, 6.84797506, 6.84797506,
6.84797506, 3.73025641, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 5.9571983, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 5.9571983, 5.9571983, 5.9571983, 6.84797506,
6.84797506, 6.40258668, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 5.9571983, 6.84797506, 4.62103317, 6.40258668,
5.9571983, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 5.51180993,
6.84797506, 6.84797506, 6.84797506, 5.9571983, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 5.9571983, 6.84797506, 6.84797506, 6.84797506,
6.40258668, 6.40258668, 6.84797506, 6.84797506, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.84797506, 5.51180993, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 3.73025641, 6.84797506, 6.84797506,
6.84797506, 4.17564479, 6.40258668, 5.9571983, 6.84797506,
6.84797506, 6.40258668, 5.06642155, 6.84797506, 6.84797506,
5.9571983, 6.84797506, 4.17564479, 5.51180993, 6.40258668,
4.62103317, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.40258668,
6.84797506, 5.51180993, 6.40258668, 6.40258668, 6.84797506,
6.40258668, 6.84797506, 6.84797506, 6.84797506, 5.51180993,
6.84797506, 6.40258668, 6.84797506, 6.84797506, 5.06642155,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.40258668, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 3.73025641, 6.84797506, 5.51180993,
5.51180993, 6.84797506, 6.40258668, 6.84797506, 5.06642155,
6.84797506, 6.84797506, 6.84797506, 5.06642155, 6.84797506,
5.9571983, 6.84797506, 4.17564479, 6.84797506, 5.51180993,
4.17564479, 5.06642155, 6.40258668, 5.9571983, 6.84797506,
6.40258668, 6.84797506, 4.62103317, 5.9571983, 6.84797506,
6.84797506, 6.84797506, 5.51180993, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 4.62103317, 6.40258668, 6.84797506, 6.84797506,
6.84797506, 5.9571983, 6.40258668, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
3.73025641, 5.06642155, 6.84797506, 4.62103317, 6.84797506,
5.06642155, 6.84797506, 5.51180993, 5.9571983, 5.9571983,
4.17564479, 5.9571983, 5.51180993, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 5.51180993, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 5.51180993, 6.84797506,
6.84797506, 6.84797506, 5.51180993, 6.84797506, 6.84797506,
6.40258668, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 4.62103317,
5.06642155, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.40258668, 6.84797506, 6.84797506, 6.40258668,
6.40258668, 5.9571983, 6.84797506, 5.9571983, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 5.9571983, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
5.51180993, 6.84797506, 6.84797506, 5.9571983, 6.40258668,
5.9571983, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
5.06642155, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.40258668, 5.9571983, 6.40258668, 5.06642155,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 5.06642155,
6.84797506, 6.40258668, 6.84797506, 5.51180993, 6.84797506,
6.84797506, 6.40258668, 6.84797506, 5.9571983, 6.40258668,
5.9571983, 6.84797506, 6.84797506, 6.84797506, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
5.06642155, 6.84797506, 5.51180993, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 5.06642155, 6.40258668, 6.84797506,
6.84797506, 6.40258668, 6.84797506, 5.9571983, 5.51180993,
6.84797506, 6.84797506, 6.84797506, 5.51180993, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 4.62103317,
5.9571983, 6.84797506, 4.62103317, 6.84797506, 6.40258668,
6.84797506, 6.84797506, 4.62103317, 6.84797506, 6.84797506,
6.40258668, 4.17564479, 3.73025641, 6.40258668, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 5.9571983,
6.84797506, 5.06642155, 5.9571983, 6.84797506, 6.84797506,
4.17564479, 6.84797506, 6.84797506, 6.84797506, 4.62103317,
6.84797506, 6.84797506, 6.84797506, 5.9571983, 5.9571983,
6.84797506, 6.40258668, 5.9571983, 6.84797506, 6.40258668,
5.9571983, 6.84797506, 5.9571983, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
4.62103317, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 5.06642155, 5.9571983, 6.40258668, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 5.9571983, 4.62103317,
6.84797506, 5.9571983, 5.06642155, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 5.9571983,
6.40258668, 6.40258668, 6.84797506, 6.84797506, 6.40258668,
4.17564479, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
3.73025641, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.40258668, 6.84797506, 6.84797506,
6.84797506, 5.06642155, 6.84797506, 6.40258668, 5.06642155,
5.06642155, 6.84797506, 5.9571983, 6.84797506, 6.84797506,
6.84797506, 6.40258668, 4.62103317, 6.84797506, 6.84797506,
6.84797506, 5.9571983, 6.84797506, 6.84797506, 6.84797506,
5.06642155, 6.84797506, 6.84797506, 6.40258668, 5.51180993,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 5.51180993, 6.84797506, 6.84797506, 6.40258668,
6.84797506, 6.84797506, 5.51180993, 6.84797506, 6.40258668,
6.40258668, 6.84797506, 6.40258668, 6.84797506, 6.84797506,
6.84797506, 6.40258668, 6.40258668, 6.84797506, 5.06642155,
4.62103317, 5.06642155, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 4.62103317,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.40258668, 4.62103317, 4.62103317, 6.84797506, 5.51180993,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.40258668, 6.84797506, 6.84797506, 6.84797506,
5.9571983, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 5.06642155, 6.84797506,
4.17564479, 6.84797506, 5.06642155, 6.84797506, 6.84797506,
6.84797506, 5.51180993, 6.84797506, 4.62103317, 6.84797506,
6.84797506, 5.06642155, 5.51180993, 6.84797506, 6.84797506,
6.84797506, 6.40258668, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.40258668, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 5.9571983, 5.51180993,
6.84797506, 6.84797506, 4.17564479, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 5.06642155, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
4.62103317, 6.84797506, 6.84797506, 6.84797506, 5.9571983,
6.84797506, 5.51180993, 5.06642155, 6.84797506, 6.84797506,
6.84797506, 6.40258668, 5.9571983, 6.84797506, 5.9571983,
6.84797506, 4.62103317, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 5.9571983, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 5.9571983,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 5.9571983, 6.84797506, 4.17564479,
6.40258668, 5.9571983, 6.84797506, 4.62103317, 6.84797506,
5.9571983, 5.51180993, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.40258668, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
5.9571983, 6.84797506, 6.84797506, 6.84797506, 3.73025641,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 4.62103317, 5.06642155, 6.40258668, 6.84797506,
6.84797506, 4.62103317, 5.9571983, 6.84797506, 6.84797506,
6.84797506, 4.62103317, 6.84797506, 6.84797506, 5.51180993,
6.40258668, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 5.06642155,
6.84797506, 5.51180993, 6.40258668, 6.84797506, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 5.51180993,
6.84797506, 6.84797506, 5.51180993, 6.84797506, 6.40258668,
6.84797506, 6.84797506, 5.9571983, 6.84797506, 6.84797506,
3.73025641, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 5.9571983, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.40258668,
5.06642155, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.40258668, 6.84797506, 6.40258668, 5.06642155, 5.51180993,
5.9571983, 6.84797506, 6.40258668, 6.40258668, 6.84797506,
6.40258668, 6.84797506, 5.51180993, 5.06642155, 5.9571983,
6.40258668, 6.84797506, 6.84797506, 5.9571983, 5.51180993,
6.84797506, 6.40258668, 4.17564479, 6.40258668, 6.84797506,
5.9571983, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 3.73025641, 6.84797506, 6.84797506, 6.84797506,
5.51180993, 6.84797506, 6.40258668, 6.84797506, 6.40258668,
6.84797506, 6.40258668, 5.51180993, 6.84797506, 6.84797506,
6.84797506, 6.40258668, 6.40258668, 6.84797506, 6.84797506,
6.40258668, 5.06642155, 6.84797506, 5.51180993, 6.84797506,
5.06642155, 6.84797506, 4.62103317, 6.84797506, 6.84797506,
6.40258668, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
4.62103317, 6.40258668, 6.84797506, 4.17564479, 6.84797506,
6.84797506, 6.84797506, 6.40258668, 6.84797506, 6.84797506,
6.84797506, 6.40258668, 6.84797506, 5.51180993, 6.84797506,
6.84797506, 5.9571983, 6.40258668, 6.84797506, 5.06642155,
6.84797506, 6.84797506, 5.51180993, 6.84797506, 3.73025641,
6.40258668, 6.40258668, 6.84797506, 6.40258668, 6.84797506,
5.51180993, 6.40258668, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 5.9571983, 4.62103317, 6.84797506, 6.84797506,
5.06642155, 5.06642155, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 5.9571983, 6.84797506, 5.9571983,
4.62103317, 6.84797506, 4.62103317, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 3.73025641, 6.84797506, 5.51180993,
6.84797506, 6.40258668, 6.84797506, 6.40258668, 6.84797506,
5.06642155, 6.84797506, 6.84797506, 5.06642155, 3.73025641,
3.28486804, 4.17564479, 5.51180993, 6.40258668, 6.84797506,
4.62103317, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.40258668, 6.84797506, 6.84797506, 5.9571983, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 5.06642155, 6.84797506,
6.84797506, 6.84797506, 5.9571983, 6.84797506, 3.73025641,
6.84797506, 6.84797506, 5.51180993, 6.40258668, 4.17564479,
5.06642155, 6.84797506, 6.84797506, 4.17564479, 6.84797506,
6.40258668, 6.84797506, 6.40258668, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 5.06642155, 6.84797506,
6.84797506, 6.40258668, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 5.9571983, 6.84797506, 4.62103317, 6.84797506,
6.84797506, 6.84797506, 5.06642155, 6.40258668, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.40258668, 5.06642155, 6.40258668, 6.84797506, 6.84797506,
5.51180993, 6.84797506, 5.9571983, 6.84797506, 6.84797506,
6.84797506, 5.9571983, 6.84797506, 6.40258668, 6.40258668,
6.84797506, 6.84797506, 6.40258668, 6.84797506, 5.51180993,
6.84797506, 5.51180993, 5.51180993, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
5.06642155, 4.62103317, 6.84797506, 6.40258668, 6.84797506,
5.9571983, 6.84797506, 6.84797506, 6.84797506, 5.06642155,
6.84797506, 5.9571983, 6.84797506, 5.06642155, 6.84797506,
6.84797506, 5.06642155, 5.9571983, 6.40258668, 6.84797506,
4.62103317, 6.40258668, 6.84797506, 6.40258668, 5.9571983,
6.84797506, 4.62103317, 5.51180993, 5.06642155, 6.84797506,
6.84797506, 6.40258668, 5.51180993, 6.84797506, 5.9571983,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 4.62103317, 6.84797506,
6.40258668, 5.51180993, 5.9571983, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.40258668, 6.84797506, 6.84797506,
5.51180993, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 4.62103317,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.40258668, 6.84797506, 6.84797506, 6.84797506,
6.40258668, 6.84797506, 5.06642155, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 5.51180993, 6.40258668, 6.84797506,
5.9571983, 5.9571983, 6.84797506, 6.84797506, 6.84797506,
4.62103317, 6.84797506, 5.9571983, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.84797506, 4.17564479, 6.40258668, 6.40258668,
5.51180993, 6.84797506, 5.51180993, 6.84797506, 6.84797506,
6.84797506, 4.62103317, 6.84797506, 4.17564479, 6.84797506,
6.84797506, 5.51180993, 6.40258668, 5.06642155, 6.84797506,
6.84797506, 6.84797506, 6.40258668, 6.84797506, 5.9571983,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 5.51180993,
4.62103317, 6.84797506, 6.40258668, 6.84797506, 6.84797506,
5.9571983, 5.51180993, 5.9571983, 6.84797506, 4.62103317,
6.84797506, 6.84797506, 5.06642155, 6.40258668, 6.84797506,
5.06642155, 5.9571983, 6.84797506, 6.84797506, 6.40258668,
6.40258668, 5.9571983, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 5.06642155, 6.84797506, 6.40258668,
6.84797506, 5.06642155, 5.06642155, 5.9571983, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 5.06642155,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
5.9571983, 6.84797506, 5.06642155, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.40258668, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
3.73025641, 6.40258668, 5.51180993, 6.84797506, 5.51180993,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
5.06642155, 6.84797506, 6.40258668, 6.84797506, 6.84797506,
6.84797506, 6.40258668, 6.84797506, 6.84797506, 6.84797506,
6.40258668, 5.06642155, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.40258668, 6.84797506, 5.9571983, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 5.51180993, 5.06642155,
6.84797506, 6.84797506, 6.84797506, 4.62103317, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 4.17564479, 6.84797506,
5.51180993, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
3.73025641, 6.84797506, 6.40258668, 6.84797506, 6.84797506,
6.84797506, 5.06642155, 6.84797506, 6.84797506, 4.62103317,
6.40258668, 6.84797506, 5.51180993, 6.84797506, 6.84797506,
5.9571983, 6.84797506, 6.84797506, 6.84797506, 5.9571983,
5.06642155, 6.84797506, 5.06642155, 6.40258668, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.40258668, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 4.17564479, 5.51180993, 6.84797506, 6.84797506,
6.40258668, 4.62103317, 6.84797506, 5.9571983, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 4.17564479, 6.40258668, 6.84797506,
6.84797506, 5.9571983, 6.84797506, 5.51180993, 6.84797506,
5.9571983, 5.06642155, 6.84797506, 6.84797506, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 5.9571983,
5.51180993, 6.84797506, 5.9571983, 6.40258668, 4.62103317,
6.84797506, 5.06642155, 4.17564479, 5.51180993, 6.84797506,
6.40258668, 5.9571983, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.40258668, 5.9571983, 6.40258668, 6.84797506,
5.9571983, 6.84797506, 5.9571983, 5.51180993, 4.17564479,
5.9571983, 6.40258668, 6.84797506, 5.51180993, 6.40258668,
5.51180993, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.40258668, 6.84797506, 5.9571983, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 5.06642155, 5.9571983, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 5.06642155,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 5.51180993, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 3.73025641,
4.17564479, 6.84797506, 5.06642155, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 4.17564479,
5.51180993, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 5.9571983, 6.84797506, 6.84797506, 6.84797506,
4.62103317, 6.84797506, 5.06642155, 5.06642155, 6.84797506,
6.40258668, 5.9571983, 6.84797506, 4.62103317, 6.84797506,
6.84797506, 6.84797506, 5.9571983, 6.84797506, 6.84797506,
5.9571983, 5.51180993, 6.84797506, 5.06642155, 6.84797506,
4.62103317, 5.9571983, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 4.62103317, 6.84797506,
6.84797506, 6.84797506, 5.51180993, 5.51180993, 6.84797506,
6.84797506, 6.84797506, 5.9571983, 6.40258668, 6.84797506,
6.84797506, 5.51180993, 6.84797506, 6.84797506, 5.9571983,
5.51180993, 6.84797506, 6.84797506, 6.84797506, 4.17564479,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 5.9571983, 6.84797506, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 4.62103317, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 3.73025641,
6.84797506, 6.84797506, 6.84797506, 5.9571983, 4.62103317,
5.51180993, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 5.9571983, 6.40258668, 6.84797506,
5.51180993, 5.9571983, 5.9571983, 6.84797506, 6.84797506,
5.51180993, 6.84797506, 6.84797506, 5.51180993, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 5.51180993,
6.40258668, 5.51180993, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 5.51180993, 6.84797506, 5.9571983, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 5.06642155, 6.84797506,
6.84797506, 6.84797506, 6.40258668, 5.06642155, 5.51180993,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.40258668, 6.84797506, 6.84797506, 5.9571983, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 6.84797506,
5.06642155, 4.17564479, 6.84797506, 6.84797506, 5.06642155,
6.84797506, 6.84797506, 6.40258668, 6.84797506, 5.51180993,
6.84797506, 6.84797506, 6.84797506, 4.17564479, 5.9571983,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 5.9571983,
5.51180993, 6.84797506, 6.40258668, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 5.9571983,
5.9571983, 6.84797506, 5.9571983, 4.17564479, 6.40258668,
6.84797506, 6.84797506, 6.84797506, 6.40258668, 5.9571983,
5.51180993, 6.84797506, 5.51180993, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 5.51180993, 5.06642155,
6.84797506, 6.40258668, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 6.84797506, 6.84797506, 5.51180993, 6.40258668,
6.84797506, 6.84797506, 3.28486804, 5.9571983, 6.84797506,
3.73025641, 6.84797506, 6.84797506, 6.84797506, 4.17564479,
6.84797506, 6.40258668, 6.40258668, 6.84797506, 5.51180993,
6.84797506, 6.84797506, 6.84797506, 6.84797506, 6.84797506,
6.84797506, 4.62103317, 6.40258668, 6.84797506, 6.40258668,
5.06642155, 6.84797506, 6.84797506, 5.51180993, 4.62103317,
6.84797506, 6.40258668, 6.84797506, 5.06642155, 5.9571983,
6.40258668, 5.51180993, 6.84797506, 6.84797506, 6.84797506,
6.40258668, 6.84797506, 6.84797506, 4.17564479, 6.84797506,
5.06642155, 6.84797506, 3.56230611, 4.89847125, 5.34385962,
4.45308287, 5.789248, 5.789248, 5.789248, 5.789248,
5.789248, 5.34385962, 5.34385962, 5.789248, 5.789248,
4.00769449, 5.34385962, 4.45308287, 5.789248, 5.34385962,
3.56230611, 2.67152936, 5.789248, 5.34385962, 5.789248,
2.67152936, 5.789248, 5.34385962, 3.56230611, 4.89847125,
5.789248, 3.11691773, 5.789248, 5.789248, 4.89847125,
5.789248, 3.56230611, 3.56230611, 5.789248, 5.789248,
5.789248, 4.89847125, 5.789248, 4.89847125, 4.00769449,
5.789248, 3.56230611, 5.789248, 2.22614098, 3.11691773,
5.789248, 5.789248, 4.00769449, 3.11691773, 5.789248,
5.789248, 5.789248, 5.789248, 5.789248, 5.789248,
2.67152936, 5.789248, 5.789248, 4.00769449, 3.56230611,
4.45308287, 5.789248, 5.789248, 4.89847125, 5.789248,
3.56230611, 5.789248, 4.89847125, 2.67152936, 5.34385962,
4.45308287, 5.789248, 4.45308287, 5.789248, 5.789248,
4.89847125, 4.45308287, 5.789248, 5.789248, 5.789248,
5.789248, 5.789248, 4.89847125, 5.789248, 5.34385962,
5.34385962, 5.789248, 5.789248, 3.56230611, 5.789248,
3.56230611, 5.789248, 4.45308287, 5.789248, 5.789248,
5.34385962, 5.789248, 3.11691773, 5.789248, 5.789248,
3.11691773, 4.00769449, 5.789248, 5.789248, 5.34385962,
3.56230611, 3.11691773, 5.789248, 4.45308287, 5.789248,
5.789248, 5.789248, 3.11691773, 5.789248, 5.789248,
5.789248, 5.789248, 4.45308287, 5.789248, 4.00769449,
5.789248, 4.45308287, 4.45308287, 5.789248, 4.89847125,
4.00769449, 4.00769449, 4.89847125, 4.00769449, 5.789248,
5.789248, 5.789248, 5.789248, 5.789248, 5.789248,
5.789248, 5.34385962, 3.56230611, 5.789248, 3.56230611,
5.789248, 5.789248, 5.789248, 2.67152936, 5.789248,
5.789248, 5.789248, 5.789248, 4.89847125, 4.89847125,
5.789248, 2.67152936, 5.789248, 4.89847125, 5.789248,
5.789248, 4.45308287, 3.11691773, 5.789248, 4.89847125,
5.789248, 2.67152936, 2.67152936, 5.34385962, 4.00769449,
5.789248, 5.789248, 5.34385962, 5.789248, 5.789248,
4.00769449, 5.789248, 5.34385962, 4.89847125, 5.789248,
2.67152936, 5.34385962, 5.789248, 5.789248, 4.45308287,
5.34385962, 5.789248, 5.789248, 5.789248, 5.789248,
2.67152936, 5.789248, 3.56230611, 4.00769449, 5.34385962,
5.789248, 3.11691773, 2.67152936, 5.789248, 4.45308287,
5.789248, 3.56230611, 5.34385962, 4.89847125, 5.789248,
3.56230611, 4.00769449, 5.789248, 3.11691773, 5.789248,
5.789248, 3.56230611, 5.34385962, 4.89847125, 4.89847125,
5.789248, 5.789248, 2.67152936, 3.11691773, 5.789248,
5.789248, 5.789248, 5.789248, 5.34385962, 5.34385962,
5.789248, 5.789248, 4.89847125, 5.789248, 4.45308287,
5.34385962, 5.789248, 4.45308287, 4.45308287, 5.789248,
5.789248, 3.56230611, 4.89847125, 3.56230611, 4.89847125,
4.45308287, 5.789248, 4.00769449, 5.789248, 4.89847125,
5.789248, 5.789248, 5.789248, 4.45308287, 4.00769449,
5.789248, 4.89847125, 4.89847125, 3.56230611, 5.789248,
5.789248, 5.34385962, 3.56230611, 3.11691773, 3.56230611,
4.00769449, 5.789248, 4.45308287, 4.89847125, 5.789248,
5.789248, 5.789248, 4.00769449, 4.89847125, 2.67152936,
5.789248, 5.789248, 5.789248, 4.89847125, 5.34385962,
5.789248, 5.789248, 5.789248, 5.789248, 5.789248,
5.789248, 5.789248, 4.00769449, 5.34385962, 4.89847125,
5.789248, 4.89847125, 4.00769449, 5.789248, 5.789248,
4.89847125, 5.789248, 5.34385962, 5.789248, 2.67152936,
5.789248, 5.34385962, 4.00769449, 4.00769449, 5.789248,
5.34385962, 3.56230611, 5.789248, 4.89847125, 5.34385962,
5.789248, 4.00769449, 4.45308287, 5.789248, 5.34385962,
4.00769449, 3.56230611, 5.34385962, 2.67152936, 5.789248,
3.56230611, 4.89847125, 4.45308287, 5.789248, 5.789248,
5.789248, 5.789248, 5.789248, 4.00769449, 5.789248,
4.45308287, 5.789248, 5.789248, 5.34385962, 5.789248,
5.789248, 5.789248, 5.789248, 5.34385962, 5.34385962,
5.789248, 5.34385962, 5.789248, 5.789248, 5.789248,
5.789248, 5.34385962, 4.45308287, 5.789248, 5.34385962,
5.789248, 5.34385962, 5.789248, 5.789248, 5.789248,
5.789248, 4.45308287, 5.34385962, 3.56230611, 2.67152936,
5.789248, 5.789248, 3.11691773, 5.789248, 4.45308287,
5.789248, 5.789248, 5.789248, 5.789248, 4.00769449,
4.00769449, 4.00769449, 5.789248, 5.789248, 5.789248,
5.789248, 4.89847125, 3.11691773, 4.45308287, 5.789248,
4.00769449, 5.789248, 5.789248, 5.789248, 5.789248,
5.789248, 4.00769449, 5.789248, 5.789248, 5.789248,
5.789248, 4.00769449, 5.789248, 4.00769449, 3.56230611,
5.789248, 4.89847125, 5.789248, 5.789248, 5.789248,
5.789248, 5.789248, 5.789248, 5.789248, 5.789248,
4.89847125, 5.34385962, 5.789248, 5.789248, 5.789248,
5.789248, 5.789248, 5.789248, 5.789248, 5.34385962,
3.56230611, 5.34385962, 5.789248, 3.56230611, 5.789248,
4.00769449, 5.789248, 5.789248, 5.789248, 4.00769449,
3.11691773, 5.789248, 5.789248, 4.00769449, 5.789248,
5.789248, 5.789248, 5.789248, 5.789248, 5.789248,
5.789248, 5.789248, 5.789248, 5.789248, 5.789248,
5.789248, 5.789248, 3.56230611, 5.789248, 5.789248,
5.789248, 5.789248, 3.56230611, 5.34385962, 4.00769449,
5.789248, 4.89847125, 4.89847125, 4.00769449, 5.789248,
5.789248, 4.45308287, 2.67152936, 5.789248, 5.789248,
4.00769449, 5.789248, 3.56230611, 4.00769449, 5.789248,
5.789248, 4.89847125, 5.789248, 4.45308287, 5.34385962,
5.34385962, 3.11691773, 3.56230611, 5.789248, 4.45308287,
5.789248, 4.89847125, 4.00769449, 4.89847125, 4.89847125,
5.789248, 5.789248, 5.34385962, 4.00769449, 5.789248,
5.789248, 5.789248, 5.789248, 5.789248, 5.789248,
5.789248, 3.56230611, 4.45308287, 4.00769449, 4.89847125,
4.45308287, 3.56230611, 4.00769449, 5.789248, 5.789248,
5.789248, 5.789248, 3.11691773, 5.789248, 5.789248,
5.789248, 5.789248, 5.789248, 4.00769449, 4.89847125,
5.34385962, 3.56230611, 3.11691773, 5.789248, 4.00769449,
5.789248, 3.56230611, 5.789248, 5.789248, 4.00769449,
5.789248, 4.00769449, 5.789248, 5.789248, 5.789248,
5.789248, 4.89847125, 4.00769449, 4.89847125, 5.34385962,
2.67152936, 5.789248, 4.45308287, 5.789248, 4.89847125,
5.789248, 5.34385962, 5.789248, 5.789248, 5.789248,
3.56230611, 2.67152936, 5.789248, 5.789248, 5.789248,
4.00769449, 4.89847125, 5.789248, 5.34385962, 4.89847125,
5.34385962, 5.789248, 5.789248, 5.34385962, 5.789248,
5.789248, 5.789248, 2.67152936, 5.34385962, 5.789248,
5.789248, 4.89847125, 4.89847125, 5.34385962, 5.789248,
5.789248, 4.45308287, 3.11691773, 3.56230611, 5.789248,
5.789248, 5.789248, 5.789248, 5.789248, 5.34385962,
5.789248, 5.789248, 4.00769449, 4.89847125, 5.789248,
3.56230611, 5.789248, 5.34385962, 2.67152936, 5.789248,
5.34385962, 5.789248, 5.789248, 5.789248, 5.34385962,
5.789248, 5.789248, 5.789248, 5.789248, 5.789248,
5.789248, 4.45308287, 5.789248, 3.11691773, 5.789248,
5.34385962, 4.89847125, 5.34385962, 5.789248, 4.89847125,
5.789248, 4.00769449, 4.45308287, 5.789248, 5.789248,
5.789248, 5.789248, 5.34385962, 5.789248, 4.00769449,
4.89847125, 4.00769449, 5.789248, 3.56230611, 5.789248,
5.789248, 5.789248, 5.789248, 3.56230611, 5.789248,
4.89847125, 5.789248, 5.789248, 5.789248, 5.789248,
5.789248, 5.789248, 5.789248, 5.789248, 4.00769449,
2.22614098, 5.789248, 5.789248, 5.789248, 5.789248,
4.89847125, 5.789248, 3.56230611, 5.789248, 5.789248,
4.00769449, 5.789248, 5.789248, 5.789248, 5.789248,
5.789248, 5.789248, 3.11691773, 3.11691773, 5.789248,
5.789248, 5.789248, 4.00769449, 5.789248, 5.34385962,
4.45308287, 5.34385962, 4.45308287, 4.45308287, 4.89847125,
5.789248, 4.89847125, 5.789248, 3.56230611, 5.789248,
5.789248, 5.789248, 5.789248, 5.789248, 3.11691773,
5.789248, 4.00769449, 5.789248, 4.89847125, 5.789248,
4.00769449, 5.789248, 5.789248, 5.789248, 5.789248,
5.789248, 5.34385962, 5.789248, 4.45308287, 5.34385962,
4.45308287, 5.789248, 4.00769449, 5.789248, 3.56230611,
5.34385962, 5.789248, 5.789248, 4.45308287, 4.00769449,
3.56230611, 5.789248, 5.789248, 5.789248, 4.45308287,
5.789248, 5.789248, 5.34385962, 4.89847125, 4.45308287,
3.11691773, 5.789248, 3.56230611, 3.11691773, 5.789248,
5.789248, 3.11691773, 3.11691773, 5.789248, 4.45308287,
4.45308287, 5.789248, 5.789248, 4.00769449, 4.00769449,
3.56230611, 5.789248, 4.00769449, 3.56230611, 5.789248,
4.00769449, 5.34385962, 5.789248, 3.56230611, 5.789248,
5.34385962, 5.789248, 4.45308287, 4.00769449, 5.789248,
5.789248, 4.45308287, 5.34385962, 5.789248, 5.789248,
5.789248, 5.789248, 5.789248, 5.789248, 5.34385962,
3.11691773, 5.34385962, 2.67152936, 4.00769449, 5.789248,
3.56230611, 5.789248, 5.34385962, 5.789248, 2.67152936,
5.789248, 5.789248, 5.789248, 5.789248, 3.11691773,
5.789248, 5.34385962, 3.56230611, 4.45308287, 4.89847125,
4.00769449, 5.789248, 5.789248, 5.34385962, 4.00769449,
4.89847125, 4.45308287, 5.789248, 5.789248, 5.789248,
5.789248, 5.789248, 4.00769449, 5.789248, 5.789248,
5.789248, 5.789248, 5.789248, 5.789248, 5.34385962,
3.56230611, 5.789248, 5.789248, 5.34385962, 5.789248,
3.11691773, 5.789248, 4.89847125, 5.789248, 4.89847125,
5.789248, 5.34385962, 5.789248, 5.789248, 5.789248,
5.789248, 5.34385962, 2.67152936, 5.789248, 5.789248,
2.67152936, 3.56230611, 5.789248, 5.789248, 2.67152936,
4.45308287, 3.56230611, 4.45308287, 5.789248, 5.789248,
5.789248, 5.789248, 5.789248, 5.789248, 4.45308287,
4.89847125, 5.34385962, 5.789248, 5.789248, 5.789248,
5.789248, 5.34385962, 4.00769449, 5.789248, 5.34385962,
5.789248, 2.67152936, 2.67152936, 5.789248, 3.56230611,
5.789248, 3.56230611, 5.789248, 4.45308287, 2.67152936,
5.789248, 5.789248, 2.67152936, 4.89847125, 5.789248,
5.789248, 3.11691773, 5.789248, 4.00769449, 5.789248,
5.789248, 3.11691773, 4.00769449, 4.89847125, 4.89847125,
5.789248, 4.00769449, 4.45308287, 5.789248, 4.45308287,
5.789248, 3.11691773, 4.45308287, 4.89847125, 3.56230611,
5.789248, 5.789248, 3.56230611, 3.56230611, 3.56230611,
5.789248, 5.789248, 4.00769449, 4.00769449, 3.11691773,
5.789248, 5.789248, 2.67152936, 4.00769449, 5.789248,
2.67152936, 3.56230611, 3.56230611, 4.45308287, 5.789248,
3.56230611, 5.789248, 5.789248, 5.789248, 5.789248,
5.789248, 5.789248, 5.34385962, 5.789248, 5.789248,
5.789248, 5.789248, 5.789248, 5.789248, 5.789248,
5.789248, 4.89847125, 5.789248, 5.789248, 4.89847125,
5.789248, 5.789248, 4.89847125, 4.89847125, 5.789248,
5.34385962, 4.45308287, 5.789248, 4.89847125, 4.00769449,
4.45308287, 5.789248, 2.67152936, 4.45308287, 5.34385962,
5.789248, 5.789248, 5.789248, 5.789248, 3.11691773,
5.34385962, 5.789248, 4.89847125, 5.789248, 4.45308287,
5.789248, 5.789248, 4.89847125, 4.45308287, 5.789248,
5.789248, 5.789248, 5.789248, 5.789248, 4.45308287,
4.45308287, 3.11691773, 4.89847125, 5.789248, 3.11691773,
3.11691773, 5.789248, 4.00769449, 5.34385962, 3.11691773,
4.89847125, 3.11691773, 3.56230611, 4.89847125, 5.789248,
5.789248, 4.45308287, 4.89847125, 5.789248, 4.45308287,
5.789248, 4.89847125, 4.45308287, 2.67152936, 5.789248,
5.789248, 5.789248, 4.89847125, 5.789248, 5.789248,
5.789248, 5.789248, 4.89847125, 5.34385962, 4.45308287,
5.789248, 4.00769449, 3.56230611, 4.89847125, 5.789248,
4.45308287, 5.789248, 5.789248, 3.11691773, 5.789248,
5.34385962, 5.789248, 5.789248, 5.789248, 3.56230611,
2.22614098, 5.789248, 5.789248, 3.56230611, 5.34385962,
4.00769449, 5.789248, 2.67152936, 5.789248, 4.00769449,
5.789248, 5.789248, 4.00769449, 2.67152936, 5.789248,
4.89847125, 4.45308287, 5.789248, 5.34385962, 5.789248,
5.789248, 4.45308287, 5.789248, 5.789248, 5.789248,
4.00769449, 4.89847125, 5.789248, 4.89847125, 5.789248,
4.89847125, 3.56230611, 4.00769449, 5.789248, 5.789248,
5.789248, 5.34385962, 5.789248, 5.789248, 5.34385962,
5.789248, 5.789248, 5.789248, 5.789248, 5.789248,
5.789248, 5.789248, 5.789248, 5.789248, 5.789248,
3.56230611, 3.56230611, 4.00769449, 5.789248, 4.45308287,
3.56230611, 5.789248, 4.89847125, 4.89847125, 3.11691773,
4.00769449, 5.789248, 5.34385962, 5.789248, 5.789248,
5.789248, 5.789248, 4.45308287, 5.34385962, 4.45308287,
5.789248, 3.11691773, 5.789248, 4.45308287, 4.45308287,
4.89847125, 5.789248, 4.89847125, 4.45308287, 5.789248,
5.34385962, 3.56230611, 3.56230611, 5.34385962, 5.789248,
4.00769449, 5.34385962, 4.45308287, 4.45308287, 3.56230611,
5.789248, 4.00769449, 5.789248, 5.789248, 4.89847125,
5.34385962, 3.11691773, 4.00769449, 5.789248, 5.34385962,
5.789248, 5.789248, 5.34385962, 5.789248, 4.45308287,
5.34385962, 4.89847125, 5.789248, 5.789248, 5.34385962,
3.56230611, 5.789248, 5.789248, 5.789248, 5.34385962,
5.34385962, 5.789248, 3.11691773, 5.789248, 5.789248,
3.56230611, 5.789248, 5.789248, 5.789248, 4.45308287,
5.789248, 4.89847125, 5.789248, 5.789248, 5.789248,
4.00769449, 4.45308287, 5.34385962, 5.789248, 3.56230611,
4.00769449, 5.34385962, 4.45308287, 5.789248, 5.34385962,
5.34385962, 2.67152936, 5.789248, 5.789248, 5.789248,
5.789248, 5.789248, 5.34385962, 5.789248, 4.89847125,
5.789248, 5.789248, 5.789248, 4.45308287, 5.789248,
3.11691773, 4.00769449, 5.789248, 5.789248, 5.34385962,
5.789248, 5.789248, 5.789248, 5.789248, 5.789248,
5.789248, 5.789248, 2.22614098, 5.789248, 4.00769449,
5.34385962, 4.89847125, 5.789248, 5.789248, 4.00769449,
5.789248, 3.56230611, 2.67152936, 5.789248, 3.56230611,
2.67152936, 4.89847125, 5.789248, 5.789248, 5.789248,
4.45308287, 5.789248, 4.89847125, 4.00769449, 2.67152936,
4.89847125, 5.789248, 2.22614098, 3.56230611, 4.45308287,
5.34385962, 5.34385962, 3.11691773, 4.45308287, 4.45308287,
3.11691773, 4.45308287, 5.34385962, 4.45308287, 5.34385962,
4.00769449, 4.45308287, 5.789248, 5.789248, 5.789248,
5.789248, 4.00769449, 5.789248, 5.789248, 5.789248,
4.45308287, 5.789248, 5.789248, 5.789248, 4.00769449,
5.34385962, 4.00769449, 4.45308287, 4.89847125, 4.00769449,
5.789248, 5.789248, 5.789248, 2.67152936, 5.789248,
5.789248, 5.789248, 5.789248, 5.789248, 5.789248,
5.789248, 4.00769449, 3.11691773, 5.34385962, 4.89847125,
5.34385962])
class Committee(object):
    """Reference results for a GLM fit to the committee data.

    Values were computed externally in R (and Stata where noted) and are
    consumed by the GLM test suite as expected results.
    """
    def __init__(self):
        # One row per observation; columns are the different residual
        # types.  NOTE(review): column ordering is not stated here --
        # confirm against the test that unpacks this array.
        self.resids = np.array([
            [-5.04950800e-01, -6.29721800e-01, -8.35499100e+01,
             -1.30628500e+00, -6.62028600e+00],
            [-2.34152200e-01, -2.55423500e-01, -2.16830700e+02,
             -7.58866000e-01, -7.18370200e+00],
            [1.02423700e+00, 7.98775800e-01, 4.83736300e+02,
             2.50351500e+00, 2.25135300e+01],
            [-2.85061700e-01, -3.17796600e-01, -7.04115100e+04,
             -2.37991800e+00, -1.41745600e+02],
            [2.09902500e-01, 1.96787700e-01, 2.24751400e+03,
             9.51945500e-01, 2.17724200e+01],
            [-4.03483500e-01, -4.75741500e-01, -1.95633600e+04,
             -2.63502600e+00, -8.89461400e+01],
            [-1.64413400e-01, -1.74401100e-01, -1.73310300e+04,
             -1.16235500e+00, -5.34213500e+01],
            [-4.29607700e-01, -5.13466700e-01, -5.30037000e+03,
             -2.24496200e+00, -4.78260300e+01],
            [3.23713000e-01, 2.94184600e-01, 4.11079400e+03,
             1.48684400e+00, 3.65598400e+01],
            [1.50367200e-01, 1.43429400e-01, 7.28532100e+03,
             8.85542900e-01, 3.31355000e+01],
            [4.21288600e-01, 3.73428000e-01, 1.37315700e+03,
             1.52133200e+00, 2.41570200e+01],
            [4.50658700e-01, 3.96586700e-01, 1.70146900e+03,
             1.66177900e+00, 2.78032600e+01],
            [2.43537500e-01, 2.26174000e-01, 3.18402300e+03,
             1.13656200e+00, 2.79073400e+01],
            [1.05182900e+00, 8.16205400e-01, 6.00135200e+03,
             3.89079700e+00, 7.97131300e+01],
            [-5.54450300e-01, -7.12749000e-01, -2.09485200e+03,
             -2.45496500e+00, -3.42189900e+01],
            [-6.05750600e-01, -8.06411100e-01, -2.74738200e+02,
             -1.90774400e+00, -1.30510500e+01],
            [-3.41215700e-01, -3.90244600e-01, -6.31138000e+02,
             -1.27022900e+00, -1.47600100e+01],
            [2.21898500e-01, 2.07328700e-01, 6.91135800e+02,
             8.16876400e-01, 1.24392900e+01],
            [2.45592500e-01, 2.26639200e-01, 1.99250600e-01,
             2.57948300e-01, 2.74723700e-01],
            [-7.58952600e-01, -1.15300800e+00, -2.56739000e+02,
             -2.40716600e+00, -1.41474200e+01]])
        self.null_deviance = 27.81104693643434  # from R, Rpy bug
        self.params = np.array([
            -0.0268147, 1.25103364, 2.91070663,
            -0.34799563, 0.00659808, -0.31303026, -6.44847076])
        # Standard errors corresponding to self.params.
        self.bse = np.array([
            1.99956263e-02, 4.76820254e-01,
            6.48362654e-01, 4.17956107e-01, 1.41512690e-03, 1.07770186e-01,
            1.99557656e+00])
        # AIC differs between R and Stata by convention (see Wfs comments).
        self.aic_R = 216.66573352377935
        self.aic_Stata = 10.83328660860436
        self.deviance = 5.615520158267981
        self.scale = 0.38528595746569905
        self.llf = -101.33286676188968  # from R
        self.llf_Stata = -101.3328660860436  # same as R
        self.bic_Stata = -33.32900074962649
        self.chi2 = 5.008550263545408
        self.df_model = 6
        self.df_resid = 13
        self.fittedvalues = np.array([
            12.62019383, 30.18289514, 21.48377849, 496.74068604,
            103.23024673, 219.94693494, 324.4301163, 110.82526477,
            112.44244488, 219.86056381, 56.84399998, 61.19840382,
            114.09290269, 75.29071944, 61.21994387, 21.05130889,
            42.75939828, 55.56133536, 0.72532053, 18.14664665])
class Wfs(object):
    """
    Wfs used for TestGlmPoissonOffset
    Results are from Stata and R.
    """
    def __init__(self):
        # Residuals are kept in the shared glm_test_resids module.
        self.resids = glm_test_resids.wfs_resids
        self.null_deviance = 3731.85161919  # from R
        self.params = [
            .9969348, 1.3693953, 1.6137574, 1.7849111, 1.9764051,
            .11241858, .15166023, .02297282, -.10127377, -.31014953,
            -.11709716]
        # Standard errors corresponding to self.params.
        self.bse = [
            .0527437, .0510688, .0511949, .0512138, .0500341,
            .0324963, .0283292, .0226563, .0309871, .0552107, .0549118]
        self.aic_R = 522.14215776  # R adds 2 for dof to AIC
        self.aic_Stata = 7.459173652869477  # stata divides by nobs
        # self.deviance = 70.6652992116034 # from Stata
        self.deviance = 70.665301270867  # from R
        self.scale = 1.0
        self.llf = -250.0710778504317  # from Stata, ours with scale=1
        self.bic_Stata = -179.9959200693088  # no bic in R?
        self.df_model = 10
        self.df_resid = 59
        # TODO: taken from Stata; not available in sm yet
        self.chi2 = 2699.138063147485
        self.fittedvalues = [
            7.11599, 19.11356, 33.76075, 33.26743, 11.94399,
            27.49849, 35.07923, 37.22563, 64.18037, 108.0408,
            100.0948, 35.67896, 24.10508, 73.99577, 52.2802,
            38.88975, 35.06507, 102.1198, 107.251, 41.53885,
            196.3685, 335.8434, 205.3413, 43.20131, 41.98048,
            96.65113, 63.2286, 30.78585, 70.46306, 172.2402,
            102.5898, 43.06099, 358.273, 549.8983, 183.958,
            26.87062, 62.53445, 141.687, 52.47494, 13.10253,
            114.9587, 214.803, 90.33611, 18.32685, 592.5995,
            457.4376, 140.9273, 3.812064, 111.3119, 97.62744,
            57.48056, 19.43552, 130.4872, 151.7268, 69.67963,
            13.04879, 721.728, 429.2136, 128.2132, 9.04735,
            301.7067, 177.3487, 46.40818, 4.707507, 330.4211,
            330.7497, 84.38604, 1456.757, 451.005, 67.51025]
class CpunishTweediePower15(object):
    """
    # From R
    setwd('c:/workspace')
    data <- read.csv('cpunish.csv', sep=",")
    library(statmod)
    library(tweedie)
    summary(glm(EXECUTIONS ~ INCOME + SOUTH - 1,
                family=tweedie(var.power=1.5, link.power=1),
                data=data))
    """
    def __init__(self):
        # Residuals by type, taken from the R fit shown in the class
        # docstring (var.power=1.5, identity link).
        resid_resp = [
            28.90498242, 0.5714367394, 4.3135711827, -3.7417822942,
            -4.9544111888, 0.4666602184, 0.0747051827, -6.114236142,
            -1.0048540116, -6.9747602544, -0.7626907093,
            -0.5688093336, -6.9845579527, -1.1594503855,
            -0.6365453438, -0.3994222036, -0.732355528]
        resid_dev = [
            3.83881147757395, 0.113622743768915, 2.01981988071128,
            -0.938107751845672, -1.29607304923555, 0.316205676540778,
            0.045273675744568, -1.69968893354602, -0.699080227540624,
            -2.1707839733642, -0.568738719015137, -0.451266938413727,
            -2.17218106358745, -0.774613533242944, -0.493831656345955,
            -0.336453094366771, -0.551210030548659]
        resid_pear = [
            6.02294407053171, 0.115516970886608, 2.9148208139849,
            -0.806210703943481, -1.04601155367613, 0.338668788938945,
            0.045708693925888, -1.27176471794657, -0.5964031365026,
            -1.46974255264233, -0.498557360800493,
            -0.405777068096011, -1.47045242302365, -0.65086941662954,
            -0.439928270112046, -0.310433407220704,
            -0.485001313250992]
        resid_work = [
            28.9049727916181, 0.571427719513967, 4.31357425907762,
            -3.74179256698823, -4.9544210736226, 0.466663015515745,
            0.0747086948013966, -6.114245735344, -1.00485035431368,
            -6.97477010217068, -0.76268749374494, -0.568806471745149,
            -6.98456778258272, -1.15944644619981, -0.636542358439925,
            -0.399419650775458, -0.732352367853816]
        self.resid_response = resid_resp
        self.resid_deviance = resid_dev
        self.resid_pearson = resid_pear
        self.resid_working = resid_work
        # self.null_deviance = 3731.85161919 # N/A
        # Coefficients (INCOME, SOUTH) and their standard errors.
        self.params = [0.0000471043, 6.4721324886]
        self.bse = [0.0000246888, 3.5288126173]
        # self.aic_R = 522.14215776 # R adds 2 for dof to AIC
        # self.aic_Stata = 7.459173652869477 # stata divides by nobs
        # self.deviance = 70.6652992116034 # from Stata
        self.deviance = 36.087307138233  # from R
        # self.scale = 1.0
        # self.llf = -250.0710778504317 # from Stata, ours with scale=1
        # self.bic_Stata = -179.9959200693088 # no bic in R?
        self.df_model = 1
        self.df_resid = 15
        # TODO: taken from Stata; not available in sm yet
        # self.chi2 = 2699.138063147485
        self.fittedvalues = [
            8.09501758000751, 8.42856326056927,
            1.68642881732415, 7.74178229423817,
            7.95441118875248, 1.53333978161934,
            1.92529481734232, 8.11423614202829,
            2.00485401159015, 7.97476025442155,
            1.76269070926448, 1.56880933358418,
            7.98455795270665, 2.15945038549266,
            1.63654534384372, 1.39942220361664,
            1.73235552803559]
class CpunishTweediePower2(object):
    """
    # From R
    setwd('c:/workspace')
    data <- read.csv('cpunish.csv', sep=",")
    library(statmod)
    library(tweedie)
    summary(glm(EXECUTIONS ~ INCOME + SOUTH - 1,
                family=tweedie(var.power=2, link.power=1),
                data=data))
    """
    def __init__(self):
        # Residuals by type, taken from the R fit shown in the class
        # docstring (var.power=2, identity link).
        resid_resp = [
            28.9397568116168, 0.605199215492085, 4.30845487128123,
            -3.7059362524505, -4.91921022348665, 0.46200835064931,
            0.068864196242604, -6.07952005594693, -1.01093636580438,
            -6.9396210244365, -0.768038385056284, -0.573568809339664,
            -6.94944844711606, -1.16600175635393, -0.641510318056987,
            -0.403667790321936, -0.737611172529194]
        resid_dev = [
            2.03295746713119, 0.0704291140028282, 1.60058476017728,
            -0.591230836989137, -0.836067997150736, 0.274690511542166,
            0.0352446721149477, -1.13465831620614, -0.625909330466303,
            -1.5477830210949, -0.520517540529698, -0.421531194473357,
            -1.54848147513823, -0.684927882583903, -0.45784673829438,
            -0.320960880764019, -0.505992145923248]
        resid_pear = [
            3.59043221590711, 0.0720921473930558, 2.54705286789752,
            -0.480919661289957, -0.621174344999372,
            0.300397177607798, 0.0356599448410699,
            -0.752460543924524, -0.502719222246499,
            -0.874049404005278, -0.434401419984914,
            -0.364501892726482, -0.874205109115113,
            -0.538319857282425, -0.390804925805356,
            -0.287580717535275, -0.424497254731367]
        # With the identity link the working residuals coincide with the
        # response residuals.
        resid_work = [
            28.9397568116168, 0.605199215492085, 4.30845487128123,
            -3.7059362524505, -4.91921022348665, 0.46200835064931,
            0.068864196242604, -6.07952005594693, -1.01093636580438,
            -6.9396210244365, -0.768038385056284, -0.573568809339664,
            -6.94944844711606, -1.16600175635393, -0.641510318056987,
            -0.403667790321936, -0.737611172529194]
        self.resid_response = resid_resp
        self.resid_deviance = resid_dev
        self.resid_pearson = resid_pear
        self.resid_working = resid_work
        # self.null_deviance = 3731.85161919 # N/A
        # Coefficients (INCOME, SOUTH) and their standard errors.
        self.params = [4.72472244209477e-05, 6.43243456540827]
        self.bse = [1.86839521185429e-05, 3.83231672422612]
        # self.aic_R = 522.14215776 # R adds 2 for dof to AIC
        # self.aic_Stata = 7.459173652869477 # stata divides by nobs
        # self.deviance = 70.6652992116034 # from Stata
        self.deviance = 15.7840685407599  # from R
        # self.scale = 1.0
        # self.llf = -250.0710778504317 # from Stata, ours with scale=1
        # self.bic_Stata = -179.9959200693088 # no bic in R?
        self.df_model = 1
        self.df_resid = 15
        # TODO: taken from Stata; not available in sm yet
        # self.chi2 = 2699.138063147485
        self.fittedvalues = [
            8.06024318838318, 8.39480078450791,
            1.69154512871877, 7.7059362524505,
            7.91921022348665, 1.53799164935069,
            1.9311358037574, 8.07952005594693,
            2.01093636580438, 7.9396210244365,
            1.76803838505628, 1.57356880933966,
            7.94944844711606, 2.16600175635393,
            1.64151031805699, 1.40366779032194,
            1.73761117252919]
class CpunishTweedieLog1(object):
    """
    # From R
    setwd('c:/workspace')
    data <- read.csv('cpunish.csv', sep=",")
    library(statmod)
    library(tweedie)
    summary(glm(EXECUTIONS ~ INCOME + SOUTH - 1,
                family=tweedie(var.power=1, link.power=0),
                data=data))
    """
    def __init__(self):
        # Residuals by type, taken from the R fit shown in the class
        # docstring (var.power=1, log link).
        resid_resp = [
            28.7231009386298, -0.307318358456484, 4.19015460156576,
            -3.30975297068573, -4.87746969906705, 0.285041779927669,
            0.0315071085472043, -6.33304532673002, -1.02436294926752,
            -6.9340610414309, -0.859055122126197, -0.736490247380883,
            -6.96145354225969, -1.13750232106315, -0.778363801217565,
            -0.636042191521576, -0.839322392162821]
        resid_dev = [
            7.30513948467594, -0.101296157943519, 2.44987904003561,
            -1.34021826264378, -1.99062116973315, 0.212014827300475,
            0.0223969676885324, -2.63775728156667, -0.798884085657077,
            -3.11862021596631, -0.691356293575324, -0.607658243497501,
            -3.12628915913493, -0.869326536299756, -0.636663290048755,
            -0.536212950673418, -0.67812263418512]
        resid_pear = [
            9.98383729954486, -0.100734032611758, 3.11465040934513,
            -1.22417704160631, -1.73780566805242, 0.217661565866984,
            0.0224564769560215, -2.19386916576256,
            -0.719962160947025, -2.46172701579962,
            -0.630049829146329, -0.558895774299477,
            -2.4671965358931, -0.778034748813176,
            -0.583676657782738, -0.497265896656757,
            -0.61887064145702]
        resid_work = [
            3.47027319357873, -0.0330190014589175, 2.31520029566659,
            -0.452785885372436, -0.619167053050639,
            0.166209168591668, 0.0160057009522403,
            -0.759991705123147, -0.506017436072008,
            -0.873961141113221, -0.46209233491888,
            -0.424125760851072, -0.874394795536774,
            -0.532164250702372, -0.437685360377137,
            -0.388768819543728, -0.456321521305397]
        self.resid_response = resid_resp
        self.resid_deviance = resid_dev
        self.resid_working = resid_work
        self.resid_pearson = resid_pear
        # self.null_deviance = 3731.85161919 # N/A
        # Coefficients (INCOME, SOUTH) and their standard errors.
        self.params = [1.65700638623525e-05, 1.54257997850499]
        self.bse = [1.81044999017907e-05, 0.725739640176733]
        # self.aic_R = 522.14215776 # R adds 2 for dof to AIC
        # self.aic_Stata = 7.459173652869477 # stata divides by nobs
        # self.deviance = 70.6652992116034 # from Stata
        self.deviance = 95.0325613464258  # from R
        # self.scale = 1.0
        # self.llf = -250.0710778504317 # from Stata, ours with scale=1
        # self.bic_Stata = -179.9959200693088 # no bic in R?
        self.df_model = 1
        self.df_resid = 15
        # TODO: taken from Stata; not available in sm yet
        # self.chi2 = 2699.138063147485
        self.fittedvalues = [
            8.27689906137016, 9.30731835845648,
            1.80984539843424, 7.30975297068573,
            7.87746969906705, 1.71495822007233,
            1.9684928914528, 8.33304532673002,
            2.02436294926752, 7.9340610414309,
            1.8590551221262, 1.73649024738088,
            7.96145354225969, 2.13750232106315,
            1.77836380121756, 1.63604219152158,
            1.83932239216282]
class FairTweedieLog15(object):
    """
    # From R
    setwd('c:/workspace')
    data <- read.csv('fair.csv', sep=",")
    library(statmod)
    library(tweedie)
    model <- glm(affairs ~ rate_marriage + age + yrs_married -1, data=data,
                 family=tweedie(var.power=1.5, link.power = 0))
    r <- resid(model, type='response')
    paste(as.character(r[1:17]), collapse=",")
    r <- resid(model, type='deviance')
    paste(as.character(r[1:17]), collapse=",")
    r <- resid(model, type='pearson')
    paste(as.character(r[1:17]), collapse=",")
    r <- resid(model, type='working')
    paste(as.character(r[1:17]), collapse=",")
    paste(as.character(model$coefficients[1:17]), collapse=",")
    s <- summary(model)
    paste(as.character(sqrt(diag(s$cov.scaled))), collapse=",")
    s$deviance
    paste(as.character(model$fitted.values[1:17]), collapse=",")
    """
    def __init__(self):
        # Leading residuals of each type from the R session shown in the
        # class docstring (resid(model, type=...)).
        resid_resp = [
            -0.997868449815039, 2.69283106662728, 0.677397439981157,
            0.220024942629269, 4.30244966465517, 4.12917275616972,
            0.669303122309246, 1.64321562230925, 3.73361710426128,
            0.271937359562684, 1.70030700747884, 1.55430573164611,
            -0.263723852468304, 1.51263973164611, 2.75223392654071,
            0.310487741565721, 1.28077676333896, -0.722602160018842]
        resid_dev = [
            -1.40274708439925, 2.48476334070913, 0.722690630291423,
            0.333179337353702, 4.00781035212304, 3.33344591331998,
            1.51543361886727, 2.82502498800952, 2.2795411865605,
            0.245239170945663, 0.993721205729013, 1.74920359743562,
            -0.363141475997386, 1.71412357710318, 2.57445879456298,
            0.279858474280908, 1.22953362433333, -1.84397406923697]
        resid_pear = [
            -0.923380371255914, 4.28706294677515, 0.864309147553743,
            0.366063826152319, 9.17690493704408, 6.57783985712941,
            2.39340023647571, 5.87607098775551, 3.55791152198837,
            0.260052421285998, 1.21439278430259, 2.66470328868695,
            -0.327698246542009, 2.59327105694137, 4.53096038849505,
            0.299198418236691, 1.6399313081981, -0.921987034618483]
        resid_work = [
            -0.899807800767353, 5.00583784559752, 0.937441759049674,
            0.433762277766879, 11.8128959278604, 7.6822784352496,
            3.65998654763585, 8.98568506862295, 3.50120010377224,
            0.256207345500911, 1.08551656668241, 3.18923357641756,
            -0.352302468597673, 3.10374035363038, 5.35005901385941,
            0.29552727652976, 1.78077778644209, -1]
        self.resid_response = resid_resp
        self.resid_deviance = resid_dev
        self.resid_working = resid_work
        self.resid_pearson = resid_pear
        # self.null_deviance = 3731.85161919 # N/A
        # Coefficients (rate_marriage, age, yrs_married) and their
        # standard errors (sqrt(diag(s$cov.scaled)) in the R session).
        self.params = [
            -0.389168171340452, 0.0670222370664611, -0.0970852004566712]
        self.bse = [
            0.0323435784513691, 0.0063805300018014, 0.00893580175352525]
        # self.aic_R = 522.14215776 # R adds 2 for dof to AIC
        # self.aic_Stata = 7.459173652869477 # stata divides by nobs
        # self.deviance = 70.6652992116034 # from Stata
        self.deviance = 20741.82  # from R
        # self.scale = 1.0
        # self.llf = -250.0710778504317 # from Stata, ours with scale=1
        # self.bic_Stata = -179.9959200693088 # no bic in R?
        self.df_model = 2
        self.df_resid = 6363
        # TODO: taken from Stata; not available in sm yet
        # self.chi2 = 2699.138063147485
        self.fittedvalues = [
            1.10897954981504, 0.537938133372725,
            0.722602160018842, 0.507247757370731,
            0.364216335344828, 0.537493243830281,
            0.182870377690754, 0.182870377690754,
            1.06638209573872, 1.06139564043732,
            1.56635749252116, 0.487360268353893,
            0.748572252468304, 0.487360268353893,
            0.514430573459285, 1.05062295843428,
            0.71922323666104, 0.722602160018842]
|
jseabold/statsmodels
|
statsmodels/genmod/tests/results/results_glm.py
|
Python
|
bsd-3-clause
| 273,613
|
[
"Gaussian"
] |
b40982c2a086a46ff6f1e261815694f93d39f101601a493f75c9c97b91690646
|
"""Setup script for sciluigi.

Builds the package long description from README.rst (prefixed with a
pointer to the GitHub repository) and registers the distribution
metadata with setuptools.
"""
import os
import sys

# Prefer setuptools; fall back to distutils only when setuptools is
# genuinely absent.  A bare ``except:`` here would also swallow
# SystemExit/KeyboardInterrupt and real import-time errors.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

readme_note = '''\
.. note::
   For the latest source, issues and discussion, etc, please visit the
   `GitHub repository <https://github.com/pharmbio/sciluigi>`_\n\n
'''

# README.rst ships next to this script; it becomes the PyPI page body.
with open('README.rst') as fobj:
    long_description = readme_note + fobj.read()

setup(
    name='sciluigi',
    version='0.9.7',
    description='Helper library for writing dynamic, flexible workflows in luigi',
    long_description=long_description,
    author='Samuel Lampa',
    author_email='samuel.lampa@rilnet.com',
    url='https://github.com/pharmbio/sciluigi',
    license='MIT',
    keywords='workflows workflow pipeline luigi',
    packages=[
        'sciluigi',
    ],
    install_requires=[
        'luigi',
        'psycopg2',
        'boto3'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'Topic :: Scientific/Engineering :: Chemistry',
    ],
)
|
samuell/sciluigi
|
setup.py
|
Python
|
mit
| 1,532
|
[
"VisIt"
] |
87e6d66afcbfbab8137ebdd1c5f7537ba0d627fc9420818452487601a7017f82
|
# -*-python-*-
#
# Copyright (C) 1999-2013 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
import os
import os.path
def canonicalize_rootpath(rootpath):
    """Normalize an absolute repository root path and return it.

    The caller must supply an absolute path; relative paths indicate a
    programming error upstream.
    """
    assert os.path.isabs(rootpath)
    normalized = os.path.normpath(rootpath)
    return normalized
def _is_cvsroot(path):
return os.path.exists(os.path.join(path, "CVSROOT", "config"))
def expand_root_parent(parent_path):
    """Return a dict mapping root names to canonicalized root paths.

    Each subdirectory of PARENT_PATH containing "CVSROOT/config" is a
    root.  If PARENT_PATH itself contains "CVSROOT/config", every one of
    its subdirectories counts as a root.
    """
    assert os.path.isabs(parent_path)
    roots = {}
    for name in os.listdir(parent_path):
        candidate = os.path.join(parent_path, name)
        # Parent-is-a-repository makes all children roots; otherwise the
        # child itself must carry the CVSROOT marker.
        if _is_cvsroot(parent_path) or _is_cvsroot(candidate):
            roots[name] = canonicalize_rootpath(candidate)
    return roots
def find_root_in_parent(parent_path, rootname):
    """Search PARENT_PATH for a root named ROOTNAME, returning the
    canonicalized ROOTPATH of the root if found; return None if no such
    root is found."""
    assert os.path.isabs(parent_path)
    rootpath = os.path.join(parent_path, rootname)
    # Case 1: PARENT_PATH is itself a CVS repository, in which case any
    # existing subdirectory is accepted as a root.
    if _is_cvsroot(parent_path) and os.path.exists(rootpath):
        return canonicalize_rootpath(rootpath)
    # Case 2: PARENT_PATH/ROOTNAME must itself be a CVS repository.
    if _is_cvsroot(rootpath):
        return canonicalize_rootpath(rootpath)
    return None
def CVSRepository(name, rootpath, authorizer, utilities, use_rcsparse):
    """Factory returning a CVS repository object for ROOTPATH.

    USE_RCSPARSE selects the pure-parser implementation (ccvs) over the
    one backed by the CVS binaries (bincvs).
    """
    canonical = canonicalize_rootpath(rootpath)
    if not use_rcsparse:
        import bincvs
        return bincvs.BinCVSRepository(name, canonical, authorizer, utilities)
    import ccvs
    return ccvs.CCVSRepository(name, canonical, authorizer, utilities)
|
marcellodesales/svnedge-console
|
svn-server/lib/viewvc/vclib/ccvs/__init__.py
|
Python
|
agpl-3.0
| 2,149
|
[
"VisIt"
] |
0385c6dd809d243bc1012860717641d1c5496e6d6df853a57509762f5f4ec18e
|
#!/usr/bin/env python2.3
################################################################################
#
# This file is part of the General Hidden Markov Model Library,
# GHMM version __VERSION__, see http://ghmm.org
#
# file: ghmm.py
# authors: Benjamin Georgi, Wasinee Rungsarityotin, Alexander Schliep,
# Janne Grunau
#
# Copyright (C) 1998-2004 Alexander Schliep
# Copyright (C) 1998-2001 ZAIK/ZPR, Universitaet zu Koeln
# Copyright (C) 2002-2004 Max-Planck-Institut fuer Molekulare Genetik,
# Berlin
#
# Contact: schliep@ghmm.org
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
#
################################################################################
"""@mainpage GHMM - an open source library for Hidden Markov Models (HMM)
HMMs are stochastic models which encode a probability density over
sequences of symbols. These symbols can be discrete letters (A,C,G and
T for DNA; 1,2,3,4,5,6 for dice), real numbers (weather measurement
over time: temperature) or vectors of either or the combination
thereof (weather again: temperature, pressure, percipitation).
@note
We will always talk about emissions, emission sequence and so
forth when we refer to the sequence of symbols. Another name
for the same object is observation resp. observation sequence.
A simple model with a fair and one unfair coin can be created as follows
>> fair = [0.5, 0.5]
>> loaded = [0.9, 0.1]
>> A = [[0.9, 0.1], [0.3, 0.7]]
>> pi = [0.9, 0.1]
>> B = [fair, loaded]
>> sigma = ghmm.IntegerRange(0,2)
>> m = ghmm.HMMFromMatrices(sigma, ghmm.DiscreteDistribution(sigma), A, B, pi)
The objects one has to deal with in HMM modelling are the following
-# The domain the emissions come from: the EmissionDomain. Domain
is to be understood mathematically and to encompass both discrete,
finite alphabets and fields such as the real numbers or intervals
of the reals.\n
For technical reasons there can be two representations of an
emission symbol: an external and an internal. The external
representation is the view of the application using ghmm.py. The
internal one is what is used in both ghmm.py and the ghmm
C-library. Representations can coincide, but this is not
guaranteed. Discrete alphabets of size k are represented as
[0,1,2,...,k-1] internally. It is the domain objects job to
provide a mapping between representations in both directions.
@note
Do not make assumptions about the internal
representations. It might change.
-# Every domain has to afford a distribution, which is usually
parameterized. A distribution associated with a domain
should allow us to compute \f$Prob[x| distribution parameters]\f$
efficiently.\n
The distribution defines the \b type of distribution which
we will use to model emissions in <b>every state</b> of the HMM.
The \b type of distribution will be identical for all states,
their \b parameterizations will differ from state to state.
-# We will consider a Sequence of emissions from the same emission
domain and very often sets of such sequences: SequenceSet
-# The HMM: The HMM consists of two major components: A Markov chain
over states (implemented as a weighted directed graph with
adjacency and inverse-adjacency lists) and the emission
distributions per-state. For reasons of efficiency the HMM itself
is *static*, as far as the topology of the underlying Markov chain
(and obviously the EmissionDomain) are concerned. You cannot add or
delete transitions in an HMM.\n
Transition probabilities and the parameters of the per-state
emission distributions can be easily modified. Particularly,
Baum-Welch reestimation is supported. While a transition cannot be
deleted from the graph, you can set the transition probability to
zero, which has the same effect from the theoretical point of
view. However, the corresponding edge in the graph is still
traversed in the computation.\n
States in HMMs are referred to by their integer index. State sequences
are simply list of integers.\n
If you want to store application specific data for each state you
have to do it yourself.\n
Subclasses of HMM implement specific types of HMM. The type depends
on the EmissionDomain, the Distribution used, the specific
extensions to the 'standard' HMMs and so forth
"""
import ghmmwrapper
import ghmmhelper
import modhmmer
import re
import StringIO
import copy
import math
import sys
import os
import logging
from string import join
from textwrap import fill
# Initialize logging to stderr
#logging.basicConfig(format="%(asctime)s %(filename)s:%(lineno)d %(levelname)-5s - %(message)s")
log = logging.getLogger("GHMM")

# creating StreamHandler to stderr
hdlr = logging.StreamHandler(sys.stderr)

# setting message format
#fmt = logging.Formatter("%(name)s %(asctime)s %(filename)s:%(lineno)d %(levelname)s %(thread)-5s - %(message)s")
fmt = logging.Formatter("%(name)s %(filename)s:%(lineno)d - %(message)s")
hdlr.setFormatter(fmt)

# adding handler to logger object
log.addHandler(hdlr)

# Set the minimal severity of a message to be shown. The levels in
# increasing severity are: DEBUG, INFO, WARNING, ERROR, CRITICAL
log.setLevel(logging.WARNING)
log.info( " I'm the ghmm in "+ __file__)

# Severity dispatch table indexed by the C library's log level:
# 0 = critical, 1 = error, 2 = warning, 3 = info, 4 = debug.
c_log = [log.critical, log.error, log.warning, log.info, log.debug]

def logwrapper(level, message):
    # Forward a message coming from the GHMM C library to the Python
    # logger at the severity selected by 'level' (index into c_log).
    c_log[level](message)

ghmmwrapper.set_pylogging(logwrapper)

# Initialize global random number generator by system time
ghmmwrapper.ghmm_rng_init()
ghmmwrapper.time_seed()
#-------------------------------------------------------------------------------
#- Exceptions ------------------------------------------------------------------
class GHMMError(Exception):
    """Root of the GHMM exception hierarchy.

    Stores the human-readable message on ``self.message``; the string
    form is the ``repr`` of that message.
    """

    def __init__(self, message):
        self.message = message

    def __str__(self):
        return "%r" % (self.message,)
class UnknownInputType(GHMMError):
    """Raised when a constructor receives input of an unrecognized type.

    ``__init__`` and ``__str__`` are inherited unchanged from GHMMError;
    the verbatim copies in the original added no behavior.
    """
    pass
class NoValidCDataType(GHMMError):
    """Raised when an EmissionDomain's CDataType is neither "int" nor "double".

    Behavior is fully inherited from GHMMError; the duplicated
    ``__init__``/``__str__`` added nothing.
    """
    pass
class badCPointer(GHMMError):
    """Raised when a C-level struct is used in an invalid way (e.g. a
    multi-sequence struct passed where one sequence is expected).

    Behavior is fully inherited from GHMMError.
    """
    pass
class SequenceCannotBeBuild(GHMMError):
    """Raised when a sequence object cannot be constructed from the input.

    Behavior is fully inherited from GHMMError.
    """
    pass
class InvalidModelParameters(GHMMError):
    """Raised when HMM parameters (matrices, vectors, ...) are inconsistent.

    Behavior is fully inherited from GHMMError.
    """
    pass
class GHMMOutOfDomain(GHMMError):
    """Raised when an emission is not contained in the expected domain.

    Behavior is fully inherited from GHMMError.
    """
    pass
class UnsupportedFeature(GHMMError):
    """Raised when a feature is unavailable in this GHMM build (e.g. the
    obsolete ASCII sequence file support).

    Behavior is fully inherited from GHMMError.
    """
    pass
class WrongFileType(GHMMError):
    """Raised when an input file is not of the expected type.

    Behavior is fully inherited from GHMMError.
    """
    pass
class ParseFileError(GHMMError):
    """Raised when an input file exists but cannot be parsed.

    Behavior is fully inherited from GHMMError.
    """
    pass
#-------------------------------------------------------------------------------
#- constants -------------------------------------------------------------------
# Model-type bit flags re-exported from the C library. A model's type
# field is an OR-combination of these values.
kNotSpecified = ghmmwrapper.kNotSpecified
kLeftRight = ghmmwrapper.kLeftRight
kSilentStates = ghmmwrapper.kSilentStates
kTiedEmissions = ghmmwrapper.kTiedEmissions
kHigherOrderEmissions = ghmmwrapper.kHigherOrderEmissions
kBackgroundDistributions = ghmmwrapper.kBackgroundDistributions
kLabeledStates = ghmmwrapper.kLabeledStates
kTransitionClasses = ghmmwrapper.kTransitionClasses
kDiscreteHMM = ghmmwrapper.kDiscreteHMM
kContinuousHMM = ghmmwrapper.kContinuousHMM
kPairHMM = ghmmwrapper.kPairHMM

# Map from flag value to its symbolic name (useful for printing/debugging).
types = {
    kLeftRight:'kLeftRight',
    kSilentStates:'kSilentStates',
    kTiedEmissions:'kTiedEmissions',
    kHigherOrderEmissions:'kHigherOrderEmissions',
    kBackgroundDistributions:'kBackgroundDistributions',
    kLabeledStates:'kLabeledStates',
    kTransitionClasses:'kTransitionClasses',
    kDiscreteHMM:'kDiscreteHMM',
    kContinuousHMM:'kContinuousHMM',
    kPairHMM:'kPairHMM',
    }
#-------------------------------------------------------------------------------
#- EmissionDomain and derived -------------------------------------------------
class EmissionDomain(object):
    """ Abstract base class for emissions produced by an HMM.

    There can be two representations for emissions:
    -# An internal, used in ghmm.py and the ghmm C-library
    -# An external, used in your particular application

    The base class implements the identity mapping between the two;
    subclasses (e.g. Alphabet) override it.
    """

    def internal(self, emission):
        """Map one external emission to its internal form (identity here)."""
        return emission

    def internalSequence(self, emissionSequence):
        """Map a whole external sequence to internal form (identity here)."""
        return emissionSequence

    def external(self, internal):
        """Map one internal symbol back to its external form (identity here)."""
        return internal

    def externalSequence(self, internalSequence):
        """Map a whole internal sequence back to external form (identity here)."""
        return internalSequence

    def isAdmissable(self, emission):
        """Membership hook for subclasses; the abstract base knows no
        domain and simply returns None."""
        return None
class Alphabet(EmissionDomain):
    """ Discrete, finite alphabet.

    Maps external symbols (characters, ints, any hashable objects) to the
    internal integer representation [0, 1, ..., k-1] used by the C library.
    """

    def __init__(self, listOfCharacters, calphabet = None):
        """
        Creates an alphabet out of a listOfCharacters
        @param listOfCharacters a list of strings (single characters most of
        the time), ints, or other objects that can be used as dictionary keys
        for a mapping of the external sequences to the internal representation
        @param calphabet can alternatively be a SWIG pointer to a
        C alphabet_s struct
        @note
        Alphabets should be considered as immutable. That means the
        listOfCharacters and the mapping should never be touched after
        construction.
        """
        self.index = {} # Which index belongs to which character
        if calphabet is None:
            self.listOfCharacters = listOfCharacters
        else:
            # pull the symbols out of the C alphabet struct
            self.listOfCharacters = [calphabet.getSymbol(i) for i in range(calphabet.size)]
        for i,c in enumerate(self.listOfCharacters):
            self.index[c] = i
        # Determine whether all external symbols share a common length.
        lens = {}
        try:
            for c in self.listOfCharacters:
                lens[len(c)] = 1
        except TypeError:
            # symbols without len() (e.g. ints) have no character length
            self._lengthOfCharacters = None
        else:
            if len(lens) == 1:
                self._lengthOfCharacters = lens.keys()[0]
            else:
                self._lengthOfCharacters = None
        self.CDataType = "int" # flag indicating which C data type should be used

    def __str__(self):
        strout = ["<Alphabet:"]
        strout.append( str(self.listOfCharacters) +'>')
        return join(strout,'')

    def verboseStr(self):
        # Multi-line description showing both representations.
        strout = ["GHMM Alphabet:\n"]
        strout.append("Number of symbols: " + str(len(self)) + "\n")
        strout.append("External: " + str(self.listOfCharacters) + "\n")
        strout.append("Internal: " + str(range(len(self))) + "\n")
        return join(strout,'')

    def __eq__(self,alph):
        # Equal iff symbols, index mapping and C data type all agree.
        if not isinstance(alph,Alphabet):
            return False
        else:
            if self.listOfCharacters == alph.listOfCharacters and self.index == alph.index and self.CDataType==alph.CDataType:
                return True
            else:
                return False

    def __len__(self):
        return len(self.listOfCharacters)

    def __hash__(self):
        #XXX rewrite
        # defining hash and eq is not recommended for mutable types.
        # => listOfCharacters should be considered immutable
        return id(self)

    def size(self):
        """ @deprecated use len() instead
        """
        log.warning( "Warning: The use of .size() is deprecated. Use len() instead.")
        return len(self.listOfCharacters)

    def internal(self, emission):
        """ Given a emission return the internal representation.
        Raises KeyError for symbols not in the alphabet.
        """
        return self.index[emission]

    def internalSequence(self, emissionSequence):
        """ Given a emission_sequence return the internal representation
        Raises KeyError
        """
        # NOTE(review): self.index is a dict, so an unknown symbol raises
        # KeyError directly; the IndexError handler below looks unreachable.
        result = copy.deepcopy(emissionSequence)
        try:
            result = map(lambda i: self.index[i], result)
        except IndexError:
            raise KeyError
        return result

    def external(self, internal):
        """ Given an internal representation return the external representation
        @note the internal code -1 always represents a gap character '-'
        Raises KeyError
        """
        if internal == -1:
            return "-"
        # NOTE(review): the guard accepts internal == len(listOfCharacters),
        # which then raises IndexError from the list access -- presumably
        # the check should be 'len(self.listOfCharacters) <= internal'.
        if internal < -1 or len(self.listOfCharacters) < internal:
            raise KeyError("Internal symbol "+str(internal)+" not recognized.")
        return self.listOfCharacters[internal]

    def externalSequence(self, internalSequence):
        """ Given a sequence with the internal representation return the external
        representation
        Raises KeyError
        """
        result = copy.deepcopy(internalSequence)
        try:
            result = map(lambda i: self.listOfCharacters[i], result)
        except IndexError:
            raise KeyError
        return result

    def isAdmissable(self, emission):
        """ Check whether emission is admissable (contained in) the domain
        """
        return emission in self.listOfCharacters

    def getExternalCharacterLength(self):
        """
        If all external characters are of the same length the length is
        returned. Otherwise None.
        @return length of the external characters or None
        """
        return self._lengthOfCharacters

    def toCstruct(self):
        # Build a C-level alphabet struct mirroring this Python alphabet.
        calphabet = ghmmwrapper.ghmm_alphabet(len(self), "<unused>")
        for i,symbol in enumerate(self.listOfCharacters):
            calphabet.setSymbol(i, str(symbol))
        return calphabet
# Ready-made alphabets: the four DNA nucleotides and the 20 standard
# amino acids (one-letter codes).
DNA = Alphabet(['a','c','g','t'])
AminoAcids = Alphabet(['A','C','D','E','F','G','H','I','K','L',
                       'M','N','P','Q','R','S','T','V','W','Y'])
def IntegerRange(a,b):
    """Return an Alphabet over the integers a, a+1, ..., b-1
    (b itself is excluded, matching range())."""
    return Alphabet(range(a,b))
# To be used for labelled HMMs. We could use an Alphabet directly but this way it is more explicit.
class LabelDomain(Alphabet):
    """Domain for state labels of labelled HMMs.

    Behaviorally identical to Alphabet; kept as a separate class so label
    domains are explicit in signatures.
    """
    def __init__(self, listOfLabels, calphabet = None):
        Alphabet.__init__(self, listOfLabels, calphabet)
class Float(EmissionDomain):
    """Continuous emission domain: the real numbers, stored as C doubles."""

    def __init__(self):
        # tells the wrapper layer to use double-typed C arrays
        self.CDataType = "double"

    def __eq__(self, other):
        # every Float domain is interchangeable with every other
        return isinstance(other, Float)

    def __hash__(self):
        # identity hash; Float carries no distinguishing state
        # (defining hash and eq together keeps instances usable as keys)
        return id(self)

    def isAdmissable(self, emission):
        """Accept exactly the Python floats."""
        return isinstance(emission, float)
#-------------------------------------------------------------------------------
#- Distribution and derived ---------------------------------------------------
class Distribution(object):
    """ Abstract base class for distribution over EmissionDomains.

    Concrete subclasses define the parameterization via set()/get().
    """
    # TODO: add density, mass, cumulative dist, quantiles, sample,
    # fit parameters, moments
class DiscreteDistribution(Distribution):
    """Discrete distribution over an Alphabet, parameterized by a
    probability vector with one entry per symbol."""

    def __init__(self, alphabet):
        self.alphabet = alphabet
        self.prob_vector = None  # filled in later via set()

    def set(self, prob_vector):
        """Store the probability vector."""
        self.prob_vector = prob_vector

    def get(self):
        """Return the stored probability vector (None until set() is called)."""
        return self.prob_vector
class ContinuousDistribution(Distribution):
    """Marker base class for distributions over continuous domains."""
    pass
class UniformDistribution(ContinuousDistribution):
    """Uniform density on an interval, parameterized by (max, min)."""

    def __init__(self, domain):
        self.emissionDomain = domain
        self.max = None
        self.min = None

    def set(self, values):
        """
        @param values tuple of maximum, minimum
        """
        self.max, self.min = values

    def get(self):
        """Return the (maximum, minimum) pair."""
        return (self.max, self.min)
class GaussianDistribution(ContinuousDistribution):
    """Normal density parameterized by mean mu and standard deviation sigma."""
    # XXX attributes unused at this point

    def __init__(self, domain):
        self.emissionDomain = domain
        self.mu = None
        self.sigma = None

    def set(self, values):
        """
        @param values tuple of mu, sigma
        """
        self.mu, self.sigma = values

    def get(self):
        """Return the (mu, sigma) pair."""
        return (self.mu, self.sigma)
class TruncGaussianDistribution(GaussianDistribution):
    """Truncated normal density: a Gaussian (mu, sigma) with an
    additional truncation point 'trunc'."""
    # XXX attributes unused at this point

    def __init__(self, domain):
        # Bug fix: the original called self.GaussianDistribution(self, domain),
        # which raises AttributeError (no such attribute exists). Initialize
        # the base class properly instead.
        GaussianDistribution.__init__(self, domain)
        self.trunc = None

    def set(self, values):
        """
        @param values tuple of mu, sigma, trunc
        """
        mu, sigma, trunc = values
        self.mu = mu
        self.sigma = sigma
        self.trunc = trunc

    def get(self):
        """Return the (mu, sigma, trunc) triple."""
        return (self.mu, self.sigma, self.trunc)
class GaussianMixtureDistribution(ContinuousDistribution):
    """Mixture of univariate Gaussians.

    Stub: only the attribute layout exists; set() discards its
    arguments and get() returns nothing.
    """
    # XXX attributes unused at this point
    def __init__(self, domain):
        self.emissionDomain = domain
        self.M = None # number of mixture components
        self.mu = []
        self.sigma = []
        self.weight = []

    def set(self, index, values):
        """
        @param index index of mixture component
        @param values tuple of mu, sigma, w
        @note not implemented: the unpacked values are discarded
        """
        mu, sigma, w = values
        pass

    def get(self):
        # not implemented: returns None
        pass
class ContinuousMixtureDistribution(ContinuousDistribution):
    """Mixture of continuous distributions.

    Component i has weight self.weight[i], distribution self.components[i]
    and fix flag self.fix[i] (1 = parameters kept fixed during training).
    """

    def __init__(self, domain):
        self.emissionDomain = domain
        self.M = 0            # number of mixture components
        self.components = []  # ContinuousDistribution instances
        self.weight = []      # component weights; should sum to 1
        self.fix = []         # 1 if the component is fixed during training

    def add(self, w, fix, distribution):
        """Append a new component with weight w and fix flag fix."""
        assert isinstance(distribution, ContinuousDistribution)
        self.M = self.M + 1
        self.weight.append(w)
        # Bug fix: the original appended to the nonexistent 'self.component'.
        self.components.append(distribution)
        if isinstance(distribution, UniformDistribution):
            # uniform distributions are fixed by definition
            self.fix.append(1)
        else:
            self.fix.append(fix)

    def set(self, index, w, fix, distribution):
        """Replace component 'index' with the given weight, fix flag and
        distribution. Raises IndexError for an out-of-range index."""
        # Bug fix: the original compared against an unbound name 'M'.
        if index >= self.M:
            raise IndexError
        assert isinstance(distribution, ContinuousDistribution)
        # Bug fix: the original indexed with an unbound 'i' and *called*
        # the stored flags (self.fix[i](1)) instead of assigning them.
        self.weight[index] = w
        self.components[index] = distribution
        if isinstance(distribution, UniformDistribution):
            # uniform distributions are fixed by definition
            self.fix[index] = 1
        else:
            self.fix[index] = fix

    def get(self, i):
        """Return the (weight, fix, distribution) triple of component i."""
        # Bug fix: the original asserted against an unbound 'M' and
        # misspelled 'self.weigth' / 'self.component'.
        assert self.M > i
        return (self.weight[i], self.fix[i], self.components[i])

    def check(self):
        """Sanity-check the mixture: component count matches and the
        weights form a probability vector."""
        assert self.M == len(self.components)
        assert sum(self.weight) == 1
        # Bug fix: the original compared the weight *list* against a number
        # (sum(self.weight > 1)), which never flags a bad entry in Python 2;
        # check every weight individually instead.
        for w in self.weight:
            assert 0 <= w <= 1
class MultivariateGaussianDistribution(ContinuousDistribution):
    """Multivariate normal distribution (stub: only stores the domain)."""
    def __init__(self, domain):
        self.emissionDomain = domain
#-------------------------------------------------------------------------------
#Sequence, SequenceSet and derived ------------------------------------------
class EmissionSequence(object):
    """ An EmissionSequence contains the *internal* representation of
    a sequence of emissions.

    It also contains a reference to the domain where the emissions originated from.
    """

    def __init__(self, emissionDomain, sequenceInput, labelDomain = None, labelInput = None, ParentSequenceSet=None):
        """
        @param emissionDomain domain (e.g. Alphabet or Float) of the emissions
        @param sequenceInput one of: a filename of a (deprecated) ASCII
               sequence file, a list of external emissions, or a C-level
               ghmm_dseq/ghmm_cseq struct holding exactly one sequence
        @param labelDomain LabelDomain for the state labels (optional)
        @param labelInput list of external state labels, same length as
               the sequence (optional; requires labelDomain)
        @param ParentSequenceSet SequenceSet this sequence is a view into;
               kept as a reference so the parent outlives this view
        """
        self.emissionDomain = emissionDomain

        if ParentSequenceSet is not None:
            # optional reference to a parent SequenceSet. Is needed for reference counting
            if not isinstance(ParentSequenceSet,SequenceSet):
                raise TypeError("Invalid reference. Only SequenceSet is valid.")
        self.ParentSequenceSet = ParentSequenceSet

        # Bind the C helper functions matching the domain's data type.
        if self.emissionDomain.CDataType == "int":
            # necessary C functions for accessing the ghmm_dseq struct
            self.sequenceAllocationFunction = ghmmwrapper.ghmm_dseq
            self.allocSingleSeq = ghmmwrapper.int_array_alloc
            self.seq_read = ghmmwrapper.ghmm_dseq_read
            self.seq_ptr_array_getitem = ghmmwrapper.dseq_ptr_array_getitem
            self.sequence_carray = ghmmwrapper.list2int_array
        elif self.emissionDomain.CDataType == "double":
            # necessary C functions for accessing the ghmm_cseq struct
            self.sequenceAllocationFunction = ghmmwrapper.ghmm_cseq
            self.allocSingleSeq = ghmmwrapper.double_array_alloc
            self.seq_read = ghmmwrapper.ghmm_cseq_read
            self.seq_ptr_array_getitem = ghmmwrapper.cseq_ptr_array_getitem
            self.sequence_carray = ghmmwrapper.list2double_array
        else:
            raise NoValidCDataType("C data type " + str(self.emissionDomain.CDataType) + " invalid.")

        # check if ghmm is build with asci sequence file support
        if isinstance(sequenceInput, str) or isinstance(sequenceInput, unicode):
            if ghmmwrapper.ASCI_SEQ_FILE:
                if not os.path.exists(sequenceInput):
                    raise IOError('File ' + str(sequenceInput) + ' not found.')
                else:
                    # seq_read returns a list of sequence structs; keep the first
                    tmp = self.seq_read(sequenceInput)
                    if len(tmp) > 0:
                        self.cseq = tmp[0]
                    else:
                        raise ParseFileError('File ' + str(sequenceInput) + ' not valid.')
            else:
                raise UnsupportedFeature("asci sequence files are deprecated. Please convert your files"
                                         + " to the new xml-format or rebuild the GHMM with"
                                         + " the conditional \"GHMM_OBSOLETE\".")

        #create a ghmm_dseq with state_labels, if the appropriate parameters are set
        elif isinstance(sequenceInput, list):
            internalInput = self.emissionDomain.internalSequence(sequenceInput)
            seq = self.sequence_carray(internalInput)
            self.cseq = self.sequenceAllocationFunction(seq, len(sequenceInput))

            if labelInput is not None and labelDomain is not None:
                assert len(sequenceInput)==len(labelInput), "Length of the sequence and labels don't match."
                assert isinstance(labelInput, list), "expected a list of labels."
                assert isinstance(labelDomain, LabelDomain), "labelDomain is not a LabelDomain class."
                self.labelDomain = labelDomain
                #translate the external labels in internal
                internalLabel = self.labelDomain.internalSequence(labelInput)
                label = ghmmwrapper.list2int_array(internalLabel)
                self.cseq.init_labels(label, len(internalInput))

        # internal use
        elif isinstance(sequenceInput, ghmmwrapper.ghmm_dseq) or isinstance(sequenceInput, ghmmwrapper.ghmm_cseq):
            if sequenceInput.seq_number > 1:
                raise badCPointer("Use SequenceSet for multiple sequences.")
            self.cseq = sequenceInput
            if labelDomain != None:
                self.labelDomain = labelDomain

        else:
            raise UnknownInputType("inputType " + str(type(sequenceInput)) + " not recognized.")

    def __del__(self):
        "Deallocation of C sequence struct."
        log.debug( "__del__ EmissionSequence " + str(self.cseq))
        # if a parent SequenceSet exits, we use cseq.subseq_free() to free memory
        if self.ParentSequenceSet is not None:
            self.cseq.subseq_free()

    def __len__(self):
        "Returns the length of the sequence."
        return self.cseq.getLength(0)

    def __setitem__(self, index, value):
        # translate to internal representation before storing in the C struct
        internalValue = self.emissionDomain.internal(value)
        self.cseq.setSymbol(0, index, internalValue)

    def __getitem__(self, index):
        """
        @returns the symbol at position 'index' in *internal* representation
        @note negative indices are not translated; they are handed to the
        C layer as-is
        """
        if index < len(self):
            return self.cseq.getSymbol(0, index)
        else:
            raise IndexError

    def getSeqLabel(self):
        # Obsolete scalar label attached to the whole sequence (GHMM_OBSOLETE).
        if not ghmmwrapper.SEQ_LABEL_FIELD:
            raise UnsupportedFeature("the seq_label field is obsolete. If you need it rebuild the GHMM with the conditional \"GHMM_OBSOLETE\".")
        return ghmmwrapper.long_array_getitem(self.cseq.seq_label,0)

    def setSeqLabel(self,value):
        # Obsolete scalar label attached to the whole sequence (GHMM_OBSOLETE).
        if not ghmmwrapper.SEQ_LABEL_FIELD:
            raise UnsupportedFeature("the seq_label field is obsolete. If you need it rebuild the GHMM with the conditional \"GHMM_OBSOLETE\".")
        ghmmwrapper.long_array_setitem(self.cseq.seq_label,0,value)

    def getStateLabel(self):
        """
        @returns the labeling of the sequence in external representation
        """
        if self.cseq.state_labels != None:
            iLabel = ghmmwrapper.int_array2list(self.cseq.getLabels(0), self.cseq.getLabelsLength(0))
            return self.labelDomain.externalSequence(iLabel)
        else:
            raise IndexError(str(0) + " is out of bounds, only " + str(self.cseq.seq_number) + "labels")

    def hasStateLabels(self):
        """
        @returns whether the sequence is labeled or not
        """
        return self.cseq.state_labels != None

    def getGeneratingStates(self):
        """
        @returns the state path from which the sequence was generated as
        a Python list.
        """
        l_state = []
        for j in range(ghmmwrapper.int_array_getitem(self.cseq.states_len,0) ):
            l_state.append(ghmmwrapper.int_matrix_getitem(self.cseq.states,0,j))
        return l_state

    def __str__(self):
        "Defines string representation."
        seq = self.cseq
        strout = []

        l = seq.getLength(0)
        if l <= 80:
            # short sequence: show every symbol (space-separated for doubles)
            for j in range(l):
                strout.append(str( self.emissionDomain.external(self[j]) ) )
                if self.emissionDomain.CDataType == "double":
                    strout.append(" ")
        else:
            # long sequence: show the first and last five symbols only
            for j in range(0,5):
                strout.append(str( self.emissionDomain.external(self[j]) ) )
                if self.emissionDomain.CDataType == "double":
                    strout.append(" ")
            strout.append('...')
            for j in range(l-5,l):
                strout.append(str( self.emissionDomain.external(self[j]) ) )
                if self.emissionDomain.CDataType == "double":
                    strout.append(" ")

        return join(strout,'')

    def verboseStr(self):
        "Defines string representation."
        seq = self.cseq
        strout = []
        strout.append("\nEmissionSequence Instance:\nlength " + str(seq.getLength(0)))
        strout.append(", weight " + str(seq.getWeight(0))  + ":\n")
        for j in range(seq.getLength(0)):
            strout.append(str( self.emissionDomain.external(self[j]) ) )
            if self.emissionDomain.CDataType == "double":
                strout.append(" ")

        # checking for labels
        if self.emissionDomain.CDataType == "int" and self.cseq.state_labels != None:
            strout.append("\nState labels:\n")
            for j in range(seq.getLabelsLength(0)):
                strout.append(str( self.labelDomain.external(ghmmwrapper.int_matrix_getitem(seq.state_labels,0,j)))+ ", ")

        return join(strout,'')

    def sequenceSet(self):
        """
        @return a one-element SequenceSet with this sequence.
        """
        # in order to copy the sequence in 'self', we first create an empty SequenceSet and then
        # add 'self'
        seqSet = SequenceSet(self.emissionDomain, [])
        seqSet.cseq.add(self.cseq)
        return seqSet

    def write(self,fileName):
        "Writes the EmissionSequence into file 'fileName'."
        self.cseq.write(fileName)

    def setWeight(self, value):
        # keep per-sequence weight and total weight in sync (single sequence)
        self.cseq.setWeight(0, value)
        self.cseq.total_w = value

    def getWeight(self):
        return self.cseq.getWeight(0)

    def asSequenceSet(self):
        """
        @returns this EmissionSequence as a one element SequenceSet
        """
        log.debug("EmissionSequence.asSequenceSet() -- begin " + repr(self.cseq))
        seq = self.sequenceAllocationFunction(1)

        # checking for state labels in the source C sequence struct
        if self.emissionDomain.CDataType == "int" and self.cseq.state_labels is not None:
            log.debug("EmissionSequence.asSequenceSet() -- found labels !")
            seq.calloc_state_labels()
            self.cseq.copyStateLabel(0, seq, 0)

        seq.setLength(0, self.cseq.getLength(0))
        seq.setSequence(0, self.cseq.getSequence(0))
        seq.setWeight(0, self.cseq.getWeight(0))

        log.debug("EmissionSequence.asSequenceSet() -- end " + repr(seq))
        return SequenceSetSubset(self.emissionDomain, seq, self)
class SequenceSet(object):
""" A SequenceSet contains the *internal* representation of a number of
sequences of emissions.
It also contains a reference to the domain where the emissions orginated from.
"""
def __init__(self, emissionDomain, sequenceSetInput, labelDomain = None, labelInput = None):
"""
@p sequenceSetInput is a set of sequences from @p emissionDomain.
There are several valid types for @p sequenceSetInput:
- if @p sequenceSetInput is a string, it is interpreted as the filename
of a sequence file to be read. File format should be fasta.
- if @p sequenceSetInput is a list, it is considered as a list of lists
containing the input sequences
- @p sequenceSetInput can also be a pointer to a C sequence struct but
this is only meant for internal use
"""
self.emissionDomain = emissionDomain
self.cseq = None
if self.emissionDomain.CDataType == "int":
# necessary C functions for accessing the ghmm_dseq struct
self.sequenceAllocationFunction = ghmmwrapper.ghmm_dseq
self.allocSingleSeq = ghmmwrapper.int_array_alloc
self.seq_read = ghmmwrapper.ghmm_dseq_read
self.seq_ptr_array_getitem = ghmmwrapper.dseq_ptr_array_getitem
self.sequence_cmatrix = ghmmhelper.list2int_matrix
elif self.emissionDomain.CDataType == "double":
# necessary C functions for accessing the ghmm_cseq struct
self.sequenceAllocationFunction = ghmmwrapper.ghmm_cseq
self.allocSingleSeq = ghmmwrapper.double_array_alloc
self.seq_read = ghmmwrapper.ghmm_cseq_read
self.seq_ptr_array_getitem = ghmmwrapper.cseq_ptr_array_getitem
self.sequence_cmatrix = ghmmhelper.list2double_matrix
else:
raise NoValidCDataType("C data type " + str(self.emissionDomain.CDataType) + " invalid.")
# reads in the first sequence struct in the input file
if isinstance(sequenceSetInput, str) or isinstance(sequenceSetInput, unicode):
if sequenceSetInput[-3:] == ".fa" or sequenceSetInput[-6:] == ".fasta":
# assuming FastA file:
alfa = emissionDomain.toCstruct()
cseq = ghmmwrapper.ghmm_dseq(sequenceSetInput, alfa)
if cseq is None:
raise ParseFileError("invalid FastA file: " + sequenceSetInput)
self.cseq = cseq
# check if ghmm is build with asci sequence file support
elif not ghmmwrapper.ASCI_SEQ_FILE:
raise UnsupportedFeature("asci sequence files are deprecated. \
Please convert your files to the new xml-format or rebuild the GHMM \
with the conditional \"GHMM_OBSOLETE\".")
else:
if not os.path.exists(sequenceSetInput):
raise IOError, 'File ' + str(sequenceSetInput) + ' not found.'
else:
tmp = self.seq_read(sequenceSetInput)
if len(tmp) > 0:
self.cseq = ghmmwrapper.ghmm_cseq(tmp[0])
else:
raise ParseFileError('File ' + str(sequenceSetInput) + ' not valid.')
elif isinstance(sequenceSetInput, list):
internalInput = [self.emissionDomain.internalSequence(seq) for seq in sequenceSetInput]
(seq, lengths) = self.sequence_cmatrix(internalInput)
lens = ghmmwrapper.list2int_array(lengths)
self.cseq = self.sequenceAllocationFunction(seq, lens, len(sequenceSetInput))
if isinstance(labelInput, list) and isinstance(labelDomain, LabelDomain):
assert len(sequenceSetInput)==len(labelInput), "no. of sequences and labels do not match."
self.labelDomain = labelDomain
internalLabels = [self.labelDomain.internalSequence(oneLabel) for oneLabel in labelInput]
(label,labellen) = ghmmhelper.list2int_matrix(internalLabels)
lens = ghmmwrapper.list2int_array(labellen)
self.cseq.init_labels(label, lens)
#internal use
elif isinstance(sequenceSetInput, ghmmwrapper.ghmm_dseq) or isinstance(sequenceSetInput, ghmmwrapper.ghmm_cseq):
log.debug("SequenceSet.__init__()", str(sequenceSetInput))
self.cseq = sequenceSetInput
if labelDomain is not None:
self.labelDomain = labelDomain
else:
raise UnknownInputType("inputType " + str(type(sequenceSetInput)) + " not recognized.")
def __del__(self):
"Deallocation of C sequence struct."
log.debug( "__del__ SequenceSet " + str(self.cseq))
def __str__(self):
"Defines string representation."
seq = self.cseq
strout = ["SequenceSet (N=" + str(seq.seq_number)+")"]
if seq.seq_number <= 6:
iter_list = range(seq.seq_number)
else:
iter_list = [0,1,'X',seq.seq_number-2,seq.seq_number-1]
for i in iter_list:
if i == 'X':
strout.append('\n\n ...\n')
else:
strout.append("\n seq " + str(i)+ "(len=" + str(seq.getLength(i)) + ")\n")
strout.append(' '+str(self[i]))
return join(strout,'')
def verboseStr(self):
"Defines string representation."
seq = self.cseq
strout = ["\nNumber of sequences: " + str(seq.seq_number)]
for i in range(seq.seq_number):
strout.append("\nSeq " + str(i)+ ", length " + str(seq.getLength(i)))
strout.append(", weight " + str(seq.getWeight(i)) + ":\n")
for j in range(seq.getLength(i)):
if self.emissionDomain.CDataType == "int":
strout.append(str( self.emissionDomain.external(( ghmmwrapper.int_matrix_getitem(self.cseq.seq, i, j) )) ))
elif self.emissionDomain.CDataType == "double":
strout.append(str( self.emissionDomain.external(( ghmmwrapper.double_matrix_getitem(self.cseq.seq, i, j) )) ) + " ")
# checking for labels
if self.emissionDomain.CDataType == "int" and self.cseq.state_labels != None:
strout.append("\nState labels:\n")
for j in range(seq.getLabelsLength(i)):
strout.append(str( self.labelDomain.external(ghmmwrapper.int_matrix_getitem(seq.state_labels,i,j))) +", ")
return join(strout,'')
def __len__(self):
"""
@returns the number of sequences in the SequenceSet.
"""
return self.cseq.seq_number
def sequenceLength(self, i):
"""
@returns the lenght of sequence 'i' in the SequenceSet
"""
return self.cseq.getLength(i)
def getWeight(self, i):
"""
@returns the weight of sequence i. @note Weights are used in Baum-Welch
"""
return self.cseq.getWeight(i)
def setWeight(self, i, w):
"""
Set the weight of sequence i. @note Weights are used in Baum-Welch
"""
ghmmwrapper.double_array_setitem(self.cseq.seq_w, i, w)
def __getitem__(self, index):
"""
@returns an EmissionSequence object initialized with a reference to
sequence 'index'.
"""
# check the index for correct range
if index >= self.cseq.seq_number:
raise IndexError
seq = self.cseq.get_singlesequence(index)
return EmissionSequence(self.emissionDomain, seq, ParentSequenceSet=self)
def getSeqLabel(self,index):
if not ghmmwrapper.SEQ_LABEL_FIELD:
raise UnsupportedFeature("the seq_label field is obsolete. If you need it rebuild the GHMM with the conditional \"GHMM_OBSOLETE\".")
return ghmmwrapper.long_array_getitem(self.cseq.seq_label,index)
def setSeqLabel(self,index,value):
if not ghmmwrapper.SEQ_LABEL_FIELD:
raise UnsupportedFeature("the seq_label field is obsolete. If you need it rebuild the GHMM with the conditional \"GHMM_OBSOLETE\".")
ghmmwrapper.long_array_setitem(self.cseq.seq_label,index,value)
def getGeneratingStates(self):
"""
@returns the state paths from which the sequences were generated as a
Python list of lists.
"""
states_len = ghmmwrapper.int_array2list(self.cseq.states_len, len(self))
l_state = []
for i, length in enumerate(states_len):
col = ghmmwrapper.int_matrix_get_col(self.cseq.states, i)
l_state.append(ghmmwrapper.int_array2list(col, length))
return l_state
def getSequence(self, index):
"""
@returns the index-th sequence in internal representation
"""
seq = []
if self.cseq.seq_number > index:
for j in range(self.cseq.getLength(index)):
seq.append(self.cseq.getSymbol(index, j))
return seq
else:
raise IndexError(str(index) + " is out of bounds, only " + str(self.cseq.seq_number) + "sequences")
def getStateLabel(self,index):
"""
@returns the labeling of the index-th sequence in internal representation
"""
label = []
if self.cseq.seq_number > index and self.cseq.state_labels != None:
for j in range(self.cseq.getLabelsLength(index)):
label.append(self.labelDomain.external(ghmmwrapper.int_matrix_getitem(self.cseq.state_labels, index, j)))
return label
else:
raise IndexError(str(0) + " is out of bounds, only " + str(self.cseq.seq_number) + "labels")
def hasStateLabels(self):
"""
@returns whether the sequence is labeled or not
"""
return self.cseq.state_labels != None
def merge(self, emissionSequences):  # Only allow EmissionSequence?
    """
    Merges 'emissionSequences' into 'self'.

    @param emissionSequences can either be an EmissionSequence or SequenceSet
    object.
    """
    # tuple form of isinstance instead of two separate checks
    if not isinstance(emissionSequences, (EmissionSequence, SequenceSet)):
        raise TypeError("EmissionSequence or SequenceSet required, got " + str(emissionSequences.__class__.__name__))

    self.cseq.add(emissionSequences.cseq)
    del emissionSequences  # removing merged sequences
def getSubset(self, seqIndixes):
    """
    @returns a SequenceSet containing (references to) the sequences with the
    indices in 'seqIndixes'.
    """
    seqNumber = len(seqIndixes)
    seq = self.sequenceAllocationFunction(seqNumber)

    # checking for state labels in the source C sequence struct
    if self.emissionDomain.CDataType == "int" and self.cseq.state_labels is not None:
        log.debug( "SequenceSet: found labels !")
        seq.calloc_state_labels()

    for i, seq_nr in enumerate(seqIndixes):
        len_i = self.cseq.getLength(seq_nr)
        seq.setSequence(i, self.cseq.getSequence(seq_nr))
        seq.setLength(i, len_i)
        # BUGFIX: the weight must be taken from the selected sequence
        # (seq_nr), not from the running subset position i
        seq.setWeight(i, self.cseq.getWeight(seq_nr))

        # setting labels if appropriate
        if self.emissionDomain.CDataType == "int" and self.cseq.state_labels is not None:
            # NOTE(review): the target index 'seqIndixes[i]' looks suspicious --
            # one would expect the label to be copied to subset position i;
            # confirm against copyStateLabel's C semantics before changing
            self.cseq.copyStateLabel(seqIndixes[i], seq, seqIndixes[i])

    seq.seq_number = seqNumber

    return SequenceSetSubset(self.emissionDomain, seq, self)
def write(self, fileName):
    """Write (append) this SequenceSet to the file 'fileName'."""
    self.cseq.write(fileName)
def asSequenceSet(self):
    """Convenience function; a SequenceSet already is a SequenceSet."""
    return self
class SequenceSetSubset(SequenceSet):
    """
    SequenceSetSubset contains a subset of the sequences from a SequenceSet
    object.

    @note On the C side only the references are used.
    """
    def __init__(self, emissionDomain, sequenceSetInput, ParentSequenceSet , labelDomain = None, labelInput = None):
        # reference on the parent SequenceSet object
        # NOTE(review): log.debug gets a second positional argument but the
        # format string has no '%s' placeholder -- the argument is never
        # rendered; confirm intended usage of the logger
        log.debug("SequenceSetSubset.__init__ -- begin -", str(ParentSequenceSet))
        self.ParentSequenceSet = ParentSequenceSet
        SequenceSet.__init__(self, emissionDomain, sequenceSetInput, labelDomain, labelInput)

    def __del__(self):
        """ Since we do not want to deallocate the sequence memory,
        the destructor has to be overloaded.
        """
        log.debug( "__del__ SequenceSubSet " + str(self.cseq))

        if self.cseq is not None:
            # frees only the subset bookkeeping, not the shared sequence data
            self.cseq.subseq_free()

        # remove reference on parent SequenceSet object
        self.ParentSequenceSet = None
def SequenceSetOpen(emissionDomain, fileName):
    # XXX Name doof
    """ Reads a sequence file with multiple sequence sets.

    @returns a list of SequenceSet objects.
    """
    if not os.path.exists(fileName):
        raise IOError('File ' + str(fileName) + ' not found.')

    # pick the reader and the SWIG proxy constructor matching the C data type
    if emissionDomain.CDataType == "int":
        read_sequences = ghmmwrapper.ghmm_dseq_read
        make_proxy = ghmmwrapper.ghmm_dseq
    elif emissionDomain.CDataType == "double":
        read_sequences = ghmmwrapper.ghmm_cseq_read
        make_proxy = ghmmwrapper.ghmm_cseq
    else:
        raise TypeError("Invalid c data type " + str(emissionDomain.CDataType))

    # ugly workaround for swig bug. swig is not always creating a proxy class
    return [SequenceSet(emissionDomain, make_proxy(ptr))
            for ptr in read_sequences(fileName)]
def writeToFasta(seqSet, fn):
    """
    Writes a SequenceSet into a fasta file.

    @param seqSet the SequenceSet to export
    @param fn     name of the output file
    @raises TypeError if seqSet is not a SequenceSet
    """
    if not isinstance(seqSet, SequenceSet):
        raise TypeError("SequenceSet expected.")
    # BUGFIX: use a context manager so the file is closed even if one of the
    # wrapper calls below raises (the original leaked the handle on error)
    with open(fn, 'w') as f:
        for i in range(len(seqSet)):
            # collect the external representation of every symbol in sequence i
            rseq = []
            for j in range(seqSet.sequenceLength(i)):
                rseq.append(str(seqSet.emissionDomain.external(
                    ghmmwrapper.int_matrix_getitem(seqSet.cseq.seq, i, j)
                )))

            f.write('>seq' + str(i) + '\n')
            f.write(fill(join(rseq, '')))
            f.write('\n')
#-------------------------------------------------------------------------------
# HMMFactory and derived -----------------------------------------------------
class HMMFactory(object):
    """ A HMMFactory is the base class of HMM factories.

    A HMMFactory has just a constructor and a call method.
    Concrete factories (HMMOpenFactory, HMMFromMatricesFactory) implement
    __call__ to produce HMM instances.
    """
# recognized model file types; also used as filename extensions by HMMOpen
GHMM_FILETYPE_SMO = 'smo'      # obsolete smo format (phasing out)
GHMM_FILETYPE_XML = 'xml'      # xml format (old and new variants)
GHMM_FILETYPE_HMMER = 'hmm'    # HMMER profile format
class HMMOpenFactory(HMMFactory):
    """ Opens a HMM from a file.

    Currently four formats are supported:
    HMMer, our smo file format, and two xml formats.

    @note the support for smo files and the old xml format will phase out
    """
    def __init__(self, defaultFileType=None):
        # file type assumed when __call__ receives no explicit 'filetype';
        # set by the deprecated HMMOpenHMMER/HMMOpenSMO/HMMOpenXML singletons
        self.defaultFileType = defaultFileType
def guessFileType(self, filename):
    """Guess the file format from the filename extension; None if unknown."""
    # check the known extensions in a fixed order (xml, smo, hmm)
    for filetype in (GHMM_FILETYPE_XML, GHMM_FILETYPE_SMO, GHMM_FILETYPE_HMMER):
        if filename.endswith('.' + filetype):
            return filetype
    return None
def __call__(self, fileName, modelIndex=None, filetype=None):
    """Open 'fileName' and return the HMM(s) it contains."""
    # a StringIO buffer cannot be checked for existence on disk
    if not isinstance(fileName, StringIO.StringIO):
        if not os.path.exists(fileName):
            raise IOError('File ' + str(fileName) + ' not found.')

    # resolve the file type: explicit argument > factory default > extension
    if not filetype:
        if self.defaultFileType:
            log.warning("HMMOpenHMMER, HMMOpenSMO and HMMOpenXML are deprecated. "
                        + "Use HMMOpen and the filetype parameter if needed.")
            filetype = self.defaultFileType
        else:
            filetype = self.guessFileType(fileName)
        if not filetype:
            raise WrongFileType("Could not guess the type of file " + str(fileName)
                                + " and no filetype specified")

    # dispatch on the resolved file type
    if filetype == GHMM_FILETYPE_XML:
        # XML file: both new and old format; try to validate against ghmm.dtd
        if ghmmwrapper.ghmm_xmlfile_validate(fileName):
            return self.openNewXML(fileName, modelIndex)
        return self.openOldXML(fileName)
    if filetype == GHMM_FILETYPE_SMO:
        return self.openSMO(fileName, modelIndex)
    if filetype == GHMM_FILETYPE_HMMER:
        return self.openHMMER(fileName)
    raise TypeError("Invalid file type " + str(filetype))
def openNewXML(self, fileName, modelIndex):
    """ Open one ore more HMM in the new xml format

    @param fileName   xml file to parse
    @param modelIndex index of the model to return, or None for all
    @returns a single HMM if modelIndex is given or the file holds exactly
             one model, otherwise a list of HMMs
    """
    # opens and parses the file
    file = ghmmwrapper.ghmm_xmlfile_parse(fileName)
    if file is None:
        log.debug( "XML has file format problems!")
        raise WrongFileType("file is not in GHMM xml format")

    nrModels = file.noModels
    modelType = file.modelType

    # we have a continuous HMM, prepare for hmm creation
    if (modelType & ghmmwrapper.kContinuousHMM):
        emission_domain = Float()
        if (modelType & ghmmwrapper.kMultivariate):
            distribution = MultivariateGaussianDistribution
            hmmClass = MultivariateGaussianMixtureHMM
        else:
            distribution = ContinuousMixtureDistribution
            hmmClass = ContinuousMixtureHMM
        getModel = file.get_cmodel

    # we have a discrete HMM, prepare for hmm creation
    elif ((modelType & ghmmwrapper.kDiscreteHMM)
          and not (modelType & ghmmwrapper.kTransitionClasses)
          and not (modelType & ghmmwrapper.kPairHMM)):
        # 'd' is a placeholder, replaced by the first model's Alphabet below
        emission_domain = 'd'
        distribution = DiscreteDistribution
        getModel = file.get_dmodel
        if (modelType & ghmmwrapper.kLabeledStates):
            hmmClass = StateLabelHMM
        else:
            hmmClass = DiscreteEmissionHMM

    # currently not supported
    else:
        raise UnsupportedFeature("Non-supported model type")

    # read all models to list at first
    result = []
    for i in range(nrModels):
        cmodel = getModel(i)
        # BUGFIX: the original tested "emission_domain is 'd'" -- identity
        # of string literals is an implementation detail; test for the
        # placeholder safely instead. NOTE(review): as in the original,
        # only the FIRST model's alphabet is read; all later models reuse
        # it -- confirm this is intended.
        if isinstance(emission_domain, str):
            emission_domain = Alphabet([], cmodel.alphabet)
        if modelType & ghmmwrapper.kLabeledStates:
            labelDomain = LabelDomain([], cmodel.label_alphabet)
            m = hmmClass(emission_domain, distribution(emission_domain), labelDomain, cmodel)
        else:
            m = hmmClass(emission_domain, distribution(emission_domain), cmodel)
        result.append(m)

    # unwrap to a single model if requested or if there is only one
    if modelIndex is not None:
        if modelIndex < nrModels:
            result = result[modelIndex]
        else:
            # BUGFIX: format arguments must be a tuple -- the original
            # '"..." % fileName, str(nrModels)' raised a TypeError instead
            # of the intended IndexError
            raise IndexError("the file %s has only %s models" % (fileName, nrModels))
    elif nrModels == 1:
        result = result[0]

    return result
def openOldXML(self, fileName):
    """Read a HMM from the deprecated old xml format (via ghmm_gato).

    @param fileName path to the old-format xml file
    @returns a single HMM object
    """
    from ghmm_gato import xmlutil
    hmm_dom = xmlutil.HMM(fileName)
    emission_domain = hmm_dom.AlphabetType()

    if emission_domain == int:
        [alphabets, A, B, pi, state_orders] = hmm_dom.buildMatrices()

        emission_domain = Alphabet(alphabets)
        distribution = DiscreteDistribution(emission_domain)
        # build adjacency list

        # check for background distributions
        (background_dist, orders, code2name) = hmm_dom.getBackgroundDist()
        # (background_dist, orders) = hmm_dom.getBackgroundDist()
        bg_list = []
        # if background distribution exists, set background distribution here
        if background_dist != {}:
            # transformation to list for input into BackgroundDistribution,
            # ensure the rigth order
            for i in range(len(code2name.keys())-1):
                bg_list.append(background_dist[code2name[i]])

            bg = BackgroundDistribution(emission_domain, bg_list)

        # check for state labels
        (label_list, labels) = hmm_dom.getLabels()
        if labels == ['None']:
            labeldom = None
            label_list = None
        else:
            labeldom = LabelDomain(labels)

        m = HMMFromMatrices(emission_domain, distribution, A, B, pi, None, labeldom, label_list)

        # old xml is discrete, set appropiate flag
        m.cmodel.addModelTypeFlags(ghmmwrapper.kDiscreteHMM)

        if background_dist != {}:
            ids = [-1]*m.N
            for s in hmm_dom.state.values():
                ids[s.index-1] = s.background # s.index ranges from [1, m.N]

            m.setBackground(bg, ids)
            log.debug( "model_type %x" % m.cmodel.model_type)
            log.debug("background_id" + str( ghmmwrapper.int_array2list(m.cmodel.background_id, m.N)))
        else:
            m.cmodel.bp = None
            m.cmodel.background_id = None

        # check for tied states
        tied = hmm_dom.getTiedStates()
        if len(tied) > 0:
            m.setFlags(kTiedEmissions)
            m.cmodel.tied_to = ghmmwrapper.list2int_array(tied)

        durations = hmm_dom.getStateDurations()
        if len(durations) == m.N:
            log.debug("durations: " + str(durations))
            m.extendDurations(durations)

        return m
    # NOTE(review): non-integer alphabets fall through and implicitly
    # return None -- confirm whether this should raise instead
def openSMO(self, fileName, modelIndex):
    """Read one or all models from an (obsolete) mo/smo file.

    @param fileName   path to the smo file
    @param modelIndex index of the model to return, or None for all
    @returns a single HMM or a list of HMMs
    """
    # MO & SMO Files, format is deprecated
    # check if ghmm is build with smo support
    if not ghmmwrapper.SMO_FILE_SUPPORT:
        raise UnsupportedFeature("smo files are deprecated. Please convert your files"
                                 "to the new xml-format or rebuild the GHMM with the"
                                 "conditional \"GHMM_OBSOLETE\".")

    (hmmClass, emission_domain, distribution) = self.determineHMMClass(fileName)
    log.debug("determineHMMClass = "+ str( (hmmClass, emission_domain, distribution)))

    # XXX broken since silent states are not supported by .smo file format
    if hmmClass == DiscreteEmissionHMM:
        models = ghmmwrapper.ghmm_dmodel_read(fileName)
        # BUGFIX: attribute is 'kDiscreteHMM' -- the original capital
        # 'KDiscreteHMM' raised AttributeError
        base_model_type = ghmmwrapper.kDiscreteHMM
    else:
        models = ghmmwrapper.ghmm_cmodel_read(fileName)
        base_model_type = ghmmwrapper.kContinuousHMM

    # BUGFIX: nrModels was never assigned in this method (NameError when
    # a modelIndex was passed)
    nrModels = len(models)

    if modelIndex is None:
        result = []
        for cmodel in models:
            # ugly workaround for SWIG not creating a proxy class
            # NOTE(review): wraps in ghmm_cmodel even for discrete models;
            # presumably broken for the discrete case -- confirm
            cmodel = ghmmwrapper.ghmm_cmodel(cmodel)
            cmodel.addModelTypeFlags(base_model_type)
            m = hmmClass(emission_domain, distribution(emission_domain), cmodel)
            result.append(m)
    else:
        if modelIndex < nrModels:
            cmodel = models[modelIndex]
            cmodel.addModelTypeFlags(base_model_type)
            result = hmmClass(emission_domain, distribution(emission_domain), cmodel)
        else:
            # BUGFIX: the original concatenated str + int which raised a
            # TypeError instead of the intended IndexError
            raise IndexError(str(fileName) + " has only " + str(nrModels) + " models")

    return result
def openSingleHMMER(self, fileName):
    """Read a single HMMER profile and convert it into a GHMM model."""
    h = modhmmer.hmmer(fileName)

    # choose the emission domain from the alphabet size of the profile
    if h.m == 4:
        emission_domain = DNA                    # nucleotide model
    elif h.m == 20:
        emission_domain = AminoAcids             # peptide model
    else:
        emission_domain = IntegerRange(0, h.m)   # generic alphabet
    distribution = DiscreteDistribution(emission_domain)

    # XXX TODO: Probably slow for large matrices (Rewrite for 0.9)
    A, B, pi, modelName = h.getGHMMmatrices()
    return HMMFromMatrices(emission_domain, distribution, A, B, pi, hmmName=modelName)
def openHMMER(self, fileName):
    """
    Reads a file containing multiple HMMs in HMMER format, returns list of
    HMM objects or a single HMM object.

    @param fileName path to the HMMER file
    @raises IOError if the file does not exist
    """
    if not os.path.exists(fileName):
        raise IOError('File ' + str(fileName) + ' not found.')

    modelList = []
    string = ""
    res = re.compile(r"^//")            # model terminator line
    stat = re.compile(r"^ACC\s+(\w+)")  # accession line carrying the model name

    f = open(fileName, "r")
    try:
        for line in f:
            string = string + line
            m = stat.match(line)
            if m:
                name = m.group(1)
                log.info( "Reading model " + str(name) + ".")

            match = res.match(line)
            if match:
                # end of one model: parse the accumulated text
                fileLike = StringIO.StringIO(string)
                modelList.append(self.openSingleHMMER(fileLike))
                string = ""
                match = None
    finally:
        # BUGFIX: the original never closed the file handle
        f.close()

    if len(modelList) == 1:
        return modelList[0]
    return modelList
def determineHMMClass(self, fileName):
    """Peek into an (obsolete) smo file and determine the HMM flavour.

    @param fileName path to the smo file
    @returns a tuple (hmm_class, emission_domain, distribution); all three
             are None if no HMM/SHMM marker was found
    @raises TypeError if the continuous model type cannot be determined
    """
    #
    # smo files. Obsolete
    #
    file = open(fileName,'r')

    hmmRe = re.compile("^HMM\s*=")                         # discrete model marker
    shmmRe = re.compile("^SHMM\s*=")                       # continuous model marker
    mvalueRe = re.compile("M\s*=\s*([0-9]+)")              # alphabet size / nr of mixtures
    densityvalueRe = re.compile("density\s*=\s*([0-9]+)")  # density component type
    cosvalueRe = re.compile("cos\s*=\s*([0-9]+)")          # nr of transition classes

    emission_domain = None

    # scan the whole file; the last occurrence of each key wins
    while 1:
        l = file.readline()
        if not l:
            break
        l = l.strip()
        if len(l) > 0 and l[0] != '#': # Not a comment line
            hmm = hmmRe.search(l)
            shmm = shmmRe.search(l)
            mvalue = mvalueRe.search(l)
            densityvalue = densityvalueRe.search(l)
            cosvalue = cosvalueRe.search(l)

            if hmm != None:
                if emission_domain != None and emission_domain != 'int':
                    log.error( "HMMOpenFactory:determineHMMClass: both HMM and SHMM? " + str(emission_domain))
                else:
                    emission_domain = 'int'

            if shmm != None:
                if emission_domain != None and emission_domain != 'double':
                    log.error( "HMMOpenFactory:determineHMMClass: both HMM and SHMM? " + str(emission_domain))
                else:
                    emission_domain = 'double'

            # NOTE(review): M / density / cos stay unbound if the file lacks
            # the corresponding lines -- the code below would raise NameError
            if mvalue != None:
                M = int(mvalue.group(1))

            if densityvalue != None:
                density = int(densityvalue.group(1))

            if cosvalue != None:
                cos = int(cosvalue.group(1))

    file.close()

    if emission_domain == 'int':
        # only integer alphabet
        emission_domain = IntegerRange(0,M)
        distribution = DiscreteDistribution
        hmm_class = DiscreteEmissionHMM
        return (hmm_class, emission_domain, distribution)

    elif emission_domain == 'double':
        # M       number of mixture components
        # density component type
        # cos     number of state transition classes
        if M == 1 and density == 0:
            emission_domain = Float()
            distribution = GaussianDistribution
            hmm_class = GaussianEmissionHMM
            return (hmm_class, emission_domain, distribution)

        elif M > 1 and density == 0:
            emission_domain = Float()
            distribution = GaussianMixtureDistribution
            hmm_class = GaussianMixtureHMM
            return (hmm_class, emission_domain, distribution)

        else:
            raise TypeError("Model type can not be determined.")

    return (None, None, None)
# the following three methods are deprecated
HMMOpenHMMER = HMMOpenFactory(GHMM_FILETYPE_HMMER) # read single HMMER model from file
HMMOpenSMO = HMMOpenFactory(GHMM_FILETYPE_SMO)     # read model(s) from an (obsolete) smo file
HMMOpenXML = HMMOpenFactory(GHMM_FILETYPE_XML)     # read model(s) from an xml file

# use only HMMOpen and specify the filetype if it can't guessed from the extension
HMMOpen = HMMOpenFactory()
class HMMFromMatricesFactory(HMMFactory):
    """ @todo Document matrix formats

    Factory that builds a HMM from transition matrix A, emission
    parameters B and initial distribution pi; see __call__ for the
    per-distribution interpretation of B.
    """

    # XXX TODO: this should use the editing context
def __call__(self, emissionDomain, distribution, A, B, pi, hmmName = None, labelDomain= None, labelList = None, densities = None):
    """Construct a HMM from matrices.

    @param emissionDomain Alphabet (discrete) or Float (continuous) domain
    @param distribution   distribution instance matching the domain
    @param A              NxN transition matrix (or a list of them for
                          multiple transition classes)
    @param B              emission parameters; format depends on the
                          distribution, see the branch comments below
    @param pi             initial state distribution of length N
    @param hmmName        optional model identifier
    @param labelDomain    optional LabelDomain for labeled discrete models
    @param labelList      optional state labels (requires labelDomain)
    @param densities      per-state density types (ContinuousMixture only)
    """
    if isinstance(emissionDomain, Alphabet):
        if not emissionDomain == distribution.alphabet:
            raise TypeError("emissionDomain and distribution must be compatible")

        # checking matrix dimensions and argument validation, only some obvious errors are checked
        if not len(A) == len(A[0]):
            raise InvalidModelParameters("A is not quadratic.")
        if not len(pi) == len(A):
            raise InvalidModelParameters("Length of pi does not match length of A.")
        if not len(A) == len(B):
            raise InvalidModelParameters("Different number of entries in A and B.")

        # BUGFIX: the second clause used to read
        # '(labelList is None and labelList is not None)', which is always
        # False, so a labelDomain without labelList slipped through
        if (labelDomain is None and labelList is not None) or (labelList is None and labelDomain is not None):
            raise InvalidModelParameters("Specify either both labelDomain and labelInput or neither.")

        if isinstance(distribution, DiscreteDistribution):
            # HMM has discrete emissions over finite alphabet: DiscreteEmissionHMM
            cmodel = ghmmwrapper.ghmm_dmodel(len(A), len(emissionDomain))

            # assign model identifier (if specified)
            if hmmName is not None:
                cmodel.name = hmmName
            else:
                cmodel.name = ''

            states = ghmmwrapper.dstate_array_alloc(cmodel.N)
            silent_states = []
            tmpOrder = []

            # initialize states
            for i in range(cmodel.N):
                state = ghmmwrapper.dstate_array_getRef(states, i)

                # compute state order: len(B[i]) == M ** (order+1)
                if cmodel.M > 1:
                    order = math.log(len(B[i]), cmodel.M) - 1
                else:
                    order = len(B[i]) - 1

                log.debug( "order in state " + str(i) + " = " + str(order) )
                # check for valid number of emission parameters
                order = int(order)
                if cmodel.M**(order+1) == len(B[i]):
                    tmpOrder.append(order)
                else:
                    raise InvalidModelParameters("The number of " + str(len(B[i])) +
                                                 " emission parameters for state " +
                                                 str(i) + " is invalid. State order can not be determined.")

                state.b = ghmmwrapper.list2double_array(B[i])
                state.pi = pi[i]

                # a state that emits nothing is silent
                if sum(B[i]) == 0.0:
                    silent_states.append(1)
                else:
                    silent_states.append(0)

                # set out probabilities
                state.out_states, state.out_id, state.out_a = ghmmhelper.extract_out(A[i])

                # set "in" probabilities (column i of A)
                A_col_i = [row[i] for row in A]
                state.in_states, state.in_id, state.in_a = ghmmhelper.extract_out(A_col_i)
                # fix probabilities in reestimation, else 0
                state.fix = 0

            cmodel.s = states
            if sum(silent_states) > 0:
                cmodel.model_type |= kSilentStates
                cmodel.silent = ghmmwrapper.list2int_array(silent_states)

            cmodel.maxorder = max(tmpOrder)
            if cmodel.maxorder > 0:
                log.debug( "Set kHigherOrderEmissions.")
                cmodel.model_type |= kHigherOrderEmissions
                cmodel.order = ghmmwrapper.list2int_array(tmpOrder)

            # initialize lookup table for powers of the alphabet size,
            # speeds up models with higher order states
            powLookUp = [1] * (cmodel.maxorder+2)
            for i in range(1, len(powLookUp)):
                powLookUp[i] = powLookUp[i-1] * cmodel.M
            cmodel.pow_lookup = ghmmwrapper.list2int_array(powLookUp)

            # check for state labels
            if labelDomain is not None and labelList is not None:
                if not isinstance(labelDomain, LabelDomain):
                    raise TypeError("LabelDomain object required.")

                cmodel.model_type |= kLabeledStates
                m = StateLabelHMM(emissionDomain, distribution, labelDomain, cmodel)
                m.setLabels(labelList)
                return m
            else:
                return DiscreteEmissionHMM(emissionDomain, distribution, cmodel)
        else:
            raise GHMMError(type(distribution), "Not a valid distribution for Alphabet")

    elif isinstance(emissionDomain, Float):
        # determining number of transition classes
        cos = ghmmhelper.classNumber(A)
        if cos == 1:
            A = [A]

        cmodel = ghmmwrapper.ghmm_cmodel(len(A[0]), cos)
        log.debug("cmodel.cos = " + str(cmodel.cos))
        self.constructSwitchingTransitions(cmodel, pi, A)

        if isinstance(distribution, GaussianDistribution):
            # B[i] = (mu, sigma) for state i
            # initialize emissions
            for i in range(cmodel.N):
                state = ghmmwrapper.cstate_array_getRef(cmodel.s, i)
                state.M = 1

                # set up emission(s), density type is normal
                emissions = ghmmwrapper.c_emission_array_alloc(1)
                emission = ghmmwrapper.c_emission_array_getRef(emissions, 0)
                emission.type = ghmmwrapper.normal
                emission.dimension = 1
                (mu, sigma) = B[i]
                emission.mean.val = mu #mu = mue in GHMM C-lib.
                emission.variance.val = sigma
                emission.fixed = 0 # fixing of emission deactivated by default
                emission.setDensity(0)

                # append emission to state
                state.e = emissions
                state.c = ghmmwrapper.list2double_array([1.0])

            return GaussianEmissionHMM(emissionDomain, distribution, cmodel)

        elif isinstance(distribution, GaussianMixtureDistribution):
            # Interpretation of B matrix for the mixture case
            # (Example with three states and two components each):
            #   B = [
            #         [ ["mu11","mu12"],["sig11","sig12"],["w11","w12"] ],
            #         [ ["mu21","mu22"],["sig21","sig22"],["w21","w22"] ],
            #         [ ["mu31","mu32"],["sig31","sig32"],["w31","w32"] ],
            #       ]
            log.debug( "*** mixture model")

            cmodel.M = len(B[0][0])

            # initialize states
            for i in range(cmodel.N):
                state = ghmmwrapper.cstate_array_getRef(cmodel.s, i)
                state.M = len(B[0][0])

                # allocate arrays of emmission parameters
                mu_list = B[i][0]
                sigma_list = B[i][1]
                weight_list = B[i][2]

                state.c = ghmmwrapper.list2double_array(weight_list)

                # set up emission(s), density type is normal
                emissions = ghmmwrapper.c_emission_array_alloc(state.M)

                for j in range(state.M):
                    emission = ghmmwrapper.c_emission_array_getRef(emissions, j)
                    emission.type = ghmmwrapper.normal
                    emission.dimension = 1
                    mu = mu_list[j]
                    sigma = sigma_list[j]
                    emission.mean.val = mu #mu = mue in GHMM C-lib.
                    emission.variance.val = sigma
                    emission.fixed = 0 # fixing of emission deactivated by default
                    emission.setDensity(0)

                # append emissions to state
                state.e = emissions

            return GaussianMixtureHMM(emissionDomain, distribution, cmodel)

        elif isinstance(distribution, ContinuousMixtureDistribution):
            # Interpretation of B matrix for the mixture case
            # (Example with three states and two components each):
            #   B = [
            #         [["mu11","mu12"], ["sig11","sig12"], ["a11","a12"], ["w11","w12"]],
            #         [["mu21","mu22"], ["sig21","sig22"], ["a21","a22"], ["w21","w22"]],
            #         [["mu31","mu32"], ["sig31","sig32"], ["a31","a32"], ["w31","w32"]],
            #       ]
            #
            # ghmmwrapper.uniform: mu = min, sig = max
            # ghmmwrapper.normal_right or ghmmwrapper.normal_left: a = cutoff
            log.debug( "*** general mixture model")

            cmodel.M = len(B[0][0])

            # initialize states
            for i in range(cmodel.N):
                state = ghmmwrapper.cstate_array_getRef(cmodel.s, i)
                state.M = len(B[i][0])

                # set up emission(s), density type per component from 'densities'
                emissions = ghmmwrapper.c_emission_array_alloc(state.M)
                weight_list = B[i][3]

                combined_map = [(first, B[i][0][n], B[i][1][n], B[i][2][n])
                                for n, first in enumerate(densities[i])]

                for j, parameters in enumerate(combined_map):
                    emission = ghmmwrapper.c_emission_array_getRef(emissions, j)
                    emission.type = densities[i][j]
                    emission.dimension = 1
                    if (emission.type == ghmmwrapper.normal
                        or emission.type == ghmmwrapper.normal_approx):
                        emission.mean.val = parameters[1]
                        emission.variance.val = parameters[2]
                    elif emission.type == ghmmwrapper.normal_right:
                        emission.mean.val = parameters[1]
                        emission.variance.val = parameters[2]
                        emission.min = parameters[3]
                    elif emission.type == ghmmwrapper.normal_left:
                        emission.mean.val = parameters[1]
                        emission.variance.val = parameters[2]
                        emission.max = parameters[3]
                    elif emission.type == ghmmwrapper.uniform:
                        emission.max = parameters[1]
                        emission.min = parameters[2]
                    else:
                        raise TypeError("Unknown Distribution type:" + str(emission.type))

                # append emissions to state
                state.e = emissions
                state.c = ghmmwrapper.list2double_array(weight_list)

            return ContinuousMixtureHMM(emissionDomain, distribution, cmodel)

        elif isinstance(distribution, MultivariateGaussianDistribution):
            log.debug( "*** multivariate gaussian distribution model")

            # this is being extended to also support mixtures of multivariate gaussians
            # Interpretation of B matrix for the multivariate gaussian case
            # (Example with three states and two mixture components with two dimensions):
            #   B = [
            #        [["mu111","mu112"],["sig1111","sig1112","sig1121","sig1122"],
            #         ["mu121","mu122"],["sig1211","sig1212","sig1221","sig1222"],
            #         ["w11","w12"] ],
            #        [["mu211","mu212"],["sig2111","sig2112","sig2121","sig2122"],
            #         ["mu221","mu222"],["sig2211","sig2212","sig2221","sig2222"],
            #         ["w21","w22"] ],
            #        [["mu311","mu312"],["sig3111","sig3112","sig3121","sig3122"],
            #         ["mu321","mu322"],["sig3211","sig3212","sig3221","sig3222"],
            #         ["w31","w32"] ],
            #       ]
            #
            # ["mu311","mu312"] is the mean vector of the two dimensional
            # gaussian in state 3, mixture component 1
            # ["sig1211","sig1212","sig1221","sig1222"] is the covariance
            # matrix of the two dimensional gaussian in state 1, mixture component 2
            # ["w21","w22"] are the weights of the mixture components
            # in state 2
            # For states with only one mixture component, a implicit weight
            # of 1.0 is assumed

            cmodel.addModelTypeFlags(ghmmwrapper.kMultivariate)
            cmodel.dim = len(B[0][0]) # all states must have same dimension

            # initialize states
            for i in range(cmodel.N):
                # set up state parameterss
                state = ghmmwrapper.cstate_array_getRef(cmodel.s, i)
                # B[i] interleaves mean vectors and covariance matrices,
                # so the component count is half its length (floor division,
                # identical to '/' on Python 2 ints)
                state.M = len(B[i]) // 2
                if state.M > cmodel.M:
                    cmodel.M = state.M

                # multiple mixture components
                if state.M > 1:
                    state.c = ghmmwrapper.list2double_array(B[i][state.M*2]) # Mixture weights.
                else:
                    state.c = ghmmwrapper.list2double_array([1.0])

                # set up emission(s), density type is normal
                emissions = ghmmwrapper.c_emission_array_alloc(state.M) # M emission components in this state

                for em in range(state.M):
                    emission = ghmmwrapper.c_emission_array_getRef(emissions, em)
                    emission.dimension = len(B[0][0]) # dimension must be same in all states and emissions
                    mu = B[i][em*2]
                    sigma = B[i][em*2+1]
                    emission.mean.vec = ghmmwrapper.list2double_array(mu)
                    emission.variance.mat = ghmmwrapper.list2double_array(sigma)
                    emission.sigmacd = ghmmwrapper.list2double_array(sigma) # just for allocating the space
                    emission.sigmainv = ghmmwrapper.list2double_array(sigma) # just for allocating the space
                    emission.fixed = 0 # fixing of emission deactivated by default
                    emission.setDensity(6)

                    # calculate inverse and determinant of covariance matrix
                    determinant = ghmmwrapper.list2double_array([0.0])
                    ghmmwrapper.ighmm_invert_det(emission.sigmainv, determinant,
                                                 emission.dimension, emission.variance.mat)
                    emission.det = ghmmwrapper.double_array_getitem(determinant, 0)

                # append emissions to state
                state.e = emissions

            return MultivariateGaussianMixtureHMM(emissionDomain, distribution, cmodel)

        else:
            raise GHMMError(type(distribution),
                            "Cannot construct model for this domain/distribution combination")
    else:
        # BUGFIX: fixed 'doamin' typo in the error message
        raise TypeError("Unknown emission domain" + str(emissionDomain))
def constructSwitchingTransitions(self, cmodel, pi, A):
    """ @internal function: creates switching transitions """
    # initialize every state with its prior and its in/out transitions
    for state_index in range(cmodel.N):
        state = ghmmwrapper.cstate_array_getRef(cmodel.s, state_index)
        state.pi = pi[state_index]

        # outgoing transitions, one list per transition class
        state.out_states, state.out_id, state.out_a = \
            ghmmhelper.extract_out_cos(A, cmodel.cos, state_index)

        # incoming transitions, one list per transition class
        state.in_states, state.in_id, state.in_a = \
            ghmmhelper.extract_in_cos(A, cmodel.cos, state_index)
# module-level singleton: the public constructor for HMMs built from matrices
HMMFromMatrices = HMMFromMatricesFactory()
#-------------------------------------------------------------------------------
#- Background distribution
class BackgroundDistribution(object):
    """ Background distributions object

    holds discrete distributions used as background while training
    discrete HMMs to avoid overfitting.

    Input is a discrete EmissionDomain and a list of lists. Each inner list
    is a distinct distribution. The distributions can be of higher order.
    The length of a single distribution is a power of len(EmissionDomain)
    """
    def __init__(self, emissionDomain, bgInput):
        # build the C struct from a Python list of distributions ...
        if type(bgInput) == list:
            self.emissionDomain = emissionDomain
            distNum = len(bgInput)

            order = ghmmwrapper.int_array_alloc(distNum)
            b = ghmmwrapper.double_matrix_alloc_row(distNum)
            for i in range(distNum):
                # infer the order: len(bgInput[i]) == |Sigma| ** (order+1)
                if len(emissionDomain) > 1:
                    o = math.log(len(bgInput[i]), len(emissionDomain)) - 1
                else:
                    o = len(bgInput[i]) - 1
                # NOTE(review): float log may yield e.g. 1.0000000000000002,
                # which would make this assert fail spuriously -- confirm
                assert (o % 1) == 0, "Invalid order of distribution " + str(i) + ": " + str(o)
                ghmmwrapper.int_array_setitem(order, i, int(o))
                # dynamic allocation, rows have different lenghts
                b_i = ghmmwrapper.list2double_array(bgInput[i])
                ghmmwrapper.double_matrix_set_col(b, i, b_i)

            self.cbackground = ghmmwrapper.ghmm_dbackground(distNum, len(emissionDomain), order, b)
        # ... or wrap an already existing C struct
        elif isinstance(bgInput, ghmmwrapper.background_distributions):
            self.cbackground = bgInput
            self.emissionDomain = emissionDomain
        else:
            raise TypeError("Input type "+str(type(bgInput)) +" not recognized.")

    def __del__(self):
        log.debug( "__del__ BackgroundDistribution " + str(self.cbackground))
        del self.cbackground
        self.cbackground = None

    def __str__(self):
        outstr = 'BackgroundDistribution (N= '+str(self.cbackground.n)+'):\n'
        outstr += str(self.emissionDomain) + "\n"
        d = ghmmhelper.double_matrix2list(self.cbackground.b, self.cbackground.n, len(self.emissionDomain))
        outstr += "Distributions:\n"
        f = lambda x: "%.2f" % (x,)  # float rounding function
        for i in range(self.cbackground.n):
            outstr += '  '+str(i+1) + ":(order= " + str(self.cbackground.getOrder(i))+"): "
            outstr += " "+join(map(f,d[i]),', ')+"\n"
        return outstr

    def verboseStr(self):
        """Multi-line, unrounded representation of all distributions."""
        outstr = "BackgroundDistribution instance:\n"
        outstr += "Number of distributions: " + str(self.cbackground.n)+"\n\n"
        outstr += str(self.emissionDomain) + "\n"
        d = ghmmhelper.double_matrix2list(self.cbackground.b, self.cbackground.n, len(self.emissionDomain))
        outstr += "Distributions:\n"
        for i in range(self.cbackground.n):
            outstr += "  Order: " + str(self.cbackground.getOrder(i))+"\n"
            outstr += "  " + str(i+1) +": "+str(d[i])+"\n"
        return outstr

    def getCopy(self):
        """Return a copy of the underlying C background struct."""
        return self.cbackground.copy()

    def toLists(self):
        """Convert back to Python lists.

        @returns tuple (distNum, orders, B) where B is a list of
                 distributions (lists of floats)
        """
        dim = self.cbackground.m
        distNum = self.cbackground.n
        orders = ghmmwrapper.int_array2list(self.cbackground.order, distNum)
        B = []
        for i in xrange(distNum):
            order = orders[i]
            # BUGFIX: the original used the undefined name 'm'; the alphabet
            # size is 'dim' (self.cbackground.m)
            size = int(pow(dim, (order+1)))
            b = [0.0]*size
            for j in xrange(size):
                b[j] = ghmmwrapper.double_matrix_getitem(self.cbackground.b,i,j)
            B.append(b)
        return (distNum, orders, B)
#-------------------------------------------------------------------------------
#- HMM and derived
class HMM(object):
    """ The HMM base class.

    All functions where the C signatures allows it will be defined in here.
    Unfortunately there still is a lot of overloading going on in derived classes.

    Generic features (these apply to all derived classes):
    - Forward algorithm
    - Viterbi algorithm
    - Baum-Welch training
    - HMM distance metric
    - ...
    """
    def __init__(self, emissionDomain, distribution, cmodel):
        # the EmissionDomain of the model (e.g. Alphabet, Float)
        self.emissionDomain = emissionDomain
        # the Distribution over the EmissionDomain
        self.distribution = distribution
        # the underlying C model (SWIG proxy)
        self.cmodel = cmodel

        self.N = self.cmodel.N  # number of states
        self.M = self.cmodel.M  # number of symbols / mixture components
def __del__(self):
    """ Deallocation routine for the underlying C data structures. """
    # NOTE(review): nothing is freed here; presumably subclasses / SWIG
    # own the actual C-level deallocation -- confirm
    log.debug( "__del__ HMM" + str(self.cmodel))
def loglikelihood(self, emissionSequences):
    """ Compute log( P[emissionSequences| model]) using the forward algorithm
    assuming independence of the sequences in emissionSequences

    @param emissionSequences can either be a SequenceSet or a EmissionSequence
    @returns log( P[emissionSequences| model]) of type float which is
    computed as \f$\sum_{s} log( P[s| model])\f$ when emissionSequences
    is a SequenceSet

    @note The implementation does not compute the full forward matrix since
    we are only interested in the likelihoods in this case.
    """
    # total log-likelihood is the sum of the per-sequence log-likelihoods
    per_sequence = self.loglikelihoods(emissionSequences)
    return sum(per_sequence)
def loglikelihoods(self, emissionSequences):
    """ Compute a vector ( log( P[s| model]) )_{s} of log-likelihoods of the
    individual emission_sequences using the forward algorithm

    @param emissionSequences is of type SequenceSet
    @returns log( P[emissionSequences| model]) of type float
    (numarray) vector of floats
    """
    log.debug("HMM.loglikelihoods() -- begin")
    emissionSequences = emissionSequences.asSequenceSet()
    seqNumber = len(emissionSequences)

    likelihoodList = []

    for i in range(seqNumber):
        # NOTE(review): warning level for a per-sequence progress message
        # seems too loud -- presumably this should be log.debug
        log.warning("\ngetting likelihood for sequence %i\n"%i)
        seq = emissionSequences.cseq.getSequence(i)
        tmp = emissionSequences.cseq.getLength(i)

        ret_val,likelihood = self.cmodel.logp(seq, tmp)
        if ret_val == -1:

            log.warning("forward returned -1: Sequence "+str(i)+" cannot be build.")
            # XXX TODO Eventually this should trickle down to C-level
            # Returning -DBL_MIN instead of infinity is stupid, since the latter allows
            # to continue further computations with that inf, which causes
            # things to blow up later.
            # cmodel.logp() could do without a return value if -Inf is returned
            # What should be the semantics in case of computing the likelihood of
            # a set of sequences
            likelihoodList.append(-float('Inf'))
        else:
            likelihoodList.append(likelihood)

    del emissionSequences
    log.debug("HMM.loglikelihoods() -- end")
    return likelihoodList
# Further Marginals ...
def pathPosterior(self, sequence, path):
    """
    @returns the log posterior probability for 'path' having generated
    'sequence'.

    @attention pathPosterior needs to calculate the complete forward and
    backward matrices. If you are interested in multiple paths it would
    be more efficient to use the 'posterior' function directly and not
    multiple calls to pathPosterior

    @todo for silent states things are more complicated -> to be done
    """
    # XXX TODO for silent states things are more complicated -> to be done
    if self.hasFlags(kSilentStates):
        raise NotImplementedError("Models with silent states not yet supported.")

    # calculate complete posterior matrix
    post = self.posterior(sequence)
    path_posterior = []

    if not self.hasFlags(kSilentStates):
        # if there are no silent states things are straightforward
        assert len(path) == len(sequence), "Path and sequence have different lengths"

        # appending posteriors for each element of path
        for p,state in enumerate(path):
            try:
                path_posterior.append(post[p][state])
            except IndexError:
                raise IndexError("Invalid state index " + str(state) + ". Model and path are incompatible")
        return path_posterior

    # # XXX TODO silent states are yet to be done
    # else:
    #     # for silent state models we have to propagate the silent states in each column of the
    #     # posterior matrix
    #
    #     assert not self.isSilent(path[0]), "First state in path must not be silent."
    #
    #     j = 0   # path index
    #     for i in range(len(sequence)):
    #         pp = post[i][path[j]]
    #
    #         print pp
    #
    #         if pp == 0:
    #             return float('-inf')
    #         else:
    #             path_log_lik += math.log(post[p][path[p]])
    #             j+=1
    #
    #
    #         # propagate path up until the next emitting state
    #         while self.isSilent(path[j]):
    #
    #             print "** silent state ", path[j]
    #
    #             pp = post[i][path[j]]
    #             if pp == 0:
    #                 return float('-inf')
    #             else:
    #                 path_log_lik += math.log(post[p][path[p]])
    #                 j+=1
    #
    #     return path_log_lik
def statePosterior(self, sequence, state, time):
    """Return the posterior probability of being in 'state' at 'time'
    in 'sequence'.

    @attention: statePosterior needs to calculate the complete forward
    and backward matrices. If you are interested in multiple states
    it would be more efficient to use the posterior function directly
    and not multiple calls to statePosterior
    @todo for silent states things are more complicated -> to be done
    """
    # XXX TODO for silent states things are more complicated -> to be done
    if self.hasFlags(kSilentStates):
        raise NotImplementedError("Models with silent states not yet supported.")

    # validate arguments before computing the (expensive) posterior matrix
    if not 0 <= time < len(sequence):
        raise IndexError("Invalid sequence index: " + str(time) + " (sequence has length " + str(len(sequence)) + " ).")
    if not 0 <= state < self.N:
        raise IndexError("Invalid state index: " + str(state) + " (models has " + str(self.N) + " states ).")

    return self.posterior(sequence)[time][state]
def posterior(self, sequence):
    """ Posterior distribution matrix for 'sequence'.

    @returns a (T x N) list of lists: entrywise product of the forward
             and backward matrices
    @todo for silent states things are more complicated -> to be done
    """
    # XXX TODO for silent states things are more complicated -> to be done
    if self.hasFlags(kSilentStates):
        raise NotImplementedError("Models with silent states not yet supported.")
    if not isinstance(sequence, EmissionSequence):
        raise TypeError("Input to posterior must be EmissionSequence object")

    (alpha, scale) = self.forward(sequence)
    beta = self.backward(sequence, scale)

    # alpha and beta are both T rows of N entries; multiply entrywise
    return [[a * b for (a, b) in zip(arow, brow)]
            for (arow, brow) in zip(alpha, beta)]
def joined(self, emissionSequence, stateSequence):
    """ Return log P[ emissionSequence, stateSequence | model ].

    @param emissionSequence EmissionSequence object
    @param stateSequence list of state indices
    @returns the joint log-probability, or None if the pair is impossible
    @raises TypeError for a non-EmissionSequence argument
    @raises IndexError when lengths disagree and the model has no silent states
    """
    if not isinstance(emissionSequence, EmissionSequence):
        raise TypeError("EmissionSequence required, got " + str(emissionSequence.__class__.__name__))

    # continuous sequences may be multi-dimensional; default dimension is 1
    seqdim = 1
    if emissionSequence.emissionDomain == Float():
        seqdim = emissionSequence.cseq.dim
    if seqdim < 1:
        seqdim = 1

    t = len(emissionSequence)
    s = len(stateSequence)

    if t / seqdim != s and not self.hasFlags(kSilentStates):
        raise IndexError("sequence and state sequence have different lengths " +
                         "but the model has no silent states.")

    seq = emissionSequence.cseq.getSequence(0)
    states = ghmmwrapper.list2int_array(stateSequence)
    try:
        err, logp = self.cmodel.logp_joint(seq, t, states, s)
    finally:
        # free the C array on every path; the original leaked it when
        # logp_joint reported an error
        ghmmwrapper.free(states)

    if err != 0:
        log.error("logp_joint finished with -1: EmissionSequence cannot be build under stateSequence.")
        return
    return logp
# The functions for model training are defined in the derived classes.
def baumWelch(self, trainingSequences, nrSteps=ghmmwrapper.MAX_ITER_BW, loglikelihoodCutoff=ghmmwrapper.EPS_ITER_BW):
    """Baum-Welch reestimation; implemented in derived classes."""
    raise NotImplementedError("to be defined in derived classes")

def baumWelchSetup(self, trainingSequences, nrSteps):
    """Allocate the Baum-Welch training context; implemented in derived classes."""
    raise NotImplementedError("to be defined in derived classes")

def baumWelchStep(self, nrSteps, loglikelihoodCutoff):
    """Run a number of Baum-Welch steps; implemented in derived classes."""
    raise NotImplementedError("to be defined in derived classes")

def baumWelchDelete(self):
    """Release the Baum-Welch training context; implemented in derived classes."""
    raise NotImplementedError("to be defined in derived classes")
# extern double ghmm_c_prob_distance(smodel *cm0, smodel *cm, int maxT, int symmetric, int verbose);
def distance(self, model, seqLength):
    """
    @returns the distance between 'self.cmodel' and 'model'.

    Delegates to the C-level probability distance with symmetric=0,
    verbose=0 (see the extern declaration above).
    """
    return self.cmodel.prob_distance(model.cmodel, seqLength, 0, 0)
def forward(self, emissionSequence):
    """Compute the forward variables for 'emissionSequence'.

    @param emissionSequence EmissionSequence object (only sequence 0 is used)
    @returns tuple (pyalpha, pyscale): the (N x T)-matrix containing the
             forward-variables and the scaling vector, as Python lists
    """
    log.debug("HMM.forward -- begin")
    # XXX Allocations should be in try, except, finally blocks
    # to assure deallocation even in the case of errors.
    # This will leak otherwise.
    seq = emissionSequence.cseq.getSequence(0)
    t = len(emissionSequence)
    calpha = ghmmwrapper.double_matrix_alloc(t, self.N)
    cscale = ghmmwrapper.double_array_alloc(t)

    error, unused = self.cmodel.forward(seq, t, calpha, cscale)
    if error == -1:
        # error is only logged; the (possibly partial) matrices are
        # still converted and returned below
        log.error("forward finished with -1: EmissionSequence cannot be build.")

    # translate alpha / scale to python lists
    pyscale = ghmmwrapper.double_array2list(cscale, t)
    pyalpha = ghmmhelper.double_matrix2list(calpha, t, self.N)

    # deallocation of the C-level buffers
    ghmmwrapper.free(cscale)
    ghmmwrapper.double_matrix_free(calpha, t)
    log.debug("HMM.forward -- end")
    return pyalpha, pyscale
def backward(self, emissionSequence, scalingVector):
    """Compute the backward variables for 'emissionSequence'.

    @param emissionSequence EmissionSequence object (only sequence 0 is used)
    @param scalingVector scaling vector as returned by forward()
    @returns the (N x T)-matrix containing the backward-variables
    """
    log.debug("HMM.backward -- begin")
    seq = emissionSequence.cseq.getSequence(0)

    # parsing 'scalingVector' to C double array.
    cscale = ghmmwrapper.list2double_array(scalingVector)

    # allocating beta matrix
    t = len(emissionSequence)
    cbeta = ghmmwrapper.double_matrix_alloc(t, self.N)

    error = self.cmodel.backward(seq, t, cbeta, cscale)
    if error == -1:
        # error is only logged; the (possibly partial) matrix is returned
        log.error("backward finished with -1: EmissionSequence cannot be build.")

    pybeta = ghmmhelper.double_matrix2list(cbeta, t, self.N)

    # deallocation
    ghmmwrapper.free(cscale)
    ghmmwrapper.double_matrix_free(cbeta, t)
    log.debug("HMM.backward -- end")
    return pybeta
def viterbi(self, eseqs):
    """ Compute the Viterbi-path for each sequence in emissionSequences.

    @param eseqs can either be a SequenceSet or an EmissionSequence
    @returns [q_0, ..., q_T] the viterbi-path of \p eseqs if it is an
    EmissionSequence object,
    [[q_0^0, ..., q_T^0], ..., [q_0^k, ..., q_T^k]] for a k-sequence
    SequenceSet (paired with the corresponding log-probabilities)
    """
    log.debug("HMM.viterbi() -- begin")
    emissionSequences = eseqs.asSequenceSet()
    seqNumber = len(emissionSequences)

    allLogs = []
    allPaths = []
    for i in range(seqNumber):
        seq = emissionSequences.cseq.getSequence(i)
        seq_len = emissionSequences.cseq.getLength(i)

        if seq_len > 0:
            viterbiPath, pathlen, log_p = self.cmodel.viterbi(seq, seq_len)
            onePath = ghmmwrapper.int_array2list(viterbiPath, pathlen)
            ghmmwrapper.free(viterbiPath)
        else:
            # Empty sequence: empty path with log-probability 0 (P = 1).
            # The original left 'pathlen' and 'log_p' unbound here -- or,
            # worse, silently reused stale values from a previous iteration.
            onePath = []
            log_p = 0.0

        allPaths.append(onePath)
        allLogs.append(log_p)

    log.debug("HMM.viterbi() -- end")
    if seqNumber > 1:
        return allPaths, allLogs
    else:
        return allPaths[0], allLogs[0]
def sample(self, seqNr, T, seed=0):
    """ Sample emission sequences.

    @param seqNr number of sequences to be sampled
    @param T maximal length of each sequence
    @param seed initialization value for rng, default 0 leaves the state
    of the rng alone
    @returns a SequenceSet object.
    """
    # last argument -1: sentinel passed to the C generator -- presumably
    # "no native length limit"; TODO confirm against generate_sequences
    seqPtr = self.cmodel.generate_sequences(seed, T, seqNr, -1)
    return SequenceSet(self.emissionDomain, seqPtr)

def sampleSingle(self, T, seed=0):
    """ Sample a single emission sequence of length at most T.

    @param T maximal length of the sequence
    @param seed initialization value for rng, default 0 leaves the state
    of the rng alone
    @returns a EmissionSequence object.
    """
    log.debug("HMM.sampleSingle() -- begin")
    seqPtr = self.cmodel.generate_sequences(seed, T, 1, -1)
    log.debug("HMM.sampleSingle() -- end")
    return EmissionSequence(self.emissionDomain, seqPtr)
def clearFlags(self, flags):
    """ Clears one or more model type flags.
    @attention Use with care.
    """
    log.debug("clearFlags: " + self.printtypes(flags))
    self.cmodel.model_type &= ~flags

def hasFlags(self, flags):
    """ Checks if the model has one or more model type flags set.

    @returns a nonzero int (truthy, not necessarily True) when any of
    'flags' is set, 0 otherwise
    """
    return self.cmodel.model_type & flags

def setFlags(self, flags):
    """ Sets one or more model type flags.
    @attention Use with care.
    """
    log.debug("setFlags: " + self.printtypes(flags))
    self.cmodel.model_type |= flags

def state(self, stateLabel):
    """ Given a stateLabel return the integer index to the state.

    Implemented in derived classes.
    """
    raise NotImplementedError
def getInitial(self, i):
    """ Accessor function for the initial probability \f$\pi_i\f$ """
    state = self.cmodel.getState(i)
    return state.pi
def setInitial(self, i, prob, fixProb=False):
    """ Accessor function for the initial probability \f$\pi_i\f$.

    If 'fixProb' = True \f$\pi\f$ will be rescaled to 1 with 'pi[i]'
    fixed to the argument value of 'prob'.
    """
    state = self.cmodel.getState(i)
    old_pi = state.pi
    state.pi = prob

    # renormalizing pi, pi(i) is fixed on value 'prob'
    if fixProb:
        # Scale the remaining entries so they sum to (1 - prob).  The
        # original divided by (1 - old_pi) / prob, which makes the whole
        # vector sum to 2 * prob instead of 1.
        old_rest = 1.0 - old_pi
        if old_rest > 0.0:
            factor = (1.0 - prob) / old_rest
            for j in range(self.N):
                if i != j:
                    other = self.cmodel.getState(j)
                    other.pi = other.pi * factor
        elif self.N > 1:
            # all initial mass was on state i; distribute the remainder
            # uniformly over the other states
            share = (1.0 - prob) / (self.N - 1)
            for j in range(self.N):
                if i != j:
                    self.cmodel.getState(j).pi = share
def getTransition(self, i, j):
    """ Accessor function for the transition a_ij.

    @returns the transition probability; negative values reported by the
    C layer (no such transition) are mapped to 0.0
    @raises IndexError for out-of-range state indices
    """
    # validate indices before touching the C model; the original fetched
    # state i before the bounds checks and never used the result
    if not 0 <= i < self.N:
        raise IndexError("Index " + str(i) + " out of bounds.")
    if not 0 <= j < self.N:
        raise IndexError("Index " + str(j) + " out of bounds.")

    transition = self.cmodel.get_transition(i, j)
    if transition < 0.0:
        transition = 0.0
    return transition
def setTransition(self, i, j, prob):
    """ Accessor function for the transition a_ij.

    @raises IndexError for out-of-range state indices
    @raises ValueError when 'prob' is not within [0, 1]
    """
    # ensure proper indices
    if not 0 <= i < self.N:
        raise IndexError("Index " + str(i) + " out of bounds.")
    if not 0 <= j < self.N:
        raise IndexError("Index " + str(j) + " out of bounds.")
    if not 0.0 <= prob <= 1.0:
        # bug fix: the original referenced undefined 'prop' here, turning
        # the intended ValueError into a NameError
        raise ValueError("Transition " + str(prob) + " is not a probability.")

    self.cmodel.set_transition(i, j, prob)
def getEmission(self, i):
    """
    Accessor function for the emission distribution parameters of state 'i'.

    For discrete models the distribution over the symbols is returned,
    for continuous models a matrix of the form
    [ [mu_1, sigma_1, weight_1] ... [mu_M, sigma_M, weight_M] ] is returned.

    Implemented in derived classes.
    """
    raise NotImplementedError

def setEmission(self, i, distributionParemters):
    """ Set the emission distribution parameters.

    Defined in derived classes.
    """
    raise NotImplementedError

def asMatrices(self):
    "To be defined in derived classes."
    raise NotImplementedError

def normalize(self):
    """ Normalize transition probs, emission probs (if applicable).

    Delegates to the C-level normalization; a failure is only logged.
    """
    log.debug("Normalizing now.")
    i_error = self.cmodel.normalize()
    if i_error == -1:
        log.error("normalization failed")

def randomize(self, noiseLevel):
    """ to be defined in derived class """
    raise NotImplementedError

def write(self, fileName):
    """ Writes HMM to file 'fileName'.
    """
    self.cmodel.write_xml(fileName)

def printtypes(self, model_type):
    """Return a human-readable, space-separated list of the flag names
    set in the 'model_type' bitmask."""
    strout = []
    if model_type == kNotSpecified:
        return 'kNotSpecified'
    # 'types' is the file-level flag-value -> flag-name mapping
    for k in types.keys():
        if model_type & k:
            strout.append(types[k])
    return ' '.join(strout)
def HMMwriteList(fileName, hmmList, fileType=GHMM_FILETYPE_XML):
    """Write several HMMs into a single XML file.

    @param fileName path of the output file (overwritten if it exists)
    @param hmmList list of HMM objects to serialize
    @param fileType only GHMM_FILETYPE_XML is supported
    @raises WrongFileType for the deprecated smo format or unknown formats
    """
    if fileType == GHMM_FILETYPE_XML:
        if os.path.exists(fileName):
            # message typo fixed: 'overwritted' -> 'overwritten'
            log.warning("HMMwriteList: File " + str(fileName) + " already exists. Model will be overwritten.")

        models = ghmmwrapper.cmodel_ptr_array_alloc(len(hmmList))
        for i, model in enumerate(hmmList):
            ghmmwrapper.cmodel_ptr_array_setitem(models, i, model.cmodel)

        ghmmwrapper.ghmm_cmodel_xml_write(models, fileName, len(hmmList))
        ghmmwrapper.free(models)
    elif fileType == GHMM_FILETYPE_SMO:
        raise WrongFileType("the smo file format is deprecated, use xml instead")
    else:
        # message fix: missing separator before the format value
        raise WrongFileType("unknown file format: " + str(fileType))
class DiscreteEmissionHMM(HMM):
    """ HMMs with discrete emissions.

    Optional features:
    - silent states
    - higher order states
    - parameter tying in training
    - background probabilities in training
    """

    def __init__(self, emissionDomain, distribution, cmodel):
        HMM.__init__(self, emissionDomain, distribution, cmodel)
        self.model_type = self.cmodel.model_type  # bitmask of k* model-type flags
        self.maxorder = self.cmodel.maxorder      # maximal emission order over all states
        self.background = None                    # BackgroundDistribution, set via setBackgrounds()
def __str__(self):
    """Return a compact summary: class name, model name, dimensions and,
    for at most four states (otherwise an abbreviated selection),
    emissions and outgoing transitions."""
    hmm = self.cmodel
    strout = [str(self.__class__.__name__)]
    if self.cmodel.name:
        strout.append(" " + str(self.cmodel.name))
    strout.append("(N=" + str(hmm.N))
    strout.append(", M=" + str(hmm.M) + ')\n')

    f = lambda x: "%.2f" % (x,)  # float rounding function

    # per-state emission orders; all zero unless higher-order emissions
    if self.hasFlags(kHigherOrderEmissions):
        order = ghmmwrapper.int_array2list(self.cmodel.order, self.N)
    else:
        order = [0] * hmm.N

    # abbreviate the listing for models with many states
    if hmm.N <= 4:
        iter_list = range(self.N)
    else:
        iter_list = [0, 1, 'X', hmm.N - 2, hmm.N - 1]

    for k in iter_list:
        if k == 'X':
            strout.append('\n ...\n\n')
            continue

        state = hmm.getState(k)
        strout.append(" state " + str(k) + ' (')
        if order[k] > 0:
            strout.append('order=' + str(order[k]) + ',')
        strout.append("initial=" + f(state.pi) + ')\n')

        strout.append(" Emissions: ")
        # a state of order o has M**(o+1) emission parameters
        for outp in range(hmm.M ** (order[k] + 1)):
            strout.append(f(ghmmwrapper.double_array_getitem(state.b, outp)))
            if outp < hmm.M ** (order[k] + 1) - 1:
                strout.append(', ')
            else:
                strout.append('\n')

        strout.append(" Transitions:")
        for i in range(state.out_states):
            strout.append(" ->" + str(state.getOutState(i)) + ' (' + f(ghmmwrapper.double_array_getitem(state.out_a, i)) + ')')
            if i < state.out_states - 1:
                strout.append(',')
        strout.append('\n')

    return join(strout, '')
def verboseStr(self):
    """Return a verbose multi-line description: flags, dimensions and,
    per state, order, initial probability, emissions and both outgoing
    and ingoing transitions."""
    hmm = self.cmodel
    strout = ["\nGHMM Model\n"]
    strout.append("Name: " + str(self.cmodel.name))
    strout.append("\nModelflags: " + self.printtypes(self.cmodel.model_type))
    strout.append("\nNumber of states: " + str(hmm.N))
    strout.append("\nSize of Alphabet: " + str(hmm.M))

    # per-state emission orders; all zero unless higher-order emissions
    if self.hasFlags(kHigherOrderEmissions):
        order = ghmmwrapper.int_array2list(self.cmodel.order, self.N)
    else:
        order = [0] * hmm.N

    for k in range(hmm.N):
        state = hmm.getState(k)
        strout.append("\n\nState number " + str(k) + ":")
        strout.append("\nState order: " + str(order[k]))
        strout.append("\nInitial probability: " + str(state.pi))
        strout.append("\nOutput probabilites: ")
        for outp in range(hmm.M ** (order[k] + 1)):
            strout.append(str(ghmmwrapper.double_array_getitem(state.b, outp)))
            # line break after every group of M symbols
            if outp % hmm.M == hmm.M - 1:
                strout.append("\n")
            else:
                strout.append(", ")
        strout.append("\nOutgoing transitions:")
        for i in range(state.out_states):
            strout.append("\ntransition to state " + str(state.getOutState(i)))
            strout.append(" with probability " + str(ghmmwrapper.double_array_getitem(state.out_a, i)))
        strout.append("\nIngoing transitions:")
        for i in range(state.in_states):
            strout.append("\ntransition from state " + str(state.getInState(i)))
            strout.append(" with probability " + str(ghmmwrapper.double_array_getitem(state.in_a, i)))
        strout.append("\nint fix:" + str(state.fix) + "\n")

    if self.hasFlags(kSilentStates):
        strout.append("\nSilent states: \n")
        for k in range(hmm.N):
            strout.append(str(self.cmodel.getSilent(k)) + ", ")
        strout.append("\n")

    return join(strout, '')
def extendDurations(self, durationlist):
    """ Extend states with durations larger than one.

    @note this is done by explicit state copying in C
    """
    for index, duration in enumerate(durationlist):
        if duration > 1:
            error = self.cmodel.duration_apply(index, duration)
            if error:
                log.error("durations not applied")
            else:
                # state copying changed the number of states
                self.N = self.cmodel.N
def getEmission(self, i):
    """Return the emission probabilities of state 'i' as a list.

    Higher-order states expose M**(order+1) parameters, first-order
    states exactly M.
    """
    state = self.cmodel.getState(i)
    if self.hasFlags(kHigherOrderEmissions):
        order = ghmmwrapper.int_array_getitem(self.cmodel.order, i)
        size = self.M ** (order + 1)
    else:
        size = self.M
    return ghmmwrapper.double_array2list(state.b, size)
def setEmission(self, i, distributionParameters):
    """ Set the emission distribution parameters for a discrete model.

    @param i state index
    @param distributionParameters list of M symbol probabilities
    @raises TypeError when the list length differs from the alphabet size
    @raises IndexError for an invalid state index

    Also maintains the silent-state bookkeeping: an all-zero distribution
    marks the state silent; when the last silent state becomes emitting
    the kSilentStates flag and the silent array are released.
    """
    if not len(distributionParameters) == self.M:
        raise TypeError
    # ensure proper indices
    if not 0 <= i < self.N:
        raise IndexError("Index " + str(i) + " out of bounds.")

    state = self.cmodel.getState(i)

    # updating silent flag and/or model type if necessary
    if self.hasFlags(kSilentStates):
        if sum(distributionParameters) == 0.0:
            self.cmodel.setSilent(i, 1)
        else:
            self.cmodel.setSilent(i, 0)
            # change model_type and free array if no silent state is left
            if 0 == sum(ghmmwrapper.int_array2list(self.cmodel.silent, self.N)):
                self.clearFlags(kSilentStates)
                ghmmwrapper.free(self.cmodel.silent)
                self.cmodel.silent = None
    # if the state becomes the first silent state allocate memory and set the silent flag
    elif sum(distributionParameters) == 0.0:
        self.setFlags(kSilentStates)
        slist = [0] * self.N
        slist[i] = 1
        self.cmodel.silent = ghmmwrapper.list2int_array(slist)

    # set the emission probabilities
    ghmmwrapper.free(state.b)
    state.b = ghmmwrapper.list2double_array(distributionParameters)
# XXX Change name?
def backwardTermination(self, emissionSequence, pybeta, scalingVector):
    """Compute the backward log-probability of 'emissionSequence'.

    @param emissionSequence EmissionSequence object (only sequence 0 is used)
    @param pybeta backward matrix as returned by backward()
    @param scalingVector scaling vector as returned by forward()
    @returns the backward log probability of emissionSequence
    """
    seq = emissionSequence.cseq.getSequence(0)
    # parsing 'scalingVector' to C double array.
    cscale = ghmmwrapper.list2double_array(scalingVector)
    # allocating beta matrix
    t = len(emissionSequence)
    cbeta = ghmmhelper.list2double_matrix(pybeta)

    error, logp = self.cmodel.backward_termination(seq, t, cbeta[0], cscale)
    if error == -1:
        # error is only logged; 'logp' is returned regardless
        log.error("backward finished with -1: EmissionSequence cannot be build.")

    # deallocation
    ghmmwrapper.free(cscale)
    ghmmwrapper.double_matrix_free(cbeta[0], t)
    return logp
def baumWelch(self, trainingSequences, nrSteps=ghmmwrapper.MAX_ITER_BW, loglikelihoodCutoff=ghmmwrapper.EPS_ITER_BW):
    """ Reestimates the model with the sequence in 'trainingSequences'.

    @note that training for models including silent states is not yet
    supported.
    @param trainingSequences EmissionSequence or SequenceSet object
    @param nrSteps the maximal number of BW-steps
    @param loglikelihoodCutoff the least relative improvement in likelihood
    with respect to the last iteration required to continue.
    """
    valid_input = isinstance(trainingSequences, (EmissionSequence, SequenceSet))
    if not valid_input:
        raise TypeError("EmissionSequence or SequenceSet required, got " + str(trainingSequences.__class__.__name__))
    if self.hasFlags(kSilentStates):
        raise NotImplementedError("Sorry, training of models containing silent states not yet supported.")

    self.cmodel.baum_welch_nstep(trainingSequences.cseq, nrSteps, loglikelihoodCutoff)
def applyBackgrounds(self, backgroundWeight):
    """
    Apply the background distribution to the emission probabilities of
    states which have been assigned one (usually in the editor and coded
    in the XML): computes a convex combination of the emission
    probability and the background.

    @param backgroundWeight (within [0,1]) controls the background's
    contribution for each state.
    """
    if len(backgroundWeight) != self.N:
        raise TypeError("Argument 'backgroundWeight' does not match number of states.")

    cweights = ghmmwrapper.list2double_array(backgroundWeight)
    result = self.cmodel.background_apply(cweights)
    ghmmwrapper.free(cweights)
    if result:
        log.error("applyBackground failed.")
def setBackgrounds(self, backgroundObject, stateBackground):
    """
    Configure model to use the background distributions in 'backgroundObject'.

    @param backgroundObject BackgroundDistribution
    @param stateBackground a list of indices (one for each state) refering
    to distributions in 'backgroundObject'.
    @note values in backgroundObject are deep copied into the model
    """
    if not isinstance(backgroundObject, BackgroundDistribution):
        # bug fix: the original message referenced undefined
        # 'emissionSequences', raising a NameError instead of TypeError
        raise TypeError("BackgroundDistribution required, got " + str(backgroundObject.__class__.__name__))
    if not type(stateBackground) == list:
        raise TypeError("list required got " + str(type(stateBackground)))
    if not len(stateBackground) == self.N:
        raise TypeError("Argument 'stateBackground' does not match number of states.")

    # release a previously configured background before replacing it
    if self.background is not None:
        del self.background
        ghmmwrapper.free(self.cmodel.background_id)

    self.cmodel.bp = backgroundObject.getCopy()
    self.background = backgroundObject
    self.cmodel.background_id = ghmmwrapper.list2int_array(stateBackground)

    # updating model type
    self.setFlags(kBackgroundDistributions)
def setBackgroundAssignments(self, stateBackground):
    """ Change all the assignments of background distributions to states.

    @param stateBackground a list of background ids, or '-1' for no background
    @raises TypeError / ValueError on invalid input
    """
    if not type(stateBackground) == list:
        raise TypeError("list required got " + str(type(stateBackground)))
    # real exceptions instead of 'assert': asserts vanish under 'python -O'
    if self.cmodel.background_id is None:
        raise TypeError("Error: No backgrounds defined in model.")
    if len(stateBackground) != self.N:
        raise ValueError("Error: Number of weigths does not match number of states.")
    # check for valid background id
    # NOTE(review): 'self.cbackground' is not assigned anywhere in this
    # class -- presumably this should be self.cmodel.bp; verify.
    for d in stateBackground:
        if d not in range(self.cbackground.n):
            raise ValueError("Error: Invalid background distribution id.")

    for i, b_id in enumerate(stateBackground):
        self.cmodel.background_id[i] = b_id
def getBackgroundAssignments(self):
    """ Get the background assignments of all states.

    '-1' -> no background

    @returns a list of background ids, or (implicitly) None when the
    model has no background distributions
    """
    if self.hasFlags(kBackgroundDistributions):
        return ghmmwrapper.int_array2list(self.cmodel.background_id, self.N)
def updateTiedEmissions(self):
    """ Averages emission probabilities of tied states. """
    # NOTE(review): assert is stripped under 'python -O'; relies on tied
    # emissions having been configured via setTieGroups()
    assert self.hasFlags(kTiedEmissions) and self.cmodel.tied_to is not None, "cmodel.tied_to is undefined."
    self.cmodel.update_tie_groups()
def setTieGroups(self, tieList):
    """ Sets the tied emission groups.

    @param tieList contains for every state either '-1' or the index
    of the tied emission group leader.
    @note The tied emission group leader is tied to itself
    """
    if len(tieList) != self.N:
        raise IndexError("Number of entries in tieList is different from number of states.")

    if self.cmodel.tied_to is None:
        log.debug("allocating tied_to")
        self.cmodel.tied_to = ghmmwrapper.list2int_array(tieList)
        self.setFlags(kTiedEmissions)
    else:
        log.debug("tied_to already initialized")
        # bug fix: the original read 'for i, in range(self.N)', which
        # attempts to unpack each int and raises TypeError at runtime
        for i in range(self.N):
            self.cmodel.tied_to[i] = tieList[i]
def removeTieGroups(self):
    """ Removes all tied emission information from the model. """
    has_ties = self.hasFlags(kTiedEmissions) and self.cmodel.tied_to != None
    if has_ties:
        ghmmwrapper.free(self.cmodel.tied_to)
        self.cmodel.tied_to = None
        self.clearFlags(kTiedEmissions)
def getTieGroups(self):
    """ Gets the tied emission group structure as a list of ints. """
    tied = self.cmodel.tied_to
    if not self.hasFlags(kTiedEmissions) or tied is None:
        raise TypeError("HMM has no tied emissions or self.cmodel.tied_to is undefined.")
    return ghmmwrapper.int_array2list(tied, self.N)
def getSilentFlag(self, state):
    """Return the C-level silent flag of 'state'; 0 when the model has
    no silent states at all."""
    if not self.hasFlags(kSilentStates):
        return 0
    return self.cmodel.getSilent(state)
def asMatrices(self):
    "Return the parameters in matrix form: [A, B, pi]."
    # per-state emission orders; all zero unless higher-order emissions
    if self.hasFlags(kHigherOrderEmissions):
        order = ghmmwrapper.int_array2list(self.cmodel.order, self.N)
    else:
        order = [0] * self.N

    A = []
    B = []
    pi = []
    for i in range(self.cmodel.N):
        row = [0.0] * self.N
        state = self.cmodel.getState(i)
        pi.append(state.pi)
        B.append(ghmmwrapper.double_array2list(state.b, self.M ** (order[i] + 1)))
        # fill only the existing outgoing transitions; the rest stay 0.0
        for j in range(state.out_states):
            target = ghmmwrapper.int_array_getitem(state.out_id, j)
            row[target] = ghmmwrapper.double_array_getitem(state.out_a, j)
        A.append(row)

    return [A, B, pi]
def isSilent(self, state):
    """
    @returns True if 'state' is silent, False otherwise
    """
    if not 0 <= state < self.N:
        raise IndexError("Invalid state index")
    return bool(self.hasFlags(kSilentStates) and self.cmodel.silent[state])
def write(self, fileName):
    """
    Writes HMM to file 'fileName' (GHMM XML format).
    """
    # lazily attach the alphabet so the XML contains the symbol mapping
    if self.cmodel.alphabet is None:
        self.cmodel.alphabet = self.emissionDomain.toCstruct()
    self.cmodel.write_xml(fileName)
######################################################
class StateLabelHMM(DiscreteEmissionHMM):
    """ Labelled HMMs with discrete emissions.

    Same feature list as in DiscreteEmissionHMM models.
    """

    def __init__(self, emissionDomain, distribution, labelDomain, cmodel):
        DiscreteEmissionHMM.__init__(self, emissionDomain, distribution, cmodel)
        if not isinstance(labelDomain, LabelDomain):
            raise TypeError("Invalid labelDomain")
        self.labelDomain = labelDomain  # maps internal int labels <-> external labels
def __str__(self):
    """Return a compact summary like DiscreteEmissionHMM.__str__, with
    each listed state additionally annotated with its external label."""
    hmm = self.cmodel
    strout = [str(self.__class__.__name__)]
    if self.cmodel.name:
        strout.append(" " + str(self.cmodel.name))
    strout.append("(N= " + str(hmm.N))
    strout.append(", M= " + str(hmm.M) + ')\n')

    f = lambda x: "%.2f" % (x,)  # float rounding function

    # per-state emission orders; all zero unless higher-order emissions
    if self.hasFlags(kHigherOrderEmissions):
        order = ghmmwrapper.int_array2list(self.cmodel.order, self.N)
    else:
        order = [0] * hmm.N
    label = ghmmwrapper.int_array2list(hmm.label, self.N)

    # abbreviate the listing for models with many states
    if hmm.N <= 4:
        iter_list = range(self.N)
    else:
        iter_list = [0, 1, 'X', hmm.N - 2, hmm.N - 1]

    for k in iter_list:
        if k == 'X':
            strout.append('\n ...\n\n')
            continue

        state = hmm.getState(k)
        strout.append(" state " + str(k) + ' (')
        if order[k] > 0:
            strout.append('order= ' + str(order[k]) + ',')
        strout.append("initial= " + f(state.pi) + ', label= ' + str(self.labelDomain.external(label[k])) + ')\n')

        strout.append(" Emissions: ")
        # a state of order o has M**(o+1) emission parameters
        for outp in range(hmm.M ** (order[k] + 1)):
            strout.append(f(ghmmwrapper.double_array_getitem(state.b, outp)))
            if outp < hmm.M ** (order[k] + 1) - 1:
                strout.append(', ')
            else:
                strout.append('\n')

        strout.append(" Transitions:")
        for i in range(state.out_states):
            strout.append(" ->" + str(state.getOutState(i)) + ' (' + f(ghmmwrapper.double_array_getitem(state.out_a, i)) + ')')
            if i < state.out_states - 1:
                strout.append(',')
        strout.append('\n')

    return join(strout, '')
def verboseStr(self):
    """Return a verbose multi-line description of the model, including
    each state's external label, emissions and transitions."""
    hmm = self.cmodel
    strout = ["\nGHMM Model\n"]
    strout.append("Name: " + str(self.cmodel.name))
    strout.append("\nModelflags: " + self.printtypes(self.cmodel.model_type))
    strout.append("\nNumber of states: " + str(hmm.N))
    strout.append("\nSize of Alphabet: " + str(hmm.M))

    if hmm.model_type & kHigherOrderEmissions:
        order = ghmmwrapper.int_array2list(hmm.order, self.N)
    else:
        order = [0] * hmm.N
    label = ghmmwrapper.int_array2list(hmm.label, self.N)

    for k in range(hmm.N):
        state = hmm.getState(k)
        strout.append("\n\nState number " + str(k) + ":")
        strout.append("\nState label: " + str(self.labelDomain.external(label[k])))
        strout.append("\nState order: " + str(order[k]))
        strout.append("\nInitial probability: " + str(state.pi))
        strout.append("\nOutput probabilites:\n")
        for outp in range(hmm.M ** (order[k] + 1)):
            # bug fix: 'strout += str(...)' extended the list with the
            # individual characters of the string; append the whole string
            strout.append(str(ghmmwrapper.double_array_getitem(state.b, outp)))
            if outp % hmm.M == hmm.M - 1:
                strout.append("\n")
            else:
                strout.append(", ")
        strout.append("Outgoing transitions:")
        for i in range(state.out_states):
            strout.append("\ntransition to state " + str(state.getOutState(i)) + " with probability " + str(state.getOutProb(i)))
        strout.append("\nIngoing transitions:")
        for i in range(state.in_states):
            strout.append("\ntransition from state " + str(state.getInState(i)) + " with probability " + str(state.getInProb(i)))
        strout.append("\nint fix:" + str(state.fix) + "\n")

    if hmm.model_type & kSilentStates:
        strout.append("\nSilent states: \n")
        for k in range(hmm.N):
            strout.append(str(hmm.silent[k]) + ", ")
        strout.append("\n")

    return join(strout, '')
def setLabels(self, labelList):
    """ Set the state labels to the values given in labelList.
    LabelList is in external representation.

    @raises GHMMOutOfDomain for labels not in self.labelDomain
    """
    assert len(labelList) == self.N, "Invalid number of labels."

    # validate all labels before touching the C-level array
    for i in range(self.N):
        if not self.labelDomain.isAdmissable(labelList[i]):
            raise GHMMOutOfDomain("Label " + str(labelList[i]) + " not included in labelDomain.")

    # replace the label array with the internal representation
    ghmmwrapper.free(self.cmodel.label)
    self.cmodel.label = ghmmwrapper.list2int_array([self.labelDomain.internal(l) for l in labelList])
def getLabels(self):
    """Return the labels of all states in external representation."""
    internal_labels = ghmmwrapper.int_array2list(self.cmodel.label, self.N)
    return [self.labelDomain.external(code) for code in internal_labels]
def getLabel(self, stateIndex):
    """
    @returns label of the state 'stateIndex' in internal (int)
    representation; callers convert via labelDomain.external
    """
    return self.cmodel.getLabel(stateIndex)
def externalLabel(self, internal):
    """Return the external label representation of an int or list of ints.

    @param internal a single internal label (int) or a list of them
    @raises TypeError for any other argument type
    """
    if type(internal) is int:
        # bug fix: labelDomain.external is a method; the original indexed
        # it ('self.labelDomain.external[internal]'), raising a TypeError
        return self.labelDomain.external(internal)  # return Label
    elif type(internal) is list:
        return self.labelDomain.externalSequence(internal)
    else:
        raise TypeError('int or list needed')
def internalLabel(self, external):
    """Return the internal int representation of a label or list of labels."""
    if type(external) is list:
        return self.labelDomain.internalSequence(external)
    return self.labelDomain.internal(external)
def sampleSingle(self, seqLength, seed=0):
    """Sample a single labeled emission sequence of length at most seqLength."""
    seqPtr = self.cmodel.label_generate_sequences(seed, seqLength, 1, seqLength)
    return EmissionSequence(self.emissionDomain, seqPtr, labelDomain=self.labelDomain)

def sample(self, seqNr, seqLength, seed=0):
    """Sample seqNr labeled emission sequences of length at most seqLength."""
    seqPtr = self.cmodel.label_generate_sequences(seed, seqLength, seqNr, seqLength)
    return SequenceSet(self.emissionDomain, seqPtr, labelDomain=self.labelDomain)
def labeledViterbi(self, emissionSequences):
    """
    @returns the labeling of the input sequence(s) as given by the viterbi
    path.

    For one EmissionSequence a list of labels is returned; for a
    SequenceSet a list of lists of labels.
    """
    emissionSequences = emissionSequences.asSequenceSet()
    seqNumber = len(emissionSequences)
    if not emissionSequences.emissionDomain == self.emissionDomain:
        raise TypeError("Sequence and model emissionDomains are incompatible.")

    vPath, log_p = self.viterbi(emissionSequences)

    def toLabel(stateIndex):
        # map state index -> internal label -> external label
        return self.labelDomain.external(self.getLabel(stateIndex))

    if seqNumber == 1:
        labels = map(toLabel, vPath)
    else:
        labels = [map(toLabel, vp) for vp in vPath]

    return (labels, log_p)
def kbest(self, emissionSequences, k=1):
    """ Compute the k most probable labelings for each sequence in
    emissionSequences.

    @param emissionSequences can either be a SequenceSet or an
    EmissionSequence
    @param k the number of labelings to produce
    Result: [l_0, ..., l_T] the labeling of emissionSequences is an
    EmmissionSequence object,
    [[l_0^0, ..., l_T^0], ..., [l_0^j, ..., l_T^j]} for a j-sequence
    SequenceSet
    @raises NotImplementedError for models with silent states
    """
    if self.hasFlags(kSilentStates):
        # bug fix: was 'NotimplementedError', which is a NameError at runtime
        raise NotImplementedError("Sorry, k-best decoding on models containing silent states not yet supported.")

    emissionSequences = emissionSequences.asSequenceSet()
    seqNumber = len(emissionSequences)

    allLogs = []
    allLabels = []
    for i in range(seqNumber):
        seq = emissionSequences.cseq.getSequence(i)
        seq_len = emissionSequences.cseq.getLength(i)

        labeling, log_p = self.cmodel.label_kbest(seq, seq_len, k)
        oneLabel = ghmmwrapper.int_array2list(labeling, seq_len)

        allLabels.append(oneLabel)
        allLogs.append(log_p)
        ghmmwrapper.free(labeling)

    if emissionSequences.cseq.seq_number > 1:
        return (map(self.externalLabel, allLabels), allLogs)
    else:
        return (self.externalLabel(allLabels[0]), allLogs[0])
def gradientSearch(self, emissionSequences, eta=.1, steps=20):
    """ Trains the model with the given sequences using a gradient descent
    algorithm.

    @param emissionSequences can either be a SequenceSet or an
    EmissionSequence
    @param eta algortihm terminates if the descent is smaller than eta
    @param steps number of iterations
    @returns True on success, False when the C-level training failed
    """
    # check for labels; label-based gradient descent needs them
    if not self.hasFlags(kLabeledStates):
        # message fix: was the ungrammatical "Model is no labeled states."
        raise NotImplementedError("Error: Model has no labeled states.")

    emissionSequences = emissionSequences.asSequenceSet()

    tmp_model = self.cmodel.label_gradient_descent(emissionSequences.cseq, eta, steps)
    if tmp_model is None:
        log.error("Gradient descent finished not successfully.")
        return False
    else:
        self.cmodel = tmp_model
        return True
def labeledlogikelihoods(self, emissionSequences):
    """ Compute a vector ( log( P[s,l| model]) )_{s} of joint
    log-likelihoods of the individual labeled \p emissionSequences using
    the forward algorithm.

    @param emissionSequences labeled SequenceSet
    @returns list of floats; -inf for sequence/label pairs the model
             cannot generate
    @note the (misspelled) method name is kept for backward compatibility
    """
    emissionSequences = emissionSequences.asSequenceSet()
    if emissionSequences.cseq.state_labels is None:
        raise TypeError("Sequence needs to be labeled.")

    likelihoodList = []
    for idx in range(len(emissionSequences)):
        seq = emissionSequences.cseq.getSequence(idx)
        labels = ghmmwrapper.int_matrix_get_col(emissionSequences.cseq.state_labels, idx)
        seqlen = emissionSequences.cseq.getLength(idx)
        status, likelihood = self.cmodel.label_logp(seq, labels, seqlen)

        if status == -1:
            log.warning("forward returned -1: Sequence" + str(idx) + "cannot be build.")
            likelihoodList.append(-float('Inf'))
        else:
            likelihoodList.append(likelihood)

    return likelihoodList
def labeledForward(self, emissionSequence, labelSequence):
    """ Forward algorithm for a single labeled sequence.

    Result: the (N x T)-matrix containing the forward-variables
    and the scaling vector

    Raises TypeError for a non-EmissionSequence argument or when the
    emission and label sequences differ in length.
    """
    if not isinstance(emissionSequence,EmissionSequence):
        raise TypeError("EmissionSequence required, got " + str(emissionSequence.__class__.__name__))

    n_states = self.cmodel.N
    t = emissionSequence.cseq.getLength(0)
    if t != len(labelSequence):
        raise TypeError("emissionSequence and labelSequence must have same length")

    # allocate C-level output buffers: alpha is (t x N), scale is length t
    calpha = ghmmwrapper.double_matrix_alloc(t, n_states)
    cscale = ghmmwrapper.double_array_alloc(t)

    seq = emissionSequence.cseq.getSequence(0)
    # map external (user-visible) labels to internal label indices
    label = ghmmwrapper.list2int_array(self.internalLabel(labelSequence))

    error, logp = self.cmodel.label_forward(seq, label, t, calpha, cscale)
    if error == -1:
        log.error( "Forward finished with -1: Sequence cannot be build.")

    # translate alpha / scale to python lists
    pyscale = ghmmwrapper.double_array2list(cscale, t)
    pyalpha = ghmmhelper.double_matrix2list(calpha,t,n_states)

    # free the C buffers only after copying them to Python lists
    ghmmwrapper.free(label)
    ghmmwrapper.free(cscale)
    ghmmwrapper.double_matrix_free(calpha,t)
    return (logp, pyalpha, pyscale)
def labeledBackward(self, emissionSequence, labelSequence, scalingVector):
    """ Backward algorithm for a single labeled sequence.

    @param scalingVector the scaling vector previously produced by
    labeledForward for the same sequence

    Result: the (N x T)-matrix containing the backward-variables

    Raises TypeError for a non-EmissionSequence argument or when the
    emission and label sequences differ in length.
    """
    if not isinstance(emissionSequence,EmissionSequence):
        raise TypeError("EmissionSequence required, got " + str(emissionSequence.__class__.__name__))

    t = emissionSequence.cseq.getLength(0)
    if t != len(labelSequence):
        raise TypeError("emissionSequence and labelSequence must have same length")

    seq = emissionSequence.cseq.getSequence(0)
    # map external labels to internal label indices
    label = ghmmwrapper.list2int_array(self.internalLabel(labelSequence))

    # parsing 'scalingVector' to C double array.
    cscale = ghmmwrapper.list2double_array(scalingVector)

    # allocating beta matrix (t x N)
    cbeta = ghmmwrapper.double_matrix_alloc(t, self.cmodel.N)

    error,logp = self.cmodel.label_backward(seq, label, t, cbeta, cscale)
    if error == -1:
        log.error( "backward finished with -1: EmissionSequence cannot be build.")

    pybeta = ghmmhelper.double_matrix2list(cbeta,t,self.cmodel.N)

    # deallocation of the C buffers after copying beta to Python
    ghmmwrapper.free(cscale)
    ghmmwrapper.free(label)
    ghmmwrapper.double_matrix_free(cbeta,t)
    return (logp, pybeta)
def labeledBaumWelch(self, trainingSequences, nrSteps=ghmmwrapper.MAX_ITER_BW,
                     loglikelihoodCutoff=ghmmwrapper.EPS_ITER_BW):
    """ Reestimates the model with the sequence in 'trainingSequences'.

    @note that training for models including silent states is not yet
    supported.

    @param trainingSequences EmissionSequence or SequenceSet object
    @param nrSteps the maximal number of BW-steps
    @param loglikelihoodCutoff the least relative improvement in likelihood
    with respect to the last iteration required to continue.
    """
    # accept either of the two sequence container types
    if not isinstance(trainingSequences, (EmissionSequence, SequenceSet)):
        raise TypeError("EmissionSequence or SequenceSet required, got " + str(trainingSequences.__class__.__name__))

    # silent-state models cannot be trained by this routine
    if self.hasFlags(kSilentStates):
        raise NotImplementedError("Sorry, training of models containing silent states not yet supported.")

    # delegate the actual reestimation to the C library
    self.cmodel.label_baum_welch_nstep(trainingSequences.cseq, nrSteps,
                                       loglikelihoodCutoff)
def write(self,fileName):
    """ Writes HMM to file 'fileName'.

    Lazily attaches the C alphabet structures before serializing,
    in case they have not been created yet.
    """
    # the label alphabet and the emission alphabet are independent;
    # attach whichever is still missing
    if self.cmodel.label_alphabet is None:
        self.cmodel.label_alphabet = self.labelDomain.toCstruct()
    if self.cmodel.alphabet is None:
        self.cmodel.alphabet = self.emissionDomain.toCstruct()

    self.cmodel.write_xml(fileName)
class GaussianEmissionHMM(HMM):
    """ HMMs with Gaussian distribution as emissions.
    """

    def __init__(self, emissionDomain, distribution, cmodel):
        HMM.__init__(self, emissionDomain, distribution, cmodel)
        # Baum Welch context, call baumWelchSetup to initialize
        self.BWcontext = None

    def getTransition(self, i, j):
        """ @returns the probability of the transition from state i to state j.

        Raises IndexError if i or j is out of range; a non-existing
        edge between valid states is reported as probability 0.0.
        """
        # ensure proper indices
        if not 0 <= i < self.N:
            raise IndexError("Index " + str(i) + " out of bounds.")
        if not 0 <= j < self.N:
            raise IndexError("Index " + str(j) + " out of bounds.")

        transition = self.cmodel.get_transition(i, j, 0)
        if transition < 0.0:  # Tried to access non-existing edge
            transition = 0.0
        return transition

    def setTransition(self, i, j, prob):
        """ Accessor function for the transition a_ij

        Raises IndexError for invalid state indices and ValueError if
        the edge does not exist (only existing transitions may be set).
        """
        # ensure proper indices
        if not 0 <= i < self.N:
            raise IndexError("Index " + str(i) + " out of bounds.")
        if not 0 <= j < self.N:
            raise IndexError("Index " + str(j) + " out of bounds.")

        if not self.cmodel.check_transition(i, j, 0):
            raise ValueError("No transition between state " + str(i) + " and " + str(j))

        self.cmodel.set_transition(i, j, 0, float(prob))

    def getEmission(self, i):
        """ @returns (mu, sigma^2) of state i """
        if not 0 <= i < self.N:
            raise IndexError("Index " + str(i) + " out of bounds.")

        state = self.cmodel.getState(i)
        mu = state.getMean(0)
        sigma = state.getStdDev(0)
        return (mu, sigma)

    def setEmission(self, i, values):
        """ Set the emission distributionParameters for state i

        @param i index of a state
        @param values tuple of mu, sigma
        """
        mu, sigma = values
        # ensure proper indices
        if not 0 <= i < self.N:
            raise IndexError("Index " + str(i) + " out of bounds.")

        state = self.cmodel.getState(i)
        state.setMean(0, float(mu))
        state.setStdDev(0, float(sigma))

    def getEmissionProbability(self, value, i):
        """ @returns probability of emitting value in state i """
        # ensure proper index
        assert 0 <= i < self.N, "Index " + str(i) + " out of bounds."

        # value can be float or vector of floats
        try:
            assert len(value) == self.cmodel.dim
        except TypeError:
            # scalar input: only legal for one-dimensional models
            assert 1 == self.cmodel.dim
            v = [float(value)]
        else:
            v = value

        state = self.cmodel.getState(i)
        valueptr = ghmmwrapper.list2double_array(v)
        p = state.calc_b(valueptr)
        ghmmwrapper.free(valueptr)
        return p

    def getStateFix(self,state):
        """ @returns the fix flag of 'state' (excluded from reestimation if set) """
        s = self.cmodel.getState(state)
        return s.fix

    def setStateFix(self, state ,flag):
        """ Sets the fix flag of 'state' """
        s = self.cmodel.getState(state)
        s.fix = flag

    def __str__(self):
        """ Compact one/few-line summary; abbreviates models with more than 4 states. """
        hmm = self.cmodel
        strout = [str(self.__class__.__name__)]
        if self.cmodel.name:
            strout.append( " " + str(self.cmodel.name))
        strout.append( "(N="+ str(hmm.N)+')\n')

        f = lambda x: "%.2f" % (x,) # float rounding function

        # print only the first two and last two states for large models
        if hmm.N <= 4:
            iter_list = range(self.N)
        else:
            iter_list = [0,1,'X',hmm.N-2,hmm.N-1]

        for k in iter_list:
            if k == 'X':
                strout.append('\n ...\n\n')
                continue

            state = hmm.getState(k)
            strout.append(" state "+ str(k) + " (")
            strout.append( "initial=" + f(state.pi) )
            if self.cmodel.cos > 1:
                strout.append(', cos='+ str(self.cmodel.cos))
            strout.append(", mu=" + f(state.getMean(0))+', ')
            strout.append("sigma=" + f(state.getStdDev(0)) )
            strout.append(')\n')

            strout.append( " Transitions: ")
            if self.cmodel.cos > 1:
                strout.append("\n")

            for c in range(self.cmodel.cos):
                if self.cmodel.cos > 1:
                    strout.append(' class: ' + str(c)+ ':' )
                for i in range( state.out_states):
                    strout.append('->' + str(state.getOutState(i)) + ' (' + f(state.getOutProb(i, c))+')' )
                    if i < state.out_states-1:
                        strout.append( ', ')
                strout.append('\n')

        # BUGFIX: was join(strout,'') which relies on the Py3-removed string.join
        return ''.join(strout)

    def verboseStr(self):
        """ Full, human readable description of every state. """
        hmm = self.cmodel
        strout = ["\nHMM Overview:"]
        strout.append("\nNumber of states: " + str(hmm.N))
        strout.append("\nNumber of mixture components: " + str(hmm.M))

        for k in range(hmm.N):
            state = hmm.getState(k)
            strout.append("\n\nState number "+ str(k) + ":")
            strout.append("\nInitial probability: " + str(state.pi) + "\n")

            # NOTE: the original also accumulated the component weight here
            # but never printed it; the dead accumulation was removed.
            mue = ""
            u = ""
            mue += str(state.getMean(0))
            u += str(state.getStdDev(0))

            strout.append(" mean: " + str(mue) + "\n")
            strout.append(" variance: " + str(u) + "\n")
            strout.append(" fix: " + str(state.fix) + "\n")

            for c in range(self.cmodel.cos):
                strout.append("\n Class : " + str(c) )
                strout.append("\n Outgoing transitions:")
                for i in range( state.out_states):
                    strout.append("\n transition to state " + str(state.getOutState(i)) + " with probability = " + str(state.getOutProb(i, c)))
                strout.append("\n Ingoing transitions:")
                for i in range(state.in_states):
                    strout.append("\n transition from state " + str(state.getInState(i)) +" with probability = "+ str(state.getInProb(i, c)))

        return ''.join(strout)

    def forward(self, emissionSequence):
        """
        Result: the (N x T)-matrix containing the forward-variables
        and the scaling vector
        """
        if not isinstance(emissionSequence,EmissionSequence):
            raise TypeError("EmissionSequence required, got " + str(emissionSequence.__class__.__name__))

        i = self.cmodel.N
        t = emissionSequence.cseq.getLength(0)
        calpha = ghmmwrapper.double_matrix_alloc (t, i)
        cscale = ghmmwrapper.double_array_alloc(t)

        seq = emissionSequence.cseq.getSequence(0)

        error, logp = self.cmodel.forward(seq, t, None, calpha, cscale)
        if error == -1:
            # BUGFIX: the original referenced an undefined name 'seq_nr' here,
            # so the error path itself raised a NameError.
            log.error( "Forward finished with -1: Sequence cannot be build.")

        # translate alpha / scale to python lists
        pyscale = ghmmwrapper.double_array2list(cscale, t) # XXX return Python2.5 arrays???
        pyalpha = ghmmhelper.double_matrix2list(calpha,t,i) # XXX return Python2.5 arrays? Also
        # XXX Check Matrix-valued input.

        # deallocation of C buffers
        ghmmwrapper.free(cscale)
        ghmmwrapper.double_matrix_free(calpha,t)
        return (pyalpha,pyscale)

    def backward(self, emissionSequence, scalingVector):
        """
        @param scalingVector scaling vector produced by forward() for
        the same sequence

        Result: the (N x T)-matrix containing the backward-variables
        """
        if not isinstance(emissionSequence,EmissionSequence):
            raise TypeError("EmissionSequence required, got " + str(emissionSequence.__class__.__name__))

        seq = emissionSequence.cseq.getSequence(0)

        # parsing 'scalingVector' to C double array.
        cscale = ghmmwrapper.list2double_array(scalingVector)

        # allocating beta matrix
        t = emissionSequence.cseq.getLength(0)
        cbeta = ghmmwrapper.double_matrix_alloc(t, self.cmodel.N)

        error = self.cmodel.backward(seq,t,None,cbeta,cscale)
        if error == -1:
            log.error( "backward finished with -1: EmissionSequence cannot be build.")

        pybeta = ghmmhelper.double_matrix2list(cbeta,t,self.cmodel.N)

        # deallocation
        ghmmwrapper.free(cscale)
        ghmmwrapper.double_matrix_free(cbeta,t)
        return pybeta

    def loglikelihoods(self, emissionSequences):
        """ Compute a vector ( log( P[s| model]) )_{s} of log-likelihoods of the
        individual emissionSequences using the forward algorithm.

        @param emissionSequences SequenceSet

        Result: log( P[emissionSequences| model]) of type float
        (numarray) vector of floats
        """
        emissionSequences = emissionSequences.asSequenceSet()
        seqNumber = len(emissionSequences)

        if self.cmodel.cos > 1:
            log.debug( "self.cmodel.cos = " + str( self.cmodel.cos) )
            assert self.cmodel.class_change is not None, "Error: class_change not initialized."

        likelihoodList = []

        for i in range(seqNumber):
            seq = emissionSequences.cseq.getSequence(i)
            tmp = emissionSequences.cseq.getLength(i)

            # with multiple transition classes, use the sequence index as class key
            if self.cmodel.cos > 1:
                self.cmodel.class_change.k = i

            ret_val, likelihood = self.cmodel.logp(seq, tmp)
            if ret_val == -1:
                log.warning( "forward returned -1: Sequence "+str(i)+" cannot be build.")
                # XXX TODO: Eventually this should trickle down to C-level
                # Returning -DBL_MIN instead of infinity is stupid, since the latter allows
                # to continue further computations with that inf, which causes
                # things to blow up later.
                # cmodel.logp() could do without a return value if -Inf is returned
                # What should be the semantics in case of computing the likelihood of
                # a set of sequences?
                likelihoodList.append(-float('Inf'))
            else:
                likelihoodList.append(likelihood)

        # resetting class_change->k to default
        if self.cmodel.cos > 1:
            self.cmodel.class_change.k = -1

        return likelihoodList

    def viterbi(self, emissionSequences):
        """ Compute the Viterbi-path for each sequence in emissionSequences

        @param emissionSequences can either be a SequenceSet or an
        EmissionSequence

        Result: [q_0, ..., q_T] the viterbi-path of emission_sequences is an
        EmmissionSequence object,
        [[q_0^0, ..., q_T^0], ..., [q_0^k, ..., q_T^k]} for a k-sequence
        SequenceSet
        """
        emissionSequences = emissionSequences.asSequenceSet()
        seqNumber = len(emissionSequences)

        if self.cmodel.cos > 1:
            log.debug( "self.cmodel.cos = "+ str( self.cmodel.cos))
            assert self.cmodel.class_change is not None, "Error: class_change not initialized."

        allLogs = []
        allPaths = []
        for i in range(seqNumber):
            if self.cmodel.cos > 1:
                # if emissionSequence is a sequenceSet with multiple sequences,
                # use sequence index as class_change.k
                self.cmodel.class_change.k = i

            seq = emissionSequences.cseq.getSequence(i)
            seq_len = emissionSequences.cseq.getLength(i)

            try:
                viterbiPath, log_p = self.cmodel.viterbi(seq, seq_len)
            except TypeError:
                viterbiPath, log_p = (None, float("-infinity"))

            # BUGFIX: only free the C path when one was actually returned;
            # the original unconditionally called free(None) on the error path
            if viterbiPath is not None:
                onePath = ghmmwrapper.int_array2list(viterbiPath, seq_len/self.cmodel.dim)
                ghmmwrapper.free(viterbiPath)
            else:
                onePath = []

            allPaths.append(onePath)
            allLogs.append(log_p)

        # resetting class_change->k to default
        if self.cmodel.cos > 1:
            self.cmodel.class_change.k = -1

        if emissionSequences.cseq.seq_number > 1:
            return (allPaths, allLogs)
        else:
            return (allPaths[0], allLogs[0])

    def baumWelch(self, trainingSequences, nrSteps=ghmmwrapper.MAX_ITER_BW, loglikelihoodCutoff=ghmmwrapper.EPS_ITER_BW):
        """ Reestimate the model parameters given the training_sequences.

        Perform at most nr_steps until the improvement in likelihood
        is below likelihood_cutoff

        @param trainingSequences can either be a SequenceSet or a Sequence
        @param nrSteps the maximal number of BW-steps
        @param loglikelihoodCutoff the least relative improvement in likelihood
        with respect to the last iteration required to continue.

        Result: Final loglikelihood
        """
        if not isinstance(trainingSequences, SequenceSet) and not isinstance(trainingSequences, EmissionSequence):
            raise TypeError("baumWelch requires a SequenceSet or EmissionSequence object.")

        if not self.emissionDomain.CDataType == "double":
            raise TypeError("Continuous sequence needed.")

        self.baumWelchSetup(trainingSequences, nrSteps, loglikelihoodCutoff)
        ghmmwrapper.ghmm_cmodel_baum_welch(self.BWcontext)
        likelihood = ghmmwrapper.double_array_getitem(self.BWcontext.logp, 0)
        #(steps_made, loglikelihood_array, scale_array) = self.baumWelchStep(nrSteps,
        #                                                                    loglikelihoodCutoff)
        self.baumWelchDelete()

        return likelihood

    def baumWelchSetup(self, trainingSequences, nrSteps, loglikelihoodCutoff=ghmmwrapper.EPS_ITER_BW):
        """ Setup necessary temporary variables for Baum-Welch-reestimation.

        Use with baumWelchStep for more control over the training, computing
        diagnostics or doing noise-insertion

        @param trainingSequences can either be a SequenceSet or a Sequence
        @param nrSteps the maximal number of BW-steps
        @param loglikelihoodCutoff the least relative improvement in likelihood
        with respect to the last iteration required to continue.
        """
        self.BWcontext = ghmmwrapper.ghmm_cmodel_baum_welch_context(
            self.cmodel, trainingSequences.cseq)
        self.BWcontext.eps = loglikelihoodCutoff
        self.BWcontext.max_iter = nrSteps

    def baumWelchStep(self, nrSteps, loglikelihoodCutoff):
        """
        Compute one iteration of Baum Welch estimation.

        Use with baumWelchSetup for more control over the training, computing
        diagnostics or doing noise-insertion
        """
        # XXX Implement me
        raise NotImplementedError

    def baumWelchDelete(self):
        """
        Delete the necessary temporary variables for Baum-Welch-reestimation
        """
        self.BWcontext = None

    def asMatrices(self):
        "Return the parameters in matrix form: [A, B, pi] with B[i] = [mu_i, sigma_i]."
        A = []
        B = []
        pi = []
        for i in range(self.cmodel.N):
            A.append([0.0] * self.N)
            B.append([0.0] * 2)
            state = self.cmodel.getState(i)
            pi.append(state.pi)

            B[i][0] = state.getMean(0)
            B[i][1] = state.getStdDev(0)

            # out_a row 0 holds the (single transition class) probabilities
            for j in range(state.out_states):
                state_index = ghmmwrapper.int_array_getitem(state.out_id, j)
                A[i][state_index] = ghmmwrapper.double_matrix_getitem(state.out_a,0,j)

        return [A,B,pi]
# XXX - this class will be taken over by ContinuousMixtureHMM
class GaussianMixtureHMM(GaussianEmissionHMM):
    """ HMMs with mixtures of Gaussians as emissions.

    Optional features:
    - fixing mixture components in training
    """

    def getEmission(self, i, comp):
        """
        @returns (mu, sigma^2, weight) of component 'comp' in state 'i'
        """
        state = self.cmodel.getState(i)
        mu = state.getMean(comp)
        sigma = state.getStdDev(comp)
        # BUGFIX: local was misspelled 'weigth'
        weight = state.getWeight(comp)
        return (mu, sigma, weight)

    def setEmission(self, i, comp, values):
        """ Set the emission distribution parameters for a single component in a single state.

        @param i index of a state
        @param comp index of a mixture component
        @param values tuple of mu, sigma, weight
        """
        mu, sigma, weight = values
        # ensure proper indices
        if not 0 <= i < self.N:
            raise IndexError("Index " + str(i) + " out of bounds.")

        state = self.cmodel.getState(i)
        state.setMean(comp, float(mu))  # GHMM C is german: mue instead of mu
        state.setStdDev(comp, float(sigma))
        state.setWeight(comp, float(weight))

    def getMixtureFix(self,state):
        """ @returns list of fix flags, one per mixture component of 'state' """
        s = self.cmodel.getState(state)
        mixfix = []
        for i in range(s.M):
            emission = s.getEmission(i)
            mixfix.append(emission.fixed)
        return mixfix

    def setMixtureFix(self, state ,flags):
        """ Sets the fix flag for every mixture component of 'state' from 'flags' """
        s = self.cmodel.getState(state)
        for i in range(s.M):
            emission = s.getEmission(i)
            emission.fixed = flags[i]

    def __str__(self):
        """ Compact summary; abbreviates models with more than 4 states. """
        hmm = self.cmodel
        strout = [str(self.__class__.__name__)]
        if self.cmodel.name:
            strout.append( " " + str(self.cmodel.name))
        strout.append( "(N="+ str(hmm.N)+')\n')

        f = lambda x: "%.2f" % (x,) # float rounding function

        if hmm.N <= 4:
            iter_list = range(self.N)
        else:
            iter_list = [0,1,'X',hmm.N-2,hmm.N-1]

        for k in iter_list:
            if k == 'X':
                strout.append('\n ...\n\n')
                continue

            state = hmm.getState(k)
            strout.append(" state "+ str(k) + " (")
            strout.append( "initial=" + f(state.pi) )
            if self.cmodel.cos > 1:
                strout.append(', cos='+ str(self.cmodel.cos))
            strout.append(')\n')

            # collect weight / mean / variance of every component as strings
            weight = ""
            mue = ""
            u = ""

            for outp in range(state.M):
                emission = state.getEmission(outp)
                weight += str(ghmmwrapper.double_array_getitem(state.c,outp))+", "
                mue += str(emission.mean.val)+", "
                u += str(emission.variance.val)+", "

            strout.append( " Emissions (")
            strout.append("weights=" + str(weight) + ", ")
            strout.append("mu=" + str(mue) + ", ")
            strout.append("sigma=" + str(u) + ")\n")

            strout.append( " Transitions: ")
            if self.cmodel.cos > 1:
                strout.append("\n")

            for c in range(self.cmodel.cos):
                if self.cmodel.cos > 1:
                    strout.append(' class: ' + str(c)+ ':' )
                for i in range( state.out_states):
                    strout.append('->' + str(state.getOutState(i)) + ' (' + str(state.getOutProb(i, c))+')' )
                    if i < state.out_states-1:
                        strout.append( ', ')
                strout.append('\n')

        # BUGFIX: was join(strout,'') which relies on the Py3-removed string.join
        return ''.join(strout)

    def verboseStr(self):
        "defines string representation"
        hmm = self.cmodel

        strout = ["\nOverview of HMM:"]
        strout.append("\nNumber of states: "+ str(hmm.N))
        strout.append("\nNumber of mixture components: "+ str(hmm.M))

        for k in range(hmm.N):
            state = hmm.getState(k)
            strout.append("\n\nState number "+ str(k) +":")
            strout.append("\nInitial probability: " + str(state.pi))
            strout.append("\n"+ str(state.M) + " mixture component(s):\n")

            weight = ""
            mue = ""
            u = ""

            for outp in range(state.M):
                emission = state.getEmission(outp)
                weight += str(ghmmwrapper.double_array_getitem(state.c,outp))+", "
                mue += str(emission.mean.val)+", "
                u += str(emission.variance.val)+", "

            strout.append(" pdf component weights : " + str(weight) + "\n")
            strout.append(" mean vector: " + str(mue) + "\n")
            strout.append(" variance vector: " + str(u) + "\n")

            for c in range(self.cmodel.cos):
                strout.append("\n Class : " + str(c) )
                strout.append("\n Outgoing transitions:")
                for i in range( state.out_states):
                    strout.append("\n transition to state " + str(state.getOutState(i)) + " with probability = " + str(state.getOutProb(i, c)))
                strout.append("\n Ingoing transitions:")
                for i in range(state.in_states):
                    strout.append("\n transition from state " + str(state.getInState(i)) +" with probability = "+ str(state.getInProb(i, c)))

            strout.append("\nint fix:" + str(state.fix) + "\n")
        return ''.join(strout)

    def asMatrices(self):
        "Return the parameters in matrix form: B[i] = [means, variances, weights]."
        A = []
        B = []
        pi = []
        for i in range(self.cmodel.N):
            A.append([0.0] * self.N)
            B.append([])
            state = self.cmodel.getState(i)
            pi.append(state.pi)

            mulist = []
            siglist = []
            for j in range(state.M):
                emission = state.getEmission(j)
                mulist.append(emission.mean.val)
                siglist.append(emission.variance.val)

            B[i].append(mulist)
            B[i].append(siglist)
            # component weights live in the C array state.c
            B[i].append(ghmmwrapper.double_array2list(state.c, state.M))

            for j in range(state.out_states):
                state_index = ghmmwrapper.int_array_getitem(state.out_id, j)
                A[i][state_index] = ghmmwrapper.double_matrix_getitem(state.out_a,0,j)

        return [A,B,pi]
class ContinuousMixtureHMM(GaussianMixtureHMM):
    """ HMMs with mixtures of any univariate (one dimensional) Continuous
    Distributions as emissions.

    Optional features:
    - fixing mixture components in training
    """

    def getEmission(self, i, comp):
        """
        @returns the paramenters of component 'comp' in state 'i'
        - (type, mu, sigma^2, weight) - for a gaussian component
        - (type, mu, sigma^2, min, weight) - for a right tail gaussian
        - (type, mu, sigma^2, max, weight) - for a left tail gaussian
        - (type, max, mix, weight) - for a uniform

        NOTE(review): an unrecognized emission type silently falls through
        and returns None - callers should be prepared for that.
        """
        state = self.cmodel.getState(i)
        emission = state.getEmission(comp)
        if (emission.type == ghmmwrapper.normal or
            emission.type == ghmmwrapper.normal_approx):
            return (emission.type, emission.mean.val, emission.variance.val, state.getWeight(comp))
        elif emission.type == ghmmwrapper.normal_right:
            return (emission.type, emission.mean.val, emission.variance.val,
                    emission.min, state.getWeight(comp))
        elif emission.type == ghmmwrapper.normal_left:
            return (emission.type, emission.mean.val, emission.variance.val,
                    emission.max, state.getWeight(comp))
        elif emission.type == ghmmwrapper.uniform:
            return (emission.type, emission.max, emission.min, state.getWeight(comp))

    def setEmission(self, i, comp, distType, values):
        """ Set the emission distribution parameters for a mixture component
        of a single state.

        @param i index of a state
        @param comp index of a mixture component
        @param distType type of the distribution
        @param values tuple (mu, sigma, a , weight) and is interpreted depending
        on distType
        - mu - mean for normal, normal_approx, normal_right, normal_left
        - mu - max for uniform
        - sigma - standard deviation for normal, normal_approx, normal_right,
        normal_left
        - sigma - min for uniform
        - a - cut-off normal_right and normal_left
        - weight - always component weight
        """
        mu, sigma, a, weight = values
        # ensure proper indices
        if not 0 <= i < self.N:
            raise IndexError("Index " + str(i) + " out of bounds.")

        state = self.cmodel.getState(i)
        state.setWeight(comp, float(weight))
        emission = state.getEmission(comp)
        emission.type = distType

        if (emission.type == ghmmwrapper.normal or
            emission.type == ghmmwrapper.normal_approx or
            emission.type == ghmmwrapper.normal_right or
            emission.type == ghmmwrapper.normal_left):
            emission.mean.val = mu
            emission.variance.val = sigma
            # the one-sided gaussians additionally carry a cut-off point
            if emission.type == ghmmwrapper.normal_right:
                emission.min = a
            if emission.type == ghmmwrapper.normal_left:
                emission.max = a
        elif emission.type == ghmmwrapper.uniform:
            # for uniform, (mu, sigma) are reused as (max, min)
            emission.min = sigma
            emission.max = mu
        else:
            raise TypeError("Unknown distribution type" + str(distType))

    def __str__(self):
        """ defines string representation """
        return "<ContinuousMixtureHMM with "+str(self.cmodel.N)+" states>"

    def verboseStr(self):
        """ Human readable model description """
        hmm = self.cmodel
        strout = ["\nOverview of HMM:"]
        strout.append("\nNumber of states: "+ str(hmm.N))
        strout.append("\nMaximum number of output distributions per state: "+ str(hmm.M))

        for k in range(hmm.N):
            state = hmm.getState(k)
            strout.append("\n\nState number "+ str(k) +":")
            strout.append("\n Initial probability: " + str(state.pi))
            strout.append("\n "+ str(state.M) + " density function(s):")

            for outp in range(state.M):
                comp_str = "\n " + str(state.getWeight(outp)) + " * "
                emission = state.getEmission(outp)
                type = emission.type
                if type == ghmmwrapper.normal:
                    comp_str += "normal(mean = " + str(emission.mean.val)
                    comp_str += ", variance = " + str(emission.variance.val) + ")"
                elif type == ghmmwrapper.normal_right:
                    comp_str += "normal right tail(mean = " + str(emission.mean.val)
                    comp_str += ", variance = " + str(emission.variance.val)
                    comp_str += ", minimum = " + str(emission.min) + ")"
                elif type == ghmmwrapper.normal_left:
                    comp_str += "normal left tail(mean = " + str(emission.mean.val)
                    comp_str += ", variance = " + str(emission.variance.val)
                    comp_str += ", maximum = " + str(emission.max) + ")"
                elif type == ghmmwrapper.uniform:
                    comp_str += "uniform(minimum = " + str(emission.min)
                    comp_str += ", maximum = " + str(emission.max) + ")"

                strout.append(comp_str)

            for c in range(self.cmodel.cos):
                strout.append("\n Class : " + str(c))
                strout.append("\n Outgoing transitions:")
                for i in range( state.out_states):
                    strout.append("\n transition to state " + str(state.getOutState(i)) +
                                  " with probability = " + str(state.getOutProb(i, c)))

                strout.append("\n Ingoing transitions:")
                for i in range(state.in_states):
                    strout.append("\n transition from state " + str(state.getInState(i)) +
                                  " with probability = "+ str(state.getInProb(i, c)))

            strout.append("\n int fix:" + str(state.fix))
        strout.append("\n")
        # BUGFIX: was join(strout,'') which relies on the Py3-removed string.join
        return ''.join(strout)

    def asMatrices(self):
        """Return the parameters in matrix form.

        It also returns the density type"""
        # XXX inherit transitions ????
        A = []
        B = []
        pi = []
        d = []
        for i in range(self.cmodel.N):
            A.append([0.0] * self.N)
            B.append([])
            state = self.cmodel.getState(i)
            pi.append(state.pi)
            denList = []

            # one parameter 4-tuple per component, padded with 0 where unused
            parlist = []
            for j in range(state.M):
                emission = state.getEmission(j)
                denList.append(emission.type)
                if emission.type == ghmmwrapper.normal:
                    parlist.append([emission.mean.val, emission.variance.val,
                                    0, state.getWeight(j)])
                elif emission.type == ghmmwrapper.normal_right:
                    parlist.append([emission.mean.val, emission.variance.val,
                                    emission.min, state.getWeight(j)])
                elif emission.type == ghmmwrapper.normal_left:
                    parlist.append([emission.mean.val, emission.variance.val,
                                    emission.max, state.getWeight(j)])
                elif emission.type == ghmmwrapper.uniform:
                    parlist.append([emission.max, emission.min, 0, state.getWeight(j)])
                else:
                    raise TypeError("Unsupported distribution" + str(emission.type))

            # transpose: B[i] holds one list per parameter position
            for j in range(4):
                B[i].append([l[j] for l in parlist])

            d.append(denList)

            for j in range(state.out_states):
                state_index = state.getOutState(j)
                A[i][state_index] = ghmmwrapper.double_matrix_getitem(state.out_a,0,j)

        return [A,B,pi,d]
class MultivariateGaussianMixtureHMM(GaussianEmissionHMM):
    """ HMMs with Multivariate Gaussian distribution as emissions.

    States can have multiple mixture components.
    """

    def __init__(self, emissionDomain, distribution, cmodel):
        HMM.__init__(self, emissionDomain, distribution, cmodel)
        # Baum Welch context, call baumWelchSetup to initialize.
        # CONSISTENCY FIX: use None (as in GaussianEmissionHMM) instead of ""
        self.BWcontext = None

    def getEmission(self, i, m):
        """
        @returns mean and covariance matrix of component m in state i
        (the covariance matrix is flattened to a list of dim*dim floats)
        """
        # ensure proper index
        assert 0 <= i < self.N, "Index " + str(i) + " out of bounds."
        state = self.cmodel.getState(i)
        assert 0 <=m < state.M, "Index " + str(m) + " out of bounds."
        emission = state.getEmission(m)
        mu = ghmmwrapper.double_array2list(emission.mean.vec,emission.dimension)
        sigma = ghmmwrapper.double_array2list(emission.variance.mat,emission.dimension*emission.dimension)
        return (mu, sigma)

    def setEmission(self, i, m, values):
        """ Set the emission distributionParameters for mixture component m in
        state i

        @param i index of a state
        @param m index of a mixture component
        @param values tuple of mu, sigma (sigma as flattened dim*dim list)
        """
        mu, sigma = values
        # ensure proper indices
        assert 0 <= i < self.N, "Index " + str(i) + " out of bounds."
        state = self.cmodel.getState(i)
        assert 0 <=m < state.M, "Index " + str(m) + " out of bounds."

        emission = state.getEmission(m)
        emission.mean.vec = ghmmwrapper.list2double_array(mu)
        emission.variance.mat = ghmmwrapper.list2double_array(sigma)

    def __str__(self):
        """ Human readable model description. """
        hmm = self.cmodel
        strout = ["\nHMM Overview:"]
        strout.append("\nNumber of states: " + str(hmm.N))
        strout.append("\nmaximum Number of mixture components: " + str(hmm.M))
        strout.append("\nNumber of dimensions: " + str(hmm.dim))

        for k in range(hmm.N):
            state = hmm.getState(k)
            strout.append("\n\nState number "+ str(k) + ":")
            strout.append("\nInitial probability: " + str(state.pi))
            strout.append("\nNumber of mixture components: " + str(state.M))

            for m in range(state.M):
                strout.append("\n\n  Emission number "+ str(m) + ":")
                emission = state.getEmission(m)
                weight = str(ghmmwrapper.double_array_getitem(state.c,m))
                mue = str(ghmmwrapper.double_array2list(emission.mean.vec,emission.dimension))
                u = str(ghmmwrapper.double_array2list(emission.variance.mat,emission.dimension*emission.dimension))
                uinv = str(ghmmwrapper.double_array2list(emission.sigmainv,emission.dimension*emission.dimension))
                ucd = str(ghmmwrapper.double_array2list(emission.sigmacd,emission.dimension*emission.dimension))
                strout.append("\n  emission type: " + str(emission.type))
                strout.append("\n  emission weight: " + str(weight))
                strout.append("\n  mean: " + str(mue))
                strout.append("\n  covariance matrix: " + str(u))
                strout.append("\n  inverse of covariance matrix: " + str(uinv))
                strout.append("\n  determinant of covariance matrix: " + str(emission.det))
                strout.append("\n  cholesky decomposition of covariance matrix: " + str(ucd))
                strout.append("\n  fix: " + str(state.fix))

            for c in range(self.cmodel.cos):
                strout.append("\n\n  Class : " + str(c) )
                strout.append("\n  Outgoing transitions:")
                for i in range( state.out_states):
                    strout.append("\n    transition to state " + str(state.getOutState(i) ) + " with probability = " + str(state.getOutProb(i, c)))
                strout.append("\n  Ingoing transitions:")
                for i in range(state.in_states):
                    strout.append("\n    transition from state " + str(state.getInState(i)) +" with probability = "+ str(state.getInProb(i, c)))

        # BUGFIX: was join(strout,'') which relies on the Py3-removed string.join
        return ''.join(strout)

    def asMatrices(self):
        "Return the parameters in matrix form."
        A = []
        B = []
        pi = []
        for i in range(self.cmodel.N):
            A.append([0.0] * self.N)
            emissionparams = []
            state = self.cmodel.getState(i)
            pi.append(state.pi)
            for m in range(state.M):
                emission = state.getEmission(m)
                mu = ghmmwrapper.double_array2list(emission.mean.vec,emission.dimension)
                sigma = ghmmwrapper.double_array2list(emission.variance.mat,(emission.dimension*emission.dimension))
                emissionparams.append(mu)
                emissionparams.append(sigma)

            # append the component weights only for true mixtures
            if state.M > 1:
                weights = ghmmwrapper.double_array2list(state.c,state.M)
                emissionparams.append(weights)

            B.append(emissionparams)

            for j in range(state.out_states):
                state_index = ghmmwrapper.int_array_getitem(state.out_id, j)
                A[i][state_index] = ghmmwrapper.double_matrix_getitem(state.out_a,0,j)

        return [A,B,pi]
def HMMDiscriminativeTraining(HMMList, SeqList, nrSteps = 50, gradient = 0):
    """ Trains a couple of HMMs to increase the probablistic distance
    if the the HMMs are used as classifier.

    @param HMMList List of labeled HMMs
    @param SeqList List of labeled sequences, one for each HMM
    @param nrSteps maximal number of iterations
    @param gradient 0 or 1, selects the training variant passed to the C layer
    @returns the discriminative performance of the trained models

    @note this method does a initial expectation maximization training
    """
    if len(HMMList) != len(SeqList):
        raise TypeError('Input list are not equally long')

    if not isinstance(HMMList[0], StateLabelHMM):
        raise TypeError('Input is not a StateLabelHMM')

    if not SeqList[0].hasStateLabels:
        raise TypeError('Input sequence has no labels')

    inplen = len(HMMList)
    if gradient not in [0, 1]:
        # BUGFIX: 'gradient' is an int; the original concatenated it to a str
        # and raised TypeError instead of the intended UnknownInputType
        raise UnknownInputType("TrainingType " + str(gradient) + " not supported.")

    for i in range(inplen):
        if HMMList[i].emissionDomain.CDataType == "double":
            raise TypeError('discriminative training is at the moment only implemented on discrete HMMs')
        #initial training with Baum-Welch
        HMMList[i].baumWelch(SeqList[i], 3, 1e-9)

    # marshal the Python-level models/sequences into C pointer arrays
    HMMArray = ghmmwrapper.dmodel_ptr_array_alloc(inplen)
    SeqArray = ghmmwrapper.dseq_ptr_array_alloc(inplen)

    for i in range(inplen):
        ghmmwrapper.dmodel_ptr_array_setitem(HMMArray, i, HMMList[i].cmodel)
        ghmmwrapper.dseq_ptr_array_setitem(SeqArray, i, SeqList[i].cseq)

    ghmmwrapper.ghmm_dmodel_label_discriminative(HMMArray, SeqArray, inplen, nrSteps, gradient)

    # copy the (possibly replaced) C structs back into the Python wrappers
    for i in range(inplen):
        HMMList[i].cmodel = ghmmwrapper.dmodel_ptr_array_getitem(HMMArray, i)
        SeqList[i].cseq = ghmmwrapper.dseq_ptr_array_getitem(SeqArray, i)

    ghmmwrapper.free(HMMArray)
    ghmmwrapper.free(SeqArray)

    return HMMDiscriminativePerformance(HMMList, SeqList)
def HMMDiscriminativePerformance(HMMList, SeqList):
    """ Computes the discriminative performance of the HMMs in HMMList
    under the sequences in SeqList
    @param HMMList List of labeled HMMs
    @param SeqList List of labeled sequences, one for each HMM
    @return the discriminative performance as computed by the C library
    """
    if len(HMMList) != len(SeqList):
        # BUG FIX: was 'raise TypeRrror(...)', a NameError at raise time
        raise TypeError('Input list are not equally long')
    if not isinstance(HMMList[0], StateLabelHMM):
        raise TypeError('Input is not a StateLabelHMM')
    if not SeqList[0].hasStateLabels:
        raise TypeError('Input sequence has no labels')
    inplen = len(HMMList)
    single = [0.0] * inplen
    # marshal the python models/sequences into C pointer arrays
    HMMArray = ghmmwrapper.dmodel_ptr_array_alloc(inplen)
    SeqArray = ghmmwrapper.dseq_ptr_array_alloc(inplen)
    for i in range(inplen):
        ghmmwrapper.dmodel_ptr_array_setitem(HMMArray, i, HMMList[i].cmodel)
        ghmmwrapper.dseq_ptr_array_setitem(SeqArray, i, SeqList[i].cseq)
    retval = ghmmwrapper.ghmm_dmodel_label_discrim_perf(HMMArray, SeqArray, inplen)
    ghmmwrapper.free(HMMArray)
    ghmmwrapper.free(SeqArray)
    return retval
########## Here comes all the Pair HMM stuff ##########
class DiscretePairDistribution(DiscreteDistribution):
    """
    A DiscreteDistribution over TWO Alphabets: The discrete distribution
    is parameterized by the vector of probabilities.
    To get the index of the vector that corresponds to a pair of characters
    use the getPairIndex method.
    """

    def __init__(self, alphabetX, alphabetY, offsetX, offsetY):
        """
        construct a new DiscretePairDistribution
        @param alphabetX Alphabet object for sequence X
        @param alphabetY Alphabet object for sequence Y
        @param offsetX number of characters the alphabet of sequence X
        consumes at a time
        @param offsetY number of characters the alphabet of sequence Y
        consumes at a time
        """
        self.alphabetX = alphabetX
        self.alphabetY = alphabetY
        self.offsetX = offsetX
        self.offsetY = offsetY
        # probability vector over all pairs; must be assigned before use
        # (see getEmptyProbabilityVector for the expected length)
        self.prob_vector = None
        # C function mapping a pair of internal symbols to a vector index
        self.pairIndexFunction = ghmmwrapper.ghmm_dpmodel_pair

    def getPairIndex(self, charX, charY):
        """
        get the index of a pair of two characters in the probability vector
        (if you use the int representation both values must be ints)
        @param charX character chain or int representation
        @param charY character chain or int representation
        @return the index of the pair in the probability vector
        """
        if (not (type(charX) == type(1) and type(charY) == type(1))):
            # character (string) representation: translate to internal ints;
            # "-" denotes a gap and is mapped to 0
            if (charX == "-"):
                intX = 0  # check this!
            else:
                intX = self.alphabetX.internal(charX)
            if (charY == "-"):
                intY = 0  # check this!
            else:
                intY = self.alphabetY.internal(charY)
        else:
            # already in internal int representation
            intX = charX
            intY = charY
        return self.pairIndexFunction(intX, intY,
                                      len(self.alphabetX),
                                      self.offsetX, self.offsetY)

    def setPairProbability(self, charX, charY, probability):
        """
        set the probability of the pair charX and charY to probability
        @param charX character chain or int representation
        @param charY character chain or int representation
        @param probability probability (0<=float<=1)
        """
        self.prob_vector[self.getPairIndex(charX, charY)] = probability

    def getEmptyProbabilityVector(self):
        """
        get an empty probability vector for this distribution (filled with 0.0)
        @return list of floats
        """
        # index of the last possible pair + 1 gives the required length
        length = self.pairIndexFunction(len(self.alphabetX) - 1,
                                        len(self.alphabetY) - 1,
                                        len(self.alphabetX),
                                        self.offsetX, self.offsetY) + 1
        return [0.0 for i in range(length)]

    def getCounts(self, sequenceX, sequenceY):
        """
        extract the pair counts for aligned sequences sequenceX and sequenceY
        @param sequenceX string for sequence X
        @param sequenceY string for sequence Y
        @return a list of counts
        """
        # NOTE(review): the '/' divisions below rely on Python 2 integer
        # division; under Python 3 these would need '//'
        counts = self.getEmptyProbabilityVector()
        if (self.offsetX != 0 and self.offsetY != 0):
            # both sequences advance: count aligned character pairs
            assert len(sequenceX) / self.offsetX == len(sequenceY) / self.offsetY
            for i in range(len(sequenceX) / self.offsetX):
                charX = sequenceX[i*self.offsetX:(i+1)*self.offsetX]
                charY = sequenceY[i*self.offsetY:(i+1)*self.offsetY]
                counts[self.getPairIndex(charX, charY)] += 1
            return counts
        elif (self.offsetX == 0 and self.offsetY == 0):
            log.error("Silent states (offsetX==0 and offsetY==0) not supported")
            return counts
        elif (self.offsetX == 0):
            # X does not advance: pair every Y character with a gap
            charX = "-"
            for i in range(len(sequenceY) / self.offsetY):
                charY = sequenceY[i*self.offsetY:(i+1)*self.offsetY]
                counts[self.getPairIndex(charX, charY)] += 1
            return counts
        elif (self.offsetY == 0):
            # Y does not advance: pair every X character with a gap
            charY = "-"
            for i in range(len(sequenceX) / self.offsetX):
                charX = sequenceX[i*self.offsetX:(i+1)*self.offsetX]
                counts[self.getPairIndex(charX, charY)] += 1
            return counts
# XXX Change to MultivariateEmissionSequence
class ComplexEmissionSequence(object):
    """
    A MultivariateEmissionSequence is a sequence of multiple emissions per
    time-point. Emissions can be from distinct EmissionDomains. In particular,
    integer and floating point emissions are allowed. Access to emissions is
    given by the index, seperately for discrete and continuous EmissionDomains.

    Example: XXX

    MultivariateEmissionSequence also links to the underlying C-structure.

    Note: ComplexEmissionSequence has to be considered imutable for the moment.
    There are no means to manipulate the sequence positions yet.
    """

    def __init__(self, emissionDomains, sequenceInputs, labelDomain = None, labelInput = None):
        """
        @param emissionDomains a list of EmissionDomain objects corresponding
        to the list of sequenceInputs
        @param sequenceInputs a list of sequences of the same length (e.g.
        nucleotides and double values) that will be encoded
        by the corresponding EmissionDomain
        @bug @param labelDomain unused
        @bug @param labelInput unused
        """
        assert len(emissionDomains) == len(sequenceInputs)
        assert len(sequenceInputs) > 0
        self.length = len(sequenceInputs[0])
        for sequenceInput in sequenceInputs:
            assert self.length == len(sequenceInput)

        # partition the inputs by the C data type of their domain
        self.discreteDomains = []
        self.discreteInputs = []
        self.continuousDomains = []
        self.continuousInputs = []
        for i in range(len(emissionDomains)):
            if emissionDomains[i].CDataType == "int":
                self.discreteDomains.append(emissionDomains[i])
                self.discreteInputs.append(sequenceInputs[i])
            if emissionDomains[i].CDataType == "double":
                self.continuousDomains.append(emissionDomains[i])
                self.continuousInputs.append(sequenceInputs[i])

        # allocate the underlying C sequence struct
        self.cseq = ghmmwrapper.ghmm_dpseq(self.length,
                                           len(self.discreteDomains),
                                           len(self.continuousDomains))

        # encode and copy each discrete sequence into the C struct
        for i in range(len(self.discreteInputs)):
            internalInput = []
            offset = self.discreteDomains[i].getExternalCharacterLength()
            if (offset == None):
                # domain encodes the whole sequence at once
                internalInput = self.discreteDomains[i].internalSequence(self.discreteInputs[i])
            else:
                if (type(self.discreteInputs[i]) == type([])):
                    # we have string sequences with equally large characters so
                    # we can join the list representation
                    self.discreteInputs[i] = ("").join(self.discreteInputs[i])
                # the first offset-1 positions have no complete character yet
                for j in range(offset - 1):
                    internalInput.append(-1)  # put -1 at the start
                # encode each window of 'offset' characters ending at j
                for j in range(offset - 1, len(self.discreteInputs[i])):
                    internalInput.append(self.discreteDomains[i].internal(
                        self.discreteInputs[i][j - (offset - 1):j + 1]))
            pointerDiscrete = self.cseq.get_discrete(i)
            for j in range(len(self)):
                ghmmwrapper.int_array_setitem(pointerDiscrete, j, internalInput[j])
            # self.cseq.set_discrete(i, seq)

        # copy each continuous sequence into the C struct
        for i in range(len(self.continuousInputs)):
            # seq = [float(x) for x in self.continuousInputs[i]]
            # seq = ghmmwrapper.list2double_array(seq)
            pointerContinuous = self.cseq.get_continuous(i)
            for j in range(len(self)):
                ghmmwrapper.double_array_setitem(pointerContinuous, j, self.continuousInputs[i][j])
            # self.cseq.set_continuous(i, seq)

    def __del__(self):
        """
        Deallocation of C sequence struct.
        """
        del self.cseq
        self.cseq = None

    def __len__(self):
        """
        @return the length of the sequence.
        """
        return self.length

    def getInternalDiscreteSequence(self, index):
        """
        access the underlying C structure and return the internal
        representation of the discrete sequence number 'index'
        @param index number of the discrete sequence
        @return a python list of ints
        """
        int_pointer = self.cseq.get_discrete(index)
        internal = ghmmwrapper.int_array2list(int_pointer, len(self))
        # drop the local reference to the C pointer
        int_pointer = None
        return internal

    def getInternalContinuousSequence(self, index):
        """
        access the underlying C structure and return the internal
        representation of the continuous sequence number 'index'
        @param index number of the continuous sequence
        @return a python list of floats
        """
        d_pointer = self.cseq.get_continuous(index)
        internal = ghmmwrapper.double_array2list(d_pointer, len(self))
        return internal

    def getDiscreteSequence(self, index):
        """
        get the 'index'th discrete sequence as it has been given at the input
        @param index number of the discrete sequence
        @return a python sequence
        """
        return self.discreteInputs[index]

    def __getitem__(self, key):
        """
        get a slice of the complex emission sequence
        @param key either int (makes no big sense) or slice object
        @return a new ComplexEmissionSequence containing a slice of the
        original
        """
        domains = []
        for domain in self.discreteDomains:
            domains.append(domain)
        for domain in self.continuousDomains:
            domains.append(domain)
        slicedInput = []
        for input in self.discreteInputs:
            slicedInput.append(input[key])
        for input in self.continuousInputs:
            slicedInput.append(input[key])
        return ComplexEmissionSequence(domains, slicedInput)

    def __str__(self):
        """
        string representation. Access the underlying C-structure and return
        the sequence in all it's encodings (can be quite long)
        @return string representation
        """
        return "<ComplexEmissionSequence>"

    def verboseStr(self):
        """
        string representation. Access the underlying C-structure and return
        the sequence in all it's encodings (can be quite long)
        @return string representation
        """
        s = ("ComplexEmissionSequence (len=%i, discrete=%i, continuous=%i)\n" %
             (self.cseq.length, len(self.discreteDomains),
              len(self.continuousDomains)))
        # one line per discrete sequence, decoded back to external characters
        for i in range(len(self.discreteDomains)):
            s += ("").join([str(self.discreteDomains[i].external(x))
                            for x in self.getInternalDiscreteSequence(i)])
            s += "\n"
        # one comma-separated line per continuous sequence
        for i in range(len(self.continuousDomains)):
            s += (",").join([str(self.continuousDomains[i].external(x))
                             for x in self.getInternalContinuousSequence(i)])
            s += "\n"
        return s
class PairHMM(HMM):
    """
    Pair HMMs with discrete emissions over multiple alphabets.
    Optional features: continuous values for transition classes
    """

    def __init__(self, emissionDomains, distribution, cmodel):
        """
        create a new PairHMM object (this should only be done using the
        factory: e.g model = PairHMMOpenXML(modelfile) )
        @param emissionDomains list of EmissionDomain objects
        @param distribution (not used) inherited from HMM
        @param cmodel a swig pointer on the underlying C structure
        """
        HMM.__init__(self, emissionDomains[0], distribution, cmodel)
        self.emissionDomains = emissionDomains
        self.alphabetSizes = []
        for domain in self.emissionDomains:
            if (isinstance(domain, Alphabet)):
                self.alphabetSizes.append(len(domain))
        # maximal table size used by the linear space Viterbi variant
        self.maxSize = 10000
        self.model_type = self.cmodel.model_type  # model type
        self.background = None
        self.states = {}

    def __str__(self):
        """
        string representation (more for debuging) shows the contents of the C
        structure ghmm_dpmodel
        @return string representation
        """
        return "<PairHMM with " + str(self.cmodel.N) + " states>"

    def verboseStr(self):
        """
        string representation (more for debuging) shows the contents of the C
        structure ghmm_dpmodel
        @return string representation
        """
        hmm = self.cmodel
        strout = ["\nGHMM Model\n"]
        strout.append("Name: " + str(self.cmodel.name))
        strout.append("\nModelflags: " + self.printtypes(self.cmodel.model_type))
        strout.append("\nNumber of states: " + str(hmm.N))
        strout.append("\nSize of Alphabet: " + str(hmm.M))
        for k in range(hmm.N):
            state = hmm.getState(k)
            strout.append("\n\nState number " + str(k) + ":")
            strout.append("\nInitial probability: " + str(state.pi))
            strout.append("\nOutput probabilites: ")
            #strout.append(str(ghmmwrapper.double_array_getitem(state.b,outp)))
            strout.append("\n")
            strout.append("\nOutgoing transitions:")
            for i in range(state.out_states):
                strout.append("\ntransition to state " + str(state.out_id[i]) + " with probability " + str(ghmmwrapper.double_array_getitem(state.out_a, i)))
            strout.append("\nIngoing transitions:")
            for i in range(state.in_states):
                strout.append("\ntransition from state " + str(state.in_id[i]) + " with probability " + str(ghmmwrapper.double_array_getitem(state.in_a, i)))
            strout.append("\nint fix:" + str(state.fix) + "\n")
        if hmm.model_type & kSilentStates:
            strout.append("\nSilent states: \n")
            for k in range(hmm.N):
                strout.append(str(hmm.silent[k]) + ", ")
            strout.append("\n")
        return join(strout, '')

    def viterbi(self, complexEmissionSequenceX, complexEmissionSequenceY):
        """
        run the naive implementation of the Viterbi algorithm and
        return the viterbi path and the log probability of the path
        @param complexEmissionSequenceX sequence X encoded as ComplexEmissionSequence
        @param complexEmissionSequenceY sequence Y encoded as ComplexEmissionSequence
        @return (path, log_p)
        """
        # get a pointer on a double and a int to get return values by reference
        log_p_ptr = ghmmwrapper.double_array_alloc(1)
        length_ptr = ghmmwrapper.int_array_alloc(1)
        # call log_p and length will be passed by reference
        cpath = self.cmodel.viterbi(complexEmissionSequenceX.cseq,
                                    complexEmissionSequenceY.cseq,
                                    log_p_ptr, length_ptr)
        # get the values from the pointers
        log_p = ghmmwrapper.double_array_getitem(log_p_ptr, 0)
        length = length_ptr[0]
        path = [cpath[x] for x in range(length)]
        # free the memory
        ghmmwrapper.free(log_p_ptr)
        # BUG FIX: was 'ghmmwrapper(length_ptr)' which raised a TypeError
        # (the module object is not callable) and leaked the int array
        ghmmwrapper.free(length_ptr)
        ghmmwrapper.free(cpath)
        return (path, log_p)

    def viterbiPropagate(self, complexEmissionSequenceX, complexEmissionSequenceY, startX=None, startY=None, stopX=None, stopY=None, startState=None, startLogp=None, stopState=None, stopLogp=None):
        """
        run the linear space implementation of the Viterbi algorithm and
        return the viterbi path and the log probability of the path
        @param complexEmissionSequenceX sequence X encoded as ComplexEmissionSequence
        @param complexEmissionSequenceY sequence Y encoded as ComplexEmissionSequence
        Optional parameters to run the algorithm only on a segment:
        @param startX start index in X
        @param startY start index in Y
        @param stopX stop index in X
        @param stopY stop index in Y
        @param startState start the path in this state
        @param stopState path ends in this state
        @param startLogp initialize the start state with this log probability
        @param stopLogp if known this is the logp of the partial path
        @return (path, log_p)
        """
        # get a pointer on a double and a int to get return values by reference
        log_p_ptr = ghmmwrapper.double_array_alloc(1)
        length_ptr = ghmmwrapper.int_array_alloc(1)
        # call log_p and length will be passed by reference
        if (not (startX and startY and stopX and stopY and startState and stopState and startLogp)):
            # no (complete) segment specification: run on the full sequences
            cpath = self.cmodel.viterbi_propagate(
                complexEmissionSequenceX.cseq,
                complexEmissionSequenceY.cseq,
                log_p_ptr, length_ptr,
                self.maxSize)
        else:
            if (stopLogp == None):
                stopLogp = 0
            cpath = self.cmodel.viterbi_propagate_segment(
                complexEmissionSequenceX.cseq,
                complexEmissionSequenceY.cseq,
                log_p_ptr, length_ptr, self.maxSize,
                startX, startY, stopX, stopY, startState, stopState,
                startLogp, stopLogp)
        # get the values from the pointers
        log_p = ghmmwrapper.double_array_getitem(log_p_ptr, 0)
        length = length_ptr[0]
        path = [cpath[x] for x in range(length)]
        # free the memory
        ghmmwrapper.free(log_p_ptr)
        ghmmwrapper.free(length_ptr)
        ghmmwrapper.free(cpath)
        return (path, log_p)

    def logP(self, complexEmissionSequenceX, complexEmissionSequenceY, path):
        """
        compute the log probability of two sequences X and Y and a path
        @param complexEmissionSequenceX sequence X encoded as
        ComplexEmissionSequence
        @param complexEmissionSequenceY sequence Y encoded as
        ComplexEmissionSequence
        @param path the state path
        @return log probability
        """
        cpath = ghmmwrapper.list2int_array(path)
        logP = self.cmodel.viterbi_logP(complexEmissionSequenceX.cseq,
                                        complexEmissionSequenceY.cseq,
                                        cpath, len(path))
        ghmmwrapper.free(cpath)
        return logP

    def addEmissionDomains(self, emissionDomains):
        """
        add additional EmissionDomains that are not specified in the XML file.
        This is used to add information for the transition classes.
        @param emissionDomains a list of EmissionDomain objects
        """
        self.emissionDomains.extend(emissionDomains)
        discreteDomains = []
        continuousDomains = []
        for i in range(len(emissionDomains)):
            if emissionDomains[i].CDataType == "int":
                discreteDomains.append(emissionDomains[i])
                self.alphabetSizes.append(len(emissionDomains[i]))
            if emissionDomains[i].CDataType == "double":
                continuousDomains.append(emissionDomains[i])
        self.cmodel.number_of_alphabets += len(discreteDomains)
        self.cmodel.size_of_alphabet = ghmmwrapper.list2int_array(self.alphabetSizes)
        self.cmodel.number_of_d_seqs += len(continuousDomains)

    def checkEmissions(self, eps=0.0000000000001):
        """
        checks the sum of emission probabilities in all states
        @param eps precision (if the sum is > 1 - eps it passes)
        @return 1 if the emission of all states sum to one, 0 otherwise
        """
        allok = 1
        for state in self.states:
            emissionSum = sum(state.emissions)
            if (abs(1 - emissionSum) > eps):
                log.debug(("Emissions in state %s (%s) do not sum to 1 (%s)" % (state.id, state.label, emissionSum)))
                allok = 0
        return allok

    def checkTransitions(self, eps=0.0000000000001):
        """
        checks the sum of outgoing transition probabilities for all states
        @param eps precision (if the sum is > 1 - eps it passes)
        @return 1 if the transitions of all states sum to one, 0 otherwise
        """
        allok = 1
        # from build matrices in xmlutil: map XML state indices to C indices
        orders = {}
        k = 0  # C style index
        for s in self.states:  # ordering from XML
            orders[s.index] = k
            k = k + 1
        for state in self.states:
            # every transition class must have a normalized outgoing row
            for tclass in range(state.kclasses):
                outSum = 0.0
                c_state = self.cmodel.getState(orders[state.index])
                for out in range(c_state.out_states):
                    outSum += ghmmwrapper.double_matrix_getitem(c_state.out_a,
                                                                out, tclass)
                if (abs(1 - outSum) > eps):
                    log.debug("Outgoing transitions in state %s (%s) do not sum to 1 (%s) for class %s" % (state.id, state.label, outSum, tclass))
                    allok = 0
        return allok
class PairHMMOpenFactory(HMMOpenFactory):
    """
    factory to create PairHMM objects from XML files
    """

    def __call__(self, fileName_file_or_dom, modelIndex = None):
        """
        a call to the factory loads a model from a file specified by the
        filename or from a file object or from a XML Document object and
        initializes the model on the C side (libghmm).
        @param fileName_file_or_dom load the model from a file specified by
        a filename, a file object or a XML Document object
        @param modelIndex not used (inherited from HMMOpenFactory)
        @return PairHMM object
        """
        import xml.dom.minidom
        from ghmm_gato import xmlutil

        # only plain filenames need an existence check
        if not (isinstance(fileName_file_or_dom, StringIO.StringIO) or
                isinstance(fileName_file_or_dom, xml.dom.minidom.Document)):
            if not os.path.exists(fileName_file_or_dom):
                raise IOError('File ' + str(fileName_file_or_dom) + ' not found.')

        hmm_dom = xmlutil.HMM(fileName_file_or_dom)
        if (not hmm_dom.modelType == "pairHMM"):
            raise InvalidModelParameters("Model type specified in the XML file (%s) is not pairHMM" % hmm_dom.modelType)
        # obviously it's a pair HMM
        [alphabets, A, B, pi, state_orders] = hmm_dom.buildMatrices()

        # sanity checks on the matrix dimensions
        if not len(A) == len(A[0]):
            raise InvalidModelParameters("A is not quadratic.")
        if not len(pi) == len(A):
            raise InvalidModelParameters("Length of pi does not match length of A.")
        if not len(A) == len(B):
            raise InvalidModelParameters("Different number of entries in A and B.")

        cmodel = ghmmwrapper.ghmm_dp_init()
        cmodel.N = len(A)
        cmodel.M = -1  # no use anymore len(emissionDomain)
        # tie groups are deactivated by default
        cmodel.tied_to = None

        # assign model identifier (if specified)
        if hmm_dom.name != None:
            cmodel.name = hmm_dom.name
        else:
            cmodel.name = 'Unused'

        alphabets = hmm_dom.getAlphabets()
        cmodel.number_of_alphabets = len(alphabets.keys())
        sizes = [len(alphabets[k]) for k in alphabets.keys()]
        cmodel.size_of_alphabet = ghmmwrapper.list2int_array(sizes)

        # set number of d_seqs to zero. If you want to use them you have to
        # set them manually
        cmodel.number_of_d_seqs = 0

        # c array of states allocated
        cstates = ghmmwrapper.dpstate_array_alloc(cmodel.N)
        # python list of states from xml
        pystates = hmm_dom.state.values()

        silent_flag = 0
        silent_states = []

        maxOffsetX = 0
        maxOffsetY = 0

        transitionClassFlag = 0
        maxTransitionIndexDiscrete = len(alphabets.keys())
        maxTransitionIndexContinuous = 0

        # from build matrices in xmlutil: map XML state indices to C indices
        orders = {}
        k = 0  # C style index
        for s in pystates:  # ordering from XML
            orders[s.index] = k
            k = k + 1

        # initialize states
        for i in range(cmodel.N):
            cstate = ghmmwrapper.dpstate_array_getitem(cstates, i)
            pystate = pystates[i]
            # expected emission vector length; match states (both offsets
            # nonzero) emit pairs, hence the squared alphabet size
            size = len(pystate.itsHMM.hmmAlphabets[pystate.alphabet_id])
            if (pystate.offsetX != 0 and pystate.offsetY != 0):
                size = size**2
            if (len(B[i]) != size):
                raise InvalidModelParameters("in state %s len(emissions) = %i size should be %i" % (pystate.id, len(B[i]), size))
            cstate.b = ghmmwrapper.list2double_array(B[i])
            cstate.pi = pi[i]
            # log_pi = 1 is used as a sentinel for "pi == 0"
            if (pi[i] != 0):
                cstate.log_pi = math.log(pi[i])
            else:
                cstate.log_pi = 1
            cstate.alphabet = pystate.alphabet_id
            cstate.offset_x = pystate.offsetX
            cstate.offset_y = pystate.offsetY
            cstate.kclasses = pystate.kclasses

            if (pystate.offsetX > maxOffsetX):
                maxOffsetX = pystate.offsetX
            if (pystate.offsetY > maxOffsetY):
                maxOffsetY = pystate.offsetY

            # a state with an all-zero emission vector is silent
            if (sum(B[i]) == 0):
                silent_states.append(1)
                silent_flag = 4
            else:
                silent_states.append(0)

            # transition probability
            # cstate.out_states, cstate.out_id, out_a = ghmmhelper.extract_out(A[i])
            v = pystate.index
            #print "C state index: %i pystate index: %i order: %i" % (i, v, orders[v])
            # one row of probabilities per outgoing edge, one column per
            # transition class
            outprobs = []
            for j in range(len(hmm_dom.G.OutNeighbors(v))):
                outprobs.append([0.0] * pystate.kclasses)
            myoutid = []
            j = 0
            for outid in hmm_dom.G.OutNeighbors(v):
                myorder = orders[outid]
                myoutid.append(myorder)
                for tclass in range(pystate.kclasses):
                    outprobs[j][tclass] = hmm_dom.G.edgeWeights[tclass][(v, outid)]
                j += 1
            cstate.out_states = len(myoutid)
            cstate.out_id = ghmmwrapper.list2int_array(myoutid)
            (cstate.out_a, col_len) = ghmmhelper.list2double_matrix(outprobs)

            # set "in" probabilities
            # A_col_i = map( lambda x: x[i], A)
            # Numarray use A[,:i]
            # cstate.in_states, cstate.in_id, cstate.in_a = ghmmhelper.extract_out(A_col_i)
            inprobs = []
            for inid in hmm_dom.G.InNeighbors(v):
                myorder = orders[inid]
                # for every class in source
                inprobs.append([0.0] * pystates[myorder].kclasses)
            myinid = []
            j = 0
            for inid in hmm_dom.G.InNeighbors(v):
                myorder = orders[inid]
                myinid.append(myorder)
                # for every transition class of the source state add a prob
                for tclass in range(pystates[myorder].kclasses):
                    inprobs[j][tclass] = hmm_dom.G.edgeWeights[tclass][(inid, v)]
                j += 1
            j = 0
            #for inid in myinid:
            #    print "Transitions (%i, %i)" % (inid ,i)
            #    print inprobs[j]
            #    j += 1
            cstate.in_states = len(myinid)
            cstate.in_id = ghmmwrapper.list2int_array(myinid)
            (cstate.in_a, col_len) = ghmmhelper.list2double_matrix(inprobs)

            # fix probabilities by reestimation, else 0
            cstate.fix = 0

            # set the class determination function
            cstate.class_change = ghmmwrapper.ghmm_dp_init_class_change()
            if (pystate.transitionFunction != -1):
                transitionClassFlag = 1
                tf = hmm_dom.transitionFunctions[pystate.transitionFunction]
                # for the moment: do not use the offsets because they
                # add the risk of segmentation faults at the ends of
                # the loops or neccessitate index checks at every query
                # which is not desirable because the transition
                # functions are used in every iteration. Instead use
                # shifted input values!
                if (tf.type == "lt_sum"):
                    ghmmwrapper.set_to_lt_sum(
                        cstate.class_change,
                        int(tf.paramDict["seq_index"]),
                        float(tf.paramDict["threshold"]),
                        0,  # int(tf.paramDict["offset_x"]),
                        0)  # int(tf.paramDict["offset_y"]))
                    maxTransitionIndexContinuous = max(
                        int(tf.paramDict["seq_index"]),
                        maxTransitionIndexContinuous)
                elif (tf.type == "gt_sum"):
                    ghmmwrapper.set_to_gt_sum(
                        cstate.class_change,
                        int(tf.paramDict["seq_index"]),
                        float(tf.paramDict["threshold"]),
                        0,  # int(tf.paramDict["offset_x"]),
                        0)  # int(tf.paramDict["offset_y"]))
                    maxTransitionIndexContinuous = max(
                        int(tf.paramDict["seq_index"]),
                        maxTransitionIndexContinuous)
                elif (tf.type == "boolean_and"):
                    ghmmwrapper.set_to_boolean_and(
                        cstate.class_change,
                        int(tf.paramDict["seq_index"]),
                        0,  # int(tf.paramDict["offset_x"]),
                        0)  # int(tf.paramDict["offset_y"]))
                    maxTransitionIndexDiscrete = max(
                        int(tf.paramDict["seq_index"]),
                        maxTransitionIndexDiscrete)
                elif (tf.type == "boolean_or"):
                    ghmmwrapper.set_to_boolean_or(
                        cstate.class_change,
                        int(tf.paramDict["seq_index"]),
                        0,  # int(tf.paramDict["offset_x"]),
                        0)  # int(tf.paramDict["offset_y"]))
                    maxTransitionIndexDiscrete = max(
                        int(tf.paramDict["seq_index"]),
                        maxTransitionIndexDiscrete)
            else:
                # no transition function: every transition uses class 0
                ghmmwrapper.ghmm_dp_set_to_default_transition_class(cstate.class_change)

        cmodel.s = cstates
        cmodel.max_offset_x = maxOffsetX
        cmodel.max_offset_y = maxOffsetY
        cmodel.model_type += silent_flag
        cmodel.silent = ghmmwrapper.list2int_array(silent_states)

        distribution = DiscreteDistribution(DNA)
        emissionDomains = [Alphabet(hmm_dom.hmmAlphabets[alphabet].name.values()) for alphabet in alphabets]
        model = PairHMM(emissionDomains, distribution, cmodel)
        model.states = pystates
        model.transitionFunctions = hmm_dom.transitionFunctions
        model.usesTransitionClasses = transitionClassFlag
        model.alphabetSizes = sizes
        model.maxTransitionIndexContinuous = maxTransitionIndexContinuous
        model.maxTransitionIndexDiscrete = maxTransitionIndexDiscrete
        return model
# module-level factory instance used to load pair HMMs from XML files
PairHMMOpenXML = PairHMMOpenFactory()
|
tempbottle/ghmm
|
ghmmwrapper/ghmm.py
|
Python
|
gpl-3.0
| 197,239
|
[
"Gaussian"
] |
f25524259c3925c7f7a389afbdad3875dcb1c17a60b2abe1a65c657988047b05
|
# coding: utf-8
from __future__ import division, unicode_literals
"""
This module implements classes to perform bond valence analyses.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Oct 26, 2012"
import collections
import numpy as np
import operator
import os
from math import exp, sqrt
from six.moves import filter
from six.moves import zip
from monty.serialization import loadfn
import six
from pymatgen.core.periodic_table import Element, Specie
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.periodic_table import get_el_sp
# Let's initialize some module level properties.
# List of electronegative elements specified in M. O'Keefe, & N. Brese,
# JACS, 1991, 113(9), 3226-3229. doi:10.1021/ja00009a002.
ELECTRONEG = [Element(sym) for sym in ["H", "B", "C", "Si",
                                       "N", "P", "As", "Sb",
                                       "O", "S", "Se", "Te",
                                       "F", "Cl", "Br", "I"]]

module_dir = os.path.dirname(os.path.abspath(__file__))

# Read in BV parameters (per-element "r" and "c" values).
BV_PARAMS = {}
for k, v in loadfn(os.path.join(module_dir, "bvparam_1991.yaml")).items():
    BV_PARAMS[Element(k)] = v

# Read in yaml containing data-mined ICSD BV data:
# per-specie mean/std of the bond valence sum, and occurrence frequencies
# used as priors.
all_data = loadfn(os.path.join(module_dir, "icsd_bv.yaml"))
ICSD_BV_DATA = {Specie.from_string(sp): data
                for sp, data in all_data["bvsum"].items()}
PRIOR_PROB = {Specie.from_string(sp): data
              for sp, data in all_data["occurrence"].items()}
def calculate_bv_sum(site, nn_list, scale_factor=1.0):
    """
    Calculates the BV sum of a site.

    Args:
        site:
            The site whose bond valence sum is computed.
        nn_list:
            List of nearest neighbors in the format [(nn_site, dist), ...].
        scale_factor:
            A scale factor to be applied. This is useful for scaling distance,
            esp in the case of calculation-relaxed structures which may tend
            to under (GGA) or over bind (LDA).
    """
    central_el = Element(site.specie.symbol)
    total = 0
    for neighbor, distance in nn_list:
        neighbor_el = Element(neighbor.specie.symbol)
        # Only heteronuclear bonds involving at least one electronegative
        # element contribute.
        if central_el == neighbor_el:
            continue
        if central_el not in ELECTRONEG and neighbor_el not in ELECTRONEG:
            continue
        params1 = BV_PARAMS[central_el]
        params2 = BV_PARAMS[neighbor_el]
        r1, c1 = params1["r"], params1["c"]
        r2, c2 = params2["r"], params2["c"]
        # Combined bond valence parameter per O'Keefe & Brese (1991).
        R = r1 + r2 - r1 * r2 * (sqrt(c1) - sqrt(c2)) ** 2 / (c1 * r1 + c2 * r2)
        vij = exp((R - distance * scale_factor) / 0.31)
        # Sign convention: positive when the central atom is the less
        # electronegative of the pair.
        total += vij if central_el.X < neighbor_el.X else -vij
    return total
def calculate_bv_sum_unordered(site, nn_list, scale_factor=1):
    """
    Calculates the BV sum of a site for unordered structures.

    Args:
        site:
            The site whose bond valence sum is computed.
        nn_list:
            List of nearest neighbors in the format [(nn_site, dist), ...].
        scale_factor:
            A scale factor to be applied. This is useful for scaling distance,
            esp in the case of calculation-relaxed structures which may tend
            to under (GGA) or over bind (LDA).
    """
    # If the site "site" has N partial occupations as : f_{site}_0,
    # f_{site}_1, ... f_{site}_N of elements
    # X_{site}_0, X_{site}_1, ... X_{site}_N, and each neighbors nn_i in nn
    # has N_{nn_i} partial occupations as :
    # f_{nn_i}_0, f_{nn_i}_1, ..., f_{nn_i}_{N_{nn_i}}, then the bv sum of
    # site "site" is obtained as :
    # \sum_{nn} \sum_j^N \sum_k^{N_{nn}} f_{site}_j f_{nn_i}_k vij_full
    # where vij_full is the valence bond of the fully occupied bond
    total = 0
    for central_sp, central_occu in six.iteritems(site.species_and_occu):
        central_el = Element(central_sp.symbol)
        for neighbor, distance in nn_list:
            for neighbor_sp, neighbor_occu in six.iteritems(neighbor.species_and_occu):
                neighbor_el = Element(neighbor_sp.symbol)
                # Only heteronuclear bonds involving at least one
                # electronegative element contribute.
                if central_el == neighbor_el:
                    continue
                if central_el not in ELECTRONEG and neighbor_el not in ELECTRONEG:
                    continue
                params1 = BV_PARAMS[central_el]
                params2 = BV_PARAMS[neighbor_el]
                r1, c1 = params1["r"], params1["c"]
                r2, c2 = params2["r"], params2["c"]
                # Combined bond valence parameter per O'Keefe & Brese (1991).
                R = r1 + r2 - r1 * r2 * (sqrt(c1) - sqrt(c2)) ** 2 / (c1 * r1 + c2 * r2)
                vij = exp((R - distance * scale_factor) / 0.31)
                # Weight the fully-occupied bond valence by both occupancies;
                # sign follows the electronegativity ordering.
                contribution = central_occu * neighbor_occu * vij
                total += contribution if central_el.X < neighbor_el.X else -contribution
    return total
class BVAnalyzer(object):
"""
This class implements a maximum a posteriori (MAP) estimation method to
determine oxidation states in a structure. The algorithm is as follows:
1) The bond valence sum of all symmetrically distinct sites in a structure
is calculated using the element-based parameters in M. O'Keefe, & N. Brese,
JACS, 1991, 113(9), 3226-3229. doi:10.1021/ja00009a002.
2) The posterior probabilities of all oxidation states is then calculated
using: P(oxi_state/BV) = K * P(BV/oxi_state) * P(oxi_state), where K is
a constant factor for each element. P(BV/oxi_state) is calculated as a
Gaussian with mean and std deviation determined from an analysis of
the ICSD. The posterior P(oxi_state) is determined from a frequency
analysis of the ICSD.
3) The oxidation states are then ranked in order of decreasing probability
and the oxidation state combination that result in a charge neutral cell
is selected.
"""
CHARGE_NEUTRALITY_TOLERANCE = 0.00001
def __init__(self, symm_tol=0.1, max_radius=4, max_permutations=100000,
             distance_scale_factor=1.015,
             charge_neutrality_tolerance=CHARGE_NEUTRALITY_TOLERANCE,
             forbidden_species=None):
    """
    Initializes the BV analyzer, with useful defaults.

    Args:
        symm_tol:
            Symmetry tolerance used to determine which sites are
            symmetrically equivalent. Set to 0 to turn off symmetry.
        max_radius:
            Maximum radius in Angstrom used to find nearest neighbors.
        max_permutations:
            The maximum number of permutations of oxidation states to test.
        distance_scale_factor:
            A scale factor to be applied. This is useful for scaling
            distances, esp in the case of calculation-relaxed structures
            which may tend to under (GGA) or over bind (LDA). The default
            of 1.015 works for GGA. For experimental structure, set this to
            1.
        charge_neutrality_tolerance:
            Tolerance on the charge neutrality when unordered structures
            are at stake.
        forbidden_species:
            List of species that are forbidden (example : ["O-"] cannot be
            used) It is used when e.g. someone knows that some oxidation
            state cannot occur for some atom in a structure or list of
            structures.
    """
    self.symm_tol = symm_tol
    self.max_radius = max_radius
    self.max_permutations = max_permutations
    self.dist_scale_factor = distance_scale_factor
    self.charge_neutrality_tolerance = charge_neutrality_tolerance
    # Normalize forbidden species to Element/Specie objects.
    if forbidden_species:
        forbidden = [get_el_sp(sp) for sp in forbidden_species]
    else:
        forbidden = []
    # Filter the ICSD reference data only when something is forbidden;
    # otherwise share the module-level table.
    if len(forbidden) > 0:
        self.icsd_bv_data = {get_el_sp(specie): data
                             for specie, data in ICSD_BV_DATA.items()
                             if specie not in forbidden}
    else:
        self.icsd_bv_data = ICSD_BV_DATA
def _calc_site_probabilities(self, site, nn):
el = site.specie.symbol
bv_sum = calculate_bv_sum(site, nn,
scale_factor=self.dist_scale_factor)
prob = {}
for sp, data in self.icsd_bv_data.items():
if sp.symbol == el and sp.oxi_state != 0 and data["std"] > 0:
u = data["mean"]
sigma = data["std"]
#Calculate posterior probability. Note that constant
#factors are ignored. They have no effect on the results.
prob[sp.oxi_state] = exp(-(bv_sum - u) ** 2 / 2 /
(sigma ** 2)) \
/ sigma * PRIOR_PROB[sp]
#Normalize the probabilities
try:
prob = {k: v / sum(prob.values()) for k, v in prob.items()}
except ZeroDivisionError:
prob = {k: 0.0 for k in prob}
return prob
def _calc_site_probabilities_unordered(self, site, nn):
bv_sum = calculate_bv_sum_unordered(
site, nn, scale_factor=self.dist_scale_factor)
prob = {}
for specie, occu in six.iteritems(site.species_and_occu):
el = specie.symbol
prob[el] = {}
for sp, data in self.icsd_bv_data.items():
if sp.symbol == el and sp.oxi_state != 0 and data["std"] > 0:
u = data["mean"]
sigma = data["std"]
#Calculate posterior probability. Note that constant
#factors are ignored. They have no effect on the results.
prob[el][sp.oxi_state] = exp(-(bv_sum - u) ** 2 / 2 /
(sigma ** 2)) \
/ sigma * PRIOR_PROB[sp]
#Normalize the probabilities
try:
prob[el] = {k: v / sum(prob[el].values())
for k, v in prob[el].items()}
except ZeroDivisionError:
prob[el] = {k: 0.0 for k in prob[el]}
return prob
    def get_valences(self, structure):
        """
        Returns a list of valences for the structure. This currently works only
        for ordered structures only.
        Args:
            structure: Structure to analyze
        Returns:
            A list of valences for each site in the structure (for an ordered
            structure), e.g., [1, 1, -2] or a list of lists with the
            valences for each fractional element of each site in the
            structure (for an unordered structure),
            e.g., [[2, 4], [3], [-2], [-2], [-2]]
        Raises:
            A ValueError if the valences cannot be determined.
        """
        # Reject structures containing elements with no bond-valence params.
        els = [Element(el.symbol) for el in structure.composition.elements]
        if not set(els).issubset(set(BV_PARAMS.keys())):
            raise ValueError(
                "Structure contains elements not in set of BV parameters!"
            )
        #Perform symmetry determination and get sites grouped by symmetry.
        if self.symm_tol:
            finder = SpacegroupAnalyzer(structure, self.symm_tol)
            symm_structure = finder.get_symmetrized_structure()
            equi_sites = symm_structure.equivalent_sites
        else:
            # Symmetry disabled: every site is its own equivalence class.
            equi_sites = [[site] for site in structure]
        #Sort the equivalent sites by decreasing electronegativity.
        equi_sites = sorted(equi_sites,
                            key=lambda sites: -sites[0].species_and_occu
                            .average_electroneg)
        #Get a list of valences and probabilities for each symmetrically
        #distinct site.
        valences = []
        all_prob = []
        if structure.is_ordered:
            for sites in equi_sites:
                test_site = sites[0]
                nn = structure.get_neighbors(test_site, self.max_radius)
                prob = self._calc_site_probabilities(test_site, nn)
                all_prob.append(prob)
                val = list(prob.keys())
                #Sort valences in order of decreasing probability.
                val = sorted(val, key=lambda v: -prob[v])
                #Retain probabilities that are at least 1/100 of highest prob.
                valences.append(
                    list(filter(lambda v: prob[v] > 0.01 * prob[val[0]],
                                val)))
        else:
            # NOTE(review): full_all_prob is accumulated below but never read
            # afterwards -- looks like dead state; confirm before removing.
            full_all_prob = []
            for sites in equi_sites:
                test_site = sites[0]
                nn = structure.get_neighbors(test_site, self.max_radius)
                prob = self._calc_site_probabilities_unordered(test_site, nn)
                all_prob.append(prob)
                full_all_prob.extend(prob.values())
                vals = []
                for (elsp, occ) in get_z_ordered_elmap(
                        test_site.species_and_occu):
                    val = list(prob[elsp.symbol].keys())
                    #Sort valences in order of decreasing probability.
                    val = sorted(val, key=lambda v: -prob[elsp.symbol][v])
                    # Retain probabilities that are at least 1/100 of highest
                    # prob.
                    # NOTE(review): the cutoff used here is 1/1000, not the
                    # 1/100 used in the ordered branch above -- confirm the
                    # asymmetry is intentional.
                    vals.append(
                        list(filter(
                            lambda v: prob[elsp.symbol][v] > 0.001 * prob[
                                elsp.symbol][val[0]], val)))
                valences.append(vals)
        #make variables needed for recursion
        if structure.is_ordered:
            nsites = np.array([len(i) for i in equi_sites])
            vmin = np.array([min(i) for i in valences])
            vmax = np.array([max(i) for i in valences])
            self._n = 0
            self._best_score = 0
            self._best_vset = None
            # Score one complete assignment; reject combinations where any
            # single element spans oxidation states differing by more than 1.
            def evaluate_assignment(v_set):
                el_oxi = collections.defaultdict(list)
                for i, sites in enumerate(equi_sites):
                    el_oxi[sites[0].specie.symbol].append(v_set[i])
                max_diff = max([max(v) - min(v) for v in el_oxi.values()])
                if max_diff > 1:
                    return
                score = six.moves.reduce(
                    operator.mul, [all_prob[i][v] for i, v in enumerate(v_set)])
                if score > self._best_score:
                    self._best_vset = v_set
                    self._best_score = score
            def _recurse(assigned=[]):
                #recurses to find permutations of valences based on whether a
                #charge balanced assignment can still be found
                if self._n > self.max_permutations:
                    return
                i = len(assigned)
                # Bound the achievable total charge: prune the branch if even
                # the extreme remaining choices cannot reach neutrality.
                highest = vmax.copy()
                highest[:i] = assigned
                highest *= nsites
                highest = np.sum(highest)
                lowest = vmin.copy()
                lowest[:i] = assigned
                lowest *= nsites
                lowest = np.sum(lowest)
                if highest < 0 or lowest > 0:
                    self._n += 1
                    return
                if i == len(valences):
                    evaluate_assignment(assigned)
                    self._n += 1
                    return
                else:
                    for v in valences[i]:
                        new_assigned = list(assigned)
                        _recurse(new_assigned + [v])
        else:
            nsites = np.array([len(i) for i in equi_sites])
            # Flatten per-site valence lists; attrib maps each flattened slot
            # back to its equivalent-site index.
            tmp = []
            attrib = []
            for insite, nsite in enumerate(nsites):
                for val in valences[insite]:
                    tmp.append(nsite)
                    attrib.append(insite)
            new_nsites = np.array(tmp)
            fractions = []
            elements = []
            for sites in equi_sites:
                for sp, occu in get_z_ordered_elmap(sites[0].species_and_occu):
                    elements.append(sp.symbol)
                    fractions.append(occu)
            # NOTE(review): np.float is deprecated in newer numpy releases;
            # np.float64 (or plain float) is the modern spelling.
            fractions = np.array(fractions, np.float)
            new_valences = []
            for vals in valences:
                for val in vals:
                    new_valences.append(val)
            vmin = np.array([min(i) for i in new_valences], np.float)
            vmax = np.array([max(i) for i in new_valences], np.float)
            self._n = 0
            self._best_score = 0
            self._best_vset = None
            # Disordered analogue of evaluate_assignment: spread of at most 2
            # per element is tolerated here (vs 1 in the ordered branch).
            def evaluate_assignment(v_set):
                el_oxi = collections.defaultdict(list)
                jj = 0
                for i, sites in enumerate(equi_sites):
                    for specie, occu in get_z_ordered_elmap(
                            sites[0].species_and_occu):
                        el_oxi[specie.symbol].append(v_set[jj])
                        jj += 1
                max_diff = max([max(v) - min(v) for v in el_oxi.values()])
                if max_diff > 2:
                    return
                score = six.moves.reduce(
                    operator.mul,
                    [all_prob[attrib[iv]][elements[iv]][vv]
                     for iv, vv in enumerate(v_set)])
                if score > self._best_score:
                    self._best_vset = v_set
                    self._best_score = score
            def _recurse(assigned=[]):
                #recurses to find permutations of valences based on whether a
                #charge balanced assignment can still be found
                if self._n > self.max_permutations:
                    return
                i = len(assigned)
                # Charge bounds are weighted by occupancy fractions here, and
                # neutrality is checked within a finite tolerance.
                highest = vmax.copy()
                highest[:i] = assigned
                highest *= new_nsites
                highest *= fractions
                highest = np.sum(highest)
                lowest = vmin.copy()
                lowest[:i] = assigned
                lowest *= new_nsites
                lowest *= fractions
                lowest = np.sum(lowest)
                if (highest < -self.charge_neutrality_tolerance or
                        lowest > self.charge_neutrality_tolerance):
                    self._n += 1
                    return
                if i == len(new_valences):
                    evaluate_assignment(assigned)
                    self._n += 1
                    return
                else:
                    for v in new_valences[i]:
                        new_assigned = list(assigned)
                        _recurse(new_assigned + [v])
        # Depth-first search over candidate oxidation states with pruning.
        _recurse()
        if self._best_vset:
            if structure.is_ordered:
                assigned = {}
                for val, sites in zip(self._best_vset, equi_sites):
                    for site in sites:
                        assigned[site] = val
                return [int(assigned[site]) for site in structure]
            else:
                # Re-group the flat best assignment back into per-site lists.
                assigned = {}
                new_best_vset = []
                for ii in range(len(equi_sites)):
                    new_best_vset.append(list())
                for ival, val in enumerate(self._best_vset):
                    new_best_vset[attrib[ival]].append(val)
                for val, sites in zip(new_best_vset, equi_sites):
                    for site in sites:
                        assigned[site] = val
                return [[int(frac_site) for frac_site in assigned[site]]
                        for site in structure]
        else:
            raise ValueError("Valences cannot be assigned!")
def get_oxi_state_decorated_structure(self, structure):
"""
Get an oxidation state decorated structure. This currently works only
for ordered structures only.
Args:
structure: Structure to analyze
Returns:
A modified structure that is oxidation state decorated.
Raises:
ValueError if the valences cannot be determined.
"""
s = Structure.from_sites(structure.sites)
if structure.is_ordered:
valences = self.get_valences(structure)
s.add_oxidation_state_by_site(valences)
else:
valences = self.get_valences(structure)
s = add_oxidation_state_by_site_fraction(s, valences)
return s
def get_z_ordered_elmap(comp):
    """
    Arbitrary ordered elmap on the elements/species of a composition of a
    given site in an unordered structure. Returns a list of tuples (
    element_or_specie: occupation) in the arbitrary order.
    The arbitrary order is based on the Z of the element and the smallest
    fractional occupations first.
    Example : {"Ni3+": 0.2, "Ni4+": 0.2, "Cr3+": 0.15, "Zn2+": 0.34,
    "Cr4+": 0.11} will yield the species in the following order :
    Cr4+, Cr3+, Ni3+, Ni4+, Zn2+ ... or
    Cr4+, Cr3+, Ni4+, Ni3+, Zn2+
    """
    pairs = [(elsp, comp[elsp]) for elsp in comp.keys()]
    pairs.sort()
    return pairs
def add_oxidation_state_by_site_fraction(structure, oxidation_states):
    """
    Add oxidation states to a structure by fractional site.
    Args:
        oxidation_states (list): List of list of oxidation states for each
            site fraction for each site.
            E.g., [[2, 4], [3], [-2], [-2], [-2]]
    """
    try:
        for idx, site in enumerate(structure):
            # Build the oxidation-state-decorated species map for this site,
            # pairing each fractional component (in z-ordered order) with
            # its assigned oxidation state.
            species_map = collections.defaultdict(float)
            ordered = get_z_ordered_elmap(site.species_and_occu)
            for frac_idx, (el, occu) in enumerate(ordered):
                sp = Specie(el.symbol, oxidation_states[idx][frac_idx])
                species_map[sp] += occu
            structure[idx] = species_map
        return structure
    except IndexError:
        raise ValueError("Oxidation state of all sites must be "
                         "specified in the list.")
|
Dioptas/pymatgen
|
pymatgen/analysis/bond_valence.py
|
Python
|
mit
| 21,652
|
[
"Gaussian",
"pymatgen"
] |
468027eaf19468d5f9d3f2895bf65f0179aba54c3a1d28d348d569c8185651f3
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def mean_and_std(a, axis=None, weights=None, with_mean=True, with_std=True,
                 ddof=0):
    """Compute the (optionally weighted) average and standard deviation.

    Parameters
    ----------
    a : array_like
        Values whose statistics are computed.
    axis : int, optional
        Axis along which to compute; the flattened array by default.
    weights : array_like, optional
        Per-value weights, either 1-D matching `a` along `axis` or the same
        shape as `a`. If omitted, all values weigh equally.
    with_mean : bool, optional, defaults to True
        Compute the average if True.
    with_std : bool, optional, defaults to True
        Compute the standard deviation if True.
    ddof : int, optional, defaults to 0
        Delta degrees of freedom: variance divides by ``n - ddof``. The
        default is the maximum likelihood estimator.

    Returns
    -------
    average, std
        The requested statistics along `axis`; any statistic not requested
        is returned as `None`.
    """
    if not (with_mean or with_std):
        raise ValueError("Either the mean or standard deviation need to be"
                         " computed.")
    a = np.asarray(a)

    # Unweighted case: defer entirely to numpy's own reductions.
    if weights is None:
        avg = a.mean(axis=axis) if with_mean else None
        std = a.std(axis=axis, ddof=ddof) if with_std else None
        return avg, std

    avg = np.average(a, axis=axis, weights=weights)
    std = None
    if with_std:
        if axis is None:
            variance = np.average((a - avg) ** 2, weights=weights)
        else:
            # Roll the reduced axis to the front so the subtraction
            # broadcasts for multidimensional input; numpy has no weighted
            # std, but this formulation is stable and fast.
            shifted = np.rollaxis(a, axis) - avg
            variance = np.average(shifted ** 2, axis=0, weights=weights)
        if ddof:  # skip the rescaling when no correction is requested
            n = a.size if axis is None else a.shape[axis]
            variance *= n / (n - ddof)
        std = np.sqrt(variance)
    if not with_mean:
        avg = None
    return avg, std
@experimental(as_of="0.4.0")
def scale(a, weights=None, with_mean=True, with_std=True, ddof=0, copy=True):
    """Standardize the columns of `a` to weighted mean 0 and/or std 1.

    Parameters
    ----------
    a : array_like
        2D array whose columns are standardized according to the weights.
    weights : array_like, optional
        Weights for the columns of `a`; unweighted by default.
    with_mean : bool, optional, defaults to True
        Center columns to have 0 weighted mean.
    with_std : bool, optional, defaults to True
        Scale columns to have unit weighted std.
    ddof : int, optional, defaults to 0
        Variance divides by ``n - ddof``; the default is the maximum
        likelihood estimator.
    copy : bool, optional, defaults to True
        Return a new array rather than standardizing in place.

    Returns
    -------
    2D ndarray
        Scaled array.

    Notes
    -----
    Columns whose std is 0 are divided by 1 instead, avoiding division
    by zero.
    """
    # np.array with copy=False behaves like asarray, so this reproduces
    # the copy-then-convert sequence in a single call.
    a = np.array(a, dtype=np.float64, copy=copy)
    avg, std = mean_and_std(a, axis=0, weights=weights, with_mean=with_mean,
                            with_std=with_std, ddof=ddof)
    if with_mean:
        a -= avg
    if with_std:
        std[std == 0] = 1.0
        a /= std
    return a
@experimental(as_of="0.4.0")
def svd_rank(M_shape, S, tol=None):
    """Matrix rank of `M` given its singular values `S`.

    Uses the same default tolerance rationale as `np.linalg.matrix_rank`,
    but allows reusing a precomputed SVD instead of redoing it."""
    if tol is None:
        # Default tolerance scales with the largest singular value, the
        # larger matrix dimension, and the dtype's machine epsilon.
        tol = np.finfo(S.dtype).eps * max(M_shape) * S.max()
    return np.sum(S > tol)
@experimental(as_of="0.4.0")
def corr(x, y=None):
    """Compute correlation between columns of `x`, or of `x` and `y`.

    Correlation is the covariance of columnwise-standardized matrices:
    each input is centered and scaled to unit variance, then their
    covariance is taken.

    Parameters
    ----------
    x : 2D array_like
        Matrix of shape (n, p); correlation between its columns is
        computed.
    y : 2D array_like, optional
        Matrix of shape (n, q). When given, correlation is computed
        between the columns of `x` and the columns of `y` instead.

    Returns
    -------
    correlation
        Matrix of computed correlations: shape (p, p) without `y`,
        shape (p, q) with it.
    """
    x = np.asarray(x)
    if y is None:
        x = scale(x)
        y = x
    else:
        y = np.asarray(y)
        if y.shape[0] != x.shape[0]:
            raise ValueError("Both matrices must have the same number of rows")
        x, y = scale(x), scale(y)
    # scale() divided by n (ddof=0), so dividing the cross-product by n
    # here yields the correlation.
    return x.T.dot(y) / x.shape[0]
@experimental(as_of="0.4.0")
def e_matrix(distance_matrix):
    """Compute E matrix from a distance matrix.

    Elementwise ``-D**2 / 2`` (Eq. 9.20 in Legendre & Legendre 1998)."""
    return -0.5 * distance_matrix * distance_matrix
def f_matrix(E_matrix):
    """Compute F matrix from E matrix (double centring).

    For each element, subtract its row mean and column mean and add back
    the grand mean. Eq. 9.21 in Legendre & Legendre 1998."""
    grand_mean = E_matrix.mean()
    by_row = E_matrix.mean(axis=1, keepdims=True)
    by_col = E_matrix.mean(axis=0, keepdims=True)
    return E_matrix - by_row - by_col + grand_mean
|
jdrudolph/scikit-bio
|
skbio/stats/ordination/_utils.py
|
Python
|
bsd-3-clause
| 7,264
|
[
"scikit-bio"
] |
b7ccf0ce20b14fac34c3bddc74e88dbae126179149ccf78f7ac7a6d84aff13a2
|
# $Id: get_projects.py 2016-12-17 $
# Author: Coen Meerbeek <coen@buzzardlabs.com>
# Copyright: BuzzarLabs 2016
"""
This file retrieves projects from an Octopus application
"""
import logging, json, requests, time, sys
import ConfigParser
import splunk.Intersplunk as isp
import octopus_common
# NOTE: this is a Python 2 Splunk modular-input script (print statement,
# ConfigParser import) -- it will not run unmodified on Python 3.
# Parse octopus.conf for configuration settings
stanza = octopus_common.getSelfConfStanza("octopus")
protocol = stanza['protocol']
hostname = stanza['hostname']
apikey = stanza['apikey']
# Setup logger object
logger = octopus_common.setup_logging()
# Log a start timestamp for this invocation.
logger.info(time.time())
try:
  # Build the Octopus Deploy REST endpoint listing all projects.
  octopus_url = protocol + "://" + hostname + "/api/projects/all"
  # Setup response object and execute GET request
  # Authentication is via the X-Octopus-ApiKey header.
  response = requests.get(
    url = octopus_url,
    headers = {
      "X-Octopus-ApiKey": apikey,
    },
  )
  # Raise on any non-2xx HTTP status so it is logged below.
  response.raise_for_status()
  # Handle response
  json_response = json.loads(response.content)
  # Iterate projects and print results to Splunk
  # (one JSON document per line on stdout, as Splunk expects).
  for project in json_response:
    print json.dumps(project)
  sys.exit(0)
# Catch exceptions if needed
except Exception as e:
  # Best-effort error surface: log the traceback and report to Splunk.
  logger.exception("Exception: " + str(e))
  isp.generateErrorResults(str(e))
|
cmeerbeek/splunk-addon-octopus-deploy
|
TA-OctopusNT-Fwd/bin/get_projects.py
|
Python
|
mit
| 1,165
|
[
"Octopus"
] |
d4ee0b17e94df00fc9011096b0ac840f8e886f42a5393195f801d4b74f909534
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-install-db
# Author : Ricardo Graciani
########################################################################
"""
Create a new DB on the local MySQL server
"""
__RCSID__ = "$Id$"
#
from DIRAC.Core.Utilities import InstallTools
from DIRAC.FrameworkSystem.Utilities import MonitoringUtilities
#
from DIRAC import gConfig
# Abort the script on the first InstallTools failure.
InstallTools.exitOnError = True
#
from DIRAC.Core.Base import Script
# Usage banner: first line of the module docstring plus argument help.
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
                                     'Usage:',
                                     '  %s [option|cfgFile] ... DB ...' % Script.scriptName,
                                     'Arguments:',
                                     '  DB: Name of the Database (mandatory)'] ) )
Script.parseCommandLine()
args = Script.getPositionalArgs()
#
# At least one database name is required.
if len( args ) < 1:
  Script.showHelp()
  exit( -1 )
# Prompt for / load MySQL credentials before installing anything.
InstallTools.getMySQLPasswords()
# NOTE: Python 2 script (print statements).
for db in args:
  result = InstallTools.installDatabase( db )
  if not result['OK']:
    print "ERROR: failed to correctly install %s" % db, result['Message']
  else:
    # Register the freshly installed DB in the Configuration Service.
    extension, system = result['Value']
    InstallTools.addDatabaseOptionsToCS( gConfig, system, db, overwrite = True )
    # InstalledComponentsDB is the monitoring DB itself, so it is not
    # registered in the installation-monitoring tables.
    if db != 'InstalledComponentsDB':
      result = MonitoringUtilities.monitorInstallation( 'DB', system, db )
      if not result[ 'OK' ]:
        print "ERROR: failed to register installation in database: %s" % result[ 'Message' ]
|
marcelovilaca/DIRAC
|
Core/scripts/dirac-install-db.py
|
Python
|
gpl-3.0
| 1,527
|
[
"DIRAC"
] |
97507cf195ec61a46a358aba22776120bdb0f9da3fc5d6ff0bb952ba98161685
|
"""
This sample demonstrates a simple skill built with the Amazon Alexa Skills Kit.
The Intent Schema, Built-in Slots, and Sample Utterances for this skill, as well
as testing instructions are located at http://amzn.to/1LzFrj6
For additional samples, visit the Alexa Skills Kit Getting Started guide at
http://amzn.to/1LGWsLG
"""
from __future__ import print_function
from twilio.rest import TwilioRestClient
from loadData import rawToTime, getNumber
from config import *
# SECURITY: live-looking Twilio credentials are hard-coded and committed to
# source control. These should be rotated immediately and loaded from
# environment variables or a config store instead.
accountSid = 'ACcf54ef49063aaa784c99aec82d7f16c2';
authToken = '31f817a48ee7cd461c07c57490eac6ce';
fromNumber = '+19163183442';
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
    """Assemble the Alexa speechlet payload: speech, card, and reprompt."""
    speech = {'type': 'PlainText', 'text': output}
    card = {
        'type': 'Simple',
        'title': 'SessionSpeechlet - ' + title,
        'content': 'SessionSpeechlet - ' + output,
    }
    reprompt = {
        'outputSpeech': {'type': 'PlainText', 'text': reprompt_text},
    }
    return {
        'outputSpeech': speech,
        'card': card,
        'reprompt': reprompt,
        'shouldEndSession': should_end_session,
    }
def build_response(session_attributes, speechlet_response):
    """Wrap a speechlet response in the top-level Alexa response envelope."""
    envelope = {'version': '1.0'}
    envelope['sessionAttributes'] = session_attributes
    envelope['response'] = speechlet_response
    return envelope
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
    """Build the greeting returned when the skill is launched."""
    title = "Welcome"
    speech = "Hello, welcome to the Tardy skill."
    # Spoken again if the user stays silent or says something unrecognized.
    reprompt = "You can ask me to send a message to your friends."
    speechlet = build_speechlet_response(title, speech, reprompt, False)
    # Keep the session open so the user can issue an intent next.
    return build_response({}, speechlet)
def sarah_intent_handler(intent):
    """Respond to the SarahIntent easter egg; keeps the session open."""
    return build_response(None, build_speechlet_response(
        "Sarah", "Sarah is the best", None, False))
def formatMessage(userName, targetName, time, place):
    """Render the invitation text sent to each recipient."""
    return "Hello {0}, {1} would like to meet at {2} at {3}.".format(
        userName.title(), targetName.title(), place.title(), time)
def getInfo(userId, target, time, place):
    """Look up contact info for each target and text them a formatted invite.

    Args:
        userId: id of the sending user (passed through to getNumber).
        target: iterable of recipient identifiers to look up.
        time: raw time slot value; converted via rawToTime.
        place: meeting place slot value.

    Fixes over the original:
    - `username = arr[0]` typo meant the sender name (`userName`) was never
      set, so every message greeted an empty name.
    - `a[1]` referenced an undefined name (NameError); `arr[1]` was intended.
    - `getNumber(userId, target)` passed the whole list instead of the loop
      element, so every iteration looked up the same thing.
    """
    recipients = {}
    time = rawToTime(time)
    userName = ""
    for person in target:
        # NOTE(review): assumes getNumber returns
        # (sender_name, recipient_name, phone_number) -- confirm against
        # loadData.getNumber.
        arr = getNumber(userId, person)
        if userName == "":
            userName = arr[0]
        recipients[arr[1]] = [arr[2],
                              formatMessage(userName, arr[1], time, place)]
    for key in recipients:
        sendText(recipients[key][0], recipients[key][1])
def twilio_intent_handler(intent):
    """Handle TwilioIntent: read name/time/place slots and text the invite.

    NOTE(review): the broad `except Exception` hides every failure behind a
    generic spoken apology -- consider logging the exception for debugging.
    """
    card_title = "Twilio"
    #print(intent['slots'])
    # Slot values as resolved by Alexa's interaction model.
    target = intent["slots"]["nameSlot"]["value"]
    time = intent["slots"]["timeSlot"]["value"]
    place = intent["slots"]["placeSlot"]["value"]
    try:
        #userID = kijZjJJ5ozPZxfeHYfjh3zd3TUh1
        # Hard-coded user id -- presumably the demo account; verify.
        getInfo('kijZjJJ5ozPZxfeHYfjh3zd3TUh1', target, time, place);
        #cellNumber = ""
        #messageText = ""
        #slots = intent['slots']
        #cellNumber = slots['numberSlot']['value']
        #messageText = slots['msgSlot']['value']
        # call the method to send text
        speech_output = "Message sent."
    except Exception:
        speech_output = "Sorry, please try again."
    # Setting reprompt_text to None signifies that we do not want to reprompt
    # the user. If the user does not respond or says something that is not
    # understood, the session will end.
    return build_response(None, build_speechlet_response(
        card_title, speech_output, None, False))
#number,message
def sendText(to_num, msg_text):
    """Send *msg_text* to *to_num* via Twilio.

    Returns True on success, False on any failure (best-effort semantics).
    """
    try:
        client = TwilioRestClient(accountSid, authToken)
        client.messages.create(
            to=to_num,
            # Fix: was the undefined name `from_num`, which raised NameError
            # on every call; the module-level constant is `fromNumber`.
            from_=fromNumber,
            body=msg_text
        )
        return True
    except Exception as e:
        print("Failed to send message: ")
        # NOTE(review): `e.code` only exists on Twilio exceptions; a generic
        # exception here would raise AttributeError -- confirm intent.
        print(e.code)
        return False
def help_intent_handler(intent):
    """Tell the user what the skill can do; keeps the session open."""
    return build_response(None, build_speechlet_response(
        "Help", "Ask me to send someone a text.", None, False))
def misunderstood_handler(intent):
    """Fallback for unrecognized intents; apologizes and ends the session."""
    return build_response(None, build_speechlet_response(
        "Misunderstood", "Sorry, please try again.", None, True))
def handle_session_end_request():
    """Build the farewell response; ends the session."""
    farewell = ("Thank you for trying our Tardy skill. "
                "Have a great time at Hack Illinois! ")
    # shouldEndSession=True tells Alexa to close the skill session.
    return build_response(None, build_speechlet_response(
        "Session Ended", farewell, None, True))
# --------------- Events ------------------
def on_launch(launch_request):
    """Handle a bare skill launch, where no intent was specified."""
    print("on_launch requestId=" + launch_request['requestId'])
    # Greet the user and keep the session open for a follow-up intent.
    return get_welcome_response()
def on_intent(intent_request):
    """Route an IntentRequest to the matching handler."""
    print("on_intent requestId=" + intent_request['requestId'])
    intent = intent_request['intent']
    name = intent['name']
    # Session-ending built-ins take no intent argument, so handle them first.
    if name == "AMAZON.CancelIntent" or name == "AMAZON.StopIntent":
        return handle_session_end_request()
    dispatch = {
        "SarahIntent": sarah_intent_handler,
        "TwilioIntent": twilio_intent_handler,
        "HelpIntent": help_intent_handler,
    }
    handler = dispatch.get(name, misunderstood_handler)
    return handler(intent)
# --------------- Main handler ------------------
def lambda_handler(event, context):
    """ Route the incoming request based on type (LaunchRequest, IntentRequest,
    etc.) The JSON body of the request is provided in the event parameter.
    """
    session_attributes = {}
    # NOTE(review): application-id validation is commented out below, so any
    # Alexa skill can invoke this function -- re-enable before production.
    #applicationId = event['session']['application']['applicationId']
    #if applicationId != TWILIO_APPLICATION_ID:
    #    should_end_session = True
    #    bad_request_output = "Bad Request"
    #    print("Bad ApplicationId Received: "+applicationId)
    #    return build_response(session_attributes, build_speechlet_response("Twilio", bad_request_output, None, should_end_session))
    # Dispatch on the top-level request type; other types (e.g.
    # SessionEndedRequest) fall through and return None.
    if event['request']['type'] == "LaunchRequest":
        return on_launch(event['request'])
    elif event['request']['type'] == "IntentRequest":
        return on_intent(event['request'])
|
MaxLinCode/tardy-HackIllinois-2017
|
backend/lambda_function.py
|
Python
|
mit
| 6,926
|
[
"VisIt"
] |
b9b445b6f6e9b6d78659898c09462b3064dbe824696216c2ec07a6fe2880ce0e
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2020-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Concrete collection candidate management helper module."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import tarfile
import subprocess
import typing as t
from contextlib import contextmanager
from hashlib import sha256
from urllib.error import URLError
from urllib.parse import urldefrag
from shutil import rmtree
from tempfile import mkdtemp
if t.TYPE_CHECKING:
from ansible.galaxy.dependency_resolution.dataclasses import (
Candidate, Requirement,
)
from ansible.galaxy.token import GalaxyToken
from ansible.errors import AnsibleError
from ansible.galaxy import get_collections_galaxy_meta_info
from ansible.galaxy.dependency_resolution.dataclasses import _GALAXY_YAML
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.common.yaml import yaml_load
from ansible.module_utils.six import raise_from
from ansible.module_utils.urls import open_url
from ansible.utils.display import Display
import yaml
display = Display()
MANIFEST_FILENAME = 'MANIFEST.json'
class ConcreteArtifactsManager:
"""Manager for on-disk collection artifacts.
It is responsible for:
* downloading remote collections from Galaxy-compatible servers and
direct links to tarballs or SCM repositories
* keeping track of local ones
* keeping track of Galaxy API tokens for downloads from Galaxy'ish
as well as the artifact hashes
* keeping track of Galaxy API signatures for downloads from Galaxy'ish
* caching all of above
* retrieving the metadata out of the downloaded artifacts
"""
    def __init__(self, b_working_directory, validate_certs=True, keyring=None, timeout=60):
        # type: (bytes, bool, str, int) -> None
        """Initialize ConcreteArtifactsManager caches and constraints.

        :param b_working_directory: bytes path used as download/scratch area.
        :param validate_certs: whether TLS certificates are verified when
            downloading artifacts.
        :param keyring: path to the GnuPG keyring used for signature checks.
        :param timeout: per-request timeout in seconds for downloads.
        """
        self._validate_certs = validate_certs  # type: bool
        # Caches keyed by source/candidate; populated lazily on first access.
        self._artifact_cache = {}  # type: dict[bytes, bytes]
        self._galaxy_artifact_cache = {}  # type: dict[Candidate | Requirement, bytes]
        self._artifact_meta_cache = {}  # type: dict[bytes, dict[str, str | list[str] | dict[str, str] | None]]
        self._galaxy_collection_cache = {}  # type: dict[Candidate | Requirement, tuple[str, str, GalaxyToken]]
        self._galaxy_collection_origin_cache = {}  # type: dict[Candidate, tuple[str, list[dict[str, str]]]]
        self._b_working_directory = b_working_directory  # type: bytes
        self._supplemental_signature_cache = {}  # type: dict[str, str]
        self._keyring = keyring  # type: str
        self.timeout = timeout  # type: int
    @property
    def keyring(self):
        """Path to the GnuPG keyring used for signature verification."""
        return self._keyring
def get_galaxy_artifact_source_info(self, collection):
# type: (Candidate) -> dict[str, str | list[dict[str, str]]]
server = collection.src.api_server
try:
download_url, _dummy, _dummy = self._galaxy_collection_cache[collection]
signatures_url, signatures = self._galaxy_collection_origin_cache[collection]
except KeyError as key_err:
raise RuntimeError(
'The is no known source for {coll!s}'.
format(coll=collection),
) from key_err
return {
"format_version": "1.0.0",
"namespace": collection.namespace,
"name": collection.name,
"version": collection.ver,
"server": server,
"version_url": signatures_url,
"download_url": download_url,
"signatures": signatures,
}
def get_galaxy_artifact_path(self, collection):
# type: (Candidate | Requirement) -> bytes
"""Given a Galaxy-stored collection, return a cached path.
If it's not yet on disk, this method downloads the artifact first.
"""
try:
return self._galaxy_artifact_cache[collection]
except KeyError:
pass
try:
url, sha256_hash, token = self._galaxy_collection_cache[collection]
except KeyError as key_err:
raise_from(
RuntimeError(
'The is no known source for {coll!s}'.
format(coll=collection),
),
key_err,
)
display.vvvv(
"Fetching a collection tarball for '{collection!s}' from "
'Ansible Galaxy'.format(collection=collection),
)
try:
b_artifact_path = _download_file(
url,
self._b_working_directory,
expected_hash=sha256_hash,
validate_certs=self._validate_certs,
token=token,
) # type: bytes
except URLError as err:
raise_from(
AnsibleError(
'Failed to download collection tar '
"from '{coll_src!s}': {download_err!s}".
format(
coll_src=to_native(collection.src),
download_err=to_native(err),
),
),
err,
)
else:
display.vvv(
"Collection '{coll!s}' obtained from "
'server {server!s} {url!s}'.format(
coll=collection, server=collection.src or 'Galaxy',
url=collection.src.api_server if collection.src is not None
else '',
)
)
self._galaxy_artifact_cache[collection] = b_artifact_path
return b_artifact_path
    def get_artifact_path(self, collection):
        # type: (Candidate | Requirement) -> bytes
        """Given a concrete collection pointer, return a cached path.

        If it's not yet on disk, this method downloads the artifact first.

        Handles URL, SCM, file, dir, and subdirs collection types; raises
        AnsibleError on download failure and RuntimeError for types that
        cannot be materialized (e.g. online index pointers).
        """
        try:
            return self._artifact_cache[collection.src]
        except KeyError:
            pass
        # NOTE: SCM needs to be special-cased as it may contain either
        # NOTE: one collection in its root, or a number of top-level
        # NOTE: collection directories instead.
        # NOTE: The idea is to store the SCM collection as unpacked
        # NOTE: directory structure under the temporary location and use
        # NOTE: a "virtual" collection that has pinned requirements on
        # NOTE: the directories under that SCM checkout that correspond
        # NOTE: to collections.
        # NOTE: This brings us to the idea that we need two separate
        # NOTE: virtual Requirement/Candidate types --
        # NOTE: (single) dir + (multidir) subdirs
        if collection.is_url:
            display.vvvv(
                "Collection requirement '{collection!s}' is a URL "
                'to a tar artifact'.format(collection=collection.fqcn),
            )
            try:
                b_artifact_path = _download_file(
                    collection.src,
                    self._b_working_directory,
                    expected_hash=None,  # NOTE: URLs don't support checksums
                    validate_certs=self._validate_certs,
                    timeout=self.timeout
                )
            except URLError as err:
                raise_from(
                    AnsibleError(
                        'Failed to download collection tar '
                        "from '{coll_src!s}': {download_err!s}".
                        format(
                            coll_src=to_native(collection.src),
                            download_err=to_native(err),
                        ),
                    ),
                    err,
                )
        elif collection.is_scm:
            # Check out the repo at the requested version and extract it.
            b_artifact_path = _extract_collection_from_git(
                collection.src,
                collection.ver,
                self._b_working_directory,
            )
        elif collection.is_file or collection.is_dir or collection.is_subdirs:
            # Already on disk: just normalize the path to bytes.
            b_artifact_path = to_bytes(collection.src)
        else:
            # NOTE: This may happen `if collection.is_online_index_pointer`
            raise RuntimeError(
                'The artifact is of an unexpected type {art_type!s}'.
                format(art_type=collection.type)
            )
        self._artifact_cache[collection.src] = b_artifact_path
        return b_artifact_path
def _get_direct_collection_namespace(self, collection):
# type: (Candidate) -> str | None
return self.get_direct_collection_meta(collection)['namespace'] # type: ignore[return-value]
def _get_direct_collection_name(self, collection):
# type: (Candidate) -> str | None
return self.get_direct_collection_meta(collection)['name'] # type: ignore[return-value]
def get_direct_collection_fqcn(self, collection):
    # type: (Candidate) -> str | None
    """Extract FQCN from the given on-disk collection artifact.

    If the collection is virtual, ``None`` is returned instead
    of a string.
    """
    if collection.is_virtual:
        # NOTE: should it be something like "<virtual>"?
        return None

    namespace = self._get_direct_collection_namespace(collection)  # type: ignore[arg-type]
    name = self._get_direct_collection_name(collection)
    return '.'.join((namespace, name))  # type: ignore[type-var]
def get_direct_collection_version(self, collection):
    # type: (Candidate | Requirement) -> str
    """Extract version from the given on-disk collection artifact."""
    meta = self.get_direct_collection_meta(collection)
    return meta['version']  # type: ignore[return-value]
def get_direct_collection_dependencies(self, collection):
    # type: (Candidate | Requirement) -> dict[str, str]
    """Extract deps from the given on-disk collection artifact."""
    meta = self.get_direct_collection_meta(collection)
    return meta['dependencies']  # type: ignore[return-value]
def get_direct_collection_meta(self, collection):
    # type: (Candidate | Requirement) -> dict[str, str | dict[str, str] | list[str] | None]
    """Extract meta from the given on-disk collection artifact.

    Results are cached per ``collection.src``. For URL/file artifacts
    the metadata comes from the tarball manifest, for directories from
    the on-disk manifest/galaxy.yml; SCM and subdirs collections get a
    synthetic "virtual" metadata dict with wildcard versions.
    """
    try:  # FIXME: use unique collection identifier as a cache key?
        return self._artifact_meta_cache[collection.src]
    except KeyError:
        # Not cached yet -- resolve the artifact on disk first.
        b_artifact_path = self.get_artifact_path(collection)

        if collection.is_url or collection.is_file:
            collection_meta = _get_meta_from_tar(b_artifact_path)
        elif collection.is_dir:  # should we just build a coll instead?
            # FIXME: what if there's subdirs?
            try:
                collection_meta = _get_meta_from_dir(b_artifact_path)
            except LookupError as lookup_err:
                raise_from(
                    AnsibleError(
                        'Failed to find the collection dir deps: {err!s}'.
                        format(err=to_native(lookup_err)),
                    ),
                    lookup_err,
                )
        elif collection.is_scm:
            # Virtual collection: depend on the checked-out directory
            collection_meta = {
                'name': None,
                'namespace': None,
                'dependencies': {to_native(b_artifact_path): '*'},
                'version': '*',
            }
        elif collection.is_subdirs:
            # Virtual collection spanning several namespace dirs
            collection_meta = {
                'name': None,
                'namespace': None,
                # NOTE: Dropping b_artifact_path since it's based on src anyway
                'dependencies': dict.fromkeys(
                    map(to_native, collection.namespace_collection_paths),
                    '*',
                ),
                'version': '*',
            }
        else:
            raise RuntimeError

        self._artifact_meta_cache[collection.src] = collection_meta
        return collection_meta
def save_collection_source(self, collection, url, sha256_hash, token, signatures_url, signatures):
    # type: (Candidate, str, str, GalaxyToken, str, list[dict[str, str]]) -> None
    """Store collection URL, SHA256 hash and Galaxy API token.

    This is a hook that is supposed to be called before attempting to
    download Galaxy-based collections with ``get_galaxy_artifact_path()``.
    """
    download_info = (url, sha256_hash, token)
    origin_info = (signatures_url, signatures)
    self._galaxy_collection_cache[collection] = download_info
    self._galaxy_collection_origin_cache[collection] = origin_info
@classmethod
@contextmanager
def under_tmpdir(
        cls,
        temp_dir_base,  # type: str
        validate_certs=True,  # type: bool
        keyring=None,  # type: str
):  # type: (...) -> t.Iterator[ConcreteArtifactsManager]
    """Custom ConcreteArtifactsManager constructor with temp dir.

    This method returns a context manager that allocates and cleans
    up a temporary directory for caching the collection artifacts
    during the dependency resolution process.

    :param temp_dir_base: directory under which the scratch dir is made.
    :param validate_certs: whether TLS certificates are verified.
    :param keyring: optional GnuPG keyring path forwarded to the manager.
    """
    # NOTE: Can't use `with tempfile.TemporaryDirectory:`
    # NOTE: because it's not in Python 2 stdlib.
    temp_path = mkdtemp(
        dir=to_bytes(temp_dir_base, errors='surrogate_or_strict'),
    )
    b_temp_path = to_bytes(temp_path, errors='surrogate_or_strict')
    try:
        yield cls(b_temp_path, validate_certs, keyring=keyring)
    finally:
        # Always remove the scratch dir, even when the caller raised
        rmtree(b_temp_path)
def parse_scm(collection, version):
    """Extract name, version, path and subdir out of the SCM pointer."""
    # An explicit `,<version>` suffix on the pointer wins over the
    # `version` argument; a wildcard/empty version falls back to HEAD.
    if ',' in collection:
        collection, version = collection.split(',', 1)
    elif version == '*' or not version:
        version = 'HEAD'

    path = collection[4:] if collection.startswith('git+') else collection
    path, fragment = urldefrag(path)
    fragment = fragment.strip(os.path.sep)

    if path.endswith(os.path.sep + '.git'):
        # Trailing `/.git` component: the repo name is the dir above it
        name = path.split(os.path.sep)[-2]
    elif '://' not in path and '@' not in path:
        # Looks like a plain local filesystem path
        name = path
    else:
        # Remote URL: the last path component is the repo name
        name = path.split('/')[-1]

    if name.endswith('.git'):
        name = name[:-4]

    return name, version, path, fragment
def _extract_collection_from_git(repo_url, coll_ver, b_path):
    """Clone the SCM pointer under ``b_path`` and return the collection root.

    The returned bytes path is either the fresh checkout directory or, when
    the pointer carries a ``#fragment``, the sub-directory it names.
    """
    name, version, git_url, fragment = parse_scm(repo_url, coll_ver)
    b_checkout_path = mkdtemp(
        dir=b_path,
        prefix=to_bytes(name, errors='surrogate_or_strict'),
    )  # type: bytes
    # Perform a shallow clone if simply cloning HEAD
    if version == 'HEAD':
        git_clone_cmd = 'git', 'clone', '--depth=1', git_url, to_text(b_checkout_path)
    else:
        git_clone_cmd = 'git', 'clone', git_url, to_text(b_checkout_path)
        # FIXME: '--branch', version

    try:
        subprocess.check_call(git_clone_cmd)
    except subprocess.CalledProcessError as proc_err:
        raise_from(
            AnsibleError(  # should probably be LookupError
                'Failed to clone a Git repository from `{repo_url!s}`.'.
                format(repo_url=to_native(git_url)),
            ),
            proc_err,
        )

    # Move the checkout to the requested revision (branch/tag/commit)
    git_switch_cmd = 'git', 'checkout', to_text(version)
    try:
        subprocess.check_call(git_switch_cmd, cwd=b_checkout_path)
    except subprocess.CalledProcessError as proc_err:
        raise_from(
            AnsibleError(  # should probably be LookupError
                'Failed to switch a cloned Git repo `{repo_url!s}` '
                'to the requested revision `{commitish!s}`.'.
                format(
                    commitish=to_native(version),
                    repo_url=to_native(git_url),
                ),
            ),
            proc_err,
        )

    return (
        os.path.join(b_checkout_path, to_bytes(fragment))
        if fragment else b_checkout_path
    )
# FIXME: use random subdirs while preserving the file names
def _download_file(url, b_path, expected_hash, validate_certs, token=None, timeout=60):
    # type: (str, bytes, str | None, bool, GalaxyToken, int) -> bytes
    # ^ NOTE: used in download and verify_collections ^
    """Download ``url`` into a fresh temp dir under ``b_path``.

    Returns the bytes path of the downloaded file. When ``expected_hash``
    is given, the download's SHA256 must match it or AnsibleError is raised.
    """
    b_tarball_name = to_bytes(
        url.rsplit('/', 1)[1], errors='surrogate_or_strict',
    )
    b_file_name = b_tarball_name[:-len('.tar.gz')]

    # Dedicated temp dir per download avoids filename collisions
    b_tarball_dir = mkdtemp(
        dir=b_path,
        prefix=b'-'.join((b_file_name, b'')),
    )  # type: bytes

    b_file_path = os.path.join(b_tarball_dir, b_tarball_name)

    display.display("Downloading %s to %s" % (url, to_text(b_tarball_dir)))
    # NOTE: Galaxy redirects downloads to S3 which rejects the request
    # NOTE: if an Authorization header is attached so don't redirect it
    resp = open_url(
        to_native(url, errors='surrogate_or_strict'),
        validate_certs=validate_certs,
        headers=None if token is None else token.headers(),
        unredirected_headers=['Authorization'], http_agent=user_agent(),
        timeout=timeout
    )

    with open(b_file_path, 'wb') as download_file:  # type: t.BinaryIO
        # Stream to disk while computing the SHA256 digest in one pass
        actual_hash = _consume_file(resp, write_to=download_file)

    if expected_hash:
        display.vvvv(
            'Validating downloaded file hash {actual_hash!s} with '
            'expected hash {expected_hash!s}'.
            format(actual_hash=actual_hash, expected_hash=expected_hash)
        )
        if expected_hash != actual_hash:
            raise AnsibleError('Mismatch artifact hash with downloaded file')

    return b_file_path
def _consume_file(read_from, write_to=None):
# type: (t.BinaryIO, t.BinaryIO) -> str
bufsize = 65536
sha256_digest = sha256()
data = read_from.read(bufsize)
while data:
if write_to is not None:
write_to.write(data)
write_to.flush()
sha256_digest.update(data)
data = read_from.read(bufsize)
return sha256_digest.hexdigest()
def _normalize_galaxy_yml_manifest(
        galaxy_yml,  # type: dict[str, str | list[str] | dict[str, str] | None]
        b_galaxy_yml_path,  # type: bytes
):
    # type: (...) -> dict[str, str | list[str] | dict[str, str] | None]
    """Validate a parsed galaxy.yml dict against the known schema.

    Raises on missing mandatory keys, warns on unknown keys, and fills
    in defaults for optional keys. Mutates and returns ``galaxy_yml``.
    """
    galaxy_yml_schema = (
        get_collections_galaxy_meta_info()
    )  # type: list[dict[str, t.Any]]  # FIXME: <--
    # FIXME: 👆maybe precise type: list[dict[str, bool | str | list[str]]]

    mandatory_keys = set()
    string_keys = set()  # type: set[str]
    list_keys = set()  # type: set[str]
    dict_keys = set()  # type: set[str]

    # Bucket every schema key by its declared type (default: str)
    for info in galaxy_yml_schema:
        if info.get('required', False):
            mandatory_keys.add(info['key'])

        key_list_type = {
            'str': string_keys,
            'list': list_keys,
            'dict': dict_keys,
        }[info.get('type', 'str')]
        key_list_type.add(info['key'])

    all_keys = frozenset(list(mandatory_keys) + list(string_keys) + list(list_keys) + list(dict_keys))

    set_keys = set(galaxy_yml.keys())
    missing_keys = mandatory_keys.difference(set_keys)
    if missing_keys:
        raise AnsibleError("The collection galaxy.yml at '%s' is missing the following mandatory keys: %s"
                           % (to_native(b_galaxy_yml_path), ", ".join(sorted(missing_keys))))

    # Unknown keys are tolerated (warning only) for forward compatibility
    extra_keys = set_keys.difference(all_keys)
    if len(extra_keys) > 0:
        display.warning("Found unknown keys in collection galaxy.yml at '%s': %s"
                        % (to_text(b_galaxy_yml_path), ", ".join(extra_keys)))

    # Add the defaults if they have not been set
    for optional_string in string_keys:
        if optional_string not in galaxy_yml:
            galaxy_yml[optional_string] = None

    for optional_list in list_keys:
        list_val = galaxy_yml.get(optional_list, None)

        if list_val is None:
            galaxy_yml[optional_list] = []
        elif not isinstance(list_val, list):
            # Scalar provided where a list is expected: wrap it
            galaxy_yml[optional_list] = [list_val]  # type: ignore[list-item]

    for optional_dict in dict_keys:
        if optional_dict not in galaxy_yml:
            galaxy_yml[optional_dict] = {}

    # NOTE: `version: null` is only allowed for `galaxy.yml`
    # NOTE: and not `MANIFEST.json`. The use-case for it is collections
    # NOTE: that generate the version from Git before building a
    # NOTE: distributable tarball artifact.
    if not galaxy_yml.get('version'):
        galaxy_yml['version'] = '*'

    return galaxy_yml
def _get_meta_from_dir(
        b_path,  # type: bytes
):  # type: (...) -> dict[str, str | list[str] | dict[str, str] | None]
    """Load collection metadata from an on-disk directory.

    Prefer the installed-collection manifest; fall back to the
    source-tree galaxy.yml when no manifest is present.
    """
    try:
        return _get_meta_from_installed_dir(b_path)
    except LookupError:
        return _get_meta_from_src_dir(b_path)
def _get_meta_from_src_dir(
        b_path,  # type: bytes
):  # type: (...) -> dict[str, str | list[str] | dict[str, str] | None]
    """Parse and normalize galaxy.yml from a collection source checkout.

    Raises LookupError when galaxy.yml is absent and AnsibleError when
    it cannot be parsed as YAML.
    """
    galaxy_yml = os.path.join(b_path, _GALAXY_YAML)
    if not os.path.isfile(galaxy_yml):
        # LookupError lets callers fall back to other metadata sources
        raise LookupError(
            "The collection galaxy.yml path '{path!s}' does not exist.".
            format(path=to_native(galaxy_yml))
        )

    with open(galaxy_yml, 'rb') as manifest_file_obj:
        try:
            manifest = yaml_load(manifest_file_obj)
        except yaml.error.YAMLError as yaml_err:
            raise_from(
                AnsibleError(
                    "Failed to parse the galaxy.yml at '{path!s}' with "
                    'the following error:\n{err_txt!s}'.
                    format(
                        path=to_native(galaxy_yml),
                        err_txt=to_native(yaml_err),
                    ),
                ),
                yaml_err,
            )

    return _normalize_galaxy_yml_manifest(manifest, galaxy_yml)
def _get_json_from_installed_dir(
        b_path,  # type: bytes
        filename,  # type: str
):  # type: (...) -> dict
    """Load and parse a JSON metadata file from an installed collection dir.

    :raises LookupError: when the file does not exist.
    :raises AnsibleError: when the file is not valid JSON.
    """
    b_json_filepath = os.path.join(b_path, to_bytes(filename, errors='surrogate_or_strict'))

    try:
        with open(b_json_filepath, 'rb') as manifest_fd:
            b_json_text = manifest_fd.read()
    except (IOError, OSError):
        # LookupError lets callers fall back to other metadata sources
        raise LookupError(
            "The collection {manifest!s} path '{path!s}' does not exist.".
            format(
                manifest=filename,
                path=to_native(b_json_filepath),
            )
        )

    manifest_txt = to_text(b_json_text, errors='surrogate_or_strict')

    try:
        manifest = json.loads(manifest_txt)
    except ValueError:
        # BUG FIX: the original error spoke of a "tar file member" even
        # though this helper reads from an installed directory on disk.
        raise AnsibleError(
            "Collection metadata file '{path!s}' does not "
            'contain a valid json string.'.
            format(path=to_native(b_json_filepath)),
        )

    return manifest
def _get_meta_from_installed_dir(
        b_path,  # type: bytes
):  # type: (...) -> dict[str, str | list[str] | dict[str, str] | None]
    """Extract ``collection_info`` from an installed collection's MANIFEST.json.

    Unlike galaxy.yml, an installed manifest must carry a real version;
    a missing/empty version is rejected with AnsibleError.
    """
    manifest = _get_json_from_installed_dir(b_path, MANIFEST_FILENAME)
    collection_info = manifest['collection_info']

    version = collection_info.get('version')
    if not version:
        raise AnsibleError(
            u'Collection metadata file `{manifest_filename!s}` at `{meta_file!s}` is expected '
            u'to have a valid SemVer version value but got {version!s}'.
            format(
                manifest_filename=MANIFEST_FILENAME,
                meta_file=to_text(b_path),
                version=to_text(repr(version)),
            ),
        )

    return collection_info
def _get_meta_from_tar(
        b_path,  # type: bytes
):  # type: (...) -> dict[str, str | list[str] | dict[str, str] | None]
    """Extract ``collection_info`` from the MANIFEST.json inside a tarball.

    Raises AnsibleError when ``b_path`` is not a tar file, the manifest
    member is missing/unreadable, or the manifest is not valid JSON.
    """
    if not tarfile.is_tarfile(b_path):
        raise AnsibleError(
            "Collection artifact at '{path!s}' is not a valid tar file.".
            format(path=to_native(b_path)),
        )

    with tarfile.open(b_path, mode='r') as collection_tar:  # type: tarfile.TarFile
        try:
            member = collection_tar.getmember(MANIFEST_FILENAME)
        except KeyError:
            raise AnsibleError(
                "Collection at '{path!s}' does not contain the "
                'required file {manifest_file!s}.'.
                format(
                    path=to_native(b_path),
                    manifest_file=MANIFEST_FILENAME,
                ),
            )

        with _tarfile_extract(collection_tar, member) as (_member, member_obj):
            if member_obj is None:
                # extractfile() yields None for non-regular members
                raise AnsibleError(
                    'Collection tar file does not contain '
                    'member {member!s}'.format(member=MANIFEST_FILENAME),
                )

            text_content = to_text(
                member_obj.read(),
                errors='surrogate_or_strict',
            )

            try:
                manifest = json.loads(text_content)
            except ValueError:
                raise AnsibleError(
                    'Collection tar file member {member!s} does not '
                    'contain a valid json string.'.
                    format(member=MANIFEST_FILENAME),
                )
            return manifest['collection_info']
@contextmanager
def _tarfile_extract(
tar, # type: tarfile.TarFile
member, # type: tarfile.TarInfo
):
# type: (...) -> t.Iterator[tuple[tarfile.TarInfo, t.IO[bytes] | None]]
tar_obj = tar.extractfile(member)
try:
yield member, tar_obj
finally:
if tar_obj is not None:
tar_obj.close()
|
felixfontein/ansible
|
lib/ansible/galaxy/collection/concrete_artifact_manager.py
|
Python
|
gpl-3.0
| 25,482
|
[
"Galaxy"
] |
dd6f7b86c973ddb7db6e3eef0bb672d0c8d201dd1014622443f53d252ef63f0f
|
""" Maximally localized Wannier Functions
Find the set of maximally localized Wannier functions
using the spread functional of Marzari and Vanderbilt
(PRB 56, 1997 page 12847).
"""
import numpy as np
from time import time
from math import sqrt, pi
from pickle import dump, load
from ase.parallel import paropen
from ase.calculators.dacapo import Dacapo
from ase.dft.kpoints import get_monkhorst_shape
from ase.transport.tools import dagger, normalize
dag = dagger
def gram_schmidt(U):
    """Orthonormalize the columns of U in place (classical Gram-Schmidt)."""
    columns = U.T
    for index in range(len(columns)):
        current = columns[index]
        # Remove the projection onto every previously orthonormalized column
        for previous in columns[:index]:
            current -= previous * np.dot(previous.conj(), current)
        current /= np.linalg.norm(current)
def lowdin(U, S=None):
    """Loewdin-orthonormalize the columns of U in place.

    If the overlap matrix S = U^dagger U is already known, it can be
    passed in to avoid recomputing it.
    """
    if S is None:
        S = np.dot(U.conj().T, U)
    # Build S^(-1/2) from the eigendecomposition of S, then U <- U S^(-1/2)
    eigvals, eigvecs = np.linalg.eigh(S)
    inv_sqrt_S = np.dot(eigvecs / np.sqrt(eigvals), eigvecs.conj().T)
    U[:] = np.dot(U, inv_sqrt_S)
def neighbor_k_search(k_c, G_c, kpt_kc, tol=1e-4):
    """Find the neighbor k-point index and wrapping vector.

    Search for an index ``k1`` (into ``kpt_kc``) and a wrapping vector
    ``k0_c`` (from the set of non-negative G directions) such that::

        k1_c - k_c - G_c + k0_c = 0

    Returns ``(k1, k0_c)``. Raises NotImplementedError when no match is
    found within ``tol`` -- typically a non-uniform k-point grid.
    """
    alldir_dc = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1],
                          [1, 1, 0], [1, 0, 1], [0, 1, 1]], int)
    for k0_c in alldir_dc:
        for k1, k1_c in enumerate(kpt_kc):
            if np.linalg.norm(k1_c - k_c - G_c + k0_c) < tol:
                return k1, k0_c

    # BUG FIX: diagnostics used to be emitted with Python-2-only print
    # statements before a bare NotImplementedError; fold them into the
    # exception message instead.
    raise NotImplementedError(
        'Wannier: Did not find matching kpoint for kpt=%s. '
        'Probably non-uniform k-point grid' % (k_c,))
def calculate_weights(cell_cc):
    """Compute direction weights for the spread functional.

    Weights are used for non-cubic cells, see PRB **61**, 10040.
    Returns ``(weight_d, Gdir_dc)``; the first three directions are
    always kept (they are needed for the Wannier centers), the three
    diagonal directions only when their weight is non-negligible.
    """
    alldirs_dc = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1],
                           [1, 1, 0], [1, 0, 1], [0, 1, 1]], dtype=int)
    metric = np.dot(cell_cc, cell_cc.T)

    # NOTE: Only first 3 of following 6 weights are presently used:
    w = np.zeros(6)
    w[0] = metric[0, 0] - metric[0, 1] - metric[0, 2]
    w[1] = metric[1, 1] - metric[0, 1] - metric[1, 2]
    w[2] = metric[2, 2] - metric[0, 2] - metric[1, 2]
    w[3] = metric[0, 1]
    w[4] = metric[0, 2]
    w[5] = metric[1, 2]

    # Always include the three axis directions -- they are used to
    # calculate the Wannier centers.
    Gdir_dc = alldirs_dc[:3]
    weight_d = w[:3]
    for d in range(3, 6):
        if abs(w[d]) > 1e-5:
            Gdir_dc = np.concatenate((Gdir_dc, alldirs_dc[d:d + 1]))
            weight_d = np.concatenate((weight_d, w[d:d + 1]))
    weight_d /= max(abs(weight_d))
    return weight_d, Gdir_dc
def random_orthogonal_matrix(dim, seed=None, real=False):
    """Generate a random orthogonal (or, for ``real=False``, unitary) matrix.

    The matrix is built from a random symmetric H: either by
    Gram-Schmidt orthonormalization (real case) or as exp(iH) through
    the eigenbasis (complex case).
    """
    if seed is not None:
        np.random.seed(seed)

    # Symmetrize a uniform random matrix: H <- (H^dagger + H) / 2
    H = np.random.rand(dim, dim)
    H = 0.5 * (np.conj(H.T) + H)

    if real:
        gram_schmidt(H)
        return H

    # exp(i * eigval) on the eigenbasis of symmetric H yields a unitary
    eigvals, eigvecs = np.linalg.eig(H)
    return np.dot(eigvecs * np.exp(1.j * eigvals), np.conj(eigvecs.T))
def steepest_descent(func, step=.005, tolerance=1e-6, **kwargs):
    """Optimize ``func`` by fixed-step gradient steps.

    ``func`` must provide ``get_gradients()``, ``step()`` and
    ``get_functional_value()`` (e.g. a Wannier instance). Iterates until
    the relative change of the functional value drops below ``tolerance``.
    """
    fvalueold = 0.
    fvalue = fvalueold + 10
    count=0
    while abs((fvalue - fvalueold) / fvalue) > tolerance:
        fvalueold = fvalue
        dF = func.get_gradients()
        # Move along the gradient with a constant step length
        func.step(dF * step, **kwargs)
        fvalue = func.get_functional_value()
        count += 1
        print 'SteepestDescent: iter=%s, value=%s' % (count, fvalue)
def md_min(func, step=.25, tolerance=1e-6, verbose=False, **kwargs):
    """Optimize ``func`` with damped molecular-dynamics-like steps.

    A velocity ``V`` accumulates the gradient; components moving against
    the current gradient are zeroed each iteration (damping), and the
    step length is halved whenever the functional value decreases.
    """
    if verbose:
        print 'Localize with step =', step, 'and tolerance =', tolerance
    t = -time()
    fvalueold = 0.
    fvalue = fvalueold + 10
    count = 0
    V = np.zeros(func.get_gradients().shape, dtype=complex)
    while abs((fvalue - fvalueold) / fvalue) > tolerance:
        fvalueold = fvalue
        dF = func.get_gradients()
        # Zero velocity components that point against the new gradient
        V *= (dF * V.conj()).real > 0
        V += step * dF
        func.step(V, **kwargs)
        fvalue = func.get_functional_value()
        if fvalue < fvalueold:
            # Overshot: reduce the step size
            step *= 0.5
        count += 1
        if verbose:
            print 'MDmin: iter=%s, step=%s, value=%s' % (count, step, fvalue)
    if verbose:
        t += time()
        print '%d iterations in %0.2f seconds (%0.2f ms/iter), endstep = %s' %(
            count, t, t * 1000. / count, step)
def rotation_from_projection(proj_nw, fixed, ortho=True):
    """Determine rotation and coefficient matrices from projections

    proj_nw = <psi_n|p_w>
    psi_n: eigenstates
    p_w: localized function

    Nb (n) = Number of bands
    Nw (w) = Number of wannier functions
    M  (f) = Number of fixed states
    L  (l) = Number of extra degrees of freedom
    U  (u) = Number of non-fixed states

    Returns ``(U_ww, C_ul)``: the Nw x Nw rotation and the coefficients
    of the extra degrees of freedom in the non-fixed subspace.
    """
    Nb, Nw = proj_nw.shape
    M = fixed
    L = Nw - M

    # The fixed states project directly into the rotation matrix
    U_ww = np.empty((Nw, Nw), dtype=proj_nw.dtype)
    U_ww[:M] = proj_nw[:M]

    if L > 0:
        # Pick the L strongest directions (largest eigenvalues of the
        # overlap of the non-fixed projections) as extra degrees of freedom
        proj_uw = proj_nw[M:]
        eig_w, C_ww = np.linalg.eigh(np.dot(dag(proj_uw), proj_uw))
        C_ul = np.dot(proj_uw, C_ww[:, np.argsort(-eig_w.real)[:L]])
        #eig_u, C_uu = np.linalg.eigh(np.dot(proj_uw, dag(proj_uw)))
        #C_ul = C_uu[:, np.argsort(-eig_u.real)[:L]]

        U_ww[M:] = np.dot(dag(C_ul), proj_uw)
    else:
        C_ul = np.empty((Nb - M, 0))

    normalize(C_ul)
    if ortho:
        lowdin(U_ww)
    else:
        normalize(U_ww)

    return U_ww, C_ul
class Wannier:
"""Maximally localized Wannier Functions
Find the set of maximally localized Wannier functions using the
spread functional of Marzari and Vanderbilt (PRB 56, 1997 page
12847).
"""
def __init__(self, nwannier, calc,
             file=None,
             nbands=None,
             fixedenergy=None,
             fixedstates=None,
             spin=0,
             initialwannier='random',
             seed=None,
             verbose=False):
    """
    Required arguments:

      ``nwannier``: The number of Wannier functions you wish to construct.
        This must be at least half the number of electrons in the system
        and at most equal to the number of bands in the calculation.

      ``calc``: A converged DFT calculator class.
        If ``file`` arg. is not provided, the calculator *must* provide the
        method ``get_wannier_localization_matrix``, and contain the
        wavefunctions (save files with only the density is not enough).
        If the localization matrix is read from file, this is not needed,
        unless ``get_function`` or ``write_cube`` is called.

    Optional arguments:

      ``nbands``: Bands to include in localization.
        The number of bands considered by Wannier can be smaller than the
        number of bands in the calculator. This is useful if the highest
        bands of the DFT calculation are not well converged.

      ``spin``: The spin channel to be considered.
        The Wannier code treats each spin channel independently.

      ``fixedenergy`` / ``fixedstates``: Fixed part of Hilbert space.
        Determine the fixed part of Hilbert space by either a maximal
        energy *or* a number of bands (possibly a list for multiple
        k-points).
        Default is None meaning that the number of fixed states is equated
        to ``nwannier``.

      ``file``: Read localization and rotation matrices from this file.

      ``initialwannier``: Initial guess for Wannier rotation matrix.
        Can be 'bloch' to start from the Bloch states, 'random' to be
        randomized, or a list passed to calc.get_initial_wannier.

      ``seed``: Seed for random ``initialwannier``.

      ``verbose``: True / False level of verbosity.
    """
    # Bloch phase sign convention
    sign = -1
    classname = calc.__class__.__name__
    if classname in ['Dacapo', 'Jacapo']:
        print 'Using ' + classname
        sign = +1

    self.nwannier = nwannier
    self.calc = calc
    self.spin = spin
    self.verbose = verbose
    self.kpt_kc = sign * calc.get_ibz_k_points()
    # The full BZ grid is required (no symmetry reduction)
    assert len(calc.get_bz_k_points()) == len(self.kpt_kc)
    self.kptgrid = get_monkhorst_shape(self.kpt_kc)
    self.Nk = len(self.kpt_kc)
    self.unitcell_cc = calc.get_atoms().get_cell()
    # Supercell spanned by the k-point grid repetitions
    self.largeunitcell_cc = (self.unitcell_cc.T * self.kptgrid).T
    self.weight_d, self.Gdir_dc = calculate_weights(self.largeunitcell_cc)
    self.Ndir = len(self.weight_d)  # Number of directions

    if nbands is not None:
        self.nbands = nbands
    else:
        self.nbands = calc.get_number_of_bands()
    if fixedenergy is None:
        if fixedstates is None:
            # Default: fix exactly nwannier states at every k-point
            self.fixedstates_k = np.array([nwannier] * self.Nk, int)
        else:
            if type(fixedstates) is int:
                fixedstates = [fixedstates] * self.Nk
            self.fixedstates_k = np.array(fixedstates, int)
    else:
        # Setting number of fixed states and EDF from specified energy.
        # All states below this energy (relative to Fermi level) are fixed.
        fixedenergy += calc.get_fermi_level()
        print fixedenergy
        self.fixedstates_k = np.array(
            [calc.get_eigenvalues(k, spin).searchsorted(fixedenergy)
             for k in range(self.Nk)], int)
    # Extra degrees of freedom per k-point
    self.edf_k = self.nwannier - self.fixedstates_k
    if verbose:
        print 'Wannier: Fixed states : %s' % self.fixedstates_k
        print 'Wannier: Extra degrees of freedom: %s' % self.edf_k

    # Set the list of neighboring k-points k1, and the "wrapping" k0,
    # such that k1 - k - G + k0 = 0
    #
    # Example: kpoints = (-0.375,-0.125,0.125,0.375), dir=0
    # G = [0.25,0,0]
    # k=0.375, k1= -0.375 : -0.375-0.375-0.25 => k0=[1,0,0]
    #
    # For a gamma point calculation k1 = k = 0, k0 = [1,0,0] for dir=0
    if self.Nk == 1:
        self.kklst_dk = np.zeros((self.Ndir, 1), int)
        k0_dkc = self.Gdir_dc.reshape(-1, 1, 3)
    else:
        self.kklst_dk = np.empty((self.Ndir, self.Nk), int)
        k0_dkc = np.empty((self.Ndir, self.Nk, 3), int)

        # Distance between kpoints
        kdist_c = np.empty(3)
        for c in range(3):
            # make a sorted list of the kpoint values in this direction
            slist = np.argsort(self.kpt_kc[:, c], kind='mergesort')
            skpoints_kc = np.take(self.kpt_kc, slist, axis=0)
            kdist_c[c] = max([skpoints_kc[n + 1, c] - skpoints_kc[n, c]
                              for n in range(self.Nk - 1)])

        for d, Gdir_c in enumerate(self.Gdir_dc):
            for k, k_c in enumerate(self.kpt_kc):
                # setup dist vector to next kpoint
                G_c = np.where(Gdir_c > 0, kdist_c, 0)
                if max(G_c) < 1e-4:
                    # Degenerate direction: the neighbor is itself
                    self.kklst_dk[d, k] = k
                    k0_dkc[d, k] = Gdir_c
                else:
                    self.kklst_dk[d, k], k0_dkc[d, k] = \
                        neighbor_k_search(k_c, G_c, self.kpt_kc)

    # Set the inverse list of neighboring k-points
    self.invkklst_dk = np.empty((self.Ndir, self.Nk), int)
    for d in range(self.Ndir):
        for k1 in range(self.Nk):
            self.invkklst_dk[d, k1] = self.kklst_dk[d].tolist().index(k1)

    Nw = self.nwannier
    Nb = self.nbands
    self.Z_dkww = np.empty((self.Ndir, self.Nk, Nw, Nw), complex)
    self.V_knw = np.zeros((self.Nk, Nb, Nw), complex)
    if file is None:
        # Localization matrices come from the calculator unless read from file
        self.Z_dknn = np.empty((self.Ndir, self.Nk, Nb, Nb), complex)
        for d, dirG in enumerate(self.Gdir_dc):
            for k in range(self.Nk):
                k1 = self.kklst_dk[d, k]
                k0_c = k0_dkc[d, k]
                self.Z_dknn[d, k] = calc.get_wannier_localization_matrix(
                    nbands=Nb, dirG=dirG, kpoint=k, nextkpoint=k1,
                    G_I=k0_c, spin=self.spin)
    self.initialize(file=file, initialwannier=initialwannier, seed=seed)
def initialize(self, file=None, initialwannier='random', seed=None):
    """Re-initialize current rotation matrix.

    Keywords are identical to those of the constructor.
    """
    Nw = self.nwannier
    Nb = self.nbands
    if file is not None:
        # Restore pickled localization and rotation matrices
        self.Z_dknn, self.U_kww, self.C_kul = load(paropen(file))
    elif initialwannier == 'bloch':
        # Set U and C to pick the lowest Bloch states
        self.U_kww = np.zeros((self.Nk, Nw, Nw), complex)
        self.C_kul = []
        for U, M, L in zip(self.U_kww, self.fixedstates_k, self.edf_k):
            U[:] = np.identity(Nw, complex)
            if L > 0:
                self.C_kul.append(
                    np.identity(Nb - M, complex)[:, :L])
            else:
                self.C_kul.append([])
    elif initialwannier == 'random':
        # Set U and C to random (orthogonal) matrices
        self.U_kww = np.zeros((self.Nk, Nw, Nw), complex)
        self.C_kul = []
        for U, M, L in zip(self.U_kww, self.fixedstates_k, self.edf_k):
            U[:] = random_orthogonal_matrix(Nw, seed, real=False)
            if L > 0:
                self.C_kul.append(random_orthogonal_matrix(
                    Nb - M, seed=seed, real=False)[:, :L])
            else:
                self.C_kul.append(np.array([]))
    else:
        # Use initial guess to determine U and C
        self.C_kul, self.U_kww = self.calc.initial_wannier(
            initialwannier, self.kptgrid, self.fixedstates_k,
            self.edf_k, self.spin)
    self.update()
def save(self, file):
    """Save information on localization and rotation matrices to file."""
    # Pickle everything needed to reconstruct the current state
    state = (self.Z_dknn, self.U_kww, self.C_kul)
    dump(state, paropen(file, 'w'))
def update(self):
    """Rebuild the large rotation V_knw and the Z matrices from U and C."""
    # Update large rotation matrix V (from rotation U and coeff C)
    for k, M in enumerate(self.fixedstates_k):
        self.V_knw[k, :M] = self.U_kww[k, :M]
        if M < self.nwannier:
            # Non-fixed part comes through the extra-degree coefficients
            self.V_knw[k, M:] = np.dot(self.C_kul[k], self.U_kww[k, M:])
        # else: self.V_knw[k, M:] = 0.0

    # Calculate the Zk matrix from the large rotation matrix:
    # Zk = V^d[k] Zbloch V[k1]
    for d in range(self.Ndir):
        for k in range(self.Nk):
            k1 = self.kklst_dk[d, k]
            self.Z_dkww[d, k] = np.dot(dag(self.V_knw[k]), np.dot(
                self.Z_dknn[d, k], self.V_knw[k1]))

    # Update the new Z matrix (k-point average per direction)
    self.Z_dww = self.Z_dkww.sum(axis=1) / self.Nk
def get_centers(self, scaled=False):
    """Calculate the Wannier centers

    ::

      pos = L / 2pi * phase(diag(Z))
    """
    # Phases of the diagonal Z elements give the scaled positions,
    # wrapped into the [0, 1) supercell
    phases_wc = np.angle(self.Z_dww[:3].diagonal(0, 1, 2)).T
    scaled_wc = phases_wc / (2 * pi) % 1
    if scaled:
        return scaled_wc
    return np.dot(scaled_wc, self.largeunitcell_cc)
def get_radii(self):
    """Calculate the spread of the Wannier functions.

    ::

                    --  /  L  \ 2       2
      radius**2 = - >   | --- |   ln |Z|
                    --d \ 2pi /
    """
    prefactor_d = self.largeunitcell_cc.diagonal()**2 / (2 * pi)**2
    lnZ2_dw = np.log(abs(self.Z_dww[:3].diagonal(0, 1, 2))**2)
    return np.sqrt(-np.dot(prefactor_d, lnZ2_dw))
def get_spectral_weight(self, w):
    """Return the k-point-averaged band weights |V_kn|^2 / Nk of function w."""
    weights_kn = abs(self.V_knw[:, :, w])**2
    return weights_kn / self.Nk
def get_pdos(self, w, energies, width):
    """Projected density of states (PDOS).

    Returns the (PDOS) for Wannier function ``w``. The calculation
    is performed over the energy grid specified in energies. The
    PDOS is produced as a sum of Gaussians centered at the points
    of the energy grid and with the specified width.
    """
    spec_kn = self.get_spectral_weight(w)
    dos = np.zeros(len(energies))
    for k, spec_n in enumerate(spec_kn):
        # BUG FIX: the original used the undefined names `kpt`, `eig`
        # (in the zip) and `center`, raising NameError -- they should be
        # the loop k-point index, the eigenvalue array and the eigenvalue.
        eig_n = self.calc.get_eigenvalues(k=k, s=self.spin)
        for weight, eig in zip(spec_n, eig_n):
            # Add a Gaussian centered at the eigenvalue
            x = ((energies - eig) / width)**2
            dos += weight * np.exp(-x.clip(0., 40.)) / (sqrt(pi) * width)
    return dos
def max_spread(self, directions=None):
    """Returns the index of the most delocalized Wannier function
    together with the value of the spread functional."""
    if directions is None:
        directions = [0, 1, 2]
    # Accumulate the weighted localization |Z_nn|^2 of every Wannier
    # function over the requested directions; the smallest accumulated
    # value corresponds to the most delocalized function.
    # BUG FIX: the original assigned a whole |Z_nn|^2 vector into the
    # scalar slot d[dir] (ValueError for nwannier > 1) and never
    # returned the result promised by the docstring.
    d = np.zeros(self.nwannier)
    for dir in directions:
        d += np.abs(self.Z_dww[dir].diagonal())**2 * self.weight_d[dir]
    index = np.argsort(d)[0]
    print('Index: %s' % index)
    print('Spread: %s' % d[index])
    return index, d[index]
def translate(self, w, R):
    """Translate the w'th Wannier function

    The distance vector R = [n1, n2, n3], is in units of the basis
    vectors of the small cell.
    """
    R_c = np.array(R)
    for kpt_c, U_ww in zip(self.kpt_kc, self.U_kww):
        # A lattice translation is a k-dependent phase on column w
        phase = np.exp(2.j * pi * np.dot(R_c, kpt_c))
        U_ww[:, w] *= phase
    self.update()
def translate_to_cell(self, w, cell):
    """Translate the w'th Wannier function to specified cell"""
    # Current scaled center (units of the small cell) from the Z phases
    scaled_c = np.angle(self.Z_dww[:3, w, w]) * self.kptgrid / (2 * pi)
    shift_c = np.array(cell) - np.floor(scaled_c)
    self.translate(w, shift_c)
def translate_all_to_cell(self, cell=[0, 0, 0]):
    """Translate all Wannier functions to specified cell.

    Move all Wannier orbitals to a specific unit cell. There
    exists an arbitrariness in the positions of the Wannier
    orbitals relative to the unit cell. This method can move all
    orbitals to the unit cell specified by ``cell``. For a
    `\Gamma`-point calculation, this has no effect. For a
    **k**-point calculation the periodicity of the orbitals are
    given by the large unit cell defined by repeating the original
    unitcell by the number of **k**-points in each direction. In
    this case it is usefull to move the orbitals away from the
    boundaries of the large cell before plotting them. For a bulk
    calculation with, say 10x10x10 **k** points, one could move
    the orbitals to the cell [2,2,2]. In this way the pbc
    boundary conditions will not be noticed.
    """
    # Scaled centers of all Wannier functions (units of the small cell)
    scaled_wc = np.angle(self.Z_dww[:3].diagonal(0, 1, 2)).T * \
                self.kptgrid / (2 * pi)
    trans_wc = np.array(cell)[None] - np.floor(scaled_wc)
    # Apply the per-function translation phases at every k-point;
    # the length-Nw phase vector broadcasts over the columns of U_ww
    for kpt_c, U_ww in zip(self.kpt_kc, self.U_kww):
        U_ww *= np.exp(2.j * pi * np.dot(trans_wc, kpt_c))
    self.update()
def distances(self, R):
    """Return the Nw x Nw distance matrix between Wannier centers, with
    the second set of centers translated by lattice vector R (in units
    of the small-cell basis vectors)."""
    Nw = self.nwannier
    centers = self.get_centers()
    # Shift a copy of the centers by the requested lattice translation
    shifted = centers.copy()
    for axis in range(3):
        shifted += self.unitcell_cc[axis] * R[axis]
    first = centers.repeat(Nw, axis=0).reshape(Nw, Nw, 3)
    second = np.swapaxes(shifted.repeat(Nw, axis=0).reshape(Nw, Nw, 3), 0, 1)
    return np.sqrt(np.sum((first - second)**2, axis=-1))
def get_hopping(self, R):
    """Returns the matrix H(R)_nm=<0,n|H|R,m>.

    ::

                            1   _   -ik.R
      H(R) = <0,n|H|R,m> = --- >_  e     H(k)
                            Nk  k

    where R is the cell-distance (in units of the basis vectors of
    the small cell) and n,m are indices of the Wannier functions.
    """
    R_c = np.array(R)
    hop_ww = np.zeros([self.nwannier, self.nwannier], complex)
    for k, kpt_c in enumerate(self.kpt_kc):
        # Accumulate the phase-weighted k-space Hamiltonian
        phase = np.exp(-2.j * pi * np.dot(R_c, kpt_c))
        hop_ww += self.get_hamiltonian(k) * phase
    return hop_ww / self.Nk
def get_hamiltonian(self, k=0):
    """Get Hamiltonian at existing k-vector of index k

    ::

              dag
      H(k) = V    diag(eps )  V
              k           k    k
    """
    V_nw = self.V_knw[k]
    eps_n = self.calc.get_eigenvalues(kpt=k, spin=self.spin)[:self.nbands]
    # Rotate the diagonal eigenvalue matrix into the Wannier basis
    return np.dot(V_nw.conj().T * eps_n, V_nw)
def get_hamiltonian_kpoint(self, kpt_c):
    """Get Hamiltonian at some new arbitrary k-vector

    ::

              _   ik.R
      H(k) = >_  e     H(R)
              R

    Warning: This method moves all Wannier functions to cell (0, 0, 0)
    """
    if self.verbose:
        print('Translating all Wannier functions to cell (0, 0, 0)')
    self.translate_all_to_cell()
    # BUG FIX: the original named this local variable `max`, shadowing
    # the builtin of the same name.
    N1, N2, N3 = (self.kptgrid - 1) / 2
    Hk = np.zeros([self.nwannier, self.nwannier], complex)
    for n1 in xrange(-N1, N1 + 1):
        for n2 in xrange(-N2, N2 + 1):
            for n3 in xrange(-N3, N3 + 1):
                R = np.array([n1, n2, n3], float)
                hop_ww = self.get_hopping(R)
                phase = np.exp(+2.j * pi * np.dot(R, kpt_c))
                Hk += hop_ww * phase
    return Hk
def get_function(self, index, repeat=None):
    """Get Wannier function on grid.

    Returns an array with the funcion values of the indicated Wannier
    function on a grid with the size of the *repeated* unit cell.

    For a calculation using **k**-points the relevant unit cell for
    eg. visualization of the Wannier orbitals is not the original unit
    cell, but rather a larger unit cell defined by repeating the
    original unit cell by the number of **k**-points in each direction.
    Note that for a `\Gamma`-point calculation the large unit cell
    coinsides with the original unit cell.
    The large unitcell also defines the periodicity of the Wannier
    orbitals.

    ``index`` can be either a single WF or a coordinate vector in terms
    of the WFs.
    """
    # Default size of plotting cell is the one corresponding to k-points.
    if repeat is None:
        repeat = self.kptgrid
    N1, N2, N3 = repeat

    dim = self.calc.get_number_of_grid_points()
    largedim = dim * [N1, N2, N3]

    wanniergrid = np.zeros(largedim, dtype=complex)
    for k, kpt_c in enumerate(self.kpt_kc):
        # The coordinate vector of wannier functions
        if type(index) == int:
            vec_n = self.V_knw[k, :, index]
        else:
            vec_n = np.dot(self.V_knw[k], index)

        # Sum the rotated pseudo wave functions on the small grid
        wan_G = np.zeros(dim, complex)
        for n, coeff in enumerate(vec_n):
            wan_G += coeff * self.calc.get_pseudo_wave_function(
                n, k, self.spin, pad=True)

        # Distribute the small wavefunction over large cell:
        for n1 in xrange(N1):
            for n2 in xrange(N2):
                for n3 in xrange(N3):  # sign?
                    # Bloch phase for this repetition of the small cell
                    e = np.exp(-2.j * pi * np.dot([n1, n2, n3], kpt_c))
                    wanniergrid[n1 * dim[0]:(n1 + 1) * dim[0],
                                n2 * dim[1]:(n2 + 1) * dim[1],
                                n3 * dim[2]:(n3 + 1) * dim[2]] += e * wan_G

    # Normalization
    wanniergrid /= np.sqrt(self.Nk)
    return wanniergrid
def write_cube(self, index, fname, repeat=None, real=True):
    """Dump specified Wannier function to a cube file"""
    from ase.io.cube import write_cube

    # Default size of plotting cell is the one corresponding to k-points.
    if repeat is None:
        repeat = self.kptgrid
    atoms = self.calc.get_atoms() * repeat
    func = self.get_function(index, repeat)

    # Handle separation of complex wave into real parts
    if real:
        if self.Nk == 1:
            # Gamma-only: remove the global phase so the function is real
            func *= np.exp(-1.j * np.angle(func.max()))
            # NOTE(review): dead debug guard below (`if 0:`) never runs
            if 0: assert max(abs(func.imag).flat) < 1e-4
            func = func.real
        else:
            func = abs(func)
    else:
        # Write the phase to a separate `<name>.phase.<ext>` cube file
        phase_fname = fname.split('.')
        phase_fname.insert(1, 'phase')
        phase_fname = '.'.join(phase_fname)
        write_cube(phase_fname, atoms, data=np.angle(func))
        func = abs(func)

    write_cube(fname, atoms, data=func)
def localize(self, step=0.25, tolerance=1e-08,
             updaterot=True, updatecoeff=True):
    """Optimize rotation to give maximal localization.

    Delegates the minimization of the spread functional to ``md_min``,
    optionally updating the unitary rotations and/or the coefficients
    of the extra degrees of freedom.
    """
    options = dict(verbose=self.verbose,
                   updaterot=updaterot,
                   updatecoeff=updatecoeff)
    md_min(self, step, tolerance, **options)
def get_functional_value(self):
    """Calculate the value of the spread functional.

    ::

      Tr[|ZI|^2] = sum(I) sum(n) w_i |Z_(i)_nn|^2,

    where w_i are weights.
    """
    # Diagonal elements Z_nn for every weight direction d: shape (Nd, Nw).
    diag_dw = self.Z_dww.diagonal(0, 1, 2)
    # Sum |Z_nn|^2 over the Wannier index, leaving one value per direction.
    spread_d = (np.abs(diag_dw) ** 2).sum(axis=1)
    return np.dot(spread_d, self.weight_d).real
def get_gradients(self):
    """Return the gradient of the spread functional, flattened and
    concatenated: first the U-rotation gradients for all k-points,
    then the coefficient gradients for k-points with extra degrees
    of freedom (L > 0).
    """
    # Determine gradient of the spread functional.
    #
    # The gradient for a rotation A_kij is::
    #
    #    dU = dRho/dA_{k,i,j} = sum(I) sum(k')
    #       + Z_jj Z_kk',ij^* - Z_ii Z_k'k,ij^*
    #       - Z_ii^* Z_kk',ji + Z_jj^* Z_k'k,ji
    #
    # The gradient for a change of coefficients is::
    #
    #   dRho/da^*_{k,i,j} = sum(I) [[(Z_0)_{k} V_{k'} diag(Z^*) +
    #                                (Z_0_{k''})^d V_{k''} diag(Z)] *
    #                                U_k^d]_{N+i,N+j}
    #
    # where diag(Z) is a square,diagonal matrix with Z_nn in the diagonal,
    # k' = k + dk and k = k'' + dk.
    #
    # The extra degrees of freedom chould be kept orthonormal to the fixed
    # space, thus we introduce lagrange multipliers, and minimize instead::
    #
    #     Rho_L=Rho- sum_{k,n,m} lambda_{k,nm} <c_{kn}|c_{km}>
    #
    # for this reason the coefficient gradients should be multiplied
    # by (1 - c c^d).
    Nb = self.nbands
    Nw = self.nwannier
    dU = []  # one flattened Nw x Nw gradient per k-point
    dC = []  # one flattened gradient per k-point with L > 0
    for k in xrange(self.Nk):
        M = self.fixedstates_k[k]   # number of fixed states at this k
        L = self.edf_k[k]           # number of extra degrees of freedom
        U_ww = self.U_kww[k]
        C_ul = self.C_kul[k]
        Utemp_ww = np.zeros((Nw, Nw), complex)
        Ctemp_nw = np.zeros((Nb, Nw), complex)
        for d, weight in enumerate(self.weight_d):
            # Skip directions with (numerically) zero weight.
            if abs(weight) < 1.0e-6:
                continue
            Z_knn = self.Z_dknn[d]
            diagZ_w = self.Z_dww[d].diagonal()
            # Matrix with Z_ii replicated along rows (Zii_ww[i, j] = Z_ii).
            Zii_ww = np.repeat(diagZ_w, Nw).reshape(Nw, Nw)
            k1 = self.kklst_dk[d, k]     # k' = k + dk
            k2 = self.invkklst_dk[d, k]  # k'' with k = k'' + dk
            V_knw = self.V_knw
            Z_kww = self.Z_dkww[d]
            if L > 0:
                Ctemp_nw += weight * np.dot(
                    np.dot(Z_knn[k], V_knw[k1]) * diagZ_w.conj() +
                    np.dot(dag(Z_knn[k2]), V_knw[k2]) * diagZ_w,
                    dag(U_ww))
            # Anti-hermitian combination gives the rotation gradient.
            temp = Zii_ww.T * Z_kww[k].conj() - Zii_ww * Z_kww[k2].conj()
            Utemp_ww += weight * (temp - dag(temp))
        dU.append(Utemp_ww.ravel())
        if L > 0:
            # Ctemp now has same dimension as V, the gradient is in the
            # lower-right (Nb-M) x L block
            Ctemp_ul = Ctemp_nw[M:, M:]
            # Project out the fixed space: multiply by (1 - c c^d).
            G_ul = Ctemp_ul - np.dot(np.dot(C_ul, dag(C_ul)), Ctemp_ul)
            dC.append(G_ul.ravel())
    return np.concatenate(dU + dC)
def step(self, dX, updaterot=True, updatecoeff=True):
    """Take one optimization step along the packed direction ``dX``.

    ``dX`` packs the rotation generators A_k (first Nk*Nw**2 entries)
    followed by the coefficient changes dC; the rotations are updated
    as U -> U exp(-A) and the coefficients as C -> C + dC, followed
    by re-orthonormalization and a call to ``self.update()``.
    """
    # dX is (A, dC) where U->Uexp(-A) and C->C+dC
    Nw = self.nwannier
    Nk = self.Nk
    M_k = self.fixedstates_k
    L_k = self.edf_k
    if updaterot:
        A_kww = dX[:Nk * Nw**2].reshape(Nk, Nw, Nw)
        for U, A in zip(self.U_kww, A_kww):
            # H is hermitian, so exp(iH) can be built from its
            # eigendecomposition.
            H = -1.j * A.conj()
            epsilon, Z = np.linalg.eigh(H)
            # Z contains the eigenvectors as COLUMNS.
            # Since H = iA, dU = exp(-A) = exp(iH) = ZDZ^d
            dU = np.dot(Z * np.exp(1.j * epsilon), dag(Z))
            # Update U in place so self.U_kww sees the change.
            U[:] = np.dot(U, dU)
    if updatecoeff:
        start = 0
        for C, unocc, L in zip(self.C_kul, self.nbands - M_k, L_k):
            # Nothing to update for k-points without extra degrees of
            # freedom or without unoccupied states.
            if L == 0 or unocc == 0:
                continue
            Ncoeff = L * unocc
            deltaC = dX[Nk * Nw**2 + start: Nk * Nw**2 + start + Ncoeff]
            C += deltaC.reshape(unocc, L)
            # Restore orthonormality of the coefficient columns.
            gram_schmidt(C)
            start += Ncoeff
    self.update()
|
slabanja/ase
|
ase/dft/wannier.py
|
Python
|
gpl-2.0
| 28,927
|
[
"ASE",
"Gaussian"
] |
d9c3de27e3ff48c0cefdd0f28db513d195d8d2df7d4486c27024669472c9dc16
|
# test_codecs.py from CPython 2.7, modified for Jython
from test import test_support
import unittest
import codecs
import locale
import sys, StringIO
if not test_support.is_jython:
import _testcapi
class Queue(object):
    """FIFO of characters: write at the tail, read from the head."""

    def __init__(self):
        # Everything written but not yet read.
        self._buffer = ""

    def write(self, chars):
        # Append the new characters at the tail.
        self._buffer = self._buffer + chars

    def read(self, size=-1):
        # A negative size drains the whole queue (mirrors file.read()).
        if size < 0:
            drained, self._buffer = self._buffer, ""
            return drained
        head, self._buffer = self._buffer[:size], self._buffer[size:]
        return head
class ReadTest(unittest.TestCase):
    """Shared stream-reader / incremental-decoder tests.

    Concrete subclasses set a class attribute ``encoding``; every test
    here exercises that codec via ``codecs.getreader`` and
    ``codecs.getincrementaldecoder``.
    """

    def check_partial(self, input, partialresults):
        # get a StreamReader for the encoding and feed the bytestring version
        # of input to the reader byte by byte. Read everything available from
        # the StreamReader and check that the results equal the appropriate
        # entries from partialresults.
        q = Queue()
        r = codecs.getreader(self.encoding)(q)
        result = u""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            q.write(c)
            result += r.read()
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(r.read(), u"")
        self.assertEqual(r.bytebuffer, "")
        self.assertEqual(r.charbuffer, u"")
        # do the check again, this time using a incremental decoder
        d = codecs.getincrementaldecoder(self.encoding)()
        result = u""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            result += d.decode(c)
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(d.decode("", True), u"")
        self.assertEqual(d.buffer, "")
        # Check whether the reset method works properly
        d.reset()
        result = u""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            result += d.decode(c)
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(d.decode("", True), u"")
        self.assertEqual(d.buffer, "")
        # check iterdecode()
        encoded = input.encode(self.encoding)
        self.assertEqual(
            input,
            u"".join(codecs.iterdecode(encoded, self.encoding))
        )

    def test_readline(self):
        def getreader(input):
            # Wrap the encoded input in a StreamReader for self.encoding.
            stream = StringIO.StringIO(input.encode(self.encoding))
            return codecs.getreader(self.encoding)(stream)

        def readalllines(input, keepends=True, size=None):
            # Read everything line by line and join with '|' markers.
            reader = getreader(input)
            lines = []
            while True:
                line = reader.readline(size=size, keepends=keepends)
                if not line:
                    break
                lines.append(line)
            return "|".join(lines)

        s = u"foo\nbar\r\nbaz\rspam\u2028eggs"
        sexpected = u"foo\n|bar\r\n|baz\r|spam\u2028|eggs"
        sexpectednoends = u"foo|bar|baz|spam|eggs"
        self.assertEqual(readalllines(s, True), sexpected)
        self.assertEqual(readalllines(s, False), sexpectednoends)
        self.assertEqual(readalllines(s, True, 10), sexpected)
        self.assertEqual(readalllines(s, False, 10), sexpectednoends)
        # Test long lines (multiple calls to read() in readline())
        vw = []
        vwo = []
        for (i, lineend) in enumerate(u"\n \r\n \r \u2028".split()):
            vw.append((i*200)*u"\3042" + lineend)
            vwo.append((i*200)*u"\3042")
        self.assertEqual(readalllines("".join(vw), True), "".join(vw))
        self.assertEqual(readalllines("".join(vw), False),"".join(vwo))
        # Test lines where the first read might end with \r, so the
        # reader has to look ahead whether this is a lone \r or a \r\n
        for size in xrange(80):
            for lineend in u"\n \r\n \r \u2028".split():
                s = 10*(size*u"a" + lineend + u"xxx\n")
                reader = getreader(s)
                for i in xrange(10):
                    self.assertEqual(
                        reader.readline(keepends=True),
                        size*u"a" + lineend,
                    )
                reader = getreader(s)
                for i in xrange(10):
                    self.assertEqual(
                        reader.readline(keepends=False),
                        size*u"a",
                    )

    def test_bug1175396(self):
        # Regression test: iterating a StreamReader over this realistic
        # multi-line input must yield exactly the original lines.
        s = [
            '<%!--===================================================\r\n',
            '    BLOG index page: show recent articles,\r\n',
            '    today\'s articles, or articles of a specific date.\r\n',
            '========================================================--%>\r\n',
            '<%@inputencoding="ISO-8859-1"%>\r\n',
            '<%@pagetemplate=TEMPLATE.y%>\r\n',
            '<%@import=import frog.util, frog%>\r\n',
            '<%@import=import frog.objects%>\r\n',
            '<%@import=from frog.storageerrors import StorageError%>\r\n',
            '<%\r\n',
            '\r\n',
            'import logging\r\n',
            'log=logging.getLogger("Snakelets.logger")\r\n',
            '\r\n',
            '\r\n',
            'user=self.SessionCtx.user\r\n',
            'storageEngine=self.SessionCtx.storageEngine\r\n',
            '\r\n',
            '\r\n',
            'def readArticlesFromDate(date, count=None):\r\n',
            '    entryids=storageEngine.listBlogEntries(date)\r\n',
            '    entryids.reverse() # descending\r\n',
            '    if count:\r\n',
            '        entryids=entryids[:count]\r\n',
            '    try:\r\n',
            '        return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
            '    except StorageError,x:\r\n',
            '        log.error("Error loading articles: "+str(x))\r\n',
            '        self.abort("cannot load articles")\r\n',
            '\r\n',
            'showdate=None\r\n',
            '\r\n',
            'arg=self.Request.getArg()\r\n',
            'if arg=="today":\r\n',
            '    #-------------------- TODAY\'S ARTICLES\r\n',
            '    self.write("<h2>Today\'s articles</h2>")\r\n',
            '    showdate = frog.util.isodatestr() \r\n',
            '    entries = readArticlesFromDate(showdate)\r\n',
            'elif arg=="active":\r\n',
            '    #-------------------- ACTIVE ARTICLES redirect\r\n',
            '    self.Yredirect("active.y")\r\n',
            'elif arg=="login":\r\n',
            '    #-------------------- LOGIN PAGE redirect\r\n',
            '    self.Yredirect("login.y")\r\n',
            'elif arg=="date":\r\n',
            '    #-------------------- ARTICLES OF A SPECIFIC DATE\r\n',
            '    showdate = self.Request.getParameter("date")\r\n',
            '    self.write("<h2>Articles written on %s</h2>"% frog.util.mediumdatestr(showdate))\r\n',
            '    entries = readArticlesFromDate(showdate)\r\n',
            'else:\r\n',
            '    #-------------------- RECENT ARTICLES\r\n',
            '    self.write("<h2>Recent articles</h2>")\r\n',
            '    dates=storageEngine.listBlogEntryDates()\r\n',
            '    if dates:\r\n',
            '        entries=[]\r\n',
            '        SHOWAMOUNT=10\r\n',
            '        for showdate in dates:\r\n',
            '            entries.extend( readArticlesFromDate(showdate, SHOWAMOUNT-len(entries)) )\r\n',
            '            if len(entries)>=SHOWAMOUNT:\r\n',
            '                break\r\n',
            '        \r\n',
        ]
        stream = StringIO.StringIO("".join(s).encode(self.encoding))
        reader = codecs.getreader(self.encoding)(stream)
        for (i, line) in enumerate(reader):
            self.assertEqual(line, s[i])

    def test_readlinequeue(self):
        # Feed the writer incrementally and check readline() resolves
        # line endings correctly across write boundaries.
        q = Queue()
        writer = codecs.getwriter(self.encoding)(q)
        reader = codecs.getreader(self.encoding)(q)
        # No lineends
        writer.write(u"foo\r")
        self.assertEqual(reader.readline(keepends=False), u"foo")
        writer.write(u"\nbar\r")
        self.assertEqual(reader.readline(keepends=False), u"")
        self.assertEqual(reader.readline(keepends=False), u"bar")
        writer.write(u"baz")
        self.assertEqual(reader.readline(keepends=False), u"baz")
        self.assertEqual(reader.readline(keepends=False), u"")
        # Lineends
        writer.write(u"foo\r")
        self.assertEqual(reader.readline(keepends=True), u"foo\r")
        writer.write(u"\nbar\r")
        self.assertEqual(reader.readline(keepends=True), u"\n")
        self.assertEqual(reader.readline(keepends=True), u"bar\r")
        writer.write(u"baz")
        self.assertEqual(reader.readline(keepends=True), u"baz")
        self.assertEqual(reader.readline(keepends=True), u"")
        writer.write(u"foo\r\n")
        self.assertEqual(reader.readline(keepends=True), u"foo\r\n")

    def test_bug1098990_a(self):
        # Long lines followed by a short one must not confuse readline().
        s1 = u"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
        s2 = u"offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
        s3 = u"next line.\r\n"
        s = (s1+s2+s3).encode(self.encoding)
        stream = StringIO.StringIO(s)
        reader = codecs.getreader(self.encoding)(stream)
        self.assertEqual(reader.readline(), s1)
        self.assertEqual(reader.readline(), s2)
        self.assertEqual(reader.readline(), s3)
        self.assertEqual(reader.readline(), u"")

    def test_bug1098990_b(self):
        # Variant of the bug above with several similar-length lines.
        s1 = u"aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
        s2 = u"bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
        s3 = u"stillokay:bbbbxx\r\n"
        s4 = u"broken!!!!badbad\r\n"
        s5 = u"againokay.\r\n"
        s = (s1+s2+s3+s4+s5).encode(self.encoding)
        stream = StringIO.StringIO(s)
        reader = codecs.getreader(self.encoding)(stream)
        self.assertEqual(reader.readline(), s1)
        self.assertEqual(reader.readline(), s2)
        self.assertEqual(reader.readline(), s3)
        self.assertEqual(reader.readline(), s4)
        self.assertEqual(reader.readline(), s5)
        self.assertEqual(reader.readline(), u"")
class UTF32Test(ReadTest):
    """Tests for the BOM-detecting 'utf-32' codec."""
    encoding = "utf-32"

    # u"spamspam" encoded with a little-endian / big-endian BOM.
    spamle = ('\xff\xfe\x00\x00'
              's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00'
              's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00')
    spambe = ('\x00\x00\xfe\xff'
              '\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m'
              '\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m')

    def test_only_one_bom(self):
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = StringIO.StringIO()
        f = writer(s)
        f.write(u"spam")
        f.write(u"spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = StringIO.StringIO(d)
        f = reader(s)
        self.assertEqual(f.read(), u"spamspam")

    def test_badbom(self):
        # An all-0xff prefix is not a valid UTF-32 BOM.
        s = StringIO.StringIO(4*"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
        s = StringIO.StringIO(8*"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

    def test_partial(self):
        self.check_partial(
            u"\x00\xff\u0100\uffff",
            [
                u"", # first byte of BOM read
                u"", # second byte of BOM read
                u"", # third byte of BOM read
                u"", # fourth byte of BOM read => byteorder known
                u"",
                u"",
                u"",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
            ]
        )

    def test_handlers(self):
        # 'replace' yields U+FFFD for a truncated byte; 'ignore' drops it.
        self.assertEqual((u'\ufffd', 1),
                         codecs.utf_32_decode('\x01', 'replace', True))
        self.assertEqual((u'', 1),
                         codecs.utf_32_decode('\x01', 'ignore', True))

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
                          "\xff", "strict", True)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded_le = '\xff\xfe\x00\x00' + '\x00\x00\x01\x00' * 1024
        self.assertEqual(u'\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_le)[0])
        encoded_be = '\x00\x00\xfe\xff' + '\x00\x01\x00\x00' * 1024
        self.assertEqual(u'\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_be)[0])
class UTF32LETest(ReadTest):
    """Tests for the fixed-byte-order 'utf-32-le' codec (no BOM)."""
    encoding = "utf-32-le"

    def test_partial(self):
        self.check_partial(
            u"\x00\xff\u0100\uffff",
            [
                u"",
                u"",
                u"",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
            ]
        )

    def test_simple(self):
        # Little-endian byte order: least significant byte first.
        self.assertEqual(u"\U00010203".encode(self.encoding), "\x03\x02\x01\x00")

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
                          "\xff", "strict", True)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = '\x00\x00\x01\x00' * 1024
        self.assertEqual(u'\U00010000' * 1024,
                         codecs.utf_32_le_decode(encoded)[0])
class UTF32BETest(ReadTest):
    """Tests for the fixed-byte-order 'utf-32-be' codec (no BOM)."""
    encoding = "utf-32-be"

    def test_partial(self):
        self.check_partial(
            u"\x00\xff\u0100\uffff",
            [
                u"",
                u"",
                u"",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
            ]
        )

    def test_simple(self):
        # Big-endian byte order: most significant byte first.
        self.assertEqual(u"\U00010203".encode(self.encoding), "\x00\x01\x02\x03")

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
                          "\xff", "strict", True)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = '\x00\x01\x00\x00' * 1024
        self.assertEqual(u'\U00010000' * 1024,
                         codecs.utf_32_be_decode(encoded)[0])
class UTF16Test(ReadTest):
    """Tests for the BOM-detecting 'utf-16' codec."""
    encoding = "utf-16"

    # u"spamspam" encoded with a little-endian / big-endian BOM.
    spamle = '\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
    spambe = '\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'

    def test_only_one_bom(self):
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = StringIO.StringIO()
        f = writer(s)
        f.write(u"spam")
        f.write(u"spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = StringIO.StringIO(d)
        f = reader(s)
        self.assertEqual(f.read(), u"spamspam")

    def test_badbom(self):
        # 0xffff is not a valid UTF-16 BOM.
        s = StringIO.StringIO("\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
        s = StringIO.StringIO("\xff\xff\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

    def test_partial(self):
        self.check_partial(
            u"\x00\xff\u0100\uffff",
            [
                u"", # first byte of BOM read
                u"", # second byte of BOM read => byteorder known
                u"",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
            ]
        )

    def test_handlers(self):
        # 'replace' yields U+FFFD for a truncated byte; 'ignore' drops it.
        self.assertEqual((u'\ufffd', 1),
                         codecs.utf_16_decode('\x01', 'replace', True))
        self.assertEqual((u'', 1),
                         codecs.utf_16_decode('\x01', 'ignore', True))

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_decode, "\xff", "strict", True)

    def test_bug691291(self):
        # Files are always opened in binary mode, even if no binary mode was
        # specified. This means that no automatic conversion of '\n' is done
        # on reading and writing.
        s1 = u'Hello\r\nworld\r\n'
        s = s1.encode(self.encoding)
        self.addCleanup(test_support.unlink, test_support.TESTFN)
        with open(test_support.TESTFN, 'wb') as fp:
            fp.write(s)
        with codecs.open(test_support.TESTFN, 'U', encoding=self.encoding) as reader:
            self.assertEqual(reader.read(), s1)
class UTF16LETest(ReadTest):
    """Tests for the fixed-byte-order 'utf-16-le' codec (no BOM)."""
    encoding = "utf-16-le"

    def test_partial(self):
        self.check_partial(
            u"\x00\xff\u0100\uffff",
            [
                u"",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
            ]
        )

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode, "\xff", "strict", True)
class UTF16BETest(ReadTest):
    """Tests for the fixed-byte-order 'utf-16-be' codec (no BOM)."""
    encoding = "utf-16-be"

    def test_partial(self):
        self.check_partial(
            u"\x00\xff\u0100\uffff",
            [
                u"",
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100",
                u"\x00\xff\u0100\uffff",
            ]
        )

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode, "\xff", "strict", True)
class UTF8Test(ReadTest):
    """Tests for the variable-width 'utf-8' codec."""
    encoding = "utf-8"

    def test_partial(self):
        self.check_partial(
            u"\x00\xff\u07ff\u0800\uffff",
            [
                u"\x00",
                u"\x00",
                u"\x00\xff",
                u"\x00\xff",
                u"\x00\xff\u07ff",
                u"\x00\xff\u07ff",
                u"\x00\xff\u07ff",
                u"\x00\xff\u07ff\u0800",
                u"\x00\xff\u07ff\u0800",
                u"\x00\xff\u07ff\u0800",
                u"\x00\xff\u07ff\u0800\uffff",
            ]
        )
class UTF7Test(ReadTest):
    """Tests for the 'utf-7' codec (modified Base64 sections)."""
    encoding = "utf-7"

    def test_partial(self):
        self.check_partial(
            u"a+-b",
            [
                u"a",
                u"a",
                u"a+",
                u"a+-",
                u"a+-b",
            ]
        )

    # Jython extra (test supplementary characters)
    @unittest.skipIf(not test_support.is_jython, "Jython supports surrogate pairs")
    def test_partial_supp(self):
        # Check the encoding is what we think it is
        ustr = u"x\U00023456.\u0177\U00023456\u017az"
        bstr = b'x+2E3cVg.+AXfYTdxWAXo-z'
        self.assertEqual(ustr.encode(self.encoding), bstr)

        self.check_partial(
            ustr,
            [
                u"x",
                u"x", # '+' added: begins Base64
                u"x",
                u"x",
                u"x",
                u"x",
                u"x",
                u"x",
                u"x\U00023456.", # '.' added: ends Base64
                u"x\U00023456.", # '+' added: begins Base64
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.",
                u"x\U00023456.\u0177\U00023456\u017a", # '-' added: ends Base64
                u"x\U00023456.\u0177\U00023456\u017az",
            ]
        )
class UTF16ExTest(unittest.TestCase):
    """Tests for the low-level codecs.utf_16_ex_decode entry point."""

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_ex_decode, "\xff", "strict", 0, True)

    def test_bad_args(self):
        # Calling with no arguments must raise TypeError.
        self.assertRaises(TypeError, codecs.utf_16_ex_decode)
@unittest.skipIf(test_support.is_jython, "Jython has no _codecs.readbuffer_encode method")
class ReadBufferTest(unittest.TestCase):
    """Tests for codecs.readbuffer_encode (CPython-only)."""

    def test_array(self):
        # readbuffer_encode accepts any read-buffer object, e.g. an array.
        import array
        self.assertEqual(
            codecs.readbuffer_encode(array.array("c", "spam")),
            ("spam", 4)
        )

    def test_empty(self):
        self.assertEqual(codecs.readbuffer_encode(""), ("", 0))

    def test_bad_args(self):
        self.assertRaises(TypeError, codecs.readbuffer_encode)
        self.assertRaises(TypeError, codecs.readbuffer_encode, 42)
@unittest.skipIf(test_support.is_jython, "Jython has no _codecs.charbuffer_encode method")
class CharBufferTest(unittest.TestCase):
    """Tests for codecs.charbuffer_encode (CPython-only)."""

    def test_string(self):
        self.assertEqual(codecs.charbuffer_encode("spam"), ("spam", 4))

    def test_empty(self):
        self.assertEqual(codecs.charbuffer_encode(""), ("", 0))

    def test_bad_args(self):
        self.assertRaises(TypeError, codecs.charbuffer_encode)
        self.assertRaises(TypeError, codecs.charbuffer_encode, 42)
class UTF8SigTest(ReadTest):
    """Tests for 'utf-8-sig': UTF-8 with a leading BOM signature."""
    encoding = "utf-8-sig"

    def test_partial(self):
        self.check_partial(
            u"\ufeff\x00\xff\u07ff\u0800\uffff",
            [
                u"",
                u"",
                u"", # First BOM has been read and skipped
                u"",
                u"",
                u"\ufeff", # Second BOM has been read and emitted
                u"\ufeff\x00", # "\x00" read and emitted
                u"\ufeff\x00", # First byte of encoded u"\xff" read
                u"\ufeff\x00\xff", # Second byte of encoded u"\xff" read
                u"\ufeff\x00\xff", # First byte of encoded u"\u07ff" read
                u"\ufeff\x00\xff\u07ff", # Second byte of encoded u"\u07ff" read
                u"\ufeff\x00\xff\u07ff",
                u"\ufeff\x00\xff\u07ff",
                u"\ufeff\x00\xff\u07ff\u0800",
                u"\ufeff\x00\xff\u07ff\u0800",
                u"\ufeff\x00\xff\u07ff\u0800",
                u"\ufeff\x00\xff\u07ff\u0800\uffff",
            ]
        )

    def test_bug1601501(self):
        # SF bug #1601501: check that the codec works with a buffer
        unicode("\xef\xbb\xbf", "utf-8-sig")

    def test_bom(self):
        # The incremental decoder strips the signature; a BOM-less
        # encoding of "spam" must round-trip unchanged.
        d = codecs.getincrementaldecoder("utf-8-sig")()
        s = u"spam"
        self.assertEqual(d.decode(s.encode("utf-8-sig")), s)

    def test_stream_bom(self):
        # Input WITH a BOM: the signature must be stripped for every
        # read-chunk size.
        unistring = u"ABC\u00A1\u2200XYZ"
        bytestring = codecs.BOM_UTF8 + "ABC\xC2\xA1\xE2\x88\x80XYZ"

        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + range(1, 11) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(StringIO.StringIO(bytestring))
            ostream = StringIO.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()

                if not data:
                    break

                ostream.write(data)

            got = ostream.getvalue()
            self.assertEqual(got, unistring)

    def test_stream_bare(self):
        # Input WITHOUT a BOM: decoding must still work for every
        # read-chunk size.
        unistring = u"ABC\u00A1\u2200XYZ"
        bytestring = "ABC\xC2\xA1\xE2\x88\x80XYZ"

        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + range(1, 11) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(StringIO.StringIO(bytestring))
            ostream = StringIO.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()

                if not data:
                    break

                ostream.write(data)

            got = ostream.getvalue()
            self.assertEqual(got, unistring)
class EscapeDecodeTest(unittest.TestCase):
    """Tests for codecs.escape_decode."""

    def test_empty(self):
        # Decoding the empty string yields an empty result and length 0.
        self.assertEqual(codecs.escape_decode(""), ("", 0))
class RecodingTest(unittest.TestCase):
    """Regression test for recoding via codecs.EncodedFile."""

    def test_recoding(self):
        f = StringIO.StringIO()
        f2 = codecs.EncodedFile(f, "unicode_internal", "utf-8")
        # f2.write(u"a")
        # Must be bytes in Jython (and probably should have been in CPython)
        f2.write(b"\x00\x00\x00\x61")
        f2.close()
        # Python used to crash on this at exit because of a refcount
        # bug in _codecsmodule.c
# From RFC 3492
# Test vectors for the punycode codec: (unicode, punycode) pairs taken
# from the RFC's sample strings, section 7.1.
punycode_testcases = [
    # A Arabic (Egyptian):
    (u"\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
     u"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
     "egbpdaj6bu4bxfgehfvwxn"),
    # B Chinese (simplified):
    (u"\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
     "ihqwcrb4cv8a8dqg056pqjye"),
    # C Chinese (traditional):
    (u"\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
     "ihqwctvzc91f659drss3x8bo0yb"),
    # D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
    (u"\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
     u"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
     u"\u0065\u0073\u006B\u0079",
     "Proprostnemluvesky-uyb24dma41a"),
    # E Hebrew:
    (u"\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
     u"\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
     u"\u05D1\u05E8\u05D9\u05EA",
     "4dbcagdahymbxekheh6e0a7fei0b"),
    # F Hindi (Devanagari):
    (u"\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
     u"\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
     u"\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
     u"\u0939\u0948\u0902",
     "i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
    #(G) Japanese (kanji and hiragana):
    (u"\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
     u"\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
     "n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
    # (H) Korean (Hangul syllables):
    (u"\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
     u"\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
     u"\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
     "989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
     "psd879ccm6fea98c"),
    # (I) Russian (Cyrillic):
    (u"\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
     u"\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
     u"\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
     u"\u0438",
     "b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
    # (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
    (u"\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
     u"\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
     u"\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
     u"\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
     u"\u0061\u00F1\u006F\u006C",
     "PorqunopuedensimplementehablarenEspaol-fmd56a"),
    # (K) Vietnamese:
    #  T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
    #   <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
    (u"\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
     u"\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
     u"\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
     u"\u0056\u0069\u1EC7\u0074",
     "TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
    #(L) 3<nen>B<gumi><kinpachi><sensei>
    (u"\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
     "3B-ww4c5e180e575a65lsy2b"),
    # (M) <amuro><namie>-with-SUPER-MONKEYS
    (u"\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
     u"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
     u"\u004F\u004E\u004B\u0045\u0059\u0053",
     "-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
    # (N) Hello-Another-Way-<sorezore><no><basho>
    (u"\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
     u"\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
     u"\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
     "Hello-Another-Way--fc4qua05auwb3674vfr0b"),
    # (O) <hitotsu><yane><no><shita>2
    (u"\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
     "2-u9tlzr9756bt3uc0v"),
    # (P) Maji<de>Koi<suru>5<byou><mae>
    (u"\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
     u"\u308B\u0035\u79D2\u524D",
     "MajiKoi5-783gue6qz075azm5e"),
    # (Q) <pafii>de<runba>
    (u"\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
     "de-jg4avhby1noc0d"),
    # (R) <sono><supiido><de>
    (u"\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
     "d9juau41awczczp"),
    # (S) -> $1.00 <-
    (u"\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
     u"\u003C\u002D",
     "-> $1.00 <--")
    ]

# Sanity check (Python 2 print statement): every entry must be a
# (unicode, punycode) pair; anything else is printed for inspection.
for i in punycode_testcases:
    if len(i)!=2:
        print repr(i)
class PunycodeTest(unittest.TestCase):
    """Round-trip tests for the punycode codec against the RFC 3492
    sample vectors in ``punycode_testcases``."""

    def test_encode(self):
        for uni, puny in punycode_testcases:
            # Need to convert both strings to lower case, since
            # some of the extended encodings use upper case, but our
            # code produces only lower case. Converting just puny to
            # lower is also insufficient, since some of the input characters
            # are upper case.
            self.assertEqual(uni.encode("punycode").lower(), puny.lower())

    def test_decode(self):
        for uni, puny in punycode_testcases:
            self.assertEqual(uni, puny.decode("punycode"))
class UnicodeInternalTest(unittest.TestCase):
    """Tests for the 'unicode_internal' codec; most only run on wide
    (UCS-4) builds, guarded by sys.maxunicode."""

    def test_bug1251300(self):
        # Decoding with unicode_internal used to not correctly handle "code
        # points" above 0x10ffff on UCS-4 builds.
        if sys.maxunicode > 0xffff:
            ok = [
                ("\x00\x10\xff\xff", u"\U0010ffff"),
                ("\x00\x00\x01\x01", u"\U00000101"),
                ("", u""),
            ]
            not_ok = [
                "\x7f\xff\xff\xff",
                "\x80\x00\x00\x00",
                "\x81\x00\x00\x00",
                "\x00",
                "\x00\x00\x00\x00\x00",
            ]
            for internal, uni in ok:
                if sys.byteorder == "little":
                    # The raw bytes are stored in native byte order.
                    internal = "".join(reversed(internal))
                self.assertEqual(uni, internal.decode("unicode_internal"))
            for internal in not_ok:
                if sys.byteorder == "little":
                    internal = "".join(reversed(internal))
                self.assertRaises(UnicodeDecodeError, internal.decode,
                                  "unicode_internal")

    def test_decode_error_attributes(self):
        if sys.maxunicode > 0xffff:
            try:
                "\x00\x00\x00\x00\x00\x11\x11\x00".decode("unicode_internal")
            except UnicodeDecodeError, ex:
                if test_support.is_jython:
                    # Jython delegates internally to utf-32be and it shows here
                    self.assertEqual("utf-32", ex.encoding)
                else:
                    self.assertEqual("unicode_internal", ex.encoding)
                self.assertEqual("\x00\x00\x00\x00\x00\x11\x11\x00", ex.object)
                self.assertEqual(4, ex.start)
                self.assertEqual(8, ex.end)
            else:
                self.fail("UnicodeDecodeError not raised")

    def test_decode_callback(self):
        if sys.maxunicode > 0xffff:
            # Register an 'ignore'-style handler and check bad bytes are
            # skipped while the surrounding characters survive.
            codecs.register_error("UnicodeInternalTest", codecs.ignore_errors)
            decoder = codecs.getdecoder("unicode_internal")
            ab = u"ab".encode("unicode_internal")
            ignored = decoder("%s\x22\x22\x22\x22%s" % (ab[:4], ab[4:]),
                              "UnicodeInternalTest")
            self.assertEqual((u"ab", 12), ignored)

    def test_encode_length(self):
        # Issue 3739
        encoder = codecs.getencoder("unicode_internal")
        self.assertEqual(encoder(u"a")[1], 1)
        self.assertEqual(encoder(u"\xe9\u0142")[1], 2)

        encoder = codecs.getencoder("string-escape")
        self.assertEqual(encoder(r'\x00')[1], 4)
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
# Nameprep test vectors: (utf-8 input, expected utf-8 output) pairs.
# An expected output of None means the input contains prohibited
# characters; a (None, None) entry marks a skipped test.
nameprep_tests = [
    # 3.1 Map to nothing.
    ('foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
     '\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
     '\xb8\x8f\xef\xbb\xbf',
     'foobarbaz'),
    # 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
    ('CAFE',
     'cafe'),
    # 3.3 Case folding 8bit U+00DF (german sharp s).
    # The original test case is bogus; it says \xc3\xdf
    ('\xc3\x9f',
     'ss'),
    # 3.4 Case folding U+0130 (turkish capital I with dot).
    ('\xc4\xb0',
     'i\xcc\x87'),
    # 3.5 Case folding multibyte U+0143 U+037A.
    ('\xc5\x83\xcd\xba',
     '\xc5\x84 \xce\xb9'),
    # 3.6 Case folding U+2121 U+33C6 U+1D7BB.
    # XXX: skip this as it fails in UCS-2 mode
    #('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
    # 'telc\xe2\x88\x95kg\xcf\x83'),
    (None, None),
    # 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
    ('j\xcc\x8c\xc2\xa0\xc2\xaa',
     '\xc7\xb0 a'),
    # 3.8 Case folding U+1FB7 and normalization.
    ('\xe1\xbe\xb7',
     '\xe1\xbe\xb6\xce\xb9'),
    # 3.9 Self-reverting case folding U+01F0 and normalization.
    # The original test case is bogus, it says `\xc7\xf0'
    ('\xc7\xb0',
     '\xc7\xb0'),
    # 3.10 Self-reverting case folding U+0390 and normalization.
    ('\xce\x90',
     '\xce\x90'),
    # 3.11 Self-reverting case folding U+03B0 and normalization.
    ('\xce\xb0',
     '\xce\xb0'),
    # 3.12 Self-reverting case folding U+1E96 and normalization.
    ('\xe1\xba\x96',
     '\xe1\xba\x96'),
    # 3.13 Self-reverting case folding U+1F56 and normalization.
    ('\xe1\xbd\x96',
     '\xe1\xbd\x96'),
    # 3.14 ASCII space character U+0020.
    (' ',
     ' '),
    # 3.15 Non-ASCII 8bit space character U+00A0.
    ('\xc2\xa0',
     ' '),
    # 3.16 Non-ASCII multibyte space character U+1680.
    ('\xe1\x9a\x80',
     None),
    # 3.17 Non-ASCII multibyte space character U+2000.
    ('\xe2\x80\x80',
     ' '),
    # 3.18 Zero Width Space U+200b.
    ('\xe2\x80\x8b',
     ''),
    # 3.19 Non-ASCII multibyte space character U+3000.
    ('\xe3\x80\x80',
     ' '),
    # 3.20 ASCII control characters U+0010 U+007F.
    ('\x10\x7f',
     '\x10\x7f'),
    # 3.21 Non-ASCII 8bit control character U+0085.
    ('\xc2\x85',
     None),
    # 3.22 Non-ASCII multibyte control character U+180E.
    ('\xe1\xa0\x8e',
     None),
    # 3.23 Zero Width No-Break Space U+FEFF.
    ('\xef\xbb\xbf',
     ''),
    # 3.24 Non-ASCII control character U+1D175.
    ('\xf0\x9d\x85\xb5',
     None),
    # 3.25 Plane 0 private use character U+F123.
    ('\xef\x84\xa3',
     None),
    # 3.26 Plane 15 private use character U+F1234.
    ('\xf3\xb1\x88\xb4',
     None),
    # 3.27 Plane 16 private use character U+10F234.
    ('\xf4\x8f\x88\xb4',
     None),
    # 3.28 Non-character code point U+8FFFE.
    ('\xf2\x8f\xbf\xbe',
     None),
    # 3.29 Non-character code point U+10FFFF.
    ('\xf4\x8f\xbf\xbf',
     None),
    # 3.30 Surrogate code U+DF42.
    ('\xed\xbd\x82',
     None),
    # 3.31 Non-plain text character U+FFFD.
    ('\xef\xbf\xbd',
     None),
    # 3.32 Ideographic description character U+2FF5.
    ('\xe2\xbf\xb5',
     None),
    # 3.33 Display property character U+0341.
    ('\xcd\x81',
     '\xcc\x81'),
    # 3.34 Left-to-right mark U+200E.
    ('\xe2\x80\x8e',
     None),
    # 3.35 Deprecated U+202A.
    ('\xe2\x80\xaa',
     None),
    # 3.36 Language tagging character U+E0001.
    ('\xf3\xa0\x80\x81',
     None),
    # 3.37 Language tagging character U+E0042.
    ('\xf3\xa0\x81\x82',
     None),
    # 3.38 Bidi: RandALCat character U+05BE and LCat characters.
    ('foo\xd6\xbebar',
     None),
    # 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
    ('foo\xef\xb5\x90bar',
     None),
    # 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
    ('foo\xef\xb9\xb6bar',
     'foo \xd9\x8ebar'),
    # 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
    ('\xd8\xa71',
     None),
    # 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
    ('\xd8\xa71\xd8\xa8',
     '\xd8\xa71\xd8\xa8'),
    # 3.43 Unassigned code point U+E0002.
    # Skip this test as we allow unassigned
    #('\xf3\xa0\x80\x82',
    # None),
    (None, None),
    # 3.44 Larger test (shrinking).
    # Original test case reads \xc3\xdf
    ('X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
     '\xaa\xce\xb0\xe2\x80\x80',
     'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
    # 3.45 Larger test (expanding).
    # Original test case reads \xc3\x9f
    ('X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
     '\x80',
     'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
     '\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
     '\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
    ]
@unittest.skipIf(test_support.is_jython, "FIXME: incomplete unicodedata module")
class NameprepTest(unittest.TestCase):
    """Run the RFC 3454 (nameprep) test vectors against encodings.idna."""

    def test_nameprep(self):
        # nameprep_tests is the module-level list of (input, expected) pairs;
        # expected is None when the input must be rejected.
        from encodings.idna import nameprep
        for pos, (orig, prepped) in enumerate(nameprep_tests):
            if orig is None:
                # Skipped
                continue
            # The Unicode strings are given in UTF-8
            orig = unicode(orig, "utf-8")
            if prepped is None:
                # Input contains prohibited characters
                self.assertRaises(UnicodeError, nameprep, orig)
            else:
                prepped = unicode(prepped, "utf-8")
                try:
                    self.assertEqual(nameprep(orig), prepped)
                except Exception,e:
                    # Report the 1-based RFC test number on failure.
                    raise test_support.TestFailed("Test 3.%d: %s" % (pos+1, str(e)))
@unittest.skipIf(test_support.is_jython, "FIXME: Jython issue 2000 missing support for IDNA")
class IDNACodecTest(unittest.TestCase):
    """Exercise the "idna" codec: builtin encode/decode, stream reading,
    and the incremental encoder/decoder interfaces."""

    def test_builtin_decode(self):
        self.assertEqual(unicode("python.org", "idna"), u"python.org")
        self.assertEqual(unicode("python.org.", "idna"), u"python.org.")
        self.assertEqual(unicode("xn--pythn-mua.org", "idna"), u"pyth\xf6n.org")
        self.assertEqual(unicode("xn--pythn-mua.org.", "idna"), u"pyth\xf6n.org.")

    def test_builtin_encode(self):
        self.assertEqual(u"python.org".encode("idna"), "python.org")
        self.assertEqual("python.org.".encode("idna"), "python.org.")
        self.assertEqual(u"pyth\xf6n.org".encode("idna"), "xn--pythn-mua.org")
        self.assertEqual(u"pyth\xf6n.org.".encode("idna"), "xn--pythn-mua.org.")

    def test_stream(self):
        import StringIO
        r = codecs.getreader("idna")(StringIO.StringIO("abc"))
        r.read(3)
        # Nothing left after the whole input was consumed.
        self.assertEqual(r.read(), u"")

    def test_incremental_decode(self):
        self.assertEqual(
            "".join(codecs.iterdecode("python.org", "idna")),
            u"python.org"
        )
        self.assertEqual(
            "".join(codecs.iterdecode("python.org.", "idna")),
            u"python.org."
        )
        self.assertEqual(
            "".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
            u"pyth\xf6n.org."
        )
        self.assertEqual(
            "".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
            u"pyth\xf6n.org."
        )

        # The decoder buffers input until a full label (dot-terminated) or
        # the final flush is seen.
        decoder = codecs.getincrementaldecoder("idna")()
        self.assertEqual(decoder.decode("xn--xam", ), u"")
        self.assertEqual(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
        self.assertEqual(decoder.decode(u"rg"), u"")
        self.assertEqual(decoder.decode(u"", True), u"org")

        decoder.reset()
        self.assertEqual(decoder.decode("xn--xam", ), u"")
        self.assertEqual(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
        self.assertEqual(decoder.decode("rg."), u"org.")
        self.assertEqual(decoder.decode("", True), u"")

    def test_incremental_encode(self):
        self.assertEqual(
            "".join(codecs.iterencode(u"python.org", "idna")),
            "python.org"
        )
        self.assertEqual(
            "".join(codecs.iterencode(u"python.org.", "idna")),
            "python.org."
        )
        self.assertEqual(
            "".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
            "xn--pythn-mua.org."
        )
        self.assertEqual(
            "".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
            "xn--pythn-mua.org."
        )

        # Like the decoder, the encoder emits output label-by-label.
        encoder = codecs.getincrementalencoder("idna")()
        self.assertEqual(encoder.encode(u"\xe4x"), "")
        self.assertEqual(encoder.encode(u"ample.org"), "xn--xample-9ta.")
        self.assertEqual(encoder.encode(u"", True), "org")

        encoder.reset()
        self.assertEqual(encoder.encode(u"\xe4x"), "")
        self.assertEqual(encoder.encode(u"ample.org."), "xn--xample-9ta.org.")
        self.assertEqual(encoder.encode(u"", True), "")
class CodecsModuleTest(unittest.TestCase):
    """Tests for the top-level helper functions of the codecs module."""

    def test_decode(self):
        self.assertEqual(codecs.decode('\xe4\xf6\xfc', 'latin-1'),
                         u'\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.decode)
        # Default encoding is ascii.
        self.assertEqual(codecs.decode('abc'), u'abc')
        self.assertRaises(UnicodeDecodeError, codecs.decode, '\xff', 'ascii')

    def test_encode(self):
        self.assertEqual(codecs.encode(u'\xe4\xf6\xfc', 'latin-1'),
                         '\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.encode)
        self.assertRaises(LookupError, codecs.encode, u"foo", "__spam__")
        self.assertEqual(codecs.encode(u'abc'), 'abc')
        self.assertRaises(UnicodeEncodeError, codecs.encode, u'\xffff', 'ascii')

    def test_register(self):
        self.assertRaises(TypeError, codecs.register)
        self.assertRaises(TypeError, codecs.register, 42)

    def test_lookup(self):
        self.assertRaises(TypeError, codecs.lookup)
        self.assertRaises(LookupError, codecs.lookup, "__spam__")
        self.assertRaises(LookupError, codecs.lookup, " ")

    def test_getencoder(self):
        self.assertRaises(TypeError, codecs.getencoder)
        self.assertRaises(LookupError, codecs.getencoder, "__spam__")

    def test_getdecoder(self):
        self.assertRaises(TypeError, codecs.getdecoder)
        self.assertRaises(LookupError, codecs.getdecoder, "__spam__")

    def test_getreader(self):
        self.assertRaises(TypeError, codecs.getreader)
        self.assertRaises(LookupError, codecs.getreader, "__spam__")

    def test_getwriter(self):
        self.assertRaises(TypeError, codecs.getwriter)
        self.assertRaises(LookupError, codecs.getwriter, "__spam__")

    def test_lookup_issue1813(self):
        # Issue #1813: under Turkish locales, lookup of some codecs failed
        # because 'I' is lowercased as a dotless "i"
        oldlocale = locale.getlocale(locale.LC_CTYPE)
        self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
        try:
            locale.setlocale(locale.LC_CTYPE, 'tr_TR')
        except locale.Error:
            # Unsupported locale on this system
            self.skipTest('test needs Turkish locale')
        c = codecs.lookup('ASCII')
        self.assertEqual(c.name, 'ascii')
class StreamReaderTest(unittest.TestCase):
    """Check StreamReader.readlines() on a UTF-8 byte stream."""

    def setUp(self):
        self.reader = codecs.getreader('utf-8')
        # Two Hangul syllables separated by a newline, as UTF-8 bytes.
        self.stream = StringIO.StringIO('\xed\x95\x9c\n\xea\xb8\x80')

    def test_readlines(self):
        f = self.reader(self.stream)
        self.assertEqual(f.readlines(), [u'\ud55c\n', u'\uae00'])
class EncodedFileTest(unittest.TestCase):
    """Check codecs.EncodedFile transcoding in both directions."""

    def test_basic(self):
        # Reading: UTF-8 underlying data surfaced re-encoded as UTF-16-LE.
        f = StringIO.StringIO('\xed\x95\x9c\n\xea\xb8\x80')
        ef = codecs.EncodedFile(f, 'utf-16-le', 'utf-8')
        self.assertEqual(ef.read(), '\\\xd5\n\x00\x00\xae')

        # Writing: UTF-8 input stored as Latin-1 in the underlying file.
        f = StringIO.StringIO()
        ef = codecs.EncodedFile(f, 'utf-8', 'latin1')
        ef.write('\xc3\xbc')
        self.assertEqual(f.getvalue(), '\xfc')
class Str2StrTest(unittest.TestCase):
    """Codecs mapping str to str (e.g. base64) must return str, not unicode."""

    def test_read(self):
        sin = "\x80".encode("base64_codec")
        reader = codecs.getreader("base64_codec")(StringIO.StringIO(sin))
        sout = reader.read()
        self.assertEqual(sout, "\x80")
        self.assertIsInstance(sout, str)

    def test_readline(self):
        sin = "\x80".encode("base64_codec")
        reader = codecs.getreader("base64_codec")(StringIO.StringIO(sin))
        sout = reader.readline()
        self.assertEqual(sout, "\x80")
        self.assertIsInstance(sout, str)
# Encoding tables driving the Basic*Test classes below.  Entries commented
# out with "FIXME: Jython issue 1066" are codecs Jython does not yet ship.
all_unicode_encodings = [
    "ascii",
    "base64_codec",
    # FIXME: Jython issue 1066: "big5",
    # FIXME: Jython issue 1066: "big5hkscs",
    "charmap",
    "cp037",
    "cp1006",
    "cp1026",
    "cp1140",
    "cp1250",
    "cp1251",
    "cp1252",
    "cp1253",
    "cp1254",
    "cp1255",
    "cp1256",
    "cp1257",
    "cp1258",
    "cp424",
    "cp437",
    "cp500",
    "cp720",
    "cp737",
    "cp775",
    "cp850",
    "cp852",
    "cp855",
    "cp856",
    "cp857",
    "cp858",
    "cp860",
    "cp861",
    "cp862",
    "cp863",
    "cp864",
    "cp865",
    "cp866",
    "cp869",
    "cp874",
    "cp875",
    # FIXME: Jython issue 1066: "cp932",
    # FIXME: Jython issue 1066: "cp949",
    # FIXME: Jython issue 1066: "cp950",
    # FIXME: Jython issue 1066: "euc_jis_2004",
    # FIXME: Jython issue 1066: 'euc_jisx0213',
    # FIXME: Jython issue 1066: 'euc_jp',
    # FIXME: Jython issue 1066: 'euc_kr',
    # FIXME: Jython issue 1066: 'gb18030',
    # FIXME: Jython issue 1066: 'gb2312',
    # FIXME: Jython issue 1066: 'gbk',
    "hex_codec",
    "hp_roman8",
    # FIXME: Jython issue 1066: 'hz',
    # FIXME: Jython issue 1066: "idna",
    # FIXME: Jython issue 1066: 'iso2022_jp',
    # FIXME: Jython issue 1066: 'iso2022_jp_1',
    # FIXME: Jython issue 1066: 'iso2022_jp_2',
    # FIXME: Jython issue 1066: 'iso2022_jp_2004',
    # FIXME: Jython issue 1066: 'iso2022_jp_3',
    # FIXME: Jython issue 1066: 'iso2022_jp_ext',
    # FIXME: Jython issue 1066: 'iso2022_kr',
    "iso8859_1",
    "iso8859_10",
    "iso8859_11",
    "iso8859_13",
    "iso8859_14",
    "iso8859_15",
    "iso8859_16",
    "iso8859_2",
    "iso8859_3",
    "iso8859_4",
    "iso8859_5",
    "iso8859_6",
    "iso8859_7",
    "iso8859_8",
    "iso8859_9",
    # FIXME: Jython issue 1066: 'johab',
    "koi8_r",
    "koi8_u",
    "latin_1",
    "mac_cyrillic",
    "mac_greek",
    "mac_iceland",
    "mac_latin2",
    "mac_roman",
    "mac_turkish",
    "palmos",
    "ptcp154",
    "punycode",
    "raw_unicode_escape",
    "rot_13",
    # FIXME: Jython issue 1066: 'shift_jis',
    # FIXME: Jython issue 1066: 'shift_jis_2004',
    # FIXME: Jython issue 1066: 'shift_jisx0213',
    "tis_620",
    "unicode_escape",
    "unicode_internal",
    "utf_16",
    "utf_16_be",
    "utf_16_le",
    "utf_7",
    "utf_8",
]

# mbcs exists only on Windows builds.
if hasattr(codecs, "mbcs_encode"):
    all_unicode_encodings.append("mbcs")

# The following encodings work only with str, not unicode
all_string_encodings = [
    "quopri_codec",
    "string_escape",
    "uu_codec",
]

# The following encoding is not tested, because it's not supposed
# to work:
#    "undefined"

# The following encodings don't work in stateful mode
broken_unicode_with_streams = [
    "base64_codec",
    "hex_codec",
    "punycode",
    "unicode_internal"
]
broken_incremental_coders = broken_unicode_with_streams[:]

# The following encodings only support "strict" mode
only_strict_mode = [
    "idna",
    "zlib_codec",
    "bz2_codec",
]

# bz2/zlib codecs are exercised only when the backing modules are available.
try:
    import bz2
except ImportError:
    pass
else:
    all_unicode_encodings.append("bz2_codec")
    broken_unicode_with_streams.append("bz2_codec")

try:
    import zlib
except ImportError:
    pass
else:
    all_unicode_encodings.append("zlib_codec")
    broken_unicode_with_streams.append("zlib_codec")
class BasicUnicodeTest(unittest.TestCase):
    """Round-trip every codec in all_unicode_encodings through the stateless,
    stream, incremental (Python and C API) and iterator interfaces."""

    @unittest.skipIf(test_support.is_jython, "_testcapi module not present in Jython")
    def test_basics(self):
        s = u"abc123" # all codecs should be able to encode these
        for encoding in all_unicode_encodings:
            # Codec name normalization: the looked-up name must match the
            # requested one modulo the "_codec" suffix and '-'/'_' spelling.
            name = codecs.lookup(encoding).name
            if encoding.endswith("_codec"):
                name += "_codec"
            elif encoding == "latin_1":
                name = "latin_1"
            self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))
            (bytes, size) = codecs.getencoder(encoding)(s)
            self.assertEqual(size, len(s), "%r != %r (encoding=%r)" % (size, len(s), encoding))
            (chars, size) = codecs.getdecoder(encoding)(bytes)
            self.assertEqual(chars, s, "%r != %r (encoding=%r)" % (chars, s, encoding))

            if encoding not in broken_unicode_with_streams:
                # check stream reader/writer
                # Queue is a helper class defined earlier in this module.
                q = Queue()
                writer = codecs.getwriter(encoding)(q)
                encodedresult = ""
                for c in s:
                    writer.write(c)
                    encodedresult += q.read()
                q = Queue()
                reader = codecs.getreader(encoding)(q)
                decodedresult = u""
                for c in encodedresult:
                    q.write(c)
                    decodedresult += reader.read()
                self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))

            if encoding not in broken_incremental_coders:
                # check incremental decoder/encoder (fetched via the Python
                # and C API) and iterencode()/iterdecode()
                try:
                    encoder = codecs.getincrementalencoder(encoding)()
                    cencoder = _testcapi.codec_incrementalencoder(encoding)
                except LookupError: # no IncrementalEncoder
                    pass
                else:
                    # check incremental decoder/encoder
                    encodedresult = ""
                    for c in s:
                        encodedresult += encoder.encode(c)
                    encodedresult += encoder.encode(u"", True)
                    decoder = codecs.getincrementaldecoder(encoding)()
                    decodedresult = u""
                    for c in encodedresult:
                        decodedresult += decoder.decode(c)
                    decodedresult += decoder.decode("", True)
                    self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))

                    # check C API
                    encodedresult = ""
                    for c in s:
                        encodedresult += cencoder.encode(c)
                    encodedresult += cencoder.encode(u"", True)
                    cdecoder = _testcapi.codec_incrementaldecoder(encoding)
                    decodedresult = u""
                    for c in encodedresult:
                        decodedresult += cdecoder.decode(c)
                    decodedresult += cdecoder.decode("", True)
                    self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))

                    # check iterencode()/iterdecode()
                    result = u"".join(codecs.iterdecode(codecs.iterencode(s, encoding), encoding))
                    self.assertEqual(result, s, "%r != %r (encoding=%r)" % (result, s, encoding))

                    # check iterencode()/iterdecode() with empty string
                    result = u"".join(codecs.iterdecode(codecs.iterencode(u"", encoding), encoding))
                    self.assertEqual(result, u"")

                if encoding not in only_strict_mode:
                    # check incremental decoder/encoder with errors argument
                    try:
                        encoder = codecs.getincrementalencoder(encoding)("ignore")
                        cencoder = _testcapi.codec_incrementalencoder(encoding, "ignore")
                    except LookupError: # no IncrementalEncoder
                        pass
                    else:
                        encodedresult = "".join(encoder.encode(c) for c in s)
                        decoder = codecs.getincrementaldecoder(encoding)("ignore")
                        decodedresult = u"".join(decoder.decode(c) for c in encodedresult)
                        self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))

                        encodedresult = "".join(cencoder.encode(c) for c in s)
                        cdecoder = _testcapi.codec_incrementaldecoder(encoding, "ignore")
                        decodedresult = u"".join(cdecoder.decode(c) for c in encodedresult)
                        self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))

    def test_seek(self):
        # all codecs should be able to encode these
        s = u"%s\n%s\n" % (100*u"abc123", 100*u"def456")
        for encoding in all_unicode_encodings:
            if encoding == "idna": # FIXME: See SF bug #1163178
                continue
            if encoding in broken_unicode_with_streams:
                continue
            reader = codecs.getreader(encoding)(StringIO.StringIO(s.encode(encoding)))
            for t in xrange(5):
                # Test that calling seek resets the internal codec state and buffers
                reader.seek(0, 0)
                line = reader.readline()
                self.assertEqual(s[:len(line)], line)

    def test_bad_decode_args(self):
        for encoding in all_unicode_encodings:
            decoder = codecs.getdecoder(encoding)
            self.assertRaises(TypeError, decoder)
            if encoding not in ("idna", "punycode"):
                self.assertRaises(TypeError, decoder, 42)

    def test_bad_encode_args(self):
        for encoding in all_unicode_encodings:
            encoder = codecs.getencoder(encoding)
            self.assertRaises(TypeError, encoder)

    def test_encoding_map_type_initialized(self):
        from encodings import cp1140
        # This used to crash, we are only verifying there's no crash.
        table_type = type(cp1140.encoding_table)
        self.assertEqual(table_type, table_type)
class BasicStrTest(unittest.TestCase):
    """Round-trip the str-only codecs (quopri, string_escape, uu)."""

    def test_basics(self):
        s = "abc123"
        for encoding in all_string_encodings:
            (bytes, size) = codecs.getencoder(encoding)(s)
            self.assertEqual(size, len(s))
            (chars, size) = codecs.getdecoder(encoding)(bytes)
            self.assertEqual(chars, s, "%r != %r (encoding=%r)" % (chars, s, encoding))
class CharmapTest(unittest.TestCase):
    """charmap_decode with a unicode-string mapping table: bytes index into
    the table; out-of-range bytes follow the error handler."""

    def test_decode_with_string_map(self):
        self.assertEqual(
            codecs.charmap_decode("\x00\x01\x02", "strict", u"abc"),
            (u"abc", 3)
        )

        self.assertEqual(
            codecs.charmap_decode("\x00\x01\x02", "replace", u"ab"),
            (u"ab\ufffd", 3)
        )

        # U+FFFE in the table marks an unmapped position.
        self.assertEqual(
            codecs.charmap_decode("\x00\x01\x02", "replace", u"ab\ufffe"),
            (u"ab\ufffd", 3)
        )

        self.assertEqual(
            codecs.charmap_decode("\x00\x01\x02", "ignore", u"ab"),
            (u"ab", 3)
        )

        self.assertEqual(
            codecs.charmap_decode("\x00\x01\x02", "ignore", u"ab\ufffe"),
            (u"ab", 3)
        )

        allbytes = "".join(chr(i) for i in xrange(256))
        self.assertEqual(
            codecs.charmap_decode(allbytes, "ignore", u""),
            (u"", len(allbytes))
        )
class WithStmtTest(unittest.TestCase):
    """codecs wrapper objects must work as context managers."""

    def test_encodedfile(self):
        f = StringIO.StringIO("\xc3\xbc")
        with codecs.EncodedFile(f, "latin-1", "utf-8") as ef:
            self.assertEqual(ef.read(), "\xfc")

    def test_streamreaderwriter(self):
        f = StringIO.StringIO("\xc3\xbc")
        info = codecs.lookup("utf-8")
        with codecs.StreamReaderWriter(f, info.streamreader,
                                       info.streamwriter, 'strict') as srw:
            self.assertEqual(srw.read(), u"\xfc")
class BomTest(unittest.TestCase):
    """BOM emission rules for seekable files opened via codecs.open():
    written exactly once at position 0, never after an interior seek."""

    def test_seek0(self):
        data = u"1234567890"
        tests = ("utf-16",
                 "utf-16-le",
                 "utf-16-be",
                 "utf-32",
                 "utf-32-le",
                 "utf-32-be",
                 )
        self.addCleanup(test_support.unlink, test_support.TESTFN)
        for encoding in tests:
            # Check if the BOM is written only once
            with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)

            # Check that the BOM is written after a seek(0)
            with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data[0])
                self.assertNotEqual(f.tell(), 0)
                f.seek(0)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)

            # (StreamWriter) Check that the BOM is written after a seek(0)
            with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data[0])
                self.assertNotEqual(f.writer.tell(), 0)
                f.writer.seek(0)
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)

            # Check that the BOM is not written after a seek() at a position
            # different than the start
            with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.seek(f.tell())
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)

            # (StreamWriter) Check that the BOM is not written after a seek()
            # at a position different than the start
            with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data)
                f.writer.seek(f.writer.tell())
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
def test_main():
    """Run every codec test case in this module under the regrtest driver."""
    test_support.run_unittest(
        UTF32Test,
        UTF32LETest,
        UTF32BETest,
        UTF16Test,
        UTF16LETest,
        UTF16BETest,
        UTF8Test,
        UTF8SigTest,
        UTF7Test,
        UTF16ExTest,
        ReadBufferTest,
        CharBufferTest,
        EscapeDecodeTest,
        RecodingTest,
        PunycodeTest,
        UnicodeInternalTest,
        NameprepTest,
        IDNACodecTest,
        CodecsModuleTest,
        StreamReaderTest,
        EncodedFileTest,
        Str2StrTest,
        BasicUnicodeTest,
        BasicStrTest,
        CharmapTest,
        WithStmtTest,
        BomTest,
    )


if __name__ == "__main__":
    test_main()
|
adaussy/eclipse-monkey-revival
|
plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/test/test_codecs.py
|
Python
|
epl-1.0
| 60,499
|
[
"FEFF"
] |
2217904e470c216a4ee508d6d44b00821302f7cafa74c2771385c2bce459588d
|
#!/usr/bin/python
import sys
from subprocess import call
print "\nUsage: bwa_protocol.py ListOfReads Reference Threads [noreduce/reduce]\n"
try:
data = sys.argv[1]
except:
data = raw_input("List of reads: ")
try:
ref = sys.argv[2]
except:
ref = raw_input("FASTA reference file: ")
try:
threads = sys.argv[3]
except:
threads = raw_input("Number of threads: ")
try:
reduce = sys.argv[4]
except:
reduce = raw_input("Reduce or not reduce: ")
files = open(data).readlines()
l_files = []
for f in range(0,(len(files)/2)):
l_files.append([files[f*2][:-1],files[(f*2)+1][:-1]])
try:
open(ref+".pac")
open(ref+".ann")
open(ref+".amb")
open(ref+".bwt")
open(ref+".sa")
except:
call("bwa index -a bwtsw %s" % ref, shell=True)
for pair in l_files:
name = pair[0]
name = name.split(".")
name = name[0][:-2]
call("bwa aln -t%s %s %s > read1.sai" % (threads, ref, pair[0]), shell=True)
call("bwa aln -t%s %s %s > read2.sai" % (threads, ref, pair[1]), shell=True)
call("bwa sampe %s read1.sai read2.sai %s %s | samtools view -bS - > %s_fastq.bam" % (ref, pair[0], pair[1], name), shell=True)
call("rm read1.sai read2.sai", shell=True)
call("samtools sort -T aln.sorted %s_fastq.bam -o %s_sort.bam" % (name, name), shell=True)
call("rm %s_fastq.bam" % (name), shell=True)
call("samtools index %s_sort.bam" % (name), shell=True)
call("samtools flagstat %s_sort.bam > %s_sort.flagstat" % (name, name), shell=True)
if reduce == "reduce":
call("reduce_bam.py %s_sort.bam && rm %s_sort.bam" % (name, name), shell=True)
|
fjruizruano/ngs-protocols
|
bwa_protocol.py
|
Python
|
gpl-3.0
| 1,605
|
[
"BWA"
] |
ac6b0d6e7154a3565511211cbd7cde79f9e21b5e061ebcad9857ae63b1640e2d
|
""" Testing the FCConditionPaserClass
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from DIRAC.Resources.Catalog.FCConditionParser import FCConditionParser
__RCSID__ = "$Id $"
class TestLogicEvaluation(unittest.TestCase):
    """ Tests all the logic evaluation
    """

    def setUp(self):
        self.fcp = FCConditionParser()
        self.lfns = ['/lhcb/lfn1', '/lhcb/lfn2']

    def _check(self, condition, expected):
        """Evaluate `condition` through the parser and assert that every lfn
        in self.lfns maps to the truth value `expected`.

        Factors out the 5-line pattern the original repeated ~25 times.

        :param condition: condition string handed to FCConditionParser
        :param expected: bool, expected truthiness for every lfn
        """
        res = self.fcp('catalogName', 'operationName', self.lfns, condition=condition)
        self.assertTrue(res['OK'], res)
        for lfn in self.lfns:
            # bool() preserves the original truthiness-based assertions.
            self.assertTrue(bool(res['Value']['Successful'][lfn]) == expected, res)

    def test_01_simpleParse(self):
        """Test the parse of a single plugin"""
        self._check("Dummy=True", True)
        self._check("Dummy=False", False)

    def test_02_notLogic(self):
        """Testing the ! operator"""
        self._check("!Dummy=True", False)
        self._check("!Dummy=False", True)

    def test_03_andLogic(self):
        """Testing the & operator"""
        self._check("Dummy=True & Dummy=True", True)
        self._check("Dummy=False & Dummy=True", False)
        self._check("Dummy=True & Dummy=False", False)

    def test_04_orLogic(self):
        """Testing the | operator"""
        self._check("Dummy=True | Dummy=True", True)
        self._check("Dummy=False | Dummy=True", True)
        self._check("Dummy=False | Dummy=False", False)

    def test_05_priority(self):
        """Testing the priority of operators"""
        # ! binds tighter than & and |; & binds tighter than |.
        self._check("!Dummy=False & Dummy=True", True)
        self._check("!Dummy=True | Dummy=False", False)
        self._check("Dummy=True & Dummy=False | Dummy=True", True)
        self._check("Dummy=True | Dummy=False & Dummy=True", True)
        self._check("!Dummy=True | Dummy=False & Dummy=True", False)
        self._check("!Dummy=True | !Dummy=False & Dummy=True", True)
        self._check("!Dummy=True | !Dummy=False & !Dummy=True", False)
        # Square brackets group sub-expressions.
        self._check("[!Dummy=False] & Dummy=True", True)
        self._check("![Dummy=False] & Dummy=True", True)
        self._check("![Dummy=False & Dummy=True]", True)
        self._check("[Dummy=True | Dummy=False] & Dummy=True", True)
        self._check("Dummy=True | [Dummy=False & Dummy=True]", True)
        self._check("Dummy=False | [Dummy=False & Dummy=True]", False)

    def test_06_errors(self):
        """Testing different error situation"""
        # Error in the plugin
        self._check("Dummy=CantParse", False)
        # Non existing plugin
        self._check("NonExistingPlugin=something", False)
        # Error in the grammar
        self._check("[Dummy=True", False)

    def test_07_noCondition(self):
        """Testing different error situation"""
        # Non condition given
        self._check("", True)
        # Can't retrive conditions
        # It so happen that it will all be True
        self._check(None, True)
if __name__ == '__main__':
    # Allow running this test module directly, outside the DIRAC test runner.
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestLogicEvaluation)
    unittest.TextTestRunner(verbosity=2).run(suite)
|
yujikato/DIRAC
|
src/DIRAC/Resources/Catalog/test/Test_FCConditionParser.py
|
Python
|
gpl-3.0
| 8,957
|
[
"DIRAC"
] |
56505741b9815380d5d3f56c8d61e69b3dbf1e7c964b16f0d16ed647995ed010
|
########################################################################
# $Id$
# File : ProcessMonitor.py
# Author : Stuart Paterson
########################################################################
""" The Process Monitor utility allows to calculate cumulative CPU time and memory
for a given PID and it's process group. This is only implemented for linux /proc
file systems but could feasibly be extended in the future.
"""
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.Subprocess import shellCall
__RCSID__ = "$Id$"
import os, re, platform
class ProcessMonitor:
#############################################################################
  def __init__( self ):
    """ Standard constructor
    """
    self.log = gLogger.getSubLogger( 'ProcessMonitor' )
    # Full uname tuple; only the platform-name element is consulted later.
    self.osType = platform.uname()
#############################################################################
def getCPUConsumed( self, pid ):
"""Returns the CPU consumed for supported platforms when supplied a PID.
"""
currentOS = self.__checkCurrentOS()
if currentOS.lower() == 'linux':
return self.getCPUConsumedLinux( pid )
else:
self.log.warn( 'Platform %s is not supported' % ( currentOS ) )
return S_ERROR( 'Unsupported platform' )
  def getMemoryConsumed( self, pid ):
    """Returns the memory consumed (Vsize/RSS) for supported platforms when
       supplied a PID.  (Docstring fixed: it previously said "CPU consumed",
       a copy-paste from getCPUConsumed.)
    """
    currentOS = self.__checkCurrentOS()
    if currentOS.lower() == 'linux':
      return self.getMemoryConsumedLinux( pid )
    else:
      self.log.warn( 'Platform %s is not supported' % ( currentOS ) )
      return S_ERROR( 'Unsupported platform' )
  def getResourceConsumedLinux( self, pid ):
    """Returns the resources consumed given a PID assuming a proc file
       system exists: S_OK with a dict of cumulative 'CPU', 'Vsize' and
       'RSS' for the whole process tree below `pid`.
    """
    pid = str( pid )
    masterProcPath = '/proc/%s/stat' % ( pid )
    if not os.path.exists( masterProcPath ):
      return S_ERROR( 'Process %s does not exist' % ( pid ) )

    #Get the current process list
    pidListResult = self.__getProcListLinux()
    if not pidListResult['OK']:
      return pidListResult
    pidList = pidListResult['Value']
    # Walk pid's descendants (and same-group orphans) recursively.
    return self.__getChildResourceConsumedLinux( pid, pidList )
#############################################################################
  def getCPUConsumedLinux( self, pid ):
    """Returns the CPU consumed given a PID assuming a proc file system exists.
    """
    result = self.getResourceConsumedLinux( pid )
    if not result['OK']:
      return result
    # Extract just the CPU component of the combined resource dict.
    currentCPU = result['Value']['CPU']
    self.log.verbose( 'Final CPU estimate is %s' % currentCPU )
    return S_OK( currentCPU )
  def getMemoryConsumedLinux( self, pid ):
    """ Get the current memory consumption: S_OK with a dict holding the
        cumulative 'Vsize' and 'RSS' of the process tree.
    """
    result = self.getResourceConsumedLinux( pid )
    if not result['OK']:
      return result
    vsize = result['Value']['Vsize']
    rss = result['Value']['RSS']
    self.log.verbose( 'Current memory estimate is Vsize: %s, RSS: %s' % ( vsize, rss ) )
    return S_OK( {'Vsize': vsize, 'RSS': rss } )
#############################################################################
  def __getProcListLinux( self ):
    """Gets list of process IDs from /proc/*.
    """
    result = shellCall( 10, 'ls -d /proc/[0-9]*' )
    if not result['OK']:
      # A failed call that still produced output is tolerated; only a
      # result with no 'Value' at all is propagated as an error.
      if not 'Value' in result:
        return result
    procList = result['Value'][1].replace( '/proc/', '' ).split( '\n' )
    # NOTE(review): a trailing newline in the command output leaves an empty
    # string in procList — presumably harmless because /proc//stat never
    # exists; confirm against __getProcInfoLinux handling.
    return S_OK( procList )
#############################################################################
  def __getChildResourceConsumedLinux( self, pid, pidList, infoDict = None ):
    """Adds contributions to CPU total from child processes recursively.

       Returns S_OK with a dict of cumulative 'CPU' (seconds — ticks are
       divided by 100, i.e. a USER_HZ of 100 is assumed; TODO confirm),
       'Vsize' and 'RSS' (bytes) for `pid`, its descendants, and orphans in
       the same process group.  `infoDict` carries the /proc stat fields
       across recursive calls; consumed entries are deleted so nothing is
       counted twice.
    """
    childCPU = 0
    vsize = 0
    rss = 0
    # RSS in /proc/<pid>/stat is counted in pages; convert to bytes.
    pageSize = os.sysconf('SC_PAGESIZE')
    if not infoDict:
      # First (non-recursive) call: snapshot stat info for every process.
      infoDict = {}
      for pidCheck in pidList:
        info = self.__getProcInfoLinux( pidCheck )
        if info['OK']:
          infoDict[pidCheck] = info['Value']

    procGroup = self.__getProcGroupLinux( pid )
    if not procGroup['OK']:
      return procGroup
    procGroup = procGroup['Value'].strip()

    # Direct children of pid (stat field 3 is the parent PID).  items() is a
    # list in Python 2, so deleting from infoDict while looping is safe; the
    # `pidCheck in infoDict` re-check guards against deletions made by the
    # recursive call below.
    for pidCheck, info in infoDict.items():
      if pidCheck in infoDict and info[3] == pid:
        # Fields 13-16: utime, stime, cutime, cstime (clock ticks).
        contribution = float( info[13] ) / 100 + float( info[14] ) / 100 + float( info[15] ) / 100 + float( info[16] ) / 100
        childCPU += contribution
        vsize += float( info[22] )
        rss += float( info[23] ) * pageSize
        self.log.debug( 'Added %s to CPU total (now %s) from child PID %s %s' % ( contribution, childCPU, info[0], info[1] ) )
        del infoDict[pidCheck]
        result = self.__getChildResourceConsumedLinux( pidCheck, pidList, infoDict )
        if result['OK']:
          childCPU += result['Value']['CPU']
          vsize += result['Value']['Vsize']
          rss += result['Value']['RSS']

    #Next add any contributions from orphan processes in same process group
    for pidCheck, info in infoDict.items():
      if pidCheck in infoDict and info[3] == 1 and info[4] == procGroup:
        contribution = float( info[13] ) / 100 + float( info[14] ) / 100 + float( info[15] ) / 100 + float( info[16] ) / 100
        childCPU += contribution
        vsize += float( info[22] )
        rss += float( info[23] ) * pageSize
        self.log.debug( 'Added %s to CPU total (now %s) from orphan PID %s %s' % ( contribution, childCPU, info[0], info[1] ) )
        del infoDict[pidCheck]

    #Finally add the parent itself
    if pid in infoDict:
      info = infoDict[pid]
      contribution = float( info[13] ) / 100 + float( info[14] ) / 100 + float( info[15] ) / 100 + float( info[16] ) / 100
      childCPU += contribution
      vsize += float( info[22] )
      rss += float( info[23] ) * pageSize
      self.log.debug( 'Added %s to CPU total (now %s) from PID %s %s' % ( contribution, childCPU, info[0], info[1] ) )
      del infoDict[pid]

    # Some debug printout if 0 CPU is determined
    if childCPU == 0:
      self.log.error( 'Consumed CPU is found to be 0' )
      self.log.info( 'Contributing processes:' )
      for pidCheck in pidList:
        if pidCheck not in infoDict:
          info = self.__getProcInfoLinux( pidCheck )
          if info['OK']:
            self.log.info( ' PID:', info['Value'] )

    return S_OK( { "CPU": childCPU,
                   "Vsize": vsize,
                   "RSS": rss } )
#############################################################################
def __getProcInfoLinux( self, pid ):
  """Attempt to read /proc/<pid>/stat and return the list of its fields.

  The fields are the space-separated values documented in proc(5) for
  /proc/[pid]/stat. The 0-based indices used elsewhere in this class are:

    0  pid          process ID
    1  comm         executable name, in parentheses
    2  state        one of "RSDZTW"
    3  ppid         parent PID
    4  pgrp         process group ID
    13 utime        user-mode time, in clock ticks
    14 stime        kernel-mode time, in clock ticks
    15 cutime       user-mode time of waited-for children, in clock ticks
    16 cstime       kernel-mode time of waited-for children, in clock ticks
    22 vsize        virtual memory size in bytes
    23 rss          resident set size, in pages

  :return: S_OK(list of field strings) or S_ERROR if the file is unreadable
           (e.g. the process has already exited).
  """
  procPath = '/proc/%s/stat' % ( pid )
  try:
    # 'with' guarantees the handle is closed even if readline() raises;
    # the previous open/readline/close sequence could leak the descriptor.
    with open( procPath, 'r' ) as fopen:
      procStat = fopen.readline()
  except Exception:
    return S_ERROR( 'Not able to check %s' % pid )
  return S_OK( procStat.split( ' ' ) )
#############################################################################
def __getProcGroupLinux( self, pid ):
  """Return the process group ID (pgrp) for the given PID.

  FIX: the previous docstring claimed this returns the UID, but
  ``ps -o pgrp`` reports the process group ID, which is what the
  orphan-process accounting above actually compares against.
  """
  result = shellCall( 10, 'ps --no-headers -o pgrp -p %s' % ( pid ) )
  # Propagate the failure only when no output was captured at all.
  if not result['OK'] and 'Value' not in result:
    return result
  return S_OK( result['Value'][1] )
#############################################################################
def __checkCurrentOS( self ):
  """Classify the current platform as 'Mac', 'Windows' or 'Linux' so the
  caller can pick the matching CPU-accounting strategy. Anything that is
  neither Darwin nor Windows is treated as Linux.
  """
  self.osType = platform.uname()
  system = self.osType[0]
  if re.search( 'Darwin', system ):
    localOS = 'Mac'
  elif re.search( 'Windows', system ):
    localOS = 'Windows'
  else:
    localOS = 'Linux'
  self.log.debug( 'Will determine CPU consumed for %s flavour OS' % ( localOS ) )
  return localOS
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
Sbalbp/DIRAC
|
Core/Utilities/ProcessMonitor.py
|
Python
|
gpl-3.0
| 18,429
|
[
"DIRAC"
] |
9b14d6ccdabd94d05b092fe5cc6fd701d9fadace22f185c7375a050d79f355a4
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from copy import deepcopy
from functools import partial
from gzip import GzipFile
import os
import os.path as op
import numpy as np
from scipy import sparse, linalg
from .io.constants import FIFF
from .io.tree import dir_tree_find
from .io.tag import find_tag, read_tag
from .io.open import fiff_open
from .io.write import (start_block, end_block, write_int,
write_float_sparse_rcs, write_string,
write_float_matrix, write_int_matrix,
write_coord_trans, start_file, end_file, write_id)
from .bem import read_bem_surfaces
from .surface import (read_surface, _create_surf_spacing, _get_ico_surface,
_tessellate_sphere_surf, _get_surf_neighbors,
_read_surface_geom, _normalize_vectors,
_complete_surface_info, _compute_nearest,
fast_cross_3d, _fast_cross_nd_sum, mesh_dist,
_triangle_neighbors)
from .utils import (get_subjects_dir, run_subprocess, has_freesurfer,
has_nibabel, check_fname, logger, verbose,
check_version, _get_call_line, warn)
from .parallel import parallel_func, check_n_jobs
from .transforms import (invert_transform, apply_trans, _print_coord_trans,
combine_transforms, _get_trans,
_coord_frame_name, Transform, _str_to_frame)
from .externals.six import string_types
def _get_lut():
    """Helper to get the FreeSurfer LUT"""
    # The lookup table ships with the package under mne/data/
    lut_fname = op.join(op.dirname(__file__), 'data',
                        'FreeSurferColorLUT.txt')
    return np.genfromtxt(lut_fname, dtype=None, usecols=(0, 1),
                         names=['id', 'name'])
def _get_lut_id(lut, label, use_lut):
    """Helper to convert a label to a LUT ID number"""
    if not use_lut:
        # LUT disabled: every source space gets the default value 1
        return 1
    assert isinstance(label, string_types)
    # LUT names are stored as bytes by np.genfromtxt, so encode for comparison
    matches = lut['name'] == label.encode('utf-8')
    assert matches.sum() == 1
    return lut['id'][matches]
_src_kind_dict = {
'vol': 'volume',
'surf': 'surface',
'discrete': 'discrete',
}
class SourceSpaces(list):
    """Represent a list of source space

    Currently implemented as a list of dictionaries containing the source
    space information

    Parameters
    ----------
    source_spaces : list
        A list of dictionaries containing the source space information.
    info : dict
        Dictionary with information about the creation of the source space
        file. Has keys 'working_dir' and 'command_line'.

    Attributes
    ----------
    info : dict
        Dictionary with information about the creation of the source space
        file. Has keys 'working_dir' and 'command_line'.
    """

    def __init__(self, source_spaces, info=None):
        """Initialize from a list of source-space dicts; `info` is copied."""
        super(SourceSpaces, self).__init__(source_spaces)
        if info is None:
            self.info = dict()
        else:
            self.info = dict(info)

    def __repr__(self):
        """Summarize each contained source space (kind, size, frame)."""
        ss_repr = []
        for ss in self:
            ss_type = ss['type']
            r = _src_kind_dict[ss_type]
            if ss_type == 'vol':
                if 'seg_name' in ss:
                    # segmented volume: show the segment name
                    r += " (%s)" % (ss['seg_name'],)
                else:
                    r += ", shape=%s" % (ss['shape'],)
            elif ss_type == 'surf':
                r += (" (%s), n_vertices=%i" % (_get_hemi(ss)[0], ss['np']))
            r += (', n_used=%i, coordinate_frame=%s'
                  % (ss['nuse'], _coord_frame_name(int(ss['coord_frame']))))
            ss_repr.append('<%s>' % r)
        return "<SourceSpaces: [%s]>" % ', '.join(ss_repr)

    @property
    def kind(self):
        """The kind of source space (surface, volume, discrete)"""
        # 'combined' when the contained spaces are not all of one type
        ss_types = list(set([ss['type'] for ss in self]))
        if len(ss_types) != 1:
            return 'combined'
        return _src_kind_dict[ss_types[0]]

    def __add__(self, other):
        """Concatenation returns a SourceSpaces, not a plain list."""
        return SourceSpaces(list.__add__(self, other))

    def copy(self):
        """Make a copy of the source spaces

        Returns
        -------
        src : instance of SourceSpaces
            The copied source spaces.
        """
        src = deepcopy(self)
        return src

    def save(self, fname):
        """Save the source spaces to a fif file

        Parameters
        ----------
        fname : str
            File to write.
        """
        write_source_spaces(fname, self)

    @verbose
    def export_volume(self, fname, include_surfaces=True,
                      include_discrete=True, dest='mri', trans=None,
                      mri_resolution=False, use_lut=True, verbose=None):
        """Exports source spaces to nifti or mgz file

        Parameters
        ----------
        fname : str
            Name of nifti or mgz file to write.
        include_surfaces : bool
            If True, include surface source spaces.
        include_discrete : bool
            If True, include discrete source spaces.
        dest : 'mri' | 'surf'
            If 'mri' the volume is defined in the coordinate system of the
            original T1 image. If 'surf' the coordinate system of the
            FreeSurfer surface is used (Surface RAS).
        trans : dict, str, or None
            Either a transformation filename (usually made using mne_analyze)
            or an info dict (usually opened using read_trans()).
            If string, an ending of `.fif` or `.fif.gz` will be assumed to be
            in FIF format, any other ending will be assumed to be a text file
            with a 4x4 transformation matrix (like the `--trans` MNE-C option.
            Must be provided if source spaces are in head coordinates and
            include_surfaces and mri_resolution are True.
        mri_resolution : bool
            If True, the image is saved in MRI resolution
            (e.g. 256 x 256 x 256).
        use_lut : bool
            If True, assigns a numeric value to each source space that
            corresponds to a color on the freesurfer lookup table.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).

        Notes
        -----
        This method requires nibabel.
        """
        # import nibabel or raise error
        try:
            import nibabel as nib
        except ImportError:
            raise ImportError('This function requires nibabel.')

        # Check coordinate frames of each source space
        coord_frames = np.array([s['coord_frame'] for s in self])

        # Raise error if trans is not provided when head coordinates are used
        # and mri_resolution and include_surfaces are true
        if (coord_frames == FIFF.FIFFV_COORD_HEAD).all():
            coords = 'head'  # all sources in head coordinates
            if mri_resolution and include_surfaces:
                if trans is None:
                    raise ValueError('trans containing mri to head transform '
                                     'must be provided if mri_resolution and '
                                     'include_surfaces are true and surfaces '
                                     'are in head coordinates')
            elif trans is not None:
                logger.info('trans is not needed and will not be used unless '
                            'include_surfaces and mri_resolution are True.')
        elif (coord_frames == FIFF.FIFFV_COORD_MRI).all():
            coords = 'mri'  # all sources in mri coordinates
            if trans is not None:
                logger.info('trans is not needed and will not be used unless '
                            'sources are in head coordinates.')
        # Raise error if all sources are not in the same space, or sources are
        # not in mri or head coordinates
        else:
            raise ValueError('All sources must be in head coordinates or all '
                             'sources must be in mri coordinates.')

        # use lookup table to assign values to source spaces
        logger.info('Reading FreeSurfer lookup table')
        # read the lookup table
        lut = _get_lut()

        # Setup a dictionary of source types
        src_types = dict(volume=[], surface=[], discrete=[])

        # Populate dictionary of source types
        for src in self:
            # volume sources
            if src['type'] == 'vol':
                src_types['volume'].append(src)
            # surface sources
            elif src['type'] == 'surf':
                src_types['surface'].append(src)
            # discrete sources
            elif src['type'] == 'discrete':
                src_types['discrete'].append(src)
            # raise an error if dealing with source type other than volume
            # surface or discrete
            else:
                raise ValueError('Unrecognized source type: %s.' % src['type'])

        # Get shape, inuse array and interpolation matrix from volume sources
        inuse = 0
        for ii, vs in enumerate(src_types['volume']):
            # read the lookup table value for segmented volume
            if 'seg_name' not in vs:
                raise ValueError('Volume sources should be segments, '
                                 'not the entire volume.')
            # find the color value for this volume
            id_ = _get_lut_id(lut, vs['seg_name'], use_lut)
            if ii == 0:
                # get the inuse array
                if mri_resolution:
                    # read the mri file used to generate volumes
                    # NOTE(review): get_data() is deprecated in newer nibabel
                    # in favor of get_fdata() — confirm before upgrading
                    aseg_data = nib.load(vs['mri_file']).get_data()
                    # get the voxel space shape
                    shape3d = (vs['mri_height'], vs['mri_depth'],
                               vs['mri_width'])
                else:
                    # get the volume source space shape
                    # read the shape in reverse order
                    # (otherwise results are scrambled)
                    shape3d = vs['shape'][2::-1]
            if mri_resolution:
                # get the values for this volume
                use = id_ * (aseg_data == id_).astype(int).ravel('F')
            else:
                use = id_ * vs['inuse']
            inuse += use

        # Raise error if there are no volume source spaces
        if np.array(inuse).ndim == 0:
            raise ValueError('Source spaces must contain at least one volume.')

        # create 3d grid in the MRI_VOXEL coordinate frame
        # len of inuse array should match shape regardless of mri_resolution
        assert len(inuse) == np.prod(shape3d)

        # setup the image in 3d space
        img = inuse.reshape(shape3d).T

        # include surface and/or discrete source spaces
        if include_surfaces or include_discrete:
            # setup affine transform for source spaces
            # (vs is the last volume source from the loop above)
            if mri_resolution:
                # get the MRI to MRI_VOXEL transform
                affine = invert_transform(vs['vox_mri_t'])
            else:
                # get the MRI to SOURCE (MRI_VOXEL) transform
                affine = invert_transform(vs['src_mri_t'])
            # modify affine if in head coordinates
            if coords == 'head':
                # read mri -> head transformation
                mri_head_t = _get_trans(trans)[0]
                # get the HEAD to MRI transform
                head_mri_t = invert_transform(mri_head_t)
                # combine transforms, from HEAD to MRI_VOXEL
                affine = combine_transforms(head_mri_t, affine,
                                            'head', 'mri_voxel')

            # loop through the surface source spaces
            if include_surfaces:
                # get the surface names (assumes left, right order. may want
                # to add these names during source space generation
                surf_names = ['Left-Cerebral-Cortex', 'Right-Cerebral-Cortex']
                for i, surf in enumerate(src_types['surface']):
                    # convert vertex positions from their native space
                    # (either HEAD or MRI) to MRI_VOXEL space
                    srf_rr = apply_trans(affine['trans'], surf['rr'])
                    # convert to numeric indices
                    ix_orig, iy_orig, iz_orig = srf_rr.T.round().astype(int)
                    # clip indices outside of volume space
                    ix_clip = np.maximum(np.minimum(ix_orig, shape3d[2] - 1),
                                         0)
                    iy_clip = np.maximum(np.minimum(iy_orig, shape3d[1] - 1),
                                         0)
                    iz_clip = np.maximum(np.minimum(iz_orig, shape3d[0] - 1),
                                         0)
                    # compare original and clipped indices
                    n_diff = np.array((ix_orig != ix_clip, iy_orig != iy_clip,
                                       iz_orig != iz_clip)).any(0).sum()
                    # generate use warnings for clipping
                    if n_diff > 0:
                        warn('%s surface vertices lay outside of volume space.'
                             ' Consider using a larger volume space.' % n_diff)
                    # get surface id or use default value
                    i = _get_lut_id(lut, surf_names[i], use_lut)
                    # update image to include surface voxels
                    img[ix_clip, iy_clip, iz_clip] = i

            # loop through discrete source spaces
            if include_discrete:
                for i, disc in enumerate(src_types['discrete']):
                    # convert vertex positions from their native space
                    # (either HEAD or MRI) to MRI_VOXEL space
                    disc_rr = apply_trans(affine['trans'], disc['rr'])
                    # convert to numeric indices
                    ix_orig, iy_orig, iz_orig = disc_rr.T.astype(int)
                    # clip indices outside of volume space
                    ix_clip = np.maximum(np.minimum(ix_orig, shape3d[2] - 1),
                                         0)
                    iy_clip = np.maximum(np.minimum(iy_orig, shape3d[1] - 1),
                                         0)
                    iz_clip = np.maximum(np.minimum(iz_orig, shape3d[0] - 1),
                                         0)
                    # compare original and clipped indices
                    n_diff = np.array((ix_orig != ix_clip, iy_orig != iy_clip,
                                       iz_orig != iz_clip)).any(0).sum()
                    # generate use warnings for clipping
                    if n_diff > 0:
                        warn('%s discrete vertices lay outside of volume '
                             'space. Consider using a larger volume space.'
                             % n_diff)
                    # set default value
                    img[ix_clip, iy_clip, iz_clip] = 1
                    if use_lut:
                        logger.info('Discrete sources do not have values on '
                                    'the lookup table. Defaulting to 1.')

        # calculate affine transform for image (MRI_VOXEL to RAS)
        if mri_resolution:
            # MRI_VOXEL to MRI transform
            transform = vs['vox_mri_t'].copy()
        else:
            # MRI_VOXEL to MRI transform
            # NOTE: 'src' indicates downsampled version of MRI_VOXEL
            transform = vs['src_mri_t'].copy()
        if dest == 'mri':
            # combine with MRI to RAS transform
            transform = combine_transforms(transform, vs['mri_ras_t'],
                                           transform['from'],
                                           vs['mri_ras_t']['to'])
        # now setup the affine for volume image
        affine = transform['trans']
        # make sure affine converts from m to mm
        affine[:3] *= 1e3

        # save volume data

        # setup image for file
        if fname.endswith(('.nii', '.nii.gz')):  # save as nifti
            # setup the nifti header
            hdr = nib.Nifti1Header()
            hdr.set_xyzt_units('mm')
            # save the nifti image
            img = nib.Nifti1Image(img, affine, header=hdr)
        elif fname.endswith('.mgz'):  # save as mgh
            # convert to float32 (float64 not currently supported)
            img = img.astype('float32')
            # save the mgh image
            img = nib.freesurfer.mghformat.MGHImage(img, affine)
        else:
            raise(ValueError('Unrecognized file extension'))

        # write image to file
        nib.save(img, fname)
def _add_patch_info(s):
    """Patch information in a source space

    Generate the patch information from the 'nearest' vector in
    a source space. For vertex in the source space it provides
    the list of neighboring vertices in the high resolution
    triangulation.

    Parameters
    ----------
    s : dict
        The source space. Modified in place: sets 'pinfo' (list of arrays
        of high-resolution vertex indices, one per patch) and 'patch_inds'
        (patch index for each in-use vertex), or None for both when no
        'nearest' information is available.
    """
    nearest = s['nearest']
    if nearest is None:
        # no patch information stored in the file
        s['pinfo'] = None
        s['patch_inds'] = None
        return
    logger.info(' Computing patch statistics...')
    # sort vertices by the in-use vertex they map to, so each patch
    # becomes a contiguous run in nearest_sorted
    indn = np.argsort(nearest)
    nearest_sorted = nearest[indn]
    # boundaries between consecutive runs of equal values
    steps = np.where(nearest_sorted[1:] != nearest_sorted[:-1])[0] + 1
    starti = np.r_[[0], steps]
    stopi = np.r_[steps, [len(nearest)]]
    pinfo = list()
    for start, stop in zip(starti, stopi):
        pinfo.append(np.sort(indn[start:stop]))
    s['pinfo'] = pinfo
    # compute patch indices of the in-use source space vertices
    patch_verts = nearest_sorted[steps - 1]
    s['patch_inds'] = np.searchsorted(patch_verts, s['vertno'])
    logger.info(' Patch information added...')
@verbose
def _read_source_spaces_from_tree(fid, tree, patch_stats=False,
                                  verbose=None):
    """Read the source spaces from a FIF file

    Parameters
    ----------
    fid : file descriptor
        An open file descriptor.
    tree : dict
        The FIF tree structure if source is a file id.
    patch_stats : bool, optional (default False)
        Calculate and add cortical patch statistics to the surfaces.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    src : SourceSpaces
        The source spaces.
    """
    # Locate every source-space block stored in the FIF tree
    spaces = dir_tree_find(tree, FIFF.FIFFB_MNE_SOURCE_SPACE)
    if not spaces:
        raise ValueError('No source spaces found')

    src = list()
    for space in spaces:
        logger.info(' Reading a source space...')
        one = _read_one_source_space(fid, space)
        logger.info(' [done]')
        if patch_stats:
            # optionally augment with triangle geometry statistics
            _complete_source_space_info(one)
        src.append(one)
    logger.info(' %d source spaces read' % len(spaces))
    return SourceSpaces(src)
@verbose
def read_source_spaces(fname, patch_stats=False, verbose=None):
    """Read the source spaces from a FIF file

    Parameters
    ----------
    fname : str
        The name of the file, which should end with -src.fif or
        -src.fif.gz.
    patch_stats : bool, optional (default False)
        Calculate and add cortical patch statistics to the surfaces.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    src : SourceSpaces
        The source spaces.

    See Also
    --------
    write_source_spaces, setup_source_space, setup_volume_source_space
    """
    # be more permissive on read than write (fwd/inv can contain src)
    check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz',
                                        '-fwd.fif', '-fwd.fif.gz',
                                        '-inv.fif', '-inv.fif.gz'))
    ff, tree, _ = fiff_open(fname)
    with ff as fid:
        src = _read_source_spaces_from_tree(fid, tree, patch_stats=patch_stats,
                                            verbose=verbose)
        src.info['fname'] = fname
        # also record how the file was created, when that info is present
        env = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
        if env:
            env = env[0]
            for entry in env['directory'][:env['nent']]:
                tag = read_tag(fid, entry.pos)
                if entry.kind == FIFF.FIFF_MNE_ENV_WORKING_DIR:
                    src.info['working_dir'] = tag.data
                elif entry.kind == FIFF.FIFF_MNE_ENV_COMMAND_LINE:
                    src.info['command_line'] = tag.data
    return src
@verbose
def _read_one_source_space(fid, this, verbose=None):
    """Read one source space from an open FIF file.

    Parameters
    ----------
    fid : file descriptor
        An open file descriptor.
    this : dict
        The FIF tree node of the source space to read.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    res : dict
        The source space dictionary (type, vertices, normals, triangles,
        selection, patch and distance information).

    Raises
    ------
    ValueError
        If mandatory tags are missing or inconsistent.
    """
    FIFF_BEM_SURF_NTRI = 3104
    FIFF_BEM_SURF_TRIANGLES = 3106

    res = dict()

    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_ID)
    if tag is None:
        res['id'] = int(FIFF.FIFFV_MNE_SURF_UNKNOWN)
    else:
        res['id'] = int(tag.data)

    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE)
    if tag is None:
        raise ValueError('Unknown source space type')
    else:
        src_type = int(tag.data)
        if src_type == FIFF.FIFFV_MNE_SPACE_SURFACE:
            res['type'] = 'surf'
        elif src_type == FIFF.FIFFV_MNE_SPACE_VOLUME:
            res['type'] = 'vol'
        elif src_type == FIFF.FIFFV_MNE_SPACE_DISCRETE:
            res['type'] = 'discrete'
        else:
            raise ValueError('Unknown source space type (%d)' % src_type)

    if res['type'] == 'vol':
        # volume-specific information (voxel dims, transforms, parent MRI)
        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS)
        if tag is not None:
            res['shape'] = tuple(tag.data)
        tag = find_tag(fid, this, FIFF.FIFF_COORD_TRANS)
        if tag is not None:
            res['src_mri_t'] = tag.data
        parent_mri = dir_tree_find(this, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
        if len(parent_mri) == 0:
            # MNE 2.7.3 (and earlier) didn't store necessary information
            # about volume coordinate translations. Although there is a
            # FFIF_COORD_TRANS in the higher level of the FIFF file, this
            # doesn't contain all the info we need. Safer to return an
            # error unless a user really wants us to add backward compat.
            raise ValueError('Can not find parent MRI location. The volume '
                             'source space may have been made with an MNE '
                             'version that is too old (<= 2.7.3). Consider '
                             'updating and regenerating the inverse.')
        mri = parent_mri[0]
        for d in mri['directory']:
            if d.kind == FIFF.FIFF_COORD_TRANS:
                tag = read_tag(fid, d.pos)
                trans = tag.data
                if trans['from'] == FIFF.FIFFV_MNE_COORD_MRI_VOXEL:
                    res['vox_mri_t'] = tag.data
                if trans['to'] == FIFF.FIFFV_MNE_COORD_RAS:
                    res['mri_ras_t'] = tag.data
        tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR)
        if tag is not None:
            res['interpolator'] = tag.data
        else:
            logger.info("Interpolation matrix for MRI not found.")
        tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE)
        if tag is not None:
            res['mri_file'] = tag.data
        tag = find_tag(fid, mri, FIFF.FIFF_MRI_WIDTH)
        if tag is not None:
            res['mri_width'] = int(tag.data)
        tag = find_tag(fid, mri, FIFF.FIFF_MRI_HEIGHT)
        if tag is not None:
            res['mri_height'] = int(tag.data)
        tag = find_tag(fid, mri, FIFF.FIFF_MRI_DEPTH)
        if tag is not None:
            res['mri_depth'] = int(tag.data)
        tag = find_tag(fid, mri, FIFF.FIFF_MNE_FILE_NAME)
        if tag is not None:
            res['mri_volume_name'] = tag.data
        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS)
        if tag is not None:
            nneighbors = tag.data
            tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS)
            # the neighbor list is stored flat; split it into per-vertex runs
            offset = 0
            neighbors = []
            for n in nneighbors:
                neighbors.append(tag.data[offset:offset + n])
                offset += n
            res['neighbor_vert'] = neighbors
        tag = find_tag(fid, this, FIFF.FIFF_COMMENT)
        if tag is not None:
            res['seg_name'] = tag.data

    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
    if tag is None:
        raise ValueError('Number of vertices not found')
    res['np'] = int(tag.data)

    tag = find_tag(fid, this, FIFF_BEM_SURF_NTRI)
    if tag is None:
        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI)
        if tag is None:
            res['ntri'] = 0
        else:
            res['ntri'] = int(tag.data)
    else:
        res['ntri'] = tag.data

    tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
    if tag is None:
        raise ValueError('Coordinate frame information not found')
    res['coord_frame'] = tag.data

    # Vertices, normals, and triangles
    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS)
    if tag is None:
        raise ValueError('Vertex data not found')
    # FIX: use the builtin `float` — `np.float` was a deprecated alias of
    # builtin float and is removed in NumPy >= 1.24 (same dtype, float64)
    res['rr'] = tag.data.astype(float)  # double precision for mayavi
    if res['rr'].shape[0] != res['np']:
        raise ValueError('Vertex information is incorrect')

    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
    if tag is None:
        raise ValueError('Vertex normals not found')
    res['nn'] = tag.data
    if res['nn'].shape[0] != res['np']:
        raise ValueError('Vertex normal information is incorrect')

    if res['ntri'] > 0:
        tag = find_tag(fid, this, FIFF_BEM_SURF_TRIANGLES)
        if tag is None:
            tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES)
            if tag is None:
                raise ValueError('Triangulation not found')
            else:
                res['tris'] = tag.data - 1  # index start at 0 in Python
        else:
            res['tris'] = tag.data - 1  # index start at 0 in Python
        if res['tris'].shape[0] != res['ntri']:
            raise ValueError('Triangulation information is incorrect')
    else:
        res['tris'] = None

    # Which vertices are active
    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE)
    if tag is None:
        res['nuse'] = 0
        # FIX: builtin `int` instead of removed `np.int` alias
        res['inuse'] = np.zeros(res['nuse'], dtype=int)
        res['vertno'] = None
    else:
        res['nuse'] = int(tag.data)
        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION)
        if tag is None:
            raise ValueError('Source selection information missing')
        res['inuse'] = tag.data.astype(int).T
        if len(res['inuse']) != res['np']:
            raise ValueError('Incorrect number of entries in source space '
                             'selection')
        res['vertno'] = np.where(res['inuse'])[0]

    # Use triangulation
    tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI)
    tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES)
    if tag1 is None or tag2 is None:
        res['nuse_tri'] = 0
        res['use_tris'] = None
    else:
        res['nuse_tri'] = tag1.data
        res['use_tris'] = tag2.data - 1  # index start at 0 in Python

    # Patch-related information
    tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST)
    tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST)
    if tag1 is None or tag2 is None:
        res['nearest'] = None
        res['nearest_dist'] = None
    else:
        res['nearest'] = tag1.data
        res['nearest_dist'] = tag2.data.T
    _add_patch_info(res)

    # Distances
    tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST)
    tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT)
    if tag1 is None or tag2 is None:
        res['dist'] = None
        res['dist_limit'] = None
    else:
        res['dist'] = tag1.data
        res['dist_limit'] = tag2.data
        # Add the upper triangle
        res['dist'] = res['dist'] + res['dist'].T
    if (res['dist'] is not None):
        logger.info(' Distance information added...')

    tag = find_tag(fid, this, FIFF.FIFF_SUBJ_HIS_ID)
    if tag is not None:
        res['subject_his_id'] = tag.data

    return res
@verbose
def _complete_source_space_info(this, verbose=None):
    """Add derived triangle geometry (centroids, normals, areas) to a
    surface source space, for both the full and the selected triangulation.
    """
    def _tri_geom(tris):
        # corner coordinates of every triangle
        c0 = this['rr'][tris[:, 0], :]
        c1 = this['rr'][tris[:, 1], :]
        c2 = this['rr'][tris[:, 2], :]
        cent = (c0 + c1 + c2) / 3.0
        nn = fast_cross_3d((c1 - c0), (c2 - c0))
        size = np.sqrt(np.sum(nn ** 2, axis=1))
        return cent, nn, size

    # Main triangulation
    logger.info(' Completing triangulation info...')
    this['tri_area'] = np.zeros(this['ntri'])
    cent, nn, size = _tri_geom(this['tris'])
    this['tri_cent'] = cent
    this['tri_nn'] = nn
    this['tri_area'] = size / 2.0
    # normalize the normals in place (twice the triangle area == |nn|)
    this['tri_nn'] /= size[:, None]
    logger.info('[done]')

    # Selected triangles
    logger.info(' Completing selection triangulation info...')
    if this['nuse_tri'] > 0:
        cent, nn, size = _tri_geom(this['use_tris'])
        this['use_tri_cent'] = cent
        this['use_tri_nn'] = nn
        this['use_tri_area'] = size / 2.0
    logger.info('[done]')
def find_source_space_hemi(src):
    """Return the hemisphere id for a source space

    Parameters
    ----------
    src : dict
        The source space to investigate

    Returns
    -------
    hemi : int
        Deduced hemisphere id
    """
    # sum of x coordinates: negative total → left hemisphere
    x_sum = src['rr'][:, 0].sum()
    hemi_id = (FIFF.FIFFV_MNE_SURF_LEFT_HEMI if x_sum < 0
               else FIFF.FIFFV_MNE_SURF_RIGHT_HEMI)
    return int(hemi_id)
def label_src_vertno_sel(label, src):
    """Find vertex numbers and indices from label

    Parameters
    ----------
    label : Label
        Source space label
    src : dict
        Source space

    Returns
    -------
    vertices : list of length 2
        Vertex numbers for lh and rh
    src_sel : array of int (len(idx) = len(vertices[0]) + len(vertices[1]))
        Indices of the selected vertices in source space

    Raises
    ------
    RuntimeError
        If the source space is not a surface source space.
    """
    # Bug fix: the original code *returned* an Exception instance here
    # instead of raising it, so callers silently received an exception
    # object as the result.
    if src[0]['type'] != 'surf':
        raise RuntimeError('Labels are only supported with surface source '
                           'spaces')

    vertno = [src[0]['vertno'], src[1]['vertno']]

    if label.hemi == 'lh':
        # Keep only label vertices present in the lh source space; map them
        # to their positions within the (sorted) vertno array.
        vertno_sel = np.intersect1d(vertno[0], label.vertices)
        src_sel = np.searchsorted(vertno[0], vertno_sel)
        vertno[0] = vertno_sel
        vertno[1] = np.array([], int)
    elif label.hemi == 'rh':
        # rh indices are offset by the number of lh vertices.
        vertno_sel = np.intersect1d(vertno[1], label.vertices)
        src_sel = np.searchsorted(vertno[1], vertno_sel) + len(vertno[0])
        vertno[0] = np.array([], int)
        vertno[1] = vertno_sel
    elif label.hemi == 'both':
        vertno_sel_lh = np.intersect1d(vertno[0], label.lh.vertices)
        src_sel_lh = np.searchsorted(vertno[0], vertno_sel_lh)
        vertno_sel_rh = np.intersect1d(vertno[1], label.rh.vertices)
        src_sel_rh = np.searchsorted(vertno[1], vertno_sel_rh) + len(vertno[0])
        src_sel = np.hstack((src_sel_lh, src_sel_rh))
        vertno = [vertno_sel_lh, vertno_sel_rh]
    else:
        raise Exception("Unknown hemisphere type")

    return vertno, src_sel
def _get_vertno(src):
return [s['vertno'] for s in src]
###############################################################################
# Write routines
@verbose
def _write_source_spaces_to_fid(fid, src, verbose=None):
    """Write the source spaces to a FIF file

    Parameters
    ----------
    fid : file descriptor
        An open file descriptor.
    src : list
        The list of source spaces.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    """
    # Each source space is wrapped in its own FIFF block.
    for space in src:
        logger.info(' Write a source space...')
        start_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
        _write_one_source_space(fid, space, verbose)
        end_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
        logger.info(' [done]')
    logger.info(' %d source spaces written' % len(src))
@verbose
def write_source_spaces(fname, src, verbose=None):
    """Write source spaces to a file

    Parameters
    ----------
    fname : str
        The name of the file, which should end with -src.fif or
        -src.fif.gz.
    src : SourceSpaces
        The source spaces (as returned by read_source_spaces).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    See Also
    --------
    read_source_spaces
    """
    check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz'))

    fid = start_file(fname)
    start_block(fid, FIFF.FIFFB_MNE)

    if src.info:
        # Record provenance (working dir / command line) when available.
        start_block(fid, FIFF.FIFFB_MNE_ENV)
        write_id(fid, FIFF.FIFF_BLOCK_ID)
        env_items = (('working_dir', FIFF.FIFF_MNE_ENV_WORKING_DIR),
                     ('command_line', FIFF.FIFF_MNE_ENV_COMMAND_LINE))
        for key, tag_kind in env_items:
            value = src.info.get(key, None)
            if value:
                write_string(fid, tag_kind, value)
        end_block(fid, FIFF.FIFFB_MNE_ENV)

    _write_source_spaces_to_fid(fid, src, verbose)

    end_block(fid, FIFF.FIFFB_MNE)
    end_file(fid)
def _write_one_source_space(fid, this, verbose=None):
    """Write one source space into an open FIF file.

    The order of the tag writes below defines the on-disk layout and must
    match what the reader expects -- do not reorder.

    Parameters
    ----------
    fid : file descriptor
        An open file descriptor, positioned inside a source-space block.
    this : dict
        The source space to write.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Raises
    ------
    ValueError
        If ``this['type']`` is not 'surf', 'vol' or 'discrete'.
    """
    # Map the string type onto the corresponding FIFF integer constant.
    if this['type'] == 'surf':
        src_type = FIFF.FIFFV_MNE_SPACE_SURFACE
    elif this['type'] == 'vol':
        src_type = FIFF.FIFFV_MNE_SPACE_VOLUME
    elif this['type'] == 'discrete':
        src_type = FIFF.FIFFV_MNE_SPACE_DISCRETE
    else:
        raise ValueError('Unknown source space type (%s)' % this['type'])
    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE, src_type)
    # Negative id means "not assigned"; only write valid ids.
    if this['id'] >= 0:
        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_ID, this['id'])
    data = this.get('subject_his_id', None)
    if data:
        write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, data)
    write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, this['coord_frame'])
    # Point locations and normals.
    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, this['np'])
    write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS, this['rr'])
    write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS, this['nn'])
    # Which vertices are active
    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION, this['inuse'])
    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE, this['nuse'])
    # Triangulation (1-based indices on disk, hence the +1).
    if this['ntri'] > 0:
        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI, this['ntri'])
        write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES,
                         this['tris'] + 1)
    if this['type'] != 'vol' and this['use_tris'] is not None:
        # Use triangulation
        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI, this['nuse_tri'])
        write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES,
                         this['use_tris'] + 1)
    if this['type'] == 'vol':
        # Volume-specific data: voxel neighborhoods (flattened with a
        # per-vertex count array), transforms, and the parent-MRI block.
        neighbor_vert = this.get('neighbor_vert', None)
        if neighbor_vert is not None:
            nneighbors = np.array([len(n) for n in neighbor_vert])
            neighbors = np.concatenate(neighbor_vert)
            write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS, nneighbors)
            write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS, neighbors)
        write_coord_trans(fid, this['src_mri_t'])
        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS, this['shape'])
        start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
        write_coord_trans(fid, this['mri_ras_t'])
        write_coord_trans(fid, this['vox_mri_t'])
        mri_volume_name = this.get('mri_volume_name', None)
        if mri_volume_name is not None:
            write_string(fid, FIFF.FIFF_MNE_FILE_NAME, mri_volume_name)
        write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR,
                               this['interpolator'])
        if 'mri_file' in this and this['mri_file'] is not None:
            write_string(fid, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE,
                         this['mri_file'])
        write_int(fid, FIFF.FIFF_MRI_WIDTH, this['mri_width'])
        write_int(fid, FIFF.FIFF_MRI_HEIGHT, this['mri_height'])
        write_int(fid, FIFF.FIFF_MRI_DEPTH, this['mri_depth'])
        end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
    # Patch-related information
    if this['nearest'] is not None:
        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST, this['nearest'])
        write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST,
                           this['nearest_dist'])
    # Distances
    if this['dist'] is not None:
        # Save only upper triangular portion of the matrix
        dists = this['dist'].copy()
        dists = sparse.triu(dists, format=dists.format)
        write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST, dists)
        write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT,
                           this['dist_limit'])
    # Segmentation data
    if this['type'] == 'vol' and ('seg_name' in this):
        # Save the name of the segment
        write_string(fid, FIFF.FIFF_COMMENT, this['seg_name'])
##############################################################################
# Surface to MNI conversion
@verbose
def vertex_to_mni(vertices, hemis, subject, subjects_dir=None, mode=None,
                  verbose=None):
    """Convert the array of vertices for a hemisphere to MNI coordinates

    Parameters
    ----------
    vertices : int, or list of int
        Vertex number(s) to convert
    hemis : int, or list of int
        Hemisphere(s) the vertices belong to
    subject : string
        Name of the subject to load surfaces from.
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    mode : string | None
        Either 'nibabel' or 'freesurfer' for the software to use to
        obtain the transforms. If None, 'nibabel' is tried first, falling
        back to 'freesurfer' if it fails. Results should be equivalent with
        either option, but nibabel may be quicker (and more pythonic).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    coordinates : n_vertices x 3 array of float
        The MNI coordinates (in mm) of the vertices

    Notes
    -----
    This function requires either nibabel (in Python) or Freesurfer
    (with utility "mri_info") to be correctly installed.
    """
    if not has_freesurfer() and not has_nibabel():
        raise RuntimeError('NiBabel (Python) or Freesurfer (Unix) must be '
                           'correctly installed and accessible from Python')

    # Normalize scalar arguments to parallel sequences.
    if not isinstance(vertices, (list, np.ndarray)):
        vertices = [vertices]
    if not isinstance(hemis, (list, np.ndarray)):
        hemis = [hemis] * len(vertices)
    if len(hemis) != len(vertices):
        raise ValueError('hemi and vertices must match in length')

    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    surfs = [op.join(subjects_dir, subject, 'surf', '%s.white' % hemi)
             for hemi in ['lh', 'rh']]

    # read surface locations in MRI space
    rr = [read_surface(surf)[0] for surf in surfs]

    # take point locations in MRI space and convert to MNI coordinates
    xfm = _read_talxfm(subject, subjects_dir, mode)
    points = np.array([rr[h][v, :] for h, v in zip(hemis, vertices)])
    return apply_trans(xfm['trans'], points)
@verbose
def _read_talxfm(subject, subjects_dir, mode=None, verbose=None):
    """Read MNI transform from FreeSurfer talairach.xfm file

    Adapted from freesurfer m-files. Altered to deal with Norig
    and Torig correctly.

    Builds the MRI (surface RAS) -> MNI Talairach transform by chaining:
    MRI -> MRI_VOXEL (inverse of Torig), MRI_VOXEL -> RAS (Norig), and
    RAS -> MNI (from talairach.xfm).
    """
    if mode is not None and mode not in ['nibabel', 'freesurfer']:
        raise ValueError('mode must be "nibabel" or "freesurfer"')
    fname = op.join(subjects_dir, subject, 'mri', 'transforms',
                    'talairach.xfm')
    # read the RAS to MNI transform from talairach.xfm
    with open(fname, 'r') as fid:
        logger.debug('Reading FreeSurfer talairach.xfm file:\n%s' % fname)

        # read lines until we get the string 'Linear_Transform', which precedes
        # the data transformation matrix
        got_it = False
        comp = 'Linear_Transform'
        for line in fid:
            if line[:len(comp)] == comp:
                # we have the right line, so don't read any more
                got_it = True
                break

        if got_it:
            xfm = list()
            # read the transformation matrix (3x4)
            for ii, line in enumerate(fid):
                # strip trailing newline/semicolon before splitting into floats
                digs = [float(s) for s in line.strip('\n;').split()]
                xfm.append(digs)
                if ii == 2:
                    break
            # append the homogeneous row to make the matrix 4x4
            xfm.append([0., 0., 0., 1.])
            xfm = np.array(xfm, dtype=float)
        else:
            raise ValueError('failed to find \'Linear_Transform\' string in '
                             'xfm file:\n%s' % fname)

    # Setup the RAS to MNI transform
    ras_mni_t = {'from': FIFF.FIFFV_MNE_COORD_RAS,
                 'to': FIFF.FIFFV_MNE_COORD_MNI_TAL, 'trans': xfm}

    # now get Norig and Torig
    # (i.e. vox_ras_t and vox_mri_t, respectively)
    # prefer orig.mgz, fall back to T1.mgz
    path = op.join(subjects_dir, subject, 'mri', 'orig.mgz')
    if not op.isfile(path):
        path = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
    if not op.isfile(path):
        raise IOError('mri not found: %s' % path)

    if has_nibabel():
        use_nibabel = True
    else:
        use_nibabel = False
        # only error out if the caller explicitly demanded nibabel
        if mode == 'nibabel':
            raise ImportError('Tried to import nibabel but failed, try using '
                              'mode=None or mode=Freesurfer')

    # note that if mode == None, then we default to using nibabel
    if use_nibabel is True and mode == 'freesurfer':
        use_nibabel = False
    if use_nibabel:
        hdr = _get_mri_header(path)
        # read the MRI_VOXEL to RAS transform
        n_orig = hdr.get_vox2ras()
        # read the MRI_VOXEL to MRI transform
        ds = np.array(hdr.get_zooms())
        ns = (np.array(hdr.get_data_shape()[:3]) * ds) / 2.0
        t_orig = np.array([[-ds[0], 0, 0, ns[0]],
                           [0, 0, ds[2], -ns[2]],
                           [0, -ds[1], 0, ns[1]],
                           [0, 0, 0, 1]], dtype=float)
        nt_orig = [n_orig, t_orig]
    else:
        # shell out to Freesurfer's mri_info for the two transforms
        nt_orig = list()
        for conv in ['--vox2ras', '--vox2ras-tkr']:
            stdout, stderr = run_subprocess(['mri_info', conv, path])
            stdout = np.fromstring(stdout, sep=' ').astype(float)
            if not stdout.size == 16:
                raise ValueError('Could not parse Freesurfer mri_info output')
            nt_orig.append(stdout.reshape(4, 4))
    # extract the MRI_VOXEL to RAS transform
    n_orig = nt_orig[0]
    vox_ras_t = {'from': FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
                 'to': FIFF.FIFFV_MNE_COORD_RAS,
                 'trans': n_orig}

    # extract the MRI_VOXEL to MRI transform
    t_orig = nt_orig[1]
    vox_mri_t = Transform('mri_voxel', 'mri', t_orig)

    # invert MRI_VOXEL to MRI to get the MRI to MRI_VOXEL transform
    mri_vox_t = invert_transform(vox_mri_t)

    # construct an MRI to RAS transform
    mri_ras_t = combine_transforms(mri_vox_t, vox_ras_t, 'mri', 'ras')

    # construct the MRI to MNI transform
    mri_mni_t = combine_transforms(mri_ras_t, ras_mni_t, 'mri', 'mni_tal')
    return mri_mni_t
###############################################################################
# Creation and decimation
@verbose
def setup_source_space(subject, fname=True, spacing='oct6', surface='white',
                       overwrite=False, subjects_dir=None, add_dist=True,
                       n_jobs=1, verbose=None):
    """Setup a bilateral hemisphere surface-based source space with subsampling

    Parameters
    ----------
    subject : str
        Subject to process.
    fname : str | None | bool
        Filename to use. If True, a default name will be used. If None,
        the source space will not be saved (only returned).
    spacing : str
        The spacing to use. Can be ``'ico#'`` for a recursively subdivided
        icosahedron, ``'oct#'`` for a recursively subdivided octahedron,
        or ``'all'`` for all points.
    surface : str
        The surface to use.
    overwrite: bool
        If True, overwrite output file (if it exists).
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    add_dist : bool
        Add distance and patch information to the source space. This takes some
        time so precomputing it is recommended.
    n_jobs : int
        Number of jobs to run in parallel. Will use at most 2 jobs
        (one for each hemisphere).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    src : list
        The source space for each hemisphere.

    See Also
    --------
    setup_volume_source_space
    """
    # Record how this source space was created (stored in the output file).
    cmd = ('setup_source_space(%s, fname=%s, spacing=%s, surface=%s, '
           'overwrite=%s, subjects_dir=%s, add_dist=%s, verbose=%s)'
           % (subject, fname, spacing, surface, overwrite,
              subjects_dir, add_dist, verbose))
    # check to make sure our parameters are good, parse 'spacing'
    # (bug fix: the message previously read `"oct"numbers` -- adjacent string
    # literals were concatenated without a separating space)
    space_err = ('"spacing" must be a string with values '
                 '"ico#", "oct#", or "all", and "ico" and "oct" '
                 'numbers must be integers')
    if not isinstance(spacing, string_types) or len(spacing) < 3:
        raise ValueError(space_err)
    if spacing == 'all':
        stype = 'all'
        sval = ''
    elif spacing[:3] == 'ico':
        stype = 'ico'
        sval = spacing[3:]
    elif spacing[:3] == 'oct':
        stype = 'oct'
        sval = spacing[3:]
    else:
        raise ValueError(space_err)
    # Parse the subdivision grade. The original code had an unreachable
    # `elif stype == 'spacing': sval = float(sval)` branch here (stype can
    # only be 'all', 'ico' or 'oct' at this point) and used a bare `except:`
    # that also swallowed KeyboardInterrupt/SystemExit; int() on a bad
    # grade raises ValueError, which is all we need to catch.
    try:
        if stype in ['ico', 'oct']:
            sval = int(sval)
    except ValueError:
        raise ValueError(space_err)
    subjects_dir = get_subjects_dir(subjects_dir)
    surfs = [op.join(subjects_dir, subject, 'surf', hemi + surface)
             for hemi in ['lh.', 'rh.']]
    bem_dir = op.join(subjects_dir, subject, 'bem')

    for surf, hemi in zip(surfs, ['LH', 'RH']):
        if surf is not None and not op.isfile(surf):
            raise IOError('Could not find the %s surface %s'
                          % (hemi, surf))

    if not (fname is True or fname is None or isinstance(fname, string_types)):
        raise ValueError('"fname" must be a string, True, or None')
    if fname is True:
        extra = '%s-%s' % (stype, sval) if sval != '' else stype
        fname = op.join(bem_dir, '%s-%s-src.fif' % (subject, extra))
    if fname is not None and op.isfile(fname) and overwrite is False:
        raise IOError('file "%s" exists, use overwrite=True if you want '
                      'to overwrite the file' % fname)

    logger.info('Setting up the source space with the following parameters:\n')
    logger.info('SUBJECTS_DIR = %s' % subjects_dir)
    logger.info('Subject = %s' % subject)
    logger.info('Surface = %s' % surface)
    if stype == 'ico':
        src_type_str = 'ico = %s' % sval
        logger.info('Icosahedron subdivision grade %s\n' % sval)
    elif stype == 'oct':
        src_type_str = 'oct = %s' % sval
        logger.info('Octahedron subdivision grade %s\n' % sval)
    else:
        src_type_str = 'all'
        logger.info('Include all vertices\n')

    # Create the fif file
    if fname is not None:
        logger.info('>>> 1. Creating the source space file %s...' % fname)
    else:
        logger.info('>>> 1. Creating the source space...\n')

    # mne_make_source_space ... actually make the source spaces
    src = []

    # pre-load ico/oct surf (once) for speed, if necessary
    if stype in ['ico', 'oct']:
        # ### from mne_ico_downsample.c ###
        if stype == 'ico':
            logger.info('Doing the icosahedral vertex picking...')
            ico_surf = _get_ico_surface(sval)
        else:
            logger.info('Doing the octahedral vertex picking...')
            ico_surf = _tessellate_sphere_surf(sval)
    else:
        ico_surf = None

    for hemi, surf in zip(['lh', 'rh'], surfs):
        logger.info('Loading %s...' % surf)
        # Setup the surface spacing in the MRI coord frame
        s = _create_surf_spacing(surf, hemi, subject, stype, sval, ico_surf,
                                 subjects_dir)
        logger.info('loaded %s %d/%d selected to source space (%s)'
                    % (op.split(surf)[1], s['nuse'], s['np'], src_type_str))
        src.append(s)
        logger.info('')  # newline after both subject types are run

    # Fill in source space info
    hemi_ids = [FIFF.FIFFV_MNE_SURF_LEFT_HEMI, FIFF.FIFFV_MNE_SURF_RIGHT_HEMI]
    for s, s_id in zip(src, hemi_ids):
        # Add missing fields
        s.update(dict(dist=None, dist_limit=None, nearest=None, type='surf',
                      nearest_dist=None, pinfo=None, patch_inds=None, id=s_id,
                      coord_frame=np.array((FIFF.FIFFV_COORD_MRI,), np.int32)))
        s['rr'] /= 1000.0  # convert positions from mm to m
        # drop derived triangle data that is not stored in the file
        del s['tri_area']
        del s['tri_cent']
        del s['tri_nn']
        del s['neighbor_tri']

    # upconvert to object format from lists
    src = SourceSpaces(src, dict(working_dir=os.getcwd(), command_line=cmd))

    if add_dist:
        add_source_space_distances(src, n_jobs=n_jobs, verbose=verbose)

    # write out if requested, then return the data
    if fname is not None:
        write_source_spaces(fname, src)
        logger.info('Wrote %s' % fname)
    logger.info('You are now one step closer to computing the gain matrix')
    return src
@verbose
def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
                              sphere=(0.0, 0.0, 0.0, 90.0), bem=None,
                              surface=None, mindist=5.0, exclude=0.0,
                              overwrite=False, subjects_dir=None,
                              volume_label=None, add_interpolator=True,
                              verbose=None):
    """Setup a volume source space with grid spacing or discrete source space

    Parameters
    ----------
    subject : str
        Subject to process.
    fname : str | None
        Filename to use. If None, the source space will not be saved
        (only returned).
    pos : float | dict
        Positions to use for sources. If float, a grid will be constructed
        with the spacing given by `pos` in mm, generating a volume source
        space. If dict, pos['rr'] and pos['nn'] will be used as the source
        space locations (in meters) and normals, respectively, creating a
        discrete source space. NOTE: For a discrete source space (`pos` is
        a dict), `mri` must be None.
    mri : str | None
        The filename of an MRI volume (mgh or mgz) to create the
        interpolation matrix over. Source estimates obtained in the
        volume source space can then be morphed onto the MRI volume
        using this interpolator. If pos is a dict, this can be None.
    sphere : array_like (length 4)
        Define spherical source space bounds using origin and radius given
        by (ox, oy, oz, rad) in mm. Only used if `bem` and `surface` are
        both None.
    bem : str | None
        Define source space bounds using a BEM file (specifically the inner
        skull surface).
    surface : str | dict | None
        Define source space bounds using a FreeSurfer surface file. Can
        also be a dictionary with entries `'rr'` and `'tris'`, such as
        those returned by :func:`mne.read_surface`.
    mindist : float
        Exclude points closer than this distance (mm) to the bounding surface.
    exclude : float
        Exclude points closer than this distance (mm) from the center of mass
        of the bounding surface.
    overwrite: bool
        If True, overwrite output file (if it exists).
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    volume_label : str | None
        Region of interest corresponding with freesurfer lookup table.
    add_interpolator : bool
        If True and ``mri`` is not None, then an interpolation matrix
        will be produced.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    src : list
        The source space. Note that this list will have length 1 for
        compatibility reasons, as most functions expect source spaces
        to be provided as lists).

    See Also
    --------
    setup_source_space

    Notes
    -----
    To create a discrete source space, `pos` must be a dict, 'mri' must be
    None, and 'volume_label' must be None. To create a whole brain volume
    source space, `pos` must be a float and 'mri' must be provided. To create
    a volume source space from label, 'pos' must be a float, 'volume_label'
    must be provided, and 'mri' must refer to a .mgh or .mgz file with values
    corresponding to the freesurfer lookup-table (typically aseg.mgz).
    """
    subjects_dir = get_subjects_dir(subjects_dir)

    # validate mutually-exclusive / dependent argument combinations
    if bem is not None and surface is not None:
        raise ValueError('Only one of "bem" and "surface" should be '
                         'specified')
    if mri is not None:
        if not op.isfile(mri):
            raise IOError('mri file "%s" not found' % mri)
        if isinstance(pos, dict):
            raise ValueError('Cannot create interpolation matrix for '
                             'discrete source space, mri must be None if '
                             'pos is a dict')
    if volume_label is not None:
        if mri is None:
            raise RuntimeError('"mri" must be provided if "volume_label" is '
                               'not None')
        # Check that volume label is found in .mgz file
        volume_labels = get_volume_labels_from_aseg(mri)
        if volume_label not in volume_labels:
            raise ValueError('Volume %s not found in file %s. Double check '
                             'freesurfer lookup table.' % (volume_label, mri))

    sphere = np.asarray(sphere)
    if sphere.size != 4:
        raise ValueError('"sphere" must be array_like with 4 elements')

    # triage bounding argument
    if bem is not None:
        logger.info('BEM file : %s', bem)
    elif surface is not None:
        if isinstance(surface, dict):
            if not all(key in surface for key in ['rr', 'tris']):
                raise KeyError('surface, if dict, must have entries "rr" '
                               'and "tris"')
            # let's make sure we have geom info
            surface = _read_surface_geom(surface, verbose=False)
            surf_extra = 'dict()'
        elif isinstance(surface, string_types):
            if not op.isfile(surface):
                raise IOError('surface file "%s" not found' % surface)
            surf_extra = surface
        logger.info('Boundary surface file : %s', surf_extra)
    else:
        logger.info('Sphere : origin at (%.1f %.1f %.1f) mm'
                    % (sphere[0], sphere[1], sphere[2]))
        logger.info(' radius : %.1f mm' % sphere[3])

    # triage pos argument
    if isinstance(pos, dict):
        if not all(key in pos for key in ['rr', 'nn']):
            raise KeyError('pos, if dict, must contain "rr" and "nn"')
        pos_extra = 'dict()'
    else:  # pos should be float-like
        try:
            pos = float(pos)
        except (TypeError, ValueError):
            raise ValueError('pos must be a dict, or something that can be '
                             'cast to float()')
    if not isinstance(pos, float):
        logger.info('Source location file : %s', pos_extra)
        logger.info('Assuming input in millimeters')
        logger.info('Assuming input in MRI coordinates')

    logger.info('Output file : %s', fname)
    if isinstance(pos, float):
        logger.info('grid : %.1f mm' % pos)
        logger.info('mindist : %.1f mm' % mindist)
        pos /= 1000.0  # convert pos from mm to m
    if exclude > 0.0:
        logger.info('Exclude : %.1f mm' % exclude)
    if mri is not None:
        logger.info('MRI volume : %s' % mri)
    exclude /= 1000.0  # convert exclude from mm to m
    logger.info('')

    # Explicit list of points
    if not isinstance(pos, float):
        # Make the grid of sources
        sp = _make_discrete_source_space(pos)
    else:
        # Load the brain surface as a template
        if bem is not None:
            # read bem surface in the MRI coordinate frame
            surf = read_bem_surfaces(bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN,
                                     verbose=False)
            logger.info('Loaded inner skull from %s (%d nodes)'
                        % (bem, surf['np']))
        elif surface is not None:
            if isinstance(surface, string_types):
                # read the surface in the MRI coordinate frame
                surf = _read_surface_geom(surface)
            else:
                surf = surface
            logger.info('Loaded bounding surface from %s (%d nodes)'
                        % (surface, surf['np']))
            surf = deepcopy(surf)
            surf['rr'] *= 1e-3  # must be converted to meters
        else:  # Load an icosahedron and use that as the surface
            logger.info('Setting up the sphere...')
            surf = _get_ico_surface(3)
            # Scale and shift
            # center at origin and make radius 1
            _normalize_vectors(surf['rr'])
            # normalize to sphere (in MRI coord frame)
            surf['rr'] *= sphere[3] / 1000.0  # scale by radius
            surf['rr'] += sphere[:3] / 1000.0  # move by center
            _complete_surface_info(surf, True)
        # Make the grid of sources in MRI space
        sp = _make_volume_source_space(surf, pos, exclude, mindist, mri,
                                       volume_label)

    # Compute an interpolation matrix to show data in MRI_VOXEL coord frame
    if mri is not None:
        _add_interpolator(sp, mri, add_interpolator)
    elif sp['type'] == 'vol':
        # If there is no interpolator, it's actually a discrete source space
        sp['type'] = 'discrete'

    if 'vol_dims' in sp:
        del sp['vol_dims']

    # Save it
    sp.update(dict(nearest=None, dist=None, use_tris=None, patch_inds=None,
                   dist_limit=None, pinfo=None, ntri=0, nearest_dist=None,
                   nuse_tri=0, tris=None))
    sp = SourceSpaces([sp], dict(working_dir=os.getcwd(), command_line='None'))
    if fname is not None:
        write_source_spaces(fname, sp, verbose=False)
    return sp
def _make_voxel_ras_trans(move, ras, voxel_size):
    """Make a transformation from MRI_VOXEL to MRI surface RAS (i.e. MRI)"""
    assert voxel_size.ndim == 1
    assert voxel_size.size == 3
    # Scale the (transposed) direction cosines column-wise by voxel size.
    rot = ras.T * voxel_size[np.newaxis, :]
    assert rot.ndim == 2
    assert rot.shape == (3, 3)
    # Assemble the homogeneous 4x4: [rot | move; 0 0 0 | 1].
    bottom = np.zeros((1, 3))
    trans = np.c_[np.r_[rot, bottom], np.r_[move, 1.0]]
    return Transform('mri_voxel', 'mri', trans)
def _make_discrete_source_space(pos, coord_frame='mri'):
    """Use a discrete set of source locs/oris to make src space

    Parameters
    ----------
    pos : dict
        Must have entries "rr" and "nn". Data should be in meters.
    coord_frame : str
        The coordinate frame in which the positions are given; default: 'mri'.
        The frame must be one defined in transforms.py:_str_to_frame

    Returns
    -------
    src : dict
        The source space.
    """
    # Check that coordinate frame is valid
    if coord_frame not in _str_to_frame:  # will fail if coord_frame not string
        raise KeyError('coord_frame must be one of %s, not "%s"'
                       % (list(_str_to_frame.keys()), coord_frame))
    coord_frame = _str_to_frame[coord_frame]  # now an int

    # process points (copies so we never mutate the caller's arrays)
    rr = pos['rr'].copy()
    nn = pos['nn'].copy()
    # Bug fix: the original compared nn.shape[0] with itself (always true),
    # so mismatched numbers of positions and normals slipped through; the
    # intended check is rr.shape[0] == nn.shape[0].
    if not (rr.ndim == nn.ndim == 2 and rr.shape[0] == nn.shape[0] and
            rr.shape[1] == nn.shape[1]):
        raise RuntimeError('"rr" and "nn" must both be 2D arrays with '
                           'the same number of rows and 3 columns')
    npts = rr.shape[0]
    _normalize_vectors(nn)
    # any rows that normalized to zero had zero-length normals
    nz = np.sum(np.sum(nn * nn, axis=1) == 0)
    if nz != 0:
        raise RuntimeError('%d sources have zero length normal' % nz)
    logger.info('Positions (in meters) and orientations')
    logger.info('%d sources' % npts)

    # Ready to make the source space
    sp = dict(coord_frame=coord_frame, type='discrete', nuse=npts, np=npts,
              inuse=np.ones(npts, int), vertno=np.arange(npts), rr=rr, nn=nn,
              id=-1)
    return sp
def _make_volume_source_space(surf, grid, exclude, mindist, mri=None,
                              volume_label=None, do_neighbors=True, n_jobs=1):
    """Make a source space which covers the volume bounded by surf

    Parameters
    ----------
    surf : dict
        Bounding surface with 'rr' (in meters).
    grid : float
        Grid spacing in meters.
    exclude : float
        Exclude points closer than this (m) to the surface center of mass.
    mindist : float
        Exclude points closer than this (mm) to the bounding surface.
    mri : str | None
        MRI volume filename (needed when volume_label is given).
    volume_label : str | None
        Restrict sources to this freesurfer segmentation label.
    do_neighbors : bool
        Whether to compute the 26-neighborhood info (required when
        volume_label is not None).
    n_jobs : int
        Number of jobs for the surface filtering step.

    Returns
    -------
    sp : dict
        The volume source space.
    """
    # Figure out the grid size in the MRI coordinate frame
    mins = np.min(surf['rr'], axis=0)
    maxs = np.max(surf['rr'], axis=0)
    cm = np.mean(surf['rr'], axis=0)  # center of mass

    # Define the sphere which fits the surface
    maxdist = np.sqrt(np.max(np.sum((surf['rr'] - cm) ** 2, axis=1)))

    logger.info('Surface CM = (%6.1f %6.1f %6.1f) mm'
                % (1000 * cm[0], 1000 * cm[1], 1000 * cm[2]))
    logger.info('Surface fits inside a sphere with radius %6.1f mm'
                % (1000 * maxdist))
    logger.info('Surface extent:')
    for c, mi, ma in zip('xyz', mins, maxs):
        logger.info(' %s = %6.1f ... %6.1f mm' % (c, 1000 * mi, 1000 * ma))
    # Round the extents outward to whole grid steps.
    maxn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
                     np.floor(np.abs(m) / grid) - 1 for m in maxs], int)
    minn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
                     np.floor(np.abs(m) / grid) - 1 for m in mins], int)
    logger.info('Grid extent:')
    for c, mi, ma in zip('xyz', minn, maxn):
        logger.info(' %s = %6.1f ... %6.1f mm'
                    % (c, 1000 * mi * grid, 1000 * ma * grid))

    # Now make the initial grid
    ns = maxn - minn + 1
    npts = np.prod(ns)
    nrow = ns[0]
    ncol = ns[1]
    nplane = nrow * ncol
    # x varies fastest, then y, then z (can use unravel to do this)
    rr = np.meshgrid(np.arange(minn[2], maxn[2] + 1),
                     np.arange(minn[1], maxn[1] + 1),
                     np.arange(minn[0], maxn[0] + 1), indexing='ij')
    x, y, z = rr[2].ravel(), rr[1].ravel(), rr[0].ravel()
    rr = np.array([x * grid, y * grid, z * grid]).T
    sp = dict(np=npts, nn=np.zeros((npts, 3)), rr=rr,
              inuse=np.ones(npts, int), type='vol', nuse=npts,
              coord_frame=FIFF.FIFFV_COORD_MRI, id=-1, shape=ns)
    sp['nn'][:, 2] = 1.0  # all normals point in +z by convention
    assert sp['rr'].shape[0] == npts

    logger.info('%d sources before omitting any.', sp['nuse'])

    # Exclude infeasible points
    dists = np.sqrt(np.sum((sp['rr'] - cm) ** 2, axis=1))
    bads = np.where(np.logical_or(dists < exclude, dists > maxdist))[0]
    sp['inuse'][bads] = False
    sp['nuse'] -= len(bads)
    logger.info('%d sources after omitting infeasible sources.', sp['nuse'])
    _filter_source_spaces(surf, mindist, None, [sp], n_jobs)
    logger.info('%d sources remaining after excluding the sources outside '
                'the surface and less than %6.1f mm inside.'
                % (sp['nuse'], mindist))

    if not do_neighbors:
        # Bug fix: the original error message was inverted ("cannot be
        # None") -- this branch fires exactly when volume_label is NOT None.
        if volume_label is not None:
            raise RuntimeError('volume_label must be None unless '
                               'do_neighbors is True')
        return sp
    k = np.arange(npts)
    neigh = np.empty((26, npts), int)
    neigh.fill(-1)

    # Figure out each neighborhood:
    # 6-neighborhood first
    idxs = [z > minn[2], x < maxn[0], y < maxn[1],
            x > minn[0], y > minn[1], z < maxn[2]]
    offsets = [-nplane, 1, nrow, -1, -nrow, nplane]
    for n, idx, offset in zip(neigh[:6], idxs, offsets):
        n[idx] = k[idx] + offset

    # Then the rest to complete the 26-neighborhood

    # First the plane below
    idx1 = z > minn[2]

    idx2 = np.logical_and(idx1, x < maxn[0])
    neigh[6, idx2] = k[idx2] + 1 - nplane
    idx3 = np.logical_and(idx2, y < maxn[1])
    neigh[7, idx3] = k[idx3] + 1 + nrow - nplane

    idx2 = np.logical_and(idx1, y < maxn[1])
    neigh[8, idx2] = k[idx2] + nrow - nplane

    idx2 = np.logical_and(idx1, x > minn[0])
    idx3 = np.logical_and(idx2, y < maxn[1])
    neigh[9, idx3] = k[idx3] - 1 + nrow - nplane
    neigh[10, idx2] = k[idx2] - 1 - nplane
    idx3 = np.logical_and(idx2, y > minn[1])
    neigh[11, idx3] = k[idx3] - 1 - nrow - nplane

    idx2 = np.logical_and(idx1, y > minn[1])
    neigh[12, idx2] = k[idx2] - nrow - nplane
    idx3 = np.logical_and(idx2, x < maxn[0])
    neigh[13, idx3] = k[idx3] + 1 - nrow - nplane

    # Then the same plane
    idx1 = np.logical_and(x < maxn[0], y < maxn[1])
    neigh[14, idx1] = k[idx1] + 1 + nrow
    idx1 = x > minn[0]
    idx2 = np.logical_and(idx1, y < maxn[1])
    neigh[15, idx2] = k[idx2] - 1 + nrow
    idx2 = np.logical_and(idx1, y > minn[1])
    neigh[16, idx2] = k[idx2] - 1 - nrow
    idx1 = np.logical_and(y > minn[1], x < maxn[0])
    # NOTE(review): this is listed under "the same plane", yet the offset
    # includes `- nplane` (the in-plane diagonal would be `k[idx1] + 1 -
    # nrow`). Kept as-is to preserve behavior; confirm against the MNE-C
    # implementation before changing.
    neigh[17, idx1] = k[idx1] + 1 - nrow - nplane

    # Finally one plane above
    idx1 = z < maxn[2]

    idx2 = np.logical_and(idx1, x < maxn[0])
    neigh[18, idx2] = k[idx2] + 1 + nplane
    idx3 = np.logical_and(idx2, y < maxn[1])
    neigh[19, idx3] = k[idx3] + 1 + nrow + nplane

    idx2 = np.logical_and(idx1, y < maxn[1])
    neigh[20, idx2] = k[idx2] + nrow + nplane

    idx2 = np.logical_and(idx1, x > minn[0])
    idx3 = np.logical_and(idx2, y < maxn[1])
    neigh[21, idx3] = k[idx3] - 1 + nrow + nplane
    neigh[22, idx2] = k[idx2] - 1 + nplane
    idx3 = np.logical_and(idx2, y > minn[1])
    neigh[23, idx3] = k[idx3] - 1 - nrow + nplane

    idx2 = np.logical_and(idx1, y > minn[1])
    neigh[24, idx2] = k[idx2] - nrow + nplane
    idx3 = np.logical_and(idx2, x < maxn[0])
    neigh[25, idx3] = k[idx3] + 1 - nrow + nplane

    # Restrict sources to volume of interest
    if volume_label is not None:
        try:
            import nibabel as nib
        except ImportError:
            raise ImportError("nibabel is required to read segmentation file.")

        logger.info('Selecting voxels from %s' % volume_label)

        # Read the segmentation data using nibabel
        mgz = nib.load(mri)
        mgz_data = mgz.get_data()

        # Get the numeric index for this volume label
        lut = _get_lut()
        vol_id = _get_lut_id(lut, volume_label, True)

        # Get indices for this volume label in voxel space
        vox_bool = mgz_data == vol_id

        # Get the 3 dimensional indices in voxel space
        vox_xyz = np.array(np.where(vox_bool)).T

        # Transform to RAS coordinates
        # (use tkr normalization or volume won't align with surface sources)
        trans = _get_mgz_header(mri)['vox2ras_tkr']
        # Convert transform from mm to m
        trans[:3] /= 1000.
        rr_voi = apply_trans(trans, vox_xyz)  # positions of VOI in RAS space
        # Filter out points too far from volume region voxels
        dists = _compute_nearest(rr_voi, sp['rr'], return_dists=True)[1]
        # Maximum distance from center of mass of a voxel to any of its corners
        maxdist = np.sqrt(((trans[:3, :3].sum(0) / 2.) ** 2).sum())
        bads = np.where(dists > maxdist)[0]

        # Update source info
        sp['inuse'][bads] = False
        sp['vertno'] = np.where(sp['inuse'] > 0)[0]
        sp['nuse'] = len(sp['vertno'])
        sp['seg_name'] = volume_label
        sp['mri_file'] = mri

        # Update log
        logger.info('%d sources remaining after excluding sources too far '
                    'from VOI voxels', sp['nuse'])

    # Omit unused vertices from the neighborhoods
    logger.info('Adjusting the neighborhood info...')
    # remove non source-space points
    log_inuse = sp['inuse'] > 0
    neigh[:, np.logical_not(log_inuse)] = -1
    # remove these points from neigh
    vertno = np.where(log_inuse)[0]
    sp['vertno'] = vertno
    old_shape = neigh.shape
    neigh = neigh.ravel()
    checks = np.where(neigh >= 0)[0]
    removes = np.logical_not(np.in1d(checks, vertno))
    neigh[checks[removes]] = -1
    neigh.shape = old_shape
    neigh = neigh.T
    # Thought we would need this, but C code keeps -1 vertices, so we will:
    # neigh = [n[n >= 0] for n in enumerate(neigh[vertno])]
    sp['neighbor_vert'] = neigh

    # Set up the volume data (needed for creating the interpolation matrix)
    r0 = minn * grid
    voxel_size = grid * np.ones(3)
    ras = np.eye(3)
    sp['src_mri_t'] = _make_voxel_ras_trans(r0, ras, voxel_size)
    sp['vol_dims'] = maxn - minn + 1
    return sp
def _vol_vertex(width, height, jj, kk, pp):
return jj + width * kk + pp * (width * height)
def _get_mri_header(fname):
    """Get MRI header using nibabel"""
    import nibabel as nib
    img = nib.load(fname)
    # Newer nibabel exposes ``.header`` directly; older releases only
    # provide the ``get_header()`` accessor.
    if hasattr(img, 'header'):
        return img.header
    return img.get_header()
def _get_mgz_header(fname):
    """Adapted from nibabel to quickly extract header info

    Parameters
    ----------
    fname : str
        Path to a FreeSurfer ``.mgz`` volume.

    Returns
    -------
    header : dict
        With keys ``dims`` (3-element voxel counts), ``vox2ras_tkr``
        (4x4 voxel -> tkRAS transform, float32) and ``ras2vox``
        (4x4 scanner-RAS -> voxel transform).
    """
    if not fname.endswith('.mgz'):
        raise IOError('Filename must end with .mgz')
    # Layout of the fixed-size, big-endian binary header at the start of
    # the (gzip-compressed) file
    header_dtd = [('version', '>i4'), ('dims', '>i4', (4,)),
                  ('type', '>i4'), ('dof', '>i4'), ('goodRASFlag', '>i2'),
                  ('delta', '>f4', (3,)), ('Mdc', '>f4', (3, 3)),
                  ('Pxyz_c', '>f4', (3,))]
    header_dtype = np.dtype(header_dtd)
    # Read only the header bytes instead of loading the whole volume
    with GzipFile(fname, 'rb') as fid:
        hdr_str = fid.read(header_dtype.itemsize)
    header = np.ndarray(shape=(), dtype=header_dtype,
                        buffer=hdr_str)
    # dims: drop the 4th (frame) dimension if present
    dims = header['dims'].astype(int)
    dims = dims[:3] if len(dims) == 4 else dims
    # vox2ras_tkr: "tkregister" RAS transform, built from voxel sizes only
    delta = header['delta']
    ds = np.array(delta, float)
    ns = np.array(dims * ds) / 2.0
    v2rtkr = np.array([[-ds[0], 0, 0, ns[0]],
                       [0, 0, ds[2], -ns[2]],
                       [0, -ds[1], 0, ns[1]],
                       [0, 0, 0, 1]], dtype=np.float32)
    # ras2vox: invert the scanner vox2ras assembled from direction cosines
    # (Mdc), voxel sizes (delta) and the RAS center (Pxyz_c)
    d = np.diag(delta)
    pcrs_c = dims / 2.0
    Mdc = header['Mdc'].T
    pxyz_0 = header['Pxyz_c'] - np.dot(Mdc, np.dot(d, pcrs_c))
    M = np.eye(4, 4)
    M[0:3, 0:3] = np.dot(Mdc, d)
    M[0:3, 3] = pxyz_0.T
    M = linalg.inv(M)
    header = dict(dims=dims, vox2ras_tkr=v2rtkr, ras2vox=M)
    return header
def _add_interpolator(s, mri_name, add_interpolator):
    """Compute a sparse matrix to interpolate the data into an MRI volume

    Updates ``s`` in-place with the MRI dimensions, the relevant transforms,
    and ``s['interpolator']``, a CSR matrix of shape (n_mri_voxels, s['np']).

    Parameters
    ----------
    s : dict
        The volume source space (must contain 'src_mri_t', 'vol_dims',
        'inuse' and 'np').
    mri_name : str
        Path of the .mgz MRI volume to interpolate into.
    add_interpolator : bool
        If False, only an empty interpolator of the correct shape is stored.
    """
    # extract transformation information from mri
    logger.info('Reading %s...' % mri_name)
    header = _get_mgz_header(mri_name)
    mri_width, mri_height, mri_depth = header['dims']
    s.update(dict(mri_width=mri_width, mri_height=mri_height,
                  mri_depth=mri_depth))
    trans = header['vox2ras_tkr'].copy()
    trans[:3, :] /= 1000.0  # mm -> m
    s['vox_mri_t'] = Transform('mri_voxel', 'mri', trans)  # ras_tkr
    trans = linalg.inv(np.dot(header['vox2ras_tkr'], header['ras2vox']))
    trans[:3, 3] /= 1000.0  # mm -> m
    s['mri_ras_t'] = Transform('mri', 'ras', trans)  # ras
    s['mri_volume_name'] = mri_name
    nvox = mri_width * mri_height * mri_depth
    if not add_interpolator:
        # placeholder of the right shape, no actual interpolation weights
        s['interpolator'] = sparse.csr_matrix((nvox, s['np']))
        return
    _print_coord_trans(s['src_mri_t'], 'Source space : ')
    _print_coord_trans(s['vox_mri_t'], 'MRI volume : ')
    _print_coord_trans(s['mri_ras_t'], 'MRI volume : ')
    #
    # Convert MRI voxels from destination (MRI volume) to source (volume
    # source space subset) coordinates
    #
    combo_trans = combine_transforms(s['vox_mri_t'],
                                     invert_transform(s['src_mri_t']),
                                     'mri_voxel', 'mri_voxel')
    combo_trans['trans'] = combo_trans['trans'].astype(np.float32)
    logger.info('Setting up interpolation...')
    # Loop over slices to save (lots of) memory
    # Note that it is the slowest incrementing index
    # This is equivalent to using mgrid and reshaping, but faster
    data = []
    indices = []
    indptr = np.zeros(nvox + 1, np.int32)
    for p in range(mri_depth):
        # voxel coordinates (j, k) of this slice, flattened row-major
        js = np.arange(mri_width, dtype=np.float32)
        js = np.tile(js[np.newaxis, :],
                     (mri_height, 1)).ravel()
        ks = np.arange(mri_height, dtype=np.float32)
        ks = np.tile(ks[:, np.newaxis],
                     (1, mri_width)).ravel()
        ps = np.empty((mri_height, mri_width), np.float32).ravel()
        ps.fill(p)
        r0 = np.c_[js, ks, ps]
        del js, ks, ps
        # Transform our vertices from their MRI space into our source space's
        # frame (this is labeled as FIFFV_MNE_COORD_MRI_VOXEL, but it's
        # really a subset of the entire volume!)
        r0 = apply_trans(combo_trans['trans'], r0)
        rn = np.floor(r0).astype(int)
        # keep only voxels that fall inside the source-space grid
        maxs = (s['vol_dims'] - 1)[np.newaxis, :]
        good = np.where(np.logical_and(np.all(rn >= 0, axis=1),
                                       np.all(rn < maxs, axis=1)))[0]
        rn = rn[good]
        r0 = r0[good]
        # now we take each MRI voxel *in this space*, and figure out how
        # to make its value the weighted sum of voxels in the volume source
        # space. This is a 3D weighting scheme based (presumably) on the
        # fact that we know we're interpolating from one volumetric grid
        # into another.
        jj = rn[:, 0]
        kk = rn[:, 1]
        pp = rn[:, 2]
        vss = np.empty((len(jj), 8), np.int32)
        width = s['vol_dims'][0]
        height = s['vol_dims'][1]
        jjp1 = jj + 1
        kkp1 = kk + 1
        ppp1 = pp + 1
        # the 8 corners of the enclosing source-space grid cell
        vss[:, 0] = _vol_vertex(width, height, jj, kk, pp)
        vss[:, 1] = _vol_vertex(width, height, jjp1, kk, pp)
        vss[:, 2] = _vol_vertex(width, height, jjp1, kkp1, pp)
        vss[:, 3] = _vol_vertex(width, height, jj, kkp1, pp)
        vss[:, 4] = _vol_vertex(width, height, jj, kk, ppp1)
        vss[:, 5] = _vol_vertex(width, height, jjp1, kk, ppp1)
        vss[:, 6] = _vol_vertex(width, height, jjp1, kkp1, ppp1)
        vss[:, 7] = _vol_vertex(width, height, jj, kkp1, ppp1)
        del jj, kk, pp, jjp1, kkp1, ppp1
        # only keep cells with at least one in-use source-space vertex
        uses = np.any(s['inuse'][vss], axis=1)
        if uses.size == 0:
            continue
        vss = vss[uses].ravel()  # vertex (col) numbers in csr matrix
        indices.append(vss)
        indptr[good[uses] + p * mri_height * mri_width + 1] = 8
        del vss
        # figure out weights for each vertex
        r0 = r0[uses]
        rn = rn[uses]
        del uses, good
        # fractional position inside the cell (trilinear interpolation)
        xf = r0[:, 0] - rn[:, 0].astype(np.float32)
        yf = r0[:, 1] - rn[:, 1].astype(np.float32)
        zf = r0[:, 2] - rn[:, 2].astype(np.float32)
        omxf = 1.0 - xf
        omyf = 1.0 - yf
        omzf = 1.0 - zf
        # each entry in the concatenation corresponds to a row of vss
        data.append(np.array([omxf * omyf * omzf,
                              xf * omyf * omzf,
                              xf * yf * omzf,
                              omxf * yf * omzf,
                              omxf * omyf * zf,
                              xf * omyf * zf,
                              xf * yf * zf,
                              omxf * yf * zf], order='F').T.ravel())
        del xf, yf, zf, omxf, omyf, omzf
    # Compose the sparse matrix
    indptr = np.cumsum(indptr, out=indptr)
    indices = np.concatenate(indices)
    data = np.concatenate(data)
    s['interpolator'] = sparse.csr_matrix((data, indices, indptr),
                                          shape=(nvox, s['np']))
    logger.info(' %d/%d nonzero values [done]' % (len(data), nvox))
@verbose
def _filter_source_spaces(surf, limit, mri_head_t, src, n_jobs=1,
                          verbose=None):
    """Remove all source space points closer than a given limit (in mm)

    Points outside ``surf`` (typically the inner skull) and, when
    ``limit > 0``, points closer than ``limit`` mm to the surface are
    marked unused; each source space in ``src`` is modified in-place.

    Parameters
    ----------
    surf : dict
        The bounding surface (with at least 'rr' and 'tris').
    limit : float
        Minimum allowed distance (mm) between sources and the surface.
    mri_head_t : instance of Transform | None
        MRI<->head transform; required when the source spaces are in head
        coordinates.
    src : list of dict
        The source spaces, modified in-place.
    n_jobs : int
        Number of jobs to run in parallel.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    """
    if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD and mri_head_t is None:
        raise RuntimeError('Source spaces are in head coordinates and no '
                           'coordinate transform was provided!')
    # How close are the source points to the surface?
    out_str = 'Source spaces are in '
    if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
        inv_trans = invert_transform(mri_head_t)
        out_str += 'head coordinates.'
    elif src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
        out_str += 'MRI coordinates.'
    else:
        out_str += 'unknown (%d) coordinates.' % src[0]['coord_frame']
    logger.info(out_str)
    out_str = 'Checking that the sources are inside the bounding surface'
    if limit > 0.0:
        out_str += ' and at least %6.1f mm away' % (limit)
    logger.info(out_str + ' (will take a few...)')
    for s in src:
        vertno = np.where(s['inuse'])[0]  # can't trust s['vertno'] this deep
        # Convert all points here first to save time
        r1s = s['rr'][vertno]
        if s['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
            r1s = apply_trans(inv_trans['trans'], r1s)
        # Check that the source is inside surface (often the inner skull)
        outside = _points_outside_surface(r1s, surf, n_jobs)
        omit_outside = np.sum(outside)
        # vectorized nearest using BallTree (or cdist)
        omit = 0
        if limit > 0.0:
            # distances are in m, limit in mm
            dists = _compute_nearest(surf['rr'], r1s, return_dists=True)[1]
            close = np.logical_and(dists < limit / 1000.0,
                                   np.logical_not(outside))
            omit = np.sum(close)
            outside = np.logical_or(outside, close)
        s['inuse'][vertno[outside]] = False
        s['nuse'] -= (omit + omit_outside)
        s['vertno'] = np.where(s['inuse'])[0]
        if omit_outside > 0:
            extras = [omit_outside]
            extras += ['s', 'they are'] if omit_outside > 1 else ['', 'it is']
            logger.info('%d source space point%s omitted because %s '
                        'outside the inner skull surface.' % tuple(extras))
        if omit > 0:
            extras = [omit]
            # Bug fix: pluralization must follow ``omit`` (the count in this
            # message), not ``omit_outside`` from the previous message
            extras += ['s'] if omit > 1 else ['']
            extras += [limit]
            logger.info('%d source space point%s omitted because of the '
                        '%6.1f-mm distance limit.' % tuple(extras))
        # Adjust the patch inds as well if necessary
        if omit + omit_outside > 0 and s.get('patch_inds') is not None:
            if s['nearest'] is None:
                # This shouldn't happen, but if it does, we can probably come
                # up with a more clever solution
                raise RuntimeError('Cannot adjust patch information properly, '
                                   'please contact the mne-python developers')
            _add_patch_info(s)
    logger.info('Thank you for waiting.')
@verbose
def _points_outside_surface(rr, surf, n_jobs=1, verbose=None):
    """Check whether points are outside a surface

    Parameters
    ----------
    rr : ndarray
        Nx3 array of points to check.
    surf : dict
        Surface with entries "rr" and "tris".
    n_jobs : int
        Number of jobs to run in parallel (the triangles are split into
        ``n_jobs`` chunks).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    outside : ndarray
        1D logical array of size N for which points are outside the surface.
    """
    rr = np.atleast_2d(rr)
    assert rr.shape[1] == 3
    parallel, p_fun, _ = parallel_func(_get_solids, n_jobs)
    # Sum the solid angles over all triangle chunks; a point is flagged as
    # outside when the normalized total deviates from 1 beyond tolerance
    tot_angles = parallel(p_fun(surf['rr'][tris], rr)
                          for tris in np.array_split(surf['tris'], n_jobs))
    return np.abs(np.sum(tot_angles, axis=0) / (2 * np.pi) - 1.0) > 1e-5
def _get_solids(tri_rrs, fros):
    """Helper for computing _sum_solids_div total angle in chunks

    Sums, for each point in ``fros``, the signed solid angles subtended by
    every triangle in ``tri_rrs`` (the historical division by 4*pi is
    already folded into the result).
    """
    # Vectorized form of the per-triangle arctan2 formula, evaluated over
    # slices of 100 points at a time to bound peak memory use.
    total = np.zeros((len(fros)))
    bounds = np.r_[np.arange(0, len(fros), 100), [len(fros)]]
    for start, stop in zip(bounds[:-1], bounds[1:]):
        va = fros[start:stop] - tri_rrs[:, 0, :][:, np.newaxis]
        vb = fros[start:stop] - tri_rrs[:, 1, :][:, np.newaxis]
        vc = fros[start:stop] - tri_rrs[:, 2, :][:, np.newaxis]
        triples = _fast_cross_nd_sum(va, vb, vc)
        na = np.sqrt(np.sum(va * va, axis=2))
        nb = np.sqrt(np.sum(vb * vb, axis=2))
        nc = np.sqrt(np.sum(vc * vc, axis=2))
        denom = (na * nb * nc +
                 np.sum(va * vb, axis=2) * nc +
                 np.sum(va * vc, axis=2) * nb +
                 np.sum(vb * vc, axis=2) * na)
        total[start:stop] = -np.sum(np.arctan2(triples, denom), axis=0)
    return total
@verbose
def _ensure_src(src, kind=None, verbose=None):
    """Coerce ``src`` to a SourceSpaces instance, reading from disk if needed.

    When ``kind='surf'``, additionally require exactly two surface source
    spaces and return just those.
    """
    if isinstance(src, string_types):
        if not op.isfile(src):
            raise IOError('Source space file "%s" not found' % src)
        logger.info('Reading %s...' % src)
        src = read_source_spaces(src, verbose=False)
    if not isinstance(src, SourceSpaces):
        raise ValueError('src must be a string or instance of SourceSpaces')
    if kind == 'surf':
        surfs = [s for s in src if s['type'] == 'surf']
        if len(surfs) != 2 or len(src) != 2:
            raise ValueError('Source space must contain exactly two '
                             'surfaces.')
        src = surfs
    return src
def _ensure_src_subject(src, subject):
src_subject = src[0].get('subject_his_id', None)
if subject is None:
subject = src_subject
if subject is None:
raise ValueError('source space is too old, subject must be '
'provided')
elif src_subject is not None and subject != src_subject:
raise ValueError('Mismatch between provided subject "%s" and subject '
'name "%s" in the source space'
% (subject, src_subject))
return subject
@verbose
def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None):
    """Compute inter-source distances along the cortical surface

    This function will also try to add patch info for the source space.
    It will only occur if the ``dist_limit`` is sufficiently high that all
    points on the surface are within ``dist_limit`` of a point in the
    source space.

    Parameters
    ----------
    src : instance of SourceSpaces
        The source spaces to compute distances for.
    dist_limit : float
        The upper limit of distances to include (in meters).
        Note: if limit < np.inf, scipy > 0.13 (bleeding edge as of
        10/2013) must be installed.
    n_jobs : int
        Number of jobs to run in parallel. Will only use (up to) as many
        cores as there are source spaces.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    src : instance of SourceSpaces
        The original source spaces, with distance information added.
        The distances are stored in src[n]['dist'].
        Note: this function operates in-place.

    Notes
    -----
    Requires scipy >= 0.11 (> 0.13 for `dist_limit < np.inf`).

    This function can be memory- and CPU-intensive. On a high-end machine
    (2012) running 6 jobs in parallel, an ico-5 (10242 per hemi) source space
    takes about 10 minutes to compute all distances (`dist_limit = np.inf`).
    With `dist_limit = 0.007`, computing distances takes about 1 minute.

    We recommend computing distances once per source space and then saving
    the source space to disk, as the computed distances will automatically be
    stored along with the source space data for future use.
    """
    n_jobs = check_n_jobs(n_jobs)
    src = _ensure_src(src)
    if not np.isscalar(dist_limit):
        raise ValueError('limit must be a scalar, got %s' % repr(dist_limit))
    if not check_version('scipy', '0.11'):
        raise RuntimeError('scipy >= 0.11 must be installed (or > 0.13 '
                           'if dist_limit < np.inf')
    if not all(s['type'] == 'surf' for s in src):
        raise RuntimeError('Currently all source spaces must be of surface '
                           'type')
    if dist_limit < np.inf:
        # can't do introspection on dijkstra function because it's Cython,
        # so we'll just try quickly here
        try:
            sparse.csgraph.dijkstra(sparse.csr_matrix(np.zeros((2, 2))),
                                    limit=1.0)
        except TypeError:
            raise RuntimeError('Cannot use "limit < np.inf" unless scipy '
                               '> 0.13 is installed')
    parallel, p_fun, _ = parallel_func(_do_src_distances, n_jobs)
    min_dists = list()
    min_idxs = list()
    logger.info('Calculating source space distances (limit=%s mm)...'
                % (1000 * dist_limit))
    for s in src:
        connectivity = mesh_dist(s['tris'], s['rr'])
        # distances are computed in chunks of vertno rows, in parallel
        d = parallel(p_fun(connectivity, s['vertno'], r, dist_limit)
                     for r in np.array_split(np.arange(len(s['vertno'])),
                                             n_jobs))
        # deal with indexing so we can add patch info
        min_idx = np.array([dd[1] for dd in d])
        min_dist = np.array([dd[2] for dd in d])
        midx = np.argmin(min_dist, axis=0)
        range_idx = np.arange(len(s['rr']))
        min_dist = min_dist[midx, range_idx]
        min_idx = min_idx[midx, range_idx]
        min_dists.append(min_dist)
        min_idxs.append(min_idx)
        # now actually deal with distances, convert to sparse representation
        d = np.concatenate([dd[0] for dd in d]).ravel()  # already float32
        idx = d > 0
        d = d[idx]
        i, j = np.meshgrid(s['vertno'], s['vertno'])
        i = i.ravel()[idx]
        j = j.ravel()[idx]
        d = sparse.csr_matrix((d, (i, j)),
                              shape=(s['np'], s['np']), dtype=np.float32)
        s['dist'] = d
        s['dist_limit'] = np.array([dist_limit], np.float32)
    # Let's see if our distance was sufficient to allow for patch info
    if not any(np.any(np.isinf(md)) for md in min_dists):
        # Patch info can be added!
        for s, min_dist, min_idx in zip(src, min_dists, min_idxs):
            s['nearest'] = min_idx
            s['nearest_dist'] = min_dist
            _add_patch_info(s)
    else:
        logger.info('Not adding patch information, dist_limit too small')
    return src
def _do_src_distances(con, vertno, run_inds, limit):
"""Helper to compute source space distances in chunks"""
if limit < np.inf:
func = partial(sparse.csgraph.dijkstra, limit=limit)
else:
func = sparse.csgraph.dijkstra
chunk_size = 20 # save memory by chunking (only a little slower)
lims = np.r_[np.arange(0, len(run_inds), chunk_size), len(run_inds)]
n_chunks = len(lims) - 1
# eventually we want this in float32, so save memory by only storing 32-bit
d = np.empty((len(run_inds), len(vertno)), np.float32)
min_dist = np.empty((n_chunks, con.shape[0]))
min_idx = np.empty((n_chunks, con.shape[0]), np.int32)
range_idx = np.arange(con.shape[0])
for li, (l1, l2) in enumerate(zip(lims[:-1], lims[1:])):
idx = vertno[run_inds[l1:l2]]
out = func(con, indices=idx)
midx = np.argmin(out, axis=0)
min_idx[li] = idx[midx]
min_dist[li] = out[midx, range_idx]
d[l1:l2] = out[:, vertno]
midx = np.argmin(min_dist, axis=0)
min_dist = min_dist[midx, range_idx]
min_idx = min_idx[midx, range_idx]
d[d == np.inf] = 0 # scipy will give us np.inf for uncalc. distances
return d, min_idx, min_dist
def get_volume_labels_from_aseg(mgz_fname):
    """Returns a list of names of segmented volumes.

    Parameters
    ----------
    mgz_fname : str
        Filename to read. Typically aseg.mgz or some variant in the freesurfer
        pipeline.

    Returns
    -------
    label_names : list of str
        The names of segmented volumes included in this mgz file,
        sorted case-insensitively.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    import nibabel as nib
    # Read the mgz file using nibabel
    # NOTE(review): img.get_data() is deprecated in newer nibabel in favor
    # of get_fdata() -- confirm the supported nibabel versions
    mgz_data = nib.load(mgz_fname).get_data()
    # Get the unique label names: map every distinct integer id found in the
    # volume to its name via the FreeSurfer lookup table
    lut = _get_lut()
    label_names = [lut[lut['id'] == ii]['name'][0].decode('utf-8')
                   for ii in np.unique(mgz_data)]
    # case-insensitive alphabetical order for stable output
    label_names = sorted(label_names, key=lambda n: n.lower())
    return label_names
def _get_hemi(s):
    """Helper to get a hemisphere from a given source space

    Returns the tuple ``(hemi_string, hemi_index, surface_id)`` for a
    surface source space; raises otherwise.
    """
    if s['type'] != 'surf':
        raise RuntimeError('Only surface source spaces supported')
    surf_id = s['id']
    if surf_id == FIFF.FIFFV_MNE_SURF_LEFT_HEMI:
        return 'lh', 0, surf_id
    if surf_id == FIFF.FIFFV_MNE_SURF_RIGHT_HEMI:
        return 'rh', 1, surf_id
    raise ValueError('unknown surface ID %s' % surf_id)
def _get_vertex_map_nn(fro_src, subject_from, subject_to, hemi, subjects_dir,
                       to_neighbor_tri=None):
    """Helper to get a nearest-neigbor vertex match for a given hemi src

    The to_neighbor_tri can optionally be passed in to avoid recomputation
    if it's already available.

    Parameters
    ----------
    fro_src : dict
        One hemisphere's source space of the "from" subject.
    subject_from : str
        Subject to map from.
    subject_to : str
        Subject to map to.
    hemi : str
        'lh' or 'rh'.
    subjects_dir : str
        Path to SUBJECTS_DIR.
    to_neighbor_tri : array | None
        Pre-computed triangle neighborhoods of the destination registration
        surface; computed on demand when None.

    Returns
    -------
    best : ndarray
        For each "from" vertex, the matched vertex on the "to" registration
        sphere; each destination vertex is used at most once.
    """
    # adapted from mne_make_source_space.c, knowing accurate=False (i.e.
    # nearest-neighbor mode should be used)
    logger.info('Mapping %s %s -> %s (nearest neighbor)...'
                % (hemi, subject_from, subject_to))
    regs = [op.join(subjects_dir, s, 'surf', '%s.sphere.reg' % hemi)
            for s in (subject_from, subject_to)]
    reg_fro, reg_to = [_read_surface_geom(r, patch_stats=False) for r in regs]
    if to_neighbor_tri is None:
        to_neighbor_tri = _triangle_neighbors(reg_to['tris'], reg_to['np'])
    morph_inuse = np.zeros(len(reg_to['rr']), bool)
    best = np.zeros(fro_src['np'], int)
    ones = _compute_nearest(reg_to['rr'], reg_fro['rr'][fro_src['vertno']])
    for v, one in zip(fro_src['vertno'], ones):
        # if it were actually a proper morph map, we would do this, but since
        # we know it's nearest neighbor list, we don't need to:
        #     this_mm = mm[v]
        #     one = this_mm.indices[this_mm.data.argmax()]
        if morph_inuse[one]:
            # Try the nearest neighbors
            neigh = _get_surf_neighbors(reg_to, one)  # on demand calc
            was = one
            one = neigh[np.where(~morph_inuse[neigh])[0]]
            if len(one) == 0:
                # Bug fix: report the original vertex number ``was`` -- at
                # this point ``one`` is an empty array and '%d' on it would
                # raise a TypeError instead of the intended RuntimeError
                raise RuntimeError('vertex %d would be used multiple times.'
                                   % was)
            one = one[0]
            logger.info('Source space vertex moved from %d to %d because of '
                        'double occupation.' % (was, one))
        best[v] = one
        morph_inuse[one] = True
    return best
@verbose
def morph_source_spaces(src_from, subject_to, surf='white', subject_from=None,
                        subjects_dir=None, verbose=None):
    """Morph an existing source space to a different subject

    .. warning:: This can be used in place of morphing source estimates for
                 multiple subjects, but there may be consequences in terms
                 of dipole topology.

    Parameters
    ----------
    src_from : instance of SourceSpaces
        Surface source spaces to morph.
    subject_to : str
        The destination subject.
    surf : str
        The brain surface to use for the new source space.
    subject_from : str | None
        The "from" subject. For most source spaces this shouldn't need
        to be provided, since it is stored in the source space itself.
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    src : instance of SourceSpaces
        The morphed source spaces.

    Notes
    -----
    .. versionadded:: 0.10.0
    """
    # adapted from mne_make_source_space.c
    src_from = _ensure_src(src_from)
    subject_from = _ensure_src_subject(src_from, subject_from)
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    src_out = list()
    for fro in src_from:
        hemi, idx, id_ = _get_hemi(fro)
        to = op.join(subjects_dir, subject_to, 'surf', '%s.%s' % (hemi, surf,))
        logger.info('Reading destination surface %s' % (to,))
        to = _read_surface_geom(to, patch_stats=False, verbose=False)
        _complete_surface_info(to)
        # Now we morph the vertices to the destination
        # The C code does something like this, but with a nearest-neighbor
        # mapping instead of the weighted one::
        #
        #     >>> mm = read_morph_map(subject_from, subject_to, subjects_dir)
        #
        # Here we use a direct NN calculation, since picking the max from the
        # existing morph map (which naively one might expect to be equivalent)
        # differs for ~3% of vertices.
        best = _get_vertex_map_nn(fro, subject_from, subject_to, hemi,
                                  subjects_dir, to['neighbor_tri'])
        # drop the completed-surface fields we no longer need
        for key in ('neighbor_tri', 'tri_area', 'tri_cent', 'tri_nn',
                    'use_tris'):
            del to[key]
        to['vertno'] = np.sort(best[fro['vertno']])
        to['inuse'] = np.zeros(len(to['rr']), int)
        to['inuse'][to['vertno']] = True
        to['use_tris'] = best[fro['use_tris']]
        # rr is converted to meters here; patch/distance info is reset
        to.update(nuse=len(to['vertno']), nuse_tri=len(to['use_tris']),
                  nearest=None, nearest_dist=None, patch_inds=None, pinfo=None,
                  dist=None, id=id_, dist_limit=None, type='surf',
                  coord_frame=FIFF.FIFFV_COORD_MRI, subject_his_id=subject_to,
                  rr=to['rr'] / 1000.)
        src_out.append(to)
        logger.info('[done]\n')
    info = dict(working_dir=os.getcwd(),
                command_line=_get_call_line(in_verbose=True))
    return SourceSpaces(src_out, info=info)
@verbose
def _get_morph_src_reordering(vertices, src_from, subject_from, subject_to,
                              subjects_dir=None, verbose=None):
    """Get the reordering indices for a morphed source space

    Parameters
    ----------
    vertices : list
        The vertices for the left and right hemispheres.
    src_from : instance of SourceSpaces
        The original source space.
    subject_from : str
        The source subject.
    subject_to : str
        The destination subject.
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    data_idx : ndarray, shape (n_vertices,)
        The array used to reshape the data.
    from_vertices : list
        The right and left hemisphere vertex numbers for the "from" subject.
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    from_vertices = list()
    data_idxs = list()
    offset = 0
    for ii, hemi in enumerate(('lh', 'rh')):
        # Get the mapping from the original source space to the destination
        # subject's surface vertex numbers
        best = _get_vertex_map_nn(src_from[ii], subject_from, subject_to,
                                  hemi, subjects_dir)
        full_mapping = best[src_from[ii]['vertno']]
        # Tragically, we might not have all of our vertno left (e.g. because
        # some are omitted during fwd calc), so we must do some indexing magic:
        # From all vertices, a subset could be chosen by fwd calc:
        used_vertices = np.in1d(full_mapping, vertices[ii])
        from_vertices.append(src_from[ii]['vertno'][used_vertices])
        remaining_mapping = full_mapping[used_vertices]
        # sanity check: the mapped subset must cover exactly the requested
        # destination vertices
        if not np.array_equal(np.sort(remaining_mapping), vertices[ii]) or \
                not np.in1d(vertices[ii], full_mapping).all():
            raise RuntimeError('Could not map vertices, perhaps the wrong '
                               'subject "%s" was provided?' % subject_from)
        # And our data have been implicitly remapped by the forced ascending
        # vertno order in source spaces
        implicit_mapping = np.argsort(remaining_mapping)  # happens to data
        data_idx = np.argsort(implicit_mapping)  # to reverse the mapping
        data_idx += offset  # hemisphere offset
        data_idxs.append(data_idx)
        offset += len(implicit_mapping)
    data_idx = np.concatenate(data_idxs)
    # this one is really just a sanity check for us, should never be violated
    # by users
    assert np.array_equal(np.sort(data_idx),
                          np.arange(sum(len(v) for v in vertices)))
    return data_idx, from_vertices
def _compare_source_spaces(src0, src1, mode='exact', nearest=True,
                           dist_tol=1.5e-3):
    """Compare two source spaces

    Note: this function is also used by forward/tests/test_make_forward.py

    Parameters
    ----------
    src0, src1 : instance of SourceSpaces
        The source spaces to compare element-wise.
    mode : str
        'exact' for strict equality; otherwise the string must contain
        'approx' (optionally with 'nointerp' appended to skip the
        interpolator check) for tolerance-based comparison.
    nearest : bool
        If True, also compare the patch-information fields.
    dist_tol : float
        Tolerance for mismatched vertex positions in approx mode.
    """
    from nose.tools import assert_equal, assert_true
    from numpy.testing import assert_allclose, assert_array_equal
    from scipy.spatial.distance import cdist
    if mode != 'exact' and 'approx' not in mode:  # 'nointerp' can be appended
        raise RuntimeError('unknown mode %s' % mode)
    for s0, s1 in zip(src0, src1):
        # first check the keys
        a, b = set(s0.keys()), set(s1.keys())
        assert_equal(a, b, str(a ^ b))
        for name in ['nuse', 'ntri', 'np', 'type', 'id']:
            assert_equal(s0[name], s1[name], name)
        for name in ['subject_his_id']:
            if name in s0 or name in s1:
                assert_equal(s0[name], s1[name], name)
        for name in ['interpolator']:
            if name in s0 or name in s1:
                diffs = (s0['interpolator'] - s1['interpolator']).data
                if len(diffs) > 0 and 'nointerp' not in mode:
                    # 5%
                    assert_true(np.sqrt(np.mean(diffs ** 2)) < 0.10, name)
        for name in ['nn', 'rr', 'nuse_tri', 'coord_frame', 'tris']:
            if s0[name] is None:
                assert_true(s1[name] is None, name)
            else:
                if mode == 'exact':
                    assert_array_equal(s0[name], s1[name], name)
                else:  # 'approx' in mode
                    atol = 1e-3 if name == 'nn' else 1e-4
                    assert_allclose(s0[name], s1[name], rtol=1e-3, atol=atol,
                                    err_msg=name)
        for name in ['seg_name']:
            if name in s0 or name in s1:
                assert_equal(s0[name], s1[name], name)
        # these fields will exist if patch info was added
        if nearest:
            for name in ['nearest', 'nearest_dist', 'patch_inds']:
                if s0[name] is None:
                    assert_true(s1[name] is None, name)
                else:
                    assert_array_equal(s0[name], s1[name])
            for name in ['pinfo']:
                if s0[name] is None:
                    assert_true(s1[name] is None, name)
                else:
                    assert_true(len(s0[name]) == len(s1[name]), name)
                    for p1, p2 in zip(s0[name], s1[name]):
                        assert_true(all(p1 == p2), name)
        if mode == 'exact':
            for name in ['inuse', 'vertno', 'use_tris']:
                assert_array_equal(s0[name], s1[name], err_msg=name)
            for name in ['dist_limit']:
                assert_true(s0[name] == s1[name], name)
            for name in ['dist']:
                if s0[name] is not None:
                    assert_equal(s1[name].shape, s0[name].shape)
                    assert_true(len((s0['dist'] - s1['dist']).data) == 0)
        else:  # 'approx' in mode:
            # deal with vertno, inuse, and use_tris carefully
            assert_array_equal(s0['vertno'], np.where(s0['inuse'])[0],
                               'left hemisphere vertices')
            assert_array_equal(s1['vertno'], np.where(s1['inuse'])[0],
                               'right hemisphere vertices')
            assert_equal(len(s0['vertno']), len(s1['vertno']))
            agreement = np.mean(s0['inuse'] == s1['inuse'])
            assert_true(agreement >= 0.99, "%s < 0.99" % agreement)
            if agreement < 1.0:
                # make sure mismatched vertno are within 1.5mm
                v0 = np.setdiff1d(s0['vertno'], s1['vertno'])
                v1 = np.setdiff1d(s1['vertno'], s0['vertno'])
                dists = cdist(s0['rr'][v0], s1['rr'][v1])
                assert_allclose(np.min(dists, axis=1), np.zeros(len(v0)),
                                atol=dist_tol, err_msg='mismatched vertno')
            if s0['use_tris'] is not None:  # for "spacing"
                assert_array_equal(s0['use_tris'].shape, s1['use_tris'].shape)
            else:
                assert_true(s1['use_tris'] is None)
            assert_true(np.mean(s0['use_tris'] == s1['use_tris']) > 0.99)
    # The above "if s0[name] is not None" can be removed once the sample
    # dataset is updated to have a source space with distance info
    for name in ['working_dir', 'command_line']:
        if mode == 'exact':
            assert_equal(src0.info[name], src1.info[name])
        else:  # 'approx' in mode:
            if name in src0.info:
                assert_true(name in src1.info, '"%s" missing' % name)
            else:
                assert_true(name not in src1.info,
                            '"%s" should not exist' % name)
|
jniediek/mne-python
|
mne/source_space.py
|
Python
|
bsd-3-clause
| 103,050
|
[
"Mayavi"
] |
ab896590ec898aada00a47fb814f6dd01235fbf19208b2abe150788b7430dc5b
|
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from sparse_gp import SparseGP
from numpy.linalg.linalg import LinAlgError
from ..inference.latent_function_inference.var_dtc_parallel import update_gradients, VarDTC_minibatch
import logging
logger = logging.getLogger("sparse gp mpi")
class SparseGP_MPI(SparseGP):
    """
    A general purpose Sparse GP model with MPI parallelization support

    This model allows (approximate) inference using variational DTC or FITC
    (Gaussian likelihoods) as well as non-conjugate sparse methods based on
    these.

    :param X: inputs
    :type X: np.ndarray (num_data x input_dim)
    :param likelihood: a likelihood instance, containing the observed data
    :type likelihood: GPy.likelihood.(Gaussian | EP | Laplace)
    :param kernel: the kernel (covariance function). See link kernels
    :type kernel: a GPy.kern.kern instance
    :param X_variance: The uncertainty in the measurements of X (Gaussian variance)
    :type X_variance: np.ndarray (num_data x input_dim) | None
    :param Z: inducing inputs
    :type Z: np.ndarray (num_inducing x input_dim)
    :param num_inducing: Number of inducing points (optional, default 10. Ignored if Z is not None)
    :type num_inducing: int
    :param mpi_comm: The communication group of MPI, e.g. mpi4py.MPI.COMM_WORLD
    :type mpi_comm: mpi4py.MPI.Intracomm
    """

    def __init__(self, X, Y, Z, kernel, likelihood, variational_prior=None,
                 inference_method=None, name='sparse gp mpi', Y_metadata=None,
                 mpi_comm=None, normalizer=False):
        self._IN_OPTIMIZATION_ = False
        # Fix: compare with None by identity (PEP 8), not ``!=``
        if mpi_comm is not None:
            if inference_method is None:
                inference_method = VarDTC_minibatch(mpi_comm=mpi_comm)
            else:
                assert isinstance(inference_method, VarDTC_minibatch), \
                    'inference_method has to support MPI!'
        super(SparseGP_MPI, self).__init__(
            X, Y, Z, kernel, likelihood, inference_method=inference_method,
            name=name, Y_metadata=Y_metadata, normalizer=normalizer)
        self.update_model(False)
        if variational_prior is not None:
            self.link_parameter(variational_prior)
        self.mpi_comm = mpi_comm
        # Manage the data (Y) division across MPI ranks
        if mpi_comm is not None:
            from ..util.parallel import divide_data
            N_start, N_end, N_list = divide_data(Y.shape[0], mpi_comm.rank,
                                                 mpi_comm.size)
            self.N_range = (N_start, N_end)
            self.N_list = np.array(N_list)
            self.Y_local = self.Y[N_start:N_end]
            # Fix: use the print() function -- the original Python-2-only
            # print statement is a SyntaxError on Python 3
            print('MPI RANK ' + str(self.mpi_comm.rank) +
                  ' with the data range ' + str(self.N_range))
            mpi_comm.Bcast(self.param_array, root=0)
        self.update_model(True)

    def __getstate__(self):
        # Drop rank-local / unpicklable MPI state before pickling
        dc = super(SparseGP_MPI, self).__getstate__()
        dc['mpi_comm'] = None
        if self.mpi_comm is not None:
            del dc['N_range']
            del dc['N_list']
            del dc['Y_local']
        # Backwards compatibility with models saved before 'normalizer'
        if 'normalizer' not in dc:
            dc['normalizer'] = None
            dc['Y_normalized'] = dc['Y']
        return dc

    #=====================================================
    # The MPI parallelization
    #     - can move to model at some point
    #=====================================================

    @SparseGP.optimizer_array.setter
    def optimizer_array(self, p):
        if self.mpi_comm is not None:
            # Rank 0 drives optimization: broadcast a "sync" flag and then
            # the new parameter vector to the worker ranks
            if self._IN_OPTIMIZATION_ and self.mpi_comm.rank == 0:
                self.mpi_comm.Bcast(np.int32(1), root=0)
            self.mpi_comm.Bcast(p, root=0)
        SparseGP.optimizer_array.fset(self, p)

    def optimize(self, optimizer=None, start=None, **kwargs):
        self._IN_OPTIMIZATION_ = True
        if self.mpi_comm is None:
            super(SparseGP_MPI, self).optimize(optimizer, start, **kwargs)
        elif self.mpi_comm.rank == 0:
            super(SparseGP_MPI, self).optimize(optimizer, start, **kwargs)
            # Signal all worker ranks that optimization has finished
            self.mpi_comm.Bcast(np.int32(-1), root=0)
        elif self.mpi_comm.rank > 0:
            # Worker loop: wait for flags from rank 0 (1 = sync parameters,
            # -1 = stop, anything else is a protocol error)
            x = self.optimizer_array.copy()
            flag = np.empty(1, dtype=np.int32)
            while True:
                self.mpi_comm.Bcast(flag, root=0)
                if flag == 1:
                    try:
                        self.optimizer_array = x
                        self._fail_count = 0
                    except (LinAlgError, ZeroDivisionError, ValueError):
                        if self._fail_count >= self._allowed_failures:
                            raise
                        self._fail_count += 1
                elif flag == -1:
                    break
                else:
                    self._IN_OPTIMIZATION_ = False
                    raise Exception("Unrecognizable flag for synchronization!")
        self._IN_OPTIMIZATION_ = False

    def parameters_changed(self):
        if isinstance(self.inference_method, VarDTC_minibatch):
            update_gradients(self, mpi_comm=self.mpi_comm)
        else:
            super(SparseGP_MPI, self).parameters_changed()
|
TianpeiLuke/GPy
|
GPy/core/sparse_gp_mpi.py
|
Python
|
bsd-3-clause
| 5,101
|
[
"Gaussian"
] |
6600634acfdd688d0f1c6bb83652596e0b78bdce5c5810de7d03b343c76420bf
|
import os
import logging
import numpy as np
import parmap
from sklearn import mixture
import scipy
from scipy.interpolate import interp1d
import datetime as dt
from tqdm import tqdm
import torch
import torch.multiprocessing as mp
from yass import read_config
from yass.reader import READER
from yass.deconvolve.match_pursuit import MatchPursuit_objectiveUpsample
from yass.deconvolve.match_pursuit_gpu import deconvGPU, deconvGPU2
from yass.template import shift_chans, align_get_shifts_with_ref
from yass.util import absolute_path_to_asset
from scipy import interpolate
from yass.deconvolve.util import make_CONFIG2
from yass.neuralnetwork import Denoise
from yass import mfm
from yass.merge.merge import (test_merge, run_ldatest, run_diptest)
from yass.cluster.cluster import knn_triage
from yass.residual.residual_gpu import RESIDUAL_GPU2#, RESIDUAL_DRIFT
from yass.visual.util import binary_reader_waveforms
#from yass.deconvolve.soft_assignment import get_soft_assignments
def run(fname_templates_in,
        output_directory,
        recordings_filename,
        recording_dtype,
        threshold=None,
        update_templates=False,
        run_chunk_sec='full',
        save_up_data=True):
    """Deconvolute spikes from the recording using the given templates.

    Parameters
    ----------
    fname_templates_in : str
        Path to the input templates file.
    output_directory : str
        Directory where all deconv outputs are written.
    recordings_filename : str
        Path to the (standardized) recording file.
    recording_dtype : dtype-like
        Data type of the recording samples, passed to the READER.
    threshold : float or 'low_fp', optional
        Deconv objective threshold; defaults to
        CONFIG.deconvolution.threshold. The string 'low_fp' selects a
        fixed high threshold (150).
    update_templates : bool, optional
        If True, templates are updated over time and stored under a
        'template_updates' directory (GPU path only).
    run_chunk_sec : 'full' or chunk spec, optional
        Portion of the recording to process; 'full' means everything.
    save_up_data : bool, optional
        Forwarded to the CPU deconv path only.

    Returns
    -------
    tuple of str
        (fname_templates, fname_spike_train, fname_shifts, fname_scales).
        Note fname_templates is a directory when update_templates is True.
    """
    logger = logging.getLogger(__name__)

    CONFIG = read_config()
    CONFIG = make_CONFIG2(CONFIG)

    #print("... deconv using GPU device: ", torch.cuda.current_device())

    # output folder
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    # templates live in a directory when they are updated over time,
    # in a single .npy otherwise
    if update_templates:
        fname_templates = os.path.join(
            output_directory, 'template_updates')
    else:
        fname_templates = os.path.join(
            output_directory, 'templates.npy')

    fname_spike_train = os.path.join(
        output_directory, 'spike_train.npy')
    fname_shifts = os.path.join(
        output_directory, 'shifts.npy')
    fname_scales = os.path.join(
        output_directory, 'scales.npy')

    # Cat: TODO: use Peter's conditional (below) instead of single file check
    # if (os.path.exists(fname_templates) and
    #     os.path.exists(fname_spike_train) and
    #     os.path.exists(fname_templates_up) and
    #     os.path.exists(fname_spike_train_up)):
    #     return (fname_templates, fname_spike_train,
    #             fname_templates_up, fname_spike_train_up)

    # resume support: if all outputs already exist, skip the deconv entirely
    if (os.path.exists(fname_templates) and
        os.path.exists(fname_spike_train) and
        os.path.exists(fname_shifts) and
        os.path.exists(fname_scales)):
        return (fname_templates, fname_spike_train,
                fname_shifts, fname_scales)

    # parameters
    # TODO: read from CONFIG
    if threshold is None:
        threshold = CONFIG.deconvolution.threshold
    elif threshold == 'low_fp':
        threshold = 150

    if run_chunk_sec == 'full':
        chunk_sec = None
    else:
        chunk_sec = run_chunk_sec

    # GPU and CPU paths use different chunk lengths
    if CONFIG.deconvolution.deconv_gpu:
        n_sec_chunk = CONFIG.resources.n_sec_chunk_gpu_deconv
    else:
        n_sec_chunk = CONFIG.resources.n_sec_chunk

    reader = READER(recordings_filename,
                    recording_dtype,
                    CONFIG,
                    n_sec_chunk,
                    chunk_sec=chunk_sec)

    # deconv using GPU
    if CONFIG.deconvolution.deconv_gpu:
        deconv_ONgpu2(fname_templates_in,
                      output_directory,
                      reader,
                      threshold,
                      update_templates,
                      CONFIG,
                      run_chunk_sec)
    # deconv using CPU
    else:
        deconv_ONcpu(fname_templates_in,
                     output_directory,
                     reader,
                     threshold,
                     save_up_data,
                     fname_spike_train,
                     fname_templates,
                     CONFIG)

    return (fname_templates, fname_spike_train,
            fname_shifts, fname_scales)
def deconv_ONgpu2(fname_templates_in,
                  output_directory,
                  reader,
                  threshold,
                  update_templates,
                  CONFIG,
                  run_chunk_sec):
    """Run GPU deconvolution over the whole recording and save results.

    Builds and configures a deconvGPU object (most parameters are
    currently hard-coded here rather than read from CONFIG — see the
    Cat: TODO notes), runs the deconv loop with or without template
    updating, then gathers the per-chunk .npz segment files into
    consolidated templates.npy / spike_train.npy / shifts.npy /
    scales.npy in the deconv output directory.
    """
    # **************** MAKE DECONV OBJECT *****************
    d_gpu = deconvGPU(CONFIG, fname_templates_in, output_directory)
    #print (kfadfa)

    # Cat: TODO: gpu deconv requires own chunk_len variable
    #root_dir = '/media/cat/1TB/liam/49channels/data1_allset'
    root_dir = CONFIG.data.root_folder
    d_gpu.root_dir = root_dir

    # Cat: TODO: read from CONFIG
    d_gpu.max_iter = 1000
    d_gpu.deconv_thresh=threshold

    # Cat: TODO: make sure svd recomputed for higher rank etc.
    d_gpu.svd_flag = True

    # Cat: TODO read from CONFIG file
    d_gpu.RANK = 5
    d_gpu.vis_chan_thresh = 1.0
    d_gpu.fit_height = True
    d_gpu.max_height_diff = 0.1
    d_gpu.fit_height_ptp = 20

    # debug/printout parameters
    # Cat: TODO: read all from CONFIG
    d_gpu.save_objective = False
    d_gpu.verbose = False
    d_gpu.print_iteration_counter = 1

    # Turn on refactoriness
    d_gpu.refractoriness = True

    # Stochastic gradient descent option
    # Cat: TODO: move these and other params to CONFIG
    d_gpu.scd = True
    if d_gpu.scd==False:
        print (" ICD TURNED OFFF.....")
    else:
        print (" ICD TUREND ON .....")

    # Cat: TODO: move to CONFIG; # of times to run scd inside the chunk
    # Cat: TODO: the number of stages need to be a fuction of # of channels;
    #      around 1 stage per 20-30 channels seems to work;
    #      but for 100s of chans this many need to be scaled further
    # d_gpu.n_scd_stages = self.CONFIG.recordings.n_channels // 24
    d_gpu.n_scd_stages = 2

    # Cat: TODO move to CONFIG; # of addition steps each time
    d_gpu.n_scd_iterations = 10

    # Cat: TODO: parameters no longer used, to remove;
    #d_gpu.scd_max_iteration = 1000  # maximum iteration number from which to grab spikes
    #                                # smaller means grabbing spikes from earlier (i.e. larger SNR units)
    #d_gpu.scd_n_additions = 3       # number of addition steps to be done for every loop

    # this can turn off the superresolution alignemnt as an option
    d_gpu.superres_shift = True

    # parameter allows templates to be updated forward (i.e. templates
    # are updated based on spikes in previous chunk)
    # Cat: TODO read from CONFIG
    d_gpu.update_templates = update_templates

    # min difference allowed (in terms of ptp of templates)
    d_gpu.min_bad_diff = 0.3
    # max difference with the max weights
    d_gpu.max_good_diff = 3

    if d_gpu.update_templates:
        print (" templates being updated every ",
               CONFIG.deconvolution.template_update_time, " sec")
    else:
        print (" templates NOT being updated ...")

    # update template time chunk; in seconds
    # Cat: TODO: read from CONFIG file
    d_gpu.template_update_time = CONFIG.deconvolution.template_update_time

    #d_gpu.neuron_discover = CONFIG.deconvolution.neuron_discover
    #if d_gpu.neuron_discover:
    #    d_gpu.residual_comp = RESIDUAL_GPU2(
    #        reader, CONFIG, None, None, None,
    #        None, None, None, None, None, True)
    #    d_gpu.min_split_spikes = CONFIG.deconvolution.min_split_spikes

    # set forgetting factor to 5Hz (i.e. 5 spikes per second of chunk)
    # Cat: TODO: read from CONFIG
    #d_gpu.nu = 1 * d_gpu.template_update_time

    # time to try and split deconv-based spikes
    #d_gpu.neuron_discover_time = CONFIG.deconvolution.neuron_discover_time
    #print (" d_gpu.neuron_discover_time: ", d_gpu.neuron_discover_time)

    # add reader
    d_gpu.reader = reader

    # enforce broad buffer
    d_gpu.reader.buffer=1000

    # *********************************************************
    # *********************** RUN DECONV **********************
    # *********************************************************
    begin=dt.datetime.now().timestamp()

    if update_templates:
        d_gpu = run_deconv_with_templates_update2(d_gpu)
    else:
        d_gpu = run_deconv_no_templates_update(d_gpu, CONFIG)

    # save templates (stored as channels x time x units; saved units-first)
    templates_post_deconv = d_gpu.temps.transpose(2, 1, 0)
    fname_templates = os.path.join(d_gpu.out_dir, 'templates.npy')
    np.save(fname_templates, templates_post_deconv)

    # ****************************************************************
    # *********************** GATHER SPIKE TRAINS ********************
    # ****************************************************************
    subtract_time = np.round((dt.datetime.now().timestamp()-begin),4)

    print ("-------------------------------------------")
    total_length_sec = int((d_gpu.reader.end - d_gpu.reader.start)/d_gpu.reader.sampling_rate)
    print ("Total Deconv Speed ", np.round(total_length_sec/(subtract_time),2), " x Realtime")

    # ************* DEBUG MODE *****************
    if d_gpu.save_objective:
        fname_obj_array = os.path.join(d_gpu.out_dir, 'obj_array.npy')
        np.save(fname_obj_array, d_gpu.obj_array)

    # ************** SAVE SPIKES & SHIFTS **********************
    print (" gathering spike trains and shifts from deconv (todo: parallelize)")
    batch_size = d_gpu.reader.batch_size
    buffer_size = d_gpu.reader.buffer
    # NOTE(review): temporal_size is computed but never used below
    temporal_size = (CONFIG.recordings.sampling_rate/1000*
                     CONFIG.recordings.spike_size_ms)

    # get number of max spikes first (first pass: count, to preallocate)
    n_sec_chunk_gpu = CONFIG.resources.n_sec_chunk_gpu_deconv
    n_spikes = 0
    for chunk_id in tqdm(range(reader.n_batches)):
        time_index = int((chunk_id+1)*n_sec_chunk_gpu +
                         d_gpu.reader.start/d_gpu.reader.sampling_rate)
        fname = os.path.join(d_gpu.seg_dir,str(time_index).zfill(6)+'.npz')
        n_spikes += len(np.load(fname, allow_pickle=True)['spike_train'])

    # loop over chunks and add spikes;
    spike_train = np.zeros((n_spikes, 2), 'int32')
    shifts = np.zeros(n_spikes, 'float32')
    scales = np.zeros(n_spikes, 'float32')

    counter = 0
    for chunk_id in tqdm(range(reader.n_batches)):
        #fname = os.path.join(d_gpu.seg_dir,str(chunk_id).zfill(5)+'.npz')
        time_index = int((chunk_id+1)*n_sec_chunk_gpu +
                         d_gpu.reader.start/d_gpu.reader.sampling_rate)
        fname = os.path.join(d_gpu.seg_dir,str(time_index).zfill(6)+'.npz')
        data = np.load(fname, allow_pickle=True)

        offset = data['offset']
        spike_train_chunk = data['spike_train']
        shifts_chunk = data['shifts']
        scales_chunk = data['heights']

        # keep only spikes inside the chunk proper (drop the buffer zones)
        idx_keep = np.logical_and(
            spike_train_chunk[:, 0] >= buffer_size,
            spike_train_chunk[:, 0] < batch_size + buffer_size)
        idx_keep = np.where(idx_keep)[0]

        # add offset (convert chunk-local times to absolute sample times)
        spike_train_chunk[:, 0] += offset

        # stack data
        idx = slice(counter, counter+len(idx_keep))
        spike_train[idx] = spike_train_chunk[idx_keep]
        shifts[idx] = shifts_chunk[idx_keep]
        scales[idx] = scales_chunk[idx_keep]

        counter += len(idx_keep)

    # trim the preallocated arrays down to the spikes actually kept
    spike_train = spike_train[:counter]
    shifts = shifts[:counter]
    scales = scales[:counter]

    # sort spike train by time
    print (" ordering spikes: ")
    idx = spike_train[:,0].argsort(0)
    spike_train = spike_train[idx]
    shifts = shifts[idx]
    scales = scales[idx]

    # remove duplicates (currently disabled)
    # Cat: TODO: are there still duplicates in spike trains!?
    #print (" skipping spike deduplication step ")
    if False:
        np.save(os.path.join(d_gpu.out_dir,
                             'spike_train_prededuplication.npy'),
                spike_train)
        print ("removing duplicates... (TODO: remove this requirement eventually...)")
        for k in np.unique(spike_train[:,1]):
            idx = np.where(spike_train[:,1]==k)[0]
            _,idx2 = np.unique(spike_train[idx,0], return_index=True)
            idx3 = np.delete(np.arange(idx.shape[0]),idx2)
            if idx3.shape[0]>0:
                print ("unit: ", k, " has duplicates: ", idx3.shape[0])
                # mark duplicate spikes with a sentinel time for deletion below
                spike_train[idx[idx3],0]=-1E6
        # quit()
        idx = np.where(spike_train[:,0]==-1E6)[0]
        spike_train = np.delete(spike_train, idx, 0)
        shifts = np.delete(shifts, idx, 0)
        scales = np.delete(scales, idx, 0)

    # save spike train
    print (" saving spike_train: ", spike_train.shape)
    fname_spike_train = os.path.join(d_gpu.out_dir, 'spike_train.npy')
    np.save(fname_spike_train, spike_train)

    # save shifts
    fname_shifts = os.path.join(d_gpu.out_dir, 'shifts.npy')
    np.save(fname_shifts, shifts)

    # save scales
    fname_scales = os.path.join(d_gpu.out_dir, 'scales.npy')
    np.save(fname_scales, scales)
def run_deconv_no_templates_update(d_gpu, CONFIG):
    """Forward-only deconvolution driver (templates held fixed).

    Dispatches every reader batch to
    ``run_deconv_no_templates_update_parallel`` on the single configured
    GPU. The commented-out branch below sketches multi-GPU dispatch via
    one process per device; it is currently disabled.

    Parameters
    ----------
    d_gpu : deconvGPU
        Configured deconv object (reader attached by the caller).
    CONFIG : object
        Pipeline configuration; provides ``resources.n_sec_chunk_gpu``
        and ``resources.gpu_id``.

    Returns
    -------
    deconvGPU
        The same ``d_gpu`` object after all chunks are processed.
    """
    chunk_ids = np.arange(d_gpu.reader.n_batches)
    # NOTE(review): the segment filenames written by the worker are later
    # read back in deconv_ONgpu2 using CONFIG.resources.n_sec_chunk_gpu_deconv
    # (the value the reader was built with) -- confirm n_sec_chunk_gpu
    # matches it, otherwise the gather step will look for missing files.
    n_sec_chunk_gpu = CONFIG.resources.n_sec_chunk_gpu

    #if len(CONFIG.torch_devices) == 1:
    run_deconv_no_templates_update_parallel(d_gpu,
                                            chunk_ids,
                                            n_sec_chunk_gpu,
                                            #CONFIG.torch_devices[0])
                                            CONFIG.resources.gpu_id)
    # else:  # multi-GPU dispatch, disabled
    #     processes = []
    #     chunk_ids_split = np.split(chunk_ids,
    #                                len(CONFIG.torch_devices))
    #     for ii, device in enumerate(CONFIG.torch_devices):
    #         p = mp.Process(target=run_deconv_no_templates_update_parallel,
    #                        args=(d_gpu, chunk_ids_split[ii],
    #                              n_sec_chunk_gpu, device))
    #         p.start()
    #         processes.append(p)
    #     for p in processes:
    #         p.join()

    return d_gpu
def run_deconv_no_templates_update_parallel(d_gpu, chunk_ids, n_sec_chunk_gpu, device):
    """Run forward deconvolution for the given chunks on one GPU device.

    For each chunk whose segment file does not already exist (making the
    run resumable), deconv is executed and the resulting spike train,
    offset, shifts and heights are saved to
    ``<seg_dir>/<time_index zero-padded to 6>.npz``.

    Parameters
    ----------
    d_gpu : deconvGPU
        Deconv object; ``initialize()`` is called here, after the CUDA
        device has been selected.
    chunk_ids : array_like of int
        Batch indices to process.
    n_sec_chunk_gpu : int or float
        Seconds of recording per chunk; used to derive each segment's
        time index (and hence its filename).
    device : int or torch.device
        CUDA device to run on.
    """
    torch.cuda.set_device(device)
    d_gpu.initialize()

    for chunk_id in chunk_ids:
        time_index = int((chunk_id+1)*n_sec_chunk_gpu + d_gpu.reader.start/d_gpu.reader.sampling_rate)
        fname = os.path.join(d_gpu.seg_dir, str(time_index).zfill(6)+'.npz')

        # skip chunks that were already deconvolved (resume support)
        if not os.path.exists(fname):
            #print ("Forward deconv only ", time_index, " sec, ", chunk_id, "/", len(chunk_ids))
            print ("deconv: ", time_index, " sec, ", chunk_id, "/", len(chunk_ids))

            # run deconv
            d_gpu.run(chunk_id)

            # save deconv results
            np.savez(fname,
                     spike_train = d_gpu.spike_train,
                     offset = d_gpu.offset,
                     shifts = d_gpu.shifts,
                     heights = d_gpu.heights
                    )
def run_deconv_with_templates_update2(d_gpu):
    """Run deconvolution with periodic template updates.

    The recording is processed in batches of ``template_update_time``
    seconds. Each batch gets a forward pass (deconv with the current
    templates, accumulating per-spike min/max statistics), then the
    templates are updated from those statistics and a backward pass
    re-deconvolves the same batch with the updated templates. Segment
    files on disk make both passes resumable.

    NOTE(review): this calls ``update_templates(...)`` which is not
    defined in this portion of the module (only ``update_templates_old``,
    ``update_templates_old2`` and ``update_templates2`` are visible here)
    -- confirm it is defined elsewhere in the file.

    Parameters
    ----------
    d_gpu : deconvGPU
        Configured deconv object.

    Returns
    -------
    deconvGPU
        The same object after all batches are processed.
    """
    n_sec_chunk = d_gpu.reader.n_sec_chunk
    # number of reader chunks per template-update batch
    n_chunks_update = int(d_gpu.template_update_time/n_sec_chunk)
    # batch boundaries in chunk ids, with the final partial batch appended
    update_chunk = np.hstack((np.arange(0, d_gpu.reader.n_batches,
                                        n_chunks_update), d_gpu.reader.n_batches))

    d_gpu.initialize()

    # save the initial (denoised) templates as the starting point
    fname_templates_denoised = os.path.join(
        d_gpu.out_dir,
        'template_updates',
        'templates_init.npy')
    np.save(fname_templates_denoised, d_gpu.temps.transpose(2, 1, 0))

    for batch_id in range(len(update_chunk)-1):

        ##################
        ## Forward pass ##
        ##################
        fnames_forward = []
        for chunk_id in range(update_chunk[batch_id], update_chunk[batch_id+1]):
            # output name
            time_index = (chunk_id+1)*n_sec_chunk
            fname = os.path.join(d_gpu.seg_dir,
                                 str(time_index).zfill(6)+'_forward.npz')
            fnames_forward.append(fname)

            if os.path.exists(fname)==False:
                print ("Forward deconv", time_index, " sec, ",
                       chunk_id, "/", d_gpu.reader.n_batches)
                # run deconv
                d_gpu.run(chunk_id)
                # get ptps (sufficient statistics for the template update)
                min_max_vals_average, weights = d_gpu.compute_min_max_vals()
                # save deconv results
                np.savez(fname,
                         spike_train = d_gpu.spike_train,
                         offset = d_gpu.offset,
                         shifts = d_gpu.shifts,
                         heights = d_gpu.heights,
                         min_max_vals_average = min_max_vals_average.cpu().numpy(),
                         weights = weights.cpu().numpy())
            else:
                # already done; just keep the deconv object's chunk pointer current
                d_gpu.chunk_id = chunk_id

        ###################
        ## Backward pass ##
        ###################
        templates_reinitialized = False
        for chunk_id in range(update_chunk[batch_id], update_chunk[batch_id+1]):
            # output name
            time_index = (chunk_id+1)*n_sec_chunk
            fname = os.path.join(d_gpu.seg_dir,
                                 str(time_index).zfill(6)+'.npz')
            if os.path.exists(fname)==False:

                ######################
                ## update templates ##
                ######################
                # the update is done once per batch, lazily, the first time
                # a backward chunk is missing
                if not templates_reinitialized:
                    fname_templates_updated = os.path.join(
                        d_gpu.out_dir,
                        'template_updates',
                        'templates_in_{}sec.npy'.format(n_chunks_update*n_sec_chunk*batch_id))
                    fname_temp_min_loc = os.path.join(d_gpu.out_dir, 'templates_min_locs.npy')
                    update_templates(fnames_forward,
                                     fname_templates_denoised,
                                     fname_templates_updated,
                                     fname_temp_min_loc,
                                     update_weight = 50)

                    # re initialize with updated templates
                    d_gpu.fname_templates = fname_templates_updated
                    # make sure that it is at the right chunk location
                    d_gpu.chunk_id = chunk_id
                    d_gpu.initialize()

                    # resaving the templates that is processed
                    # and actually used for deconv
                    fname_templates_denoised = os.path.join(
                        d_gpu.out_dir,
                        'template_updates',
                        'templates_{}sec.npy'.format(n_chunks_update*n_sec_chunk*batch_id))
                    np.save(fname_templates_denoised, d_gpu.temps.transpose(2, 1, 0))

                    templates_reinitialized = True

                print ("Backward deconv", time_index, " sec, ",
                       chunk_id, "/", d_gpu.reader.n_batches)
                # run deconv
                d_gpu.run(chunk_id)
                # save deconv results
                np.savez(fname,
                         spike_train = d_gpu.spike_train,
                         offset = d_gpu.offset,
                         shifts = d_gpu.shifts,
                         heights = d_gpu.heights)
            else:
                d_gpu.chunk_id = chunk_id

    return d_gpu
def update_templates_old(fnames_forward,
                         fname_templates,
                         fname_templates_updated,
                         update_weight = 30):
    """Deprecated template update: geometric blend of template ptps.

    Averages the per-chunk ptp statistics saved during the forward pass
    (weighted by spike counts), then rescales the templates so their ptps
    move toward the observed average with a per-unit learning rate
    exp(-n_spikes/update_weight).
    """
    # get all ptps sufficent stats
    avg_ptps_all = [None]*len(fnames_forward)
    weights_all = [None]*len(fnames_forward)
    templates_shifts_all = [None]*len(fnames_forward)
    for ii, fname in enumerate(fnames_forward):
        temp = np.load(fname, allow_pickle=True)
        avg_ptps_all[ii] = temp['avg_ptps']
        weights_all[ii] = temp['weights']
        templates_shifts_all[ii] = temp['templates_shifts']
    avg_ptps_all = np.stack(avg_ptps_all)
    weights_all = np.stack(weights_all)
    templates_shifts_all = np.stack(templates_shifts_all)

    # spike-count-weighted averages across chunks
    avg_ptps = np.average(avg_ptps_all, axis=0, weights=weights_all)
    avg_shifts = np.average(templates_shifts_all, axis=0, weights=weights_all)
    n_spikes = np.sum(weights_all, axis=0)

    # load templates
    templates = np.load(fname_templates)
    # get template ptp (floor zero ptps to avoid division by zero below)
    temp_ptps = templates.ptp(1)
    temp_ptps[temp_ptps==0] = 0.01

    # do geometric update: more spikes -> trust the new estimate more
    weight_old = np.exp(-n_spikes/update_weight)
    ptps_updated = temp_ptps*weight_old + (1-weight_old)*avg_ptps
    scale = ptps_updated/temp_ptps
    updated_templates = templates*scale[:, None]
    # NOTE(review): shifts_updated is computed but never applied or saved
    shifts_updated = (1-weight_old)*avg_shifts

    np.save(fname_templates_updated, updated_templates)
def quad_interp_loc(pts):
    """Locate the sub-sample x-offset of a parabola through 3 points.

    Fits a quadratic to three equally spaced samples centred on an
    objective-function peak and returns the vertex position relative to
    the centre sample, in units of the sample spacing (equidistant
    sample times are assumed).

    Parameters
    ----------
    pts : ndarray, shape (3, n_peaks)
        Values of the three points centred on each peak.

    Returns
    -------
    ndarray, shape (n_peaks,)
        Sub-sample shift of each peak; 0 where the parabola degenerates
        to a line (zero curvature).
    """
    left_diff = pts[0] - pts[1]
    right_diff = pts[1] - pts[2]
    numerator = (right_diff*(-1) - left_diff*(-3))/2
    denominator = -2*(left_diff - numerator)
    # Degenerate (flat) fit: substitute 1/1 so the result is a 0 shift
    # instead of a division by zero.
    degenerate = denominator == 0
    numerator[degenerate] = 1
    denominator[degenerate] = 1
    return (numerator/denominator) - 1
def quad_interp_val(vals, shift):
    """Evaluate the parabola through 3 equally spaced samples at an offset.

    The samples are taken to lie at x = -1, 0, 1; ``shift`` is the
    (sub-sample) position at which the fitted quadratic is evaluated.

    Parameters
    ----------
    vals : sequence of 3 array_like
        Sample values at x = -1, 0, 1.
    shift : array_like
        Offset(s), in sample units, relative to the centre sample.

    Returns
    -------
    array_like
        Value of the fitted parabola at ``shift``.
    """
    centre = vals[1]
    curvature = 0.5*vals[0] + 0.5*vals[2] - vals[1]
    slope = -0.5*vals[0] + 0.5*vals[2]
    return curvature*shift**2 + slope*shift + centre
def run_template_update(d_gpu,
                        cleaned_min_max,
                        update_weight = 50):
    """Update each unit's template from cleaned per-spike min/max stats.

    For every unit, spike ptps are compared to the template ptp; spikes
    whose ptp deviates too much are down-weighted or excluded, and the
    surviving weighted averages drive a geometric update of both the
    template's ptp (scaling) and its trough location (sub-sample shift
    via cubic interpolation).

    Parameters
    ----------
    d_gpu : deconvGPU
        Provides temps (n_chans x n_times x n_units), vis_chans,
        max_good_diff, min_bad_diff_templates.
    cleaned_min_max : sequence
        Per-unit arrays of residual-cleaned min/max values around each
        spike; assumed shape (n_spikes, 2, n_vis_chans, 5) per unit --
        TODO confirm against compute_min_max_vals().
    update_weight : float, optional
        Spike-count scale of the geometric learning rate.

    Returns
    -------
    ndarray, shape (n_units, n_times, n_chans)
        Updated templates.
    """
    n_chans, n_times, n_units = d_gpu.temps.shape
    templates_updated = np.zeros((n_units, n_times, n_chans), 'float32')
    for k in range(n_units):
        # no spikes for this unit this chunk: keep the template unchanged
        if cleaned_min_max[k].shape[0] == 0:
            templates_updated[k] = d_gpu.temps[:, :, k].T
            continue

        vis_chan_k = d_gpu.vis_chans[k]
        temp_k = d_gpu.temps[vis_chan_k,:,k].T

        # ptp of spikes and templates
        ptps_spikes = np.max(cleaned_min_max[k][:,1], 2) - np.min(cleaned_min_max[k][:,0], 2)
        ptp_temp = temp_k.ptp(0)

        # get weight of individual spikes: 1 within max_good_diff of the
        # template ptp, decaying quadratically beyond, hard 0 past the
        # per-channel bad-diff threshold
        diffs = np.abs(ptps_spikes - ptp_temp)
        diffs[diffs < d_gpu.max_good_diff] = d_gpu.max_good_diff
        weights = (d_gpu.max_good_diff**2)/np.square(diffs)
        weights[diffs > d_gpu.min_bad_diff_templates[vis_chan_k, k].cpu().numpy()] = 0

        # get geometric update weights (per channel)
        weight_new = 1 - np.exp(-np.sum(weights, 0)/update_weight)

        # in a case of no spikes on a channel, use tiny uniform weights so
        # np.average below does not divide by zero
        idx_no_spikes = np.where(np.sum(weights, 0) == 0)[0]
        weights[:, idx_no_spikes] = 0.0001

        # ptp average on this chunk
        ptp_avg = np.average(ptps_spikes, axis=0, weights=weights)

        # min location (subsample) using quadratic fit
        #(relative to the template min location)
        min_vals = np.average(cleaned_min_max[k][:, 0], axis=0, weights=np.tile(weights[:,:,None], (1, 1, 5)))
        peak_loc_integer = min_vals.argmin(1)
        peak_loc = np.copy(peak_loc_integer).astype('float32')
        # refine the integer argmin within the 5-sample window; the edge
        # cases (j == 0, j == 4) clamp the quadratic fit toward the interior
        for j in range(5):
            idx_ = peak_loc_integer == j
            if j == 0:
                subsample_shift = quad_interp_loc(
                    min_vals[idx_, j:j+3].transpose())
                subsample_shift[subsample_shift < -1] = -1
                subsample_shift[subsample_shift > -0.5] = -1
                subsample_shift += 1
            elif j > 0 and j < 4:
                subsample_shift = quad_interp_loc(
                    min_vals[idx_, j-1:j+2].transpose())
            elif j == 4:
                subsample_shift = quad_interp_loc(
                    min_vals[idx_, j-2:j+1].transpose())
                subsample_shift[subsample_shift > 1] = 1
                subsample_shift[subsample_shift < 0.5] = 1
                subsample_shift -= 1
            peak_loc[idx_] += subsample_shift
        # centre the 5-sample window (index 2 is the template min location)
        peak_loc -= 2

        # min location of templates (subsample shift)
        temp_min_loc = temp_k.argmin(0)
        loc_3pts = temp_min_loc[:, None] + np.arange(-1, 2)
        loc_3pts[loc_3pts < 0] = 0
        loc_3pts[loc_3pts > n_times-1] = n_times - 1
        chan_idx = np.tile(np.arange(temp_k.shape[1])[:, None], (1, 3))
        val_3pts = temp_k[loc_3pts, chan_idx].T
        temp_peak_loc = quad_interp_loc(val_3pts)
        temp_peak_loc[np.isnan(temp_peak_loc)] = 0

        # update peak location
        peak_loc_updated = (1-weight_new)*temp_peak_loc + weight_new*peak_loc
        # and determine how much has shifted
        shifts = peak_loc_updated - temp_peak_loc
        # the max-ptp channel is the alignment anchor: never shift it
        shifts[temp_k.ptp(0).argmax()] = 0

        # update ptp
        ptps_updated = (1-weight_new)*ptp_temp + weight_new*ptp_avg
        # scale templates
        scale = ptps_updated/ptp_temp
        temp_k_scaled = temp_k*scale[None]

        # shift templates channel by channel via cubic interpolation
        temp_k_updated = np.zeros_like(temp_k_scaled)
        t_range = np.arange(n_times)
        for c in range(len(vis_chan_k)):
            t_range_new = t_range - shifts[c]
            #f = interp1d(t_range, templates_scaled[unit,:,c], 'cubic', fill_value='extrapolate')
            f = interp1d(t_range, temp_k_scaled[:,c],
                         'cubic', bounds_error=False, fill_value=0.0)
            temp_k_updated[:, c] = f(t_range_new)

        templates_updated[k,: ,vis_chan_k] = temp_k_updated.T

    return templates_updated
def update_templates_old2(fnames_forward,
                          fname_templates,
                          fname_templates_updated,
                          fname_temp_min_loc,
                          update_weight = 50):
    """Deprecated template update from per-chunk min/max statistics.

    Loads the forward-pass sufficient statistics, estimates per-channel
    spike ptps and sub-sample trough locations, then writes templates
    that are scaled (ptp) and shifted (trough location) toward the new
    estimates with a geometric learning rate 1-exp(-n_spikes/update_weight).
    Idempotent: returns early if the output file already exists.
    """
    if os.path.exists(fname_templates_updated):
        return

    n_chunks = len(fnames_forward)

    # get all ptps sufficent stats
    min_max_vals_all = [None]*n_chunks
    weights_all = [None]*n_chunks
    for ii, fname in enumerate(fnames_forward):
        temp = np.load(fname, allow_pickle=True)
        min_max_vals_all[ii] = temp['min_max_vals_average']
        weights_all[ii] = temp['weights']

    # size of min_max_vals_all: n_chunks x n_units x 2 x n_chans x 5
    # size of weights_all: n_chunks x n_units x n_chans
    min_max_vals_all = np.stack(min_max_vals_all)
    weights_all = np.stack(weights_all)

    # ptp per chunk
    # size of ptp_all: n_chunks x n_units x n_chans
    ptp_all = np.max(min_max_vals_all[:, :, 1], 3) - np.min(min_max_vals_all[:, :, 0], 3)

    # average them (weighted by spike counts per chunk)
    ptp_avg = np.average(ptp_all, axis=0, weights=weights_all)
    n_spikes = np.sum(weights_all, axis=0)
    n_spikes[n_spikes < 0.01] = 0

    # get geometric update weights
    weight_new = 1 - np.exp(-n_spikes/update_weight)

    # load templates
    templates = np.load(fname_templates)
    temp_ptps = templates.ptp(1)
    n_units, n_times, n_channels = templates.shape

    # minimum peak location
    min_vals_all_reshaped = min_max_vals_all[:, :, 0].reshape(-1, 5)
    # peak_loc_integer size: (n_chunks x n_units x n_chans)
    peak_loc_integer = np.argmin(min_vals_all_reshaped, 1)
    peak_loc = np.copy(peak_loc_integer).astype('float32')
    # refine integer argmin with a quadratic fit; edge bins clamp inward
    for j in range(5):
        idx_ = peak_loc_integer == j
        if j == 0:
            subsample_shift = quad_interp_loc(
                min_vals_all_reshaped[idx_, j:j+3].transpose())
            subsample_shift[subsample_shift < -1] = -1
            subsample_shift[subsample_shift > -0.5] = -1
            subsample_shift += 1
        elif j > 0 and j < 4:
            subsample_shift = quad_interp_loc(
                min_vals_all_reshaped[idx_, j-1:j+2].transpose())
        elif j == 4:
            subsample_shift = quad_interp_loc(
                min_vals_all_reshaped[idx_, j-2:j+1].transpose())
            subsample_shift[subsample_shift > 1] = 1
            subsample_shift[subsample_shift < 0.5] = 1
            subsample_shift -= 1
        peak_loc[idx_] += subsample_shift
    peak_loc = peak_loc.reshape(n_chunks, n_units, n_channels)
    # centre the 5-sample window (index 2 = template trough)
    peak_loc = peak_loc - 2
    peak_loc_avg = np.average(peak_loc, axis=0, weights=weights_all)

    # get subsample peak location of templates
    min_max_loc_temp = np.stack((templates.argmin(1), templates.argmax(1)), 1)
    loc_3pts = min_max_loc_temp[None] + np.arange(-1, 2)[:, None, None, None]
    loc_3pts[loc_3pts < 0] = 0
    loc_3pts[loc_3pts > n_times-1] = n_times - 1
    val_3pts = np.zeros(loc_3pts.shape, 'float32')
    chan_idx = np.tile(np.arange(n_channels)[None,None], (3, 2, 1))
    for k in range(n_units):
        val_3pts[:,k] = templates[k, loc_3pts[:,k], chan_idx]
    temp_peak_loc = quad_interp_loc(val_3pts)
    temp_peak_loc[np.isnan(temp_peak_loc)] = 0
    #temp_peak_vals = quad_interp_val(val_3pts, temp_peak_loc)

    # update peak location (index 0 = trough component)
    peak_loc_updated = temp_peak_loc[:, 0]*(1-weight_new)+ weight_new*peak_loc_avg
    #ptps_updated = (temp_peak_vals[:,1] - temp_peak_vals[:,0])*(1-weight_new) + weight_new*ptp_avg

    # min_loc_current shape: n_units x n_channels
    min_loc_current = min_loc_current = min_max_loc_temp[:, 0] + peak_loc_updated if False else min_max_loc_temp[:, 0] + peak_loc_updated

    # rank-1 denoising of trough locations across batches (disabled)
    if False:
        # do rank 1 denoising. estimate time component using high ptp channels only
        ptp_threshold = 3
        if os.path.exists(fname_temp_min_loc):
            # min_loc_matrix size : n_chunks x n_units x n_chans
            min_loc_matrix = np.load(fname_temp_min_loc)
            min_loc_matrix = np.concatenate((min_loc_matrix, min_loc_current[None]), axis=0)
            np.save(fname_temp_min_loc, min_loc_matrix)
            for k in range(n_units):
                vis_chan = np.where(temp_ptps[k] > ptp_threshold)[0]
                if len(vis_chan) == 0:
                    vis_chan = np.where(temp_ptps[k] > 0.8*temp_ptps[k].max())[0]
                # min_loc_k : n_chunks x n_chans
                min_loc_k = min_loc_matrix[:, k]
                a, b, c = np.linalg.svd(min_loc_k[:, vis_chan] - min_loc_k[[0], vis_chan])
                # temporal_component: n_chunks x 1
                temporal_component = a[:,0]*b[0]
                channel_components = np.sum(
                    min_loc_k*temporal_component[:, None], 0)/np.square(
                    temporal_component).sum()
                # denoised location
                min_loc_current[k] = temporal_component[-1]*channel_components + min_loc_k[0]
        else:
            np.save(fname_temp_min_loc, min_loc_current[None])

        # denoised location relative to the templates from the previous batch
        peak_loc_denoised = min_loc_current - min_max_loc_temp[:, 0]
    else:
        peak_loc_denoised = peak_loc_updated

    # and determine how much has shifted
    shifts = peak_loc_denoised - temp_peak_loc[:,0]

    # max chan stay fixed (alignment anchor)
    max_chans = templates.ptp(1).argmax(1)
    for k in range(n_units):
        shifts[k, max_chans[k]] = 0

    # update ptp
    ptps_updated = temp_ptps*(1-weight_new) + weight_new*ptp_avg

    # not updating non visible channels
    ptps_updated[temp_ptps==0] = 0
    shifts[temp_ptps==0] = 0
    temp_ptps[temp_ptps==0] = 0.01

    # scale templates
    scale = ptps_updated/temp_ptps
    templates_scaled = templates*scale[:, None]

    # shift templates channel by channel via cubic interpolation
    templates_updated = np.zeros_like(templates_scaled)
    t_range = np.arange(n_times)
    for unit in range(n_units):
        vis_chan = np.where(templates_scaled[unit].ptp(0) > 0)[0]
        for c in vis_chan:
            t_range_new = t_range - shifts[unit, c]
            #f = interp1d(t_range, templates_scaled[unit,:,c], 'cubic', fill_value='extrapolate')
            f = interp1d(t_range, templates_scaled[unit,:,c],
                         'cubic', bounds_error=False, fill_value=0.0)
            templates_updated[unit, :, c] = f(t_range_new)

    np.save(fname_templates_updated, templates_updated)
def update_templates2(fnames_forward,
                      fname_templates,
                      fname_templates_updated,
                      update_weight = 50):
    """Template update from averaged min/max statistics (variant 2).

    Like update_templates_old2, but averages the min/max window values
    across chunks first and additionally re-evaluates the ptp at the
    shifted sub-sample trough/peak location before scaling.
    """
    # get all ptps sufficent stats
    min_max_vals_all = [None]*len(fnames_forward)
    weights_all = [None]*len(fnames_forward)
    for ii, fname in enumerate(fnames_forward):
        temp = np.load(fname, allow_pickle=True)
        min_max_vals_all[ii] = temp['min_max_vals_average']
        weights_all[ii] = temp['weights']
    min_max_vals_all = np.stack(min_max_vals_all)
    weights_all = np.stack(weights_all)

    # average them (spike-count weighted, broadcast over the 2 and 5 axes)
    min_max_vals_avg = np.average(
        min_max_vals_all, axis=0,
        weights=np.tile(weights_all[:, :, None, :, None], (1, 1, 2, 1, 5)))
    n_spikes = np.sum(weights_all, axis=0)

    # load templates
    templates = np.load(fname_templates)
    temp_ptps = templates.ptp(1)
    n_units, n_times, n_channels = templates.shape

    # get geometric update weights
    weight_new = 1 - np.exp(-n_spikes/update_weight)

    # get subsample peak location of current batch (relative to integer peak location of templates)
    # min_max_vals_avg: n units x 2 x n_chans x 5
    # peak_loc: n_units x n_chans
    peak_loc = np.zeros((n_units, n_channels), 'float32')
    min_val_reshaped = min_max_vals_avg[:, 0].reshape(-1, 5)
    peak_loc = min_val_reshaped.argmin(1).astype('float32')
    # NOTE(review): unlike update_templates_old2, the comparison below is
    # against the same float array being updated in place (peak_loc), not
    # an immutable integer copy -- an entry shifted onto a later integer
    # value could be matched twice; confirm this is intended.
    for j in range(5):
        idx_ = peak_loc == j
        if j == 0:
            subsample_shift = quad_interp_loc(
                min_val_reshaped[idx_, j:j+3].transpose())
            subsample_shift[subsample_shift < -1] = -1
            subsample_shift[subsample_shift > -0.5] = -1
            subsample_shift += 1
        elif j > 0 and j < 4:
            subsample_shift = quad_interp_loc(
                min_val_reshaped[idx_, j-1:j+2].transpose())
        elif j == 4:
            subsample_shift = quad_interp_loc(
                min_val_reshaped[idx_, j-2:j+1].transpose())
            subsample_shift[subsample_shift > 1] = 1
            subsample_shift[subsample_shift < 0.5] = 1
            subsample_shift -= 1
        peak_loc[idx_] += subsample_shift
    peak_loc = peak_loc.reshape(n_units, n_channels)
    # centre the 5-sample window (index 2 = template trough)
    peak_loc = peak_loc - 2
    #peak_loc = quad_interp_loc(min_max_vals_avg[:, 0].transpose(2, 0, 1))
    #peak_loc[peak_loc > 1] = 1
    #peak_loc[peak_loc < -1] = -1

    # get subsample peak location of templates
    min_max_loc_temp = np.stack((templates.argmin(1), templates.argmax(1)), 1)
    loc_3pts = min_max_loc_temp[None] + np.arange(-1, 2)[:, None, None, None]
    loc_3pts[loc_3pts < 0] = 0
    loc_3pts[loc_3pts > n_times-1] = n_times - 1
    val_3pts = np.zeros(loc_3pts.shape, 'float32')
    chan_idx = np.tile(np.arange(n_channels)[None,None], (3, 2, 1))
    for k in range(n_units):
        val_3pts[:,k] = templates[k, loc_3pts[:,k], chan_idx]
    temp_peak_loc = quad_interp_loc(val_3pts)
    temp_peak_loc[np.isnan(temp_peak_loc)] = 0

    # update peak location (index 0 = trough component)
    peak_loc = temp_peak_loc[:, 0]*(1-weight_new)+ weight_new*peak_loc
    # and determine how much has shifted
    shifts = peak_loc - temp_peak_loc[:,0]
    #shifts[shifts < -5] = -5
    #shifts[shifts > 5] = 5

    # get ptp at the subsample shift location
    temp_peak_loc_updated = temp_peak_loc + shifts[:, None]
    temp_peak_loc_updated[temp_peak_loc_updated > 2] = 2
    # NOTE(review): the lower clamp assigns 2, not -2 -- likely a typo
    # (the symmetric clamp above uses 2 for the upper bound); confirm.
    temp_peak_loc_updated[temp_peak_loc_updated < -2] = 2

    # shift templates: map locations into the [0, 4] window coordinates
    temp_peak_loc_updated = temp_peak_loc_updated.reshape(-1) + 2
    min_max_vals_avg = min_max_vals_avg.transpose(3, 0, 1, 2).reshape(5, -1)
    min_max_vals_shifted = np.zeros(len(temp_peak_loc_updated), 'float32')
    # evaluate the window values at the shifted location with a local
    # quadratic fit around the nearest integer sample
    for j in range(5):
        if j == 0:
            idx_ = temp_peak_loc_updated <= 0.5
            min_max_vals_shifted[idx_] = quad_interp_val(
                min_max_vals_avg[j:j+3, idx_],
                temp_peak_loc_updated[idx_]-1)
        elif j == 4:
            idx_ = temp_peak_loc_updated > 3.5
            min_max_vals_shifted[idx_] = quad_interp_val(
                min_max_vals_avg[j-2:j+1, idx_],
                temp_peak_loc_updated[idx_]-3)
        else:
            idx_ = np.logical_and(temp_peak_loc_updated>j-0.5,
                                  temp_peak_loc_updated<=j+0.5)
            min_max_vals_shifted[idx_] = quad_interp_val(
                min_max_vals_avg[j-1:j+2, idx_],
                temp_peak_loc_updated[idx_]-j)
    min_max_vals_shifted = min_max_vals_shifted.reshape((n_units, 2, n_channels))
    ptps = min_max_vals_shifted[:,1] - min_max_vals_shifted[:,0]
    ptps_updated = temp_ptps*(1-weight_new) + weight_new*ptps

    # scale templates (non-visible channels are left at zero)
    ptps_updated[temp_ptps==0] = 0
    shifts[temp_ptps==0] = 0
    temp_ptps[temp_ptps==0] = 0.01
    scale = ptps_updated/temp_ptps
    templates_scaled = templates*scale[:, None]

    # max chan stay fixed (alignment anchor)
    max_chans = templates.ptp(1).argmax(1)
    for k in range(n_units):
        shifts[k, max_chans[k]] = 0

    # shift templates channel by channel via cubic interpolation
    templates_updated = np.zeros_like(templates_scaled)
    t_range = np.arange(n_times)
    for unit in range(n_units):
        vis_chan = np.where(templates_scaled[unit].ptp(0) > 0)[0]
        for c in vis_chan:
            t_range_new = t_range - shifts[unit, c]
            #f = interp1d(t_range, templates_scaled[unit,:,c], 'cubic', fill_value='extrapolate')
            f = interp1d(t_range, templates_scaled[unit,:,c],
                         'cubic', bounds_error=False, fill_value=0.0)
            templates_updated[unit, :, c] = f(t_range_new)

    np.save(fname_templates_updated, templates_updated)
def run_deconv_with_templates_update(d_gpu, CONFIG,
                                     output_directory):
    """Run GPU deconvolution over the recording with periodic template updates.

    Alternates two passes per template-update batch:
      FORWARD  - deconvolve each chunk with the latest templates, save results
                 as '<t>_forward.npz', accumulate per-unit statistics, then
                 recompute templates (and optionally discover new neurons);
      BACKWARD - once an updated template file exists for a batch, re-deconvolve
                 the same chunks with it and save finalized '<t>.npz' files.

    Parameters
    ----------
    d_gpu : project deconvolution object (reader, templates, per-chunk results);
        mutated in place and re-initialized between passes.
    CONFIG : project configuration; uses deconvolution.* and resources.* fields.
    output_directory : str, root folder containing 'template_updates/'.

    Returns
    -------
    (d_gpu, fname_updated_templates) : deconv object after the final pass and
        the path of the most recently written template file.
    """
    begin=dt.datetime.now().timestamp()  # start timestamp; currently unused
    # loop over chunks and run subtraction step
    #templates_old = None
    # NOTE(review): these four accumulators are never used in this function
    wfs_array = []
    n_spikes_array = []
    ptp_array = []
    ptp_time_array = []
    # this is a place holder; gets returned to main wrapper to save templates
    # post deconv;
    # - it is updated during deconv to contain latest updated templates;
    fname_updated_templates = d_gpu.fname_templates
    #***********************************************************
    #********************* MAIN DECONV LOOP ********************
    #***********************************************************
    # main idea: 2 loops; outer checks for backward/updated deconv
    # inner does the forward deconv
    chunk_id = 0
    neuron_discovery_flag = CONFIG.deconvolution.neuron_discover
    new_neuron_len = CONFIG.deconvolution.neuron_discover_time  # sec between discovery attempts
    batch_len = CONFIG.deconvolution.template_update_time       # sec per template-update batch
    chunk_len = CONFIG.resources.n_sec_chunk_gpu                # sec per GPU chunk
    verbose = False
    while True:
        # keep track of chunk being deconved and time_index (seconds into recording)
        time_index = (chunk_id+1)*chunk_len
        #if d_gpu.update_templates_backwards:
        fname_forward = os.path.join(d_gpu.seg_dir,str(time_index).zfill(6)+'_forward.npz')
        fname_updated = os.path.join(d_gpu.seg_dir,str(time_index).zfill(6)+'.npz')
        if verbose:
            print (" searching for finalized chunnk: ", fname_updated)
        # a finalized (backward-pass) file means this chunk is done; skip it
        if os.path.exists(fname_updated):
            #print (" found it")
            chunk_id+=1
            continue
        # exit when finished reading;
        if chunk_id>=d_gpu.reader.n_batches:
            break
        # if updated file missing; check which batch we're in:
        # end-time of the batch whose templates are pending, and of the previous one
        updated_temp_time = ((chunk_id*chunk_len)//batch_len+1)*batch_len
        previous_temp_time = ((chunk_id*chunk_len)//batch_len)*batch_len
        # check if the batch templates have already been updated;
        # if yes, then do backward step; if not finish the forward batch
        fname_updated_templates = os.path.join(output_directory,'template_updates',
                                  'templates_' + str(updated_temp_time)+'sec.npy')
        if verbose:
            print ("searching for updated tempaltes fname: ", fname_updated_templates)
        # BACKWARD PASS
        if os.path.exists(fname_updated_templates) and (d_gpu.update_templates):
            if verbose:
                print ("")
                print ("")
                print ("")
                print (" >>>>>>>>>>>>>>>> BACKWARD PASS <<<<<<<<<<<<<<<< ")
            # reinitialize
            # initialize deconv at the right location;
            # forward pass need last set of updates
            d_gpu.chunk_id = (updated_temp_time)//chunk_len-1
            d_gpu.fname_templates = fname_updated_templates
            d_gpu.initialize()
            # redo every chunk in the just-updated batch with the new templates
            for k in range(batch_len//chunk_len):
                time_index = (updated_temp_time-batch_len+chunk_len+k*chunk_len)
                fname_forward = os.path.join(d_gpu.seg_dir,str(time_index).zfill(6)+'.npz')
                #print (" searching for backward/updated deconv file: ", fname_forward)
                if os.path.exists(fname_forward):
                    chunk_id+=1
                    continue
                # exit when getting to last file
                if chunk_id>=d_gpu.reader.n_batches:
                    break
                #if verbose:
                print (" Backward pass time ", time_index)
                # run deconv
                #chunk_id =
                if verbose:
                    print (" chunk_id passed to deconv: ", chunk_id)
                d_gpu.run(chunk_id)
                # save deconv results (finalized; no '_forward' suffix)
                fname = os.path.join(d_gpu.seg_dir,str(time_index).zfill(6)+'.npz')
                np.savez(fname,
                         spike_array = d_gpu.spike_array,
                         offset_array = d_gpu.offset_array,
                         neuron_array = d_gpu.neuron_array,
                         shift_list = d_gpu.shift_list,
                         height_list = d_gpu.height_list)
                chunk_id+=1
            print (" DONE BACKWARD PASS: ")
        # FORWARD PASS
        else:
            if verbose:
                print ("")
                print ("")
                print ("")
                print (" >>>>>>>>>>>>>>>> FORWARD PASS <<<<<<<<<<<<<<<< ")
            # initialize deconv at the right location;
            # forward pass need last set of updates
            fname_previous_templates = os.path.join(output_directory,'template_updates',
                                       'templates_' + str(previous_temp_time)+'sec.npy')
            if verbose:
                print (" fname rpev templates ", fname_previous_templates)
            if chunk_id == 0:
                d_gpu.chunk_id = (previous_temp_time)//chunk_len
            else:
                d_gpu.chunk_id = (previous_temp_time)//chunk_len-1
            d_gpu.fname_templates = fname_previous_templates
            d_gpu.initialize()
            # loop over batch forward steps
            chunks = []
            # loop over chunks in each batch;
            for k in range(batch_len//chunk_len):
                # Note this entire wrapper assumes templates are being updated; no need to check;
                time_index = (updated_temp_time - batch_len + chunk_len + k*chunk_len)
                fname_forward = os.path.join(d_gpu.seg_dir,str(time_index).zfill(6)+'_forward.npz')
                if os.path.exists(fname_forward):
                    if verbose:
                        print (" time index: ", time_index, " already completed (TODO: make sure metadata is there")
                    # exit when getting to last file
                    if chunk_id>=d_gpu.reader.n_batches:
                        break
                    chunks.append(chunk_id)
                    chunk_id+=1
                    continue
                # exit when getting to last file
                if chunk_id>=d_gpu.reader.n_batches:
                    break
                chunks.append(chunk_id)
                #if verbose:
                print (" Forward pass time ", time_index, ", chunk : ", chunk_id, " / ", d_gpu.reader.n_batches)
                # run deconv
                d_gpu.run(chunk_id)
                # save deconv results
                fname = os.path.join(d_gpu.seg_dir,str(time_index).zfill(6)+'_forward.npz')
                np.savez(fname,
                         spike_array = d_gpu.spike_array,
                         offset_array = d_gpu.offset_array,
                         neuron_array = d_gpu.neuron_array,
                         shift_list = d_gpu.shift_list,
                         height_list = d_gpu.height_list)
                # accumulate per-unit waveform/PTP statistics for the template update
                track_spikes_post_deconv(d_gpu,
                                         CONFIG,
                                         time_index,
                                         output_directory,
                                         chunk_id
                                         )
                chunk_id+=1
            # after batch is complete, run template update
            # Cat; TODO: is this flag redundant? This entire wrapper is for updating templates
            if d_gpu.update_templates:
                templates_new = update_templates_forward_backward(d_gpu,
                                                                  CONFIG,
                                                                  chunks,
                                                                  time_index)
            # check if new neurons are found
            if d_gpu.update_templates:
                if ((time_index%new_neuron_len==0) and (time_index>chunk_len) and
                        (neuron_discovery_flag)):
                    n_temps = templates_new.shape[2]
                    templates_new = split_neurons(templates_new,
                                                  d_gpu,
                                                  CONFIG,
                                                  time_index
                                                  )
                    # if finding new neurons backup all the way to beginning of
                    # the new neuron_batch (note we also step back below)
                    if n_temps!=templates_new.shape[2]:
                        chunk_id -= (new_neuron_len-batch_len)//chunk_len
                        if verbose:
                            print (" New neurons found: ", templates_new.shape[2]-n_temps,
                                   ", reseeting chunk_id to: ", chunk_id)
                        # NOTE(review): discovery disabled after the first successful
                        # split; placement relative to the n_temps check reconstructed
                        # from context -- confirm against original indentation
                        neuron_discovery_flag = False
            # finalize and reinitialize deconvolution with new templates
            if d_gpu.update_templates:
                finish_templates(templates_new, d_gpu, CONFIG, time_index,
                                 chunk_id, updated_temp_time)
            # reset the chunk ID back to initialize the updated/backward template pass
            if d_gpu.update_templates:
                print ("time index: ", time_index)
                if (time_index-chunk_len)<= CONFIG.deconvolution.template_update_time:
                    chunk_id= 0
                else:
                    chunk_id-= (batch_len//chunk_len)
                if verbose:
                    print (" tempaltes updated, resetting chunk_id to: ", chunk_id)
        print (" chunk_id: ", chunk_id)
        print (" d_gpu.reader.n_batche: ", d_gpu.reader.n_batches)
        print (" time_index: ", time_index)
        # exit when finished reading;
        if chunk_id>=d_gpu.reader.n_batches:
            break
    return d_gpu, fname_updated_templates
def compute_residual_drift(d_gpu):
    """Compute the deconvolution residual for the drift pipeline.

    Thin wrapper around the project-level RESIDUAL_DRIFT routine.

    Fix: the original discarded RESIDUAL_DRIFT's return value even though a
    caller (commented out in run_deconv_with_templates_update) assigns the
    result; propagate it instead. Callers that ignore the return are unaffected.
    """
    return RESIDUAL_DRIFT(d_gpu)
def track_spikes_post_deconv(d_gpu,
                             CONFIG,
                             time_index,
                             output_directory,
                             chunk_id):
    """Collect per-unit waveform/PTP statistics from the just-deconvolved chunk
    and save them to disk for the subsequent template-update step.

    For each unit: gather raw waveforms of its matched spikes, align them with
    the deconvolution sub-sample shifts, triage spikes whose peak/trough/PTP
    fall outside dynamic thresholds around the current template, then save the
    surviving mean waveform, spike count and mean PTP.

    Writes (keyed by chunk end-time in seconds):
      - <out_dir>/template_updates/chunk_data_<t>.npz : wfs/n_spikes/ptp arrays
      - <out_dir>/wfs/<t>.npz      : debug copies of raw max-chan waveforms
      - <out_dir>/resplit/<t>.npz  : PTP/waveform/spiketrain metadata for splits

    Side effects: creates d_gpu.temps_cuda on the GPU, deletes it and empties
    the CUDA cache before returning.
    """
    wfs_array = []
    n_spikes_array = []
    ptp_array = []
    ptp_time_array = []
    # load deconvolution shifts, ids, spike times for the whole chunk
    if len(d_gpu.neuron_array)>0:
        ids = torch.cat(d_gpu.neuron_array)
        spike_array = torch.cat(d_gpu.spike_array)
        shifts_array = torch.cat(d_gpu.shift_list)
    else:
        # no spikes detected in this chunk; fall back to empty tensors
        ids = torch.empty(0,dtype=torch.double)
        spike_array = torch.empty(0,dtype=torch.double)
        shifts_array = torch.empty(0,dtype=torch.double).cpu().data.numpy()
    units = np.arange(d_gpu.temps.shape[2])
    # Cat: TODO: move these values to CONFIG;
    save_flag = False
    verbose = False
    super_res_align= True
    nn_denoise = False
    debug = True
    # arrays for tracking ptp data for split step
    # Cat: TODO move this to CONFIG files
    resplit = True
    ptp_all_array = []
    wfs_all_array = []
    # DEBUG arrays:
    raw_wfs_array = []
    idx_ptp_array = []
    wfs_temp_original_array = []
    wfs_temp_aligned_array = []
    # Cat: TODO remove this try: code into wrapper code
    # (best-effort mkdir; directories may already exist)
    try:
        os.mkdir(d_gpu.out_dir + '/wfs/')
    except:
        pass
    try:
        os.mkdir(d_gpu.out_dir + '/resplit/')
    except:
        pass
    # *********************************************************
    # **************** COMPUTE RESIDUALS **********************
    # *********************************************************
    # print ("Calling residual computation during drift: ")
    # res = compute_residual_drift(d_gpu)
    # *********************************************************
    # **************** SAVE SPIKE SHAPES **********************
    # *********************************************************
    spike_train_array = []
    max_percent_update = d_gpu.max_percent_update
    # debug unit id; set to large value when not debugging
    unit_test = 53400
    # GPU version of weighted computation
    d_gpu.temps_cuda = torch.from_numpy(d_gpu.temps).cuda() #torch(d_gpu.temps).cuda
    data = d_gpu.data
    snipit = torch.arange(0,d_gpu.temps.shape[1],1).cuda()
    wfs_empty = np.zeros((d_gpu.temps.shape[1],d_gpu.temps.shape[0]),'float32')
    # Cat: TODO read from config;
    # multi-channel rank-1 mode is currently disabled (dead branches below)
    multi_chan_rank1 = False
    # minimum amount in SU units that spike can be different at peak/trough before being
    # triaged/rejected for template update
    max_diff_peaks = 1.0
    # minimum amount in SU units that spike ptp can be different at fixed peak/trough before
    # being rejected
    max_diff_ptp = 3.0
    #print ("... processing template update...")
    #for unit in tqdm(units):
    for unit in units:
        # boolean-select spikes assigned to this unit
        idx = torch.where(ids==unit, ids*0+1, ids*0)
        idx = torch.nonzero(idx)[:,0]
        times = spike_array[idx]
        shifts = shifts_array[idx]
        # get indexes of spikes that are not too close to end;
        # note: spiketimes are relative to current chunk; i.e. start at 0
        idx2 = torch.where(times<(data.shape[1]-d_gpu.temps.shape[1]), times*0+1, times*0)
        idx2 = torch.nonzero(idx2)[:,0]
        times = times[idx2]
        shifts = shifts[idx2].cpu().data.numpy()
        # save spiketrains for resplit step
        spike_train_array.append(times.cpu().data.numpy())
        # grab waveforms;
        # Cat: TODO: decide if median or mean need to be used here;
        if idx2.shape[0]>0:
            #wfs = torch.median(data[:,times[idx2][:,None]+
            #                   snipit-d_gpu.temps.shape[1]+1].
            #                   transpose(0,1).transpose(1,2),0)[0]
            # extract (n_spikes, 1, n_times, n_chans) windows ending at spike time
            wfs_temp_original = data[:,times[:,None]+
                                snipit-d_gpu.temps.shape[1]+1]. \
                                transpose(0,1).transpose(1,2)[:,None]
            # select max channel spikes only from 4D tensor
            if multi_chan_rank1:
                # keep all channels
                wf_torch = wfs_temp_original[:,0,:,:]
            else:
                wf_torch = wfs_temp_original[:,0,:,d_gpu.max_chans[unit]]
            # *********************************************************
            # **************** NN DENOISE STEP ************************
            # *********************************************************
            # not used at this time; just move data to cpu
            denoised_wfs = wf_torch.cpu().data.numpy()
            # save data for post-run debugging;
            # Cat: TODO this is a large file, can eventually erase it;
            # saving only max channel data -not whole file
            if multi_chan_rank1:
                wfs_temp_original_array.append(denoised_wfs[:,:,d_gpu.max_chans[unit]])
            else:
                wfs_temp_original_array.append(denoised_wfs)
            # reference template (all channels, or max channel only)
            if multi_chan_rank1:
                template_original_denoised = d_gpu.temps_cuda[:,:,unit].cpu().data.numpy()
            else:
                template_original_denoised = d_gpu.temps_cuda[d_gpu.max_chans[unit],:,unit].cpu().data.numpy()
            # *********************************************************
            # ************ USE DECONV SHIFTS TO ALIGN SPIKES **********
            # *********************************************************
            # work with denoised waveforms
            if super_res_align:
                if shifts.shape[0]==1:
                    shifts = shifts[:,None]
                wfs_temp_aligned = shift_chans(denoised_wfs, -shifts)
                wfs_temp_aligned_array.append(wfs_temp_aligned)
            else:
                wfs_temp_aligned = denoised_wfs
                wfs_temp_aligned_array.append(wfs_temp_aligned)
            # *********************************************************
            # ************ COMPUTE THRESHOLDS FOR TRIAGE **************
            # *********************************************************
            # STEP 1: compute the boundaries on thresholds
            # select +/- max_percent_update of template value or +/- max_diff_peaks SU,
            # whichever is larger
            if multi_chan_rank1:
                template_at_peak = d_gpu.max_temp_array[unit]
                template_at_trough = d_gpu.min_temp_array[unit]
                # per-channel [upper, lower] bounds at the peak location
                max_thresh_dynamic = []  # np.zeros(d_gpu.temps.shape[2])
                min_thresh_dynamic = []  # np.zeros(d_gpu.temps.shape[2])
                for c in range(d_gpu.temps.shape[0]):
                    max_thresh_dynamic.append([max(template_at_peak[c]*(1+max_percent_update),
                                                   template_at_peak[c]+max_diff_peaks),
                                               min(template_at_peak[c]*(1-max_percent_update),
                                                   template_at_peak[c]-max_diff_peaks)
                                               ])
                    min_thresh_dynamic.append([min(template_at_trough[c]*(1+max_percent_update),
                                                   template_at_trough[c]+max_diff_peaks),
                                               max(template_at_trough[c]*(1-max_percent_update),
                                                   template_at_trough[c]-max_diff_peaks)
                                               ])
                max_thresh_dynamic = np.vstack(max_thresh_dynamic)
                min_thresh_dynamic = np.vstack(min_thresh_dynamic)
                print ("template_original_denoised: ", template_original_denoised.shape)
                # ptp on each of the channels
                ptp_template_original_denoised = template_original_denoised.ptp(1)
                # select +/- max_percent_update or +/- max_diff_ptp SU whichever is larger
                ptp_thresh_dynamic = []
                for c in range(d_gpu.temps.shape[0]):
                    ptp_thresh_dynamic.append([max(ptp_template_original_denoised[c]*(1+max_percent_update),
                                                   ptp_template_original_denoised[c]+max_diff_ptp),
                                               min(ptp_template_original_denoised[c]*(1-max_percent_update),
                                                   ptp_template_original_denoised[c]-max_diff_ptp)
                                               ])
                ptp_thresh_dynamic = np.array(ptp_thresh_dynamic)
                print ("ptp_thresh_dynamic: ", ptp_thresh_dynamic)
            else:
                # scalar bounds at the max-channel peak/trough time points
                template_at_peak = template_original_denoised[d_gpu.ptp_locs[unit][0]]
                template_at_trough = template_original_denoised[d_gpu.ptp_locs[unit][1]]
                max_thresh_dynamic = [max(template_at_peak*(1+max_percent_update),template_at_peak+max_diff_peaks),
                                      min(template_at_peak*(1-max_percent_update),template_at_peak-max_diff_peaks)
                                      ]
                min_thresh_dynamic = [min(template_at_trough*(1+max_percent_update),template_at_trough+max_diff_peaks),
                                      max(template_at_trough*(1-max_percent_update),template_at_trough-max_diff_peaks)
                                      ]
                # threshold also using ptp of template
                ptp_template_original_denoised = template_original_denoised.ptp(0)
                # select +/- max_percent_update or +/- max_diff_ptp SU whichever is larger
                ptp_thresh_dynamic = [max(ptp_template_original_denoised*(1+max_percent_update),
                                          ptp_template_original_denoised+max_diff_ptp),
                                      min(ptp_template_original_denoised*(1-max_percent_update),
                                          ptp_template_original_denoised-max_diff_ptp)
                                      ]
            # *************************************************************
            # **** OPTION #2: Maxes/Mins at fixed points + PTP LIMITS *****
            # *************************************************************
            # search the immediate vicinity of the peaks: -1..+1 (not just exact peak)
            # this makes it more noisy - but helps avoid alignment/denoising steps
            # if False:
            #     maxes = wfs_temp_aligned[:,d_gpu.ptp_locs[unit][0]-1:d_gpu.ptp_locs[unit][0]+2].max(1)
            #     mins = wfs_temp_aligned[:,d_gpu.ptp_locs[unit][1]-1:d_gpu.ptp_locs[unit][1]+2].min(1)
            # search just specific peak
            if multi_chan_rank1:
                # NOTE(review): this (disabled) branch leaves maxes/mins as Python
                # lists, which would break the array arithmetic below -- verify
                # before re-enabling multi_chan_rank1
                maxes = []
                mins = []
                print ("d_gpu.max_temp_array: ", d_gpu.max_temp_array.shape)
                for c in range(d_gpu.temps.shape[0]):
                    maxes.append(wfs_temp_aligned[:,c,d_gpu.max_temp_array[unit,c]])
                    mins.append(wfs_temp_aligned[:,c,d_gpu.min_temp_array[unit,c]])
            else:
                maxes = wfs_temp_aligned[:,d_gpu.ptp_locs[unit][0]]
                mins = wfs_temp_aligned[:,d_gpu.ptp_locs[unit][1]]
            # save ptps for all spike waveforms at selected time points
            ptp_all = (maxes-mins)
            ptp_all_array.append(ptp_all)
            # also compute ptps blindly over all waveforms at all locations
            # - this could be improved by limiting to a window between trough and peak...
            ptps_dumb = wfs_temp_aligned.ptp(1)
            # *************************************************************
            # ************** FIND POINTS WITHIN THRESHOLDS ****************
            # *************************************************************
            # spikes whose value at the peak lies inside [lower, upper]
            idx_maxes = np.where(np.logical_and(maxes>=max_thresh_dynamic[1],
                                                maxes<=max_thresh_dynamic[0])
                                 )[0]
            idx_mins = np.where(np.logical_and(mins<=min_thresh_dynamic[1],
                                               mins>=min_thresh_dynamic[0])
                                )[0]
            idx_ptp_dumb = np.where(np.logical_and(ptps_dumb>=ptp_thresh_dynamic[1],
                                                   ptps_dumb<=ptp_thresh_dynamic[0])
                                    )[0]
            # find intersection of ptp_maxes and ptp_mins
            idx_max_min, _, _ = np.intersect1d(idx_maxes,idx_mins,return_indices=True)
            # find intersection with ptp_dumb
            idx_ptp, idx_max_min_ptp, _ = np.intersect1d(idx_max_min,
                                                         idx_ptp_dumb,return_indices=True)
            # index into the original ptp array;
            idx_ptp_final = idx_max_min[idx_max_min_ptp]
            # save indexes where all 3 conditions are met: max, min and ptp fall within boundaries
            idx_ptp_array.append(idx_ptp_final)
            # DEBUG PRINTOUT FOR DRIFT MODEL for a particular unit;
            # do not remove
            if unit==unit_test:
                print ("UNIT: ", unit)
                print ("template at peak: ", template_at_peak)
                print ("template at trough: ", template_at_trough)
                print ("max threshold dynamic: ", max_thresh_dynamic)
                print ("min threshold dynamic: ", min_thresh_dynamic)
                print ("ptp threshold dynamic: ", ptp_thresh_dynamic)
                #print ("template_original_denoised: ", template_original_denoised)
                print ("template_original_denoised[d_gpu.ptp_locs[unit][0]]: ", template_original_denoised[d_gpu.ptp_locs[unit][0]])
                print ("template_original_denoised[d_gpu.ptp_locs[unit][1]]: ", template_original_denoised[d_gpu.ptp_locs[unit][1]])
                print ("idx_maxes: ", idx_maxes[:10])
                print ("idx_mins: ", idx_mins[:10])
                print ("maxes: ", maxes[:10])
                print ("maxes average: ", maxes.mean(0))
                print ("mins: ", mins[:10])
                print ("mins average: ", mins.mean(0))
                print ("maxes[idx_ptp]: ", maxes[idx_ptp_final][:10])
                print ("maxes average[idx_ptp]: ", maxes[idx_ptp_final].mean(0))
                print ("mins:[idx_ptp] ", mins[idx_ptp_final][:10])
                print ("mins average:[idx_ptp] ", mins[idx_ptp_final].mean(0))
                print ("idx_ptp_max_min: ", idx_max_min[:10])
                print ("idx_ptp_dumb: ", idx_ptp_dumb[:10])
                print ("idx_ptp_final: ", idx_ptp_final[:10])
                # this uses ptps of the raw waveforms
                ptp_temp1 = (maxes-mins)
                ptp_temp1 = ptp_temp1[idx_ptp_final].mean(0)
                print ("ptp value: ", ptp_temp1)
                print ("original Template ptp without denoise: ",
                       d_gpu.temps_cuda[d_gpu.max_chans[unit],:,unit].cpu().data.numpy().ptp(0))
                print ("ptp[triaged spikes]: ", ptp_temp1)
                print ("all spikes ptp(mean): ", wf_torch.cpu().data.numpy().mean(0).ptp(0))
                print ("all spikes mean(ptp): ", wf_torch.cpu().data.numpy().ptp(1).mean(0))
                print ("")
                print ("")
                print ("")
            # if at least 1 spike survived triage
            if idx_ptp.shape[0]>0:
                # compute mean ptp at non-triaged events
                ptp = ptp_all[idx_ptp_final].mean(0)
                # Cat: TODO: THIS IS NOT CORRECT FOR THE FULL TEMPLATE MODEL!!!
                wfs = np.mean(wfs_temp_aligned[idx_ptp_final],0)[0]
                wfs_array.append(wfs)
                n_spikes_array.append(idx_ptp_final.shape[0])
                ptp_array.append(ptp)
                # save this as metadata; not really required
                ptp_time_array.append(times[idx_ptp_final].cpu().data.numpy())
            else:
                # all spikes triaged; default save previous template and zeros
                wfs_array.append(d_gpu.temps[:,:,unit].transpose(1,0))
                #d_gpu.temps.shape[1], d_gpu.temps.shape[0])
                n_spikes_array.append(0)
                ptp_array.append(0)
                ptp_time_array.append(0)
        # THERE ARE NO SPIKES IN TIME CHUNK for unit
        else:
            # default save previous template and zeros
            wfs_array.append(d_gpu.temps[:,:,unit].transpose(1,0))
            #d_gpu.temps.shape[1], d_gpu.temps.shape[0])
            n_spikes_array.append(0)
            ptp_array.append(0)
            ptp_time_array.append(0)
            wfs_temp_original_array.append([])
            idx_ptp_array.append([])
            # save meta data for split information below
            wfs_temp_aligned_array.append([])
            ptp_all_array.append([])
    # *************************************
    # ***** POST PROCESSING SAVES *********
    # *************************************
    np.savez(d_gpu.out_dir + '/template_updates/chunk_data_'+
             str((chunk_id+1)*CONFIG.resources.n_sec_chunk_gpu)+'.npz',
             wfs_array=wfs_array,
             n_spikes_array=n_spikes_array,
             ptp_array=ptp_array,
             ptp_time_array=ptp_time_array
             )
    if debug:
        np.savez(d_gpu.out_dir + '/wfs/'+
                 str((chunk_id+1)*CONFIG.resources.n_sec_chunk_gpu)+'.npz',
                 wfs_temp_original_array = wfs_temp_original_array,
                 idx_ptp_array = idx_ptp_array
                 )
    # save meta information to do split;
    if resplit:
        np.savez(d_gpu.out_dir + '/resplit/'+
                 str((chunk_id+1)*CONFIG.resources.n_sec_chunk_gpu)+'.npz',
                 ptp_all_array = ptp_all_array,
                 raw_wfs_array_aligned = wfs_temp_aligned_array,
                 spike_train_array = spike_train_array
                 )
    # return if in middle of forward pass
    # Cat: TODO read length of time to update from CONFIG
    #if (((time_index%d_gpu.template_update_time)!=0) or (time_index==0)):
    # Cat: TODO not sure this is necessary/needed
    # free GPU memory used for the template copy
    del d_gpu.temps_cuda
    torch.cuda.empty_cache()
def update_templates_forward_backward(d_gpu, CONFIG, chunks, time_index):
    """Recompute templates from the per-chunk statistics saved during the
    forward pass of the current batch.

    Loads 'chunk_data_<t>.npz' for every chunk in `chunks` and, per unit,
    forms a spike-count-weighted average of either the PTP (drift model 0,
    scale-only update; current default) or the full waveform (drift model 1).
    The new template is blended with the previous one using an exponential
    forgetting factor exp(-n_spikes / d_gpu.nu), following Kilosort eq. (6).

    Parameters
    ----------
    d_gpu : deconv object; provides temps (n_chans, n_times, n_units) and nu.
    CONFIG : project configuration (chunk length, output paths).
    chunks : list of chunk ids whose saved statistics enter this update.
    time_index : int, end time (sec) of the batch; first-batch case is special.

    Returns
    -------
    templates_new : float32 array, same shape as d_gpu.temps.
    """
    print (" UPDATING TEMPLATES <<<<<<<<<<<<<")
    # find max chans of templates (currently unused below)
    max_chans = d_gpu.temps.ptp(1).argmax(0)
    # make new templates
    templates_new = np.zeros(d_gpu.temps.shape,'float32')
    units = np.arange(d_gpu.temps.shape[2])
    verbose = False
    n_batches_per_chunk = d_gpu.template_update_time//CONFIG.resources.n_sec_chunk_gpu
    # load the saved per-chunk statistics for every chunk in this batch
    wfs_local_array = []
    n_spikes_local_array = []
    ptp_local_array = []
    for k in range(len(chunks)):
        fname = (d_gpu.out_dir + '/template_updates/chunk_data_'+
                 str((chunks[k]+1)*CONFIG.resources.n_sec_chunk_gpu)+'.npz')
        if verbose:
            print ("Loadin gchunks: ", fname)
        data = np.load(fname, allow_pickle=True)
        wfs_local_array.append(data['wfs_array'])
        n_spikes_local_array.append(data['n_spikes_array'])
        ptp_local_array.append(data['ptp_array'])
    # Cat: TODO: This is in CONFIG file already
    # True -> drift model 0 (scale-only); False -> drift model 1 (full waveform)
    ptp_flag = True
    for unit in units:
        # gather this unit's statistics across all chunks of the batch
        ptp_local = []
        wfs_local = []
        n_spikes_local = []
        for c in range(len(chunks)):
            #wfs_local[c] = wfs_array[batch_offset+c][unit]
            #n_spikes_local[c] = n_spikes_array[batch_offset+c][unit]
            #ptp_local.append(ptp_array[batch_offset+c][unit])
            wfs_local.append(wfs_local_array[c][unit])
            n_spikes_local.append(n_spikes_local_array[c][unit])
            ptp_local.append(ptp_local_array[c][unit])
        n_spikes = np.hstack(n_spikes_local).sum(0)
        # if there are no spikes at all matched, just use previous template
        if n_spikes==0:
            templates_new[:,:,unit]=d_gpu.temps[:,:,unit]
            continue
        # *******************************************************
        # **************** COMPUTE DRIFT MODEL SCALING **********
        # *******************************************************
        # DRIFT MODEL 0; PARTIAL template update using scaling of neurons
        if ptp_flag:
            ptp_temp = np.average(np.float32(ptp_local), weights=np.int32(n_spikes_local), axis=0)
            scale = ptp_temp/d_gpu.temps[:,:,unit].ptp(1).max(0)
        # DRIFT MODEL 1; FULL template update using raw spikes
        else:
            # compute weighted average of the template
            template = np.average(np.float32(wfs_local), weights=np.int32(n_spikes_local), axis=0).T
        # *******************************************************
        # **************** UPDATE TEMPLATE **********************
        # *******************************************************
        # first chunk of data; just scale starting template/or keep original
        if time_index==d_gpu.template_update_time:
            if ptp_flag:
                templates_new[:,:,unit]=d_gpu.temps[:,:,unit]*scale
            else:
                print (" NOTE: First chunk of time FULL TEMPLATE UPDATE"+
                       " (<<<<< NOT CORRECTLY UPDATED >>>>)")
                templates_new[:,:,unit]=template
        else:
            # use KS eq (6): exponential blend of old and new estimates
            if ptp_flag:
                #templates_new[:,:,unit] = d_gpu.temps[:,:,unit]*scale
                # exponential updates
                t1 = d_gpu.temps[:,:,unit]*np.exp(-n_spikes/d_gpu.nu)
                t2 = (1-np.exp(-n_spikes/d_gpu.nu))*d_gpu.temps[:,:,unit]*scale
                templates_new[:,:,unit] = (t1+t2)
            else:
                #print ("temps: ", d_gpu.temps.shape, "Unit: ", unit, ", scaling factors: ",
                #       np.exp(-n_spikes/d_gpu.nu), (1-np.exp(-n_spikes/d_gpu.nu)))
                t1 = d_gpu.temps[:,:,unit]*np.exp(-n_spikes/d_gpu.nu)
                t2 = (1-np.exp(-n_spikes/d_gpu.nu))*template
                templates_new[:,:,unit] = (t1+t2)
        # if unit==unit_test:
        #     print (" FINAL SCALE: ", scale)
        #     print (" templates_new max ptp: ", templates_new[:,:,unit].ptp(1).max(0))
        #     print (" ptp_local: ", ptp_local)
    if verbose:
        if time_index==d_gpu.template_update_time:
            print (" FIRST DECONV STEP... updating existing template forward only")
        else:
            print (" SECONDARY DECONV STEPs... updating existing template forward and backward")
    return templates_new
def split_neurons(templates_new, d_gpu, CONFIG, time_index):
    """Search every unit for bimodal PTP distributions and append templates of
    newly discovered neurons to `templates_new`.

    Loads the 'resplit/<t>.npz' metadata saved over the last
    CONFIG.deconvolution.neuron_discover_time seconds, converts per-chunk
    spike times to absolute sample indices, then runs `new_neuron_search` per
    unit (parallel via parmap when CONFIG.resources.multi_processing).

    Parameters
    ----------
    templates_new : array (n_chans, n_times, n_units); extended in place
        along axis 2 for each discovered neuron.
    d_gpu : deconv object (root_dir, out_dir, temps, reader.buffer).
    CONFIG : project configuration.
    time_index : int, end time (sec) of the current batch.

    Returns
    -------
    templates_new : possibly with additional units concatenated on axis 2.
    """
    print (" CHECKING FOR NEW NEURONS (in development...)")
    # raw standardized recording used to re-extract waveforms for splits
    standardized_filename = os.path.join(os.path.join(os.path.join(d_gpu.root_dir, 'tmp'),
                                         'preprocess'),
                                         'standardized.bin')
    units = np.arange(d_gpu.temps.shape[2])
    # load relevant data; verify how many steps of data to load for split
    n_steps_back = CONFIG.deconvolution.neuron_discover_time//CONFIG.resources.n_sec_chunk_gpu
    print (" # of steps backwards: ", n_steps_back)
    # per-unit accumulators over the last n_steps_back chunks
    ptps = []
    raw_wfs = []
    spike_train = []
    for unit in units:
        ptps.append([])
        raw_wfs.append([])
        spike_train.append([])
    # Cat: TODO: this is a bit hacky; should speed it up.
    sample_rate = CONFIG.recordings.sampling_rate
    for k in range(time_index-(n_steps_back-1)*CONFIG.resources.n_sec_chunk_gpu,
                   time_index+1, CONFIG.resources.n_sec_chunk_gpu):
        fname_resplit = d_gpu.out_dir + '/resplit/'+str(k)+'.npz'
        print ("FNAMe resplit: ", fname_resplit)
        data = np.load(fname_resplit)
        ptps_temp = data['ptp_all_array']
        raw_temp = data['raw_wfs_array_aligned']
        spikes = data['spike_train_array']
        for unit in units:
            ptps[unit].append(ptps_temp[unit])
            raw_wfs[unit].append(raw_temp[unit])
            # convert chunk-relative spike times to absolute sample indices
            # (subtracting reader buffer and the template length offset)
            spikes_temp = (spikes[unit]
                           + (k-CONFIG.resources.n_sec_chunk_gpu)*sample_rate
                           -d_gpu.reader.buffer
                           -d_gpu.temps.shape[1]
                           )
            spike_train[unit].append(spikes_temp)
    # loop over units and find splits:
    print (".... Searching for new neurons...")
    if CONFIG.resources.multi_processing:
        #batches_in = np.array_split(units, CONFIG.resources.n_processors)
        new_templates = parmap.map(new_neuron_search, units, ptps, CONFIG, spike_train,
                                   standardized_filename,
                                   d_gpu.temps,
                                   d_gpu.out_dir,
                                   processes=CONFIG.resources.n_processors,
                                   pm_pbar=True)
    else:
        new_templates = []
        for unit in units:
            temp = new_neuron_search(unit, ptps, CONFIG, spike_train,
                                     standardized_filename,
                                     d_gpu.temps,
                                     d_gpu.out_dir)
            #if temp is not None:
            #    print ("new_temp: ", temp.shape)
            new_templates.append(temp)
    print (" TODO: delete all intermediate resplit files saved...")
    # append the discovered templates (entries are None when no split found)
    print (" STARTING TEMPLATES: ", templates_new.shape)
    for k in range(len(new_templates)):
        if new_templates[k] is not None:
            templates_new = np.concatenate((templates_new,
                                            new_templates[k][:,:,None].transpose(1,0,2)),axis=2)
    print (" FINAL TEMPLATES: ", templates_new.shape)
    return templates_new
def finish_templates(templates_new, d_gpu, CONFIG, time_index, chunk_id,
                     updated_temp_time):
    """Save the updated templates and re-initialize the deconv object with them.

    The file is named after `updated_temp_time` (the batch-aligned end time)
    so the backward pass can locate it even when the recording ends mid-batch.

    Parameters
    ----------
    templates_new : array (n_chans, n_times, n_units); saved transposed to
        (n_units, n_times, n_chans).
    d_gpu : deconv object; fname_templates/chunk_id are updated and
        initialize() is called so the next pass uses the new templates.
    CONFIG : project configuration (chunk length).
    time_index : int, end time (sec) of the batch just deconvolved.
    chunk_id : int, current chunk counter (unused here; kept for interface).
    updated_temp_time : int, batch-aligned template time used in the filename.

    Returns
    -------
    d_gpu : the re-initialized deconv object.

    Fix: removed a dead first assignment to `out_file` (built from
    `time_index`) that was immediately overwritten by the
    `updated_temp_time`-based path below; behavior unchanged.
    """
    verbose = False
    if verbose:
        print ("")
        print ("")
        print (" FINISHING TEMPLATES ")
    # save updated templates under the batch-aligned time so that the
    # backward step can find this file;
    # Cat: TODO: this can probably be done better/more elegantly
    time_index_extended = str(updated_temp_time)
    out_file = os.path.join(d_gpu.out_dir,'template_updates',
                            'templates_'+
                            str(time_index_extended)+
                            'sec.npy')
    if verbose:
        print (" TEMPS being saved: ", out_file)
    np.save(out_file, templates_new.transpose(2,1,0))
    # re-initialize d_gpu with the new template file
    # Cat: TODO: is this the best way to pass on template? probably name is fine;
    d_gpu.fname_templates = out_file
    d_gpu.chunk_id = time_index//CONFIG.resources.n_sec_chunk_gpu-1
    d_gpu.initialize()
    # pass reinitialized object for following pass
    return d_gpu
def new_neuron_search(unit, ptps, CONFIG, spike_train,
                      standardized_filename,
                      d_gpu_temps,
                      d_gpu_out_dir):
    """Test one unit's PTP distribution for bimodality and, if it splits,
    return the template of the component that does NOT match an existing unit.

    Pipeline: knn-triage 5% of PTP values -> diptest screen -> 2-component
    GMM split -> per-component diptest stability check -> re-read raw
    waveforms -> cosine-match both component templates against all existing
    templates; the less-similar component is declared the new neuron and its
    template is saved to '<out_dir>/resplit/new_<unit>.npz' and returned.

    Returns
    -------
    array (n_times, n_chans) template of the new neuron, or None when no
    stable split is found.

    Fixes: removed unused locals (`wfs_resplit`, `new_templates`, the
    post-deletion `temp_ptps`) and collapsed the duplicated save/return
    branches into a single path.
    """
    features = np.hstack(ptps[unit])
    # need a minimum population to attempt a split
    if features.shape[0]<CONFIG.deconvolution.min_split_spikes:
        return None
    # triage 5% of spikes
    idx_keep = knn_triage(95,features[:,None]) # return boolean
    idx_keep = np.where(idx_keep)[0]
    features_triaged = features[idx_keep]
    # screen distribution for bimodalities using diptest before running MFM
    pval = run_diptest_resplit(features_triaged, assignment=None)
    if pval>0.1:
        return None
    # ************************
    # ******* SPLIT **********
    # ************************
    # we just do 2comp gmm now (rather than MFM + cc)
    assignments = em_test(features_triaged[:,None])
    pvals=[]
    for k in np.unique(assignments):
        idx = np.where(assignments==k)[0]
        pvals.append(run_diptest_resplit(features_triaged[idx]))
    # if neither split is stable skip unit
    # this is a bit too conservative; might need to retweak
    # Cat: TODO: export to CONFIG
    pval_thresh = 0.95
    if max(pvals)<pval_thresh:
        return None
    # if unit survived diptest; need to load raw waveforms to compute templates
    temp_spikes = np.hstack(spike_train[unit])[idx_keep]
    wfs, skipped_idx = binary_reader_waveforms(standardized_filename,
                                               CONFIG.recordings.n_channels,
                                               d_gpu_temps.shape[1],
                                               temp_spikes)
    # if reader misses spikes; delete them from assignments also
    if len(skipped_idx)>0:
        assignments = np.delete(assignments, skipped_idx)
    # generate candidate templates: mean waveform of each GMM component
    temps = []
    for k in np.unique(assignments):
        idx = np.where(assignments==k)[0]
        temp = wfs[idx]
        temp = temp.mean(0)
        temps.append(temp)
    # compute cosine-similarity check to see which neuron already is present
    # in the recording; the component with the LOWER best-match is the new one
    res = check_matches(d_gpu_temps, temps)
    match_vals1, match_vals2 = res[0], res[1]
    match1 = match_vals1.max(0)
    match2 = match_vals2.max(0)
    match_new = 0 if match1 <= match2 else 1
    match_old = 1 - match_new
    fname_newneuron = d_gpu_out_dir + '/resplit/new_'+str(unit)+'.npz'
    np.savez(fname_newneuron,
             new_neuron = temps[match_new],
             old_neuron = temps[match_old]
             )
    return temps[match_new]
def check_matches(templates, temp):
    """Score two candidate templates against every existing unit template.

    For each of the two candidates in `temp` (each shaped (n_chans, n_times)
    after accounting for the internal transposes), computes the best cosine
    similarity against every existing unit over time-rolls of -10..+9 samples,
    floored at 0.

    Parameters
    ----------
    templates : array (n_times, n_chans, n_units) of existing templates
        (transposed internally to (n_chans, n_times, n_units)).
    temp : sequence of two candidate template arrays.

    Returns
    -------
    list of two 1-D arrays, one similarity score per existing unit.
    """
    existing = templates.transpose(1, 0, 2)
    #temp = temp.transpose(0,1)
    n_units = existing.shape[2]
    res = []
    for comp in range(2):
        candidate = temp[comp].T
        scores = []
        for unit in range(n_units):
            reference = existing[:, :, unit].T.ravel()
            # best similarity over small circular time shifts; negative
            # similarities are clamped to 0
            rolled = [
                1 - scipy.spatial.distance.cosine(
                    np.roll(candidate, shift, axis=1).ravel(), reference)
                for shift in range(-10, 10)
            ]
            scores.append(max(0, *rolled))
        res.append(np.hstack(scores))
    return (res)
def em_test(features):
    """Cluster features into 2 groups with a full-covariance Gaussian mixture.

    Returns the hard component label (0/1) for every row of `features`.
    """
    model = mixture.GaussianMixture(n_components=2, covariance_type='full')
    model.fit(features)
    return model.predict(features)
def mfm_resplit(features, CONFIG):
    """Run MFM spike-sorting on a feature matrix with a trivial mask/grouping.

    Every observation gets full weight (mask of ones) and its own group, so
    the clustering is driven purely by the features.

    Returns the variational parameter object produced by mfm.spikesort.
    """
    n_obs = features.shape[0]
    mask = np.ones((n_obs, 1))
    group = np.arange(n_obs)
    vbParam = mfm.spikesort(features[:, :, None],
                            mask,
                            group,
                            CONFIG)
    return vbParam
def run_diptest_resplit(features, assignment=None):
    """P-value of Hartigan's dip test for unimodality on `features`.

    When `assignment` is given, features are first projected to 1-D with LDA
    using those labels, and the dip test runs on the projection.
    """
    from diptest import diptest as dp
    if assignment is None:
        return dp(features)[1]
    projection = LDA(n_components = 1).fit_transform(features, assignment).ravel()
    return dp(projection)[1]
def template_calculation_parallel(unit, ids, spike_array, temps, data, snipit):
    """Compute the median waveform of one unit's spikes in a data chunk.

    Parameters
    ----------
    unit : int, unit id to select from `ids`.
    ids : 1-D array of unit ids, parallel to `spike_array`.
    spike_array : 1-D array of chunk-relative spike times (samples).
    temps : array (n_chans, n_times, n_units); only the shape is used.
    data : array (n_chans, chunk_len) of raw traces.
    snipit : 1-D array arange(n_times) used to build extraction windows.

    Returns
    -------
    (wfs, n) : median waveform of shape (n_times, n_chans) and the number of
        spikes used; a zero waveform and 0 when no usable spikes exist.

    Fix: the original referenced an undefined name `wfs_empty` on the
    no-spikes path, raising NameError; the zero array is now built locally.
    """
    idx = np.where(ids==unit)[0]
    times = spike_array[idx]
    # get indexes of spikes that are not too close to end;
    # note: spiketimes are relative to current chunk; i.e. start at 0
    idx2 = np.where(times<(data.shape[1]-temps.shape[1]))[0]
    # grab waveforms;
    if idx2.shape[0]>0:
        wfs = np.median(data[:,times[idx2][:,None]+
                             snipit-temps.shape[1]+1].
                             transpose(1,2,0),0)
        return (wfs, idx2.shape[0])
    else:
        # BUG FIX: `wfs_empty` was never defined in this function (NameError);
        # build the zero placeholder with the expected (n_times, n_chans) shape
        wfs_empty = np.zeros((temps.shape[1], temps.shape[0]), 'float32')
        return (wfs_empty, 0)
def update_templates_GPU(d_gpu,
                         CONFIG,
                         time_index,
                         wfs_array,
                         n_spikes_array,
                         output_directory):
    """Accumulate per-unit waveform medians and periodically update templates.

    For every deconvolved unit, the median waveform of the spikes found in
    the current chunk is appended to ``wfs_array`` (with its spike count in
    ``n_spikes_array``).  Every ``d_gpu.template_update_time`` seconds the
    accumulated chunk medians are combined into new templates using a
    Kilosort-eq.(6)-style exponential forgetting rule, saved to disk, and
    the deconv object is re-initialized from them.

    Parameters
    ----------
    d_gpu : deconvGPU
        GPU deconvolution object (provides data, temps, spike/neuron arrays).
    CONFIG : object
        YASS configuration (uses ``resources.n_sec_chunk_gpu``).
    time_index : int
        Current time (seconds) within the recording.
    wfs_array, n_spikes_array : list
        Per-chunk accumulators; extended in place.
    output_directory : str
        Directory under which 'template_updates' snapshots are written.

    Returns
    -------
    tuple
        ``(d_gpu, wfs_array)``; ``wfs_array`` is reset to ``[]`` right after
        a template update so accumulation restarts for the next window.
    """
    # ******************************************************
    # ***************** SAVE WFS FIRST *********************
    # ******************************************************
    # make a new entry for this chunk in both accumulators
    wfs_array.append([])
    n_spikes_array.append([])
    iter_ = len(wfs_array) - 1
    # unit id of every deconvolved spike in this chunk
    ids = torch.cat(d_gpu.neuron_array)
    units = np.arange(d_gpu.temps.shape[2])
    # GPU version + weighted computation
    data = d_gpu.data
    spike_array = torch.cat(d_gpu.spike_array)
    snipit = torch.arange(0, d_gpu.temps.shape[1], 1).cuda()
    # placeholder for units with no spikes in this chunk
    wfs_empty = np.zeros((d_gpu.temps.shape[1], d_gpu.temps.shape[0]), 'float32')
    for unit in units:
        # spikes assigned to this unit
        idx = torch.where(ids == unit, ids * 0 + 1, ids * 0)
        idx = torch.nonzero(idx)[:, 0]
        times = spike_array[idx]
        # exclude spikes past the chunk end; technically should exclude
        # both start and end buffers (times are relative to chunk start)
        idx2 = torch.where(times < data.shape[1], times * 0 + 1, times * 0)
        idx2 = torch.nonzero(idx2)[:, 0]
        # grab waveforms; window is aligned one time point past template end
        if idx2.shape[0] > 0:
            wfs = torch.median(data[:, times[idx2][:, None] +
                               snipit - d_gpu.temps.shape[1] + 1].
                               transpose(0, 1).transpose(1, 2), 0)[0]
            # Cat: TODO: maybe save only vis chans to save space?
            wfs_array[iter_].append(wfs.cpu().data.numpy())
            n_spikes_array[iter_].append(idx2.shape[0])
        else:
            wfs_array[iter_].append(wfs_empty)
            n_spikes_array[iter_].append(0)
    # ******************************************************
    # ********** UPDATE TEMPLATES PERIODICALLY *************
    # ******************************************************
    # update only every template_update_time seconds; otherwise just
    # return the accumulators unchanged
    if (((time_index % d_gpu.template_update_time) != 0) or (time_index == 0)):
        return (d_gpu, wfs_array)
    # forgetting factor ~ number of spikes
    # Cat: TODO: read from CONFIG file
    nu = 10
    # find max chans of templates (currently unused here)
    max_chans = d_gpu.temps.ptp(1).argmax(0)
    # make new templates
    templates_new = np.zeros(d_gpu.temps.shape, 'float32')
    units = np.arange(d_gpu.temps.shape[2])
    # weighted template computation over the accumulated chunks
    n_chunks = len(wfs_array)
    wfs_local = np.zeros((n_chunks, d_gpu.temps.shape[1], d_gpu.temps.shape[0]),
                         'float32')
    n_spikes_local = np.zeros((n_chunks), 'int32')
    # Cat: TODO: this code might crash if we don't have enough spikes overall
    #            or even within a single window
    for unit in units:
        # gather saved chunk medians and spike counts for this unit
        for c in range(len(wfs_array)):
            wfs_local[c] = wfs_array[c][unit]
            n_spikes_local[c] = n_spikes_array[c][unit]
        n_spikes = n_spikes_local.sum(0)
        # if no spikes at all were matched, keep the previous template
        # (bug fix: the old code assigned an undefined/stale local `template`)
        if n_spikes == 0:
            templates_new[:, :, unit] = d_gpu.temps[:, :, unit]
            continue
        # chunk medians weighted by their spike counts
        template = np.average(wfs_local, weights=n_spikes_local, axis=0).T
        # first update window: no history yet, use the new template directly
        if time_index == d_gpu.template_update_time:
            templates_new[:, :, unit] = template
        # otherwise blend with the old template, KS eq (6)
        else:
            t1 = d_gpu.temps[:, :, unit] * np.exp(-n_spikes / nu)
            t2 = (1 - np.exp(-n_spikes / nu)) * template
            templates_new[:, :, unit] = (t1 + t2)
    # persist the updated templates for this time point
    out_file = os.path.join(output_directory, 'template_updates',
                            'templates_' +
                            str((d_gpu.chunk_id + 1) * CONFIG.resources.n_sec_chunk_gpu) +
                            'sec.npy')
    np.save(out_file, templates_new.transpose(2, 1, 0))
    # re-initialize d_gpu from the freshly saved templates
    d_gpu.fname_templates = out_file
    d_gpu.initialize()
    # reset wfs_array to empty
    return (d_gpu, [])
def deconv_ONgpu(fname_templates_in,
                 output_directory,
                 reader,
                 threshold,
                 fname_spike_train,
                 fname_spike_train_up,
                 fname_templates,
                 fname_templates_up,
                 fname_shifts,
                 CONFIG,
                 run_chunk_sec):
    """Run the GPU deconvolution (subtraction) pipeline over all batches.

    Builds a deconvGPU object, runs it chunk by chunk (caching per-chunk
    results as .npz under d_gpu.seg_dir so restarts skip finished chunks),
    then gathers spike times/ids/shifts, sorts them, and saves spike train,
    shifts and templates to .npy files.

    NOTE(review): the `fname_spike_train` and `fname_shifts` parameters are
    shadowed below by paths derived from d_gpu.out_dir — the passed-in
    values are effectively ignored; `n_sec`, `root_dir` and `run_chunk_sec`
    are assigned/received but never used. Presumably leftovers — verify
    against callers before relying on them.
    """
    # *********** CONSTRUCT DECONV OBJECT ************
    d_gpu = deconvGPU(CONFIG, fname_templates_in, output_directory)
    # Cat: TODO: gpu deconv requires own chunk_len variable
    n_sec = CONFIG.resources.n_sec_chunk_gpu
    root_dir = CONFIG.data.root_folder
    # Cat: TODO: read from CONFIG
    d_gpu.max_iter = 1000
    d_gpu.deconv_thresh = threshold
    # Cat: TODO: make sure svd recomputed for higher rank etc.
    d_gpu.svd_flag = True
    # Cat: TODO read from CONFIG file
    d_gpu.RANK = 49
    d_gpu.vis_chan_thresh = 1.0
    d_gpu.superres_shift = True
    # debug/printout parameters
    # Cat: TODO: read all from CONFIG
    d_gpu.save_objective = False
    d_gpu.verbose = False
    d_gpu.print_iteration_counter = 50
    d_gpu.save_state = True
    # add reader
    d_gpu.reader = reader
    # *********** INIT DECONV ****************
    begin = dt.datetime.now().timestamp()
    d_gpu.initialize()
    setup_time = np.round((dt.datetime.now().timestamp()-begin), 4)
    print ("-------------------------------------------")
    print ("Total init time ", setup_time, 'sec')
    print ("-------------------------------------------")
    print ("")
    # ************ RUN DECONV ***************
    print ("Subtraction step...")
    begin = dt.datetime.now().timestamp()
    # Cat: TODO : last chunk of data may be skipped if this doesn't work right.
    print (" (TODO: Make sure last bit is added if rec_len not multiple of n_sec_gpu_chnk)")
    # loop over chunks and run the subtraction step; chunks whose .npz
    # output already exists are skipped (restart support)
    for chunk_id in tqdm(range(reader.n_batches)):
        fname = os.path.join(d_gpu.seg_dir, str(chunk_id).zfill(5)+'.npz')
        if os.path.exists(fname)==False:
            # reset lists for each segment of time
            d_gpu.offset_array = []
            d_gpu.spike_array = []
            d_gpu.neuron_array = []
            d_gpu.shift_list = []
            # run deconv
            d_gpu.run(chunk_id)
            # save deconv results for this chunk
            np.savez(fname,
                     spike_array = d_gpu.spike_array,
                     offset_array = d_gpu.offset_array,
                     neuron_array = d_gpu.neuron_array,
                     shift_list = d_gpu.shift_list)
    subtract_time = np.round((dt.datetime.now().timestamp()-begin), 4)
    print ("-------------------------------------------")
    total_length_sec = int((d_gpu.reader.end - d_gpu.reader.start)/d_gpu.reader.sampling_rate)
    print ("Total Deconv Speed ", np.round(total_length_sec/(setup_time+subtract_time), 2), " x Realtime")
    # ************* DEBUG MODE *****************
    if d_gpu.save_objective:
        fname_obj_array = os.path.join(d_gpu.out_dir, 'obj_array.npy')
        np.save(fname_obj_array, d_gpu.obj_array)
    # ************** SAVE SPIKES & SHIFTS **********************
    print (" gathering spike trains and shifts from deconv ")
    batch_size = d_gpu.reader.batch_size
    buffer_size = d_gpu.reader.buffer
    temporal_size = CONFIG.spike_size
    # collect (time, unit) pairs from every chunk; seeded with an empty
    # (0,2) array so np.vstack below works even with no spikes
    spike_train = [np.zeros((0, 2), 'int32')]
    shifts = []
    for chunk_id in tqdm(range(reader.n_batches)):
        fname = os.path.join(d_gpu.seg_dir, str(chunk_id).zfill(5)+'.npz')
        data = np.load(fname)
        spike_array = data['spike_array']
        neuron_array = data['neuron_array']
        offset_array = data['offset_array']
        shift_list = data['shift_list']
        for p in range(len(spike_array)):
            spike_times = spike_array[p].cpu().data.numpy()
            # keep only spikes inside the batch proper (drop the buffers)
            idx_keep = np.logical_and(spike_times >= buffer_size,
                                      spike_times < batch_size+buffer_size)
            idx_keep = np.where(idx_keep)[0]
            temp = np.zeros((len(idx_keep), 2), 'int32')
            # convert chunk-relative times to absolute recording times
            temp[:, 0] = spike_times[idx_keep]+offset_array[p]
            temp[:, 1] = neuron_array[p].cpu().data.numpy()[idx_keep]
            spike_train.extend(temp)
            shifts.append(shift_list[p].cpu().data.numpy()[idx_keep])
    spike_train = np.vstack(spike_train)
    shifts = np.hstack(shifts)
    # shift times by half a template so they refer to the spike centre
    # NOTE(review): the original comment said "add ... back in" but the code
    # subtracts — the sign convention should be confirmed against deconvGPU
    spike_train[:, 0] = spike_train[:, 0]-temporal_size//2
    # sort spike train by time
    idx = spike_train[:, 0].argsort(0)
    spike_train = spike_train[idx]
    shifts = shifts[idx]
    # save spike train (same array saved as the "upsampled" train)
    print (" saving spike_train: ", spike_train.shape)
    fname_spike_train = os.path.join(d_gpu.out_dir, 'spike_train.npy')
    np.save(fname_spike_train, spike_train)
    np.save(fname_spike_train_up, spike_train)
    # save shifts
    fname_shifts = os.path.join(d_gpu.out_dir, 'shifts.npy')
    np.save(fname_shifts, shifts)
    # save templates and upsampled templates (identical copies here)
    templates_in_original = np.load(fname_templates_in)
    np.save(fname_templates, templates_in_original)
    np.save(fname_templates_up, templates_in_original)
def deconv_ONcpu(fname_templates_in,
                 output_directory,
                 reader,
                 threshold,
                 save_up_data,
                 fname_spike_train,
                 fname_spike_train_up,
                 fname_templates,
                 fname_templates_up,
                 CONFIG):
    """Run match-pursuit deconvolution on CPU and save spike trains/templates.

    Batches are deconvolved (optionally in parallel via parmap), with
    per-batch .npz results cached under <output_directory>/seg so a restart
    skips completed batches.  Results are stacked, spike ids mapped back
    from upsampled ids, spike times recentred, and saved to the given .npy
    paths.  When `save_up_data` is set, the upsampled templates and spike
    train are saved as well.
    """
    logger = logging.getLogger(__name__)
    # match-pursuit hyper-parameters
    # NOTE(review): hard-coded; presumably should come from CONFIG — verify
    conv_approx_rank = 5
    upsample_max_val = 8
    max_iter = 1000
    mp_object = MatchPursuit_objectiveUpsample(
        fname_templates=fname_templates_in,
        save_dir=output_directory,
        reader=reader,
        max_iter=max_iter,
        upsample=upsample_max_val,
        threshold=threshold,
        conv_approx_rank=conv_approx_rank,
        n_processors=CONFIG.resources.n_processors,
        multi_processing=CONFIG.resources.multi_processing)
    logger.info('Number of Units IN: {}'.format(mp_object.temps.shape[2]))
    # directory to save results for each segment
    seg_dir = os.path.join(output_directory, 'seg')
    if not os.path.exists(seg_dir):
        os.makedirs(seg_dir)
    # skip files/batches already completed; this allows more even distribution
    # across cores in case of restart
    # Cat: TODO: if cpu is still being used by endusers, may wish to implement
    #      dynamic file assignment here to deal with slow cores etc.
    fnames_out = []
    batch_ids = []
    for batch_id in range(reader.n_batches):
        fname_temp = os.path.join(seg_dir,
                                  "seg_{}_deconv.npz".format(
                                      str(batch_id).zfill(6)))
        if os.path.exists(fname_temp):
            continue
        fnames_out.append(fname_temp)
        batch_ids.append(batch_id)
    logger.info("running deconvolution on {} batches of {} seconds".format(
        len(batch_ids), CONFIG.resources.n_sec_chunk))
    if len(batch_ids)>0:
        if CONFIG.resources.multi_processing:
            # split remaining batches evenly across the worker processes
            logger.info("running deconvolution with {} processors".format(
                CONFIG.resources.n_processors))
            batches_in = np.array_split(batch_ids, CONFIG.resources.n_processors)
            fnames_in = np.array_split(fnames_out, CONFIG.resources.n_processors)
            parmap.starmap(mp_object.run,
                           list(zip(batches_in, fnames_in)),
                           processes=CONFIG.resources.n_processors,
                           pm_pbar=True)
        else:
            logger.info("running deconvolution")
            for ctr in range(len(batch_ids)):
                mp_object.run([batch_ids[ctr]], [fnames_out[ctr]])
    # collect per-batch spike trains (including previously cached batches)
    res = []
    logger.info("gathering deconvolution results")
    for batch_id in range(reader.n_batches):
        fname_out = os.path.join(seg_dir,
                                 "seg_{}_deconv.npz".format(
                                     str(batch_id).zfill(6)))
        res.append(np.load(fname_out)['spike_train'])
    res = np.vstack(res)
    logger.info('Number of Spikes deconvolved: {}'.format(res.shape[0]))
    # save templates (pass-through copy of the input templates)
    np.save(fname_templates, np.load(fname_templates_in))
    # since deconv spike time is not centered, get shift for centering
    shift = CONFIG.spike_size // 2
    # get spike train and save
    spike_train = np.copy(res)
    # map upsampled ids back to the original unit ids
    spike_train[:, 1] = np.int32(spike_train[:, 1]/mp_object.upsample_max_val)
    spike_train[:, 0] += shift
    # save
    np.save(fname_spike_train, spike_train)
    if save_up_data:
        # get upsampled templates and mapping for computing residual
        (templates_up,
         deconv_id_sparse_temp_map) = mp_object.get_sparse_upsampled_templates()
        np.save(fname_templates_up,
                templates_up.transpose(2, 0, 1))
        # get upsampled spike train (ids mapped to sparse upsampled templates)
        spike_train_up = np.copy(res)
        spike_train_up[:, 1] = deconv_id_sparse_temp_map[
            spike_train_up[:, 1]]
        spike_train_up[:, 0] += shift
        np.save(fname_spike_train_up, spike_train_up)
def update_templates_CPU(d_gpu, templates_in, CONFIG, ref_template,
                         time_index, wfs_array, n_spikes_array):
    """Accumulate raw spike waveforms per unit and update templates every 60s.

    CPU counterpart of update_templates_GPU: raw waveforms for the current
    chunk are appended to ``wfs_array``/``n_spikes_array``; every 60 seconds
    the stacked waveforms are medianed into new per-unit templates and
    blended with the old ones via a Kilosort-eq.(6)-style forgetting rule.

    NOTE(review): the passed-in ``ref_template`` is immediately overwritten
    by the reference template loaded from the package assets — presumably
    the parameter is vestigial; verify against callers.

    Returns
    -------
    tuple
        ``(templates, wfs_array)``: the previous templates plus the running
        accumulator between updates, or the new templates and an empty list
        right after an update.
    """
    # need to load reference template for super-res alignment OPTION
    ref_template = np.load(absolute_path_to_asset(
        os.path.join('template_space', 'ref_template.npy')))
    # resample the reference template to the configured spike size
    spike_size = CONFIG.spike_size
    x = np.arange(ref_template.shape[0])
    xnew = np.linspace(0, x.shape[0]-1, num=spike_size, endpoint=True)
    y = ref_template
    tck = interpolate.splrep(x, y, s=0)
    ref_template = interpolate.splev(xnew, tck, der=0)
    # ******************************************************
    # ***************** SAVE WFS FIRST *********************
    # ******************************************************
    # make a new entry for this chunk in both accumulators
    wfs_array.append([])
    n_spikes_array.append([])
    iter_ = len(wfs_array)-1
    # original templates needed for array generation
    templates_old = d_gpu.temps
    ids = torch.cat(d_gpu.neuron_array)
    units = np.arange(templates_old.shape[2])
    # CPU version (the GPU branch below is unreachable reference code)
    if True:
        data = d_gpu.data_cpu
        # Cat: TODO: save only vis channels data
        for unit in units:
            # spikes assigned to this unit
            idx = np.where(ids.cpu().data.numpy()==unit)[0]
            times = torch.cat(d_gpu.spike_array).cpu().data.numpy()[idx]
            # exclude buffer; technically should exclude both start and ends
            idx2 = np.where(times<data.shape[1]-200)[0]
            wfs = (data[:, times[idx2]
                        [:, None]+
                        np.arange(61)-60].transpose(1, 2, 0))
            # Cat: TODO: save only vis chans
            wfs_array[iter_].append(wfs)
            n_spikes_array[iter_].append(idx2.shape[0])
    # GPU version + weighted computation (kept for reference; never runs)
    else:
        data = d_gpu.data
        spike_array = torch.cat(d_gpu.spike_array)
        snipit = torch.arange(0, 61, 1).cuda()
        for unit in units:
            idx = torch.where(ids==unit, ids*0+1, ids*0)
            idx = torch.nonzero(idx)[:, 0]
            times = spike_array[idx]
            # exclude buffer; technically should exclude both start and ends
            idx2 = torch.where(times<data.shape[1], times*0+1, times*0)
            idx2 = torch.nonzero(idx2)[:, 0]
            # grab waveforms
            wfs = data[:, times[idx2][:, None]+snipit-60].transpose(0, 1).transpose(1, 2).mean(0)
            # Cat: TODO: save only vis chans
            wfs_array[iter_].append(wfs)
            n_spikes_array[iter_].append(idx2.shape[0])
    # ******************************************************
    # ********** UPDATE TEMPLATES EVERY 60SEC **************
    # ******************************************************
    # update only every 60 seconds
    print ("time_index: ", time_index)
    # Cat: TODO read length of time to update from CONFIG
    if (time_index%60!=0) or (time_index==0):
        return templates_old, wfs_array
    # forgetting factor ~ number of spikes
    # Cat: TODO: read from CONFIG file
    nu = 10
    # find max chans of templates
    max_chans = templates_in.ptp(1).argmax(0)
    # make new templates
    templates_new = np.zeros(templates_in.shape, 'float32')
    # get unit ids
    ids = torch.cat(d_gpu.neuron_array)
    units = np.arange(templates_in.shape[2])
    # Cat: TODO: read from CONFIG; # of spikes to be used for computing template
    n_spikes_min = 5000
    # Cat: TODO Parallelize this step
    for unit in units:
        # get saved waveforms and number of spikes across all chunks
        wfs_local = []
        n_spikes_local = 0
        for c in range(len(wfs_array)):
            wfs_local.append(wfs_array[c][unit])
            n_spikes_local += n_spikes_array[c][unit]
        wfs_local = np.vstack(wfs_local)
        # subsample waveforms; alignment is expensive
        idx_choice = np.random.choice(np.arange(wfs_local.shape[0]),
                                      size=min(wfs_local.shape[0], n_spikes_min))
        wfs_local = wfs_local[idx_choice]
        # Cat: TODO: we are not weighing the template correctly here;
        #      # of spikes is technically larger than # of waveforms tracked...
        n_spikes = n_spikes_local
        print ("wfs local: ", wfs_local.shape)
        # too few spikes: keep the previous template unchanged
        if n_spikes<2:
            templates_new[:, :, unit] = templates_in[:, :, unit]
            continue
        # align waveforms by finding best shifts (currently disabled)
        if False:
            mc = max_chans[unit]
            best_shifts = align_get_shifts_with_ref(wfs_local[:, :, mc], ref_template)
            wfs_local = shift_chans(wfs_local, best_shifts)
        # compute template as the median waveform
        template = np.median(wfs_local, axis=0).T
        # update templates; similar to Kilosort Eq (6).
        # In the first block of time just use the new spikes, since the
        # existing templates represent the mean over all time.
        if time_index==60:
            templates_new[:, :, unit] = template
        # use KS eq (6)
        else:
            t1 = templates_in[:, :, unit]*np.exp(-n_spikes/nu)
            t2 = (1-np.exp(-n_spikes/nu))*template
            templates_new[:, :, unit] = (t1+t2)
    # save template chunk for offline analysis only; guard the hard-coded
    # development-machine path so the pipeline doesn't crash elsewhere
    # (bug fix: the unconditional save raised FileNotFoundError on any
    # machine without this directory; the GPU version has it commented out)
    debug_dir = '/media/cat/1TB/liam/49channels/data1_allset/tmp/block_2/deconv/'
    if os.path.isdir(debug_dir):
        np.save(debug_dir + str(time_index)+'.npy', templates_new)
    return templates_new, []
|
paninski-lab/yass
|
src/yass/deconvolve/run_original.py
|
Python
|
apache-2.0
| 105,663
|
[
"NEURON"
] |
9f58c74fd8a6354db59c67fb2dc59e6e0d0817f612a43c58f21baff76c94398e
|
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2009 Rob G. Healey <robhealey1@gmail.com>
# Copyright (C) 2014 Vlada Perić <vlada.peric@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
from xml.parsers import expat
import datetime
import math
import os
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.const import PLUGINS_DIR, USER_PLUGINS, DATA_DIR
from gramps.gen.lib.gcalendar import (gregorian_ymd, hebrew_sdn)
#------------------------------------------------------------------------
#
# Support functions
#
#------------------------------------------------------------------------
def g2iso(dow):
    """Convert a Gramps day-of-week number to the ISO day-of-week number.

    Gramps numbers the week with Sunday = 1, while ISO 8601 uses Monday = 1.
    """
    return (dow - 2) % 7 + 1
def easter(year):
    """
    Return Easter Sunday of *year* as a "year/month/day" string.

    Gregorian computus after J.-M. Oudin (1940), as reprinted in the
    "Explanatory Supplement to the Astronomical Almanac",
    ed. P. K. Seidelmann (1992).  Ash Wednesday falls 46 days earlier.
    """
    century = year // 100
    golden = year % 19
    correction = (century - 17) // 25
    epact = (century - century // 4 - (century - correction) // 3
             + 19 * golden + 15) % 30
    epact = epact - (epact // 28) * (1 - (epact // 28) * (29 // (epact + 1))
                                     * ((21 - golden) // 11))
    weekday = (year + year // 4 + epact + 2 - century + century // 4) % 7
    days = epact - weekday
    month = 3 + (days + 40) // 44
    day = days + 28 - 31 * (month // 4)
    return "%d/%d/%d" % (year, month, day)
def julian_easter(year):
    """
    Return Eastern Orthodox Easter of *year*, expressed in the Gregorian
    calendar, as a "year/month/day" string.  Implements the Jean Meeus
    algorithm for the Julian computus, then applies the 13-day
    Julian-to-Gregorian shift.  Valid for years 1900-2099.
    """
    d = (19 * (year % 19) + 15) % 30
    e = (2 * (year % 4) + 4 * (year % 7) - d + 34) % 7
    julian_total = d + e + 114
    month, remainder = divmod(julian_total, 31)
    # +1 for day-of-month, +13 to convert Julian -> Gregorian calendar
    day = remainder + 1 + 13
    if month == 3 and day > 31:
        month, day = 4, day - 31
    elif month == 4 and day > 30:
        month, day = 5, day - 30
    return "%d/%d/%d" % (year, month, day)
def passover(year):
    """
    Return the Gregorian date string of Passover (15 Nissan) in *year*.
    """
    # Hebrew year running alongside the given Gregorian year
    sdn = hebrew_sdn(year + 3760, 8, 15)
    return "%d/%d/%d" % gregorian_ymd(sdn)
def hanuka(year):
    """
    Return the Gregorian date string of the first day of Hanuka (25 Kislev)
    in *year*.  Passover cannot be used as an offset because the Hebrew year
    has six possible lengths.
    """
    # +3761 (not +3760): Hanuka falls in Nov/Dec of the previous Hebrew year
    sdn = hebrew_sdn(year + 3761, 3, 25)
    return "%d/%d/%d" % gregorian_ymd(sdn)
def dst(year, area="us"):
    """
    Return Daylight Saving Time start/stop dates for a given area.

    @param year: Gregorian year to compute the transitions for.
    @type year: int
    @param area: "us" (valid 1976-2099) or "eu" (valid 1996-2099).
    @type area: str
    @return: ("year/month/day", "year/month/day") start and stop dates.
    @raise ValueError: if *area* is not "us" or "eu".
    """
    if area == "us":
        if year > 2006:
            # Since 2007: second Sunday in March to first Sunday in November
            start = "%d/%d/%d" % (year, 3, 14 - (math.floor(1 + year * 5 / 4) % 7)) # March
            stop = "%d/%d/%d" % (year, 11, 7 - (math.floor(1 + year * 5 / 4) % 7)) # November
        else:
            # 1976-2006: first Sunday in April to last Sunday in October
            start = "%d/%d/%d" % (year, 4, (2 + 6 * year - math.floor(year / 4)) % 7 + 1) # April
            stop = "%d/%d/%d" % (year, 10, (31 - (math.floor(year * 5 / 4) + 1) % 7)) # October
    elif area == "eu":
        # EU: last Sunday in March to last Sunday in October
        start = "%d/%d/%d" % (year, 3, (31 - (math.floor(year * 5 / 4) + 4) % 7)) # March
        stop = "%d/%d/%d" % (year, 10, (31 - (math.floor(year * 5 / 4) + 1) % 7)) # Oct
    else:
        # bug fix: an unknown area previously fell through to an
        # UnboundLocalError on the return; fail with a clear error instead
        raise ValueError("unknown DST area %r; expected 'us' or 'eu'" % (area,))
    return (start, stop)
def dow(y, m, d):
    """Return the ISO day of week (Mon=1 .. Sun=7) for year/month/day."""
    the_date = datetime.date(y, m, d)
    return the_date.isoweekday()
def cmp(a, b):
    """
    Three-way comparison: -1 if a < b, 0 if equal, +1 if a > b.
    Replacement for the builtin ``cmp`` removed in Python 3.
    """
    greater = a > b
    less = a < b
    return int(greater) - int(less)
#------------------------------------------------------------------------
#
# HolidayTable
#
#------------------------------------------------------------------------
class HolidayTable:
    """
    HolidayTable is a class which provides holidays for various
    countries and years.
    """
    # Class-level caches shared by every instance: the holidays.xml file
    # paths found on disk, and the (translated) country names they contain.
    # Both are populated once, by the first HolidayTable() constructed.
    __holiday_files = []
    __countries = []

    def __init__(self):
        """
        Find the holiday files and load the countries if it has not already
        been done.
        """
        if( not HolidayTable.__holiday_files ):
            self.__find_holiday_files()
        if( not HolidayTable.__countries ):
            self.__build_country_list()
        # Initialize the holiday table to be empty
        self.__holidays = {}
        self.__init_table()

    def __find_holiday_files(self):
        """
        Looks in multiple places for holidays.xml files.
        It will search for the file in the user's plugin directories first,
        then it will search in the program's plugins directories.
        """
        holiday_file = 'holidays.xml'
        # Look for holiday files in the user plugins directory and all
        # subdirectories.
        for (dirpath, dirnames, filenames) in os.walk(USER_PLUGINS):
            holiday_full_path = os.path.join(dirpath, holiday_file)
            if os.path.exists(holiday_full_path):
                HolidayTable.__holiday_files.append(holiday_full_path)
        # Look for holiday files in the installation data directory and all
        # subdirectories.
        for (dirpath, dirnames, filenames) in os.walk(DATA_DIR):
            holiday_full_path = os.path.join(dirpath, holiday_file)
            if os.path.exists(holiday_full_path):
                HolidayTable.__holiday_files.append(holiday_full_path)

    def __build_country_list(self):
        """ Generate the list of countries that have holiday information. """
        for holiday_file_path in HolidayTable.__holiday_files:
            parser = _Xml2Obj()
            root_element = parser.parse(holiday_file_path)
            for country_element in root_element.get_children():
                if country_element.get_name() == "country":
                    country_name = country_element.get_attribute("name")
                    # NOTE(review): membership is tested on the raw name but
                    # the translated name is stored — duplicates could slip
                    # through when translations differ; confirm intent
                    if country_name not in HolidayTable.__countries:
                        HolidayTable.__countries.append(_(country_name))

    def __init_table(self):
        """ Initialize the holiday table structure. """
        # self.__holidays[month][day] is a list of holiday name strings
        for month in range(1, 13):
            self.__holidays[month] = {}
            for day in range(1, 32):
                self.__holidays[month][day] = []

    def get_countries(self):
        """
        Get all the country names that holidays are available for.

        @return: list of country name strings
        """
        return HolidayTable.__countries

    def load_holidays(self, year, country):
        """
        Load the holiday table for the specified year and country.
        This must be called before get_holidays().

        @param year: The year for which the holidays should be loaded.
            Example: 2010
        @type year: int
        @param country: The country for which the holidays should be loaded.
            Example: "United States"
        @type country: str
        @return: nothing
        """
        self.__init_table()
        for holiday_file_path in HolidayTable.__holiday_files:
            parser = _Xml2Obj()
            element = parser.parse(holiday_file_path)
            calendar = _Holidays(element, country)
            # walk every day of the year and record the matching holidays
            date = datetime.date(year, 1, 1)
            while date.year == year:
                holidays = calendar.check_date(date)
                for text in holidays:
                    self.__holidays[date.month][date.day].append(_(text))
                date = date.fromordinal(date.toordinal() + 1)

    def get_holidays(self, month, day):
        """
        Get the holidays for the given day of the year.

        @param month: The month for the requested holidays.
            Example: 1
        @type month: int
        @param day: The day for the requested holidays.
            Example: 1
        @type day: int
        @return: An array of strings with holiday names.
        @return type: [str]
        """
        return self.__holidays[month][day]
#------------------------------------------------------------------------
#
# _Element
#
#------------------------------------------------------------------------
class _Element:
""" A parsed XML element """
def __init__(self, name, attributes):
'Element constructor'
# The element's tag name
self.__name = name
# The element's attribute dictionary
self.__attributes = attributes
# The element's child element list (sequence)
self.__children = []
def add_child(self, element):
'Add a reference to a child element'
self.__children.append(element)
def get_attribute(self, key):
'Get an attribute value'
return self.__attributes.get(key)
def get_attributes(self):
'Get all the attributes'
return self.__attributes
def get_name(self):
""" Get the name of this element. """
return self.__name
def get_children(self):
""" Get the children elements for this element. """
return self.__children
#------------------------------------------------------------------------
#
# _Xml2Obj
#
#------------------------------------------------------------------------
class _Xml2Obj:
    """Build an _Element tree from an XML file using the expat SAX parser."""

    def __init__(self):
        # root of the finished tree; ancestors of the element being parsed
        self.root = None
        self.nodeStack = []

    def start_element(self, name, attributes):
        'SAX handler: open a new element and attach it to its parent'
        node = _Element(name, attributes)
        if self.nodeStack:
            self.nodeStack[-1].add_child(node)
        else:
            # first element seen becomes the document root
            self.root = node
        self.nodeStack.append(node)

    def end_element(self, name):
        'SAX handler: current element is complete, pop it off the stack'
        self.nodeStack = self.nodeStack[:-1]

    def parse(self, filename):
        'Parse *filename* and return the root _Element of the tree'
        parser = expat.ParserCreate()
        # SAX event handlers
        parser.StartElementHandler = self.start_element
        parser.EndElementHandler = self.end_element
        # Parse the XML File
        with open(filename, 'rb') as xml_file:
            parser.ParseFile(xml_file)
        return self.root
#------------------------------------------------------------------------
#
# _Holidays
#
#------------------------------------------------------------------------
class _Holidays:
    """ Class used to read XML holidays to add to calendar. """
    MONTHS = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
              'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
    DAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
    WORKDAY = list(range(5)) # indexes into above
    WEEKEND = (5, 6) # indexes into above

    def __init__(self, elements, country="US"):
        # `elements` is the parsed holidays.xml root (_Element tree); the
        # date rules for `country` are extracted into self.dates
        self.debug = 0
        self.elements = elements
        self.country = country
        self.dates = []
        self.initialize()

    def set_country(self, country):
        """ Set the country of holidays to read """
        self.country = country
        self.dates = []
        self.initialize()

    def initialize(self):
        """ Parse the holiday date XML items """
        for country_set in self.elements.get_children():
            if country_set.get_name() == "country" and \
               _(country_set.get_attribute("name")) == self.country:
                for date in country_set.get_children():
                    if date.get_name() == "date":
                        # rule defaults; overridden by XML attributes below
                        data = {"value" : "",
                                "name" : "",
                                "offset": "",
                                "type": "",
                                "if": "",
                                } # defaults
                        for attr in date.get_attributes():
                            data[attr] = date.get_attribute(attr)
                        self.dates.append(data)

    def get_daynames(self, year, month, dayname):
        """ Get the items for a particular year/month and day of week """
        if self.debug:
            print("%s's in %d %d..." % (dayname, month, year))
        # index 0 is a placeholder so retval[1] is the first matching day
        retval = [0]
        day_of_week = self.DAYS.index(dayname)
        for day in range(1, 32):
            try:
                date = datetime.date(year, month, day)
            except ValueError:
                # day does not exist in this month (e.g. Feb 30)
                continue
            if date.weekday() == day_of_week:
                retval.append(day)
        if self.debug:
            print("day_of_week=", day_of_week, "days=", retval)
        return retval

    def check_date(self, date):
        """ Return items that match rules """
        retval = []
        for rule in self.dates:
            if self.debug:
                print("Checking ", rule["name"], "...")
            # offset is either a signed integer (days), or a dayname /
            # "workday" / "weekend" keyword handled further below
            offset = 0
            if rule["offset"] != "":
                if rule["offset"].isdigit():
                    offset = int(rule["offset"])
                elif rule["offset"][0] in ["-", "+"] and \
                     rule["offset"][1:].isdigit():
                    offset = int(rule["offset"])
                else:
                    # must be a dayname or "workday"
                    offset = rule["offset"]
            if rule["value"].startswith('>'):
                # eval exp -> year/num[/day[/month]]
                # NOTE(review): eval of XML-supplied expressions — rules come
                # from the app's holidays.xml, but do not load untrusted files.
                # The rule dict is mutated in place here.
                y, m, d = date.year, date.month, date.day
                rule["value"] = eval(rule["value"][1:])
                if self.debug:
                    print("rule['value']:", rule["value"])
            if rule["value"].count("/") == 3: # year/num/day/month, "3rd wednesday in april"
                y, num, dayname, mon = rule["value"].split("/")
                if y == "*":
                    y = date.year
                else:
                    y = int(y)
                if mon.isdigit():
                    m = int(mon)
                elif mon == "*":
                    m = date.month
                elif mon in self.MONTHS:
                    m = self.MONTHS.index(mon) + 1
                # num indexes into the month's matching weekdays (1-based)
                dates_of_dayname = self.get_daynames(y, m, dayname)
                if self.debug:
                    print("num =", num)
                d = dates_of_dayname[int(num)]
            elif rule["value"].count("/") == 2: # year/month/day
                y, m, d = rule["value"].split("/")
                if y == "*":
                    y = date.year
                else:
                    y = int(y)
                if m == "*":
                    m = date.month
                elif m in self.MONTHS:
                    m = self.MONTHS.index(m) + 1
                else:
                    m = int(m)
                if d == "*":
                    d = date.day
                else:
                    d = int(d)
            ndate = datetime.date(y, m, d)
            if self.debug:
                print(ndate, offset, type(offset))
            if isinstance(offset, int):
                # plain day offset
                if offset != 0:
                    ndate = ndate.fromordinal(ndate.toordinal() + offset)
            elif isinstance(offset, str):
                # keyword offset: walk forward (+) or backward (-) in time
                direction = 1
                if offset[0] == "-":
                    direction = -1
                    offset = offset[1:]
                elif offset[0] == "+":
                    direction = 1
                    offset = offset[1:]
                if offset == "workday":
                    # next workday you come to, including this one
                    day_of_week = self.WORKDAY
                    ordinal = ndate.toordinal()
                    while ndate.fromordinal(ordinal).weekday() not in day_of_week:
                        ordinal += direction
                    ndate = ndate.fromordinal(ordinal)
                elif offset == "weekend":
                    # next weekend you come to, including this one
                    day_of_week = self.WEEKEND
                    ordinal = ndate.toordinal()
                    while ndate.fromordinal(ordinal).weekday() not in day_of_week:
                        ordinal += direction
                    ndate = ndate.fromordinal(ordinal)
                elif offset in self.DAYS:
                    # next tuesday you come to, including this one
                    day_of_week = self.DAYS.index(offset)
                    ordinal = ndate.toordinal()
                    while ndate.fromordinal(ordinal).weekday() != day_of_week:
                        ordinal += direction
                    ndate = ndate.fromordinal(ordinal)
            if self.debug:
                print("ndate:", ndate, "date:", date)
            if ndate == date:
                # optional guard expression; same eval caveat as above
                if rule["if"] != "":
                    if not eval(rule["if"]):
                        continue
                retval.append(rule["name"])
        return retval
|
jralls/gramps
|
gramps/plugins/lib/libholiday.py
|
Python
|
gpl-2.0
| 18,137
|
[
"Brian"
] |
67fb65dc098d17cdc50d1ba8052ef97a83d87e159bc40b4c91da8dd3fe29f6a9
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
""" An example python-meep simulation of a dielectric sphere scattering a broadband impulse,
illustrating the use of the convenient functions provided by meep_utils.py
(c) 2014 Filip Dominec, see http://f.dominec.eu/meep for more information
The simulated models of structures are stored in the `metamaterial_models.py' module; see its code for the
description of each structure and command-line parameters accepted. Several of these parameters are not
passed to the model; see the definition of `process_param()' in `meep_utils.py'.
"""
import time, sys, os
import numpy as np
from scipy.constants import c, epsilon_0, mu_0
import meep_utils, meep_materials, metamaterial_models
from meep_utils import in_sphere, in_xcyl, in_ycyl, in_zcyl, in_xslab, in_yslab, in_zslab, in_ellipsoid
import meep_mpi as meep
#import meep
# Model selection: instantiate the structure model named on the command line
# (default 'default'), passing the remaining CLI parameters through to it
model_param = meep_utils.process_param(sys.argv[1:])
model = metamaterial_models.models[model_param.get('model', 'default')](**model_param)
## Initialize volume, structure and the fields according to the model
vol = meep.vol3d(model.size_x, model.size_y, model.size_z, 1./model.resolution)
vol.center_origin()
# PML absorbing layers only on the Z faces; X/Y get Bloch boundaries below
s = meep_utils.init_structure(model=model, volume=vol, pml_axes=meep.Z)
f = meep.fields(s)
# Define the Bloch-periodic boundaries (any transversal component of k-vector is allowed)
f.use_bloch(meep.X, getattr(model, 'Kx', 0) / (-2*np.pi))
f.use_bloch(meep.Y, getattr(model, 'Ky', 0) / (-2*np.pi))
# Add the field source (temporal shape): a broadband Gaussian pulse unless
# the model fixes a single 'frequency', then a continuous-wave source
if not getattr(model, 'frequency', None):
    #src_time_type = meep.band_src_time(model.src_freq/c, model.src_width/c, model.simtime*c/1.1)
    src_time_type = meep.gaussian_src_time(model.src_freq/c, model.src_width/c)
else:
    src_time_type = meep.continuous_src_time(getattr(model, 'frequency', None)/c)
# Spatial source shape: the full XY cross-section just above the lower PML
srcvolume = meep.volume(
    meep.vec(-model.size_x/2, -model.size_y/2, -model.size_z/2+model.pml_thickness),
    meep.vec( model.size_x/2, model.size_y/2, -model.size_z/2+model.pml_thickness))
## Instead of a plain f.add_volume_source(meep.Ex, src_time_type, srcvolume),
## a custom complex amplitude callback (e.g. for exciting a waveguide mode)
## is defined and registered below.
class SrcAmplitudeFactor(meep.Callback):
    """Complex amplitude prefactor evaluated by MEEP at each source point.

    Multiplying the source by exp(-i(Kx*x + Ky*y)) tilts the emitted plane
    wave, enabling oblique incidence consistent with the Bloch boundaries.
    """
    ## The source amplitude is complex -> phase factor modifies its direction
    ## todo: implement in MEEP: we should define an AmplitudeVolume() object and reuse it for monitors later
    def __init__(self, Kx=0, Ky=0):
        # Kx, Ky: transversal wavevector components of the desired plane wave
        meep.Callback.__init__(self)
        (self.Kx, self.Ky) = Kx, Ky

    def complex_vec(self, vec):  ## Note: the 'vec' coordinates are _relative_ to the source center
        # (oblique) plane wave source:
        return np.exp(-1j*(self.Kx*vec.x() + self.Ky*vec.y()))
        # (oblique) Gaussian beam source:
        #return np.exp(-1j*(self.Kx*vec.x() + self.Ky*vec.y()) - (vec.x()/100e-6)**2 - (vec.y()/100e-6)**2)
# Register the amplitude callback with MEEP (ownership passes to C++ side),
# then attach the Ex volume source modulated by it.
af = SrcAmplitudeFactor(Kx=getattr(model, 'Kx', 0), Ky=getattr(model, 'Ky', 0))
meep.set_AMPL_Callback(af.__disown__())
#f.add_volume_source(meep.Ex, src_time_type, srcvolume, meep.AMPL)
f.add_volume_source(meep.Ex, src_time_type, srcvolume, meep.AMPL)

## Define monitor planes, and the field output for visualisation (controlled by keywords in the 'comment' parameter)
# Two Ex/Hy monitor-plane pairs (z1 below, z2 above the structure) record the
# waveforms later used to retrieve reflection (s11) and transmission (s12).
monitor_options = {'size_x':model.size_x, 'size_y':model.size_y, 'resolution':model.resolution, 'Kx':getattr(model, 'Kx', 0), 'Ky':getattr(model, 'Ky', 0)}
monitor1_Ex = meep_utils.AmplitudeMonitorPlane(f, comp=meep.Ex, z_position=model.monitor_z1, **monitor_options)
monitor1_Hy = meep_utils.AmplitudeMonitorPlane(f, comp=meep.Hy, z_position=model.monitor_z1, **monitor_options)
monitor2_Ex = meep_utils.AmplitudeMonitorPlane(f, comp=meep.Ex, z_position=model.monitor_z2, **monitor_options)
monitor2_Hy = meep_utils.AmplitudeMonitorPlane(f, comp=meep.Hy, z_position=model.monitor_z2, **monitor_options)

# Optional field snapshots/animations, enabled by keywords in model.comment.
slices = []
#if not "noepssnapshot" in str(model.comment):
#slices += [meep_utils.Slice(model=model, field=f, components=(meep.Dielectric), at_t=0, name='EPS')]
if "narrowfreq-snapshots" in str(model.comment):
    slices += [meep_utils.Slice(model=model, field=f, components=meep.Ex, at_y=0, at_t=np.inf,
            name=('At%.3eHz'%getattr(model, 'frequency', None)) if getattr(model, 'frequency', None) else '',
            outputpng=True, outputvtk=False)]
if "fieldevolution" in str(model.comment):
    slices += [meep_utils.Slice(model=model, field=f, components=(meep.Ex), at_y=0, name='FieldEvolution',
            min_timestep=.1/model.src_freq, outputgif=True, outputhdf=True, outputvtk=True)]
if "snapshote" in str(model.comment):
    slices += [meep_utils.Slice(model=model, field=f, components=(meep.Ex, meep.Ey, meep.Ez), at_t=np.inf, name='SnapshotE')]

## Run the FDTD simulation or the frequency-domain solver
if not getattr(model, 'frequency', None):  ## time-domain computation
    f.step(); timer = meep_utils.Timer(simtime=model.simtime); meep.quiet(True) # use custom progress messages
    while (f.time()/c < model.simtime):  # timestepping cycle
        f.step()
        timer.print_progress(f.time()/c)
        # record the monitor-plane amplitudes at every timestep
        for monitor in (monitor1_Ex, monitor1_Hy, monitor2_Ex, monitor2_Hy): monitor.record(field=f)
        for slice_ in slices: slice_.poll(f.time()/c)
    for slice_ in slices: slice_.finalize()
    meep_utils.notify(model.simulation_name, run_time=timer.get_time())
else:  ## frequency-domain computation
    f.solve_cw(getattr(model, 'MaxTol',0.001), getattr(model, 'MaxIter', 5000), getattr(model, 'BiCGStab', 8))
    # a single steady-state record per monitor suffices in FDFD mode
    for monitor in (monitor1_Ex, monitor1_Hy, monitor2_Ex, monitor2_Hy): monitor.record(field=f)
    for slice_ in slices: slice_.finalize()
    meep_utils.notify(model.simulation_name)
## Get the reflection and transmission of the structure
# Post-processing runs on the master MPI rank only; other ranks skip straight
# to the final barrier.
if meep.my_rank() == 0:
    #t = monitor1_Ex.get_time()
    #Ex1, Hy1, Ex2, Hy2 = [mon.get_field_waveform() for mon in (monitor1_Ex, monitor1_Hy, monitor2_Ex, monitor2_Hy)]
    freq, s11, s12, columnheaderstring = meep_utils.get_s_parameters(monitor1_Ex, monitor1_Hy, monitor2_Ex, monitor2_Hy,
            frequency_domain=True if getattr(model, 'frequency', None) else False,
            frequency=getattr(model, 'frequency', None),  ## procedure compatible with both FDTD and FDFD
            intf=getattr(model, 'interesting_frequencies', [0, model.src_freq+model.src_width]),  ## clip the frequency range for plotting
            pad_zeros=1.0,  ## speed-up FFT, and stabilize eff-param retrieval
            Kx=getattr(model, 'Kx', 0), Ky=getattr(model, 'Ky', 0),  ## enable oblique incidence (works only if monitors in vacuum)
            eps1=getattr(model, 'mon1eps', 1), eps2=getattr(model, 'mon2eps', 1))  ## enable monitors inside dielectrics
    if len(s11) > 0:
        # Save 5 columns: freq, amplitude/phase for reflection/transmission:
        meep_utils.savetxt(fname=model.simulation_name+".dat", fmt="%.6e",
                X=zip(freq, np.abs(s11), np.angle(s11), np.abs(s12), np.angle(s12)),
                header=model.parameterstring+columnheaderstring)  ## Export header
    # Remember the last output name so companion plotting scripts can find it.
    with open("./last_simulation_name.dat", "w") as outfile: outfile.write(model.simulation_name)
    # FIX: was a Python-2-only 'print' statement (syntax error under Python 3);
    # the single-argument parenthesized form behaves identically on 2 and 3.
    # Also corrected the misspelling 'succesfully'.
    print("Scattering parameters successfully saved to " + model.simulation_name + ".dat")
meep.all_wait()  # Wait until all file operations are finished
|
FilipDominec/python-meep-utils
|
scatter.py
|
Python
|
gpl-2.0
| 7,750
|
[
"Gaussian",
"exciting"
] |
e9f13dc4d15c239a40658140885affb05a052204c71058d6a628452c0da705c6
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with functions that call the four main :py:mod:`driver`
functions: :py:mod:`driver.energy`, :py:mod:`driver.optimize`,
:py:mod:`driver.response`, and :py:mod:`driver.frequency`.
"""
import os
import re
import math
import pickle
import collections
from psi4.driver import constants
from psi4.driver import p4util
from psi4.driver.driver import *
# never import aliases into this file
#########################
## Start of Database ##
#########################

# Module-level result caches refilled by every database() call:
#   DB_RGT: reagent name -> dict of tabulated PSI variables for that reagent
#   DB_RXN: reaction name -> dict of tabulated reaction values
DB_RGT = {}
DB_RXN = {}
def database(name, db_name, **kwargs):
    r"""Function to access the molecule objects and reference energies of
    popular chemical databases.

    :aliases: db()

    :returns: (*float*) Mean absolute deviation of the database in kcal/mol

    :PSI variables:

    .. hlist::
       :columns: 1

       * :psivar:`db_name DATABASE MEAN SIGNED DEVIATION <db_nameDATABASEMEANSIGNEDDEVIATION>`
       * :psivar:`db_name DATABASE MEAN ABSOLUTE DEVIATION <db_nameDATABASEMEANABSOLUTEDEVIATION>`
       * :psivar:`db_name DATABASE ROOT-MEAN-SQUARE DEVIATION <db_nameDATABASEROOT-MEAN-SQUARESIGNEDDEVIATION>`

    * Python dictionaries of results accessible as ``DB_RGT`` and ``DB_RXN``.

    .. note:: It is very easy to make a database from a collection of xyz files
       using the script :source:`share/scripts/ixyz2database.py`.
       See :ref:`sec:createDatabase` for details.

    .. caution:: Some features are not yet implemented. Buy a developer some coffee.

       - In sow/reap mode, use only global options (e.g., the local option set by ``set scf scf_type df`` will not be respected).

    .. note:: To access a database that is not embedded in a |PSIfour|
       distribution, add the path to the directory containing the database
       to the environment variable :envvar:`PYTHONPATH`.

    :type name: string
    :param name: ``'scf'`` || ``'sapt0'`` || ``'ccsd(t)'`` || etc.

        First argument, usually unlabeled. Indicates the computational method
        to be applied to the database. May be any valid argument to
        :py:func:`~driver.energy`.

    :type db_name: string
    :param db_name: ``'BASIC'`` || ``'S22'`` || ``'HTBH'`` || etc.

        Second argument, usually unlabeled. Indicates the requested database
        name, matching (case insensitive) the name of a python file in
        ``psi4/share/databases`` or :envvar:`PYTHONPATH`. Consult that
        directory for available databases and literature citations.

    :type func: :ref:`function <op_py_function>`
    :param func: |dl| ``energy`` |dr| || ``optimize`` || ``cbs``

        Indicates the type of calculation to be performed on each database
        member. The default performs a single-point ``energy('name')``, while
        ``optimize`` perfoms a geometry optimization on each reagent, and
        ``cbs`` performs a compound single-point energy. If a nested series
        of python functions is intended (see :ref:`sec:intercalls`), use
        keyword ``db_func`` instead of ``func``.

    :type mode: string
    :param mode: |dl| ``'continuous'`` |dr| || ``'sow'`` || ``'reap'``

        Indicates whether the calculations required to complete the
        database are to be run in one file (``'continuous'``) or are to be
        farmed out in an embarrassingly parallel fashion
        (``'sow'``/``'reap'``). For the latter, run an initial job with
        ``'sow'`` and follow instructions in its output file.

    :type cp: :ref:`boolean <op_py_boolean>`
    :param cp: ``'on'`` || |dl| ``'off'`` |dr|

        Indicates whether counterpoise correction is employed in computing
        interaction energies. Use this option and NOT the :py:func:`~wrappers.cp`
        function for BSSE correction in database(). Option available
        (See :ref:`sec:availableDatabases`) only for databases of bimolecular complexes.

    :type rlxd: :ref:`boolean <op_py_boolean>`
    :param rlxd: ``'on'`` || |dl| ``'off'`` |dr|

        Indicates whether correction for deformation energy is
        employed in computing interaction energies. Option available
        (See :ref:`sec:availableDatabases`) only for databases of bimolecular complexes
        with non-frozen monomers, e.g., HBC6.

    :type symm: :ref:`boolean <op_py_boolean>`
    :param symm: |dl| ``'on'`` |dr| || ``'off'``

        Indicates whether the native symmetry of the database reagents is
        employed (``'on'``) or whether it is forced to :math:`C_1` symmetry
        (``'off'``). Some computational methods (e.g., SAPT) require no
        symmetry, and this will be set by database().

    :type zpe: :ref:`boolean <op_py_boolean>`
    :param zpe: ``'on'`` || |dl| ``'off'`` |dr|

        Indicates whether zero-point-energy corrections are appended to
        single-point energy values. Option valid only for certain
        thermochemical databases. Disabled until Hessians ready.

    :type benchmark: string
    :param benchmark: |dl| ``'default'`` |dr| || ``'S22A'`` || etc.

        Indicates whether a non-default set of reference energies, if
        available (See :ref:`sec:availableDatabases`), are employed for the
        calculation of error statistics.

    :type tabulate: array of strings
    :param tabulate: |dl| ``[]`` |dr| || ``['scf total energy', 'natom']`` || etc.

        Indicates whether to form tables of variables other than the
        primary requested energy. Available for any PSI variable.

    :type subset: string or array of strings
    :param subset:

        Indicates a subset of the full database to run. This is a very
        flexible option and can be used in three distinct ways, outlined
        below. Note that two take a string and the last takes an array.
        See `Available Databases`_ for available values.

        * ``'small'`` || ``'large'`` || ``'equilibrium'``

          Calls predefined subsets of the requested database, either
          ``'small'``, a few of the smallest database members,
          ``'large'``, the largest of the database members, or
          ``'equilibrium'``, the equilibrium geometries for a database
          composed of dissociation curves.

        * ``'BzBz_S'`` || ``'FaOOFaON'`` || ``'ArNe'`` || ``'HB'`` || etc.

          For databases composed of dissociation curves, or otherwise
          divided into subsets, individual curves and subsets can be
          called by name. Consult the database python files for available
          molecular systems (case insensitive).

        * ``[1,2,5]`` || ``['1','2','5']`` || ``['BzMe-3.5', 'MeMe-5.0']`` || etc.

          Specify a list of database members to run. Consult the
          database python files for available molecular systems. This
          is the only portion of database input that is case sensitive;
          choices for this keyword must match the database python file.

    :examples:

    >>> # [1] Two-stage SCF calculation on short, equilibrium, and long helium dimer
    >>> db('scf','RGC10',cast_up='sto-3g',subset=['HeHe-0.85','HeHe-1.0','HeHe-1.5'], tabulate=['scf total energy','natom'])

    >>> # [2] Counterpoise-corrected interaction energies for three complexes in S22
    >>> # Error statistics computed wrt an old benchmark, S22A
    >>> database('mp2','S22',cp=1,subset=[16,17,8],benchmark='S22A')

    >>> # [3] SAPT0 on the neon dimer dissociation curve
    >>> db('sapt0',subset='NeNe',cp=0,symm=0,db_name='RGC10')

    >>> # [4] Optimize system 1 in database S22, producing tables of scf and mp2 energy
    >>> db('mp2','S22',db_func=optimize,subset=[1], tabulate=['mp2 total energy','current energy'])

    >>> # [5] CCSD on the smallest systems of HTBH, a hydrogen-transfer database
    >>> database('ccsd','HTBH',subset='small', tabulate=['ccsd total energy', 'mp2 total energy'])

    """
    # NOTE(review): despite the name, `lowername` is NOT lowercased here; the
    # sapt regex below matches against it, so an uppercase 'SAPT0' would skip
    # the non-supramolecular branch -- confirm intended.
    lowername = name  #TODO
    kwargs = p4util.kwargs_lower(kwargs)

    # Wrap any positional arguments into kwargs (for intercalls among wrappers)
    if not('name' in kwargs) and name:
        kwargs['name'] = name #.lower()
    if not('db_name' in kwargs) and db_name:
        kwargs['db_name'] = db_name

    # Establish function to call (energy by default; db_func wins over func)
    func = kwargs.pop('db_func', kwargs.pop('func', energy))
    kwargs['db_func'] = func

    # Bounce to CP if bsse kwarg (someday)
    if kwargs.get('bsse_type', None) is not None:
        raise ValidationError("""Database: Cannot specify bsse_type for database. Use the cp keyword withing database instead.""")

    # When True, a reagent whose optimization exceeds cycles is recorded as 0.0
    # instead of aborting the whole database run (checked again in FAIL below).
    allowoptexceeded = kwargs.get('allowoptexceeded', False)

    # Stash options mutated below so they can be restored before returning
    optstash = p4util.OptionsState(
        ['WRITER_FILE_LABEL'],
        ['SCF', 'REFERENCE'])

    # Wrapper wholly defines molecule. discard any passed-in
    kwargs.pop('molecule', None)

    # Paths to search for database files: here + PSIPATH + library + PYTHONPATH
    db_paths = []
    db_paths.append(os.getcwd())
    db_paths.extend(os.environ.get('PSIPATH', '').split(os.path.pathsep))
    db_paths.append(os.path.join(core.get_datadir(), 'databases'))
    db_paths.append(os.path.dirname(__file__))
    db_paths = list(map(os.path.abspath, db_paths))
    sys.path[1:1] = db_paths
    # TODO this should be modernized a la interface_cfour

    # Define path and load module for requested database
    database = p4util.import_ignorecase(db_name)
    if database is None:
        core.print_out('\nPython module for database %s failed to load\n\n' % (db_name))
        core.print_out('\nSearch path that was tried:\n')
        core.print_out(", ".join(map(str, sys.path)))
        raise ValidationError("Python module loading problem for database " + str(db_name))
    else:
        # Required members of every database module
        dbse = database.dbse
        HRXN = database.HRXN
        ACTV = database.ACTV
        RXNM = database.RXNM
        BIND = database.BIND
        TAGL = database.TAGL
        GEOS = database.GEOS
        try:
            DATA = database.DATA
        except AttributeError:
            DATA = {}

    user_writer_file_label = core.get_global_option('WRITER_FILE_LABEL')
    user_reference = core.get_global_option('REFERENCE')

    # Configuration based upon e_name & db_name options
    # Force non-supramolecular if needed (e.g., SAPT needs dimer-based ACTV)
    if not hasattr(lowername, '__call__') and re.match(r'^.*sapt', lowername):
        try:
            database.ACTV_SA
        except AttributeError:
            raise ValidationError('Database %s not suitable for non-supramolecular calculation.' % (db_name))
        else:
            ACTV = database.ACTV_SA

    # Force open-shell if needed
    openshell_override = 0
    if user_reference in ['RHF', 'RKS']:
        try:
            database.isOS
        except AttributeError:
            pass
        else:
            if p4util.yes.match(str(database.isOS)):
                openshell_override = 1
                core.print_out('\nSome reagents in database %s require an open-shell reference; will be reset to UHF/UKS as needed.\n' % (db_name))

    # Configuration based upon database keyword options
    # Option symmetry- whether symmetry treated normally or turned off (currently req'd for dfmp2 & dft)
    db_symm = kwargs.get('symm', True)
    symmetry_override = 0
    if db_symm is False:
        symmetry_override = 1
    elif db_symm is True:
        pass
    else:
        raise ValidationError("""Symmetry mode '%s' not valid.""" % (db_symm))

    # Option mode of operation- whether db run in one job or files farmed out
    db_mode = kwargs.pop('db_mode', kwargs.pop('mode', 'continuous')).lower()
    kwargs['db_mode'] = db_mode
    if db_mode == 'continuous':
        pass
    elif db_mode == 'sow':
        pass
    elif db_mode == 'reap':
        db_linkage = kwargs.get('linkage', None)
        if db_linkage is None:
            raise ValidationError("""Database execution mode 'reap' requires a linkage option.""")
    else:
        raise ValidationError("""Database execution mode '%s' not valid.""" % (db_mode))

    # Option counterpoise- whether for interaction energy databases run in bsse-corrected or not
    db_cp = kwargs.get('cp', False)
    if db_cp is True:
        try:
            database.ACTV_CP
        except AttributeError:
            raise ValidationError("""Counterpoise correction mode 'yes' invalid for database %s.""" % (db_name))
        else:
            ACTV = database.ACTV_CP
    elif db_cp is False:
        pass
    else:
        raise ValidationError("""Counterpoise correction mode '%s' not valid.""" % (db_cp))

    # Option relaxed- whether for non-frozen-monomer interaction energy databases include deformation correction or not?
    db_rlxd = kwargs.get('rlxd', False)
    if db_rlxd is True:
        if db_cp is True:
            try:
                database.ACTV_CPRLX
                database.RXNM_CPRLX
            except AttributeError:
                raise ValidationError('Deformation and counterpoise correction mode \'yes\' invalid for database %s.' % (db_name))
            else:
                ACTV = database.ACTV_CPRLX
                RXNM = database.RXNM_CPRLX
        elif db_cp is False:
            try:
                database.ACTV_RLX
            except AttributeError:
                raise ValidationError('Deformation correction mode \'yes\' invalid for database %s.' % (db_name))
            else:
                ACTV = database.ACTV_RLX
    elif db_rlxd is False:
        #elif no.match(str(db_rlxd)):
        pass
    else:
        raise ValidationError('Deformation correction mode \'%s\' not valid.' % (db_rlxd))

    # Option zero-point-correction- whether for thermochem databases jobs are corrected by zpe
    db_zpe = kwargs.get('zpe', False)
    if db_zpe is True:
        raise ValidationError('Zero-point-correction mode \'yes\' not yet implemented.')
    elif db_zpe is False:
        pass
    else:
        raise ValidationError('Zero-point-correction \'mode\' %s not valid.' % (db_zpe))

    # Option benchmark- whether error statistics computed wrt alternate reference energies
    db_benchmark = 'default'
    if 'benchmark' in kwargs:
        db_benchmark = kwargs['benchmark']
        if db_benchmark.lower() == 'default':
            pass
        else:
            BIND = p4util.getattr_ignorecase(database, 'BIND_' + db_benchmark)
            if BIND is None:
                raise ValidationError('Special benchmark \'%s\' not available for database %s.' % (db_benchmark, db_name))

    # Option tabulate- whether tables of variables other than primary energy method are formed
    # TODO db(func=cbs,tabulate=[non-current-energy]) # broken
    db_tabulate = []
    if 'tabulate' in kwargs:
        db_tabulate = kwargs['tabulate']

    # Option subset- whether all of the database or just a portion is run
    db_subset = HRXN
    if 'subset' in kwargs:
        db_subset = kwargs['subset']
    if isinstance(db_subset, (str, bytes)):
        # A string subset names a predefined set ('small'/'large'/'equilibrium')
        # or a named curve/subset attribute of the database module.
        if db_subset.lower() == 'small':
            try:
                database.HRXN_SM
            except AttributeError:
                raise ValidationError("""Special subset 'small' not available for database %s.""" % (db_name))
            else:
                HRXN = database.HRXN_SM
        elif db_subset.lower() == 'large':
            try:
                database.HRXN_LG
            except AttributeError:
                raise ValidationError("""Special subset 'large' not available for database %s.""" % (db_name))
            else:
                HRXN = database.HRXN_LG
        elif db_subset.lower() == 'equilibrium':
            try:
                database.HRXN_EQ
            except AttributeError:
                raise ValidationError("""Special subset 'equilibrium' not available for database %s.""" % (db_name))
            else:
                HRXN = database.HRXN_EQ
        else:
            HRXN = p4util.getattr_ignorecase(database, db_subset)
            if HRXN is None:
                HRXN = p4util.getattr_ignorecase(database, 'HRXN_' + db_subset)
                if HRXN is None:
                    raise ValidationError("""Special subset '%s' not available for database %s.""" % (db_subset, db_name))
    else:
        # An explicit (case-sensitive) list of database members
        temp = []
        for rxn in db_subset:
            if rxn in HRXN:
                temp.append(rxn)
            else:
                raise ValidationError("""Subset element '%s' not a member of database %s.""" % (str(rxn), db_name))
        HRXN = temp

    # Collect the unique reagents needed by the selected reactions
    temp = []
    for rxn in HRXN:
        temp.append(ACTV['%s-%s' % (dbse, rxn)])
    HSYS = p4util.drop_duplicates(sum(temp, []))

    # Sow all the necessary reagent computations
    core.print_out("\n\n")
    p4util.banner(("Database %s Computation" % (db_name)))
    core.print_out("\n")

    # write index of calcs to output file
    instructions = """\n The database single-job procedure has been selected through mode='continuous'.\n"""
    instructions += """ Calculations for the reagents will proceed in the order below and will be followed\n"""
    instructions += """ by summary results for the database.\n\n"""
    for rgt in HSYS:
        instructions += """ %-s\n""" % (rgt)
    core.print_out(instructions)

    # Loop through chemical systems
    ERGT = {}  # reagent name -> computed energy
    ERXN = {}  # reaction name -> weighted reaction energy
    VRGT = {}  # reagent name -> tabulated PSI variables
    VRXN = {}  # reaction name -> tabulated reaction values
    for rgt in HSYS:
        VRGT[rgt] = {}
        core.print_out('\n')
        p4util.banner(' Database {} Computation: Reagent {} \n {}'.format(db_name, rgt, TAGL[rgt]))
        core.print_out('\n')
        molecule = core.Molecule.from_dict(GEOS[rgt].to_dict())
        molecule.set_name(rgt)
        molecule.update_geometry()
        if symmetry_override:
            molecule.reset_point_group('c1')
            molecule.fix_orientation(True)
            molecule.fix_com(True)
            molecule.update_geometry()
        # Swap restricted for unrestricted reference only for open-shell reagents
        if (openshell_override) and (molecule.multiplicity() != 1):
            if user_reference == 'RHF':
                core.set_global_option('REFERENCE', 'UHF')
            elif user_reference == 'RKS':
                core.set_global_option('REFERENCE', 'UKS')
        core.set_global_option('WRITER_FILE_LABEL', user_writer_file_label + ('' if user_writer_file_label == '' else '-') + rgt)
        if allowoptexceeded:
            try:
                ERGT[rgt] = func(molecule=molecule, **kwargs)
            except ConvergenceError:
                core.print_out(f"Optimization exceeded cycles for {rgt}")
                ERGT[rgt] = 0.0
        else:
            ERGT[rgt] = func(molecule=molecule, **kwargs)
        core.print_variables()
        core.print_out(" Database Contributions Map:\n {}\n".format('-' * 75))
        for rxn in HRXN:
            db_rxn = dbse + '-' + str(rxn)
            if rgt in ACTV[db_rxn]:
                core.print_out(' reagent {} contributes by {:.4f} to reaction {}\n'.format(rgt, RXNM[db_rxn][rgt], db_rxn))
        core.print_out('\n')
        for envv in db_tabulate:
            VRGT[rgt][envv.upper()] = core.variable(envv)
        core.set_global_option("REFERENCE", user_reference)
        core.clean()
        #core.opt_clean()
        core.clean_variables()

    # Reap all the necessary reaction computations
    core.print_out("\n")
    p4util.banner(("Database %s Results" % (db_name)))
    core.print_out("\n")

    maxactv = []
    for rxn in HRXN:
        maxactv.append(len(ACTV[dbse + '-' + str(rxn)]))
    maxrgt = max(maxactv)
    table_delimit = '-' * (62 + 20 * maxrgt)
    tables = ''

    # find any reactions that are incomplete (a zero reagent energy marks failure)
    FAIL = collections.defaultdict(int)
    for rxn in HRXN:
        db_rxn = dbse + '-' + str(rxn)
        for i in range(len(ACTV[db_rxn])):
            if abs(ERGT[ACTV[db_rxn][i]]) < 1.0e-12:
                if not allowoptexceeded:
                    FAIL[rxn] = 1

    # tabulate requested process::environment variables
    tables += """ For each VARIABLE requested by tabulate, a 'Reaction Value' will be formed from\n"""
    tables += """ 'Reagent' values according to weightings 'Wt', as for the REQUESTED ENERGY below.\n"""
    tables += """ Depending on the nature of the variable, this may or may not make any physical sense.\n"""
    for rxn in HRXN:
        db_rxn = dbse + '-' + str(rxn)
        VRXN[db_rxn] = {}

    for envv in db_tabulate:
        envv = envv.upper()
        tables += """\n ==> %s <==\n\n""" % (envv.title())
        tables += _tblhead(maxrgt, table_delimit, 2)
        for rxn in HRXN:
            db_rxn = dbse + '-' + str(rxn)
            if FAIL[rxn]:
                tables += """\n%23s %8s %8s %8s %8s""" % (db_rxn, '', '****', '', '')
                for i in range(len(ACTV[db_rxn])):
                    tables += """ %16.8f %2.0f""" % (VRGT[ACTV[db_rxn][i]][envv], RXNM[db_rxn][ACTV[db_rxn][i]])
            else:
                # Reaction value = weighted sum of reagent values
                VRXN[db_rxn][envv] = 0.0
                for i in range(len(ACTV[db_rxn])):
                    VRXN[db_rxn][envv] += VRGT[ACTV[db_rxn][i]][envv] * RXNM[db_rxn][ACTV[db_rxn][i]]
                tables += """\n%23s %16.8f """ % (db_rxn, VRXN[db_rxn][envv])
                for i in range(len(ACTV[db_rxn])):
                    tables += """ %16.8f %2.0f""" % (VRGT[ACTV[db_rxn][i]][envv], RXNM[db_rxn][ACTV[db_rxn][i]])
        tables += """\n %s\n""" % (table_delimit)

    # tabulate primary requested energy variable with statistics
    count_rxn = 0
    minDerror = 100000.0
    maxDerror = 0.0
    MSDerror = 0.0
    MADerror = 0.0
    RMSDerror = 0.0

    tables += """\n ==> %s <==\n\n""" % ('Requested Energy')
    tables += _tblhead(maxrgt, table_delimit, 1)
    for rxn in HRXN:
        db_rxn = dbse + '-' + str(rxn)
        if FAIL[rxn]:
            tables += """\n%23s %8.4f %8s %10s %10s""" % (db_rxn, BIND[db_rxn], '****', '****', '****')
            for i in range(len(ACTV[db_rxn])):
                tables += """ %16.8f %2.0f""" % (ERGT[ACTV[db_rxn][i]], RXNM[db_rxn][ACTV[db_rxn][i]])
        else:
            ERXN[db_rxn] = 0.0
            for i in range(len(ACTV[db_rxn])):
                ERXN[db_rxn] += ERGT[ACTV[db_rxn][i]] * RXNM[db_rxn][ACTV[db_rxn][i]]
            # error is (computed - reference) in kcal/mol
            error = constants.hartree2kcalmol * ERXN[db_rxn] - BIND[db_rxn]
            tables += """\n%23s %8.4f %8.4f %10.4f %10.4f""" % (db_rxn, BIND[db_rxn], constants.hartree2kcalmol * ERXN[db_rxn],
                error, error * constants.cal2J)
            for i in range(len(ACTV[db_rxn])):
                tables += """ %16.8f %2.0f""" % (ERGT[ACTV[db_rxn][i]], RXNM[db_rxn][ACTV[db_rxn][i]])
            # min/max by magnitude but stored with sign
            if abs(error) < abs(minDerror):
                minDerror = error
            if abs(error) > abs(maxDerror):
                maxDerror = error
            MSDerror += error
            MADerror += abs(error)
            RMSDerror += error * error
            count_rxn += 1
    tables += """\n %s\n""" % (table_delimit)

    if count_rxn:
        MSDerror /= float(count_rxn)
        MADerror /= float(count_rxn)
        RMSDerror = math.sqrt(RMSDerror / float(count_rxn))
        tables += """%23s %19s %10.4f %10.4f\n""" % ('Minimal Dev', '', minDerror, minDerror * constants.cal2J)
        tables += """%23s %19s %10.4f %10.4f\n""" % ('Maximal Dev', '', maxDerror, maxDerror * constants.cal2J)
        tables += """%23s %19s %10.4f %10.4f\n""" % ('Mean Signed Dev', '', MSDerror, MSDerror * constants.cal2J)
        tables += """%23s %19s %10.4f %10.4f\n""" % ('Mean Absolute Dev', '', MADerror, MADerror * constants.cal2J)
        tables += """%23s %19s %10.4f %10.4f\n""" % ('RMS Dev', '', RMSDerror, RMSDerror * constants.cal2J)
        tables += """ %s\n""" % (table_delimit)
        core.set_variable('%s DATABASE MEAN SIGNED DEVIATION' % (db_name), MSDerror)
        core.set_variable('%s DATABASE MEAN ABSOLUTE DEVIATION' % (db_name), MADerror)
        core.set_variable('%s DATABASE ROOT-MEAN-SQUARE DEVIATION' % (db_name), RMSDerror)
        core.print_out(tables)
        finalenergy = MADerror
    else:
        finalenergy = 0.0

    optstash.restore()

    # Expose per-reagent and per-reaction tabulations via module-level caches
    DB_RGT.clear()
    DB_RGT.update(VRGT)
    DB_RXN.clear()
    DB_RXN.update(VRXN)
    return finalenergy
def _tblhead(tbl_maxrgt, tbl_delimit, ttype):
r"""Function that prints the header for the changable-width results tables in db().
*tbl_maxrgt* is the number of reagent columns the table must plan for. *tbl_delimit*
is a string of dashes of the correct length to set off the table. *ttype* is 1 for
tables comparing the computed values to the reference or 2 for simple tabulation
and sum of the computed values.
"""
tbl_str = ''
tbl_str += """ %s""" % (tbl_delimit)
if ttype == 1:
tbl_str += """\n%23s %19s %21s""" % ('Reaction', 'Reaction Energy', 'Reaction Error')
elif ttype == 2:
tbl_str += """\n%23s %19s %17s""" % ('Reaction', 'Reaction Value', '')
for i in range(tbl_maxrgt):
tbl_str += """%20s""" % ('Reagent ' + str(i + 1))
if ttype == 1:
tbl_str += """\n%23s %8s %8s %10s %10s""" % ('', 'Ref', 'Calc', '[kcal/mol]', '[kJ/mol]')
elif ttype == 2:
tbl_str += """\n%65s""" % ('')
for i in range(tbl_maxrgt):
if ttype == 1:
tbl_str += """%20s""" % ('[Eh] Wt')
elif ttype == 2:
tbl_str += """%20s""" % ('Value Wt')
tbl_str += """\n %s""" % (tbl_delimit)
return tbl_str
## Aliases ##
# db() is the documented short form of database() (see the docstring's :aliases:).
db = database

#######################
## End of Database ##
#######################
|
ashutoshvt/psi4
|
psi4/driver/wrapper_database.py
|
Python
|
lgpl-3.0
| 26,302
|
[
"Psi4"
] |
cc4c230191c77484ca982e83d384b8f6c7976929a2971cdb824aa774989cee16
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('agg')
from os import path, remove
from nutils import *
import scipy
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
from scipy.io import mmwrite
from mskrylov import poly_driver
from mekrylov import me_driver
import time
from math import pi
import scipy.sparse.linalg as spla
from plot_misc import plot_opt_splitting2
# Destination for experiment output -- NOTE(review): relative path, assumed
# to be resolved against the current working directory at run time.
out_file = 'experm/split_f19_np4.txt'
# Points per wavelength (resolution target); TODO confirm where this is
# consumed -- no use is visible in this part of the file.
ppw = 20
def test_orig_problem(K, C, M, b, freq, damping, x, solver_flag):
Nom = len(freq)
trace_C = np.sum(C.diagonal())
if abs(trace_C) > 0 & solver_flag==0:
om = (1.0-1j*damping)*2.0*np.pi*freq # 'our' damping model
else:
om2 = (1.0-1j*damping)*(2.0*np.pi*freq)**2 # Mulders' damping model
om = np.sqrt(om2)
normb = np.linalg.norm(b)
relerr = np.zeros((Nom,))
for j in range(Nom):
xs = x[:,j]
r = b - (K*xs + 1j*om[j]*(C*xs) - om[j]**2*(M*xs))
relerr[j] = np.linalg.norm(r)/normb
print('Relative residual of original problem:' +str(relerr))
@log.title
def makeplots( domain, geom, Lx, Lz, value, name, title, ndigits=0, index=None, clim=None, lineOn=False, imgtype=None,):
    """Render *value* over *domain* as a colour mesh and save it as *name*.

    Axis tick labels are hard-coded for the 600 m wide, 1000 m deep wedge
    geometry (x relabelled 0..600, z relabelled 0..1000 in metres).
    clim -- optional (vmin, vmax) colour limits.
    lineOn -- if True, overlay the two material-interface lines
    (only meaningful for the wedge problem in 2D).
    imgtype -- image format forwarded to nutils' PyPlot.
    """
    points, colors = domain.elem_eval( [ geom, value ], ischeme='bezier3', separate=True )
    with plot.PyPlot( name, ndigits=ndigits, figsize=(5,6), index=index, imgtype=imgtype ) as plt:
        plt.mesh( points, colors, triangulate='bezier', edgecolors='none' )
        plt.title(title)
        plt.xlabel('x [m]')
        plt.ylabel('z [m]')
        # tick positions in mesh coordinates, labels in metres
        plt.xticks( [0, Lx/2.0, Lx], ['0', '300', '600'] )
        plt.yticks( [-0, -0.4*Lz, -0.8*Lz, -Lz], ['0', '400', '800', '1000'] )
        if clim is not None:
            plt.clim(*clim)
        plt.colorbar()
        if lineOn:
            # Only for wedge problem in 2D
            plt.plot( [0, 600],[-400, -500],'k' )
            plt.plot( [0, 600],[-800, -600],'k' )
def makevtk(domain, geom, rho, lam, mu, cp, cs, sol, freq, vec_basis, name):
    """Write a VTK file with the material fields and, for each frequency,
    the real part of the displacement field.

    sol -- solution coefficient vectors, one row per frequency (sol[k, :]).
    vec_basis -- vector FEM basis used to expand the coefficients.
    name -- output file name for nutils' VTKFile.
    """
    Nom = len(freq)
    vtk_geom, vtk_rho, vtk_lam, vtk_mu, vtk_cp, vtk_cs = domain.simplex.elem_eval( [ geom, rho, lam, mu, cp, cs ], ischeme='vtk', separate=True )
    with plot.VTKFile( name ) as vtk:
        vtk.unstructuredgrid( vtk_geom )
        vtk.pointdataarray( 'rho', vtk_rho )
        vtk.pointdataarray( 'lambda', vtk_lam )
        vtk.pointdataarray( 'mu', vtk_mu )
        vtk.pointdataarray( 'cp', vtk_cp )
        vtk.pointdataarray( 'cs', vtk_cs )
        for k in range(0,Nom):
            # expand coefficients on the basis; only the real part is stored
            disp = vec_basis.dot( sol[k,:] ).real
            vtk_disp = domain.simplex.elem_eval( disp, ischeme='vtk', separate=True )
            vtk.pointdataarray( 'disp_f'+str(freq[k]), vtk_disp )
def makespyplot( matrix, name, imgtype=None ):
    """Save a spy (sparsity-pattern) plot of *matrix* under *name*.

    Accepts a scipy sparse matrix directly, or a nutils matrix object
    which is converted via its ``toscipy()`` method; the plot title
    records the number of nonzeros.
    """
    if not scipy.sparse.isspmatrix(matrix):
        matrix = matrix.toscipy()
    caption = '%s, nnz = %s' % (name, matrix.nnz)
    with plot.PyPlot(name, ndigits=0, imgtype=imgtype) as plt:
        plt.spy(matrix, markersize=0.8, color='blue')
        plt.title(caption)
def point_eval(func, domain, geom, point):
    """Evaluate the integral of *func* concentrated at a single grid vertex.

    *point* gives the vertex as element indices, one entry per dimension.
    The domain is first trimmed so the requested vertex lies on a corner of
    the remaining grid, then repeatedly restricted to the matching boundary
    so the integral collapses onto that vertex (a point source / unit load).
    Returns the result as a dense numpy array.
    """
    # keep elements up to index p in each direction (everything if p == 0)
    domain = domain[tuple(slice(0, p) if p > 0 else slice(None) for p in point)]
    for p in point:
        domain = domain.boundary['right' if p > 0 else 'left']
    return numpy.asarray(domain.integrate( func, geometry=geom, ischeme='gauss2' ).toscipy().todense())
def elast_mat(rho, cp, cs, lam, mu, ndims, nx, ny, nz, vec_basis, domain, geom, block):
    """Assemble the FEM matrices and point-source RHS of the elastic problem.

    Returns (K, C, M, rhs): elasticity (stiffness), Sommerfeld absorbing
    boundary, and mass matrices, plus a vertical point-force right-hand side.
    block -- if False, C is zeroed and the source is relocated to the domain
    interior (undamped test configuration).
    """
    # define PDE: isotropic linear elasticity (Hooke's law) and mass weighting
    stress = lambda u: lam*u.div(geom)[:,_,_]*function.eye(ndims) + 2.0*mu*u.symgrad(geom)
    elasticity = function.outer( stress(vec_basis), vec_basis.grad(geom) ).sum([2,3])
    w_mass = lambda u: rho*u
    mass = function.outer( w_mass(vec_basis), vec_basis ).sum(-1)
    # define Sommerfeld BC: absorbing term rho*(cp*n n^T + cs*(tangential part))
    n = geom.normal()
    t = np.eye(ndims)
    t = t-(t*n[_,:]).sum(1)  # remove normal component -> tangential projector rows
    B_bc = cp*n[:,_]*n[_,:]+cs*(t[:,:,_]*t[:,_,:]).sum(0)
    bc_fun = lambda u: rho*(B_bc*u[:,_,:]).sum(-1)
    sommerfeld = function.outer( bc_fun(vec_basis), vec_basis ).sum(-1)
    if ndims == 2:
        # top surface is free; absorbing BC on the remaining sides
        sommerfeld_boundary = 'left,right,bottom'
        source_position = nx//2, nz
    else:
        sommerfeld_boundary = 'left,right,bottom,front,back'
        source_position = nx//2, ny//2, nz
    # Build matrices
    K, M = domain.integrate( [elasticity, mass], geometry=geom, ischeme='gauss2' )
    C = domain.boundary[sommerfeld_boundary].integrate( sommerfeld, geometry=geom, ischeme='gauss2' )
    # Build RHS: point force acting on the last displacement component
    if not block:
        C = 0.0*C
        # NOTE(review): this 2-entry source position is also taken in 3-D
        # (ndims == 3) -- verify that the undamped configuration is 2-D only.
        source_position = nx//2, nz//2
        rhs = point_eval(vec_basis, domain, geom, source_position)[:,-1] #+ point_eval(vec_basis, domain, geom, source_position)[:,0]
    else:
        rhs = point_eval(vec_basis, domain, geom, source_position)[:,-1]
    return K, C, M, rhs
def main( ndims=2,           # problem dimension (2,3)
          dx=100.0,          # grid size in x-direction
          dy=100.0,          # grid size in y-direction
          dz=100.0,          # grid size in z-direction
          freq=[1.0,9.0],    # frequency range endpoints in Hz (mutable default; read-only here)
          Nom=7,             # number of freq's
          #df=.5,            # number of equally-spaced freq's
          degree=1,          # degree of FEM splines
          damping=0.7,       # viscous damping param
          maxit=300,         # max no of iterations
          tol=1e-8,          # residual norm tolerance
          dg_pp=0,           # degree of poly preconditioner
          rot = False,       # rotation in MEqn approach
          tau_re=0.7,        # real(seed), if tau.real<0: take 'optimal' tau
          tau_im=-0.3,       # imag(seed)
          block=True,        # C=0 if False
          plots=False,       # plots on/off
          plot_resnrm=False, # display residual norm live (NOTE: not referenced in this body)
          plot_ritz=False,   # plot ritz values
          iLU=False,
          fill_factor=10,
          solver_flag=0):    # -1(python's built-in), 0(poly_pre), 1(matr_eqn)
    """Set up and solve the multi-frequency elastic 'wedge' benchmark.

    Discretizes the damped time-harmonic elastic wave equation on a
    rectilinear grid with nutils, assembles (K, C, M, rhs) via elast_mat,
    and solves (K + i*om*C - om^2*M) x = rhs for Nom frequencies with the
    solver selected by solver_flag.  Optionally produces 2-D plots and VTK
    output.  Returns nothing; results are printed/written to disk.
    """
    tau = tau_re+1j*tau_im   # complex seed frequency for the preconditioner
    # domain size
    Lx = 600.0
    Ly = 600.0
    Lz = 1000.0
    # problem parameters: expand [f_min, f_max] into Nom equally spaced frequencies
    freq = np.linspace(freq[0],freq[-1],Nom)
    #freq = np.arange(freq[0], freq[-1]+df, df)
    Nom = len(freq)
    # define physical params (three layers: density, P- and S-wave speeds)
    rho0 = 1800.0
    rho1 = 2100.0
    rho2 = 1950.0
    cp0 = 2000.0
    cp1 = 3000.0
    cp2 = 2300.0
    cs0 = 800.0
    cs1 = 1600.0
    cs2 = 1100.0
    # define Cartesian grid
    nx = int(np.round(Lx/dx))+1
    nz = int(np.round(Lz/dz))+1
    verts_x = np.linspace( 0, Lx, nx )
    verts_z = np.linspace( -Lz, 0, nz )   # z runs from -Lz (bottom) to 0 (surface)
    if ndims == 2:
        ny = 1
        dy = 0.
        verts = [verts_x, verts_z]
    elif ndims == 3:
        ny = int(np.round(Ly/dy))+1
        verts_y = np.linspace( 0, Ly, ny )
        verts = [verts_x, verts_y, verts_z]
    domain, geom = mesh.rectilinear(verts)
    vec_basis = domain.splinefunc( degree=degree ).vector( ndims )
    # define wedge problem: material selected per point by two dipping planes
    rho = function.select(
        [function.greater(geom[-1]+0.4*Lz+geom[0]/6, 0), function.greater(geom[-1]+0.8*Lz-geom[0]/3, 0)],
        [rho0, rho1], rho2)
    cp = function.select(
        [function.greater(geom[-1]+0.4*Lz+geom[0]/6, 0), function.greater(geom[-1]+0.8*Lz-geom[0]/3, 0)],
        [cp0, cp1], cp2)
    cs = function.select(
        [function.greater(geom[-1]+0.4*Lz+geom[0]/6, 0), function.greater(geom[-1]+0.8*Lz-geom[0]/3, 0)],
        [cs0, cs1], cs2)
    # Lame parameters derived from wave speeds and density
    mu = cs**2 * rho
    lam = rho * (cp**2 - 2.0*cs**2)
    # problem summary
    print( '---- WEDGE PROBLEM ----' )
    print( 'problem size : ' + str(nx-1+degree)+' x '+str(ny-1+degree)+' x '+str(nz-1+degree) )
    print( '# dofs : ' + str(len(vec_basis)) )
    # NOTE(review): 'ppw' (points per wavelength?) is not defined in this
    # function or visible module scope -- confirm it is a module-level global.
    print( 'max. frequency : ' + str( min(cs0,cs1,cs2,cp0,cp1,cp2)/(ppw*max(dx,dy,dz)) ) )
    print( '-------------------------------\n' )
    # Create discretization matrices using nutils
    K, C, M, rhs = elast_mat(rho, cp, cs, lam, mu, ndims, nx, ny, nz, vec_basis, domain, geom, block)
    # plot_opt_splitting2(damping, 2*pi*1.0, 2*pi*9.0)
    t0 = time.time()
    if solver_flag==0:
        # polynomially preconditioned multi-shift GMRES
        print('Use poly_msgmres of degree '+str(dg_pp))
        sol, it = poly_driver(K.toscipy().tocsc(), C.toscipy().tocsc(), M.toscipy().tocsc(), rhs, freq, tau, damping, tol, maxit, dg_pp, plot_ritz=plot_ritz)
        #if path.exists(out_file):
        #remove(out_file)
        # NOTE(review): 'out_file' is not defined in this function or visible
        # module scope -- confirm it is a module-level global.
        with open(out_file, "a") as myfile:
            print(freq)
            myfile.write(''+str(round(min(freq),1))+' '+str(round(max(freq),1))+' '+str(it)+'\n')
    elif solver_flag==1:
        # matrix-equation GMRES approach
        print('Use megmres')
        sol, it = me_driver(K.toscipy().tocsc(), C.toscipy().tocsc(), M.toscipy().tocsc(), rhs, freq, tau, damping, tol, maxit, iLU=iLU, fill_factor=fill_factor, rot=rot, plot_ritz=plot_ritz)
    else:
        # reference: solve each frequency independently with scipy
        print('Use pythons built-in solver...')
        sol = np.zeros((Nom, len(vec_basis)), dtype=complex)
        it = -1
        for k in range(0,Nom):
            # complex frequency with viscous damping
            om = 2.0*np.pi*freq[k]*(1.0-1j*damping)
            matrix = K + 1j*om*C - om**2*M
            A = matrix.toscipy().tocsc()
            if ndims==2:
                # 2-D problems are small enough for a direct sparse LU solve
                t0_lu = time.time()
                lu = spla.splu(A)
                print('LU decomposition:'+str(time.time()-t0_lu))
                t0_solve = time.time()
                sol[k,:] = lu.solve(rhs)
                print('solve:'+str(time.time()-t0_solve))
            else:
                # 3-D: incomplete LU as preconditioner for restarted GMRES
                print('Use ILU+GMRES')
                class gmres_counter(object):
                    # callback that records/prints the residual norm per iteration
                    def __init__(self, disp=True):
                        self._disp = disp
                        self.resvec=[]
                        self.niter = 0
                    def __call__(self, rk=None):
                        self.niter += 1
                        self.resvec.append(rk)
                        if self._disp:
                            print('iter %3i\trk = %s' % (self.niter, str(rk)))
                t0_lu = time.time()
                invA = spla.spilu( A, fill_factor=10.0)
                invA_x = lambda x: invA.solve(x)
                ilu = spla.LinearOperator(A.shape, invA_x)
                print('ilu setup:'+str(time.time()-t0_lu))
                t0_solve = time.time()
                counter = gmres_counter(disp=True)
                sol[k,:], info = spla.gmres(A, rhs, tol=1e-16, restart=200, maxiter=20, M=ilu, callback=counter)
                it = info
                print('GMRES time:'+str(time.time()-t0_solve))
                print('GMRES info:'+str(counter.niter)+' -- '+str(counter.resvec[-1]))
    te = time.time()
    print('No iterations: '+str(it)+' CPU time: '+str(te-t0))
    # verify the computed solutions against the original shifted systems
    test_orig_problem(K.toscipy(), C.toscipy(), M.toscipy(), rhs, freq, damping, sol.T, solver_flag)
    if plots:
        if(ndims ==2):
            makeplots( domain, geom, Lx, Lz, rho, 'rho', 'Density' )
            #makeplots( domain, geom, Lx, Lz, rho, 'rho', 'Density', imgtype='eps' )
            #makeplots( domain, geom, Lx, Lz, cp, 'cp', 'c_p [m/s]' )
            #makeplots( domain, geom, Lx, Lz, cs, 'cs', 'c_s [m/s]' )
            for k in range(0,Nom):
                disp = vec_basis.dot( sol[k,:] ) # FEM summation
                #disp_x = disp[0].real # Plot Re(u_x)
                disp_z = disp[-1].real # Plot Re(u_z)
                #makeplots( domain, geom, Lx, Lz, disp_x, 'disp_x'+str(k), 'u_x at {} Hz'.format(freq[k]), lineOn=True )
                makeplots( domain, geom, Lx, Lz, disp_z, 'disp_z'+str(k), 'u_z at {} Hz'.format(freq[k]), lineOn=True )
                #makeplots( domain, geom, Lx, Lz, disp_x, 'disp_x'+str(k), 'u_x at {} Hz'.format(freq[k]), lineOn=True, imgtype='eps' )
                #makeplots( domain, geom, Lx, Lz, disp_z, 'disp_z'+str(k), 'u_z at {} Hz'.format(freq[k]), lineOn=True, imgtype='eps' )
            makevtk(domain, geom, rho, lam, mu, cp, cs, sol, freq, vec_basis, 'wedge2d')
        elif(ndims==3):
            makevtk(domain, geom, rho, lam, mu, cp, cs, sol.T, freq, vec_basis, 'wedge3d')
util.run( main )
|
ManuelMBaumann/opt_tau
|
num_exper/elast_wedge.py
|
Python
|
mit
| 11,781
|
[
"VTK"
] |
5498c48598cd2ddd6ac595872a168d2df000e144437e6e0b27889ad6490be204
|
#================================================================================
# Title: Angular correlation and auto-correlation function for bubbles and YSOs
# Authors: S. Kendrew, Max Planck Institute for Astronomy, Heidelberg, 2012
#================================================================================
# Code names: calc_corr.py
#
# Language: python
#
# Code tested under the following compilers/operating systems: Mac OSX 10.6.8, python 2.6.6
#
# Description of input data:
# bubCat: a numpy recarray describing the input bubble catalog. must contain the following fields:
# - bubCat['lon']: galactic longitude in degrees. longitudes > 180d must be converted to their negative equivalents
# - bubCat['lat']: galactic latitude in degrees.
# - bubCat['reff']: effective radius of the bubbles (or other measure of size), in arcminutes
# ysoCat: a numpy recarray describing the YSO catalog. must contain the following fields:
# - ysoCat['lon']: galactic longitude, as above
# - ysoCat['lat']: galactic latitude, as above
# corrType: a string describing flavour of correlation function
# - 'a' for auto-correlation; in this case the bubble catalog is not used
# - 'x' for cross-correlation between the two catalogs [default]
# rSize: integer, size of the random catalog; this is a multiplier applied to the size of the input catalogs. [default = 10; recommended > 20]
# nbStrap: integer, number of bootstrap iterations performed [default = 20; recommended > 50]
# binStep: float, size of the separation bins between the sources. For auto-correlations this should be given in arcminutes, for cross-correlations in units of bubble Reff [default = 0.2]
#
#
# Description of output data:
#
# theta: a vector of floats with the size bins (for auto-correlation in arcminutes, for cross-correlation in units of bubble radii)
# corr: a vector of floats with correlation values in each bin. unitless.
# corErr: a vector of floats, 1-sigma uncertainty on the correlation values in each bin. unitless.
#
# System requirements: 32-bits, 4 GB RAM (estimate)
#
# Calls to external routines:
#
# The code uses the following python modules:
# numpy (module random)
# scipy (modules stats, optimize)
# matplotlib (module pyplot)
# math
# itertools
# pdb (optional, for debugging)
# These packages are all explicitly imported at the start of the code; must be preinstalled by the user.
#
# Additional comments:
#
# The code calculates cross- and auto-correlation functions for two generic input catalogs, as described above. It uses the Landy-Szalay
# correlation estimator, described in detail in Landy & Szalay (1993).
# As well as the main function calc_corr, the file contains a number of supporting routines:
# - fitLat: performs a Gaussian fit to the latitude distributions of the catalogs
# - fitReff: performs a log-normal fit to the effective radii distribution of bubCat
# - genRandomYso: generates a random catalog of YSOs based on the properties of the input catalog ysoCat and the specified size, rSize
# - genRandomBubs: generates a random catalog of bubbles based on the properties of the input catalog bubCat and the specified size, rSize
# - genbStrap: generates random indices with replacement of one of the input catalogs for the bootstrapping operation
# - genNcountsX: performs pair counts for the correlation calculation for either corrType 'a' or 'x'
# - genDiagFig: generates diagnostic figures comparing data and random catalog distributions with lon, lat and reff - for sanity check.
# - genBoxFig: generates diagnostic box-and-whisker plot of pair counts per bin over all bootstrap iteration - for sanity check.
#
# Each of these functions contains a short description of inputs and outputs with its definition.
#
# This function can easily be integrated into a python script by adding
# import calc_corr
# to the script header. Individual functions can then be called using e.g.
# x, y, yerr = calc_corr.calc_corr(cat1, cat2, corrType='x', rSize=100, nbStrap=100, binStep=0.1)
#
#================================================================================
#The AAS gives permission to anyone who wishes to use these subroutines to run their own calculations.
#Permission to republish or reuse these routines should be directed to permissions@aas.org.
#Note that the AAS does not take responsibility for the content of the source code. Potential users should
#be wary of applying the code to conditions that the code was not written to model and the accuracy of the
#code may be affected when compiled and executed on different systems.
#================================================================================
#
#
# Written by S. Kendrew, 2012, kendrew@mpia.de
#
# Changes:
# jun 2013: changed addressing of recarray fields from attribute style (e.g. .lon) to key style (e.g. ['lon']) (in line with python 2.7)
#
#########################################################################################################################
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from scipy import optimize
import itertools
# Debugger: useful but optional
import pdb
from sklearn.neighbors import KDTree
def constrained_random(size, proposal, constraint):
    """Draw *size* samples from *proposal*, rejection-sampled on *constraint*.

    Parameters
    ----------
    size : int
        Number of samples to return.
    proposal : func
        Callable taking a count and returning that many random draws.
    constraint : func
        Callable taking an array and returning a boolean mask of which
        entries are acceptable.

    Returns
    -------
    Array of *size* draws, every one of which satisfies *constraint*;
    rejected entries are redrawn until none remain.
    """
    draws = proposal(size)
    while True:
        rejected = ~constraint(draws)
        n_bad = rejected.sum()
        if n_bad == 0:
            return draws
        # redraw only the rejected slots, leaving accepted draws in place
        draws[rejected] = proposal(n_bad)
def fast_histogram(tree, xy, bins):
    """Histogram of distances from the point *xy* to the points in *tree*.

    Parameters
    ----------
    tree : sklearn.neighbors.KDTree instance
        Tree built on the comparison points.
    xy : ndarray [1 row, 2 columns]
        The query position.
    bins : ndarray [n elements]
        The distance bin edges.

    Returns
    -------
    counts : [n - 1 elements]
        Number of tree points falling in each distance bin.
    """
    # jitter the query slightly -- KDTree dislikes exact coincidences
    query = xy + np.random.normal(0, 1e-7, xy.shape)
    # replicate the query once per bin edge so query_radius gets matched arrays
    query, _ = np.broadcast_arrays(query.reshape(1, 2), bins.reshape(-1, 1))
    cumulative = tree.query_radius(query, bins, count_only=True)
    # cumulative counts within each radius -> per-annulus counts by differencing
    return cumulative[1:] - cumulative[:-1]
#==============================================================
def fitLat(inpCat):
    """Fit a Gaussian to the Galactic latitude distribution of *inpCat*.

    inpCat : recarray with a field inpCat['lat'] (latitude in degrees)
    Returns (mean, sigma) of the best-fit Gaussian.

    Latitudes are histogrammed on [-1, 1] deg in 0.1 deg bins and the
    counts are fit by least squares with p = [amplitude, mean, sigma].
    """
    latbins = np.arange(-1.0, 1.1, 0.1)
    counts, edges = np.histogram(inpCat['lat'], bins=latbins)
    # p[0] = amplitude, p[1] = mean, p[2] = sigma
    fitfunc = lambda p, x: p[0]*np.exp(-(x-p[1])**2/(2.0*p[2]**2))
    errfunc = lambda p, x, y: fitfunc(p, x) - y
    p0 = [np.max(counts), 0., 0.5]
    # fit at the bin centres (the original hard-coded the 0.05 half-width)
    centers = 0.5*(edges[:-1] + edges[1:])
    p1, success = optimize.leastsq(errfunc, p0, args=(centers, counts))
    # the previously computed best-fit curve and residuals were unused; dropped
    return p1[1], p1[2]
#=============================================================
def fitReff(inpCat):
    """Fit a log-normal to the effective-radius distribution of *inpCat*.

    inpCat : recarray with a field inpCat['reff'] (effective radii, arcmin)
    Returns (mu, sigma) of the best-fit Gaussian in log(reff), i.e. the
    parameters of the log-normal size distribution.
    """
    counts, edges = np.histogram(np.log(inpCat['reff']), bins=10)
    # fit a Gaussian in log-space at the bin centres;
    # p[0] = amplitude, p[1] = mu, p[2] = sigma
    centers = 0.5*(edges[:-1] + edges[1:])
    fitfunc = lambda p, x: p[0]*np.exp(-(x-p[1])**2/(2.0*p[2]**2))
    errfunc = lambda p, x, y: fitfunc(p, x) - y
    p0 = [np.max(counts), 0., 1.5]
    p1, success = optimize.leastsq(errfunc, p0, args=(centers, counts))
    # the label string, best-fit curve and residuals computed previously
    # were never used; dropped
    return p1[1], p1[2]
#==============================================================
def genRandomYso(ysoCat, size, params):
    """Build a random YSO catalogue matching the data's sky distribution.

    ysoCat : data catalogue (its properties enter via *params*)
    size   : number of random sources to draw
    params : (mu, sigma) of the best-fit Gaussian latitude distribution,
             as returned by fitLat()
    Returns a recarray with fields 'lon' and 'lat' (degrees).

    Longitudes are uniform on [-65, 65] deg with |l| <= 10 excluded (not
    covered by the RMS survey -- amend the constraint for other surveys);
    latitudes are Gaussian draws clipped to |b| <= 1.
    """
    coordLims = [-65., 65., -1., 1.]
    rand = np.ndarray(size, dtype=[('lon', '<f8'), ('lat', '<f8')])
    rand['lon'] = constrained_random(
        size,
        lambda n: np.random.uniform(coordLims[0], coordLims[1], n),
        lambda x: np.abs(x) > 10.)
    rand['lat'] = constrained_random(
        size,
        lambda n: np.random.normal(params[0], params[1], n),
        lambda x: np.abs(x) <= 1)
    return rand.view(np.recarray)
#===============================================================
def genRandomBubs(bubCat, size, params, rparams):
    """Build a random bubble catalogue matching the data's distributions.

    bubCat  : data catalogue; only the observed reff range is used, to
              clip the random sizes
    size    : number of random sources to draw
    params  : (mu, sigma) of the Gaussian latitude fit (fitLat)
    rparams : (mu, sigma) of the log-normal reff fit (fitReff)
    Returns a recarray with fields 'lon', 'lat' (degrees) and 'reff'
    (arcmin), covering the same coordinate range as the data with all
    sizes inside the observed minimum/maximum.
    """
    coordLims = [-65., 65., -1., 1.]
    # observed size range (the duplicate 'rLims' list was unused; removed)
    reff_min = np.min(bubCat['reff'])
    reff_max = np.max(bubCat['reff'])
    types = [('lon', '<f8'), ('lat', '<f8'), ('reff', '<f8')]
    randCatArr = np.ndarray(size, dtype=types)
    # rejection-sample each field so it stays within the required range
    def lon_p(n):
        return np.random.uniform(coordLims[0], coordLims[1], n)
    def lon_c(x):
        return np.abs(x) > 10.   # exclude |l| <= 10 (no survey coverage)
    def lat_p(n):
        return np.random.normal(params[0], params[1], n)
    def lat_c(x):
        return np.abs(x) <= 1
    def reff_p(n):
        return np.random.lognormal(mean=rparams[0], sigma=rparams[1], size=n)
    def reff_c(x):
        return (x >= reff_min) & (x <= reff_max)
    randCatArr['lon'] = constrained_random(size, lon_p, lon_c)
    randCatArr['lat'] = constrained_random(size, lat_p, lat_c)
    randCatArr['reff'] = constrained_random(size, reff_p, reff_c)
    return randCatArr.view(np.recarray)
#=================================================================
def genBstrap(inpCat):
    """Return bootstrap resampling indices for *inpCat*.

    Draws len(inpCat) integer indices uniformly at random with
    replacement, suitable for indexing a resampled copy of the catalogue.
    """
    n = np.size(inpCat)
    return np.random.randint(0, n, n)
#=================================================================
def genNcountsX(cat1, cat2, bins, ctype):
    """Pair counts between two catalogues as a function of separation.

    The catalogues may be any combination of data/random bubbles and
    YSOs; when one is a bubble catalogue it must be passed as *cat1*.

    cat1  : recarray with 'lon'/'lat' fields; must also carry 'reff'
            (arcmin) when ctype == 'x'
    cat2  : recarray with 'lon'/'lat' fields
    bins  : separation bin edges -- arcminutes for 'a', units of each
            bubble's reff for 'x'
    ctype : 'a' auto-correlation, 'x' cross-correlation
    Returns (raw, normalised): total pair counts per bin (for the box
    plots) and the same counts normalised to unit sum (for w(theta)).
    """
    nsrc = np.size(cat1)
    perSource = np.zeros((len(bins)-1, nsrc))
    lon1, lon2, lat1, lat2 = map(np.asarray, [cat1['lon'], cat2['lon'],
                                              cat1['lat'], cat2['lat']])
    pos1 = np.column_stack((lon1, lat1))
    # KD tree on the second catalogue; query once per cat1 source
    tree = KDTree(np.column_stack((lon2, lat2)))
    if ctype == 'a':
        # homogeneous case: bins are arcminutes, convert to degrees
        for i in range(nsrc):
            perSource[:, i] = fast_histogram(tree, pos1[i], bins / 60.)
    elif ctype == 'x':
        # heterogeneous case: bins scale with each bubble's effective radius
        reff = np.asarray(cat1['reff'])
        for i in range(nsrc):
            perSource[:, i] = fast_histogram(tree, pos1[i], bins * reff[i] / 60)
    raw = np.sum(perSource, axis=1)
    return raw, raw / np.sum(raw)
#================================================================
def genBoxFig(bins, dd, dr, rd, rr, name):
    ##############################################################
    # Generates box plots showing the spread of pair counts per bin over the bootstrap iterations
    # Inputs:
    #   bins: numpy vector of floats with the theta bins (used for tick labels)
    #   dd: numpy array of pair counts per bin per bootstrap iteration, data-data
    #   dr: as dd, for data-random pair counts
    #   rd: as dd, for random-data pair counts (accepted but not plotted below)
    #   rr: as dd, for random-random pair counts (accepted but not plotted below)
    #   name: name of catalog, for additional labelling if desired (currently unused)
    # Returns:
    #   nothing; displays a figure with Data-Data and Data-Random panels
    ##############################################################
    boxFig = plt.figure(figsize=[6,8])
    ddBox = boxFig.add_subplot(211)
    plt.boxplot(dd, notch=0, sym='bx')
    ddBox.set_title('Data-Data')
    # truncate bin values to 3-character byte strings for compact tick labels
    xlabs=bins.astype('|S3')
    ddBox.set_xticklabels(xlabs, fontsize='medium')
    drBox = boxFig.add_subplot(212)
    plt.boxplot(dr, notch=0, sym='bx')
    drBox.set_title('Data-Random')
    drBox.set_xticklabels(xlabs, fontsize='medium')
    ddBox.set_xlabel(r'bin ($\theta$)')
    ddBox.set_ylabel('pair counts')
    drBox.set_xlabel(r'bin ($\theta$)')
    drBox.set_ylabel('pair counts')
    boxFig.show()
#================================================================
def genDiagFig(bub, yso, bubR, ysoR):
    ##############################################################
    # Generates a composite figure comparing the lon, lat and reff distributions
    # of the data and random catalogues -- for diagnostic purposes.
    # Inputs:
    #   bub: numpy recarray, bubble catalog (data); inherited from top-level input parameters
    #   yso: numpy recarray, yso catalog (data); inherited from top-level input parameters
    #   bubR: numpy recarray, bubble catalog (random); as returned by genRandomBubs()
    #   ysoR: numpy recarray, yso catalog (random); as returned by genRandomYso()
    # Returns:
    #   nothing; displays the figure
    #############################################################
    diagFig = plt.figure(figsize=(12,8))
    # layout: data lon/lat on top row, random lon/lat in the middle, reff at bottom left
    lonData = diagFig.add_axes([0.05, 0.7, 0.4, 0.2])
    latData = diagFig.add_axes([0.5, 0.7, 0.4, 0.2])
    lonRand = diagFig.add_axes([0.05, 0.4, 0.4, 0.2])
    latRand = diagFig.add_axes([0.5, 0.4, 0.4, 0.2])
    reffPlot = diagFig.add_axes([0.05, 0.1, 0.4, 0.2])
    # do the first plots on the data; YSO histograms reuse the bubble bin edges
    lonBub = lonData.hist(bub['lon'], bins = 50, histtype='step', label = 'bubbles', lw=2)
    lonYso = lonData.hist(yso['lon'], bins = lonBub[1], histtype='step', label = 'ysos', lw=2)
    lonLeg = lonData.legend(loc = 'best')
    #lonLeg.set_fontsize('small')
    lonData.set_title('Longitude distribution - Data')
    # NOTE(review): the 'axes=' keyword below is forwarded to matplotlib and has
    # no effect -- the target axes is already the object the method is called on.
    latBub = latData.hist(bub['lat'], bins = 10, histtype='step', label = 'bubbles', axes = latData, lw=2)
    latYso = latData.hist(yso['lat'], bins = latBub[1], histtype='step', label = 'ysos', axes = latData, lw=2)
    latLeg = latData.legend(loc = 'best')
    #latLeg.set_fontsize('small')
    latData.set_title('Latitude distribution - Data')
    reffData = reffPlot.hist(bub['reff'], bins = 20, histtype='step', label = 'data', axes = reffPlot, normed=True, lw=2)
    reffPlot.set_title('Reff distribution')
    # add the random catalogue data to the diagnostic plots
    lonBubR = lonRand.hist(bubR['lon'], bins = 50, histtype='step', label = 'bubbles', lw=2)
    lonYsoR = lonRand.hist(ysoR['lon'], bins = lonBubR[1], histtype='step', label = 'ysos', lw=2)
    lonLegR = lonRand.legend(loc = 'best')
    lonRand.set_title('Longitude distribution - Randoms')
    latBubR = latRand.hist(bubR['lat'], bins = 10, histtype='step', label = 'bubbles', axes = latData, lw=2)
    latYsoR = latRand.hist(ysoR['lat'], bins = latBubR[1], histtype='step', label = 'ysos', axes = latData, lw=2)
    latLegR = latRand.legend(loc = 'best')
    latRand.set_title('Latitude distribution - Random')
    # random reff distribution overplotted on the data reff panel, same bins
    reffRand = reffPlot.hist(bubR['reff'], bins = reffData[1], histtype='step', label = 'randoms', axes = reffPlot, normed=True, lw=2)
    reffLeg = reffPlot.legend(loc=0)
    diagFig.show()
#================================================================
def divSample(ysoCat, bubCat):
    #############################################################
    # Divide the YSOs up into "associated" and "control" samples according to distance to nearest bubble
    # Input:
    #   ysoCat, bubCat: as defined before
    # Returns (as index arrays into ysoCat):
    #   assoc: YSOs that lie within 2 effective radii of a bubble
    #   assoc2: YSOs specifically within 0.8-1.6 effective radii of a bubble (i.e. associated with the bubble rim)
    #   control: YSOs that lie further than 3 effective radii from the nearest bubble
    # NOTE(review): separations use a flat-sky sqrt(dlon^2 + dlat^2) without a
    # cos(lat) factor -- presumably acceptable at |b| <= 1 deg; confirm.
    #############################################################
    nyso=np.size(ysoCat)
    # seed each index array with a dummy 0 element; stripped again below
    assoc = np.zeros(1, dtype=int)
    control = np.zeros(1, dtype=int)
    assoc2 = np.zeros(1, dtype=int)
    #control2 = np.zeros(1, dtype=int)
    for i in range(0,nyso):
        dist = np.sqrt((bubCat['lon'] - ysoCat['lon'][i])**2 + (bubCat['lat'] - ysoCat['lat'][i])**2)
        dist = dist*60.  # degrees -> arcminutes, to match reff units
        dist = dist/bubCat['reff'] #distance from the YSO to all bubbles, expressed as a function of their effective radius
        mindist = np.min(dist)
        #print str(mindist)+' Reff'
        if mindist <= 2.:
            assoc = np.append(assoc, i)
        if (mindist >= 0.8) & (mindist <= 1.6):
            assoc2 = np.append(assoc2, i)
            #print 'added to associated sample'
        if mindist > 3.:
            control = np.append(control, i)
            #print 'added to control sample'
    assoc = assoc[1:] #remove the first element, which will be zero from how I've defined the array
    assoc2 = assoc2[1:]
    control = control[1:]
    print 'Bubble-associated sample contains %i YSOs; assoc2 contains %i sources. Control sample contains %i YSOs.' %(np.size(assoc), np.size(assoc2),np.size(control))
    return assoc, assoc2, control
#================================================================
def calcAcorr(dd, dr, rr, bins):
    """Landy-Szalay auto-correlation estimator.

    dd   : normalised data-data pair counts per theta bin
    dr   : normalised data-random pair counts per theta bin
    rr   : normalised random-random pair counts per theta bin
    bins : theta bin edges (arcmin); not used in the computation, kept
           for interface compatibility with callers
    Returns w(theta) = (dd - 2*dr + rr) / rr per bin
    (Landy & Szalay 1993).
    """
    # the original pre-allocated a zeros array of len(bins) and immediately
    # overwrote it with the estimator; the dead allocation is dropped
    return (dd - (2*dr) + rr) / rr
#================================================================
def calcXcorr(dd, dr, rd, rr, bins):
    """Landy-Szalay cross-correlation estimator.

    dd   : normalised data-data pair counts per theta bin
    dr   : normalised data-random pair counts per theta bin
    rd   : normalised random-data pair counts per theta bin
    rr   : normalised random-random pair counts per theta bin
    bins : theta bin edges (units of bubble reff); not used in the
           computation, kept for interface compatibility with callers
    Returns w(theta) = (dd - dr - rd + rr) / rr per bin
    (Landy & Szalay 1993).
    """
    # the original pre-allocated a zeros array of len(bins) and immediately
    # overwrote it with the estimator; the dead allocation is dropped
    return (dd - dr - rd + rr) / rr
#==================================================================
# MAIN FUNCTION
#==================================================================
def calc_corr(bubCat, ysoCat, corrType='x', rSize=10, nbStrap=20, binStep=0.2):
    """Top-level driver: bootstrapped Landy-Szalay correlation function.

    bubCat   : recarray of bubbles ('lon', 'lat', 'reff'); unused for 'a'
    ysoCat   : recarray of YSOs ('lon', 'lat')
    corrType : 'x' cross-correlation of bubbles vs YSOs, 'a' YSO
               auto-correlation
    rSize    : random-catalogue size multiplier
    nbStrap  : number of bootstrap iterations
    binStep  : bin width (reff units for 'x', arcmin for 'a')
    Returns (theta, corr, corrErr): bin grid, mean correlation per bin and
    its bootstrap standard deviation.  Also displays diagnostic figures.
    """
    #find the best-fit gaussian params for the two input catalogues' latitude distribution:
    ysoLatmu, ysoLatsig = fitLat(ysoCat)
    ysoParams = [ysoLatmu, ysoLatsig]
    print 'fit parameters for ysos: mean = %.2f, sigma = %.2f' %(ysoLatmu, ysoLatsig)
    #if doing a cross-correlation with bubbles then we want the fits for the bubbles as well.
    if corrType == 'x':
        bubLatmu, bubLatsig = fitLat(bubCat)
        reffp1, reffp2 = fitReff(bubCat)
        bubParams = [bubLatmu, bubLatsig]
        reffParams = [reffp1, reffp2]
        print 'fit parameters for bubbles: mean = %.2f, sigma = %.2f' %(bubLatmu, bubLatsig)
        print 'fit parameters for bubble Reffs: mu = %.2f, sigma = %.2f' %(reffp1, reffp2)
    # Define random catalogue sizes:
    ysoRandSize = rSize*np.size(ysoCat)
    bubRandSize = rSize*np.size(bubCat)
    # create angular distance grid and generate random cats. if doing a
    # cross-correlation then want both bubble and yso cats, if auto-correlation then only need
    # yso random cat.
    if corrType == 'x':
        theta = np.arange(0.0, 4., binStep) #in Reff units
        ysoRand = genRandomYso(ysoCat, ysoRandSize, ysoParams)
        bubRand = genRandomBubs(bubCat, bubRandSize, bubParams, reffParams)
        genDiagFig(bubCat, ysoCat, bubRand, ysoRand)
    if corrType == 'a':
        theta = np.arange(0., 10., binStep) #in arcminutes
        ysoRand = genRandomYso(ysoCat, ysoRandSize, ysoParams)
    nbins = np.size(theta)
    print 'Number of bootstrap iterations = %i' %(nbStrap)
    #create arrays for the pair counts
    # NOTE(review): genNcountsX returns nbins-1 values which are stored in
    # corrAll[:-1, i] below; the final row of corrAll therefore stays zero
    # and is averaged into corr/corrErr -- confirm this is intended.
    corrAll = np.zeros((nbins,nbStrap))
    ddBoxdat = np.zeros((nbStrap, nbins-1))
    drBoxdat = np.zeros((nbStrap, nbins-1))
    rdBoxdat = np.zeros((nbStrap, nbins-1))
    rrBoxdat = np.zeros((nbStrap, nbins-1))
    # if corrType =='x' we want a cross-correlation between the bubbles and YSOs. As random-data and random-random don't use bootstrapped cats, can perform these outside of the bootstrap loop
    if corrType == 'x':
        # pair counts with the bubbles random catalogue are the same for each bootstrap iteration so can take these out of the for loop
        print 'Random pair counts'
        rdBox, rdTotal = genNcountsX(bubRand, ysoCat, theta, corrType)
        rrBox, rrTotal = genNcountsX(bubRand, ysoRand, theta, corrType)
        for i in range(0,nbStrap):
            print 'Bootstrap iteration {0}' .format(i)
            # resample the bubble catalogue with replacement
            bStrapInd = genBstrap(bubCat)
            bubCat_bStrap = bubCat[bStrapInd]
            ddBox, ddTotal = genNcountsX(bubCat_bStrap, ysoCat, theta, corrType)
            drBox, drTotal = genNcountsX(bubCat_bStrap, ysoRand, theta, corrType)
            corrTemp = calcXcorr(ddTotal, drTotal, rdTotal, rrTotal, theta)
            corrAll[:-1,i] = corrTemp
            ddBoxdat[i,:] = ddBox
            drBoxdat[i,:] = drBox
            rdBoxdat[i,:] = rdBox
            rrBoxdat[i,:] = rrBox
    # if corrType == 'a' I want the autocorrelation of the YSO catalogue only.
    elif corrType == 'a':
        rrBox, rrTotal = genNcountsX(ysoRand, ysoRand, theta, corrType)
        for i in range(0,nbStrap):
            bStrapInd = genBstrap(ysoCat)
            ysoCat_bStrap = ysoCat[bStrapInd]
            ddBox, ddTotal = genNcountsX(ysoCat_bStrap, ysoCat, theta, corrType)
            drBox, drTotal = genNcountsX(ysoCat_bStrap, ysoRand, theta, corrType)
            corrTemp = calcAcorr(ddTotal, drTotal, rrTotal, theta)
            corrAll[:-1,i] = corrTemp
    # Average the correlation functions over all the bootstrap iterations, and calculate the standard deviations:
    corr = np.mean(corrAll, axis=1)
    corrErr = np.std(corrAll, axis=1)
    # Plot the correlation function
    xcorrFig = plt.figure()
    plt.errorbar(theta, corr, yerr = corrErr, xerr = None, fmt = 'bo', figure=xcorrFig)
    title = '%s correlation function. Bootstrap iterations = %i, Random catalogue size = %i.' %(corrType, nbStrap, rSize)
    plt.suptitle(title)
    if corrType == 'x': plt.xlabel('bubble-YSO separation theta (theta/Reff)')
    plt.ylabel('w(theta)')
    if corrType == 'a':
        # mark the median and quartiles of the bubble size distribution
        bub_med = stats.scoreatpercentile(bubCat['reff'], 50.)
        bubq1 = stats.scoreatpercentile(bubCat['reff'], 25.)
        bubq3 = stats.scoreatpercentile(bubCat['reff'], 75.)
        plt.axvline(x=bub_med, ymin=0, ymax=1, c='red', lw = 2., ls = '--')
        plt.axvline(x=bubq1, ymin=0, ymax=1, c='green', lw = 2., ls = '--')
        plt.axvline(x=bubq3, ymin=0, ymax=1, c='green', lw = 2., ls = '--')
        plt.xlabel('theta (arcmin)')
    if corrType == 'x':
        # NOTE(review): bubCat (a recarray) is passed as genBoxFig's 'name'
        # argument, which genBoxFig currently ignores.
        genBoxFig(theta,ddBoxdat, drBoxdat, rdBoxdat, rrBoxdat, bubCat)
    xcorrFig.show()
    return theta, corr, corrErr
|
linan7788626/brut
|
scripts/calc_corr.py
|
Python
|
mit
| 29,259
|
[
"Gaussian"
] |
3a7daab8c2770b05e216536accb19e0a3940f7634b41b9f342e044ebc4777bd2
|
# algorithm to implement max-flow min-cut
def bfs(N, start, end, dist):
    """Find a shortest augmenting path from start to end.

    Despite the name, this is Dijkstra's algorithm: dist(u, v) returns the
    edge weight from u to v, or None when there is no usable edge.
    Returns the path as a vertex list [start, ..., end], or None when end
    is unreachable.  (Uses range() so the code runs on Python 2 and 3;
    the original used the Python-2-only xrange().)
    """
    visited = [False] * N
    distance = [None] * N   # best known distance from start, None = infinity
    pointer = [None] * N    # predecessor on the best known path
    distance[start] = 0
    while not visited[end]:
        # pick the unvisited vertex with the smallest known distance.
        latest_frontier = None  # next vertex to visit.
        for i in range(N):
            if distance[i] is None or visited[i]: continue
            if latest_frontier is None or distance[latest_frontier] > distance[i]:
                latest_frontier = i
        if latest_frontier is None: return None  # no path.
        visited[latest_frontier] = True
        for i in range(N):
            # update the known minimum distance.
            if dist(latest_frontier, i) is None: continue
            candidate_distance = distance[latest_frontier] + dist(latest_frontier, i)
            if distance[i] is None or distance[i] > candidate_distance:
                distance[i] = candidate_distance
                pointer[i] = latest_frontier
    # reconstruct the path (start, 1, 2, 3, ... end)
    path = []
    cur_index = end
    path.append(end)
    while pointer[cur_index] is not None:
        path.append(pointer[cur_index])
        cur_index = pointer[cur_index]
    path.reverse()
    return path
def find_max_capacity(path, dist):
    """Return the bottleneck (minimum edge weight) along path.

    Returns None when the path has fewer than two vertices.  Uses range()
    for Python 2/3 compatibility (original used xrange()).
    """
    max_capacity = None
    for i in range(len(path) - 1):
        if max_capacity is None: max_capacity = dist(path[i], path[i+1])
        else: max_capacity = min(max_capacity, dist(path[i], path[i+1]))
    return max_capacity
def max_flow_min_cut(graph, start = 0, end = None):
    """Compute the maximum flow from start to end (Ford-Fulkerson with
    shortest augmenting paths).

    graph is an adjacency matrix of flow capacities: graph[i][j] is the
    capacity of edge i->j, or None when there is no edge.  end defaults
    to the last vertex.  Returns the value of the maximum flow.
    Uses range() for Python 2/3 compatibility (original used xrange()).
    """
    N = len(graph)
    if end is None: end = N - 1
    f = new_graph(N, 0)  # flow graph - initialize to 0.
    def residual_distance(i, j):
        'utility function for the residual graph. should be positive or None (has no edge or no residual capacity).'
        if graph[i][j] is not None: v = graph[i][j] - f[i][j]
        elif graph[j][i] is not None: v = f[j][i]
        else: v = 0
        return v if v > 0 else None
    while True:
        augpath = bfs(N, start, end, residual_distance)
        if augpath is None:  # terminal condition - no more augmenting path. return the flow value.
            return sum(f[start])
        else:  # otherwise adjust the flow along the augpath.
            max_capacity = find_max_capacity(augpath, residual_distance)
            for i in range(len(augpath) - 1):
                f[augpath[i]][augpath[i+1]] += max_capacity
                f[augpath[i+1]][augpath[i]] -= max_capacity
def new_graph(N, value=None):
    """Return a new N x N adjacency matrix with every cell set to value.

    Rows are independent lists, so mutating one cell never aliases
    another row.  Uses range() for Python 2/3 compatibility.
    """
    return [[value] * N for i in range(N)]
def test_1():
    # using the example in p. 710 in CLRS, 3rd edition.
    cities = ['vancouver', 'edmonton', 'calgary', 'saskatoon', 'regina', 'winnipeg']
    # name to id mapping
    n = dict((name, index) for index, name in enumerate(cities))
    g = new_graph(6)
    capacities = [
        ('vancouver', 'edmonton', 16),
        ('vancouver', 'calgary', 13),
        ('edmonton', 'saskatoon', 12),
        ('calgary', 'edmonton', 4),
        ('calgary', 'regina', 14),
        ('saskatoon', 'winnipeg', 20),
        ('saskatoon', 'calgary', 9),
        ('regina', 'saskatoon', 7),
        ('regina', 'winnipeg', 4),
    ]
    for src, dst, cap in capacities:
        g[n[src]][n[dst]] = cap
    assert max_flow_min_cut(g) == 23
def test_2():
    # a two-node graph with no edges carries no flow.
    assert max_flow_min_cut(new_graph(2)) == 0
def test_3():
    # a simple chain is limited by its narrowest edge.
    g = new_graph(4)
    for src, dst, cap in [(0, 1, 1), (1, 2, 2), (2, 3, 3)]:
        g[src][dst] = cap
    assert max_flow_min_cut(g) == 1
def test_4():
    # two wide parallel routes; the tiny cross edge does not matter.
    g = new_graph(4)
    wide = 1000000
    g[0][1] = wide
    g[0][2] = wide
    g[1][3] = wide
    g[1][2] = 1
    g[2][3] = wide
    assert max_flow_min_cut(g) == 2000000
# Run the self-tests when the module is loaded.
test_1()
test_2()
test_3()
test_4()
|
jeeyoungk/exercise
|
python/maxflow.py
|
Python
|
mit
| 3,762
|
[
"VisIt"
] |
3cb733ccb94edf2d1213feae7bb43c7e3ad77c189b7aadd0a0c2f4f0375c4c26
|
import requests
import time
# Exact response body OSCAR returns on any API-level error.
ERROR_STRING = '"ERROR, event logged"'
# Base URL of the OSCAR REST proxy all requests go through.
OSCAR_API_BASE_URL = 'http://burdellanswers.com:3000/api/oscar/'
DEPARTMENTS = { 'ACCT' : 'Accounting',
'AE' : 'Aerospace Engineering',
'AS' : 'Air Force Aerospace Studies',
'APPH' : 'Applied Physiology',
'ASE' : 'Applied Systems Engineering',
'ARBC' : 'Arabic',
'ARCH' : 'Architecture',
'BIOL' : 'Biology',
'BMEJ' : 'Biomedical Engineering Joint Emory PKU',
'BME' : 'Biomedical Engineering',
'BMEM' : 'Biomedical Engineering Joint Emory',
'BC' : 'Building Construction',
'CETL' : 'Center Enhancement Teach/Learn',
'CHBE' : 'Chemical % Biomolecular Engineering',
'CHEM' : 'Chemistry',
'CHIN' : 'Chinese',
'CP' : 'City Planning',
'CEE' : 'Civil and Environmental Engineering',
'COA' : 'College of Architecture',
'COE' : 'College of Engineering',
'CX' : 'Computational Mod, Sim, & Data',
'CSE' : 'Computational Science and Engineering',
'CS' : 'Computer Science',
'COOP' : 'Co-op',
'UCGA' : 'Cross-enrollment',
'EAS' : 'Earth and Atmospheric Sciences',
'ECON' : 'Economics',
'ECE' : 'Electrical and Computer Engineering',
'ENGL' : 'English',
'FS' : 'Foreign Studies',
'FREN' : 'French',
'GT' : 'Georgia Tech',
'GTL' : 'Georgia Tech Lorraine',
'GRMN' : 'German',
'HPS' : 'Health Performance Science',
'HS' : 'Health Systems',
'HIST' : 'History',
'HTS' : 'History, Technology, and Society',
'ISYE' : 'Industrial and Systems Engineering',
'ID' : 'Industrial Design',
'INTA' : 'International Affairs',
'IL' : 'International Logistics',
'INTN' : 'Internship',
'JAPN' : 'Japanese',
'KOR' : 'Korean',
'LS' : 'Learning Support',
'LING' : 'Linguistics',
'LCC' : 'Literature, Communication, and Culture',
'MGT' : 'Management',
'MOT' : 'Management of Technology',
'MSE' : 'Materials Science and Engineering',
'MATH' : 'Mathematics',
'ME' : 'Mechanical Engineering',
'MP' : 'Medical Physics',
'MSL' : 'Military Science and Leadership',
'ML' : 'Modern Languages',
'MUSI' : 'Music',
'NS' : 'Naval Science',
'NRE' : 'Nuclear and Radiological Engineering',
'PERS' : 'Persian',
'PHIL' : 'Philosophy',
'PHYS' : 'Physics',
'POL' : 'Political Science',
'PTFE' : 'Polymer, Texture, and Fiber Engineering',
'DOPP' : 'Professional Practice',
'PSYC' : 'Psychology',
'PUBP' : 'Public Policy',
'RGTR' : 'Regent\'s Reading Skills',
'RGTE' : 'Regent\'s Writing Skills',
'RUSS' : 'Russian',
'SOC' : 'Sociology',
'SPAN' : 'Spanish' }
# Lower-cased department codes, used to validate user-supplied input.
# Plain iteration over the dict yields keys on both Python 2 and 3
# (dict.iterkeys() was removed in Python 3), and since the comprehension
# variable no longer leaks into module scope on Python 3, the old
# 'del x' workaround is unnecessary (it would raise NameError there).
DEPARTMENT_LIST = [code.lower() for code in DEPARTMENTS]
class OscarException(Exception):
    """Raised for any OSCAR API failure (bad input, HTTP error, API error)."""

    def __init__(self, message):
        # Forward to Exception so .args is populated and the exception
        # behaves consistently with str()/repr()/pickling.
        super(OscarException, self).__init__(message)
        self.message = message

    def __str__(self):
        return self.message
class OscarCourse:
    """A course in the OSCAR catalogue; details are fetched from the API
    when the object is constructed."""

    def __init__(self, department, number):
        self.department = department
        self.course_number = number
        info_dict = _get_course_info(department, number)
        self.credit_hours = info_dict['creditHours'][0] or 0
        self.description = info_dict['description']
        # BUG FIX: str.find() returns 0 for a match at index 0, so the old
        # 'find(...) > 0' tests wrongly reported False whenever the flag
        # was the first character of grade_basis.  Use membership instead.
        self.is_auditable = 'A' in info_dict['grade_basis']
        self.is_letter_gradeable = 'L' in info_dict['grade_basis']
        self.is_pass_failable = 'P' in info_dict['grade_basis']
        self.lab_hours = info_dict['labHours'][0] or 0
        self.lecture_hours = info_dict['lectureHours'][0] or 0
        self.name = info_dict['name']

    def get_sections(self, year, semester):
        """Return a list of OscarCourseSection for this course in the given
        year/semester."""
        sections_dict = _get_course_sections(self.department, self.course_number,
                                             year, semester)
        sections = []
        for section in sections_dict:
            sections.append(OscarCourseSection(self.department, self.course_number,
                                               year, semester, section['crn'],
                                               section['where']))
        return sections
class OscarCourseSection:
    """A single section (CRN) of a course, with live seat/waitlist counts."""

    def __init__(self, department, course_number, year, semester, crn, location_list):
        self.department = department
        self.course_number = course_number
        self.year = year
        self.semester = semester
        self.crn = crn
        info_dict = _get_crn_info(department, course_number, year, semester, crn)
        self.name = info_dict['name']
        self.section = info_dict['section']
        self._apply_counts(info_dict)
        self.schedule = OscarCourseSchedule(location_list)

    def _apply_counts(self, info_dict):
        # Copy the seat and waitlist counters out of an API response
        # (shared by __init__ and refresh_seats_and_waitlist).
        self.seats_total = info_dict['seats']['capacity']
        self.seats_filled = info_dict['seats']['actual']
        self.seats_remaining = info_dict['seats']['remaining']
        self.waitlist_total = info_dict['waitlist']['capacity']
        self.waitlist_filled = info_dict['waitlist']['actual']
        self.waitlist_remaining = info_dict['waitlist']['remaining']

    def refresh_seats_and_waitlist(self):
        """Re-query OSCAR and update the seat/waitlist counters in place."""
        info_dict = _get_crn_info(self.department, self.course_number,
                                  self.year, self.semester, self.crn)
        self._apply_counts(info_dict)
class OscarCourseSchedule:
    """The weekly meeting schedule of a course section.

    Built from the API's location list; each entry records a meeting's
    start/end time, professor, type, room and day string (e.g. 'MWF').
    """

    def __init__(self, location_list):
        self.class_list = []
        for entry in location_list:
            out_dict = {}
            out_dict['start_time'] = time.strptime(entry['time'][0], '%H:%M')
            out_dict['end_time'] = time.strptime(entry['time'][1], '%H:%M')
            out_dict['professor'] = entry['prof']
            out_dict['type'] = entry['type']
            out_dict['location'] = entry['location']
            out_dict['days'] = entry['day']
            self.class_list.append(out_dict)

    def is_in_class_at_time(self, time_in):
        """Return True if this section meets at the given struct_time.

        The day comes from time_in.tm_wday; only hours/minutes are
        compared (seconds are ignored), with strict boundaries.
        """
        day_of_week = 'MTWRFSU'[time_in.tm_wday]  # tm_wday is in [0, 6], 0 is monday 6 is sunday
        # BUG FIX: the old code assigned to time_in.tm_wday, which raises
        # because struct_time is immutable, and used find(...) > 0, which
        # missed matches at index 0 (Mondays, or a monday meeting day).
        # Compare only the time of day instead.
        clock = (time_in.tm_hour, time_in.tm_min)
        for session in self.class_list:
            # do we go to this class today?
            if day_of_week in session['days']:
                start = (session['start_time'].tm_hour, session['start_time'].tm_min)
                end = (session['end_time'].tm_hour, session['end_time'].tm_min)
                # if so, is this time within the time that we are in class?
                if start < clock < end:
                    return True
        return False
def get_courses_by_department(department):
    """Lazily yield an OscarCourse for every course a department offers."""
    for entry in _get_courses_by_department(department):
        yield OscarCourse(department, entry['number'])
def get_course_info(department, course_number):
    """Return an OscarCourse describing the given course (queries OSCAR)."""
    return OscarCourse(department, course_number)

# this is a "private" function and shouldn't be used outside this file. The above
# function wraps this functionality for outside use.
def _get_courses_by_department(department):
    """Fetch the raw course list for a department from the OSCAR API.

    Raises OscarException for an unknown department, an HTTP error
    status, or an API-level error response.
    """
    if department.lower() not in DEPARTMENT_LIST:
        raise OscarException('Invalid department: {}'.format(department))
    response = requests.get(OSCAR_API_BASE_URL + department.lower())
    if response.status_code >= 300:
        # message fixed to match the actual check (>= 300, not > 300)
        raise OscarException('HTTP response had status code >= 300: {}'.format(response.status_code))
    if response.text == ERROR_STRING:
        raise OscarException('OSCAR replied with an error message')
    course_list = response.json()
    return course_list
def _get_course_info(department, course_number):
    """Fetch the raw info dict for a single course from the OSCAR API."""
    if not department.lower() in DEPARTMENT_LIST:
        raise OscarException('Invalid department: {}'.format(department))
    url = OSCAR_API_BASE_URL + department.lower() + "/" + course_number
    response = requests.get(url)
    if response.status_code >= 300:
        raise OscarException('HTTP response had status code > 300: {}'.format(response.status_code))
    if response.text == ERROR_STRING:
        raise OscarException('No such course exists: {} {}'.format(department, course_number))
    return response.json()
def _get_course_sections(department, course_number, year, semester):
    """Fetch the raw section list for a course in a given year/semester.

    Raises OscarException for a bad semester, unknown department, HTTP
    error status, or API-level error response.
    """
    if semester not in ['fall', 'spring', 'summer']:
        raise OscarException('Invalid semester, must be fall, spring, or summer: {}'.format(semester))
    if department.lower() not in DEPARTMENT_LIST:
        raise OscarException('Invalid department: {}'.format(department))
    # Lower-case the department in the URL for consistency with the other
    # endpoints (validation above is already case-insensitive).
    response = requests.get(OSCAR_API_BASE_URL + '{}/{}/{}/{}'.format(department.lower(), course_number,
                                                                      year, semester))
    if response.status_code >= 300:
        # message fixed to match the actual check (>= 300, not > 300)
        raise OscarException('HTTP response had status code >= 300: {}'.format(response.status_code))
    if response.text == ERROR_STRING:
        raise OscarException('OSCAR replied with an error message')
    course_sections = response.json()
    return course_sections
def _get_crn_info(department, course_number, year, semester, crn_number):
    """Fetch the raw info dict for one CRN (course section) in a term.

    Raises OscarException for a bad semester, unknown department, HTTP
    error status, or API-level error response (usually a bad CRN).
    """
    if semester not in ['fall', 'spring', 'summer']:
        raise OscarException('Invalid semester, must be fall, spring, or summer: {}'.format(semester))
    if department.lower() not in DEPARTMENT_LIST:
        raise OscarException('Invalid department: {}'.format(department))
    # Lower-case the department in the URL for consistency with the other endpoints.
    response = requests.get(OSCAR_API_BASE_URL + '{}/{}/{}/{}/{}'.format(department.lower(), course_number,
                                                                         year, semester, crn_number))
    if response.status_code >= 300:
        # message fixed to match the actual check (>= 300, not > 300)
        raise OscarException('HTTP response had status code >= 300: {}'.format(response.status_code))
    if response.text == ERROR_STRING:
        raise OscarException('OSCAR replied with an error message, most likely the CRN is incorrect: {}'.format(crn_number))
    crn_info = response.json()
    return crn_info
|
swgillespie/pyoscar
|
pyoscar.py
|
Python
|
mit
| 10,966
|
[
"ASE"
] |
f4892e42a101d5ad1611a9c49bef3616a491a05c4f4b9e9644c73fd719eefe9e
|
#! /usr/bin/python
"""versioneer.py
(like a rocketeer, but for versions)
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Version: 0.7+
This file helps distutils-based projects manage their version number by just
creating version-control tags.
For developers who work from a VCS-generated tree (e.g. 'git clone' etc),
each 'setup.py version', 'setup.py build', 'setup.py sdist' will compute a
version number by asking your version-control tool about the current
checkout. The version number will be written into a generated _version.py
file of your choosing, where it can be included by your __init__.py
For users who work from a VCS-generated tarball (e.g. 'git archive'), it will
compute a version number by looking at the name of the directory created when
te tarball is unpacked. This conventionally includes both the name of the
project and a version number.
For users who work from a tarball built by 'setup.py sdist', it will get a
version number from a previously-generated _version.py file.
As a result, loading code directly from the source tree will not result in a
real version. If you want real versions from VCS trees (where you frequently
update from the upstream repository, or do new development), you will need to
do a 'setup.py version' after each update, and load code from the build/
directory.
You need to provide this code with a few configuration values:
versionfile_source:
A project-relative pathname into which the generated version strings
should be written. This is usually a _version.py next to your project's
main __init__.py file. If your project uses src/myproject/__init__.py,
this should be 'src/myproject/_version.py'. This file should be checked
in to your VCS as usual: the copy created below by 'setup.py
update_files' will include code that parses expanded VCS keywords in
generated tarballs. The 'build' and 'sdist' commands will replace it with
a copy that has just the calculated version string.
versionfile_build:
Like versionfile_source, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have package_dir={'myproject': 'src/myproject'},
then you will probably have versionfile_build='myproject/_version.py' and
versionfile_source='src/myproject/_version.py'.
tag_prefix: a string, like 'PROJECTNAME-', which appears at the start of all
VCS tags. If your tags look like 'myproject-1.2.0', then you
should use tag_prefix='myproject-'. If you use unprefixed tags
like '1.2.0', this should be an empty string.
parentdir_prefix: a string, frequently the same as tag_prefix, which
appears at the start of all unpacked tarball filenames. If
your tarball unpacks into 'myproject-1.2.0', this should
be 'myproject-'.
To use it:
1: include this file in the top level of your project
2: make the following changes to the top of your setup.py:
import versioneer
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
4: run 'setup.py update_files', which will create _version.py, and will
append the following to your __init__.py:
from _version import __version__
5: modify your MANIFEST.in to include versioneer.py
6: add both versioneer.py and the generated _version.py to your VCS
"""
import os, sys, re
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
# Project-specific configuration: the setup.py that imports this module
# must set these before calling get_version()/get_cmdclass().
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None
# Only git is supported by this versioneer.
VCS = "git"
# Set to True inside the generated _version.py copy of this code
# (see LONG_VERSION_PY below), False when running as versioneer.py.
IN_LONG_VERSION_PY = False
LONG_VERSION_PY = '''
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.7+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
import subprocess
import sys
def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print("discarding '%%s', no digits" %% ref)
refs.discard(ref)
# Assume all version tags have a digit. git's %%d expansion
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print("remaining refs: %%s" %% ",".join(sorted(refs)))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.abspath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.abspath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.abspath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if not ver:
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if not ver:
ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
verbose)
if not ver:
ver = default
return ver
'''
import subprocess
import sys
def run_command(args, cwd=None, verbose=False):
    """Run an external command; return its stripped stdout, or None on failure."""
    try:
        # remember shell=False, so use git.cmd on windows, not just git
        proc = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
    except EnvironmentError:
        err = sys.exc_info()[1]
        if verbose:
            print("unable to run %s" % args[0])
            print(err)
        return None
    output = proc.communicate()[0].strip()
    if sys.version >= '3':
        output = output.decode()
    if proc.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return output
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
    """Read the git_refnames/git_full keyword values out of a _version.py.

    Scanning with a regexp (rather than importing the file) keeps
    setup.py from executing _version.py.  Returns a dict with any of the
    keys "refnames" and "full" that were found; a missing or unreadable
    file yields {}.
    """
    variables = {}
    extract = re.compile(r'=\s*"(.*)"')
    try:
        for line in open(versionfile_source, "r").readlines():
            stripped = line.strip()
            if stripped.startswith("git_refnames ="):
                match = extract.search(line)
                if match:
                    variables["refnames"] = match.group(1)
            elif stripped.startswith("git_full ="):
                match = extract.search(line)
                if match:
                    variables["full"] = match.group(1)
    except EnvironmentError:
        pass
    return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
    """Derive {"version", "full"} from expanded git-archive keywords.

    Returns {} when the keywords were never expanded (i.e. not an
    unpacked git-archive tarball).  Falls back to the full revision id
    when no ref matches tag_prefix.
    """
    refnames = variables["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("variables are unexpanded, not using")
        return {}  # unexpanded, so not in an unpacked git-archive tarball
    refs = set(name.strip() for name in refnames.strip("()").split(","))
    # Assume all version tags have a digit. git's %d expansion behaves like
    # git log --decorate=short and strips out the refs/heads/ and
    # refs/tags/ prefixes that would let us distinguish between branches
    # and tags. By ignoring refnames without digits, we filter out many
    # common branch names like "release" and "stabilization", as well as
    # "HEAD" and "master".
    for name in list(refs):
        if not re.search(r'\d', name):
            if verbose:
                print("discarding '%s', no digits" % name)
            refs.discard(name)
    if verbose:
        print("remaining refs: %s" % ",".join(sorted(refs)))
    for name in sorted(refs):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if name.startswith(tag_prefix):
            shortver = name[len(tag_prefix):]
            if verbose:
                print("picking %s" % shortver)
            return {"version": shortver,
                    "full": variables["full"].strip()}
    # no suitable tags, so we use the full revision id
    if verbose:
        print("no suitable tags, using full revision id")
    full = variables["full"].strip()
    return {"version": full, "full": full}
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
    """Compute {"version", "full"} via 'git describe' in the source tree.

    Returns {} when __file__ is unavailable, there is no .git directory
    at the computed root, git cannot be run, or the described tag does
    not start with tag_prefix.
    """
    # this runs 'git' from the root of the source tree. That either means
    # someone ran a setup.py command (and this code is in versioneer.py, so
    # IN_LONG_VERSION_PY=False, thus the containing directory is the root of
    # the source tree), or someone ran a project-specific entry point (and
    # this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
    # containing directory is somewhere deeper in the source tree). This only
    # gets called if the git-archive 'subst' variables were *not* expanded,
    # and _version.py hasn't already been rewritten with a short version
    # string, meaning we're inside a checked out source tree.
    try:
        here = os.path.abspath(__file__)
    except NameError:
        # some py2exe/bbfreeze/non-CPython implementations don't do __file__
        return {}  # not always correct
    # versionfile_source is the relative path from the top of the source tree
    # (where the .git directory might live) to this file. Invert this to find
    # the root from __file__.
    root = here
    if IN_LONG_VERSION_PY:
        # climb one directory per path component of versionfile_source
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        root = os.path.dirname(here)
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}
    GIT = "git"
    # shell=False on Windows needs the git.cmd launcher, not bare 'git'
    if sys.platform == "win32":
        GIT = "git.cmd"
    stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
                         cwd=root)
    if stdout is None:
        return {}
    if not stdout.startswith(tag_prefix):
        if verbose:
            print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
        return {}
    tag = stdout[len(tag_prefix):]
    stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
    if stdout is None:
        return {}
    full = stdout.strip()
    # carry the -dirty marker from 'describe' onto the full revision id
    if tag.endswith("-dirty"):
        full += "-dirty"
    return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
    """Derive the version from the name of the directory the source was
    unpacked into (e.g. 'myproject-1.2.0' with parentdir_prefix
    'myproject-' yields version '1.2.0'; "full" is left empty)."""
    if IN_LONG_VERSION_PY:
        # We're running from _version.py. If it's from a source tree
        # (execute-in-place), we can work upwards to find the root of the
        # tree, and then check the parent directory for a version string. If
        # it's in an installed application, there's no hope.
        try:
            here = os.path.abspath(__file__)
        except NameError:
            # py2exe/bbfreeze/non-CPython don't have __file__
            return {}  # without __file__, we have no hope
        # versionfile_source is the relative path from the top of the source
        # tree to _version.py. Invert this to find the root from __file__.
        root = here
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        # we're running from versioneer.py, which means we're running from
        # the setup.py in a source tree. sys.argv[0] is setup.py in the root.
        here = os.path.abspath(sys.argv[0])
        root = os.path.dirname(here)
    # Source tarballs conventionally unpack into a directory that includes
    # both the project name and a version string.
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
                  (root, dirname, parentdir_prefix))
        # NOTE(review): returns None here while the sibling versions_from_*
        # helpers return {} on failure; callers only test truthiness so
        # both work, but {} would be more consistent.
        return None
    return {"version": dirname[len(parentdir_prefix):], "full": ""}
import sys
def do_vcs_install(versionfile_source, ipy):
    """git-add versioneer.py, the version file and __init__.py, and make
    sure .gitattributes marks the version file with export-subst so
    'git archive' expands its keywords."""
    GIT = "git"
    if sys.platform == "win32":
        GIT = "git.cmd"
    run_command([GIT, "add", "versioneer.py"])
    run_command([GIT, "add", versionfile_source])
    run_command([GIT, "add", ipy])
    present = False
    try:
        # 'with' closes the handle even if reading fails partway through
        # (the old code leaked it in that case).
        with open(".gitattributes", "r") as f:
            for line in f.readlines():
                if line.strip().startswith(versionfile_source):
                    if "export-subst" in line.strip().split()[1:]:
                        present = True
    except EnvironmentError:
        pass
    if not present:
        with open(".gitattributes", "a+") as f:
            f.write("%s export-subst\n" % versionfile_source)
        run_command([GIT, "add", ".gitattributes"])
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.7+) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
    """Parse a generated (short) _version.py; return {"version", "full"}.

    Keys whose assignment lines are absent are simply missing from the
    result; an unreadable file yields {}.
    """
    versions = {}
    try:
        f = open(filename)
    except EnvironmentError:
        return versions
    # Close the handle deterministically (the old code never closed it).
    with f:
        for line in f.readlines():
            mo = re.match("version_version = '([^']+)'", line)
            if mo:
                versions["version"] = mo.group(1)
            mo = re.match("version_full = '([^']+)'", line)
            if mo:
                versions["full"] = mo.group(1)
    return versions
def write_to_version_file(filename, versions):
    """Write the short _version.py (SHORT_VERSION_PY filled with versions)."""
    # 'with' guarantees the handle is closed even if the write fails.
    with open(filename, "w") as f:
        f.write(SHORT_VERSION_PY % versions)
    print("set %s to '%s'" % (filename, versions["version"]))
def get_best_versions(versionfile, tag_prefix, parentdir_prefix,
                      default=DEFAULT, verbose=False):
    """Return {"version", "full"} from the best available source.

    Tries, in order: expanded git-archive keywords, a generated
    _version.py, 'git describe', the parent directory name, and finally
    the supplied default.  This is meant to work for developers using a
    source checkout, for users of a tarball created by 'setup.py sdist',
    and for users of a tarball/zipball created by 'git archive' or
    github's download-from-tag feature.

    NOTE(review): the keyword and VCS lookups read the module-level
    versionfile_source global rather than the versionfile parameter —
    preserved here, but worth confirming it is intentional.
    """
    variables = get_expanded_variables(versionfile_source)
    if variables:
        ver = versions_from_expanded_variables(variables, tag_prefix)
        if ver:
            if verbose: print("got version from expanded variable %s" % ver)
            return ver
    ver = versions_from_file(versionfile)
    if ver:
        if verbose: print("got version from file %s %s" % (versionfile, ver))
        return ver
    ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
    if ver:
        if verbose: print("got version from git %s" % ver)
        return ver
    ver = versions_from_parentdir(parentdir_prefix, versionfile_source, verbose)
    if ver:
        if verbose: print("got version from parentdir %s" % ver)
        return ver
    # BUG FIX: this used to print 'ver' (an empty dict or None at this
    # point) instead of the default value that is actually returned.
    if verbose: print("got version from default %s" % default)
    return default
def get_versions(default=DEFAULT, verbose=False):
    """Return the best {"version", "full"} for this project.

    The versioneer.* configuration globals must be set by setup.py before
    this is called.
    """
    assert versionfile_source is not None, "please set versioneer.versionfile_source"
    assert tag_prefix is not None, "please set versioneer.tag_prefix"
    assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
    return get_best_versions(versionfile_source, tag_prefix, parentdir_prefix,
                             default=default, verbose=verbose)
def get_version(verbose=False):
    """Return just the version string (see get_versions for the sources tried)."""
    return get_versions(verbose=verbose)["version"]
class cmd_version(Command):
    # distutils command: 'setup.py version' prints the computed version.
    description = "report generated version string"
    user_options = []
    boolean_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        ver = get_version(verbose=True)
        print("Version is currently: %s" % ver)
class cmd_build(_build):
    """'setup.py build' that rewrites _version.py under build/ with the
    computed version string."""
    def run(self):
        versions = get_versions(verbose=True)
        _build.run(self)
        # now locate _version.py in the new build/ directory and replace it
        # with an updated value
        target_versionfile = os.path.join(self.build_lib, versionfile_build)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        # 'with' closes the file even if the write fails (the old code
        # could leak the handle on error).
        with open(target_versionfile, "w") as f:
            f.write(SHORT_VERSION_PY % versions)
class cmd_sdist(_sdist):
    """Sdist command that embeds the generated version in the tarball."""

    def run(self):
        versions = get_versions(verbose=True)
        self._versioneer_generated_versions = versions
        # Unless we update this, the command will keep using the old version.
        self.distribution.metadata.version = versions["version"]
        return _sdist.run(self)

    def make_release_tree(self, base_dir, files):
        _sdist.make_release_tree(self, base_dir, files)
        # Now locate _version.py in the new base_dir directory (remembering
        # that it may be a hardlink) and replace it with an updated value.
        target_versionfile = os.path.join(base_dir, versionfile_source)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        with open(target_versionfile, "w") as f:
            f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
# Snippet appended to a package's __init__.py so that
# ``package.__version__`` comes from the generated _version.py.  Also used
# below as a marker to detect whether __init__.py was already modified,
# so its exact text must not change.
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
    """Create _version.py and wire it into the package __init__.py."""
    description = "modify __init__.py and create _version.py"
    user_options = []
    boolean_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
        print(" creating %s" % versionfile_source)
        with open(versionfile_source, "w") as f:
            f.write(LONG_VERSION_PY % {"DOLLAR": "$",
                                       "TAG_PREFIX": tag_prefix,
                                       "PARENTDIR_PREFIX": parentdir_prefix,
                                       "VERSIONFILE_SOURCE": versionfile_source,
                                       })
        try:
            old = open(ipy, "r").read()
        except EnvironmentError:
            old = ""
        # Only append the snippet once; its text doubles as the marker.
        if INIT_PY_SNIPPET in old:
            print(" %s unmodified" % ipy)
        else:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        do_vcs_install(versionfile_source, ipy)
def get_cmdclass():
    """Return the distutils command classes provided by versioneer."""
    cmds = {
        'version': cmd_version,
        'update_files': cmd_update_files,
        'build': cmd_build,
        'sdist': cmd_sdist,
    }
    return cmds
|
masterkorp/obfsproxy
|
versioneer.py
|
Python
|
bsd-3-clause
| 25,767
|
[
"Brian"
] |
4c54d39adb3ff998148449308cd66004230a01c3f56ad51ea0fed37f87f38d95
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module holds the old run() function which is deprecated, the
tools.run_flow() function should be used in its place."""
from __future__ import print_function
import logging
import socket
import sys
import webbrowser
import gflags
from oauth2client import client
from oauth2client import util
from oauth2client.tools import ClientRedirectHandler
from oauth2client.tools import ClientRedirectServer
from six.moves import input
# Module-level flag registry shared with the rest of the application.
FLAGS = gflags.FLAGS
# Command-line flags controlling how the OAuth redirect is handled.
gflags.DEFINE_boolean('auth_local_webserver', True,
                      ('Run a local web server to handle redirects during '
                       'OAuth authorization.'))
gflags.DEFINE_string('auth_host_name', 'localhost',
                     ('Host name to use when running a local web server to '
                      'handle redirects during OAuth authorization.'))
gflags.DEFINE_multi_int('auth_host_port', [8080, 8090],
                        ('Port to use when running a local web server to '
                         'handle redirects during OAuth authorization.'))
@util.positional(2)
def run(flow, storage, http=None):
    """Core code for a command-line application.

    The ``run()`` function is called from your application and runs
    through all the steps to obtain credentials. It takes a ``Flow``
    argument and attempts to open an authorization server page in the
    user's default web browser. The server asks the user to grant your
    application access to the user's data. If the user grants access,
    the ``run()`` function returns new credentials. The new credentials
    are also stored in the ``storage`` argument, which updates the file
    associated with the ``Storage`` object.

    It presumes it is run from a command-line application and supports the
    following flags:

    ``--auth_host_name`` (string, default: ``localhost``)
       Host name to use when running a local web server to handle
       redirects during OAuth authorization.

    ``--auth_host_port`` (integer, default: ``[8080, 8090]``)
       Port to use when running a local web server to handle redirects
       during OAuth authorization. Repeat this option to specify a list
       of values.

    ``--[no]auth_local_webserver`` (boolean, default: ``True``)
       Run a local web server to handle redirects during OAuth authorization.

    Since it uses flags make sure to initialize the ``gflags`` module before
    calling ``run()``.

    Args:
        flow: Flow, an OAuth 2.0 Flow to step through.
        storage: Storage, a ``Storage`` to store the credential in.
        http: An instance of ``httplib2.Http.request`` or something that acts
            like it.

    Returns:
        Credentials, the obtained credential.
    """
    logging.warning('This function, oauth2client.tools.run(), and the use of '
        'the gflags library are deprecated and will be removed in a future '
        'version of the library.')
    if FLAGS.auth_local_webserver:
        success = False
        port_number = 0
        # Try each configured port in order until one can be bound.
        for port in FLAGS.auth_host_port:
            port_number = port
            try:
                httpd = ClientRedirectServer((FLAGS.auth_host_name, port),
                                             ClientRedirectHandler)
            except socket.error:
                # Port in use or otherwise unavailable; try the next one.
                pass
            else:
                success = True
                break
        FLAGS.auth_local_webserver = success
        if not success:
            # BUGFIX: the message previously named port 9090, which is never
            # tried; the flag default is [8080, 8090].
            print('Failed to start a local webserver listening on either port 8080')
            print('or port 8090. Please check your firewall settings and locally')
            print('running programs that may be blocking or using those ports.')
            print()
            print('Falling back to --noauth_local_webserver and continuing with')
            print('authorization.')
            print()
    if FLAGS.auth_local_webserver:
        oauth_callback = 'http://%s:%s/' % (FLAGS.auth_host_name, port_number)
    else:
        # Out-of-band flow: user copies the code manually.
        oauth_callback = client.OOB_CALLBACK_URN
    flow.redirect_uri = oauth_callback
    authorize_url = flow.step1_get_authorize_url()
    if FLAGS.auth_local_webserver:
        webbrowser.open(authorize_url, new=1, autoraise=True)
        print('Your browser has been opened to visit:')
        print()
        print('    ' + authorize_url)
        print()
        print('If your browser is on a different machine then exit and re-run')
        print('this application with the command-line parameter ')
        print()
        print('  --noauth_local_webserver')
        print()
    else:
        print('Go to the following link in your browser:')
        print()
        print('    ' + authorize_url)
        print()
    code = None
    if FLAGS.auth_local_webserver:
        # Block until the browser redirect hits the local server.
        httpd.handle_request()
        if 'error' in httpd.query_params:
            sys.exit('Authentication request was rejected.')
        if 'code' in httpd.query_params:
            code = httpd.query_params['code']
        else:
            print('Failed to find "code" in the query parameters of the redirect.')
            sys.exit('Try running with --noauth_local_webserver.')
    else:
        code = input('Enter verification code: ').strip()
    try:
        credential = flow.step2_exchange(code, http=http)
    except client.FlowExchangeError as e:
        sys.exit('Authentication has failed: %s' % e)
    storage.put(credential)
    credential.set_store(storage)
    print('Authentication successful.')
    return credential
|
itielshwartz/BackendApi
|
lib/oauth2client/old_run.py
|
Python
|
apache-2.0
| 6,106
|
[
"VisIt"
] |
33430b91f4cefef2bad403164ca0fa96ca089a86b2559f4d203ef4aed699517a
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import sys
from skbio.util import TestRunner
# Module-level ``test`` callable that runs this package's test suite.
test = TestRunner(__file__).test
if __name__ == '__main__':
    # Exit status 0 when the suite passes, 1 otherwise.
    sys.exit(0 if test() else 1)
|
anderspitman/scikit-bio
|
skbio/test.py
|
Python
|
bsd-3-clause
| 527
|
[
"scikit-bio"
] |
80d1f1bdabad91da70430121020aee41b30ea50990ca88479a6b876c8cd83e86
|
import sys
from galaxy.datatypes.tabular import Tabular
from galaxy.util.json import loads
class BaseDataProvider( object ):
    """
    Base class for data providers. Data providers (a) read and package data from datasets;
    and (b) write subsets of data to new datasets.
    """
    def __init__( self, converted_dataset=None, original_dataset=None, dependencies=None,
                  error_max_vals="Only the first %i values are returned." ):
        """ Create basic data provider. """
        self.converted_dataset = converted_dataset
        self.original_dataset = original_dataset
        self.dependencies = dependencies
        # Message template used when results are truncated to max_vals.
        self.error_max_vals = error_max_vals
    def has_data( self, **kwargs ):
        """
        Returns true if dataset has data in the specified genome window, false
        otherwise.
        """
        raise Exception( "Unimplemented Function" )
    def get_iterator( self, **kwargs ):
        """
        Returns an iterator that provides data in the region chrom:start-end
        """
        raise Exception( "Unimplemented Function" )
    def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
        """
        Process data from an iterator to a format that can be provided to client.
        """
        raise Exception( "Unimplemented Function" )
    def get_data( self, chrom, start, end, start_val=0, max_vals=sys.maxsize, **kwargs ):
        """
        Returns data as specified by kwargs. start_val is the first element to
        return and max_vals indicates the number of values to return.
        Return value must be a dictionary with the following attributes:
            dataset_type, data
        """
        # BUGFIX/compat: sys.maxint does not exist on Python 3; sys.maxsize
        # is available on Python 2.6+ and 3 and serves the same purpose here
        # (an effectively unbounded default).
        # NOTE(review): get_iterator is declared as (**kwargs) but called
        # positionally here; subclasses must accept (chrom, start, end).
        iterator = self.get_iterator( chrom, start, end )
        return self.process_data( iterator, start_val, max_vals, **kwargs )
    def write_data_to_file( self, filename, **kwargs ):
        """
        Write data in region defined by chrom, start, and end to a file.
        """
        raise Exception( "Unimplemented Function" )
class ColumnDataProvider( BaseDataProvider ):
    """ Data provider for columnar data.

    Streams a tabular dataset line by line, returning the requested
    columns plus running per-column statistics (min/max/count/sum, and
    mean/median for numeric columns).
    """
    # Hard cap on the number of data lines a single request may return.
    MAX_LINES_RETURNED = 30000
    def __init__( self, original_dataset, max_lines_returned=MAX_LINES_RETURNED ):
        # Compatibility check.
        if not isinstance( original_dataset.datatype, Tabular ):
            raise Exception( "Data provider can only be used with tabular data" )
        # Attribute init.
        self.original_dataset = original_dataset
        # allow throttling
        self.max_lines_returned = max_lines_returned
    def get_data( self, columns=None, start_val=0, max_vals=None, skip_comments=True, **kwargs ):
        """
        Returns data from specified columns in dataset. Format is list of lists
        where each list is a line of data.

        ``columns`` is a JSON-encoded list of column indices (required).
        ``start_val`` is the first line index to return; ``max_vals`` caps
        the number of lines returned.  Both are clamped below.
        """
        if not columns:
            raise TypeError( 'parameter required: columns' )
        #TODO: validate kwargs
        # Clamp max_vals to the throttle limit; fall back to the limit when
        # the value is missing or unparsable.
        try:
            max_vals = int( max_vals )
            max_vals = min([ max_vals, self.max_lines_returned ])
        except ( ValueError, TypeError ):
            max_vals = self.max_lines_returned
        # Clamp start_val to be non-negative.
        try:
            start_val = int( start_val )
            start_val = max([ start_val, 0 ])
        except ( ValueError, TypeError ):
            start_val = 0
        # skip comment lines (if any/avail)
        # pre: should have original_dataset and
        if( skip_comments
            and self.original_dataset.metadata.comment_lines
            and start_val < self.original_dataset.metadata.comment_lines ):
            start_val = int( self.original_dataset.metadata.comment_lines )
        # columns is an array of ints for now (should handle column names later)
        columns = loads( columns )
        # Validate every requested index against the dataset's column count.
        for column in columns:
            assert( ( column < self.original_dataset.metadata.columns )
            and ( column >= 0 ) ),(
                "column index (%d) must be positive and less" % ( column )
                + " than the number of columns: %d" % ( self.original_dataset.metadata.columns ) )
        #print columns, start_val, max_vals, skip_comments, kwargs
        # set up the response, column lists
        response = {}
        response[ 'data' ] = data = [ [] for column in columns ]
        # Per-column running statistics, filled in while streaming lines.
        response[ 'meta' ] = meta = [{
            'min' : None,
            'max' : None,
            'count' : 0,
            'sum' : 0
        } for column in columns ]
        column_types = [ self.original_dataset.metadata.column_types[ column ] for column in columns ]
        # function for casting by column_types
        def cast_val( val, type ):
            """ Cast value based on type. Return None if can't be cast """
            if type == 'int':
                try: val = int( val )
                except: return None
            elif type == 'float':
                try: val = float( val )
                except: return None
            return val
        returning_data = False
        f = open( self.original_dataset.file_name )
        #TODO: add f.seek if given fptr in kwargs
        for count, line in enumerate( f ):
            # check line v. desired start, end
            if count < start_val:
                continue
            if ( count - start_val ) >= max_vals:
                break
            returning_data = True
            # Whitespace-split fields; column index beyond the line yields None.
            fields = line.split()
            fields_len = len( fields )
            #NOTE: this will return None/null for abberrant column values (including bad indeces)
            for index, column in enumerate( columns ):
                column_val = None
                column_type = column_types[ index ]
                if column < fields_len:
                    column_val = cast_val( fields[ column ], column_type )
                if column_val != None:
                    # if numeric, maintain min, max, sum
                    if( column_type == 'float' or column_type == 'int' ):
                        if( ( meta[ index ][ 'min' ] == None ) or ( column_val < meta[ index ][ 'min' ] ) ):
                            meta[ index ][ 'min' ] = column_val
                        if( ( meta[ index ][ 'max' ] == None ) or ( column_val > meta[ index ][ 'max' ] ) ):
                            meta[ index ][ 'max' ] = column_val
                        meta[ index ][ 'sum' ] += column_val
                # maintain a count - for other stats
                meta[ index ][ 'count' ] += 1
                data[ index ].append( column_val )
        # Record where reading stopped (last line index and file position)
        # so callers can page through the file.
        response[ 'endpoint' ] = dict( last_line=( count - 1 ), file_ptr=f.tell() )
        f.close()
        if not returning_data: return None
        # Second pass over the collected columns: derive mean and median
        # for numeric columns.
        for index, meta in enumerate( response[ 'meta' ] ):
            column_type = column_types[ index ]
            count = meta[ 'count' ]
            if( ( column_type == 'float' or column_type == 'int' )
            and count ):
                meta[ 'mean' ] = float( meta[ 'sum' ] ) / count
                sorted_data = sorted( response[ 'data' ][ index ] )
                # NOTE(review): ``count / 2`` relies on Python 2 integer
                # division; under Python 3 this yields a float index.
                middle_index = ( count / 2 ) - 1
                if count % 2 == 0:
                    meta[ 'median' ] = ( ( sorted_data[ middle_index ] + sorted_data[( middle_index + 1 )] ) / 2.0 )
                else:
                    meta[ 'median' ] = sorted_data[ middle_index ]
        # ugh ... metadata_data_lines is not a reliable source; hafta have an EOF
        return response
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/visualization/data_providers/basic.py
|
Python
|
gpl-3.0
| 7,444
|
[
"Galaxy"
] |
8b83b3335225bfb18c37da57cf8f2ba43e9e7c67807d2af48b1588139cd2fda0
|
import unittest
from kalliope.core.SignalModule import MissingParameter
from kalliope.core.Models import Brain
from kalliope.core.Models import Neuron
from kalliope.core.Models import Synapse
from kalliope.core.Models.Signal import Signal
from kalliope.signals.geolocation.geolocation import Geolocation
class Test_Geolocation(unittest.TestCase):
    """Tests for the Geolocation signal: parameter validation and
    collection of synapses triggered by geolocation."""

    def test_check_geolocation_valid(self):
        # All three required keys are present.
        parameters = ["latitude", "longitude", "radius"]
        self.assertTrue(Geolocation.check_parameters(parameters))

    def test_check_geolocation_valid_with_other(self):
        # Unrelated extra keys must not break validation.
        parameters = ["latitude", "longitude", "radius", "kalliope", "random"]
        self.assertTrue(Geolocation.check_parameters(parameters))

    def test_check_geolocation_no_radius(self):
        parameters = ["latitude", "longitude", "kalliope", "random"]
        self.assertFalse(Geolocation.check_parameters(parameters))

    def test_check_geolocation_no_latitude(self):
        parameters = ["longitude", "radius", "kalliope", "random"]
        self.assertFalse(Geolocation.check_parameters(parameters))

    def test_check_geolocation_no_longitude(self):
        parameters = ["latitude", "radius", "kalliope", "random"]
        self.assertFalse(Geolocation.check_parameters(parameters))

    def test_get_list_synapse_with_geolocation(self):
        # Two synapses; only the first carries a geolocation signal.
        geo_signal = Signal(name="geolocation",
                            parameters={"latitude": 66,
                                        "longitude": 66,
                                        "radius": 66})
        order_signal = Signal(name="order",
                              parameters="this is the second sentence")
        geo_synapse = Synapse(
            name="Synapse1",
            neurons=[Neuron(name='neurone1', parameters={'var1': 'val1'}),
                     Neuron(name='neurone2', parameters={'var2': 'val2'})],
            signals=[geo_signal])
        other_synapse = Synapse(
            name="Synapse2",
            neurons=[Neuron(name='neurone3', parameters={'var3': 'val3'}),
                     Neuron(name='neurone4', parameters={'var4': 'val4'})],
            signals=[order_signal])
        # Stubbing the Geolocation Signal with the brain
        geo = Geolocation()
        geo.brain = Brain(synapses=[geo_synapse, other_synapse])
        geo.run()
        self.assertEqual([geo_synapse], geo.list_synapses_with_geolocalion)

    def test_get_list_synapse_with_raise_missing_parameters(self):
        # The geolocation signal lacks "latitude", so run() must raise.
        bad_signal = Signal(name="geolocation",
                            parameters={"longitude": 66, "radius": 66})
        order_signal = Signal(name="order",
                              parameters="this is the second sentence")
        bad_synapse = Synapse(
            name="Synapse1",
            neurons=[Neuron(name='neurone1', parameters={'var1': 'val1'}),
                     Neuron(name='neurone2', parameters={'var2': 'val2'})],
            signals=[bad_signal])
        other_synapse = Synapse(
            name="Synapse2",
            neurons=[Neuron(name='neurone3', parameters={'var3': 'val3'}),
                     Neuron(name='neurone4', parameters={'var4': 'val4'})],
            signals=[order_signal])
        # Stubbing the Geolocation Signal with the brain
        geo = Geolocation()
        geo.brain = Brain(synapses=[bad_synapse, other_synapse])
        with self.assertRaises(MissingParameter):
            geo.run()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
kalliope-project/kalliope
|
kalliope/signals/geolocation/tests/test_geolocalisation.py
|
Python
|
gpl-3.0
| 3,673
|
[
"NEURON"
] |
58f38d2eb38acbd364253fe2e01bdfa92f1b31400ca1f835d810a452a204cc98
|
"""
Here we generate the interaction tables for the dimerized gromacs input.
If PME are used, the tables will consider it and remove half of it for the beads.
Otherwise the standard coulomb table is passed.
"""
|
marckn/dimerizer
|
dimerizer/tables/__init__.py
|
Python
|
gpl-3.0
| 214
|
[
"Gromacs"
] |
47a1aa82cfdbdc46b351a9705ee60bafb000279ba8df405ca2c0a397e8c3df3f
|
# Copyright 2008 by Norbert Dojer. All rights reserved.
# Adapted by Bartek Wilczynski.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Approximate calculation of appropriate thresholds for motif finding
"""
import math,random
class ScoreDistribution:
    """Class representing approximate score distribution for a given motif.

    Utilizes a dynamic programming approach to calculate the distribution of
    scores with a predefined precision. Provides a number of methods for
    calculating thresholds for motif occurrences.
    """
    def __init__(self, motif, precision=10**3):
        # Score axis: n_points equally spaced bins spanning
        # [min_score, min_score + interval].
        self.min_score = min(0.0, motif.min_score())
        self.interval = max(0.0, motif.max_score()) - self.min_score
        self.n_points = precision * motif.length
        self.step = self.interval / (self.n_points - 1)
        # Densities under the motif model (mo) and the background (bg);
        # both start as a point mass at score 0.
        self.mo_density = [0.0] * self.n_points
        self.mo_density[-self._index_diff(self.min_score)] = 1.0
        self.bg_density = [0.0] * self.n_points
        self.bg_density[-self._index_diff(self.min_score)] = 1.0
        self.ic = motif.ic()
        # Convolve in the score contribution of each motif position.
        for lo, mo in zip(motif.log_odds(), motif.pwm()):
            self.modify(lo, mo, motif.background)

    def _index_diff(self, x, y=0.0):
        """Return the number of bins spanned by the score difference x - y."""
        return int((x - y + 0.5 * self.step) // self.step)

    def _add(self, i, j):
        """Shift bin i by j bins, clamping to the valid index range."""
        return max(0, min(self.n_points - 1, i + j))

    def modify(self, scores, mo_probs, bg_probs):
        """Convolve one motif position into both densities.

        ``scores`` maps each letter to its log-odds score; ``mo_probs`` and
        ``bg_probs`` give the letter probabilities under the motif model and
        the background, respectively.
        """
        mo_new = [0.0] * self.n_points
        bg_new = [0.0] * self.n_points
        # COMPAT: dict.iteritems() is Python 2-only; items() behaves the
        # same here and also works on Python 3.
        for k, v in scores.items():
            d = self._index_diff(v)
            for i in range(self.n_points):
                mo_new[self._add(i, d)] += self.mo_density[i] * mo_probs[k]
                bg_new[self._add(i, d)] += self.bg_density[i] * bg_probs[k]
        self.mo_density = mo_new
        self.bg_density = bg_new

    def threshold_fpr(self, fpr):
        """
        Approximate the log-odds threshold which makes the type I error (false positive rate).
        """
        # Accumulate background probability from the top of the score axis.
        i = self.n_points
        prob = 0.0
        while prob < fpr:
            i -= 1
            prob += self.bg_density[i]
        return self.min_score + i * self.step

    def threshold_fnr(self, fnr):
        """
        Approximate the log-odds threshold which makes the type II error (false negative rate).
        """
        # Accumulate motif-model probability from the bottom of the axis.
        i = -1
        prob = 0.0
        while prob < fnr:
            i += 1
            prob += self.mo_density[i]
        return self.min_score + i * self.step

    def threshold_balanced(self, rate_proportion=1.0, return_rate=False):
        """
        Approximate the log-odds threshold which makes FNR equal to FPR times rate_proportion
        """
        i = self.n_points
        fpr = 0.0
        fnr = 1.0
        while fpr * rate_proportion < fnr:
            i -= 1
            fpr += self.bg_density[i]
            fnr -= self.mo_density[i]
        if return_rate:
            return self.min_score + i * self.step, fpr
        else:
            return self.min_score + i * self.step

    def threshold_patser(self):
        """Threshold selection mimicking the behaviour of patser (Hertz, Stormo 1999) software.

        It selects such a threshold that the log(fpr)=-ic(M)
        note: the actual patser software uses natural logarithms instead of log_2, so the numbers
        are not directly comparable.
        """
        return self.threshold_fpr(fpr=2**-self.ic)
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/Motif/Thresholds.py
|
Python
|
gpl-2.0
| 3,439
|
[
"Biopython"
] |
27bab89035c058614f69346d214c70387df09474bda80881077263ce0248720e
|
"""Miscellaneous utilities."""
from functools import reduce, partial
import os
import os.path as op
import re
import shutil
import subprocess
import warnings
import numpy as np
import mne
from mne import (pick_types, pick_info, make_sphere_model, DipoleFixed, Epochs,
Dipole, make_forward_dipole, Projection)
from mne.channels import make_standard_montage, make_dig_montage
from mne.fixes import _get_args as get_args # noqa: F401
from mne.io.constants import FIFF
from mne.utils import verbose
from h5io import read_hdf5 as _read_hdf5, write_hdf5 as _write_hdf5
# h5io read/write helpers pinned to the 'mnepython' file title so that all
# mnefun HDF5 I/O uses the same title.
read_hdf5 = partial(_read_hdf5, title='mnepython')
write_hdf5 = partial(_write_hdf5, title='mnepython')
def _fix_raw_eog_cals(raws, kind='EOG'):
    """Fix for annoying issue where EOG cals don't match."""
    # Warning: this will only produce correct EOG scalings with preloaded
    # raw data!
    def _pick(raw_):
        if kind == 'EOG':
            return pick_types(raw_.info, eeg=False, meg=False, eog=True,
                              exclude=[])
        assert kind == 'all'
        return np.arange(len(raw_.ch_names))

    picks = _pick(raws[0])
    if len(picks) == 0:
        return
    first_cals = _cals(raws[0])[picks]
    for r in raws[1:]:
        assert np.array_equal(picks, _pick(r))
        these_cals = _cals(r)[picks]
        if not np.array_equal(first_cals, these_cals):
            warnings.warn('Adjusting %s cals for %s'
                          % (kind, op.basename(r._filenames[0])))
        # Always write the reference cals back (no-op when already equal).
        _cals(r)[picks] = first_cals
def _cals(raw):
"""Helper to deal with the .cals->._cals attribute change."""
try:
return raw._cals
except AttributeError:
return raw.cals
def _get_baseline(p):
"""Helper to extract baseline from params."""
if p.baseline is None:
return p.baseline
elif p.baseline == 'individual':
baseline = (p.bmin, p.bmax)
else:
baseline = p.baseline
# XXX this and some downstream stuff (e.g., tmin=-baseline[0]) won't work
# for baseline=None, but we can fix that when someone needs it
# SMB (2020.04.20): added return None to skip baseline application.
baseline = tuple(baseline)
if baseline[0] is None:
baseline = (p.tmin, baseline[1])
if baseline[1] is None:
baseline = (baseline[0], p.tmax)
return baseline
def _handle_dict(entry, subj):
out = entry
if isinstance(entry, dict):
try:
out = entry[subj]
except KeyError:
pass
return out
def _handle_decim(decim, sfreq):
decim = np.array(decim)
assert decim.shape == ()
if decim.dtype.char in 'il':
return decim
else:
# float
assert decim.dtype.char == 'd', decim.dtype.char
got_decim = int(round(sfreq / decim))
assert np.isclose(sfreq / got_decim, decim), (sfreq, decim, got_decim)
return got_decim
def _safe_remove(fnames):
if isinstance(fnames, str):
fnames = [fnames]
for fname in fnames:
if op.isfile(fname):
os.remove(fname)
def _restrict_reject_flat(reject, flat, raw):
"""Restrict a reject and flat dict based on channel presence"""
reject = {} if reject is None else reject
flat = {} if flat is None else flat
assert isinstance(reject, dict)
assert isinstance(flat, dict)
use_reject, use_flat = dict(), dict()
for in_, out in zip([reject, flat], [use_reject, use_flat]):
use_keys = [key for key in in_.keys() if key in raw]
for key in use_keys:
out[key] = in_[key]
return use_reject, use_flat
def timestring(t):
    """Reformat time to convenient string.

    Parameters
    ----------
    t : float
        Elapsed time in seconds.

    Returns
    -------
    time : str
        The time in HH:MM:SS.
    """
    value = t * 1000  # work in milliseconds
    parts = []
    for base in (1000, 60, 60):
        value, part = divmod(value, base)
        parts.insert(0, part)
    # value now holds hours; parts holds [minutes, seconds, milliseconds].
    return "%d:%02d:%02d.%03d" % (value, parts[0], parts[1], parts[2])
def source_script(script_name):
    """Set environmental variables by source-ing a bash script.

    Parameters
    ----------
    script_name : str
        Path to the script to execute and get the environment variables from.
    """
    cmd = ['bash', '-c', 'source ' + script_name + ' > /dev/null && env']
    # BUGFIX: request text mode; on Python 3 the pipe otherwise yields
    # bytes, and ``bytes.partition('=')`` / ``os.environ[bytes_key]`` raise.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            universal_newlines=True)
    for line in proc.stdout:
        (key, _, value) = line.partition("=")
        os.environ[key] = value.strip()
    proc.communicate()
def make_montage(info, kind, check=False):
    """Build a montage mapping a standard template layout onto this recording.

    Parameters
    ----------
    info : instance of Info
        Measurement info; EEG channel names and positions are taken from it.
    kind : str
        One of 'mgh60', 'mgh70', 'uw_70', 'uw_60' — which template naming
        scheme the EEG channels follow.
    check : bool
        If True, plot the sphere, montage positions, and EEG positions with
        Mayavi for visual verification.

    Returns
    -------
    montage : DigMontage
        Montage with template positions renamed to this recording's
        channel names.
    sphere : ConductorModel
        Sphere model fit to the info.
    """
    from mne.utils import _clean_names
    import mnefun
    assert kind in ('mgh60', 'mgh70', 'uw_70', 'uw_60')
    picks = pick_types(info, meg=False, eeg=True, exclude=())
    sphere = make_sphere_model('auto', 'auto', info)
    info = pick_info(info, picks)
    to_names = info['ch_names']
    if kind in ('mgh60', 'mgh70'):
        # MGH layouts: template and recording share names modulo whitespace.
        if kind == 'mgh60':
            assert len(to_names) in (59, 60)
        else:
            assert len(to_names) in (70,)
        montage = make_standard_montage(
            kind, head_size=sphere.radius)
        from_names = _clean_names(to_names, remove_whitespace=True)
    else:
        # UW layouts: channel order comes from mnefun.ch_names_uw_*.
        assert len(to_names) == 60
        from_names = getattr(mnefun, 'ch_names_' + kind)
        montage = make_standard_montage(
            'standard_1020', head_size=sphere.radius)
    assert len(from_names) == len(to_names)
    # Re-key the template positions with this recording's channel names.
    montage_pos = montage._get_ch_pos()
    montage = make_dig_montage(
        {to: montage_pos[fro] for fro, to in zip(from_names, to_names)},
        coord_frame='head')
    eeg_pos = np.array([ch['loc'][:3] for ch in info['chs']])
    montage_pos = montage._get_ch_pos()
    montage_pos = np.array([montage_pos[name] for name in to_names])
    assert len(eeg_pos) == len(montage_pos)
    if check:
        # Visual sanity check: sphere (blue), montage (red), data (white).
        from mayavi import mlab
        mlab.figure(size=(800, 800))
        mlab.points3d(*sphere['r0'], scale_factor=2 * sphere.radius,
                      color=(0., 0., 1.), opacity=0.1, mode='sphere')
        mlab.points3d(*montage_pos.T, scale_factor=0.01,
                      color=(1, 0, 0), mode='sphere', opacity=0.5)
        mlab.points3d(*eeg_pos.T, scale_factor=0.005, color=(1, 1, 1),
                      mode='sphere', opacity=1)
    return montage, sphere
def compute_auc(dip, tmin=-np.inf, tmax=np.inf):
    """Compute the AUC values for a DipoleFixed object."""
    from mne.utils import _time_mask
    if not isinstance(dip, DipoleFixed):
        raise TypeError('dip must be a DipoleFixed, got "%s"' % (type(dip),))
    pick = pick_types(dip.info, meg=False, dipole=True)
    if len(pick) != 1:
        raise RuntimeError('Could not find dipole data')
    # Restrict to the requested time window.
    mask = _time_mask(dip.times, tmin, tmax, dip.info['sfreq'])
    data = dip.data[pick[0], mask]
    # Rectified sum scaled by sample count and the sampling period.
    return np.sum(np.abs(data)) * len(data) * (1. / dip.info['sfreq'])
def _get_epo_kwargs():
    """Return keyword arguments for Epochs.save on the installed MNE."""
    from mne.fixes import _get_args
    kwargs = {'verbose': False}
    # Only newer MNE versions accept ``overwrite``.
    if 'overwrite' in _get_args(Epochs.save):
        kwargs['overwrite'] = True
    return kwargs
def _check_reject_annot_regex(params, raw):
    """Resolve params.reject_epochs_by_annot, applying regex filtering."""
    by_annot = params.reject_epochs_by_annot
    if not isinstance(by_annot, str):
        # Plain boolean setting: pass straight through.
        assert isinstance(by_annot, bool)
        return by_annot
    pattern = re.compile(by_annot)
    n_orig = sum(desc.lower().startswith('bad_')
                 for desc in raw.annotations.description)
    keep = np.array([pattern.match(desc) is not None
                     for desc in raw.annotations.description], bool)
    print(f'  Rejecting {keep.sum()} epochs with annotation(s) via '
          f'regex matching ({n_orig} originally were BAD_ type)')
    # Drop annotations that do not match, then rename the survivors.
    raw.annotations.delete(np.where(~keep)[0])
    for idx in range(len(raw.annotations)):
        raw.annotations.description[idx] = 'BAD_REGEX'
    return True
@verbose
def make_dipole_projectors(info, pos, ori, bem, trans, verbose=None):
    """Make dipole projectors.

    Parameters
    ----------
    info : instance of Info
        The measurement info.
    pos : ndarray, shape (n_dip, 3)
        The dipole positions.
    ori : ndarray, shape (n_dip, 3)
        The dipole orientations.
    bem : instance of ConductorModel
        The conductor model to use.
    trans : instance of Transform
        The mri-to-head transformation.
    %(verbose)s

    Returns
    -------
    projs : list of Projection
        The projectors.
    """
    pos = np.atleast_2d(pos).astype(float)
    ori = np.atleast_2d(ori).astype(float)
    if pos.shape[1] != 3 or pos.shape != ori.shape:
        raise ValueError('pos and ori must be 2D, the same shape, and have '
                         f'last dimension 3, got {pos.shape} and {ori.shape}')
    # Unit-amplitude dipoles; times are just placeholder indices.
    dip = Dipole(
        pos=pos, ori=ori, amplitude=np.ones(pos.shape[0]),
        gof=np.ones(pos.shape[0]), times=np.arange(pos.shape[0]))
    info = pick_info(info, pick_types(info, meg=True, eeg=True, exclude=()))
    fwd, _ = make_forward_dipole(dip, bem, info, trans)
    # One forward-field column per dipole.
    assert fwd['sol']['data'].shape[1] == pos.shape[0]
    projs = list()
    # Build one projector per dipole, separately for MEG and EEG channels.
    for kind in ('meg', 'eeg'):
        kwargs = dict(meg=False, eeg=False, exclude=())
        kwargs.update({kind: True})
        picks = pick_types(info, **kwargs)
        if len(picks) > 0:
            ch_names = [info['ch_names'][pick] for pick in picks]
            projs.extend([
                Projection(
                    data=dict(data=p[np.newaxis, picks], row_names=None,
                              nrow=1, col_names=list(ch_names),
                              ncol=len(ch_names)),
                    kind=FIFF.FIFFV_PROJ_ITEM_DIP_FIX, explained_var=None,
                    active=False, desc=f'Dipole #{pi}')
                for pi, p in enumerate(fwd['sol']['data'].T, 1)])
    return projs
@verbose
def repeat_coreg(subject, subjects_dir=None, subjects_dir_old=None,
                 overwrite=False, verbose=None):
    """Repeat a mne coreg warping of an MRI.

    This is useful for example when bugs are fixed with
    :func:`mne.scale_mri`.

    .. warning:: This function should not be used when the parameters
                 in ``'MRI scaling parameters.cfg'`` have been changed.

    Parameters
    ----------
    subject : str
        The subject name.
    subjects_dir : str | None
        The subjects directory where the redone subject should go.
        The template/surrogate MRI must also be in this directory.
    subjects_dir_old : str | None
        The subjects directory where the old subject is.
        Can be None to use ``subjects_dir``.
    overwrite : bool
        If True (default False), overwrite an existing subject directory
        if it exists.
    verbose : str | None
        The verbose level to use.

    Returns
    -------
    out_dir : str
        The output subject directory.
    """
    subjects_dir = mne.utils.get_subjects_dir(subjects_dir)
    if subjects_dir_old is None:
        subjects_dir_old = subjects_dir
    # Scaling parameters recorded by the original mne coreg run.
    config = mne.coreg.read_mri_cfg(subject, subjects_dir_old)
    n_params = config.pop('n_params')
    assert n_params in (3, 1), n_params
    out_dir = op.join(subjects_dir, subject)
    # Redo the scaling with the same recorded parameters.
    mne.coreg.scale_mri(subject_to=subject, subjects_dir=subjects_dir,
                        labels=True, annot=True, overwrite=overwrite,
                        **config)
    # Regenerate BEM solutions for any BEM surfaces that lack one.
    for pattern in ('-5120', '-5120-5120-5120', 'inner_skull'):
        fname_bem = op.join(
            subjects_dir, subject, 'bem', f'{subject}{pattern}-bem.fif')
        fname_sol = fname_bem[:-4] + '-sol.fif'
        if op.isfile(fname_bem) and not op.isfile(fname_sol):
            bem = mne.read_bem_surfaces(fname_bem)
            sol = mne.make_bem_solution(bem)
            mne.write_bem_solution(fname_sol, sol)
    return out_dir
def convert_ANTS_surrogate(subject, trans, subjects_dir):
    """Convert an old ANTS surrogate to a modern one.

    Parameters
    ----------
    subject : str
        The subject name.
    trans : str
        The path to the subject's MRI<->head transformation.
    subjects_dir : str
        The subjects dir that contains the old ``subject`` MRI.

    Notes
    -----
    The "old" templates are the ones created by Eric Larson around 2019 and
    only include volumetric source spaces. The "modern" templates come from the
    2021 NeuroImage paper by O'Reilly et al. and use the same templates,
    just processed differently. Given a surrogate created using the old
    template, this function will create an equivalent one for the new
    template. It operates in-place by first backing up (renaming) the MRI
    directory for the subject, copying the ``-trans.fif`` file to that
    directory, and then creating the new surrogate and overwriting the old
    trans file.
    """
    # Load the morph parameters recorded when the surrogate was created.
    subjects_dir = mne.utils.get_subjects_dir(subjects_dir)
    config = mne.coreg.read_mri_cfg(subject, subjects_dir)
    n_params = config.pop('n_params')
    subject_from = config['subject_from']
    # Only the three old ANTS infant templates can be converted.
    if subject_from not in ('ANTS3-0Months3T', 'ANTS6-0Months3T',
                            'ANTS12-0Months3T'):
        raise RuntimeError('Cannot convert subject that used '
                           f'{repr(subject_from)} as a surrogate')
    # Age in months, parsed from the template name (e.g. "ANTS6-..." -> 6).
    age = int(subject_from.split('-')[0].split('S')[-1])
    assert n_params in (3, 1), n_params
    out_dir = op.join(subjects_dir, subject)
    backup_dir = op.join(subjects_dir, subject + '_old')
    if not isinstance(trans, (str, os.PathLike)):
        raise TypeError(f'trans must be path-like, got {type(trans)}')
    # NOTE(review): this assert rejects os.PathLike values even though the
    # isinstance check above allows them — confirm which contract is intended.
    assert isinstance(trans, str)
    trans, trans_fname = mne.transforms._get_trans(trans, 'head', 'mri')
    # Fail early (before any filesystem mutation) if backups would collide.
    if op.exists(backup_dir):
        raise RuntimeError(f'Backup dir {backup_dir} must not already exist')
    backup_trans = op.join(out_dir, op.basename(trans_fname))
    if op.exists(backup_trans):
        raise RuntimeError(f'Backup trans location {backup_trans} must not '
                           'exist')
    from_dir = op.join(subjects_dir, subject_from)
    if not op.isdir(from_dir):
        raise RuntimeError(f'Template MRI directory not found: {from_dir}')
    # A new-style template ships a precomputed 3-layer BEM solution; use its
    # presence to distinguish new-style from old-style templates.
    bem_path = op.join(
        from_dir, 'bem', f'{subject_from}-5120-5120-5120-bem-sol.fif')
    if not op.isfile(bem_path):
        raise RuntimeError(f'{subject_from} in {repr(subjects_dir)} does not '
                           'appear to be a new-style template, consider '
                           'running:\n\n'
                           'import shutil, mne\n'
                           f'shutil.rmtree({repr(from_dir)})\n'
                           f'mne.datasets.fetch_infant_template(\'{age}mo\''
                           f', subjects_dir={repr(subjects_dir)}'
                           ', verbose=True)\n')
    # Back up the old trans and the old subject directory, then rescale the
    # modern template into a fresh subject directory.
    shutil.move(trans_fname, backup_trans)
    shutil.move(out_dir, backup_dir)
    print('Rescaling MRI (will be slow)...')
    mne.coreg.scale_mri(subject_to=subject, subjects_dir=subjects_dir,
                        labels=True, annot=True, overwrite=False,
                        **config)
    # Recompute the BEM solution for the rescaled surfaces.
    bem_path = op.join(
        out_dir, 'bem', f'{subject}-5120-5120-5120-bem-sol.fif')
    sol = mne.make_bem_solution(mne.read_bem_surfaces(bem_path[:-8] + '.fif'))
    mne.write_bem_solution(bem_path, sol)
    # A factor because Christian's MRIs weren't conformed:
    # tra = {
    #     3: [3, 8, 7.5], 6: [-1, 10.5, 10], 12: [0.5, 8, 15],
    # }
    # But these factors didn't completely explain the differences. So these
    # adjustments were done by eye, and confirmed by
    # surface-matching code that follows this function.
    tra = {
        3: [2, 7, 10], 6: [-1, 11, 8.5], 12: [-1, 10, 13],
    }
    x_rot = {3: -6.5, 6: 0, 12: 8}  # per-age rotation corrections (degrees)
    y_rot = {3: -2.5, 6: 2, 12: 0}
    rot = mne.transforms.rotation(x=np.deg2rad(x_rot[age]),
                                  y=np.deg2rad(y_rot[age]))
    tra = mne.transforms.translation(*tra[age])
    xform = rot @ tra
    xform[:3, 3] *= config['scale'] / 1000.  # scale and mm->m
    # Compose the correction into the head<->MRI transform and overwrite the
    # original trans file in place.
    trans['trans'][:] = xform @ trans['trans']
    mne.transforms.write_trans(trans_fname, trans)
# This was used for the automatic fitting step:
"""
import numpy as np
from scipy.spatial import KDTree
import mne
from mne.transforms import apply_trans
for subject in ('ANTS3-0Months3T', 'ANTS6-0Months3T', 'ANTS12-0Months3T'):
rr_from = mne.read_surface(f'/mnt/bakraid/larsoner/mri/Infants/subjects/{subject}/bem/outer_skin.surf')[0] / 1000.
rr_to = mne.read_surface(f'/mnt/bakraid/data/structurals/{subject}/bem/outer_skin.surf')[0] / 1000.
tree = KDTree(rr_to)
transform = np.eye(4)
for ii in range(10):
rr_trans = apply_trans(transform, rr_from)
dists, nearest = tree.query(rr_trans)
use = np.arange(len(rr_from))
a = rr_from[use]
b = rr_to[nearest[use]]
print(f'Iteration {ii}: {1000 * np.median(dists[use]):0.2f} mm')
transform = mne.coreg.fit_matched_points(a, b)
assert transform.shape == (4, 4)
dists, nearest = tree.query(rr_trans)
print(f'Done: {1000 * np.median(dists):0.2f} mm')
with np.printoptions(precision=None, suppress=True, linewidth=150, floatmode='unique'):
print(repr(transform))
""" # noqa: E501
|
LABSN/mnefun
|
mnefun/_utils.py
|
Python
|
bsd-3-clause
| 17,637
|
[
"Mayavi"
] |
ee207dbd056b75f59076c852e1c0d9bd6108497c72e2f225d1ef8299803bbb69
|
# -------------------------------------------------------------------------
# Name: Data handling
# Purpose: Transforming netcdf to numpy arrays, checking mask file
#
# Author: PB
#
# Created: 13/07/2016
# Copyright: (c) PB 2016
# -------------------------------------------------------------------------
import os, glob
import calendar
#import numpy as np
from . import globals
from cwatm.management_modules.checks import *
from cwatm.management_modules.timestep import *
from cwatm.management_modules.replace_pcr import *
from cwatm.management_modules.messages import *
import difflib # to check the closest word in settingsfile, if an error occurs
import math
from cwatm.management_modules.dynamicModel import *
from netCDF4 import Dataset,num2date,date2num,date2index
#from netcdftime import utime
import gdal
from osgeo import osr
from gdalconst import *
import warnings
def valuecell( coordx, coordstr, returnmap = True):
    """
    Put a value into a raster map -> inverse of cellvalue; the map is
    converted into a numpy array first. Each (x, y) pair in ``coordx`` is
    marked with its 1-based index in the map.

    :param coordx: x,y or lon/lat coordinate pairs (flat sequence)
    :param coordstr: string of coordinates (used only in error messages)
    :param returnmap: if True return the compressed 1D map, otherwise the
        (col, row) index lists
    :return: 1D array with new value, or (col, row) lists
    :raises CWATMError: if a coordinate is not numeric or lies outside the
        mask map
    """
    coord = []
    col = []
    row = []
    # Parse every coordinate token to float; any non-numeric token aborts.
    for xy in coordx:
        try:
            coord.append(float(xy))
        except:
            msg = "Error 101: Gauges in settings file: " + xy + " in " + coordstr + " is not a coordinate"
            raise CWATMError(msg)

    # Background raster filled with the missing-value marker -9999.
    null = np.zeros((maskmapAttr['row'], maskmapAttr['col']))
    null[null == 0] = -9999

    # Convert each (x, y) pair to (col, row) indices on the mask-map grid.
    for i in range(int(len(coord) / 2)):
        col.append(int((coord[i * 2] - maskmapAttr['x']) * maskmapAttr['invcell']))
        row.append(int((maskmapAttr['y'] - coord[i * 2 + 1]) * maskmapAttr['invcell']))
        if col[i] >= 0 and row[i] >= 0 and col[i] < maskmapAttr['col'] and row[i] < maskmapAttr['row']:
            # Cells are numbered 1..n in input order.
            null[row[i], col[i]] = i + 1
        else:
            # Build an ASCII sketch of the mask-map bounding box for the
            # error message.
            x1 = maskmapAttr['x']
            x2 = x1 + maskmapAttr['cell']* maskmapAttr['col']
            y1 = maskmapAttr['y']
            y2 = y1 - maskmapAttr['cell']* maskmapAttr['row']
            box = "%5s %5.1f\n" %("",y1)
            box += "%5s ---------\n" % ""
            box += "%5s |       |\n" % ""
            box += "%5.1f |       |%5.1f  <-- Box of mask map\n" %(x1,x2)
            box += "%5s |       |\n" % ""
            box += "%5s ---------\n" % ""
            box += "%5s %5.1f\n" % ("", y2)
            #print box
            # NOTE(review): this header print looks like leftover debug
            # output — confirm whether it is intentional before removing.
            print("%2s %-17s %10s %8s" % ("No", "Name", "time[s]", "%"))
            msg = "Error 102: Coordinates: x = " + str(coord[i * 2]) + ' y = ' + str(
                coord[i * 2 + 1]) + " of gauge is outside mask map\n\n"
            msg += box
            msg +="\nPlease have a look at \"MaskMap\" or \"Gauges\""
            raise CWATMError(msg)

    if returnmap:
        # Compress to the 1D model array (integer gauge ids).
        mapnp = compressArray(null).astype(np.int64)
        return mapnp
    else:
        return col, row
def setmaskmapAttr(x, y, col, row, cell):
    """
    Store cell size and upper-left coordinates of the mask map in the
    global ``maskmapAttr`` dictionary.

    Coordinates are normalized because getgeotransform only delivers single
    precision: the cell size is snapped to an exact reciprocal of its inverse,
    and fractional coordinate parts are snapped via reciprocal rounding
    (the same trick used unconditionally in ``mapattrTiff``).

    :param x: upper left corner x
    :param y: upper left corner y
    :param col: number of cols
    :param row: number of rows
    :param cell: cell size
    :return: -
    """
    invcell = round(1 / cell, 0)
    # getgeotransform only delivers single precision!
    cell = 1 / invcell
    # Snap fractional parts of the corner coordinates. BUGFIX: the threshold
    # was 1e9, which a fractional part (< 1) can never exceed, making the
    # reciprocal-rounding branch dead code; 1e-9 is the intended threshold.
    if (x - int(x)) != 0.:
        if abs(x - int(x)) > 1e-9:
            x = 1 / round(1 / (x - int(x)), 4) + int(x)
        else:
            x = round(x, 4)
    if (y - int(y)) != 0.:
        if abs(y - int(y)) > 1e-9:
            y = 1 / round(1 / (y - int(y)), 4) + int(y)
        else:
            y = round(y, 4)

    maskmapAttr['x'] = x
    maskmapAttr['y'] = y
    maskmapAttr['col'] = col
    maskmapAttr['row'] = row
    maskmapAttr['cell'] = cell
    maskmapAttr['invcell'] = invcell
def loadsetclone(self,name):
    """
    Load the maskmap and set it as clone.

    The setting can be one of:
      * 5 values: ``col row cellsize xupleft yupleft`` -> synthetic mask
      * 1 value: a file name (netcdf preferred, geotiff fallback)
      * 2 values: an x/y outlet point -> catchment is derived from the Ldd

    :param name: name of mask map, can be a file or - row col cellsize xupleft yupleft -
    :return: new mask map (compressed 1D array)
    :raises CWATMError: if the setting is not a valid map, coordinates or point
    :raises CWATMFileError: if neither netcdf nor geotiff can be read
    """
    filename = cbinding(name)
    coord = filename.split()
    if len(coord) == 2:
        # An outlet point was given: the mask is initially derived from the
        # river network (Ldd) map; the catchment cut happens further below.
        name = "Ldd"
    if len(coord) == 5:
        # changed order of x, y i- in setclone y is first in CWATM
        # settings x is first
        # setclone row col cellsize xupleft yupleft
        # rectangle: Number of Cols, Number of rows, cellsize, upper left corner X, upper left corner Y
        mapnp = np.ones((int(coord[1]), int(coord[0])))
        setmaskmapAttr(float(coord[3]),float(coord[4]), int(coord[0]),int(coord[1]),float(coord[2]))
        #mapnp[mapnp == 0] = 1
        #map = numpy2pcr(Boolean, mapnp, -9999)
    elif len(coord) < 3:
        # A file name (or the 2-value point case, which loads the Ldd map):
        # try netcdf first.
        filename = os.path.splitext(cbinding(name))[0] + '.nc'
        try:
            nf1 = Dataset(filename, 'r')
            value = list(nf1.variables.items())[-1][0]  # get the last variable name
            #x1 = nf1.variables.values()[0][0]
            #x2 = nf1.variables.values()[0][1]
            #xlast = nf1.variables.values()[0][-1]
            x1 = nf1.variables['lon'][0]
            x2 = nf1.variables['lon'][1]
            xlast = nf1.variables['lon'][-1]
            y1 = nf1.variables['lat'][0]
            ylast = nf1.variables['lat'][-1]
            # swap to make y1 the biggest number
            if y1 < ylast: y1, ylast = ylast, y1
            cellSize = np.abs(x2 - x1)
            invcell = round(1/cellSize)
            # +0.5 guards against float truncation when computing extents.
            nrRows = int(0.5 + np.abs(ylast - y1) * invcell + 1)
            nrCols = int(0.5 + np.abs(xlast - x1) * invcell + 1)
            # Coordinates refer to cell centers; shift by half a cell to get
            # the upper-left corner.
            x = x1 - cellSize / 2
            y = y1 + cellSize / 2
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                mapnp = np.array(nf1.variables[value][0:nrRows, 0:nrCols])
            nf1.close()
            setmaskmapAttr( x, y, nrCols, nrRows, cellSize)
            flagmap = True
        except:
            # load geotiff
            try:
                filename = cbinding(name)
                nf2 = gdal.Open(filename, GA_ReadOnly)
                geotransform = nf2.GetGeoTransform()
                geotrans.append(geotransform)
                setmaskmapAttr( geotransform[0], geotransform[3], nf2.RasterXSize, nf2.RasterYSize, geotransform[1])
                band = nf2.GetRasterBand(1)
                #bandtype = gdal.GetDataTypeName(band.DataType)
                mapnp = band.ReadAsArray(0, 0, nf2.RasterXSize, nf2.RasterYSize)
                # 10 because that includes all valid LDD values [1-9]
                mapnp[mapnp > 10] = 0
                mapnp[mapnp < -10] = 0
                flagmap = True
            except:
                raise CWATMFileError(filename,msg = "Error 201: File reading Error\n", sname=name)
        if Flags['check']:
            checkmap(name, filename, mapnp, flagmap, False,0)
    else:
        msg = "Error 103: Maskmap: " + filename + " is not a valid mask map nor valid coordinates nor valid point\n"
        msg +="Or there is a whitespace or undefined character in Maskmap"
        raise CWATMError(msg)

    # put in the ldd map
    # if there is no ldd at a cell, this cell should be excluded from modelling
    maskldd = loadmap('Ldd', compress = False)
    maskarea = np.bool8(mapnp)
    # True = masked out (either no ldd or outside the area map).
    mask = np.logical_not(np.logical_and(maskldd,maskarea))
    # mask=np.isnan(mapnp)
    # mask[mapnp==0] = True # all 0 become mask out
    mapC = np.ma.compressed(np.ma.masked_array(mask,mask))

    # Definition of compressed array and info how to blow it up again
    maskinfo['mask']=mask
    maskinfo['shape']=mask.shape
    maskinfo['maskflat']=mask.ravel()    # map to 1D not compresses
    maskinfo['shapeflat']=maskinfo['maskflat'].shape   #length of the 1D array
    maskinfo['mapC']=mapC.shape                        # length of the compressed 1D array
    maskinfo['maskall'] =np.ma.masked_all(maskinfo['shapeflat'])  # empty map 1D but with mask
    maskinfo['maskall'].mask = maskinfo['maskflat']
    globals.inZero=np.zeros(maskinfo['mapC'])

    # NOTE(review): flagmap is only assigned in the file-loading branch, so
    # this call would raise NameError for the 5-value case — confirm.
    if Flags['check']:
        checkmap("Mask+Ldd", "", np.ma.masked_array(mask,mask), flagmap, True, mapC)

    outpoints = 0
    if len(coord) == 2:
        # Outlet-point case: derive the catchment upstream of the point from
        # the routing network and rebuild the mask from it.
        outpoints = valuecell(coord, filename)
        outpoints[outpoints < 0] = 0
        print("Create catchment from point and river network")
        mask2D, xleft, yup = self.routing_kinematic_module.catchment(outpoints)
        mapC = maskfrompoint(mask2D, xleft, yup) + 1
        area = np.sum(loadmap('CellArea')) * 1e-6  # m2 -> km2
        print("Number of cells in catchment: %6i = %7.0f km2" % (np.sum(mask2D), area))

    # if the final results map should be cover up with some mask:
    if "coverresult" in binding:
        coverresult[0] = returnBool('coverresult')
        if coverresult[0]:
            cover = loadmap('covermap', compress=False, cut = False)
            cover[cover > 1] = False
            cover[cover == 1] = True
            coverresult[1] = cover

    return mapC
def maskfrompoint(mask2D, xleft, yup):
    """
    Rebuild the global mask bookkeeping from a 2D catchment mask.

    :param mask2D: 2D array of new mask (1 = inside catchment)
    :param xleft: left lon coordinate (column offset of the cutout)
    :param yup: upper lat coordinate (row offset of the cutout)
    :return: new mask map as a compressed 1D array
    :raises CWATMError: if ``xleft`` is -1 (point not on the river network)
    """
    if xleft == -1:
        msg = "Error 104: MaskMap point does not have a valid value in the river network (LDD)"
        raise CWATMError(msg)

    # Shift the map origin to the upper-left corner of the catchment cutout.
    shifted_x = xleft * maskmapAttr['cell'] + maskmapAttr['x']
    shifted_y = maskmapAttr['y'] - yup * maskmapAttr['cell']
    maskmapAttr['x'] = shifted_x
    maskmapAttr['y'] = shifted_y
    maskmapAttr['row'], maskmapAttr['col'] = mask2D.shape

    # Invert: catchment cells (1) stay active, everything else is masked out.
    inverted = np.invert(np.bool8(mask2D))
    compressed = np.ma.compressed(np.ma.masked_array(inverted, inverted))

    # Bookkeeping needed to decompress 1D arrays back to 2D later.
    maskinfo['mask'] = inverted
    maskinfo['shape'] = inverted.shape
    maskinfo['maskflat'] = inverted.ravel()              # 2D mask flattened to 1D
    maskinfo['shapeflat'] = maskinfo['maskflat'].shape   # length of the 1D array
    maskinfo['mapC'] = compressed.shape                  # length of the compressed 1D array
    maskinfo['maskall'] = np.ma.masked_all(maskinfo['shapeflat'])
    maskinfo['maskall'].mask = maskinfo['maskflat']
    globals.inZero = np.zeros(maskinfo['mapC'])
    return compressed
def loadmap(name, lddflag=False,compress = True, local = False, cut = True):
    """
    Load a static map: either a plain value, a netcdf map or a geotiff.

    :param name: name of map (settings-file binding)
    :param lddflag: if True the map is used as a ldd map
        (NOTE(review): not referenced in this body — confirm it is used elsewhere)
    :param compress: if True the return map will be compressed to 1D
    :param local: if True the map is local and will be not cut
    :param cut: if True the map will be cut to the mask-map rectangle
    :return: 1D numpy array of map (or scalar float, or 2D array if not compressed)
    :raises CWATMFileError: if neither netcdf nor geotiff can be read
    :raises CWATMError: if a requested time step is not in the file
    """
    value = cbinding(name)
    filename = value
    mapC = 0  # initializing to prevent warning in code inspection

    try:  # loading an integer or float but not a map
        mapC = float(value)
        flagmap = False
        load = True
        if Flags['check']:
            checkmap(name, filename, mapC, False, False, 0)
    except ValueError:
        load = False

    if not load:   # read a netcdf (single one not a stack)
        filename = os.path.splitext(value)[0] + '.nc'
        # get mapextend of netcdf map and calculate the cutting
        #cut0, cut1, cut2, cut3 = mapattrNetCDF(filename)
        try:
            nf1 = Dataset(filename, 'r')
            cut0, cut1, cut2, cut3 = mapattrNetCDF(filename, check = False)
            # load netcdf map but only the rectangle needed
            #nf1 = Dataset(filename, 'r')
            value = list(nf1.variables.items())[-1][0]  # get the last variable name
            if (nf1.variables['lat'][0] - nf1.variables['lat'][-1]) < 0:
                msg = "Error 202: Latitude is in wrong order\n"
                raise CWATMFileError(filename, msg)

            if not timestepInit:
                # No initial time step requested: read the (static) 2D field.
                #with np.errstate(invalid='ignore'):
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    # in order to ignore some invalid value comments
                    if cut:
                        mapnp = nf1.variables[value][cut2:cut3, cut0:cut1].astype(np.float64)
                    else:
                        mapnp = nf1.variables[value][:]
            else:
                if 'time' in nf1.variables:
                    # Resolve the requested initial time step to an index.
                    timestepI = Calendar(timestepInit[0])
                    if type(timestepI) is datetime.datetime:
                        timestepI = date2num(timestepI,nf1.variables['time'].units)
                    else: timestepI = int(timestepI) -1
                    if not(timestepI in nf1.variables['time'][:]):
                        msg = "Error 105 time step " + str(int(timestepI)+1)+" not stored in "+ filename
                        raise CWATMError(msg)
                    itime = np.where(nf1.variables['time'][:] == timestepI)[0][0]
                    if cut:
                        mapnp = nf1.variables[value][itime,cut2:cut3, cut0:cut1]
                    else:
                        mapnp = nf1.variables[value][itime][:]
                else:
                    if cut:
                        mapnp = nf1.variables[value][cut2:cut3, cut0:cut1]
                    else:
                        mapnp = nf1.variables[value][:]
            nf1.close()
        except:
            # Netcdf failed: fall back to reading the binding as a geotiff.
            filename = cbinding(name)
            try:
                nf2 = gdal.Open(filename, GA_ReadOnly)
                band = nf2.GetRasterBand(1)
                mapnp = band.ReadAsArray(0, 0, nf2.RasterXSize, nf2.RasterYSize).astype(np.float64)
                # if local no cut
                if not local:
                    if cut:
                        cut0, cut1, cut2, cut3 = mapattrTiff(nf2)
                        mapnp = mapnp[cut2:cut3, cut0:cut1]
            except:
                msg = "Error 203: File does not exists"
                raise CWATMFileError(filename,msg,sname=name)

        try:
            # Attach the model mask if it is already defined (best effort).
            if any(maskinfo) and compress: mapnp.mask = maskinfo['mask']
        except:
            ii=0
        if compress:
            mapC = compressArray(mapnp,name=filename)
            if Flags['check']:
                checkmap(name, filename, mapnp, True, True, mapC)
        else:
            mapC = mapnp
            if Flags['check']:
                checkmap(name, filename, mapnp, True, False, 0)
    return mapC
# -----------------------------------------------------------------------
# Compressing to 1-dimensional numpy array
# -----------------------------------------------------------------------
def compressArray(map, name="None", zeros = 0.):
    """
    Compress a 2D array with missing values to a 1D array without missing
    values, using the global mask.

    :param map: input 2D map (same shape as the mask map)
    :param name: filename of the map (used in error messages; "None" skips
        the NaN validity check)
    :param zeros: replacement value (default 0) if map values are too big
        or too small (|value| > 1e20)
    :return: compressed 1D array
    :raises CWATMError: if the shape differs from the mask map, or the map
        has fewer valid pixels than the mask
    """
    if map.shape != maskinfo['mask'].shape:
        # BUGFIX: message previously read "has less a different shape".
        msg = "Error 105: " + name + " has a different shape than area or ldd \n"
        raise CWATMError(msg)
    mapnp1 = np.ma.masked_array(map, maskinfo['mask'])
    mapC = np.ma.compressed(mapnp1)
    # if fill: mapC[np.isnan(mapC)]=0
    if name != "None":
        # test if map has less valid pixel than area.map (or ldd)
        if np.isnan(mapC).any():
            msg = "Error 106:" + name + " has less valid pixels than area or ldd \n"
            raise CWATMError(msg)

    # if a value is bigger or smaller than 1e20, -1e20 the standard value is taken
    mapC[mapC > 1.E20] = zeros
    mapC[mapC < -1.E20] = zeros
    return mapC
def decompress(map):
    """
    Expand a compressed 1D array (no missing values) back into the 2D map
    with missing values, for displaying.

    :param map: numpy 1D array as input
    :return: 2D (masked) array with -9999 (or 0 for int8) at masked cells
    """
    # Start from the template masked map and fill the unmasked positions.
    # dmap=np.ma.masked_all(maskinfo['shapeflat'], dtype=map.dtype)
    expanded = maskinfo['maskall'].copy()
    expanded[~maskinfo['maskflat']] = map[:]
    expanded = expanded.reshape(maskinfo['shape'])

    # Determine the dtype name to pick the fill value for masked cells
    # (e.g. integer maps like outlets, lakes etc.).
    try:
        dtype_name = str(map.dtype)
    except:
        dtype_name = "x"

    if dtype_name == "int8":
        # int8 cannot hold -9999; clamp negatives (fill values) to zero.
        expanded[expanded < 0] = 0
    else:
        # All other dtypes (int16/int32/floats/unknown) use -9999.
        expanded[expanded.mask] = -9999

    return expanded
# -----------------------------------------------------------------------
# NETCDF
# -----------------------------------------------------------------------
def getmeta(key,varname,alternative):
    """
    Get the metadata information for the netcdf output from the global
    variable ``metaNetcdfVar``.

    :param key: metadata key (e.g. "units")
    :param varname: variable name e.g. self.var.Precipitation
    :param alternative: value returned when the key or variable is unknown
    :return: metadata information, or ``alternative`` if not found
    """
    entry = metaNetcdfVar.get(varname)
    if entry is not None and key in entry:
        return entry[key]
    return alternative
def metaNetCDF():
    """
    Collect the map metadata from the precipitation netcdf maps and store
    each variable's attribute dict in the global ``metadataNCDF``.

    :raises CWATMFileError: if the precipitation netcdf cannot be opened
    """
    try:
        name = cbinding('PrecipitationMaps')
        # The binding may contain wildcards; use the first matching file.
        name1 = glob.glob(os.path.normpath(name))[0]
        nf1 = Dataset(name1, 'r')
        for var in nf1.variables:
            metadataNCDF[var] = nf1.variables[var].__dict__
        nf1.close()
    except:
        msg = "Error 204: Trying to get metadata from netcdf\n"
        raise CWATMFileError(cbinding('PrecipitationMaps'),msg)
def readCoord(name):
    """
    Read the map attributes (corner coordinates, cell size, dimensions) from
    either a netcdf file (preferred) or a geotiff.

    :param name: name of the file (the extension is replaced by ``.nc`` for
        the netcdf attempt)
    :return: latitude, longitude, cell size, inverse cell size, rows, cols
    """
    namenc = os.path.splitext(name)[0] + '.nc'
    try:
        # NOTE(review): this handle is only used as an existence probe and is
        # never closed here; readCoordNetCDF reopens the file — confirm.
        nf1 = Dataset(namenc, 'r')
        nc = True
    except:
        nc = False
    if nc:
        lat, lon, cell, invcell, rows, cols = readCoordNetCDF(namenc)
    else:
        raster = gdal.Open(name)
        rows = raster.RasterYSize
        cols = raster.RasterXSize
        gt = raster.GetGeoTransform()
        cell = gt[1]
        invcell = round(1.0 / cell, 0)
        # getgeotransform only delivers single precision!
        cell = 1 / invcell
        x1 = gt[0]
        y1 = gt[3]
        # Reciprocal rounding to clean up single-precision corner coordinates.
        # NOTE(review): divides by the fractional part — raises
        # ZeroDivisionError for integer-valued corners; confirm inputs.
        lon = 1 / round(1 / (x1 - int(x1)), 4) + int(x1)
        lat = 1 / round(1 / (y1 - int(y1)), 4) + int(y1)
    return lat, lon, cell, invcell, rows, cols
def readCoordNetCDF(name,check = True):
    """
    Read the map attributes (corner coordinates, cell size, dimensions)
    from a netcdf map.

    :param name: name of the netcdf file
    :param check: checking if netcdf file exists (set False when called from
        inside an enclosing try block)
    :return: latitude, longitude, cell size, inverse cell size, rows, cols
    :raises if no netcdf map can be found: :meth:`management_modules.messages.CWATMFileError`
    """
    if check:
        try:
            nf1 = Dataset(name, 'r')
        except:
            msg = "Error 205: Checking netcdf map \n"
            raise CWATMFileError(name,msg)
    else:
        # if subroutine is called already from inside a try command
        nf1 = Dataset(name, 'r')

    rows = nf1.variables['lat'].shape[0]
    cols = nf1.variables['lon'].shape[0]
    lon0 = nf1.variables['lon'][0]
    lon1 = nf1.variables['lon'][1]
    lat0 = nf1.variables['lat'][0]
    latlast = nf1.variables['lat'][-1]
    nf1.close()
    # swap to make lat0 the biggest number
    if lat0 < latlast:
        lat0, latlast = latlast, lat0

    cell = round(np.abs(lon1 - lon0),8)
    invcell = round(1.0 / cell, 0)
    # Coordinates are cell centers; shift by half a cell to get the
    # upper-left corner.
    lon = round(lon0 - cell / 2,8)
    lat = round(lat0 + cell / 2,8)
    return lat,lon, cell,invcell,rows,cols
def readCalendar(name):
    """
    Read the calendar attribute of the 'time' variable from a netcdf file
    and store it in the global ``dateVar['calendar']``.

    :param name: name of the netcdf file
    """
    nf1 = Dataset(name, 'r')
    dateVar['calendar'] = nf1.variables['time'].calendar
    nf1.close()
def checkMeteo_Wordclim(meteodata, wordclimdata):
    """
    Read the map attributes of the meteo dataset and the worldclim dataset
    and compare whether they have the same map extent.

    :param meteodata: name of the meteo netcdf file
    :param wordclimdata: name of the worldclim netcdf file
    :return: True if meteo and worldclim have the same map extent
    :raises if a netcdf cannot be opened: :meth:`management_modules.messages.CWATMFileError`
    """
    # Outer bounds of the meteo dataset (cell centers extended by half a cell).
    try:
        nf1 = Dataset(meteodata, 'r')
    except:
        msg = "Error 206: Checking netcdf map \n"
        raise CWATMFileError(meteodata, msg)
    lonM0 = nf1.variables['lon'][0]
    lon1 = nf1.variables['lon'][1]
    cellM = round(np.abs(lon1 - lonM0) / 2.,8)  # half cell size
    lonM0 = round(lonM0 - cellM,8)
    lonM1 = round(nf1.variables['lon'][-1] + cellM,8)
    latM0 = nf1.variables['lat'][0]
    latM1 = nf1.variables['lat'][-1]
    nf1.close()
    # swap to make lat0 the biggest number
    if latM0 < latM1:
        latM0, latM1 = latM1, latM0
    latM0 = round(latM0 + cellM,8)
    latM1 = round(latM1 - cellM,8)

    # load worldclim data and compute its outer bounds the same way
    try:
        nf1 = Dataset(wordclimdata, 'r')
    except:
        msg = "Error 207: Checking netcdf map \n"
        raise CWATMFileError(wordclimdata, msg)
    lonW0 = nf1.variables['lon'][0]
    lon1 = nf1.variables['lon'][1]
    cellW = round(np.abs(lon1 - lonW0) / 2.,8)
    lonW0 = round(lonW0 - cellW,8)
    lonW1 = round(nf1.variables['lon'][-1] + cellW,8)
    latW0 = nf1.variables['lat'][0]
    latW1 = nf1.variables['lat'][-1]
    nf1.close()
    # swap to make lat0 the biggest number
    if latW0 < latW1:
        latW0, latW1 = latW1, latW0
    latW0 = round(latW0 + cellW,8)
    latW1 = round(latW1 - cellW,8)

    # calculate the control variable: sum of the four bounds must agree
    # to within 1e-5 for the extents to be considered equal
    contr1 = (lonM0 + lonM1 + latM0 + latM1)
    contr2 = (lonW0 + lonW1 + latW0 + latW1)
    contr = abs(round(contr1 - contr2,5))
    check = True
    if contr > 0.00001:
        #msg = "Data from meteo dataset and Wordclim dataset does not match"
        #raise CWATMError(msg)
        check = False
    return check
def mapattrNetCDF(name, check=True):
    """
    Get the 4 corners of a netcdf map to cut the map: defines the rectangle
    of the mask map inside the netcdf map.
    Calls function :meth:`management_modules.data_handling.readCoord`.

    :param name: name of the netcdf file
    :param check: checking if netcdffile exists
        (NOTE(review): not forwarded to readCoord in this body — confirm)
    :return: cut0,cut1,cut2,cut3 (col start/end, row start/end)
    :raises if cell size is different: :meth:`management_modules.messages.CWATMError`
    """
    lat, lon, cell, invcell, rows, cols = readCoord(name)
    if maskmapAttr['invcell'] != invcell:
        msg = "Error 107: Cell size different in maskmap: " + \
            binding['MaskMap'] + " and: " + name
        raise CWATMError(msg)
    # Offset of the mask map's upper-left corner inside this netcdf map;
    # +0.0001 guards against float truncation in int().
    xx = maskmapAttr['x']
    yy = maskmapAttr['y']
    cut0 = int(0.0001 + np.abs(xx - lon) * invcell)  # argmin() ??
    cut2 = int(0.0001 + np.abs(yy - lat) * invcell)
    cut1 = cut0 + maskmapAttr['col']
    cut3 = cut2 + maskmapAttr['row']
    return cut0, cut1, cut2, cut3
def mapattrNetCDFMeteo(name, check = True):
    """
    Get the map attributes like col, row etc from a (possibly coarser)
    meteo netcdf map and define the rectangle of the mask map inside it.
    Calls function :meth:`management_modules.data_handling.readCoordNetCDF`.

    :param name: name of the netcdf file
    :param check: checking if netcdffile exists
    :return: cut0,cut1,cut2,cut3 (cut on the coarse meteo grid),
        cut4,cut5,cut6,cut7 (cut on the fine mask-map grid)
    """
    lat, lon, cell, invcell, rows, cols = readCoordNetCDF(name, check)

    # x0,xend, y0,yend - borders of fine resolution map
    lon0 = maskmapAttr['x']
    lat0 = maskmapAttr['y']
    lonend = lon0 + maskmapAttr['col'] / maskmapAttr['invcell']
    latend = lat0 - maskmapAttr['row'] / maskmapAttr['invcell']

    # cut for 0.5 deg map based on finer resolution
    # lats = nc_simulated.variables['lat'][:]
    # in_lat = discharge_location[1]
    # lat_idx = geo_idx(in_lat, lats)
    # geo_idx = (np.abs(dd_array - dd)).argmin()
    # geo_idx(dd, dd_array):
    cut0 = int(0.0001 + np.abs(lon0 - lon) * invcell)
    cut2 = int(0.0001 + np.abs(lat0 - lat) * invcell)

    # lon and lat of the upper-left corner of the coarse meteo cutout
    lonCoarse = (cut0 * cell) + lon
    latCoarse = lat - (cut2 * cell)
    # offsets of the fine mask map inside that coarse cutout
    cut4 = int(0.0001 + np.abs(lon0 - lonCoarse) * maskmapAttr['invcell'])
    cut5 = cut4 + maskmapAttr['col']
    cut6 = int(0.0001 + np.abs(lat0 - latCoarse) * maskmapAttr['invcell'])
    cut7 = cut6 + maskmapAttr['row']

    # now coarser cut of the coarse meteo dataset
    cut1 = int(0.0001 + np.abs(lonend - lon) * invcell)
    cut3 = int(0.0001 + np.abs(latend - lat) * invcell)

    # test if fine cut is inside coarse cut; widen the coarse cut by one
    # coarse cell if it does not fully cover the fine rectangle
    cellx = (cut1 - cut0) * maskmapAttr['reso_mask_meteo']
    celly = (cut3 - cut2) * maskmapAttr['reso_mask_meteo']
    if cellx < cut5:
        cut1 += 1
    if celly < cut7:
        cut3 += 1
    # clamp to the global extent (360 deg lon, 180 deg lat)
    if cut1 > (360 * invcell): cut1 = int(360 * invcell)
    if cut3 > (180 * invcell): cut3 = int(180 * invcell)

    return cut0, cut1, cut2, cut3, cut4, cut5, cut6, cut7
def mapattrTiff(nf2):
    """
    Map attributes of a geotiff file: the rectangle of the mask map inside
    the geotiff.

    :param nf2: open gdal dataset
    :return: cut0,cut1,cut2,cut3 (col start/end, row start/end)
    :raises if cell size differs from the maskmap: :meth:`management_modules.messages.CWATMError`
    """
    geotransform = nf2.GetGeoTransform()
    x1 = geotransform[0]
    y1 = geotransform[3]
    #maskmapAttr['col'] = nf2.RasterXSize
    #maskmapAttr['row'] = nf2.RasterYSize
    cellSize = geotransform[1]
    invcell = round(1/cellSize,0)
    # getgeotransform only delivers single precision!
    cellSize = 1 / invcell
    # Reciprocal rounding to clean up single-precision corner coordinates.
    if (x1-int(x1)) != 0:
        x1 = 1/round(1/(x1-int(x1)),4) + int(x1)
    if (y1-int(y1)) != 0:
        y1 = 1 / round(1 / (y1 - int(y1)), 4) + int(y1)

    if maskmapAttr['invcell'] != invcell:
        msg = "Error 108: Cell size different in maskmap: " + \
            binding['MaskMap']
        raise CWATMError(msg)
    # Coordinates refer to cell centers; shift by half a cell to the corner.
    x = x1 - cellSize / 2
    y = y1 + cellSize / 2
    # +0.01 guards against float truncation in int().
    cut0 = int(0.01 + np.abs(maskmapAttr['x'] - x) * invcell)
    cut2 = int(0.01 + np.abs(maskmapAttr['y'] - y) * invcell)
    cut1 = cut0 + maskmapAttr['col']
    cut3 = cut2 + maskmapAttr['row']
    return cut0, cut1, cut2, cut3
def multinetdf(meteomaps, startcheck = 'dateBegin'):
    """
    Index all files of each meteo map stack: for every map, record which
    file covers which part of the simulation period (stored in the globals
    ``meteofiles``, ``inputcounter`` and ``flagmeteo``).

    :param meteomaps: list of meteomaps to define start and end time
    :param startcheck: key in ``dateVar`` with the date the simulation begins
    :return: -
    :raises if no map stack in meteo map folder: :meth:`management_modules.messages.CWATMFileError`
    """
    end = dateVar['dateEnd']

    for maps in meteomaps:
        name = cbinding(maps)
        nameall = glob.glob(os.path.normpath(name))
        if not nameall:
            msg ="Error 208: File missing \n"
            raise CWATMFileError(name,msg, sname=maps)
        nameall.sort()
        meteolist = {}
        startfile = 0

        for filename in nameall:
            try:
                nf1 = Dataset(filename, 'r')
            except:
                msg = "Error 209: Netcdf map stacks: " + filename +"\n"
                raise CWATMFileError(filename, msg, sname=maps)
            nctime = nf1.variables['time']

            # Divisor to convert the file's time unit to days
            # (days=1, hours=24, minutes=1440, seconds=86400).
            unitconv1 = ["DAYS", "HOUR", "MINU", "SECO"]
            unitconv2 = [1, 24, 1440, 86400]
            try:
                unitconv3 = nctime.units[:4].upper()
                datediv = unitconv2[unitconv1.index(unitconv3)]
            except:
                datediv = 1

            datestart = num2date(nctime[0] ,units=nctime.units,calendar=nctime.calendar)
            # sometime daily records have a strange hour to start with -> it is changed to 0:00 to have the same record
            datestart = datestart.replace(hour=0, minute=0)
            dateend = num2date(nctime[-1], units=nctime.units, calendar=nctime.calendar)
            # integer day numbers of the file's first/last record
            datestartint = int(nctime[0]) // datediv
            dateendint = int(nctime[-1]) // datediv
            dateend = dateend.replace(hour=0, minute=0)
            #if dateVar['leapYear'] > 0:
            # integer day numbers of the simulation start/end
            startint = int(date2num(dateVar[startcheck],nctime.units,calendar=nctime.calendar))
            start = num2date(startint, units=nctime.units, calendar=nctime.calendar)
            startint = startint // datediv
            endint = int(date2num(end, nctime.units, calendar=nctime.calendar))
            endint = endint // datediv
            #else:
            #    start = dateVar[startcheck]

            if startfile == 0:  # search first file where dynamic run starts
                if (dateendint >= startint) and (datestartint <= startint):  # if enddate of a file is bigger than the start of run
                    startfile = 1
                    #indstart = (start - datestart).days
                    indstart = startint - datestartint
                    #indend = (dateend -datestart).days
                    indend = dateendint - datestartint
                    meteolist[startfile-1] = [filename,indstart,indend, start,dateend]
                    inputcounter[maps] = indstart   # startindex of timestep 1
                    #start = dateend + datetime.timedelta(days=1)
                    #start = start.replace(hour=0, minute=0)
                    # continue searching from the day after this file ends
                    startint = dateendint + 1
                    start = num2date(startint * datediv, units=nctime.units, calendar=nctime.calendar)
            else:
                # subsequent files: include every file that starts inside
                # the remaining simulation period
                if (datestartint >= startint) and (datestartint < endint ):
                    startfile += 1
                    indstart = startint - datestartint
                    indend = dateendint - datestartint
                    meteolist[startfile - 1] = [filename, indstart,indend, start, dateend,]
                    #start = dateend + datetime.timedelta(days=1)
                    #start = start.replace(hour=0, minute=0)
                    startint = dateendint + 1
                    start = num2date(startint * datediv, units=nctime.units, calendar=nctime.calendar)
            nf1.close()
        meteofiles[maps] = meteolist
        flagmeteo[maps] = 0
def readmeteodata(name, date, value='None', addZeros = False, zeros = 0.0,mapsscale = True, modflowSteady = False):
    """
    Load one map of a netcdf stack at the current timestep (tracked via the
    globals ``meteofiles``/``inputcounter``/``flagmeteo`` filled by
    :meth:`multinetdf`).

    :param name: file name (settings-file binding)
    :param date: current date (used only for error messages)
    :param value: if set the name of the netcdf variable, otherwise detected
    :param addZeros: if True replace NaN by ``zeros``
    :param zeros: default value
    :param mapsscale: if meteo maps have the same extent as the other spatial static maps
    :param modflowSteady: if True always read index 0 of the bound file
    :return: Compressed 1D array of meteo data (2D if not mapsscale)
    :raises if data is wrong: :meth:`management_modules.messages.CWATMError`
    :raises if meteo netcdf file cannot be opened: :meth:`management_modules.messages.CWATMFileError`
    """
    if modflowSteady:
        idx = 0
        filename = os.path.normpath(cbinding(name))
    else:
        try:
            # current file of the stack and the timestep index inside it
            meteoInfo = meteofiles[name][flagmeteo[name]]
            idx = inputcounter[name]
            filename = os.path.normpath(meteoInfo[0])
        except:
            date1 = "%02d/%02d/%02d" % (date.day, date.month, date.year)
            msg = "Error 210: Netcdf map error for: " + name + " -> " + cbinding(name) + " on: " + date1 + ": \n"
            raise CWATMError(msg)

    try:
        nf1 = Dataset(filename, 'r')
    except:
        msg = "Error 211: Netcdf map stacks: \n"
        raise CWATMFileError(filename,msg, sname = name)
    warnings.filterwarnings("ignore")

    if value == "None":
        value = list(nf1.variables.items())[-1][0]  # get the last variable name
        # skip over the coordinate variables if they happen to be last
        if value in ["lon","lat","time"]:
            for i in range(2,5):
                value = list(nf1.variables.items())[-i][0]
                if not(value in ["lon","lat","time"]) : break

    # check if mask = map size -> if yes do not cut the map
    cutcheckmask = maskinfo['shape'][0] * maskinfo['shape'][1]
    cutcheckmap = nf1.variables[value].shape[1] * nf1.variables[value].shape[2]
    cutcheck = True
    if cutcheckmask == cutcheckmap: cutcheck = False

    # check if latitude is reversed (south-up); flip the map if so
    turn_latitude = False
    if (nf1.variables['lat'][0] - nf1.variables['lat'][-1]) < 0:
        turn_latitude = True
        mapnp = nf1.variables[value][idx].astype(np.float64)
        mapnp = np.flipud(mapnp)

    if cutcheck:
        if turn_latitude:
            mapnp = mapnp[cutmapFine[2]:cutmapFine[3], cutmapFine[0]:cutmapFine[1]]
        else:
            mapnp = nf1.variables[value][idx, cutmapFine[2]:cutmapFine[3], cutmapFine[0]:cutmapFine[1]].astype(np.float64)
    else:
        if not(turn_latitude):
            mapnp = nf1.variables[value][idx].astype(np.float64)

    try:
        # If the slice came back as a masked array, drop the mask and mark
        # implausibly large values as NaN (best effort).
        mapnp.mask.all()
        mapnp = mapnp.data
        mapnp[mapnp>1e15] = np.nan
    except:
        ii =1
    nf1.close()

    # add zero values to maps in order to suppress missing values
    if addZeros: mapnp[np.isnan(mapnp)] = zeros

    if mapsscale:   # if meteo maps have the same extend as the other spatial static maps -> meteomapsscale = True
        if maskinfo['shapeflat'][0]!= mapnp.size:
            msg = "Error 109: " + name + " has less or more valid pixels than the mask map \n"
            msg += "if it is the ET maps, it might be from another run with different mask. Please look at the option: calc_evaporation"
            raise CWATMWarning(msg)

        mapC = compressArray(mapnp, name=filename,zeros = zeros)
        if Flags['check']:
            checkmap(name, filename, mapnp, True, True, mapC)
    else:   # if static map extend not equal meteo maps -> downscaling in readmeteo
        mapC = mapnp
        if Flags['check']:
            checkmap(name, filename, mapnp, True, False, 0)

    # increase index and move to the next file of the stack when this one
    # is exhausted
    #if (dateVar['leapYear'] == 1) and calendar.isleap(date.year):
    #    if (date.month ==2) and (date.day == 28):
    #        ii = 1 # dummmy for not doing anything
    #    else:
    if not(modflowSteady):
        inputcounter[name] += 1
        if inputcounter[name] > meteoInfo[2]:
            inputcounter[name] = 0
            flagmeteo[name] += 1

    return mapC
def readnetcdf2(namebinding, date, useDaily='daily', value='None', addZeros = False,cut = True, zeros = 0.0,meteo = False, usefilename = False, compress = True):
    """
    load stack of maps 1 at each timestamp in netcdf format

    :param namebinding: file name in settings file
    :param date: date/index used to pick the time slice
    :param useDaily: one of "daily", "monthly", "yearly", "DOY", "10day" - how the time axis is indexed
    :param value: if set the name of the parameter is defined (default: last variable in the file)
    :param addZeros: if True NaN cells are replaced with *zeros*
    :param cut: if True the map is clipped to mask map
    :param zeros: default value used to fill NaN when addZeros is True
    :param meteo: if map are meteo maps (enables the fast incremental time-index counter)
    :param usefilename: if True filename is given False: filename is in settings file
    :param compress: True - compress data to 1D
    :return: Compressed 1D array of netcdf stored data (or 2D array if compress is False)
    :raises if netcdf file cannot be opened: :meth:`management_modules.messages.CWATMFileError`
    :raises if netcdf file is not of the size of mask map: :meth:`management_modules.messages.CWATMWarning`
    """

    # in case a filename is used e.g. because of direct loading of pre results
    if usefilename:
        name = namebinding
    else:
        name = cbinding(namebinding)
    filename = os.path.normpath(name)

    try:
        nf1 = Dataset(filename, 'r')
    except:
        msg = "Error 212: Netcdf map stacks: \n"
        raise CWATMFileError(filename,msg, sname = namebinding)

    if value == "None":
        value = list(nf1.variables.items())[-1][0]  # get the last variable name

    # date if used daily, monthly or yearly or day of year
    idx = None  # will produce an error and indicates something is wrong with date
    if useDaily == "DOY":  # day of year 1-366
        idx = date - 1
    if useDaily == "10day":  # every 10 days
        idx = date
    if useDaily == "month":
        idx = int(date.month) - 1

    if useDaily in ["monthly","yearly","daily"]:
        # DATE2INDEX TAKES A LONG TIME TO GET THE INDEX, THIS SHOULD BE A FASTER VERSION, ONCE THE FIRST INDEX IS COLLECTED
        # once the first index is known, subsequent meteo reads just increment the counter
        if (value in inputcounter) and meteo:
            inputcounter[value] += 1
            idx = inputcounter[value]
        else:
            if useDaily == "yearly":
                date = datetime.datetime(date.year, int(1), int(1))
            # if useDaily == "monthly":
            # NOTE(review): this assignment is unconditional, so for useDaily == "daily"
            # the lookup date is also reset to the first of the month - looks suspicious,
            # confirm this is intended (the 'nearest' lookup below may then pick the wrong slice)
            date = datetime.datetime(date.year, date.month, int(1))

            # A netCDF time variable object - time index (in the netCDF file)
            nctime = nf1.variables['time']
            # dateVar['leapYear'] records the calendar convention of the input stack
            if nctime.calendar in ['noleap', '365_day']:
                dateVar['leapYear'] = 1
                idx = date2indexNew(date, nctime, calendar=nctime.calendar, select='nearest', name = name)
            elif nctime.calendar in ['360_day']:
                dateVar['leapYear'] = 2
                idx = date2indexNew(date, nctime, calendar=nctime.calendar, select='nearest', name = name)
            else:
                #idx = date2index(date, nctime, calendar=nctime.calendar, select='exact')
                idx = date2indexNew(date, nctime, calendar=nctime.calendar, select='nearest', name = name)
            # remember the index so the next meteo read only has to increment it
            if meteo: inputcounter[value] = idx

    #checkif latitude is reversed
    # if latitude is stored south -> north the slice is flipped here;
    # the bare except silences files without a 'lat' variable (best-effort)
    turn_latitude = False
    try:
        if (nf1.variables['lat'][0] - nf1.variables['lat'][-1]) < 0:
            turn_latitude = True
            mapnp = nf1.variables[value][idx].astype(np.float64)
            mapnp = np.flipud(mapnp)
    except:
        ii =1

    if cut:
        if turn_latitude:
            # already flipped above - just clip to the mask window
            mapnp = mapnp[cutmap[2]:cutmap[3], cutmap[0]:cutmap[1]]
        else:
            mapnp = nf1.variables[value][idx, cutmap[2]:cutmap[3], cutmap[0]:cutmap[1]].astype(np.float64)
    else:
        if not(turn_latitude):
            mapnp = nf1.variables[value][idx].astype(np.float64)

    # strip the mask from a masked array (best-effort: plain ndarrays raise and are kept)
    try:
        mapnp.mask.all()
        mapnp = mapnp.data
    except:
        ii =1
    nf1.close()

    # add zero values to maps in order to supress missing values
    if addZeros: mapnp[np.isnan(mapnp)] = zeros

    if not compress:
        return mapnp

    if maskinfo['shapeflat'][0]!= mapnp.size:
        msg = "Error 110: " + name + " has less or more valid pixels than the mask map \n"
        raise CWATMWarning(msg)

    mapC = compressArray(mapnp, name=filename)
    if Flags['check']:
        checkmap(value, filename, mapnp, True, True, mapC)
    return mapC
def readnetcdfWithoutTime(name, value="None"):
    """
    load maps in netcdf format (has no time format)

    :param name: file name of the netcdf map
    :param value: (optional) netcdf variable name. If not given -> last variable is taken
    :return: Compressed 1D array of netcdf stored data
    :raises if the file cannot be opened or latitude is reversed: :meth:`management_modules.messages.CWATMFileError`
    """
    filename = os.path.normpath(name)
    try:
        dataset = Dataset(filename, 'r')
    except:
        msg = "Error 213: Netcdf map stacks: \n"
        raise CWATMFileError(filename,msg)

    # default: take the last variable stored in the file
    if value == "None":
        value = list(dataset.variables.items())[-1][0]

    # latitude must be ordered north -> south for this reader
    if (dataset.variables['lat'][0] - dataset.variables['lat'][-1]) < 0:
        msg = "Error 111: Latitude is in wrong order\n"
        raise CWATMFileError(filename, msg)

    # clip to the mask window and convert to float64
    grid = dataset.variables[value][cutmap[2]:cutmap[3], cutmap[0]:cutmap[1]].astype(np.float64)
    dataset.close()

    compressed = compressArray(grid, name=filename)
    if Flags['check']:
        checkmap(value, filename, grid, True, True, compressed)
    return compressed
def readnetcdfInitial(name, value,default = 0.0):
    """
    load initial condition from netcdf format

    :param name: file name
    :param value: netcdf variable name
    :param default: (optional) if no variable is found a warning is given and value is set to default
    :return: Compressed 1D array of netcdf stored data (or *default* if the variable is missing)
    :raises if netcdf file is not of the size of mask map: :meth:`management_modules.messages.CWATMError`
    :raises if varibale name is not included in the netcdf file: :meth:`management_modules.messages.CWATMWarning`
    """
    filename = os.path.normpath(name)
    try:
        nf1 = Dataset(filename, 'r')
    except:
        msg = "Error 214: Netcdf Initial file: \n"
        raise CWATMFileError(filename,msg)

    if value in list(nf1.variables.keys()):
        # the broad except below translates *any* failure (including the
        # latitude-order CWATMFileError raised here) into Error 114
        try:
            #mapnp = nf1.variables[value][cutmap[2]:cutmap[3], cutmap[0]:cutmap[1]]
            if (nf1.variables['lat'][0] - nf1.variables['lat'][-1]) < 0:
                msg = "Error 112: Latitude is in wrong order\n"
                raise CWATMFileError(filename, msg)
            mapnp = (nf1.variables[value][:].astype(np.float64))
            nf1.close()
            mapC = compressArray(mapnp, name=filename)
            if Flags['check']:
                checkmap(value, filename, mapnp, True, True, mapC)
            # NOTE(review): 'a' is never used - looks like leftover debugging
            a = globals.inZero
            # the initial-condition array must match the compressed mask size
            if mapC.shape != globals.inZero.shape:
                msg = "Error 113: map shape is different than mask shape\n"
                raise CWATMError(msg)
            return mapC
        except:
            #nf1.close()
            msg ="Error 114: ===== Problem reading initial data ====== \n"
            msg += "Initial value: " + value + " is has not the same shape as the mask map\n"
            msg += "Maybe put\"load_initial = False\""
            raise CWATMError(msg)
    else:
        # variable missing: warn and fall back to the caller-supplied default
        nf1.close()
        msg = "Warning: Initial value: " + value + " is not included in: " + name + " - using default: " + str(default)
        print(CWATMWarning(msg))
        return default
# --------------------------------------------------------------------------------------------
def writenetcdf(netfile,prename,addname,varunits,inputmap, timeStamp, posCnt, flag,flagTime, nrdays=None, dateunit="days"):
    """
    write a netcdf stack

    On the first call (flag == False) the file is created with dimensions,
    coordinate variables, projection metadata and the data variable; on later
    calls (flag == True) the file is opened in append mode and only the new
    time slice is written.

    :param netfile: file name
    :param prename: 1st part of variable name with tell which variable e.g. discharge
    :param addname: part of the variable name with tells about the timestep e.g. daily, monthly
    :param varunits: unit of the variable
    :param inputmap: 1D array to be put as netcdf
    :param timeStamp: time
    :param posCnt: calculate nummer of the indece for time
    :param flag: to indicate if the file is new -> netcdf header has to be written,or simply appending data
    :param flagTime: to indicate the variable is time dependend (not a single array!)
    :param nrdays: (optional) if indicate number of days are set in the time variable (makes files smaller!)
    :param dateunit: (optional) dateunit indicate if the timestep in netcdf is days, month or years
    :return: flag: to indicate if the file is set up
    """

    row = np.abs(cutmap[3] - cutmap[2])
    col = np.abs(cutmap[1] - cutmap[0])

    # check if it is a modflow grid which has another resolution
    modflow = False
    if "modflow" in prename.lower():
        modflow = True
        row = domain['nrow']
        col = domain['ncol']
        # modflow grids use projected UTM coordinates instead of lat/lon
        metadataNCDF['modflow_x'] = {}
        metadataNCDF['modflow_x']['standard_name'] = 'UTM_X'
        metadataNCDF['modflow_x']['units'] = 'm'
        metadataNCDF['modflow_y'] = {}
        metadataNCDF['modflow_y']['standard_name'] = 'UTM_Y'
        metadataNCDF['modflow_y']['units'] = 'm'

    # create real varname with variable name + time depending name e.g. discharge + monthavg
    varname = prename + addname

    if not flag:
        # --- first call: create the file and write the full header ---
        nf1 = Dataset(netfile, 'w', format='NETCDF4')

        # general Attributes
        settings = os.path.realpath(settingsfile[0])
        nf1.settingsfile = settings + ": " + xtime.ctime(os.path.getmtime(settings))
        nf1.run_created = xtime.ctime(xtime.time())
        nf1.Source_Software = 'CWATM Python: ' + versioning['exe']
        nf1.Platform = versioning['platform']
        nf1.Version = versioning['version'] + ": " + versioning['lastfile'] + " " + versioning['lastdate']
        nf1.institution = cbinding ("institution")
        nf1.title = cbinding ("title")
        nf1.source = 'CWATM output maps'
        nf1.Conventions = 'CF-1.6'

        # put the additional genaral meta data information from the xml file into the netcdf file
        # infomation from the settingsfile comes first
        if prename in metaNetcdfVar:
            for key in metaNetcdfVar[prename]:
                # do not overwrite attributes already set above, and keep the
                # per-variable keys (unit/long_name/standard_name) for the variable itself
                if not (key in list(nf1.__dict__.keys())):
                    if not (key in ["unit", "long_name", "standard_name"]):
                        nf1.__setattr__(key, metaNetcdfVar[prename][key])

        # Dimension
        # the exec() calls below copy metadata dict entries onto the coordinate
        # variables as netCDF attributes (e.g. longitude.units = "...")
        if modflow:
            lon = nf1.createDimension('x', col)  # x 1000
            longitude = nf1.createVariable('x', 'f8', ('x',))
            for i in metadataNCDF['modflow_x']:
                exec('%s="%s"' % ("longitude." + i, metadataNCDF['modflow_x'][i]))
            lat = nf1.createDimension('y', row)  # x 950
            latitude = nf1.createVariable('y', 'f8', 'y')
            for i in metadataNCDF['modflow_y']:
                exec('%s="%s"' % ("latitude." + i, metadataNCDF['modflow_y'][i]))
        else:
            # projected grids use x/y, geographic grids use lon/lat - whichever
            # coordinate names the input metadata provides
            if 'x' in list(metadataNCDF.keys()):
                lon = nf1.createDimension('x', col)  # x 1000
                longitude = nf1.createVariable('x', 'f8', ('x',))
                for i in metadataNCDF['x']:
                    exec('%s="%s"' % ("longitude." + i, metadataNCDF['x'][i]))
            if 'lon' in list(metadataNCDF.keys()):
                lon = nf1.createDimension('lon', col)
                longitude = nf1.createVariable('lon', 'f8', ('lon',))
                for i in metadataNCDF['lon']:
                    exec('%s="%s"' % ("longitude." + i, metadataNCDF['lon'][i]))
            if 'y' in list(metadataNCDF.keys()):
                lat = nf1.createDimension('y', row)  # x 950
                latitude = nf1.createVariable('y', 'f8', 'y')
                for i in metadataNCDF['y']:
                    exec('%s="%s"' % ("latitude." + i, metadataNCDF['y'][i]))
            if 'lat' in list(metadataNCDF.keys()):
                lat = nf1.createDimension('lat', row)  # x 950
                latitude = nf1.createVariable('lat', 'f8', 'lat')
                for i in metadataNCDF['lat']:
                    exec('%s="%s"' % ("latitude." + i, metadataNCDF['lat'][i]))

        # projection
        if 'laea' in list(metadataNCDF.keys()):
            proj = nf1.createVariable('laea', 'i4')
            for i in metadataNCDF['laea']:
                exec('%s="%s"' % ("proj." + i, metadataNCDF['laea'][i]))
        if 'lambert_azimuthal_equal_area' in list(metadataNCDF.keys()):
            proj = nf1.createVariable('lambert_azimuthal_equal_area', 'i4')
            for i in metadataNCDF['lambert_azimuthal_equal_area']:
                exec('%s="%s"' % (
                    "proj." + i, metadataNCDF['lambert_azimuthal_equal_area'][i]))

        # Fill variables
        if modflow:
            lats = np.arange(domain['north'], domain['south'] - 1, domain['cellsize'] * -1)
            lons = np.arange(domain['west'], domain['east']+1, domain['cellsize'])
            #lons = np.linspace(domain['north'] , domain['south'], col, endpoint=False)
            latitude[:] = lats
            longitude[:] = lons
        else:
            # cell-centre coordinates derived from the mask map's upper-left corner
            cell = maskmapAttr['cell']
            xl = maskmapAttr['x']
            xr = xl + col * cell
            yu = maskmapAttr['y']
            yd = yu - row * cell
            lats = np.linspace(yu, yd, row, endpoint=False)
            lons = np.linspace(xl, xr, col, endpoint=False)
            latitude[:] = lats - cell / 2.0
            longitude[:] = lons + cell /2.0

        if flagTime:
            # choose a reference year for the time axis so offsets stay small
            year = dateVar['dateStart'].year
            if year > 1900: yearstr = "1901"
            elif year < 1861: yearstr = "1650"
            else: yearstr = "1861"

            #nf1.createDimension('time', None)
            nf1.createDimension('time', nrdays)
            time = nf1.createVariable('time', 'f8', 'time')
            time.standard_name = 'time'
            if dateunit == "days": time.units = 'Days since ' + yearstr + '-01-01'
            if dateunit == "months": time.units = 'Months since ' + yearstr + '-01-01'
            if dateunit == "years": time.units = 'Years since ' + yearstr + '-01-01'
            #time.calendar = 'standard'
            time.calendar = dateVar['calendar']

            if modflow:
                value = nf1.createVariable(varname, 'f4', ('time', 'y', 'x'), zlib=True, fill_value=1e20)
            else:
                if 'x' in list(metadataNCDF.keys()):
                    value = nf1.createVariable(varname, 'f4', ('time', 'y', 'x'), zlib=True,fill_value=1e20)
                if 'lon' in list(metadataNCDF.keys()):
                    #value = nf1.createVariable(varname, 'f4', ('time', 'lat', 'lon'), zlib=True, fill_value=1e20)
                    # chunk per time step for faster slice-wise reading
                    value = nf1.createVariable(varname, 'f4', ('time', 'lat', 'lon'), zlib=True, fill_value=1e20,chunksizes=(1,row,col))
        else:
            if modflow:
                value = nf1.createVariable(varname, 'f4', ('y', 'x'), zlib=True, fill_value=1e20)
            else:
                if 'x' in list(metadataNCDF.keys()):
                    value = nf1.createVariable(varname, 'f4', ('y', 'x'), zlib=True,fill_value=1e20)
                if 'lon' in list(metadataNCDF.keys()):
                    # for world lat/lon coordinates
                    value = nf1.createVariable(varname, 'f4', ('lat', 'lon'), zlib=True, fill_value=1e20)

        value.standard_name = getmeta("standard_name",prename,varname)
        p1 = getmeta("long_name",prename,prename)
        p2 = getmeta("time", addname, addname)
        value.long_name = p1 + p2
        value.units= getmeta("unit",prename,varunits)

        # copy an esri_pe_string (projection WKT) from any metadata entry that has one
        for var in list(metadataNCDF.keys()):
            if "esri_pe_string" in list(metadataNCDF[var].keys()):
                value.esri_pe_string = metadataNCDF[var]['esri_pe_string']

    else:
        # --- subsequent calls: append to the existing file ---
        nf1 = Dataset(netfile, 'a')

    if flagTime:
        # write the time coordinate for this slice (posCnt is 1-based)
        date_time = nf1.variables['time']
        if dateunit == "days": nf1.variables['time'][posCnt-1] = date2num(timeStamp, date_time.units, date_time.calendar)
        if dateunit == "months": nf1.variables['time'][posCnt - 1] = (timeStamp.year - 1901) * 12 + timeStamp.month - 1
        if dateunit == "years": nf1.variables['time'][posCnt - 1] = timeStamp.year - 1901
        #nf1.variables['time'][posCnt - 1] = 60 + posCnt

    mapnp = maskinfo['maskall'].copy()

    # if inputmap is not an array give out errormessage
    if not(hasattr(inputmap, '__len__')):
        date1 = "%02d/%02d/%02d" % (timeStamp.day, timeStamp.month, timeStamp.year)
        msg = "No values in: " + varname + " on date: " + date1 +"\nCould not write: " + netfile
        nf1.close()
        print(CWATMWarning(msg))
        return False

    if modflow:
        # modflow maps are already 2D on their own grid
        mapnp = inputmap
    else:
        # decompress the 1D array back onto the 2D mask grid
        mapnp[~maskinfo['maskflat']] = inputmap[:]
        #mapnp = mapnp.reshape(maskinfo['shape']).data
        mapnp = mapnp.reshape(maskinfo['shape'])
        if coverresult[0]:
            # apply the cover mask: cells outside it become NaN
            mapnp = mapnp.reshape(maskinfo['shape']).data
            mapnp = np.where(coverresult[1], mapnp, np.nan)
        else:
            mapnp = mapnp.reshape(maskinfo['shape'])

    if flagTime:
        nf1.variables[varname][posCnt -1, :, :] = mapnp
    else:
        # without timeflag
        nf1.variables[varname][:, :] = mapnp

    nf1.close()
    flag = True

    return flag
# --------------------------------------------------------------------------------------------
def writeIniNetcdf(netfile,varlist, inputlist):
    """
    write variables to netcdf init file

    Creates a new netcdf file with one 2D ('lat'/'lon' or 'y'/'x') variable per
    entry of *varlist*, filled from the matching 1D compressed array in *inputlist*.

    :param netfile: file name
    :param varlist: list of variable to be written in the netcdf file
    :param inputlist: stack of 1D arrays (same order as varlist)
    :return: -
    """

    row = np.abs(cutmap[3] - cutmap[2])
    col = np.abs(cutmap[1] - cutmap[0])

    nf1 = Dataset(netfile, 'w', format='NETCDF4')

    # general Attributes
    nf1.settingsfile = os.path.realpath(settingsfile[0])
    nf1.date_created = xtime.ctime(xtime.time())
    nf1.Source_Software = 'CWATM Python'
    nf1.institution = cbinding ("institution")
    nf1.title = cbinding ("title")
    nf1.source = 'CWATM initial conditions maps'
    nf1.Conventions = 'CF-1.6'

    # put the additional genaral meta data information from the xml file into the netcdf file
    # infomation from the settingsfile comes first
    if "initcondition" in metaNetcdfVar:
        for key in metaNetcdfVar["initcondition"]:
            if not (key in list(nf1.__dict__.keys())):
                if not (key in ["unit", "long_name", "standard_name"]):
                    nf1.__setattr__(key, metaNetcdfVar["initcondition"][key])

    # Dimension
    # the exec() calls copy metadata dict entries onto the coordinate
    # variables as netCDF attributes (e.g. longitude.units = "...")
    if 'x' in list(metadataNCDF.keys()):
        lon = nf1.createDimension('x', col)  # x 1000
        longitude = nf1.createVariable('x', 'f8', ('x',))
        for i in metadataNCDF['x']:
            exec('%s="%s"' % ("longitude." + i, metadataNCDF['x'][i]))
    if 'lon' in list(metadataNCDF.keys()):
        lon = nf1.createDimension('lon', col)
        longitude = nf1.createVariable('lon', 'f8', ('lon',))
        for i in metadataNCDF['lon']:
            exec('%s="%s"' % ("longitude." + i, metadataNCDF['lon'][i]))
    if 'y' in list(metadataNCDF.keys()):
        lat = nf1.createDimension('y', row)  # x 950
        latitude = nf1.createVariable('y', 'f8', 'y')
        for i in metadataNCDF['y']:
            exec('%s="%s"' % ("latitude." + i, metadataNCDF['y'][i]))
    if 'lat' in list(metadataNCDF.keys()):
        lat = nf1.createDimension('lat', row)  # x 950
        latitude = nf1.createVariable('lat', 'f8', 'lat')
        for i in metadataNCDF['lat']:
            exec('%s="%s"' % ("latitude." + i, metadataNCDF['lat'][i]))

    # projection
    if 'laea' in list(metadataNCDF.keys()):
        proj = nf1.createVariable('laea', 'i4')
        for i in metadataNCDF['laea']:
            exec('%s="%s"' % ("proj." + i, metadataNCDF['laea'][i]))
    if 'lambert_azimuthal_equal_area' in list(metadataNCDF.keys()):
        proj = nf1.createVariable('lambert_azimuthal_equal_area', 'i4')
        for i in metadataNCDF['lambert_azimuthal_equal_area']:
            exec('%s="%s"' % ("proj." + i, metadataNCDF['lambert_azimuthal_equal_area'][i]))

    # Fill variables
    # cell-centre coordinates derived from the mask map's upper-left corner
    cell = maskmapAttr['cell']
    xl = maskmapAttr['x']
    xr = xl + col * cell
    yu = maskmapAttr['y']
    yd = yu - row * cell
    lats = np.linspace(yu, yd, row, endpoint=False)
    lons = np.linspace(xl, xr, col, endpoint=False)

    latitude[:] = lats - cell / 2.0
    longitude[:] = lons + cell /2.0

    i = 0
    for varname in varlist:
        if 'x' in list(metadataNCDF.keys()):
            value = nf1.createVariable(varname, 'f8', ('y', 'x'), zlib=True,fill_value=1e20)
        if 'lon' in list(metadataNCDF.keys()):
            # for world lat/lon coordinates
            value = nf1.createVariable(varname, 'f8', ('lat', 'lon'), zlib=True, fill_value=1e20)

        value.standard_name= getmeta("standard_name",varname,varname)
        value.long_name= getmeta("long_name",varname,varname)
        value.units= getmeta("unit",varname,"undefined")

        # write values
        mapnp = maskinfo['maskall'].copy()
        # clamp extreme values before writing to keep them representable
        help = np.minimum(10e15,np.maximum(-9999., inputlist[i][:]))
        mapnp[~maskinfo['maskflat']] = help
        #mapnp = mapnp.reshape(maskinfo['shape']).data
        mapnp = mapnp.reshape(maskinfo['shape'])

        nf1.variables[varname][:, :] = mapnp
        i += 1

    nf1.close()
# --------------------------------------------------------------------------------------------
# report .tif and .maps
def report(valueIn,name,compr=True):
    """
    For debugging: Save the 2D array as .map or .tif

    The output format is chosen from the extension of *name*:
    ".map" -> PCRaster, anything else -> GeoTIFF (LZW compressed, WGS84).

    :param valueIn: 1D (compressed) or 2D array in
    :param name: Filename of the map
    :param compr: (optional) True: valueIn is 1D and is decompressed first (default), False: valueIn is already 2D
    :return: -

    ::

        Example:
        > report(c:/temp/ksat1.map, self_.var_.ksat1)
    """
    filename = os.path.splitext(name)
    # ".map" extension selects the PCRaster driver, everything else GeoTIFF
    pcmap = filename[1] == ".map"

    if compr:
        value = decompress(valueIn)
    else:
        value = valueIn
    # strip the mask and keep the raw data (assumes a masked array - TODO confirm)
    value = value.data

    # integer arrays are written as Int32, everything else as Float32
    checkint = value.dtype.char in np.typecodes['AllInteger']
    ny, nx = value.shape

    if pcmap:  # if it is a map
        raster = gdal.GetDriverByName('PCRaster')
        if checkint:
            ds = raster.Create(name, nx, ny, 1, gdal.GDT_Int32, ["PCRASTER_VALUESCALE=VS_NOMINAL"])
        else:
            ds = raster.Create(name, nx, ny, 1, gdal.GDT_Float32, ["PCRASTER_VALUESCALE=VS_SCALAR"])
        ds.SetGeoTransform(geotrans[0])  # specify coords
        outband = ds.GetRasterBand(1)
        # PCRaster cannot store NaN: use -9999 as NoData
        # NOTE(review): this writes -9999 into the array in place (mutates the caller's data when compr=False)
        outband.SetNoDataValue(-9999)
        value[np.isnan(value)] = -9999
    else:  # if is not a .map
        if checkint:
            ds = gdal.GetDriverByName('GTiff').Create(name, nx, ny, 1, gdal.GDT_Int32, ['COMPRESS=LZW'])
        else:
            ds = gdal.GetDriverByName('GTiff').Create(name, nx, ny, 1, gdal.GDT_Float32, ['COMPRESS=LZW'])
        ds.SetGeoTransform(geotrans[0])  # specify coords
        srs = osr.SpatialReference()  # establish encoding
        srs.ImportFromEPSG(4326)  # WGS84 lat/long
        ds.SetProjection(srs.ExportToWkt())  # export coords to file
        outband = ds.GetRasterBand(1)
        outband.SetNoDataValue(-9999)
        # BUG FIX: np.float is a removed alias (NumPy >= 1.20 deprecates, 1.24 removes it);
        # use the builtin float for the statistics values instead
        outband.SetStatistics(float(np.nanmin(value)), float(np.nanmax(value)),
                              float(np.nanmean(value)), float(np.nanstd(value)))

    outband.WriteArray(value)
    ds.FlushCache()
    # release GDAL handles so the file is flushed to disk
    ds = None
    outband = None
# --------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------
def returnBool(inBinding):
    """
    Test if parameter is a boolean and return an error message if not, and the boolean if everything is ok

    :param inBinding: parameter in settings file
    :return: boolean of inBinding
    :raises CWATMError: if the settings value is neither a true nor a false word
    """
    raw = cbinding(inBinding)
    lowered = raw.lower()
    is_true = lowered in ("yes", "true", "t", "1")
    is_false = lowered in ("no", "false", "f", "0")
    # reject anything that is not one of the recognised boolean words
    if not (is_true or is_false):
        msg = "Error 115: Value in: \"" + inBinding + "\" is not True or False! \nbut: " + raw
        raise CWATMError(msg)
    return is_true
def checkOption(inBinding):
    """
    Check if option in settings file has a counterpart in the source code

    :param inBinding: parameter in settings file
    :return: value of the option
    :raises CWATMError: if the option name is not found (with a hint to the closest matching key)
    """
    lineclosest = ""
    test = inBinding in option
    if test:
        return option[inBinding]
    else:
        # option is missing: look for the closest key name to help the user
        close = difflib.get_close_matches(inBinding, list(option.keys()))
        if close:
            closest = close[0]
            # find the line in the settings file where the close match appears
            with open(settingsfile[0]) as f:
                i = 0
                for line in f:
                    i += 1
                    if closest in line:
                        lineclosest = "Line No. " + str(i) + ": " + line
            # BUG FIX: was closest = ["- no match -"] (a list), which made the
            # string concatenation below raise TypeError; use a string like cbinding() does
            if not closest: closest = "- no match -"
        else:
            closest = "- no match -"

        msg = "Error 116: No key with the name: \"" + inBinding + "\" in the settings file: \"" + settingsfile[0] + "\"\n"
        # newline added for consistency with cbinding() so lineclosest starts on its own line
        msg += "Closest key to the required one is: \"" + closest + "\"\n"
        msg += lineclosest
        raise CWATMError(msg)
def cbinding(inBinding):
    """
    Check if variable in settings file has a counterpart in the source code

    :param inBinding: parameter in settings file
    :return: value bound to the key in the settings file
    :raises CWATMError: if the key is not found (with a hint to the closest matching key)
    """
    # fast path: the key exists in the settings bindings
    if inBinding in binding:
        return binding[inBinding]

    # key not found: build a helpful error message with the closest match
    lineclosest = ""
    matches = difflib.get_close_matches(inBinding, list(binding.keys()))
    if matches:
        closest = matches[0]
        # locate the line of the close match in the settings file
        with open(settingsfile[0]) as settings:
            number = 0
            for line in settings:
                number += 1
                if closest in line:
                    lineclosest = "Line No. " + str(number) + ": " + line
        if not closest:
            closest = "- no match -"
    else:
        closest = "- no match -"

    msg = "Error 117: No key with the name: \"" + inBinding + "\" in the settings file: \"" + settingsfile[0] + "\"\n"
    msg += "Closest key to the required one is: \"" + closest + "\"\n"
    msg += lineclosest
    raise CWATMError(msg)
# --------------------------------------------------------------------------------------------
def divideValues(x, y, default=0.):
    """
    returns the result of a division that possibly involves a zero

    :param x: dividend array
    :param y: divisor array
    :param default: return value where y == 0
    :return: result of :math:`x/y` or default where y == 0
    """
    # replace zeros in a copy of the divisor so the division itself never
    # triggers a warning; the affected cells are overwritten afterwards
    zero_mask = y == 0.
    safe_divisor = y.copy()
    safe_divisor[zero_mask] = 1.0
    result = x / safe_divisor
    result[zero_mask] = default
    return result
|
CWatM/CWatM
|
cwatm/management_modules/data_handling.py
|
Python
|
gpl-3.0
| 60,787
|
[
"NetCDF"
] |
0166837f160b584344c4f0c3827420fa2405a5d16e8a77413fb44b9dec15aa62
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Linear Density --- :mod:`MDAnalysis.analysis.lineardensity`
===========================================================
A tool to compute mass and charge density profiles along the three
cartesian axes of the simulation cell. Works only for orthorombic,
fixed volume cells (thus for simulations in canonical NVT ensemble).
"""
from __future__ import division, absolute_import
import os.path as path
import numpy as np
from MDAnalysis.analysis.base import AnalysisBase
class LinearDensity(AnalysisBase):
    """Linear density profile

    LinearDensity(selection, grouping='atoms', binsize=0.25)

    Parameters
    ----------
    selection : AtomGroup
          Any atomgroup
    grouping : str {'atoms', 'residues', 'segments', 'fragments'}
          Density profiles will be computed on the center of geometry
          of a selected group of atoms ['atoms']
    binsize : float
          Bin width in Angstrom used to build linear density
          histograms. Defines the resolution of the resulting density
          profile (smaller --> higher resolution) [0.25]
    start : int
          The frame to start at [0]
    stop : int
          The frame to end at [-1]
    step : int
          The step size through the trajectory in frames [0]

    Example
    -------
    First create a LinearDensity object by supplying a selection,
    then use the :meth:`run` method::

      ldens = LinearDensity(selection)
      ldens.run()

    Density profiles can be written to file through the `save` method::

      ldens.save(description='mydensprof', form='txt')

    which will output the density profiles in a file named
    `<trajectory_filename>.mydensprof_<grouping>.ldens`.
    Results can be saved in npz format by specifying `form='npz'`

    .. versionadded:: 0.14.0
    """

    def __init__(self, selection, grouping='atoms', binsize=0.25, **kwargs):
        super(LinearDensity, self).__init__(selection.universe.trajectory,
                                            **kwargs)
        # allows use of run(parallel=True)
        self._ags = [selection]
        self._universe = selection.universe

        self.binsize = binsize

        # group of atoms on which to compute the COM (same as used in
        # AtomGroup.wrap())
        self.grouping = grouping

        # Dictionary containing results
        self.results = {'x': {'dim': 0}, 'y': {'dim': 1}, 'z': {'dim': 2}}
        # Box sides (assumes an orthorhombic, fixed-volume cell)
        self.dimensions = self._universe.dimensions[:3]
        self.volume = np.prod(self.dimensions)
        # number of bins per axis
        bins = (self.dimensions // self.binsize).astype(int)

        # Here we choose a number of bins of the largest cell side so that
        # x, y and z values can use the same "coord" column in the output file
        self.nbins = bins.max()
        slices_vol = self.volume / bins

        self.keys = ['pos', 'pos_std', 'char', 'char_std']

        # Initialize results array with zeros
        for dim in self.results:
            idx = self.results[dim]['dim']
            self.results[dim].update({'slice volume': slices_vol[idx]})
            for key in self.keys:
                self.results[dim].update({key: np.zeros(self.nbins)})

        # Variables later defined in _prepare() method
        self.masses = None
        self.charges = None
        self.totalmass = None

    def _prepare(self):
        # group must be a local variable, otherwise there will be
        # issues with parallelization
        group = getattr(self._ags[0], self.grouping)

        # Get masses and charges for the selection
        try:  # in case it's not an atom
            self.masses = np.array([elem.total_mass() for elem in group])
            self.charges = np.array([elem.total_charge() for elem in group])
        except AttributeError:  # much much faster for atoms
            self.masses = self._ags[0].masses
            self.charges = self._ags[0].charges

        self.totalmass = np.sum(self.masses)

    def _single_frame(self):
        self.group = getattr(self._ags[0], self.grouping)
        self._ags[0].wrap(compound=self.grouping)

        # Find position of atom/group of atoms
        if self.grouping == 'atoms':
            positions = self._ags[0].positions  # faster for atoms
        else:
            # COM for res/frag/etc
            positions = np.array([elem.centroid() for elem in self.group])

        for dim in ['x', 'y', 'z']:
            idx = self.results[dim]['dim']

            key = 'pos'
            key_std = 'pos_std'
            # histogram for positions weighted on masses
            hist, _ = np.histogram(positions[:, idx],
                                   weights=self.masses,
                                   bins=self.nbins,
                                   range=(0.0, max(self.dimensions)))

            # accumulate sum and sum of squares; _conclude() turns these
            # into mean and standard deviation
            self.results[dim][key] += hist
            self.results[dim][key_std] += np.square(hist)

            key = 'char'
            key_std = 'char_std'
            # histogram for positions weighted on charges
            hist, _ = np.histogram(positions[:, idx],
                                   weights=self.charges,
                                   bins=self.nbins,
                                   range=(0.0, max(self.dimensions)))

            self.results[dim][key] += hist
            self.results[dim][key_std] += np.square(hist)

    def _conclude(self):
        k = 6.022e-1  # divide by Avogadro's number and convert from A^3 to cm^3

        # Average results over the number of configurations
        for dim in ['x', 'y', 'z']:
            for key in ['pos', 'pos_std', 'char', 'char_std']:
                self.results[dim][key] /= self.n_frames
            # Compute standard deviation for the error via sqrt(E[X^2] - E[X]^2)
            self.results[dim]['pos_std'] = np.sqrt(self.results[dim][
                'pos_std'] - np.square(self.results[dim]['pos']))
            self.results[dim]['char_std'] = np.sqrt(self.results[dim][
                'char_std'] - np.square(self.results[dim]['char']))

        # normalize by slice volume to obtain densities
        for dim in ['x', 'y', 'z']:
            self.results[dim]['pos'] /= self.results[dim]['slice volume'] * k
            self.results[dim]['char'] /= self.results[dim]['slice volume'] * k
            self.results[dim]['pos_std'] /= self.results[dim]['slice volume'] * k
            self.results[dim]['char_std'] /= self.results[dim]['slice volume'] * k

    def save(self, description='', form='txt'):
        """Save density profile to file

        Allows to save the density profile to either a ASCII txt file or a
        binary numpy npz file. Output file has extension 'ldens' and begins
        with the name of the trajectory file.

        Parameters
        ----------
        description : str
                An arbitrary description added to the output filename. Can be useful
        form : str {'txt', 'npz'}
                Format of the output. 'txt' will generate an ASCII text file while
                'npz' will produce a numpy binary file.

        Raises
        ------
        ValueError
                If *form* is neither 'txt' nor 'npz'.

        Example
        -------
        After initializing and running a `LinearDensity` object, results can be
        written to file as follows::

          ldens.save(description='mydensprof', form='txt')

        which will output the linear density profiles in a file named
        `<trajectory_filename>.mydensprof_<grouping>.ldens`.
        """
        # Take root of trajectory filename for output file naming
        trajname = path.splitext(path.basename(
            self._universe.trajectory.filename))[0]
        # additional string for naming the output file
        description = description + "_" + str(self.grouping)
        filename = trajname + "." + description + ".ldens"

        # BUG FIX: was "form is 'txt'" / "form is 'npz'" -- identity comparison
        # against string literals is implementation dependent (and a
        # SyntaxWarning on Python >= 3.8); use equality instead.
        if form == 'txt':
            self._savetxt(filename)
        elif form == 'npz':
            self._savez(filename)
        else:
            raise ValueError('form argument must be either txt or npz')

    def _savetxt(self, filename):
        # Write bins plus mass and charge density columns as ASCII text.
        bins = np.linspace(0.0, max(self.dimensions), num=self.nbins)
        # Create list of results which will be output
        output = [bins]
        for dim in ['x', 'y', 'z']:
            output.append(self.results[dim]['pos'])
            output.append(self.results[dim]['pos_std'])

        for dim in ['x', 'y', 'z']:
            output.append(self.results[dim]['char'])
            output.append(self.results[dim]['char_std'])

        density = self.totalmass / self.volume
        header = ("1 coord [Ang] 2-7 mass density (x,sx,y,sz,z,sz) [g/cm^3]"
                  "8-13 charge density (x,sx,y,sz,z,sz) [e/A^3]\n Average "
                  "density: {} g/cm3".format(density))

        np.savetxt(filename,
                   np.column_stack(output),
                   fmt='%10.5f',
                   header=header)

    def _savez(self, filename):
        # Write results as a numpy .npz archive keyed by "<dim>_<quantity>".
        # NOTE: pops 'dim' and 'slice volume' from self.results, so the object
        # cannot be saved twice (preserved from the original implementation).
        bins = np.linspace(0.0, max(self.dimensions), num=self.nbins)
        dictionary = {'bins': bins}

        for dim in self.results:
            self.results[dim].pop('dim')
            self.results[dim].pop('slice volume')
            for key in self.results[dim]:
                dictionary[dim + "_" + key] = self.results[dim][key]

        np.savez(filename, **dictionary)

    def _add_other_results(self, other):
        # For parallel analysis: merge accumulated histograms from another worker
        results = self.results
        for dim in ['x', 'y', 'z']:
            key = 'pos'
            key_std = 'pos_std'
            results[dim][key] += other[dim][key]
            results[dim][key_std] += other[dim][key_std]

            key = 'char'
            key_std = 'char_std'
            results[dim][key] += other[dim][key]
            results[dim][key_std] += other[dim][key_std]
|
kain88-de/mdanalysis
|
package/MDAnalysis/analysis/lineardensity.py
|
Python
|
gpl-2.0
| 10,626
|
[
"MDAnalysis"
] |
6ed32f848989ec3ef00b5d4894c04c5095b3e2c8afc07a7a679576b6ccf4429f
|
#makes use of Michigan Tech's Directory
# Simple interactive lookup script (Python 2: uses raw_input) that drives a
# real browser via splinter against MTU's LDAP web lookup form.
name = raw_input("Enter the first name of the person you wish to look up: ")

from splinter import Browser

with Browser() as browser:
    url = "https://www.mtu.edu/mtuldapweb/web_lookup/"
    browser.visit(url)
    # type the requested first name into the search field named 'advtext'
    browser.fill('advtext', name)
    #find and click the 'search' button
    button = browser.find_by_name('submit')
    #interact with elements
    button.click()
    # block until the user presses Enter so the browser window stays open
    raw_input("") #gives a way for me to observe
|
frankcash/Web_Scraping
|
test2.py
|
Python
|
mit
| 465
|
[
"VisIt"
] |
02d98c32025c1c4d8d82e3881d05eb867f33f0188ff488c70b7a5d5128fb17b2
|
""" Transformation Database Client Command Line Interface.
"""
__RCSID__ = '$Id: $'
#! /usr/bin/env python
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
import sys, cmd
from DIRAC.Core.Base.API import API
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC.TransformationSystem.Client.Transformation import Transformation
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
__RCSID__ = "$Id$"
def printDict( dictionary ):
""" Dictionary pretty printing """
key_max = 0
value_max = 0
for key, value in dictionary.items():
if len( key ) > key_max:
key_max = len( key )
if len( str( value ) ) > value_max:
value_max = len( str( value ) )
for key, value in dictionary.items():
print key.rjust( key_max ), ' : ', str( value ).ljust( value_max )
class TransformationCLI( cmd.Cmd, API ):
def __init__( self ):
self.server = TransformationClient()
self.indentSpace = 4
cmd.Cmd.__init__( self )
API.__init__( self )
def printPair( self, key, value, separator = ":" ):
valueList = value.split( "\n" )
print "%s%s%s %s" % ( key, " " * ( self.indentSpace - len( key ) ), separator, valueList[0].strip() )
for valueLine in valueList[ 1:-1 ]:
print "%s %s" % ( " " * self.indentSpace, valueLine.strip() )
def do_exit( self, args ):
""" Exits the shell.
usage: exit
"""
sys.exit( 0 )
def do_quit( self, *args ):
""" Exits the shell.
Usage: quit
"""
sys.exit( 0 )
def do_help( self, args ):
""" Default version of the help command
Usage: help <command>
OR use helpall to see description for all commands"""
cmd.Cmd.do_help( self, args )
# overriting default help command
def do_helpall( self, args ):
"""
Shows help information
Usage: helpall <command>
If no command is specified all commands are shown
"""
if len( args ) == 0:
print "\nAvailable commands:\n"
attrList = dir( self )
attrList.sort()
for attribute in attrList:
if attribute.find( "do_" ) == 0:
self.printPair( attribute[ 3: ], getattr( self, attribute ).__doc__[ 1: ] )
print ""
else:
command = args.split()[0].strip()
try:
obj = getattr( self, "do_%s" % command )
except:
print "There's no such %s command" % command
return
self.printPair( command, obj.__doc__[1:] )
def do_shell( self, args ):
"""Execute a shell command
usage !<shell_command>
"""
comm = args
res = shellCall( 0, comm )
if res['OK'] and res['Value'][0] == 0:
_returnCode, stdOut, stdErr = res['Value']
print "%s\n%s" % ( stdOut, stdErr )
else:
print res['Message']
def check_params( self, args, num ):
"""Checks if the number of parameters correct"""
argss = args.split()
length = len( argss )
if length < num:
print "Error: Number of arguments provided %d less that required %d, please correct." % ( length, num )
return ( False, length )
return ( argss, length )
def check_id_or_name( self, id_or_name ):
"""resolve name or Id by converting type of argument """
if id_or_name.isdigit():
return long( id_or_name ) # its look like id
return id_or_name
####################################################################
#
# These are the methods for transformation manipulation
#
def do_getall( self, args ):
"""Get transformation details
usage: getall [Status] [Status]
"""
oTrans = Transformation()
oTrans.getTransformations( transStatus = args.split(), printOutput = True )
def do_getStatus( self, args ):
"""Get transformation details
usage: getStatus <transName|ID>
"""
argss = args.split()
if not len( argss ) > 0:
print "no transformation supplied"
return
for transName in argss:
res = self.server.getTransformation( transName )
if not res['OK']:
print "Getting status of %s failed: %s" % ( transName, res['Message'] )
else:
print "%s: %s" % ( transName, res['Value']['Status'] )
def do_setStatus( self, args ):
"""Set transformation status
usage: setStatus <Status> <transName|ID>
Status <'New' 'Active' 'Stopped' 'Completed' 'Cleaning'>
"""
argss = args.split()
if not len( argss ) > 1:
print "transformation and status not supplied"
return
status = argss[0]
transNames = argss[1:]
for transName in transNames:
res = self.server.setTransformationParameter( transName, 'Status', status )
if not res['OK']:
print "Setting status of %s failed: %s" % ( transName, res['Message'] )
else:
print "%s set to %s" % ( transName, status )
def do_start( self, args ):
"""Start transformation
usage: start <transName|ID>
"""
argss = args.split()
if not len( argss ) > 0:
print "no transformation supplied"
return
for transName in argss:
res = self.server.setTransformationParameter( transName, 'Status', 'Active' )
if not res['OK']:
print "Setting Status of %s failed: %s" % ( transName, res['Message'] )
else:
res = self.server.setTransformationParameter( transName, 'AgentType', 'Automatic' )
if not res['OK']:
print "Setting AgentType of %s failed: %s" % ( transName, res['Message'] )
else:
print "%s started" % transName
def do_stop( self, args ):
"""Stop transformation
usage: stop <transID|ID>
"""
argss = args.split()
if not len( argss ) > 0:
print "no transformation supplied"
return
if not len( argss ) > 0:
print "no transformation supplied"
return
for transName in argss:
res = self.server.setTransformationParameter( transName, 'AgentType', 'Manual' )
if not res['OK']:
print "Stopping of %s failed: %s" % ( transName, res['Message'] )
else:
print "%s stopped" % transName
def do_flush( self, args ):
"""Flush transformation
usage: flush <transName|ID>
"""
argss = args.split()
if not len( argss ) > 0:
print "no transformation supplied"
return
for transName in argss:
res = self.server.setTransformationParameter( transName, 'Status', 'Flush' )
if not res['OK']:
print "Flushing of %s failed: %s" % ( transName, res['Message'] )
else:
print "%s flushing" % transName
def do_get( self, args ):
"""Get transformation definition
usage: get <transName|ID>
"""
argss = args.split()
if not len( argss ) > 0:
print "no transformation supplied"
return
transName = argss[0]
res = self.server.getTransformation( transName )
if not res['OK']:
print "Failed to get %s: %s" % ( transName, res['Message'] )
else:
res['Value'].pop( 'Body' )
printDict( res['Value'] )
def do_getBody( self, args ):
"""Get transformation body
usage: getBody <transName|ID>
"""
argss = args.split()
if not len( argss ) > 0:
print "no transformation supplied"
return
transName = argss[0]
res = self.server.getTransformation( transName )
if not res['OK']:
print "Failed to get %s: %s" % ( transName, res['Message'] )
else:
print res['Value']['Body']
def do_getFileStat( self, args ):
"""Get transformation file statistics
usage: getFileStat <transName|ID>
"""
argss = args.split()
if not len( argss ) > 0:
print "no transformation supplied"
return
transName = argss[0]
res = self.server.getTransformationStats( transName )
if not res['OK']:
print "Failed to get statistics for %s: %s" % ( transName, res['Message'] )
else:
res['Value'].pop( 'Total' )
printDict( res['Value'] )
def do_modMask( self, args ):
"""Modify transformation input definition
usage: modInput <mask> <transName|ID>
"""
argss = args.split()
if not len( argss ) > 0:
print "no transformation supplied"
return
mask = argss[0]
transNames = argss[1:]
for transName in transNames:
res = self.server.setTransformationParameter( transName, "FileMask", mask )
if not res['OK']:
print "Failed to modify input file mask for %s: %s" % ( transName, res['Message'] )
else:
print "Updated %s filemask" % transName
def do_getFiles( self, args ):
"""Get files for the transformation (optionally with a given status)
usage: getFiles <transName|ID> [Status] [Status]
"""
argss = args.split()
if not len( argss ) > 0:
print "no transformation supplied"
return
transName = argss[0]
status = argss[1:]
res = self.server.getTransformation( transName )
if not res['OK']:
print "Failed to get transformation information: %s" % res['Message']
else:
selectDict = {'TransformationID':res['Value']['TransformationID']}
if status:
selectDict['Status'] = status
res = self.server.getTransformationFiles( condDict = selectDict )
if not res['OK']:
print "Failed to get transformation files: %s" % res['Message']
elif res['Value']:
self._printFormattedDictList( res['Value'], ['LFN', 'Status', 'ErrorCount', 'TargetSE', 'LastUpdate'],
'LFN', 'LFN' )
else:
print "No files found"
def do_getFileStatus( self, args ):
"""Get file(s) status for the given transformation
usage: getFileStatus <transName|ID> <lfn> [<lfn>...]
"""
argss = args.split()
if len( argss ) < 2:
print "transformation and file not supplied"
return
transName = argss[0]
lfns = argss[1:]
res = self.server.getTransformation( transName )
if not res['OK']:
print "Failed to get transformation information: %s" % res['Message']
else:
selectDict = {'TransformationID':res['Value']['TransformationID']}
res = self.server.getTransformationFiles( condDict = selectDict )
if not res['OK']:
print "Failed to get transformation files: %s" % res['Message']
elif res['Value']:
filesList = []
for fileDict in res['Value']:
if fileDict['LFN'] in lfns:
filesList.append( fileDict )
if filesList:
self._printFormattedDictList( filesList, ['LFN', 'Status', 'ErrorCount', 'TargetSE', 'LastUpdate'],
'LFN', 'LFN' )
else:
print "Could not find any LFN in", lfns, "for transformation", transName
else:
print "No files found"
def do_setFileStatus( self, args ):
"""Set file status for the given transformation
usage: setFileStatus <transName|ID> <lfn> <status>
"""
argss = args.split()
if not len( argss ) == 3:
print "transformation file and status not supplied"
return
transName = argss[0]
lfn = argss[1]
status = argss[2]
res = self.server.setFileStatusForTransformation( transName, status, [lfn] )
if not res['OK']:
print "Failed to update file status: %s" % res['Message']
else:
print "Updated file status to %s" % status
def do_resetFile( self, args ):
"""Reset file status for the given transformation
usage: resetFile <transName|ID> <lfn>
"""
argss = args.split()
if not len( argss ) > 1:
print "transformation and file(s) not supplied"
return
transName = argss[0]
lfns = argss[1:]
res = self.server.setFileStatusForTransformation( transName, 'Unused', lfns )
if not res['OK']:
print "Failed to reset file status: %s" % res['Message']
else:
if res['Value']['Failed']:
print "Could not reset some files: "
for lfn, reason in res['Value']['Failed'].items():
print lfn, reason
else:
print "Updated file statuses to 'Unused' for %d file(s)" % len( lfns )
def do_resetProcessedFile( self, args ):
""" Reset file status for the given transformation
usage: resetFile <transName|ID> <lfn>
"""
argss = args.split()
if not len( argss ) > 1:
print "transformation and file(s) not supplied"
return
transName = argss[0]
lfns = argss[1:]
res = self.server.setFileStatusForTransformation( transName, 'Unused', lfns, force = True )
if not res['OK']:
print "Failed to reset file status: %s" % res['Message']
else:
if res['Value']['Failed']:
print "Could not reset some files: "
for lfn, reason in res['Value']['Failed'].items():
print lfn, reason
else:
print "Updated file statuses to 'Unused' for %d file(s)" % len( lfns )
####################################################################
#
# These are the methods for file manipulation
#
def do_addDirectory( self, args ):
"""Add files from the given catalog directory
usage: addDirectory <directory> [directory]
"""
argss = args.split()
if not len( argss ) > 0:
print "no directory supplied"
return
for directory in argss:
res = self.server.addDirectory( directory, force = True )
if not res['OK']:
print 'failed to add directory %s: %s' % ( directory, res['Message'] )
else:
print 'added %s files for %s' % ( res['Value'], directory )
def do_replicas( self, args ):
""" Get replicas for <path>
usage: replicas <lfn> [lfn]
"""
argss = args.split()
if not len( argss ) > 0:
print "no files supplied"
return
res = self.server.getReplicas( argss )
if not res['OK']:
print "failed to get any replica information: %s" % res['Message']
return
for lfn in sorted( res['Value']['Failed'].keys() ):
error = res['Value']['Failed'][lfn]
print "failed to get replica information for %s: %s" % ( lfn, error )
for lfn in sorted( res['Value']['Successful'].keys() ):
ses = sorted( res['Value']['Successful'][lfn].keys() )
outStr = "%s :" % lfn.ljust( 100 )
for se in ses:
outStr = "%s %s" % ( outStr, se.ljust( 15 ) )
print outStr
def do_addFile( self, args ):
"""Add new files to transformation DB
usage: addFile <lfn> [lfn]
"""
argss = args.split()
if not len( argss ) > 0:
print "no files supplied"
return
lfnDict = {}
for lfn in argss:
lfnDict[lfn] = {'PFN':'IGNORED-PFN', 'SE':'IGNORED-SE', 'Size':0, 'GUID':'IGNORED-GUID',
'Checksum':'IGNORED-CHECKSUM'}
res = self.server.addFile( lfnDict, force = True )
if not res['OK']:
print "failed to add any files: %s" % res['Message']
return
for lfn in sorted( res['Value']['Failed'].keys() ):
error = res['Value']['Failed'][lfn]
print "failed to add %s: %s" % ( lfn, error )
for lfn in sorted( res['Value']['Successful'].keys() ):
print "added %s" % lfn
def do_removeFile( self, args ):
"""Remove file from transformation DB
usage: removeFile <lfn> [lfn]
"""
argss = args.split()
if not len( argss ) > 0:
print "no files supplied"
return
res = self.server.removeFile( argss )
if not res['OK']:
print "failed to remove any files: %s" % res['Message']
return
for lfn in sorted( res['Value']['Failed'].keys() ):
error = res['Value']['Failed'][lfn]
print "failed to remove %s: %s" % ( lfn, error )
for lfn in sorted( res['Value']['Successful'].keys() ):
print "removed %s" % lfn
def do_addReplica( self, args ):
""" Add new replica to the transformation DB
usage: addReplica <lfn> <se>
"""
argss = args.split()
if not len( argss ) == 2:
print "no file info supplied"
return
lfn = argss[0]
se = argss[1]
lfnDict = {}
lfnDict[lfn] = {'PFN':'IGNORED-PFN', 'SE':se, 'Size':0, 'GUID':'IGNORED-GUID', 'Checksum':'IGNORED-CHECKSUM'}
res = self.server.addReplica( lfnDict, force = True )
if not res['OK']:
print "failed to add replica: %s" % res['Message']
return
for lfn in sorted( res['Value']['Failed'].keys() ):
error = res['Value']['Failed'][lfn]
print "failed to add replica: %s" % ( error )
for lfn in sorted( res['Value']['Successful'].keys() ):
print "added %s" % lfn
def do_removeReplica( self, args ):
"""Remove replica from the transformation DB
usage: removeReplica <lfn> <se>
"""
argss = args.split()
if not len( argss ) == 2:
print "no file info supplied"
return
lfn = argss[0]
se = argss[1]
lfnDict = {}
lfnDict[lfn] = {'PFN':'IGNORED-PFN', 'SE':se, 'Size':0, 'GUID':'IGNORED-GUID', 'Checksum':'IGNORED-CHECKSUM'}
res = self.server.removeReplica( lfnDict )
if not res['OK']:
print "failed to remove replica: %s" % res['Message']
return
for lfn in sorted( res['Value']['Failed'].keys() ):
error = res['Value']['Failed'][lfn]
print "failed to remove replica: %s" % ( error )
for lfn in sorted( res['Value']['Successful'].keys() ):
print "removed %s" % lfn
def do_setReplicaStatus( self, args ):
"""Set replica status, usually used to mark a replica Problematic
usage: setReplicaStatus <lfn> <status> <se>
"""
argss = args.split()
if not len( argss ) > 2:
print "no file info supplied"
return
lfn = argss[0]
status = argss[1]
se = argss[2]
lfnDict = {}
lfnDict[lfn] = {'Status':status, 'PFN':'IGNORED-PFN', 'SE':se, 'Size':0, 'GUID':'IGNORED-GUID', 'Checksum':'IGNORED-CHECKSUM'}
res = self.server.setReplicaStatus( lfnDict )
if not res['OK']:
print "failed to set replica status: %s" % res['Message']
return
for lfn in sorted( res['Value']['Failed'].keys() ):
error = res['Value']['Failed'][lfn]
print "failed to set replica status: %s" % ( error )
for lfn in sorted( res['Value']['Successful'].keys() ):
print "updated replica status %s" % lfn
# Start the interactive Transformation System shell when run as a script.
if __name__ == "__main__":
  cli = TransformationCLI()
  cli.cmdloop()
|
Sbalbp/DIRAC
|
TransformationSystem/Client/TransformationCLI.py
|
Python
|
gpl-3.0
| 18,292
|
[
"DIRAC"
] |
1153463d740d18e7244e6238202d52b99ba71238cfbf2c573c86a0d33191807b
|
#!/usr/bin/env python3
"""
cv_algorithms difference-of-gaussian example
"""
import cv2
import cv_algorithms

# Load the demo image (fractals rendered with GIMP's fractal explorer).
source = cv2.imread("thinning-example.png")
# The operator works on a single channel, so drop the colour information.
grayscale = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
# Apply the inverted difference-of-Gaussian filter with sizes 5 and 1.
filtered = cv_algorithms.difference_of_gaussian(grayscale, 5, 1, invert=True)
# Persist the result next to the input for visual inspection.
cv2.imwrite("difference-of-gaussian-result.png", filtered)
|
ulikoehler/cv_algorithms
|
examples/difference-of-gaussian.py
|
Python
|
apache-2.0
| 513
|
[
"Gaussian"
] |
0a91d940b0e5b867333b7a4e6c5d44db9b1655921c776b81152098de3d708392
|
"""Soundex algorithm
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim (mark@diveintopython.org)"
__version__ = "$Revision: 1.3 $"
__date__ = "$Date: 2004/05/11 19:11:21 $"
__copyright__ = "Copyright (c) 2004 Mark Pilgrim"
__license__ = "Python"
import string, re
# Translation table mapping every ASCII letter (upper- and lower-case) to its
# Soundex digit; "9" marks letters (vowels, h, w, y) that the algorithm drops.
# (string.uppercase/maketrans: this module targets Python 2.)
allChar = string.uppercase + string.lowercase
charToSoundex = string.maketrans(allChar, "91239129922455912623919292" * 2)
def soundex(source):
    """Return the 4-character Soundex code for *source*.

    Non-alphabetic or empty input yields the sentinel "0000".
    """
    # Soundex requires at least one character, all of them letters.
    if not source or not source.isalpha():
        return "0000"
    # Keep the first letter, map the rest to Soundex digits.
    encoded = source[0].upper() + source[1:].translate(charToSoundex)
    # Collapse runs of identical digits into a single occurrence.
    collapsed = []
    previous = None
    for ch in encoded:
        if ch != previous:
            collapsed.append(ch)
            previous = ch
    # Drop the "9" placeholders (ignored letters).
    digits = re.sub('9', '', ''.join(collapsed))
    # Pad with zeros and truncate to exactly four characters.
    return (digits + "0000")[:4]
# Benchmark harness: times soundex() on three sample names (Python 2 print).
if __name__ == '__main__':
    from timeit import Timer
    names = ('Woo', 'Pilgrim', 'Flingjingwaller')
    for name in names:
        # Build a timeit statement calling soundex on this literal name.
        statement = "soundex('%s')" % name
        t = Timer(statement, "from __main__ import soundex")
        # Print the name, its Soundex code, and the best of the timing runs.
        print name.ljust(15), soundex(name), min(t.repeat())
|
tapomayukh/projects_in_python
|
sandbox_tapo/src/refs/diveintopython-pdf-5.4/diveintopython-5.4/py/soundex/stage3/soundex3a.py
|
Python
|
mit
| 1,677
|
[
"VisIt"
] |
b6219a7963635f397a0afce91f4f095f11109338748198d18b2b47f4d5af9aa0
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import json
import os
import unittest
from pymatgen.electronic_structure.cohp import (
Cohp,
CompleteCohp,
IcohpCollection,
IcohpValue,
get_integrated_cohp_in_energy_range,
)
from pymatgen.electronic_structure.core import Orbital, Spin
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp")
class CohpTest(unittest.TestCase):
    """Tests for the plain Cohp container (COHP, COOP and COBI flavours)."""

    def _from_json(self, name):
        # Deserialize a Cohp object from a JSON fixture in the test dir.
        with open(os.path.join(test_dir, name)) as handle:
            return Cohp.from_dict(json.load(handle))

    def setUp(self):
        self.cohp = self._from_json("cohp.json")
        # Same data but without the integrated COHP / ICOHP arrays.
        self.cohp_only = Cohp(self.cohp.efermi, self.cohp.energies, self.cohp.cohp)
        self.coop = self._from_json("coop.json")
        self.cobi = self._from_json("cobi.json")

    def test_as_from_dict(self):
        # Round trip: as_dict() must reproduce the serialized fixture exactly.
        for obj, fixture in ((self.cohp, "cohp.json"), (self.cobi, "cobi.json")):
            with open(os.path.join(test_dir, fixture)) as handle:
                self.assertEqual(obj.as_dict(), json.load(handle))

    def test_attributes(self):
        self.assertEqual(len(self.cohp.energies), 301)
        self.assertEqual(self.cohp.efermi, 9.75576)
        self.assertEqual(self.coop.efermi, 5.90043)
        # Each flavour flags only its own kind.
        self.assertFalse(self.cohp.are_coops)
        self.assertTrue(self.coop.are_coops)
        self.assertFalse(self.coop.are_cobis)
        self.assertFalse(self.cobi.are_coops)
        self.assertTrue(self.cobi.are_cobis)

    def test_get_icohp(self):
        # get_icohp is equivalent to asking for the integrated COHP ...
        self.assertEqual(self.cohp.get_icohp(), self.cohp.get_cohp(integrated=True))
        # ... and is None when no integrated data was supplied.
        self.assertEqual(None, self.cohp_only.get_icohp())

    def test_get_interpolated_value(self):
        # Reference ICOHP(Ef)/ICOOP(Ef) values taken from the
        # ICOHPLIST.lobster file.
        reference_icohp = {Spin.up: -0.10218, Spin.down: -0.19701}
        reference_icoop = {Spin.up: 0.24714}
        interpolated_icohp = self.cohp.get_interpolated_value(self.cohp.efermi, integrated=True)
        interpolated_icoop = self.coop.get_interpolated_value(self.coop.efermi, integrated=True)
        self.assertAlmostEqual(reference_icohp, interpolated_icohp)
        self.assertAlmostEqual(reference_icoop, interpolated_icoop)
        # Without integrated data the interpolation must refuse to run.
        with self.assertRaises(ValueError):
            self.cohp_only.get_interpolated_value(5.0, integrated=True)

    def test_str(self):
        for obj, fixture in ((self.cohp, "cohp.str"), (self.coop, "coop.str")):
            with open(os.path.join(test_dir, fixture)) as handle:
                self.assertEqual(str(obj), handle.read())

    def test_antibnd_states_below_efermi(self):
        self.assertDictEqual(
            self.cohp.has_antibnd_states_below_efermi(spin=None),
            {Spin.up: True, Spin.down: True},
        )
        self.assertDictEqual(
            self.cohp.has_antibnd_states_below_efermi(spin=None, limit=0.5),
            {Spin.up: False, Spin.down: False},
        )
        self.assertDictEqual(
            self.cohp.has_antibnd_states_below_efermi(spin=Spin.up, limit=0.5),
            {Spin.up: False},
        )
class IcohpValueTest(unittest.TestCase):
    """Tests for IcohpValue with and without spin polarization."""

    def setUp(self):
        # without spin polarization: a single (Spin.up) channel
        self.icohpvalue = IcohpValue(
            label="1",
            atom1="K1",
            atom2="F2",
            length="2.3",
            translation=[-1, 0, 0],
            num=1,
            icohp={Spin.up: -2.0},
            are_coops=False,
        )
        # with spin polarization: separate up and down channels
        self.icohpvalue_sp = IcohpValue(
            label="1",
            atom1="K1",
            atom2="F2",
            length="2.3",
            translation=[-1, 0, 0],
            num=1,
            icohp={Spin.up: -1.1, Spin.down: -1.0},
            are_coops=False,
        )

    def test_attributes(self):
        # without spin polarization
        # (fixed: this section erroneously re-tested the spin-polarized
        # object instead of self.icohpvalue)
        self.assertEqual(self.icohpvalue.num_bonds, 1)
        self.assertEqual(self.icohpvalue.are_coops, False)
        self.assertEqual(self.icohpvalue.is_spin_polarized, False)
        self.assertDictEqual(self.icohpvalue.icohp, {Spin.up: -2.0})

        # with spin polarization
        self.assertEqual(self.icohpvalue_sp.num_bonds, 1)
        self.assertEqual(self.icohpvalue_sp.are_coops, False)
        self.assertEqual(self.icohpvalue_sp.is_spin_polarized, True)
        self.assertDictEqual(self.icohpvalue_sp.icohp, {Spin.up: -1.1, Spin.down: -1.0})

    def test_icohpvalue(self):
        # without spin polarization
        self.assertEqual(self.icohpvalue.icohpvalue(spin=Spin.up), -2.0)

        # with spin polarization: each channel is retrievable on its own
        self.assertEqual(self.icohpvalue_sp.icohpvalue(spin=Spin.up), -1.1)
        self.assertEqual(self.icohpvalue_sp.icohpvalue(spin=Spin.down), -1.0)

    def test_summed_icohp(self):
        # without spin polarization: the sum is just the single channel
        self.assertEqual(self.icohpvalue.summed_icohp, -2.0)

        # with spin polarization: both channels are added
        self.assertEqual(self.icohpvalue_sp.summed_icohp, -2.1)
class CombinedIcohpTest(unittest.TestCase):
def setUp(self):
# without spin polarization:
are_coops = False
are_cobis = False
is_spin_polarized = False
list_atom2 = ["K2", "K2", "K2", "K2", "K2", "K2"]
list_icohp = [
{Spin.up: -0.40075},
{Spin.up: -0.40074},
{Spin.up: -0.40079},
{Spin.up: -0.40079},
{Spin.up: -0.40074},
{Spin.up: -0.40075},
]
list_icoop = [
{Spin.up: 0.02342},
{Spin.up: 0.02342},
{Spin.up: 0.02343},
{Spin.up: 0.02343},
{Spin.up: 0.02342},
{Spin.up: 0.02342},
]
list_labels = ["1", "2", "3", "4", "5", "6"]
list_length = [2.71199, 2.71199, 2.71199, 2.71199, 2.71199, 2.71199]
list_num = [1, 1, 1, 1, 1, 1]
list_atom1 = ["F1", "F1", "F1", "F1", "F1", "F1"]
list_translation = [
[0, -1, -1],
[-1, 0, -1],
[0, 0, -1],
[-1, -1, 0],
[0, -1, 0],
[-1, 0, 0],
]
self.icohpcollection_KF = IcohpCollection(
is_spin_polarized=is_spin_polarized,
are_coops=are_coops,
are_cobis=are_cobis,
list_labels=list_labels,
list_atom1=list_atom1,
list_atom2=list_atom2,
list_length=list_length,
list_translation=list_translation,
list_num=list_num,
list_icohp=list_icohp,
)
self.icoopcollection_KF = IcohpCollection(
is_spin_polarized=is_spin_polarized,
are_coops=True,
list_labels=list_labels,
list_atom1=list_atom1,
list_atom2=list_atom2,
list_length=list_length,
list_translation=list_translation,
list_num=list_num,
list_icohp=list_icoop,
)
# with spin polarization:
list_atom2_sp = ["Fe7", "Fe9"]
list_labels_sp = ["1", "2"]
list_translation_sp = [[0, 0, 0], [0, 0, 0]]
list_length_sp = [2.83189, 2.45249]
list_atom1_sp = ["Fe8", "Fe8"]
is_spin_polarized_sp = True
are_coops_sp = False
list_num_sp = [2, 1]
list_icohp_sp = [
{Spin.up: -0.10218, Spin.down: -0.19701},
{Spin.up: -0.28485, Spin.down: -0.58279},
]
list_icoop_sp = [
{Spin.up: -0.11389, Spin.down: -0.20828},
{Spin.up: -0.04087, Spin.down: -0.05756},
]
self.icohpcollection_Fe = IcohpCollection(
is_spin_polarized=is_spin_polarized_sp,
are_coops=are_coops_sp,
are_cobis=False,
list_labels=list_labels_sp,
list_atom1=list_atom1_sp,
list_atom2=list_atom2_sp,
list_length=list_length_sp,
list_translation=list_translation_sp,
list_num=list_num_sp,
list_icohp=list_icohp_sp,
)
self.icoopcollection_Fe = IcohpCollection(
is_spin_polarized=is_spin_polarized_sp,
are_coops=True,
list_labels=list_labels_sp,
list_atom1=list_atom1_sp,
list_atom2=list_atom2_sp,
list_length=list_length_sp,
list_translation=list_translation_sp,
list_num=list_num_sp,
list_icohp=list_icoop_sp,
)
def test_get_icohp_by_label(self):
# without spin polarization
# ICOHPs
self.assertEqual(self.icohpcollection_KF.get_icohp_by_label("1"), -0.40075)
self.assertEqual(self.icohpcollection_KF.get_icohp_by_label("2"), -0.40074)
self.assertEqual(self.icohpcollection_KF.get_icohp_by_label("3"), -0.40079)
self.assertEqual(self.icohpcollection_KF.get_icohp_by_label("4"), -0.40079)
self.assertEqual(self.icohpcollection_KF.get_icohp_by_label("5"), -0.40074)
self.assertEqual(self.icohpcollection_KF.get_icohp_by_label("6"), -0.40075)
# with spin polarization
# summed spin
# ICOHPs
self.assertEqual(self.icohpcollection_Fe.get_icohp_by_label("1"), -0.10218 - 0.19701)
self.assertEqual(self.icohpcollection_Fe.get_icohp_by_label("2"), -0.28485 - 0.58279)
# Spin up
# ICOHPs
self.assertEqual(
self.icohpcollection_Fe.get_icohp_by_label("1", summed_spin_channels=False),
-0.10218,
)
self.assertEqual(
self.icohpcollection_Fe.get_icohp_by_label("2", summed_spin_channels=False),
-0.28485,
)
# Spin down
# ICOHPs
self.assertEqual(
self.icohpcollection_Fe.get_icohp_by_label("1", summed_spin_channels=False, spin=Spin.down),
-0.19701,
)
self.assertEqual(
self.icohpcollection_Fe.get_icohp_by_label("2", summed_spin_channels=False, spin=Spin.down),
-0.58279,
)
def test_get_summed_icohp_by_label_list(self):
# without spin polarization
self.assertAlmostEqual(
self.icohpcollection_KF.get_summed_icohp_by_label_list(["1", "2", "3", "4", "5", "6"], divisor=6.0),
-0.40076,
)
# with spin polarization
sum1 = (-0.10218 - 0.19701 - 0.28485 - 0.58279) / 2.0
sum2 = (-0.10218 - 0.28485) / 2.0
sum3 = (-0.19701 - 0.58279) / 2.0
self.assertAlmostEqual(
self.icohpcollection_Fe.get_summed_icohp_by_label_list(["1", "2"], divisor=2.0),
sum1,
)
self.assertAlmostEqual(
self.icohpcollection_Fe.get_summed_icohp_by_label_list(["1", "2"], summed_spin_channels=False, divisor=2.0),
sum2,
)
self.assertAlmostEqual(
self.icohpcollection_Fe.get_summed_icohp_by_label_list(
["1", "2"], summed_spin_channels=False, spin=Spin.down, divisor=2.0
),
sum3,
)
def test_get_icohp_dict_by_bondlengths(self):
# without spin polarization
icohpvalue = {}
icohpvalue["1"] = {
"@module": "pymatgen.electronic_structure.cohp",
"num": 1,
"length": 2.71199,
"icohp": {Spin.up: -0.40075},
"are_coops": False,
"are_cobis": False,
"label": "1",
"atom2": "K2",
"@class": "IcohpValue",
"atom1": "F1",
"translation": [0, -1, -1],
}
icohpvalue["2"] = {
"@module": "pymatgen.electronic_structure.cohp",
"num": 1,
"length": 2.71199,
"icohp": {Spin.up: -0.40074},
"are_coops": False,
"are_cobis": False,
"label": "2",
"atom2": "K2",
"@class": "IcohpValue",
"atom1": "F1",
"translation": [-1, 0, -1],
}
icohpvalue["3"] = {
"@module": "pymatgen.electronic_structure.cohp",
"num": 1,
"length": 2.71199,
"icohp": {Spin.up: -0.40079},
"are_coops": False,
"are_cobis": False,
"label": "3",
"atom2": "K2",
"@class": "IcohpValue",
"atom1": "F1",
"translation": [0, 0, -1],
}
icohpvalue["4"] = {
"@module": "pymatgen.electronic_structure.cohp",
"num": 1,
"length": 2.71199,
"icohp": {Spin.up: -0.40079},
"are_coops": False,
"are_cobis": False,
"label": "4",
"atom2": "K2",
"@class": "IcohpValue",
"atom1": "F1",
"translation": [-1, -1, 0],
}
icohpvalue["5"] = {
"@module": "pymatgen.electronic_structure.cohp",
"num": 1,
"length": 2.71199,
"icohp": {Spin.up: -0.40074},
"are_coops": False,
"are_cobis": False,
"label": "5",
"atom2": "K2",
"@class": "IcohpValue",
"atom1": "F1",
"translation": [0, -1, 0],
}
icohpvalue["6"] = {
"@module": "pymatgen.electronic_structure.cohp",
"num": 1,
"length": 2.71199,
"icohp": {Spin.up: -0.40075},
"are_coops": False,
"are_cobis": False,
"label": "6",
"atom2": "K2",
"@class": "IcohpValue",
"atom1": "F1",
"translation": [-1, 0, 0],
}
dict_KF = self.icohpcollection_KF.get_icohp_dict_by_bondlengths(minbondlength=0.0, maxbondlength=8.0)
for key, value in sorted(dict_KF.items()):
v = value.as_dict()
if "@version" in v:
v.pop("@version")
self.assertDictEqual(v, icohpvalue[key])
self.assertDictEqual(
{},
self.icohpcollection_KF.get_icohp_dict_by_bondlengths(minbondlength=0.0, maxbondlength=1.0),
)
# with spin polarization
icohpvalue_spin = {}
icohpvalue_spin["1"] = {
"num": 2,
"atom2": "Fe7",
"translation": [0, 0, 0],
"@module": "pymatgen.electronic_structure.cohp",
"are_coops": False,
"are_cobis": False,
"atom1": "Fe8",
"label": "1",
"length": 2.83189,
"@class": "IcohpValue",
"icohp": {Spin.up: -0.10218, Spin.down: -0.19701},
}
icohpvalue_spin["2"] = {
"num": 1,
"atom2": "Fe9",
"translation": [0, 0, 0],
"@module": "pymatgen.electronic_structure.cohp",
"are_coops": False,
"are_cobis": False,
"atom1": "Fe8",
"label": "2",
"length": 2.45249,
"@class": "IcohpValue",
"icohp": {Spin.up: -0.28485, Spin.down: -0.58279},
}
dict_Fe = self.icohpcollection_Fe.get_icohp_dict_by_bondlengths(minbondlength=0.0, maxbondlength=8.0)
for key, value in sorted(dict_Fe.items()):
v = value.as_dict()
if "@version" in v:
v.pop("@version")
self.assertDictEqual(v, icohpvalue_spin[key])
dict_Fe2 = self.icohpcollection_Fe.get_icohp_dict_by_bondlengths(minbondlength=2.5, maxbondlength=2.9)
self.assertEqual(len(dict_Fe2), 1)
for key, value in sorted(dict_Fe2.items()):
v = value.as_dict()
if "@version" in v:
v.pop("@version")
self.assertDictEqual(v, icohpvalue_spin[key])
def test_get_icohp_dict_of_site(self):
    """Check get_icohp_dict_of_site: per-site ICOHP lookup with filtering.

    Covers the KF (non-spin-polarized) and Fe (spin-polarized) fixtures and
    the minsummedicohp/maxsummedicohp/minbondlength/maxbondlength/
    only_bonds_to filter arguments.
    """
    # without spin polarization
    # Expected serialized IcohpValue dicts for all six F1-K2 bonds of site 0.
    icohpvalue = {}
    icohpvalue["1"] = {
        "translation": [0, -1, -1],
        "are_coops": False,
        "are_cobis": False,
        "@module": "pymatgen.electronic_structure.cohp",
        "length": 2.71199,
        "atom2": "K2",
        "@class": "IcohpValue",
        "atom1": "F1",
        "num": 1,
        "label": "1",
        "icohp": {Spin.up: -0.40075},
    }
    icohpvalue["2"] = {
        "translation": [-1, 0, -1],
        "are_coops": False,
        "are_cobis": False,
        "@module": "pymatgen.electronic_structure.cohp",
        "length": 2.71199,
        "atom2": "K2",
        "@class": "IcohpValue",
        "atom1": "F1",
        "num": 1,
        "label": "2",
        "icohp": {Spin.up: -0.40074},
    }
    icohpvalue["3"] = {
        "translation": [0, 0, -1],
        "are_coops": False,
        "are_cobis": False,
        "@module": "pymatgen.electronic_structure.cohp",
        "length": 2.71199,
        "atom2": "K2",
        "@class": "IcohpValue",
        "atom1": "F1",
        "num": 1,
        "label": "3",
        "icohp": {Spin.up: -0.40079},
    }
    icohpvalue["4"] = {
        "translation": [-1, -1, 0],
        "are_coops": False,
        "are_cobis": False,
        "@module": "pymatgen.electronic_structure.cohp",
        "length": 2.71199,
        "atom2": "K2",
        "@class": "IcohpValue",
        "atom1": "F1",
        "num": 1,
        "label": "4",
        "icohp": {Spin.up: -0.40079},
    }
    icohpvalue["5"] = {
        "translation": [0, -1, 0],
        "are_coops": False,
        "are_cobis": False,
        "@module": "pymatgen.electronic_structure.cohp",
        "length": 2.71199,
        "atom2": "K2",
        "@class": "IcohpValue",
        "atom1": "F1",
        "num": 1,
        "label": "5",
        "icohp": {Spin.up: -0.40074},
    }
    icohpvalue["6"] = {
        "translation": [-1, 0, 0],
        "are_coops": False,
        "are_cobis": False,
        "@module": "pymatgen.electronic_structure.cohp",
        "length": 2.71199,
        "atom2": "K2",
        "@class": "IcohpValue",
        "atom1": "F1",
        "num": 1,
        "label": "6",
        "icohp": {Spin.up: -0.40075},
    }
    dict_KF = self.icohpcollection_KF.get_icohp_dict_of_site(site=0)
    for key, value in sorted(dict_KF.items()):
        v = value.as_dict()
        # "@version" depends on the installed pymatgen; drop it before comparing.
        if "@version" in v:
            v.pop("@version")
        self.assertDictEqual(v, icohpvalue[key])
    # compare number of results dependent on minsummedicohp, maxsummedicohp,minbondlength, maxbondlength, and
    # only_bonds_to
    dict_KF_2 = self.icohpcollection_KF.get_icohp_dict_of_site(
        site=0,
        minsummedicohp=None,
        maxsummedicohp=-0.0,
        minbondlength=0.0,
        maxbondlength=8.0,
    )
    dict_KF_3 = self.icohpcollection_KF.get_icohp_dict_of_site(
        site=0,
        minsummedicohp=None,
        maxsummedicohp=-0.5,
        minbondlength=0.0,
        maxbondlength=8.0,
    )
    dict_KF_4 = self.icohpcollection_KF.get_icohp_dict_of_site(
        site=0,
        minsummedicohp=0.0,
        maxsummedicohp=None,
        minbondlength=0.0,
        maxbondlength=8.0,
    )
    dict_KF_5 = self.icohpcollection_KF.get_icohp_dict_of_site(
        site=0,
        minsummedicohp=None,
        maxsummedicohp=None,
        minbondlength=0.0,
        maxbondlength=2.0,
    )
    dict_KF_6 = self.icohpcollection_KF.get_icohp_dict_of_site(
        site=0,
        minsummedicohp=None,
        maxsummedicohp=None,
        minbondlength=3.0,
        maxbondlength=8.0,
    )
    dict_KF_7 = self.icohpcollection_KF.get_icohp_dict_of_site(site=0, only_bonds_to=["K"])
    dict_KF_8 = self.icohpcollection_KF.get_icohp_dict_of_site(site=1, only_bonds_to=["K"])
    dict_KF_9 = self.icohpcollection_KF.get_icohp_dict_of_site(site=1, only_bonds_to=["F"])
    self.assertEqual(len(dict_KF_2), 6)
    self.assertEqual(len(dict_KF_3), 0)
    self.assertEqual(len(dict_KF_4), 0)
    self.assertEqual(len(dict_KF_5), 0)
    self.assertEqual(len(dict_KF_6), 0)
    self.assertEqual(len(dict_KF_7), 6)
    self.assertEqual(len(dict_KF_8), 0)
    self.assertEqual(len(dict_KF_9), 6)
    # spin polarization
    dict_Fe = self.icohpcollection_Fe.get_icohp_dict_of_site(site=0)
    self.assertEqual(len(dict_Fe), 0)
    # Fe8
    dict_Fe2 = self.icohpcollection_Fe.get_icohp_dict_of_site(site=7)
    self.assertEqual(len(dict_Fe2), 2)
    # Test the values
    icohplist_Fe = {}
    icohplist_Fe["1"] = {
        "are_coops": False,
        "are_cobis": False,
        "translation": [0, 0, 0],
        "icohp": {Spin.down: -0.19701, Spin.up: -0.10218},
        "length": 2.83189,
        "@module": "pymatgen.electronic_structure.cohp",
        "atom1": "Fe8",
        "atom2": "Fe7",
        "label": "1",
        "@class": "IcohpValue",
        "num": 2,
    }
    icohplist_Fe["2"] = {
        "are_coops": False,
        "are_cobis": False,
        "translation": [0, 0, 0],
        "icohp": {Spin.down: -0.58279, Spin.up: -0.28485},
        "length": 2.45249,
        "@module": "pymatgen.electronic_structure.cohp",
        "atom1": "Fe8",
        "atom2": "Fe9",
        "label": "2",
        "@class": "IcohpValue",
        "num": 1,
    }
    for key, value in sorted(dict_Fe2.items()):
        v = value.as_dict()
        if "@version" in v:
            v.pop("@version")
        self.assertEqual(v, icohplist_Fe[key])
    # Fe9
    dict_Fe3 = self.icohpcollection_Fe.get_icohp_dict_of_site(site=8)
    self.assertEqual(len(dict_Fe3), 1)
    # compare number of results dependent on minsummedicohp, maxsummedicohp,minbondlength, maxbondlength
    # Fe8
    dict_Fe4 = self.icohpcollection_Fe.get_icohp_dict_of_site(
        site=7,
        minsummedicohp=-0.3,
        maxsummedicohp=None,
        minbondlength=0.0,
        maxbondlength=8.0,
    )
    self.assertEqual(len(dict_Fe4), 1)
    values = []
    for key, value in dict_Fe4.items():
        values.append(value)
    v = values[0].as_dict()
    if "@version" in v:
        v.pop("@version")
    self.assertDictEqual(v, icohplist_Fe["1"])
    dict_Fe5 = self.icohpcollection_Fe.get_icohp_dict_of_site(
        site=7,
        minsummedicohp=None,
        maxsummedicohp=-0.3,
        minbondlength=0.0,
        maxbondlength=8.0,
    )
    self.assertEqual(len(dict_Fe5), 1)
    values = []
    for key, value in dict_Fe5.items():
        values.append(value)
    v = values[0].as_dict()
    if "@version" in v:
        v.pop("@version")
    self.assertDictEqual(v, icohplist_Fe["2"])
    dict_Fe6 = self.icohpcollection_Fe.get_icohp_dict_of_site(
        site=7,
        minsummedicohp=None,
        maxsummedicohp=None,
        minbondlength=0.0,
        maxbondlength=2.5,
    )
    self.assertEqual(len(dict_Fe6), 1)
    values = []
    for key, value in dict_Fe6.items():
        values.append(value)
    v = values[0].as_dict()
    if "@version" in v:
        v.pop("@version")
    self.assertDictEqual(v, icohplist_Fe["2"])
    dict_Fe7 = self.icohpcollection_Fe.get_icohp_dict_of_site(
        site=7,
        minsummedicohp=None,
        maxsummedicohp=None,
        minbondlength=2.5,
        maxbondlength=8.0,
    )
    self.assertEqual(len(dict_Fe7), 1)
    values = []
    for key, value in dict_Fe7.items():
        values.append(value)
    v = values[0].as_dict()
    if "@version" in v:
        v.pop("@version")
    self.assertDictEqual(v, icohplist_Fe["1"])
def test_extremum_icohpvalue(self):
    """Check extremum_icohpvalue for ICOHP/ICOOP collections.

    Exercises the non-spin-polarized KF fixtures, then the spin-polarized
    Fe fixtures with summed spins, spin-up only, and spin-down only.
    """
    # without spin polarization
    # ICOHPs
    self.assertEqual(self.icohpcollection_KF.extremum_icohpvalue(), -0.40079)
    # ICOOPs
    self.assertEqual(self.icoopcollection_KF.extremum_icohpvalue(), 0.02343)
    # with spin polarization, summed spin channels
    # ICOHPs
    self.assertEqual(self.icohpcollection_Fe.extremum_icohpvalue(), -0.86764)
    # ICOOPs
    self.assertAlmostEqual(self.icoopcollection_Fe.extremum_icohpvalue(), -0.09842999999999999)
    # spin up only (summed_spin_channels=False defaults to Spin.up)
    # ICOHPs
    self.assertEqual(
        self.icohpcollection_Fe.extremum_icohpvalue(summed_spin_channels=False),
        -0.28485,
    )
    # ICOOPs
    self.assertEqual(
        self.icoopcollection_Fe.extremum_icohpvalue(summed_spin_channels=False),
        -0.04087,
    )
    # spin down
    # ICOHPs
    self.assertEqual(
        self.icohpcollection_Fe.extremum_icohpvalue(summed_spin_channels=False, spin=Spin.down),
        -0.58279,
    )
    # ICOOPs
    self.assertEqual(
        self.icoopcollection_Fe.extremum_icohpvalue(summed_spin_channels=False, spin=Spin.down),
        -0.05756,
    )
class CompleteCohpTest(PymatgenTest):
    """Tests for CompleteCohp built from LOBSTER/LMTO files and dicts.

    setUp loads matching COHP/COOP/COBI data both from JSON dict
    representations and directly from the raw calculation output files,
    so the two construction paths can be compared against each other.
    """

    def setUp(self):
        # Dict-based constructions (JSON snapshots of previous parses).
        filepath = os.path.join(test_dir, "complete_cohp_lobster.json")
        with open(filepath) as f:
            self.cohp_lobster_dict = CompleteCohp.from_dict(json.load(f))
        filepath = os.path.join(test_dir, "complete_coop_lobster.json")
        with open(filepath) as f:
            self.coop_lobster_dict = CompleteCohp.from_dict(json.load(f))
        filepath = os.path.join(test_dir, "complete_cohp_lmto.json")
        with open(filepath) as f:
            self.cohp_lmto_dict = CompleteCohp.from_dict(json.load(f))
        filepath = os.path.join(test_dir, "complete_cohp_orbitalwise.json")
        with open(filepath) as f:
            self.cohp_orb_dict = CompleteCohp.from_dict(json.load(f))
        # Lobster 3.0
        filepath = os.path.join(test_dir, "complete_cohp_forb.json")
        with open(filepath) as f:
            self.cohp_lobster_forb_dict = CompleteCohp.from_dict(json.load(f))
        # Lobster 2.0
        filepath = os.path.join(test_dir, "COPL.BiSe")
        structure = os.path.join(test_dir, "CTRL.BiSe")
        self.cohp_lmto = CompleteCohp.from_file("lmto", filename=filepath, structure_file=structure)
        filepath = os.path.join(test_dir, "COHPCAR.lobster")
        structure = os.path.join(test_dir, "POSCAR")
        self.cohp_lobster = CompleteCohp.from_file("lobster", filename=filepath, structure_file=structure)
        filepath = os.path.join(test_dir, "COOPCAR.lobster.BiSe")
        structure = os.path.join(test_dir, "POSCAR.BiSe")
        self.coop_lobster = CompleteCohp.from_file(
            "lobster", filename=filepath, structure_file=structure, are_coops=True
        )
        filepath = os.path.join(test_dir, "COHPCAR.lobster.orbitalwise")
        structure = os.path.join(test_dir, "POSCAR.orbitalwise")
        self.cohp_orb = CompleteCohp.from_file("lobster", filename=filepath, structure_file=structure)
        # Orbital-wise file without a total COHP column (see
        # test_orbital_resolved_cohp for why this case exists).
        filepath = os.path.join(test_dir, "COHPCAR.lobster.notot.orbitalwise")
        self.cohp_notot = CompleteCohp.from_file("lobster", filename=filepath, structure_file=structure)
        # Lobster 3.0
        filepath = os.path.join(test_dir, "COHPCAR.lobster.Na2UO4")
        structure = os.path.join(test_dir, "POSCAR.Na2UO4")
        self.cohp_lobster_forb = CompleteCohp.from_file("lobster", filename=filepath, structure_file=structure)
        # spinpolarized case:
        filepath = os.path.join(test_dir, "environments", "COHPCAR.lobster.mp-190.gz")
        structure = os.path.join(test_dir, "environments", "POSCAR.mp_190.gz")
        self.cohp_lobster_spin_polarized = CompleteCohp.from_file(
            "lobster", filename=filepath, structure_file=structure
        )
        # COBI
        filepath = os.path.join(test_dir, "COBICAR.lobster")
        structure = os.path.join(test_dir, "POSCAR.COBI")
        self.cobi = CompleteCohp.from_file("lobster", filename=filepath, structure_file=structure, are_cobis=True)

    def test_attiributes(self):
        # NOTE(review): method name has a typo ("attiributes"); left unchanged
        # to avoid altering the public test name.
        self.assertFalse(self.cohp_lobster.are_coops)
        self.assertFalse(self.cohp_lobster.are_cobis)
        self.assertFalse(self.cohp_lobster_dict.are_coops)
        self.assertFalse(self.cohp_lmto.are_coops)
        self.assertFalse(self.cohp_lmto_dict.are_coops)
        self.assertTrue(self.coop_lobster.are_coops)
        self.assertTrue(self.coop_lobster_dict.are_coops)
        self.assertFalse(self.cohp_lobster_forb.are_coops)
        self.assertFalse(self.cohp_lobster_forb_dict.are_coops)
        self.assertEqual(len(self.cohp_lobster.energies), 301)
        self.assertEqual(len(self.cohp_lmto.energies), 801)
        self.assertEqual(len(self.coop_lobster.energies), 241)
        self.assertEqual(len(self.cohp_lobster_forb.energies), 7)
        self.assertEqual(self.cohp_lobster.efermi, 9.75576)
        self.assertEqual(self.cohp_lmto.efermi, -2.3433)
        self.assertEqual(self.coop_lobster.efermi, 5.90043)
        self.assertEqual(self.cohp_lobster_forb.efermi, 4.12875)
        self.assertTrue(self.cobi.are_cobis)
        self.assertFalse(self.cobi.are_coops)

    def test_dict(self):
        # The json files are dict representations of the COHPs from the LMTO
        # and LOBSTER calculations and should thus be the same.
        self.assertEqual(self.cohp_lobster.as_dict(), self.cohp_lobster_dict.as_dict())
        self.assertEqual(self.cohp_orb.as_dict(), self.cohp_orb_dict.as_dict())
        # Lobster 3.0, including f orbitals
        self.assertEqual(self.cohp_lobster_forb.as_dict(), self.cohp_lobster_forb_dict.as_dict())
        # Testing the LMTO dicts will be more involved. Since the average
        # is calculated and not read, there may be differences in rounding
        # with a very small number of matrix elements, which would cause the
        # test to fail
        for key in ["COHP", "ICOHP"]:
            self.assertArrayAlmostEqual(
                self.cohp_lmto.as_dict()[key]["average"]["1"],
                self.cohp_lmto_dict.as_dict()[key]["average"]["1"],
                5,
            )
        # for key in self.cohp_lmto.as_dict():
        #     if key not in ["COHP", "ICOHP"]:
        #         self.assertEqual(self.cohp_lmto.as_dict()[key],
        #                          self.cohp_lmto_dict.as_dict()[key])
        #     else:
        #         for bond in self.cohp_lmto.as_dict()[key]:
        #             if bond != "average":
        #                 self.assertEqual(self.cohp_lmto.as_dict()[key][bond],
        #                                  self.cohp_lmto_dict.as_dict()[key][bond])

    def test_icohp_values(self):
        # icohp_ef are the ICHOP(Ef) values taken from
        # the ICOHPLIST.lobster file.
        icohp_ef_dict = {
            "1": {Spin.up: -0.10218, Spin.down: -0.19701},
            "2": {Spin.up: -0.28485, Spin.down: -0.58279},
        }
        all_cohps_lobster = self.cohp_lobster.all_cohps
        for bond in icohp_ef_dict:
            icohp_ef = all_cohps_lobster[bond].get_interpolated_value(self.cohp_lobster.efermi, integrated=True)
            self.assertEqual(icohp_ef_dict[bond], icohp_ef)
        icoop_ef_dict = {
            "1": {Spin.up: 0.14245},
            "2": {Spin.up: -0.04118},
            "3": {Spin.up: 0.14245},
            "4": {Spin.up: -0.04118},
            "5": {Spin.up: -0.03516},
            "6": {Spin.up: 0.10745},
            "7": {Spin.up: -0.03516},
            "8": {Spin.up: 0.10745},
            "9": {Spin.up: -0.12395},
            "10": {Spin.up: 0.24714},
            "11": {Spin.up: -0.12395},
        }
        all_coops_lobster = self.coop_lobster.all_cohps
        for bond in icoop_ef_dict:
            icoop_ef = all_coops_lobster[bond].get_interpolated_value(self.coop_lobster.efermi, integrated=True)
            self.assertEqual(icoop_ef_dict[bond], icoop_ef)

    def test_get_cohp_by_label(self):
        self.assertEqual(self.cohp_orb.get_cohp_by_label("1").energies[0], -11.7225)
        self.assertEqual(self.cohp_orb.get_cohp_by_label("1").energies[5], -11.47187)
        self.assertFalse(self.cohp_orb.get_cohp_by_label("1").are_coops)
        self.assertEqual(self.cohp_orb.get_cohp_by_label("1").cohp[Spin.up][0], 0.0)
        self.assertEqual(self.cohp_orb.get_cohp_by_label("1").cohp[Spin.up][300], 0.03392)
        self.assertEqual(self.cohp_orb.get_cohp_by_label("average").cohp[Spin.up][230], -0.08792)
        self.assertEqual(
            self.cohp_orb.get_cohp_by_label("average").energies[230],
            -0.19368000000000007,
        )
        self.assertFalse(self.cohp_orb.get_cohp_by_label("average").are_coops)
        # test methods from super class that could be overwritten
        self.assertEqual(self.cohp_orb.get_icohp()[Spin.up][3], 0.0)
        self.assertEqual(self.cohp_orb.get_cohp()[Spin.up][3], 0.0)

    def test_get_cohp_by_label_summed_spin(self):
        # files without spin polarization
        self.assertAlmostEqual(self.cohp_orb.get_cohp_by_label("1", summed_spin_channels=True).energies[0], -11.7225)
        self.assertAlmostEqual(self.cohp_orb.get_cohp_by_label("1", summed_spin_channels=True).energies[5], -11.47187)
        self.assertFalse(self.cohp_orb.get_cohp_by_label("1", summed_spin_channels=True).are_coops)
        self.assertAlmostEqual(self.cohp_orb.get_cohp_by_label("1", summed_spin_channels=True).cohp[Spin.up][0], 0.0)
        self.assertAlmostEqual(
            self.cohp_orb.get_cohp_by_label("1", summed_spin_channels=True).cohp[Spin.up][300], 0.03392
        )
        self.assertAlmostEqual(
            self.cohp_orb.get_cohp_by_label("average", summed_spin_channels=True).cohp[Spin.up][230], -0.08792
        )
        self.assertAlmostEqual(
            self.cohp_orb.get_cohp_by_label("average", summed_spin_channels=True).energies[230],
            -0.19368000000000007,
        )
        self.assertFalse(self.cohp_orb.get_cohp_by_label("average", summed_spin_channels=True).are_coops)
        # file with spin polarization: summed result equals twice each channel
        self.assertAlmostEqual(
            self.cohp_lobster_spin_polarized.get_cohp_by_label("1", summed_spin_channels=False).cohp[Spin.up][300] * 2,
            self.cohp_lobster_spin_polarized.get_cohp_by_label("1", summed_spin_channels=True).cohp[Spin.up][300],
        )
        self.assertAlmostEqual(
            self.cohp_lobster_spin_polarized.get_cohp_by_label("1", summed_spin_channels=False).cohp[Spin.down][300]
            * 2,
            self.cohp_lobster_spin_polarized.get_cohp_by_label("1", summed_spin_channels=True).cohp[Spin.up][300],
        )
        self.assertAlmostEqual(
            self.cohp_lobster_spin_polarized.get_cohp_by_label("1", summed_spin_channels=True).energies[0],
            -15.03759 + 1.96204,
        )
        self.assertAlmostEqual(
            self.cohp_lobster_spin_polarized.get_cohp_by_label("1", summed_spin_channels=True).energies[5],
            -14.78697 + 1.96204,
        )
        self.assertFalse(self.cohp_lobster_spin_polarized.get_cohp_by_label("1", summed_spin_channels=True).are_coops)

    def test_get_summed_cohp_by_label_list(self):
        self.assertEqual(self.cohp_orb.get_summed_cohp_by_label_list(["1"]).energies[0], -11.7225)
        self.assertEqual(
            self.cohp_orb.get_summed_cohp_by_label_list(["1", "1"]).energies[0],
            -11.7225,
        )
        self.assertEqual(self.cohp_orb.get_summed_cohp_by_label_list(["1"]).energies[5], -11.47187)
        self.assertFalse(self.cohp_orb.get_summed_cohp_by_label_list(["1"]).are_coops)
        self.assertEqual(self.cohp_orb.get_summed_cohp_by_label_list(["1"]).cohp[Spin.up][0], 0.0)
        self.assertEqual(
            self.cohp_orb.get_summed_cohp_by_label_list(["1", "1"]).cohp[Spin.up][0],
            0.0,
        )
        # Summing the same label twice doubles the COHP ...
        self.assertEqual(
            self.cohp_orb.get_summed_cohp_by_label_list(["1", "1"]).cohp[Spin.up][300],
            0.03392 * 2.0,
        )
        # ... unless divisor compensates.
        self.assertEqual(
            self.cohp_orb.get_summed_cohp_by_label_list(["1", "1"], divisor=2).cohp[Spin.up][300],
            0.03392,
        )

    def test_get_summed_cohp_by_label_list_summed_spin(self):
        # files without spin polarization
        self.assertEqual(
            self.cohp_orb.get_summed_cohp_by_label_list(["1"], summed_spin_channels=True).energies[0], -11.7225
        )
        self.assertEqual(
            self.cohp_orb.get_summed_cohp_by_label_list(["1", "1"], summed_spin_channels=True).energies[0],
            -11.7225,
        )
        self.assertEqual(
            self.cohp_orb.get_summed_cohp_by_label_list(["1"], summed_spin_channels=True).energies[5], -11.47187
        )
        self.assertFalse(self.cohp_orb.get_summed_cohp_by_label_list(["1"], summed_spin_channels=True).are_coops)
        self.assertEqual(
            self.cohp_orb.get_summed_cohp_by_label_list(["1"], summed_spin_channels=True).cohp[Spin.up][0], 0.0
        )
        self.assertEqual(
            self.cohp_orb.get_summed_cohp_by_label_list(["1", "1"], summed_spin_channels=True).cohp[Spin.up][0],
            0.0,
        )
        self.assertEqual(
            self.cohp_orb.get_summed_cohp_by_label_list(["1", "1"], summed_spin_channels=True).cohp[Spin.up][300],
            0.03392 * 2.0,
        )
        self.assertEqual(
            self.cohp_orb.get_summed_cohp_by_label_list(["1", "1"], summed_spin_channels=True, divisor=2).cohp[Spin.up][
                300
            ],
            0.03392,
        )
        # file with spin polarization
        self.assertAlmostEqual(
            self.cohp_lobster_spin_polarized.get_summed_cohp_by_label_list(["1"], summed_spin_channels=False).cohp[
                Spin.up
            ][300]
            * 2,
            self.cohp_lobster_spin_polarized.get_summed_cohp_by_label_list(["1"], summed_spin_channels=True).cohp[
                Spin.up
            ][300],
        )
        self.assertAlmostEqual(
            self.cohp_lobster_spin_polarized.get_summed_cohp_by_label_list(["1"], summed_spin_channels=False).cohp[
                Spin.down
            ][300]
            * 2,
            self.cohp_lobster_spin_polarized.get_summed_cohp_by_label_list(["1"], summed_spin_channels=True).cohp[
                Spin.up
            ][300],
        )
        self.assertAlmostEqual(
            self.cohp_lobster_spin_polarized.get_summed_cohp_by_label_list(
                ["1", "1"], summed_spin_channels=True
            ).energies[0],
            -15.03759 + 1.96204,
        )
        self.assertAlmostEqual(
            self.cohp_lobster_spin_polarized.get_summed_cohp_by_label_list(["1"], summed_spin_channels=True).energies[
                5
            ],
            -14.78697 + 1.96204,
        )
        self.assertFalse(
            self.cohp_lobster_spin_polarized.get_summed_cohp_by_label_list(["1"], summed_spin_channels=True).are_coops
        )

    def test_get_summed_cohp_by_label_and_orbital_list(self):
        ref = self.cohp_orb.orb_res_cohp["1"]["4s-4px"]
        ref2 = self.cohp_orb.orb_res_cohp["1"]["4px-4pz"]
        cohp_label = self.cohp_orb.get_summed_cohp_by_label_and_orbital_list(["1"], ["4s-4px"])
        cohp_label2 = self.cohp_orb.get_summed_cohp_by_label_and_orbital_list(["1", "1"], ["4s-4px", "4s-4px"])
        cohp_label2x = self.cohp_orb.get_summed_cohp_by_label_and_orbital_list(
            ["1", "1"], ["4s-4px", "4s-4px"], divisor=2
        )
        cohp_label3 = self.cohp_orb.get_summed_cohp_by_label_and_orbital_list(["1", "1"], ["4px-4pz", "4s-4px"])
        self.assertArrayEqual(cohp_label.cohp[Spin.up], ref["COHP"][Spin.up])
        self.assertArrayEqual(cohp_label2.cohp[Spin.up], ref["COHP"][Spin.up] * 2.0)
        self.assertArrayEqual(cohp_label3.cohp[Spin.up], ref["COHP"][Spin.up] + ref2["COHP"][Spin.up])
        self.assertArrayEqual(cohp_label.icohp[Spin.up], ref["ICOHP"][Spin.up])
        self.assertArrayEqual(cohp_label2.icohp[Spin.up], ref["ICOHP"][Spin.up] * 2.0)
        self.assertArrayEqual(cohp_label2x.icohp[Spin.up], ref["ICOHP"][Spin.up])
        self.assertArrayEqual(cohp_label3.icohp[Spin.up], ref["ICOHP"][Spin.up] + ref2["ICOHP"][Spin.up])
        # Mismatched label/orbital list lengths must raise.
        with self.assertRaises(ValueError):
            self.cohp_orb.get_summed_cohp_by_label_and_orbital_list(["1"], ["4px-4pz", "4s-4px"])
        with self.assertRaises(ValueError):
            self.cohp_orb.get_summed_cohp_by_label_and_orbital_list(["1", "2"], ["4s-4px"])

    def test_get_summed_cohp_by_label_and_orbital_list_summed_spin_channels(self):
        ref = self.cohp_orb.orb_res_cohp["1"]["4s-4px"]
        ref2 = self.cohp_orb.orb_res_cohp["1"]["4px-4pz"]
        cohp_label = self.cohp_orb.get_summed_cohp_by_label_and_orbital_list(
            ["1"], ["4s-4px"], summed_spin_channels=True
        )
        cohp_label2 = self.cohp_orb.get_summed_cohp_by_label_and_orbital_list(
            ["1", "1"], ["4s-4px", "4s-4px"], summed_spin_channels=True
        )
        cohp_label2x = self.cohp_orb.get_summed_cohp_by_label_and_orbital_list(
            ["1", "1"], ["4s-4px", "4s-4px"], divisor=2, summed_spin_channels=True
        )
        cohp_label3 = self.cohp_orb.get_summed_cohp_by_label_and_orbital_list(
            ["1", "1"], ["4px-4pz", "4s-4px"], summed_spin_channels=True
        )
        self.assertArrayEqual(cohp_label.cohp[Spin.up], ref["COHP"][Spin.up])
        self.assertArrayEqual(cohp_label2.cohp[Spin.up], ref["COHP"][Spin.up] * 2.0)
        self.assertArrayEqual(cohp_label3.cohp[Spin.up], ref["COHP"][Spin.up] + ref2["COHP"][Spin.up])
        self.assertArrayEqual(cohp_label.icohp[Spin.up], ref["ICOHP"][Spin.up])
        self.assertArrayEqual(cohp_label2.icohp[Spin.up], ref["ICOHP"][Spin.up] * 2.0)
        self.assertArrayEqual(cohp_label2x.icohp[Spin.up], ref["ICOHP"][Spin.up])
        self.assertArrayEqual(cohp_label3.icohp[Spin.up], ref["ICOHP"][Spin.up] + ref2["ICOHP"][Spin.up])
        with self.assertRaises(ValueError):
            self.cohp_orb.get_summed_cohp_by_label_and_orbital_list(
                ["1"], ["4px-4pz", "4s-4px"], summed_spin_channels=True
            )
        with self.assertRaises(ValueError):
            self.cohp_orb.get_summed_cohp_by_label_and_orbital_list(["1", "2"], ["4s-4px"], summed_spin_channels=True)
        # files with spin polarization
        self.assertAlmostEqual(
            self.cohp_lobster_spin_polarized.get_summed_cohp_by_label_and_orbital_list(
                ["1"], ["6s-6s"], summed_spin_channels=False
            ).cohp[Spin.up][300]
            * 2,
            self.cohp_lobster_spin_polarized.get_summed_cohp_by_label_and_orbital_list(
                ["1"], ["6s-6s"], summed_spin_channels=True
            ).cohp[Spin.up][300],
        )
        self.assertAlmostEqual(
            self.cohp_lobster_spin_polarized.get_summed_cohp_by_label_and_orbital_list(
                ["1"], ["6s-6s"], summed_spin_channels=False
            ).cohp[Spin.down][300]
            * 2,
            self.cohp_lobster_spin_polarized.get_summed_cohp_by_label_and_orbital_list(
                ["1"], ["6s-6s"], summed_spin_channels=True
            ).cohp[Spin.up][300],
        )
        self.assertAlmostEqual(
            self.cohp_lobster_spin_polarized.get_summed_cohp_by_label_and_orbital_list(
                ["1"], ["6s-6s"], summed_spin_channels=True
            ).energies[0],
            -15.03759 + 1.96204,
        )
        self.assertAlmostEqual(
            self.cohp_lobster_spin_polarized.get_summed_cohp_by_label_and_orbital_list(
                ["1"], ["6s-6s"], summed_spin_channels=True
            ).energies[5],
            -14.78697 + 1.96204,
        )
        self.assertFalse(
            self.cohp_lobster_spin_polarized.get_summed_cohp_by_label_and_orbital_list(
                ["1"], ["6s-6s"], summed_spin_channels=True
            ).are_coops
        )

    def test_orbital_resolved_cohp(self):
        # When read from a COHPCAR file, total COHPs are calculated from
        # the orbital-resolved COHPs if the total is missing. This may be
        # case for LOBSTER version 2.2.0 and earlier due to a bug with the
        # cohpgenerator keyword. The calculated total should be approximately
        # the total COHP calculated by LOBSTER. Due to numerical errors in
        # the LOBSTER calculation, the precision is not very high though.
        self.assertArrayAlmostEqual(
            self.cohp_orb.all_cohps["1"].cohp[Spin.up],
            self.cohp_notot.all_cohps["1"].cohp[Spin.up],
            decimal=3,
        )
        self.assertArrayAlmostEqual(
            self.cohp_orb.all_cohps["1"].icohp[Spin.up],
            self.cohp_notot.all_cohps["1"].icohp[Spin.up],
            decimal=3,
        )
        # Tests different methods for getting orbital-resolved COHPs
        ref = self.cohp_orb.orb_res_cohp["1"]["4s-4px"]
        cohp_label = self.cohp_orb.get_orbital_resolved_cohp("1", "4s-4px")
        self.assertEqual(cohp_label.cohp, ref["COHP"])
        self.assertEqual(cohp_label.icohp, ref["ICOHP"])
        # Orbitals may be given as Orbital enums, strings, or indices.
        orbitals = [[Orbital.s, Orbital.px], ["s", "px"], [0, 3]]
        cohps = [self.cohp_orb.get_orbital_resolved_cohp("1", [[4, orb[0]], [4, orb[1]]]) for orb in orbitals]
        for cohp in cohps:
            self.assertEqual(cohp.as_dict(), cohp_label.as_dict())

    def test_orbital_resolved_cohp_summed_spin_channels(self):
        ref = self.cohp_orb.orb_res_cohp["1"]["4s-4px"]
        cohp_label = self.cohp_orb.get_orbital_resolved_cohp("1", "4s-4px", summed_spin_channels=True)
        self.assertEqual(cohp_label.cohp, ref["COHP"])
        self.assertEqual(cohp_label.icohp, ref["ICOHP"])
        orbitals = [[Orbital.s, Orbital.px], ["s", "px"], [0, 3]]
        cohps = [
            self.cohp_orb.get_orbital_resolved_cohp("1", [[4, orb[0]], [4, orb[1]]], summed_spin_channels=True)
            for orb in orbitals
        ]
        for cohp in cohps:
            self.assertEqual(cohp.as_dict(), cohp_label.as_dict())
        # spin polarization
        self.assertAlmostEqual(
            self.cohp_lobster_spin_polarized.get_orbital_resolved_cohp("1", "6s-6s", summed_spin_channels=False).cohp[
                Spin.up
            ][300]
            * 2,
            self.cohp_lobster_spin_polarized.get_orbital_resolved_cohp("1", "6s-6s", summed_spin_channels=True).cohp[
                Spin.up
            ][300],
        )
        self.assertAlmostEqual(
            self.cohp_lobster_spin_polarized.get_orbital_resolved_cohp("1", "6s-6s", summed_spin_channels=False).cohp[
                Spin.down
            ][300]
            * 2,
            self.cohp_lobster_spin_polarized.get_orbital_resolved_cohp("1", "6s-6s", summed_spin_channels=True).cohp[
                Spin.up
            ][300],
        )
        self.assertAlmostEqual(
            self.cohp_lobster_spin_polarized.get_orbital_resolved_cohp(
                "1", "6s-6s", summed_spin_channels=True
            ).energies[0],
            -15.03759 + 1.96204,
        )
        self.assertAlmostEqual(
            self.cohp_lobster_spin_polarized.get_orbital_resolved_cohp(
                "1", "6s-6s", summed_spin_channels=True
            ).energies[5],
            -14.78697 + 1.96204,
        )
        self.assertFalse(
            self.cohp_lobster_spin_polarized.get_orbital_resolved_cohp(
                "1", "6s-6s", summed_spin_channels=True
            ).are_coops
        )
class MethodTest(unittest.TestCase):
    """Tests for the module-level get_integrated_cohp_in_energy_range helper."""

    def setUp(self):
        filepath = os.path.join(test_dir, "COHPCAR.lobster")
        structure = os.path.join(test_dir, "POSCAR")
        self.cohp_lobster = CompleteCohp.from_file("lobster", filename=filepath, structure_file=structure)
        filepath = os.path.join(test_dir, "COHPCAR.lobster.orbitalwise")
        structure = os.path.join(test_dir, "POSCAR.orbitalwise")
        self.cohp_orb = CompleteCohp.from_file("lobster", filename=filepath, structure_file=structure)
        # spin-polarized case
        filepath = os.path.join(test_dir, "environments", "COHPCAR.lobster.mp-190.gz")
        structure = os.path.join(test_dir, "environments", "POSCAR.mp_190.gz")
        self.cohp_lobster_spin_polarized = CompleteCohp.from_file(
            "lobster", filename=filepath, structure_file=structure
        )

    def test_get_integrated_cohp_in_energy_range_full(self):
        """energy_range=None: integration up to the Fermi level."""
        cohp = self.cohp_lobster
        result = get_integrated_cohp_in_energy_range(
            cohp, label="1", orbital=None, energy_range=None, relative_E_Fermi=True, summed_spin_channels=True
        )
        self.assertAlmostEqual(result, -0.10218 - 0.19701)
        result = get_integrated_cohp_in_energy_range(
            cohp, label="1", orbital=None, energy_range=None, relative_E_Fermi=True, summed_spin_channels=False
        )
        self.assertAlmostEqual(result[Spin.up], -0.10218)
        self.assertAlmostEqual(result[Spin.down], -0.19701)
        # One without spin polarization
        result = get_integrated_cohp_in_energy_range(
            self.cohp_orb,
            label="1",
            orbital=None,
            energy_range=None,
            relative_E_Fermi=False,
            summed_spin_channels=False,
        )
        self.assertAlmostEqual(result[Spin.up], -4.36062)
        result = get_integrated_cohp_in_energy_range(
            self.cohp_orb, label="1", orbital=None, energy_range=None, relative_E_Fermi=True, summed_spin_channels=False
        )
        self.assertAlmostEqual(result[Spin.up], -4.36062)
        result = get_integrated_cohp_in_energy_range(
            self.cohp_orb, label="1", orbital=None, energy_range=None, relative_E_Fermi=True, summed_spin_channels=True
        )
        self.assertAlmostEqual(result, -4.36062)
        # something else for orbital resolved version
        # self.cohp_lobster_spin_polarized
        result = get_integrated_cohp_in_energy_range(
            self.cohp_lobster_spin_polarized,
            label="1",
            orbital="6s-6s",
            energy_range=None,
            relative_E_Fermi=True,
            summed_spin_channels=False,
        )
        self.assertAlmostEqual(result[Spin.up], -0.00006)
        self.assertAlmostEqual(result[Spin.down], -0.00006)
        result = get_integrated_cohp_in_energy_range(
            self.cohp_lobster_spin_polarized,
            label="1",
            orbital="6s-6s",
            energy_range=None,
            relative_E_Fermi=True,
            summed_spin_channels=True,
        )
        self.assertAlmostEqual(result, -0.00006 * 2)

    def test_get_integrated_cohp_in_energy_range_onefloat(self):
        """energy_range as a single float: integrate from that energy to E_F."""
        # only one float is given for energy range
        cohp = self.cohp_lobster
        fermi = cohp.efermi
        result = get_integrated_cohp_in_energy_range(
            cohp, label="1", orbital=None, energy_range=-0.60201, relative_E_Fermi=True, summed_spin_channels=True
        )
        self.assertAlmostEqual(result, -0.10218 - 0.19701 + 0.14894 + 0.21889)
        result = get_integrated_cohp_in_energy_range(
            cohp, label="1", orbital=None, energy_range=-0.60201, relative_E_Fermi=True, summed_spin_channels=False
        )
        self.assertAlmostEqual(result[Spin.up], -0.10218 + 0.14894)
        self.assertAlmostEqual(result[Spin.down], -0.19701 + 0.21889)
        # only one float is given for energy range (relative to E-fermi)
        result = get_integrated_cohp_in_energy_range(
            cohp,
            label="1",
            orbital=None,
            energy_range=-0.60201 + fermi,
            relative_E_Fermi=False,
            summed_spin_channels=True,
        )
        self.assertAlmostEqual(result, -0.10218 - 0.19701 + 0.14894 + 0.21889)
        result = get_integrated_cohp_in_energy_range(
            cohp,
            label="1",
            orbital=None,
            energy_range=-0.60201 + fermi,
            relative_E_Fermi=False,
            summed_spin_channels=False,
        )
        self.assertAlmostEqual(result[Spin.up], -0.10218 + 0.14894)
        self.assertAlmostEqual(result[Spin.down], -0.19701 + 0.21889)
        # without spin
        fermi = self.cohp_orb.efermi
        result = get_integrated_cohp_in_energy_range(
            self.cohp_orb,
            label="1",
            orbital=None,
            energy_range=-14.0350 + fermi,
            relative_E_Fermi=False,
            summed_spin_channels=False,
        )
        self.assertAlmostEqual(result[Spin.up], -4.36062)
        result = get_integrated_cohp_in_energy_range(
            self.cohp_orb,
            label="1",
            orbital=None,
            energy_range=-14.03509,
            relative_E_Fermi=True,
            summed_spin_channels=False,
        )
        self.assertAlmostEqual(result[Spin.up], -4.36062)
        result = get_integrated_cohp_in_energy_range(
            self.cohp_orb,
            label="1",
            orbital=None,
            energy_range=-14.03509,
            relative_E_Fermi=True,
            summed_spin_channels=True,
        )
        self.assertAlmostEqual(result, -4.36062)

    def test_get_integrated_cohp_in_energy_range_whole_range(self):
        """energy_range as an explicit [start, end] pair."""
        cohp = self.cohp_lobster
        fermi = cohp.efermi
        result = get_integrated_cohp_in_energy_range(
            cohp, label="1", orbital=None, energy_range=[-0.60201, 0], relative_E_Fermi=True, summed_spin_channels=True
        )
        self.assertAlmostEqual(result, -0.10218 - 0.19701 + 0.14894 + 0.21889)
        result = get_integrated_cohp_in_energy_range(
            cohp, label="1", orbital=None, energy_range=[-0.60201, 0], relative_E_Fermi=True, summed_spin_channels=False
        )
        self.assertAlmostEqual(result[Spin.up], -0.10218 + 0.14894)
        self.assertAlmostEqual(result[Spin.down], -0.19701 + 0.21889)
        # whole energy range
        result = get_integrated_cohp_in_energy_range(
            cohp,
            label="1",
            orbital=None,
            energy_range=[-0.60201 + fermi, 0 + fermi],
            relative_E_Fermi=False,
            summed_spin_channels=True,
        )
        self.assertAlmostEqual(result, -0.10218 - 0.19701 + 0.14894 + 0.21889)
        result = get_integrated_cohp_in_energy_range(
            cohp,
            label="1",
            orbital=None,
            energy_range=[-0.60201 + fermi, 0 + fermi],
            relative_E_Fermi=False,
            summed_spin_channels=False,
        )
        self.assertAlmostEqual(result[Spin.up], -0.10218 + 0.14894)
        self.assertAlmostEqual(result[Spin.down], -0.19701 + 0.21889)
        # without spin
        fermi = self.cohp_orb.efermi
        result = get_integrated_cohp_in_energy_range(
            self.cohp_orb,
            label="1",
            orbital=None,
            energy_range=[-14.0350 + fermi, fermi],
            relative_E_Fermi=False,
            summed_spin_channels=False,
        )
        self.assertAlmostEqual(result[Spin.up], -4.36062)
        result = get_integrated_cohp_in_energy_range(
            self.cohp_orb,
            label="1",
            orbital=None,
            energy_range=[-14.0350, 0],
            relative_E_Fermi=True,
            summed_spin_channels=False,
        )
        self.assertAlmostEqual(result[Spin.up], -4.36062)
        result = get_integrated_cohp_in_energy_range(
            self.cohp_orb,
            label="1",
            orbital=None,
            energy_range=[-14.0350, 0],
            relative_E_Fermi=True,
            summed_spin_channels=True,
        )
        self.assertAlmostEqual(result, -4.36062)
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main()
|
materialsproject/pymatgen
|
pymatgen/electronic_structure/tests/test_cohp.py
|
Python
|
mit
| 56,785
|
[
"pymatgen"
] |
f3c6e44f94564880acfc73f4a4d788a08a59c0967797ed5c0f972ab3407c8dee
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# showfreeze - front end to freezing files into write-once archive
# Copyright (C) 2003-2014 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
# CGI entry point: all request handling is delegated to the shared
# showfreeze functionality module.
import cgi
# NOTE(review): 'cgi' is imported but not referenced in this stub; presumably
# kept for parity with the other cgi-bin stubs -- confirm before removing.
import cgitb
# Render uncaught exceptions as HTML tracebacks in the browser.
cgitb.enable()
from shared.functionality.showfreeze import main
from shared.cgiscriptstub import run_cgi_script
# Wrap main() in the standard MiG CGI scaffolding and handle this request.
run_cgi_script(main)
|
heromod/migrid
|
mig/cgi-bin/showfreeze.py
|
Python
|
gpl-2.0
| 1,104
|
[
"Brian"
] |
5260cff1de8ea9fcc05a2379a5d2a40898ced99f55b2764fd6a63c0146c91118
|
import numpy as np
from math import exp, sqrt
from ase.calculators.calculator import Calculator
class MorsePotential(Calculator):
    """Pairwise Morse potential.

    For each atom pair at distance r the energy contribution is
    epsilon * x * (x - 2) with x = exp(rho0 * (1 - r / r0)).
    Default values chosen to be similar as Lennard-Jones.
    """

    implemented_properties = ['energy', 'forces']
    default_parameters = {'epsilon': 1.0,
                          'rho0': 6.0,
                          'r0': 1.0}
    nolabel = True

    def __init__(self, **kwargs):
        Calculator.__init__(self, **kwargs)

    def calculate(self, atoms=None, properties=['energy'],
                  system_changes=['positions', 'numbers', 'cell',
                                  'pbc', 'charges', 'magmoms']):
        Calculator.calculate(self, atoms, properties, system_changes)

        params = self.parameters
        eps = params.epsilon
        rho = params.rho0
        r_eq = params.r0

        pos = self.atoms.get_positions()
        n_atoms = len(self.atoms)
        total_energy = 0.0
        force_acc = np.zeros((n_atoms, 3))
        # Common prefactor of the pair force magnitude.
        force_scale = 2 * eps * rho / r_eq

        # Loop over unique pairs (k < j) and accumulate energy plus equal
        # and opposite forces on both partners.
        for j in range(n_atoms):
            for k in range(j):
                bond = pos[k] - pos[j]
                dist = sqrt(np.dot(bond, bond))
                x = exp(rho * (1.0 - dist / r_eq))
                total_energy += eps * x * (x - 2)
                pair_force = force_scale * x * (x - 1) * bond / dist
                force_acc[j] -= pair_force
                force_acc[k] += pair_force

        self.results['energy'] = total_energy
        self.results['forces'] = force_acc
|
grhawk/ASE
|
tools/ase/calculators/morse.py
|
Python
|
gpl-2.0
| 1,494
|
[
"ASE"
] |
fc37666f7caef9812b076070308e276555c23c3dfeb002c0d220ed2c23fef4c2
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
# Resolve the directory containing this script and make it the working
# directory so relative paths (manifest, build/, assets/) resolve from here.
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
# Manifest keys that must be present and non-empty for a valid module.
# NOTE(review): 'copyright' is listed twice; harmless (the loop just checks
# it twice) but probably unintended.
required_module_keys = ['architectures', 'name','version','moduleid','description','copyright','license','copyright','platform','minsdk']
# Template placeholder values; a warning is issued when a manifest key still
# matches one of these.
module_defaults = {
    'description':'My module',
    'author': 'Your Name',
    'license' : 'Specify your license',
    'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
# Sentinel text shipped in the template LICENSE file.
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
    """Return the Titanium SDK path from *config* with ~ and $VARS expanded."""
    raw_path = config['TITANIUM_SDK']
    home_expanded = os.path.expanduser(raw_path)
    return os.path.expandvars(home_expanded)
def replace_vars(config, token):
    """Expand $(KEY) placeholders in *token* using values from *config*.

    Expansion repeats until no resolvable placeholder remains. An unknown
    key or an unterminated '$(' stops substitution and the partially
    expanded token is returned.
    """
    idx = token.find('$(')
    while idx != -1:
        end = token.find(')', idx + 2)
        if end == -1:
            break  # unterminated placeholder
        key = token[idx + 2:end]
        # 'key in config' replaces dict.has_key(), which was removed in
        # Python 3; behavior is identical on Python 2.
        if key not in config:
            break
        token = token.replace('$(%s)' % key, config[key])
        idx = token.find('$(')
    return token
def read_ti_xcconfig():
    """Parse titanium.xcconfig into a dict, expanding $(VAR) references.

    Lines starting with '//' are comments; everything before the first
    '=' is the key, the remainder the value.
    """
    # 'with' guarantees the file handle is closed; the original leaked it.
    with open(os.path.join(cwd, 'titanium.xcconfig')) as fh:
        contents = fh.read()
    config = {}
    for line in contents.splitlines(False):
        line = line.strip()
        if line[0:2] == '//':
            continue
        idx = line.find('=')
        if idx > 0:
            key = line[0:idx].strip()
            value = line[idx + 1:].strip()
            # Earlier keys may be referenced by later values.
            config[key] = replace_vars(config, value)
    return config
def generate_doc(config):
    """Render the module's markdown documentation to HTML.

    Looks for a 'documentation' directory next to (or above) this script
    and returns a list of {filename: html} dicts, or None when the
    directory does not exist.
    """
    docdir = os.path.join(cwd, 'documentation')
    if not os.path.exists(docdir):
        docdir = os.path.join(cwd, '..', 'documentation')
    if not os.path.exists(docdir):
        print "Couldn't find documentation file at: %s" % docdir
        return None
    # Prefer markdown2 when installed; fall back to plain markdown.
    try:
        import markdown2 as markdown
    except ImportError:
        import markdown
    documentation = []
    for file in os.listdir(docdir):
        # ignoreFiles is defined at module level further down; it is bound
        # by the time this function runs.
        if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
            continue
        md = open(os.path.join(docdir, file)).read()
        html = markdown.markdown(md)
        documentation.append({file: html});
    return documentation
def compile_js(manifest, config):
    """Compile the module's CommonJS asset and splice the result into the
    generated Objective-C asset router, then write exports metadata.

    Returns silently when no JS asset file exists.
    """
    js_file = os.path.join(cwd, 'assets', 'com.leftlanelab.firebase.js')
    if not os.path.exists(js_file):
        js_file = os.path.join(cwd, '..', 'assets', 'com.leftlanelab.firebase.js')
    if not os.path.exists(js_file): return
    from compiler import Compiler
    try:
        import json
    except:
        import simplejson as json
    compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
    root_asset, module_assets = compiler.compile_module()
    # Objective-C snippet returning the root (entry-point) asset bytes.
    root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
    # Objective-C snippet resolving an arbitrary module asset by path.
    module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
    from tools import splice_code
    assets_router = os.path.join(cwd, 'Classes', 'ComLeftlanelabFirebaseModuleAssets.m')
    splice_code(assets_router, 'asset', root_asset_content)
    splice_code(assets_router, 'resolve_asset', module_asset_content)
    # Generate the exports after crawling all of the available JS source
    exports = open('metadata.json', 'w')
    json.dump({'exports': compiler.exports }, exports)
    exports.close()
def die(msg):
    """Print *msg* and abort the build with a non-zero exit code."""
    print msg
    sys.exit(1)

def warn(msg):
    """Print a non-fatal warning."""
    print "[WARN] %s" % msg

def error(msg):
    """Print an error message (does not exit; callers use die() for that)."""
    print "[ERROR] %s" % msg
def validate_license():
    """Warn when the LICENSE file still contains the template placeholder."""
    license_file = os.path.join(cwd, 'LICENSE')
    if not os.path.exists(license_file):
        license_file = os.path.join(cwd, '..', 'LICENSE')
    if os.path.exists(license_file):
        c = open(license_file).read()
        if c.find(module_license_default) != -1:
            warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
    """Parse and validate the module manifest.

    Returns (manifest_dict, manifest_path). Aborts the build via die()
    when the file or any required key is missing; warns when a key still
    carries its template default value.
    """
    path = os.path.join(cwd, 'manifest')
    # Check existence BEFORE opening: the original opened the file first,
    # so a missing manifest raised IOError instead of the intended die().
    if not os.path.exists(path):
        die("missing %s" % path)
    manifest = {}
    f = open(path)
    try:
        for line in f.readlines():
            line = line.strip()
            if line[0:1] == '#':
                continue
            if line.find(':') < 0:
                continue
            key, value = line.split(':')
            manifest[key.strip()] = value.strip()
    finally:
        # The original never closed the handle.
        f.close()
    for key in required_module_keys:
        # 'in' replaces dict.has_key(), removed in Python 3.
        if key not in manifest:
            die("missing required manifest key '%s'" % key)
        if manifest[key].strip() == '':
            die("manifest key '%s' missing required value" % key)
        if key in module_defaults:
            defvalue = module_defaults[key]
            curvalue = manifest[key]
            if curvalue == defvalue:
                warn("please update the manifest key: '%s' to a non-default value" % key)
    return manifest, path
# File and directory names excluded from packaging and doc generation.
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf, dir, basepath, ignore=[], includeJSFiles=False):
    """Recursively add *dir* to the open zipfile *zf*, rebasing entry paths
    onto *basepath*.

    Skips names in the module-level ignore lists, compiled .pyc files and
    (unless includeJSFiles is true) .js files.
    """
    for root, dirs, files in os.walk(dir):
        # Prune ignored directory names in place so os.walk never descends.
        for skipped in ignoreDirs:
            if skipped in dirs:
                dirs.remove(skipped)
        for entry in files:
            if entry in ignoreFiles:
                continue
            ext = os.path.splitext(entry)[1]
            if ext == '.pyc':
                continue
            if ext == '.js' and not includeJSFiles:
                continue
            src = os.path.join(root, entry)
            dest = src.replace(dir, basepath, 1)
            zf.write(src, dest)
def glob_libfiles():
    """Return all static libraries produced by Release-* configurations."""
    return [libpath for libpath in glob.glob('build/**/*.a')
            if libpath.find('Release-') != -1]
def build_module(manifest, config):
    """Build device and simulator static libraries with xcodebuild, then
    merge them into a single fat library with lipo.
    """
    from tools import ensure_dev_path
    ensure_dev_path()
    rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    # build the merged library using lipo
    moduleid = manifest['moduleid']
    libpaths = ''
    for libfile in glob_libfiles():
        libpaths += '%s ' % libfile
    os.system("lipo %s -create -output build/lib%s.a" % (libpaths, moduleid))
def verify_build_arch(manifest, config):
    """Ensure the compiled fat library contains exactly the architectures
    declared in the manifest; warns on missing arm64, dies on mismatch.
    """
    binaryname = 'lib%s.a' % manifest['moduleid']
    binarypath = os.path.join('build', binaryname)
    manifestarch = set(manifest['architectures'].split(' '))
    # lipo -info prints "... are: <arch> <arch> ..."; take text after the
    # last ':' and split on spaces.
    output = subprocess.check_output('xcrun lipo -info %s' % binarypath, shell=True)
    builtarch = set(output.split(':')[-1].strip().split(' '))
    print 'Check build architectures\n'
    if ('arm64' not in builtarch):
        warn('built module is missing 64-bit support.')
    if (manifestarch != builtarch):
        warn('architectures in manifest: %s' % ', '.join(manifestarch))
        warn('compiled binary architectures: %s' % ', '.join(builtarch))
        print '\nMODULE BUILD FAILED'
        error('there is discrepancy between the architectures specified in module manifest and compiled binary.')
        error('Please update manifest to match module binary architectures.')
        die('')
def package_module(manifest, mf, config):
    """Assemble the distributable <moduleid>-iphone-<version>.zip.

    Packs the manifest, fat static library, rendered documentation,
    assets, example/platform folders, LICENSE, module.xcconfig and the
    exports metadata under modules/iphone/<moduleid>/<version>/.
    """
    name = manifest['name'].lower()
    moduleid = manifest['moduleid'].lower()
    version = manifest['version']
    modulezip = '%s-iphone-%s.zip' % (moduleid, version)
    if os.path.exists(modulezip): os.remove(modulezip)
    zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
    modulepath = 'modules/iphone/%s/%s' % (moduleid, version)
    zf.write(mf, '%s/manifest' % modulepath)
    libname = 'lib%s.a' % moduleid
    zf.write('build/%s' % libname, '%s/%s' % (modulepath, libname))
    docs = generate_doc(config)
    if docs != None:
        for doc in docs:
            for file, html in doc.iteritems():
                # Markdown sources become .html entries under documentation/.
                filename = string.replace(file, '.md', '.html')
                zf.writestr('%s/documentation/%s' % (modulepath, filename), html)
    p = os.path.join(cwd, 'assets')
    if not os.path.exists(p):
        p = os.path.join(cwd, '..', 'assets')
    if os.path.exists(p):
        # JS files are excluded here (zip_dir default) since they are
        # compiled into the binary by compile_js().
        zip_dir(zf, p, '%s/%s' % (modulepath, 'assets'), ['README'])
    for dn in ('example', 'platform'):
        p = os.path.join(cwd, dn)
        if not os.path.exists(p):
            p = os.path.join(cwd, '..', dn)
        if os.path.exists(p):
            zip_dir(zf, p, '%s/%s' % (modulepath, dn), ['README'], True)
    license_file = os.path.join(cwd, 'LICENSE')
    if not os.path.exists(license_file):
        license_file = os.path.join(cwd, '..', 'LICENSE')
    if os.path.exists(license_file):
        zf.write(license_file, '%s/LICENSE' % modulepath)
    zf.write('module.xcconfig', '%s/module.xcconfig' % modulepath)
    exports_file = 'metadata.json'
    if os.path.exists(exports_file):
        zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
    zf.close()
if __name__ == '__main__':
    # Build pipeline: validate inputs, locate the SDK, compile JS assets,
    # build and verify the native library, then package the module zip.
    manifest, mf = validate_manifest()
    validate_license()
    config = read_ti_xcconfig()
    sdk = find_sdk(config)
    # Make the SDK's python tooling (compiler, tools) importable.
    sys.path.insert(0, os.path.join(sdk, 'iphone'))
    sys.path.append(os.path.join(sdk, "common"))
    compile_js(manifest, config)
    build_module(manifest, config)
    verify_build_arch(manifest, config)
    package_module(manifest, mf, config)
    sys.exit(0)
|
shodanuk/firebase-titanium
|
iphone/build.py
|
Python
|
mit
| 8,646
|
[
"VisIt"
] |
c65dc8a639d13923df6efb21953d041f719b379a293c81409dc9f0b01d366f97
|
#!/usr/bin/python
# Copyright (C) 2012,2013,2014 The ESPResSo project
# Copyright (C) 2011 Olaf Lenz
# Copyright 2008 Marcus D. Hanwell <marcus@cryos.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import string, re, os
# Parse `git log` output and emit a GNU-style ChangeLog on stdout.
# Execute git log with the desired command line options.
fin = os.popen('git log --summary --stat --no-merges --date=short 3.0.1..', 'r')

# Parser state: which parts of the current commit block have been seen.
authorFound = False
dateFound = False
messageFound = False
filesFound = False
message = ""
messageNL = False
files = ""
prevAuthorLine = ""
commitId = ""

# The main part of the loop
for line in fin:
    # The commit line marks the start of a new commit object.
    m = re.match(r'^commit (.*)$', line)
    if m is not None:
        commitId = m.group(1)
        # Start all over again...
        authorFound = False
        dateFound = False
        messageFound = False
        messageNL = False
        message = ""
        filesFound = False
        files = ""
        continue
    # Match the author line and extract the part we want
    m = re.match(r'^Author:\s*(.*)\s*$', line)
    if m is not None:
        author = m.group(1)
        authorFound = True
        continue
    # Match the date line
    m = re.match(r'^Date:\s*(.*)\s*$', line)
    if m is not None:
        date = m.group(1)
        dateFound = True
        continue
    # The svn-id lines and sign-off lines are ignored.
    # BUGFIX: the original tested `re.search(...) >= 0`, which only "worked"
    # through Python 2's arbitrary cross-type ordering (a Match compares
    # greater than 0, None compares smaller) and raises TypeError on
    # Python 3. A plain truthiness test has identical semantics.
    if re.search('git-svn-id:|^Signed-off-by', line):
        continue
    # Extract the actual commit message for this commit
    if not (authorFound & dateFound & messageFound):
        # Find the commit message if we can
        if len(line) == 1:
            # First blank line separates header from message; the second
            # terminates the message.
            if messageNL:
                messageFound = True
            else:
                messageNL = True
        elif len(line) == 4:
            messageFound = True
        else:
            if len(message) == 0:
                message = message + line.strip()
            else:
                message = message + " " + line.strip()
    # If this line is hit all of the files have been stored for this commit
    if re.search('files changed', line):  # BUGFIX: was `>= 0` (see above)
        filesFound = True
        continue
    # Collect the files for this commit. FIXME: Still need to add +/- to files
    elif authorFound & dateFound & messageFound:
        fileList = re.split(r' \| ', line, 2)
        if len(fileList) > 1:
            if len(files) > 0:
                files = files + ", " + fileList[0].strip()
            else:
                files = fileList[0].strip()
    # All of the parts of the commit have been found - write out the entry
    if authorFound & dateFound & messageFound & filesFound:
        # First the author line, only outputted if it is the first for that
        # author on this day
        authorLine = date + " " + author
        if len(prevAuthorLine) == 0:
            print(authorLine)
        elif authorLine == prevAuthorLine:
            pass
        else:
            print(("\n" + authorLine))
        # Assemble the actual commit message line(s) and limit the line length
        # to 80 characters.
        commitLine = "* " + files + ": " + message
        i = 0
        commit = ""
        while i < len(commitLine):
            if len(commitLine) < i + 78:
                commit = commit + "\n " + commitLine[i:len(commitLine)]
                break
            index = commitLine.rfind(' ', i, i + 78)
            if index > i:
                commit = commit + "\n " + commitLine[i:index]
                i = index + 1
            else:
                # No space found in the window: hard wrap.
                # NOTE(review): commitLine[i:78] is almost certainly meant to
                # be commitLine[i:i+78]; kept as-is to preserve output.
                commit = commit + "\n " + commitLine[i:78]
                i = i + 79
        # Write out the commit line
        print(commit)
        # Now reset all the variables ready for a new commit block.
        authorFound = False
        dateFound = False
        messageFound = False
        messageNL = False
        message = ""
        filesFound = False
        files = ""
        commitId = ""
        prevAuthorLine = authorLine

# Close the input and output lines now that we are finished.
fin.close()
|
mgusenbauer/espresso
|
maintainer/git2changelog.py
|
Python
|
gpl-3.0
| 4,726
|
[
"ESPResSo"
] |
bcff87acaa80b86a9dfb297f9092092fb7b0791503b469b0a9a06833c8ad115e
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the AccessChecker class that contains helper functions
for checking access.
"""
from django.utils.translation import ugettext
from google.appengine.api import users
from google.appengine.ext import db
from soc.logic import host as host_logic
from soc.logic.exceptions import LoginRequest
from soc.logic.exceptions import RedirectRequest
from soc.logic.exceptions import BadRequest
from soc.logic.exceptions import NotFound
from soc.logic.exceptions import AccessViolation
from soc.logic.exceptions import GDocsLoginRequest
from soc.models.org_app_record import OrgAppRecord
from soc.models.org_app_survey import OrgAppSurvey
from soc.models.request import INVITATION_TYPE
from soc.models.request import REQUEST_TYPE
from soc.models.user import User
from soc.views.helper.gdata_apis import oauth as oauth_helper
DEF_AGREE_TO_TOS = ugettext(
'You must agree to the <a href="%(tos_link)s">site-wide Terms of'
' Service</a> in your <a href="/user/edit_profile">User Profile</a>'
' in order to view this page.')
DEF_ALREADY_ADMIN = ugettext(
'You cannot be a organization administrator for %s to access this page.')
DEF_ALREADY_MENTOR = ugettext(
'You cannot be a mentor for %s to access this page.')
DEF_ALREADY_PARTICIPATING = ugettext(
'You cannot become a Student because you are already participating '
'in this program.')
DEF_ALREADY_PARTICIPATING_AS_STUDENT = ugettext(
'You cannot register as a %s since you are already a '
'student in %s.')
DEF_CANNOT_ACCESS_ORG_APP = ugettext(
'You do not have access to this organization application.')
DEF_CANNOT_UPDATE_ENTITY = ugettext(
'This %(model)s cannot be updated.')
DEF_DEV_LOGOUT_LOGIN = ugettext(
'Please <a href="%%(sign_out)s">sign out</a>'
' and <a href="%%(sign_in)s">sign in</a>'
' again as %(role)s to view this page.')
DEF_ENTITY_DOES_NOT_BELONG_TO_YOU = ugettext(
'This %(model)s entity does not belong to you.')
DEF_HAS_ALREADY_ROLE_FOR_ORG = ugettext(
'You already have %(role)s role for %(org)s.')
DEF_ID_BASED_ENTITY_INVALID = ugettext(
    '%(model)s entity, whose id is %(id)s, is invalid at this time.')
# NOTE(review): DEF_ID_BASED_ENTITY_NOT_EXISTS is assigned again further down
# in this module with different wording; that later assignment wins at
# runtime, making this one dead. (The text here also reads "is does not
# exist" -- left unchanged because message strings are runtime behavior.)
DEF_ID_BASED_ENTITY_NOT_EXISTS = ugettext(
    '%(model)s entity, whose id is %(id)s, is does not exist.')
DEF_INVITE_DOES_NOT_EXIST = ugettext(
'There is no invite with id %s.')
DEF_INVITE_CANNOT_BE_RESUBMITTED = ugettext(
'Only withdrawn invitations may be resubmitted.')
DEF_INVITE_CANNOT_BE_ACCESSED = ugettext(
'This invite cannot be accessed from this account.')
DEF_INVITE_CANNOT_BE_WITHDRAWN = ugettext(
'Only pending invitations may be withdrawn.')
DEF_INVITE_CANNOT_BE_RESPONDED = ugettext(
'This invite cannot be responded at this moment')
DEF_INVITE_ACCEPTED = ugettext(
'This invite has been accepted.')
DEF_INVITE_REJECTED = ugettext(
'This invite has been rejected.')
DEF_INVITE_WITHDRAWN = ugettext(
'This invite has been withdrawn.')
DEF_REQUEST_DOES_NOT_EXIST = ugettext(
'There is no request with id %s.')
DEF_REQUEST_CANNOT_BE_ACCESSED = ugettext(
'This request cannot be accessed from this account.')
DEF_ACCEPTED_REQUEST_CANNOT_BE_MANAGED = ugettext(
'This request cannot be managed because it is already been accepted.')
DEF_REQUEST_CANNOT_BE_WITHDRAWN = ugettext(
'This %s request cannot be withdrawn.')
DEF_REQUEST_CANNOT_BE_RESUBMITTED = ugettext(
'This %s request cannot be resubmitted.')
DEF_IS_NOT_STUDENT = ugettext(
'This page is inaccessible because you do not have a student role '
'in the program.')
DEF_HAS_NO_PROJECT = ugettext(
'This page is inaccessible because you do not have an accepted project '
'in the program.')
DEF_IS_STUDENT = ugettext(
'This page is inaccessible because you are registered as a student.')
DEF_NO_DOCUMENT = ugettext(
'The document was not found')
DEF_NO_LINK_ID = ugettext(
'Link ID should not be empty')
DEF_NO_ORG_APP = ugettext(
'The organization application for the program %s does not exist.')
DEF_NO_SLOT_TRANSFER = ugettext(
'This page is inaccessible at this time. It is accessible only after '
'the program administrator has made the slot allocations available and '
'before %s')
DEF_NO_SUCH_PROGRAM = ugettext(
'The url is wrong (no program was found).')
DEF_NO_SURVEY_ACCESS = ugettext (
'You cannot take this survey because this survey is not created for'
'your role in the program.')
DEF_NO_USER_LOGIN = ugettext(
'Please create <a href="/user/create">User Profile</a>'
' in order to view this page.')
DEF_NO_USER_PROFILE = ugettext(
'You must not have a User profile to visit this page.')
DEF_NO_USER = ugettext(
'User with the Link ID %s does not exist.')
DEF_NOT_ADMIN = ugettext(
'You need to be a organization administrator for %s to access this page.')
DEF_NOT_DEVELOPER = ugettext(
'You need to be a site developer to access this page.')
DEF_NOT_HOST = ugettext(
'You need to be a program adminstrator to access this page.')
DEF_NOT_MENTOR = ugettext(
'You need to be a mentor for %s to access this page.')
DEF_NOT_PARTICIPATING = ugettext(
'You are not participating in this program and have no access.')
DEF_NOT_PROPOSER = ugettext(
'You are not allowed to perform this action since you are not the'
'author(proposer) for this proposal.')
DEF_NOT_PUBLIC_DOCUMENT = ugettext(
'This document is not publically readable.')
DEF_NOT_VALID_INVITATION = ugettext(
'This is not a valid invitation.')
DEF_NOT_VALID_REQUEST = ugettext(
'This is not a valid request.')
DEF_ORG_DOES_NOT_EXISTS = ugettext(
'Organization, whose link_id is %(link_id)s, does not exist in '
'%(program)s.')
DEF_ORG_NOT_ACTIVE = ugettext(
'Organization %(name)s is not active in %(program)s.')
DEF_PAGE_INACTIVE = ugettext(
'This page is inactive at this time.')
DEF_PAGE_INACTIVE_BEFORE = ugettext(
'This page is inactive before %s')
DEF_PAGE_INACTIVE_OUTSIDE = ugettext(
'This page is inactive before %s and after %s.')
DEF_PROGRAM_NOT_VISIBLE = ugettext(
'This page is inaccessible because %s is not visible at this time.')
DEF_PROGRAM_NOT_RUNNING = ugettext(
'This page is inaccessible because %s is not running at this time.')
DEF_PROPOSAL_IGNORED_MESSAGE = ugettext(
'An organization administrator has flagged this proposal to be '
'ignored. If you think this is incorrect, contact an organization '
'administrator to resolve the situation.')
DEF_PROPOSAL_MODIFICATION_REQUEST = ugettext(
'If you would like to update this proposal, request your organization '
'to which this proposal belongs, to grant permission to modify the '
'proposal.')
DEF_PROPOSAL_NOT_PUBLIC = ugettext(
'This proposal is not made public, '
'and you are not the student who submitted the proposal, '
'nor are you a mentor for the organization it was submitted to.')
DEF_PROFILE_INACTIVE = ugettext(
'This page is inaccessible because your profile is inactive in '
'the program at this time.')
DEF_NO_PROFILE = ugettext(
'This page is inaccessible because you do not have a profile '
'in the program at this time.')
DEF_SCOPE_INACTIVE = ugettext(
'The scope for this request is not active.')
DEF_ID_BASED_ENTITY_NOT_EXISTS = ugettext(
'The requested %(model)s entity whose id is %(id)s does not exist.')
DEF_STATISTIC_DOES_NOT_EXIST = ugettext(
'The statistic whose name is %(key_name)s does not exist.')
DEF_KEYNAME_BASED_ENTITY_NOT_EXISTS = ugettext(
'The requested %(model)s entity whose keyname is %(key_name)s does not exist.')
DEF_KEYNAME_BASED_ENTITY_INVALID = ugettext(
'%(model)s entity, whose keyname is %(key_name)s, is invalid at this time.')
# Sentinel marking RequestData attributes that have not been populated yet
# (None is a legitimate value, hence a dedicated object).
unset = object()

def isSet(value):
    """Tell whether *value* has been populated, i.e. is not the sentinel."""
    return not (value is unset)
class Mutator(object):
    """Helper class for access checking.
    Mutates the data object as requested.
    """

    def __init__(self, data):
        # data: the per-request RequestData object this mutator populates.
        self.data = data
        self.unsetAll()

    def unsetAll(self):
        """Reset every field this mutator may populate to the module-level
        'unset' sentinel so isSet() can detect missing population."""
        self.data.action = unset
        self.data.can_respond = unset
        self.data.document = unset
        self.data.invited_user = unset
        self.data.invited_profile = unset
        self.data.invite = unset
        self.data.key_name = unset
        self.data.request_entity = unset
        self.data.requester = unset
        self.data.scope_path = unset
        self.data.url_profile = unset
        self.data.url_student_info = unset
        self.data.url_user = unset

    def documentKeyNameFromKwargs(self):
        """Returns the document key fields from kwargs.
        Returns False if not all fields were supplied/consumed.
        """
        from soc.models.document import Document
        fields = []
        kwargs = self.data.kwargs.copy()
        prefix = kwargs.pop('prefix', None)
        fields.append(prefix)
        if prefix in ['gsoc_program', 'gsoc_org', 'gci_program', 'gci_org']:
            fields.append(kwargs.pop('sponsor', None))
            fields.append(kwargs.pop('program', None))
        if prefix in ['gsoc_org', 'gci_org']:
            fields.append(kwargs.pop('organization', None))
        fields.append(kwargs.pop('document', None))
        # Leftover kwargs mean the URL carried unexpected components.
        if any(kwargs.values()):
            raise BadRequest("Unexpected value for document url")
        if not all(fields):
            raise BadRequest("Missing value for document url")
        # scope_path excludes the prefix and the trailing document link id.
        self.data.scope_path = '/'.join(fields[1:-1])
        self.data.key_name = '/'.join(fields)
        self.data.document = Document.get_by_key_name(self.data.key_name)

    def profileFromKwargs(self, profile_model):
        """Retrieves a profile from kwargs.
        Args:
          profile_model: The datastore model class
        """
        key_name = self.data.kwargs['user']
        self.data.url_user = User.get_by_key_name(key_name)
        if not self.data.url_user:
            raise NotFound('Requested user does not exist')
        # Profile key names are 'sponsor/program/user', parented by the User.
        fields = ['sponsor', 'program', 'user']
        key_name = '/'.join(self.data.kwargs[i] for i in fields)
        self.data.url_profile = profile_model.get_by_key_name(
            key_name, parent=self.data.url_user)
        if not self.data.url_profile:
            raise NotFound('Requested user does not have a profile')

    def studentFromKwargs(self):
        """Resolve the profile from kwargs and require it to be a student."""
        # NOTE(review): profileFromKwargs() declares a required profile_model
        # argument but none is passed here -- as written this raises
        # TypeError; confirm whether a subclass overrides profileFromKwargs
        # without arguments.
        self.profileFromKwargs()
        self.data.url_student_info = self.data.url_profile.student_info
        if not self.data.url_student_info:
            raise NotFound('Requested user is not a student')

    def canRespondForUser(self):
        """Determine whether the current user may respond to the invite."""
        assert isSet(self.data.invited_user)
        assert isSet(self.data.invite)
        if self.data.invited_user.key() != self.data.user.key():
            # org admins may see the invitations and can respond to requests
            self.data.can_respond = self.data.invite.type == 'Request'
        else:
            # user that the entity refers to may only respond if it is a Request
            self.data.can_respond = self.data.invite.type == 'Invitation'

    def commentVisible(self):
        """Compute public/private comment visibility for the current user."""
        assert isSet(self.data.url_user)
        self.data.public_comments_visible = False
        self.data.private_comments_visible = False
        # if the user is not logged in, no comments can be made
        if not self.data.user:
            return
        # if the current user is the proposer, he or she may access public comments
        if self.data.user.key() == self.data.url_user.key():
            self.data.public_comments_visible = True
            return
        # All the mentors and org admins from the organization may access public
        # and private comments.
        if self.data.mentorFor(self.data.proposal_org):
            self.data.public_comments_visible = True
            self.data.private_comments_visible = True
            return

    def host(self):
        """Look up the Host entity for the current user and set is_host."""
        assert isSet(self.data.user)
        self.data.host = host_logic.getHostForUser(self.data.user)
        if self.data.host or self.data.user.host_for:
            self.data.is_host = True

    def orgAppFromKwargs(self, raise_not_found=True):
        """Sets the organization application in RequestData object.
        Args:
          raise_not_found: iff False do not send 404 response.
        """
        assert self.data.program
        q = OrgAppSurvey.all()
        q.filter('program', self.data.program)
        self.data.org_app = q.get()
        if raise_not_found and not self.data.org_app:
            raise NotFound(DEF_NO_ORG_APP % self.data.program.name)

    def orgAppRecordIfIdInKwargs(self):
        """Sets the organization application record when an 'id' kwarg is
        present; raises NotFound if the id resolves to nothing.
        """
        assert self.data.org_app
        self.data.org_app_record = None
        id = self.data.kwargs.get('id')
        if id:
            self.data.org_app_record = OrgAppRecord.get_by_id(int(id))
            if not self.data.org_app_record:
                raise NotFound(DEF_NO_ORG_APP % self.data.program.name)
class DeveloperMutator(Mutator):
    """Mutator variant for developers: relaxes response and visibility
    rules and adds host lookup by explicit link_id."""

    def canRespondForUser(self):
        # Developers may always respond.
        self.data.can_respond = True

    def commentVisible(self):
        # Developers see both public and private comments.
        self.data.public_comments_visible = True
        self.data.private_comments_visible = True

    def hostFromKwargs(self):
        """Set the host entity for the given user in the kwargs.
        """
        self.data.host_user_key = None
        key_name = self.data.kwargs.get('link_id', '')
        if not key_name:
            # No explicit link_id: fall back to the currently logged-in user.
            self.host()
            if self.data.is_host:
                return
            else:
                raise NotFound(DEF_NO_LINK_ID)
        user_key = db.Key.from_path('User', key_name)
        if not user_key:
            raise NotFound(DEF_NO_USER % key_name)
        self.data.host_user_key = user_key
        self.data.host = host_logic.getHostForUser(user_key)
class BaseAccessChecker(object):
    """Helper class for access checking.
    Should contain all access checks that apply to both regular users
    and developers.

    Checks follow a common pattern: return silently when access is
    granted, raise an exceptions subclass otherwise.
    """

    def __init__(self, data):
        """Initializes the access checker object.
        """
        # data: per-request RequestData; gae_user: current App Engine account.
        self.data = data
        self.gae_user = users.get_current_user()

    def fail(self, message):
        """Raises an AccessViolation with the specified message.
        """
        raise AccessViolation(message)

    def isLoggedIn(self):
        """Ensures that the user is logged in.
        """
        if self.gae_user:
            return
        raise LoginRequest()

    def isLoggedOut(self):
        """Ensures that the user is logged out.
        """
        if not self.gae_user:
            return
        raise RedirectRequest(self.data.logout_url)

    def isUser(self):
        """Checks if the current user has an User entity.
        """
        self.isLoggedIn()
        if self.data.user:
            return
        raise AccessViolation(DEF_NO_USER_LOGIN)

    def isNotUser(self):
        """Checks if the current user does not have an User entity.
        To perform this check a User must be logged in.
        """
        self.isLoggedIn()
        if not self.data.user:
            return
        raise AccessViolation(DEF_NO_USER_PROFILE)

    def isDeveloper(self):
        """Checks if the current user is a Developer.
        """
        self.isUser()
        if self.data.user.is_developer:
            return
        # App Engine administrators are treated as developers as well.
        if users.is_current_user_admin():
            return
        raise AccessViolation(DEF_NOT_DEVELOPER)

    def hasProfile(self):
        """Checks if the user has a profile for the current program.
        """
        self.isLoggedIn()
        if self.data.profile:
            return
        raise AccessViolation(DEF_NO_PROFILE)

    def isProfileActive(self):
        """Checks if the profile of the current user is active.
        """
        self.hasProfile()
        if self.data.profile.status == 'active':
            return
        raise AccessViolation(DEF_PROFILE_INACTIVE)

    def isInvitePresent(self, invite_id):
        """Checks if the invite entity is not None.
        """
        assert isSet(self.data.invite)
        if self.data.invite is None:
            raise AccessViolation(DEF_INVITE_DOES_NOT_EXIST % invite_id)
        # An entity of the wrong type is reported as missing, not invalid.
        if self.data.invite.type != INVITATION_TYPE:
            raise AccessViolation(DEF_INVITE_DOES_NOT_EXIST % invite_id)

    def isRequestPresent(self, request_id):
        """Checks if the request entity is not None.
        """
        assert isSet(self.data.request_entity)
        if self.data.request_entity is None:
            raise AccessViolation(DEF_REQUEST_DOES_NOT_EXIST % request_id)
        if self.data.request_entity.type != REQUEST_TYPE:
            raise AccessViolation(DEF_REQUEST_DOES_NOT_EXIST % request_id)

    def canAccessGoogleDocs(self):
        """Checks if user has a valid access token to access Google Documents.
        """
        self.isUser()
        access_token = oauth_helper.getAccessToken(self.data.user)
        if not access_token: #TODO(orc.avs):check token is valid
            next = self.data.request.get_full_path()
            raise GDocsLoginRequest(next)
class DeveloperAccessChecker(BaseAccessChecker):
    """Access checker for developers: every check succeeds.

    Any attribute lookup yields a no-op callable, so arbitrary check
    methods can be invoked and silently pass.
    """

    def __getattr__(self, name):
        def _allow(*args, **kwargs):
            return None
        return _allow
class AccessChecker(BaseAccessChecker):
"""Helper class for access checking.
"""
def isHost(self):
    """Ensure the current user carries the program-host role."""
    self.isLoggedIn()
    if not self.data.is_host:
        raise AccessViolation(DEF_NOT_HOST)
def isProgramRunning(self):
    """Checks whether the program is running now by making sure the current
    data is between program start and end and the program is visible to
    normal users.
    """
    if not self.data.program:
        raise NotFound(DEF_NO_SUCH_PROGRAM)
    # Visibility is checked first so hidden programs fail with the
    # visibility message rather than the timeline one.
    self.isProgramVisible()
    if self.data.timeline.programActive():
        return
    raise AccessViolation(
        DEF_PROGRAM_NOT_RUNNING % self.data.program.name)
def isProgramVisible(self):
    """Checks whether the program exists and is visible to the user.
    Programs with status 'visible' are visible to everyone; programs in
    any state are always visible to hosts.
    """
    if not self.data.program:
        raise NotFound(DEF_NO_SUCH_PROGRAM)
    if self.data.program.status == 'visible':
        return
    # isHost() raises for non-hosts; swallow that so the generic
    # visibility violation below is raised instead.
    try:
        self.isHost()
        return
    except AccessViolation:
        pass
    raise AccessViolation(
        DEF_PROGRAM_NOT_VISIBLE % self.data.program.name)
def acceptedOrgsAnnounced(self):
    """Ensure the accepted-organizations announcement has been made."""
    self.isProgramVisible()
    if not self.data.timeline.orgsAnnounced():
        raise AccessViolation(
            DEF_PAGE_INACTIVE_BEFORE % self.data.timeline.orgsAnnouncedOn())
def acceptedStudentsAnnounced(self):
    """Ensure the accepted-students announcement has been made."""
    self.isProgramVisible()
    if not self.data.timeline.studentsAnnounced():
        raise AccessViolation(
            DEF_PAGE_INACTIVE_BEFORE % self.data.timeline.studentsAnnouncedOn())
def canApplyNonStudent(self, role, edit_url):
    """Checks if the user can apply as a mentor or org admin.
    Args:
      role: role name used in the violation message.
      edit_url: URL to redirect to when a non-student profile exists.
    """
    self.isLoggedIn()
    # Existing non-student profile: redirect to edit it instead.
    if self.data.profile and not self.data.profile.student_info:
        raise RedirectRequest(edit_url)
    # No profile at all: applying is allowed.
    if not self.data.profile:
        return
    # Remaining case: a student profile -- students may not apply.
    raise AccessViolation(DEF_ALREADY_PARTICIPATING_AS_STUDENT % (
        role, self.data.program.name))
def isActiveStudent(self):
    """Ensure the current user has an active student profile."""
    self.isProfileActive()
    if not self.data.student_info:
        raise AccessViolation(DEF_IS_NOT_STUDENT)
def isStudentWithProject(self):
    """Ensure the student has at least one accepted project."""
    self.isActiveStudent()
    if self.data.student_info.number_of_projects <= 0:
        raise AccessViolation(DEF_HAS_NO_PROJECT)
def notStudent(self):
  """Checks that the current user has a non-student profile."""
  self.isProfileActive()
  if self.data.student_info:
    raise AccessViolation(DEF_IS_STUDENT)
def notOrgAdmin(self):
  """Checks that the user is not an admin for the organization."""
  self.isProfileActive()
  assert isSet(self.data.organization)
  if self.data.organization.key() in self.data.profile.org_admin_for:
    raise AccessViolation(DEF_ALREADY_ADMIN % self.data.organization.name)
def notMentor(self):
  """Checks that the user is not a mentor for the organization."""
  self.isProfileActive()
  assert isSet(self.data.organization)
  if self.data.mentorFor(self.data.organization):
    raise AccessViolation(DEF_ALREADY_MENTOR % self.data.organization.name)
def isOrgAdmin(self):
  """Checks if the user is an org admin.

  Delegates to isOrgAdminForOrganization with the organization set on
  the request data.
  """
  assert isSet(self.data.organization)
  self.isOrgAdminForOrganization(self.data.organization)
def isMentor(self):
  """Checks if the user is a mentor.

  Delegates to isMentorForOrganization with the organization set on
  the request data.
  """
  assert isSet(self.data.organization)
  self.isMentorForOrganization(self.data.organization)
def isOrgAdminForOrganization(self, org):
  """Checks that the user is an admin for the specified organization."""
  self.isProfileActive()
  if not self.data.orgAdminFor(org):
    raise AccessViolation(DEF_NOT_ADMIN % org.name)
def isMentorForOrganization(self, org):
  """Checks that the user is a mentor for the specified organization."""
  self.isProfileActive()
  if not self.data.mentorFor(org):
    raise AccessViolation(DEF_NOT_MENTOR % org.name)
def isOrganizationInURLActive(self):
  """Checks that the organization in the URL exists and is active."""
  assert isSet(self.data.organization)
  org = self.data.organization
  if not org:
    raise AccessViolation(DEF_ORG_DOES_NOT_EXISTS % {
        'link_id': self.data.kwargs['organization'],
        'program': self.data.program.name,
        })
  if org.status != 'active':
    raise AccessViolation(DEF_ORG_NOT_ACTIVE % {
        'name': org.name,
        'program': self.data.program.name,
        })
def isProposalInURLValid(self):
  """Checks that the proposal in the URL exists and is not invalid."""
  assert isSet(self.data.proposal)
  proposal = self.data.proposal
  if not proposal:
    raise AccessViolation(DEF_ID_BASED_ENTITY_NOT_EXISTS % {
        'model': 'GSoCProposal',
        'id': self.data.kwargs['id'],
        })
  if proposal.status == 'invalid':
    raise AccessViolation(DEF_ID_BASED_ENTITY_INVALID % {
        'model': 'GSoCProposal',
        'id': self.data.kwargs['id'],
        })
def studentSignupActive(self):
  """Checks that the student signup period is currently active."""
  self.isProgramVisible()
  if not self.data.timeline.studentSignup():
    raise AccessViolation(DEF_PAGE_INACTIVE_OUTSIDE %
        self.data.timeline.studentsSignupBetween())
def canStudentUpdateProposalPostSignup(self):
  """Checks that the proposal may still be updated after signup closed.

  Allowed only after the signup deadline when the proposal has been
  marked editable post deadline.
  """
  self.isProgramVisible()
  if not (self.data.timeline.afterStudentSignupEnd() and
          self.data.proposal.is_editable_post_deadline):
    violation_message = '%s %s'% ((DEF_PAGE_INACTIVE_OUTSIDE %
        self.data.timeline.studentsSignupBetween()),
        DEF_PROPOSAL_MODIFICATION_REQUEST)
    raise AccessViolation(violation_message)
def canStudentUpdateProposal(self):
  """Checks if the student is eligible to submit a proposal.

  Runs the checks in a fixed order so the most specific violation is
  reported: active student, valid proposal, timeline, ownership, and
  finally proposal status. Sets self.data.is_pending as a side effect.
  """
  assert isSet(self.data.proposal)
  self.isActiveStudent()
  self.isProposalInURLValid()
  # check if the timeline allows updating proposals
  try:
    self.studentSignupActive()
  except AccessViolation:
    # outside of signup the update is still allowed when the proposal
    # was marked editable post deadline
    self.canStudentUpdateProposalPostSignup()
  # check if the proposal belongs to the current user
  expected_profile = self.data.proposal.parent()
  if expected_profile.key().name() != self.data.profile.key().name():
    error_msg = DEF_ENTITY_DOES_NOT_BELONG_TO_YOU % {
        'model': 'GSoCProposal'
        }
    raise AccessViolation(error_msg)
  # check if the status allows the proposal to be updated
  status = self.data.proposal.status
  if status == 'ignored':
    raise AccessViolation(DEF_PROPOSAL_IGNORED_MESSAGE)
  elif status in ['invalid', 'accepted', 'rejected']:
    raise AccessViolation(DEF_CANNOT_UPDATE_ENTITY % {
        'model': 'GSoCProposal'
        })
  # determine what can be done with the proposal
  if status == 'new' or status == 'pending':
    self.data.is_pending = True
  elif status == 'withdrawn':
    self.data.is_pending = False
def canRespondToInvite(self):
  """Checks if the current user can accept/reject the invitation.

  Verifies, in order: the entity is an invitation, it is in a
  respondable state, it is addressed to the current user, and the
  user does not already hold the invited role.
  """
  assert isSet(self.data.invite)
  assert isSet(self.data.invited_user)
  # check if the entity represents an invitation
  if self.data.invite.type != 'Invitation':
    raise AccessViolation(DEF_NOT_VALID_INVITATION)
  # check if the entity can be responded
  if self.data.invite.status not in ['pending', 'rejected']:
    raise AccessViolation(DEF_NOT_VALID_INVITATION)
  # check if the entity is addressed to the current user
  if self.data.invited_user.key() != self.data.user.key():
    error_msg = DEF_ENTITY_DOES_NOT_BELONG_TO_YOU % {
        'model': 'Request'
        }
    raise AccessViolation(error_msg)
  # check if the user does not have this role
  if self.data.invite.role == 'org_admin':
    self.notOrgAdmin()
  else:
    self.notMentor()
def canResubmitInvite(self):
  """Checks that the current user can resubmit the invitation."""
  assert isSet(self.data.invite)
  invite = self.data.invite
  # the entity has to represent an invitation
  if invite.type != INVITATION_TYPE:
    raise AccessViolation(DEF_INVITE_DOES_NOT_EXIST)
  # only withdrawn invitations may be resubmitted
  if invite.status != 'withdrawn':
    raise AccessViolation(DEF_NOT_VALID_REQUEST)
  # resubmitting is restricted to the organization admins
  self.isOrgAdmin()
def canInviteBeResubmitted(self):
  """Checks that the invitation may be resubmitted."""
  assert isSet(self.data.invite)
  invite = self.data.invite
  # the entity has to represent an invitation
  if invite.type != INVITATION_TYPE:
    raise AccessViolation(DEF_INVITE_DOES_NOT_EXIST)
  # only withdrawn invitations may be resubmitted
  if invite.status != 'withdrawn':
    raise AccessViolation(DEF_INVITE_CANNOT_BE_RESUBMITTED)
  #TODO(dhans): actually it needs to be checked if the user has not accepted
  # a request in the meantime.
def canInviteBeWithdrawn(self):
  """Checks that the invitation may be withdrawn."""
  assert isSet(self.data.invite)
  invite = self.data.invite
  # the entity has to represent an invitation
  if invite.type != INVITATION_TYPE:
    raise AccessViolation(DEF_INVITE_DOES_NOT_EXIST)
  # only pending invitations may be withdrawn
  if invite.status != 'pending':
    raise AccessViolation(DEF_INVITE_CANNOT_BE_WITHDRAWN)
def canRespondInvite(self):
  """Checks that the current user may respond to the invite entity."""
  assert isSet(self.data.invite)
  invite = self.data.invite
  # the entity has to represent an invitation
  if invite.type != INVITATION_TYPE:
    raise AccessViolation(DEF_INVITE_DOES_NOT_EXIST)
  # only the invited user may respond to the invitation
  if self.data.user.key() != invite.user.key():
    raise AccessViolation(DEF_INVITE_CANNOT_BE_ACCESSED)
def isInviteRespondable(self):
  """Checks that the invite may be responded to at this moment."""
  assert isSet(self.data.invite)
  # any of these statuses blocks a response; each maps to its message
  violations = {
      'accepted': DEF_INVITE_ACCEPTED,
      'rejected': DEF_INVITE_REJECTED,
      'withdrawn': DEF_INVITE_WITHDRAWN,
      }
  message = violations.get(self.data.invite.status)
  if message is not None:
    raise AccessViolation(message)
def canManageRequest(self):
  """Checks that the current user may manage the specified request."""
  assert isSet(self.data.request_entity)
  request = self.data.request_entity
  # only the author may manage their request
  if self.data.user.key() != request.user.key():
    raise AccessViolation(DEF_REQUEST_CANNOT_BE_ACCESSED)
  # accepted requests cannot be managed
  if request.status == 'accepted':
    raise AccessViolation(DEF_ACCEPTED_REQUEST_CANNOT_BE_MANAGED)
def isRequestManageable(self):
  """Checks that the request may be managed with the requested action."""
  assert isSet(self.data.request_entity)
  current_status = self.data.request_entity.status
  post = self.data.POST
  # only pending requests may be withdrawn
  if 'withdraw' in post and current_status != 'pending':
    raise AccessViolation(DEF_REQUEST_CANNOT_BE_WITHDRAWN % current_status)
  # only rejected or withdrawn requests may be resubmitted
  resubmittable = current_status in ['rejected', 'withdrawn']
  if 'resubmit' in post and not resubmittable:
    raise AccessViolation(DEF_REQUEST_CANNOT_BE_RESUBMITTED % current_status)
def canRespondToRequest(self):
  """Checks that the current user can accept/reject the request."""
  assert isSet(self.data.request_entity)
  assert isSet(self.data.requester)
  request = self.data.request_entity
  # the entity has to represent a request
  if request.type != 'Request':
    raise AccessViolation(DEF_NOT_VALID_REQUEST)
  # only pending or rejected requests may be responded to
  if request.status not in ['pending', 'rejected']:
    raise AccessViolation(DEF_NOT_VALID_REQUEST)
  # responding is restricted to the organization admins
  self.isOrgAdmin()
def canResubmitRequest(self):
  """Checks that the current user can resubmit the request."""
  assert isSet(self.data.request_entity)
  assert isSet(self.data.requester)
  request = self.data.request_entity
  # the entity has to represent a request
  if request.type != 'Request':
    raise AccessViolation(DEF_NOT_VALID_REQUEST)
  # only withdrawn requests may be resubmitted
  if request.status != 'withdrawn':
    raise AccessViolation(DEF_NOT_VALID_REQUEST)
  # the request has to belong to the current user
  if self.data.requester.key() != self.data.user.key():
    raise AccessViolation(DEF_ENTITY_DOES_NOT_BELONG_TO_YOU % {
        'model': 'Request'
        })
def canViewInvite(self):
  """Checks if the current user can see the invitation.

  Delegates to _canAccessRequestEntity with the invite, the invited
  user and the organization from the request data.
  """
  assert isSet(self.data.organization)
  assert isSet(self.data.invite)
  assert isSet(self.data.invited_user)
  self._canAccessRequestEntity(
      self.data.invite, self.data.invited_user, self.data.organization)
def canViewRequest(self):
  """Checks if the current user can see the request.

  Delegates to _canAccessRequestEntity with the request entity, its
  requester and the organization from the request data.
  """
  assert isSet(self.data.organization)
  assert isSet(self.data.request_entity)
  assert isSet(self.data.requester)
  self._canAccessRequestEntity(
      self.data.request_entity, self.data.requester, self.data.organization)
def _canAccessRequestEntity(self, entity, user, org):
  """Checks if the current user is allowed to access a Request entity.

  Args:
    entity: an entity which belongs to Request model (currently unused)
    user: user entity that the Request refers to
    org: organization entity that the Request refers to (currently unused)
  """
  # check if the entity is addressed to the current user
  if user.key() != self.data.user.key():
    # check if the current user is an org admin for the organization
    self.isOrgAdmin()
def canAccessProposalEntity(self):
  """Checks that the current user is allowed to access a Proposal entity."""
  assert isSet(self.data.proposal)
  assert isSet(self.data.proposal_org)
  assert isSet(self.data.url_user)
  # a publicly visible proposal may be accessed by everyone
  if self.data.proposal.is_publicly_visible:
    return
  if not self.data.user:
    raise AccessViolation(DEF_PROPOSAL_NOT_PUBLIC)
  self.isProfileActive()
  # the proposer may always access their own proposal
  if self.data.user.key() == self.data.url_user.key():
    return
  # mentors and org admins of the organization may access it as well
  if not self.data.mentorFor(self.data.proposal_org):
    raise AccessViolation(DEF_PROPOSAL_NOT_PUBLIC)
def canEditDocument(self):
  """Checks if the current user can edit the document.

  Editing is restricted to hosts.
  """
  self.isHost()
def canViewDocument(self):
  """Checks that the specified user can see the document."""
  assert isSet(self.data.document)
  if not self.data.document:
    raise NotFound(DEF_NO_DOCUMENT)
  # only documents with public read access are viewable here
  if self.data.document.read_access != 'public':
    raise AccessViolation(DEF_NOT_PUBLIC_DOCUMENT)
def isProposer(self):
  """Checks that the current user is the author of the proposal."""
  self.isProgramVisible()
  self.isProfileActive()
  assert isSet(self.data.proposer)
  if self.data.proposer.key() != self.data.profile.key():
    raise AccessViolation(DEF_NOT_PROPOSER)
def isSlotTransferActive(self):
  """Checks that slot transfers are active at this time."""
  assert isSet(self.data.program)
  assert isSet(self.data.timeline)
  # transfers require visible allocations and an unannounced student list
  if not (self.data.program.allocations_visible and
          self.data.timeline.beforeStudentsAnnounced()):
    raise AccessViolation(DEF_NO_SLOT_TRANSFER % (
        self.data.timeline.studentsAnnouncedOn()))
def isProjectInURLValid(self):
  """Checks that the project in the URL exists and is not invalid."""
  assert isSet(self.data.project)
  project = self.data.project
  if not project:
    raise AccessViolation(DEF_ID_BASED_ENTITY_NOT_EXISTS % {
        'model': 'GSoCProject',
        'id': self.data.kwargs['id'],
        })
  if project.status == 'invalid':
    raise AccessViolation(DEF_ID_BASED_ENTITY_INVALID % {
        'model': 'GSoCProject',
        'id': self.data.kwargs['id'],
        })
def canStudentUpdateProject(self):
  """Checks if the student can edit the project details.

  Verifies, in order: the project is valid, the timeline permits
  editing, the project belongs to the current user, and its status
  allows updates.
  """
  assert isSet(self.data.program)
  assert isSet(self.data.timeline)
  assert isSet(self.data.project)
  assert isSet(self.data.project_owner)
  self.isProjectInURLValid()
  # check if the timeline allows updating project
  self.isProgramVisible()
  self.acceptedStudentsAnnounced()
  # check if the project belongs to the current user
  expected_profile_key = self.data.project.parent_key()
  if expected_profile_key != self.data.profile.key():
    error_msg = DEF_ENTITY_DOES_NOT_BELONG_TO_YOU % {
        'model': 'GSoCProject'
        }
    raise AccessViolation(error_msg)
  # check if the status allows the project to be updated
  if self.data.project.status in ['invalid', 'withdrawn', 'failed']:
    raise AccessViolation(DEF_CANNOT_UPDATE_ENTITY % {
        'model': 'GSoCProject'
        })
def isSurveyActive(self, survey, show_url=None):
  """Checks that the survey is currently active.

  Args:
    survey: the survey entity for which the access must be checked
    show_url: The survey show page url to which the user must be
        redirected to once the survey period is over
  """
  assert isSet(self.data.program)
  assert isSet(self.data.timeline)
  if self.data.timeline.surveyPeriod(survey):
    return
  # once the survey has closed, send the user to the read-only view
  if self.data.timeline.afterSurveyEnd(survey) and show_url:
    raise RedirectRequest(show_url)
  raise AccessViolation(DEF_PAGE_INACTIVE_OUTSIDE %
      (survey.survey_start, survey.survey_end))
def canUserTakeSurvey(self, survey, taking_access='user'):
  """Checks that the user with the given profile can take the survey.

  Args:
    survey: the survey entity for which the access must be checked
    taking_access: role required to take the survey; one of
        'student', 'org' or 'user'
  """
  assert isSet(self.data.program)
  assert isSet(self.data.timeline)
  self.isProjectInURLValid()
  if taking_access == 'student':
    self.isActiveStudent()
  elif taking_access == 'org':
    assert isSet(self.data.organization)
    self.isMentor()
  elif taking_access == 'user':
    self.isUser()
  else:
    raise AccessViolation(DEF_NO_SURVEY_ACCESS)
def isStatisticValid(self):
  """Checks that the URL refers to an existing statistic."""
  assert isSet(self.data.statistic)
  # the statistic has to exist
  if not self.data.statistic:
    raise AccessViolation(DEF_STATISTIC_DOES_NOT_EXIST % {
        'key_name': self.data.kwargs['id']
        })
def canRetakeOrgApp(self):
  """Checks that the user can edit the org app record.

  Only the main and the backup admin on the record are allowed.
  """
  assert isSet(self.data.org_app_record)
  self.isUser()
  record = self.data.org_app_record
  allowed_keys = [record.main_admin.key(), record.backup_admin.key()]
  if self.data.user.key() not in allowed_keys:
    raise AccessViolation(DEF_CANNOT_ACCESS_ORG_APP)
def canViewOrgApp(self):
  """Checks if the user can view the org app record. Only the org admins and
  hosts are allowed to view.
  """
  assert isSet(self.data.org_app_record)
  # admins listed on the record may view it; otherwise fall back to
  # the host check
  try:
    self.canRetakeOrgApp()
    return
  except AccessViolation:
    pass
  self.isHost()
|
adviti/melange
|
app/soc/views/helper/access_checker.py
|
Python
|
apache-2.0
| 37,137
|
[
"VisIt"
] |
0067d1ace032ef639cbc4c88b89bc1dff94154400f570a1c361e5d15fd9a645b
|
from neuron import h, gui
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as pyplot
import math
import random
#neuron.load_mechanisms("./mod")
from cfiber import cfiber
from onefibersimulation import balance
#paralleling NEURON interface
pc = h.ParallelContext()
rank = int(pc.id())      # id of this MPI process
nhost = int(pc.nhost())  # total number of MPI processes
#parameters
cell_number = 2 # number of neurons
fibers = []   # cfiber instances owned by this rank
nclist = []   # NetCons delivering each local cell's spikes
spike_times_vec = h.Vector()  # spike times recorded for local gids
id_vec = h.Vector()           # gid associated with each recorded spike
def addfibers(num = cell_number):
    '''
    Creates neuronal pool and returns gids of pool

    Each rank creates only the gids assigned to it (round-robin over
    hosts) and registers the cells with the ParallelContext for
    gid-based lookup and spike recording.

    Parameters
    ----------
    num: int
        neurons number in pool
    Returns
    -------
    gids: list
        the list of neurons gids created on this rank
    '''
    global fibers, rank, nhost, spike_times_vec, id_vec
    gids = []
    # round-robin distribution of gids over the available hosts
    for i in range(rank, num, nhost):
        # NOTE(review): cfiber argument meanings are not visible here --
        # confirm against the cfiber constructor
        cell = cfiber(random.uniform(200, 350), random.uniform(0.2, 1), random.randint(10, 30), random.randint(0, 10), False)
        fibers.append(cell)
        pc.set_gid2node(i, rank)  # this rank owns gid i
        nc = cell.connect2target(None)  # NetCon used as the spike source
        pc.cell(i, nc)  # associate gid i with the cell's spike output
        nclist.append(nc)
        gids.append(i)
        pc.spike_record(i, spike_times_vec, id_vec)  # record spikes of gid i
    return gids
def spike_record(pool):
    '''Attaches an extracellular-voltage recorder to every gid in pool.

    Parameters
    ----------
    pool: list
        list of neurons gids

    Returns
    -------
    list of h.Vector()
        one vector per gid, recording vext at the middle of the branch
    '''
    traces = []
    for gid in pool:
        cell = pc.gid2cell(gid)
        trace = h.Vector()
        trace.record(cell.branch(0.5)._ref_vext[0])
        traces.append(trace)
    return traces
def simulate(pool, tstop=30000, vinit=-55):
    ''' simulation control

    Initializes every cell in the pool, configures the run and solves
    the parallel simulation up to tstop.

    Parameters
    ----------
    pool: list
        gids of the cells owned by this rank
    tstop: int (ms)
        simulation time
    vinit: int (mV)
        initialized voltage
    '''
    h.finitialize(vinit)
    for i in pool:
        cell = pc.gid2cell(i)
        balance(cell)  # balance membrane currents before the run
    # NOTE(review): the 'if' branch only re-queries cvode.active() and has
    # no effect; presumably a cvode re-initialization was intended -- confirm
    if h.cvode.active():
        h.cvode.active()
    else:
        h.fcurrent()
    h.frecord_init()
    h.tstop = tstop
    h.v_init = vinit
    pc.set_maxstep(0.5)  # maximum step for parallel spike exchange (ms)
    h.stdinit()
    pc.psolve(tstop)  # run the parallel simulation
def finish():
    ''' proper exit

    Shuts the parallel run down cleanly; call on every rank at the end.
    '''
    pc.runworker()  # master takes control, workers enter the job loop
    pc.done()       # release the workers
    h.quit()
def spikeout(pool, name, v_vec):
    ''' Reports simulation results

    Writes one voltage-trace file per local cell into ./res/, host by
    host so only one process touches the disk at a time.

    Parameters
    ----------
    pool: list
        list of neurons gids owned by this rank
    name: string
        pool name, used as the file-name prefix
    v_vec: list of h.Vector()
        recorded voltage, one vector per gid in pool
    '''
    global rank
    pc.barrier()
    for i in range(nhost):
        if i == rank:
            for j in range(len(pool)):
                path = str('./res/' + name + '%dr%d' % (j, rank))
                # 'with' guarantees the handle is closed; the original
                # opened the file and never closed it
                with open(path, 'w') as f:
                    for v in list(v_vec[j]):
                        f.write(str(v) + "\n")
        pc.barrier()
def spiketimeout(file_name):
    ''' Reports recorded spike times

    Each host writes its recorded (time, gid) pairs to file_name in rank
    order: rank 0 truncates the file, later ranks append, with barriers
    keeping the writes ordered.

    Parameters
    ----------
    file_name: string
        path of the output file; each line is "<time>\\t<gid>"
    '''
    global spike_times_vec, id_vec
    for i in range(int(pc.nhost())):
        pc.barrier() # Sync all processes at this point
        if i == int(pc.id()):
            # rank 0 starts a fresh file; every later rank appends
            mode = 'w' if i == 0 else 'a'
            with open(file_name, mode) as spk_file:
                for (t, idd) in zip(spike_times_vec, id_vec):
                    spk_file.write('%.3f\t%d\n' %(t, idd)) # timestamp, gid
    # the original also printed every (t, idd) pair and the closed file
    # handle after the loop; those debug prints are removed
    pc.barrier()
if __name__ == '__main__':
    pool = addfibers()           # create the fibers owned by this rank
    vext = spike_record(pool)    # attach recorders before running
    print("- "*10, "\nstart")
    simulate(pool)
    print("- "*10, "\nend")
    spikeout(pool, "vext", vext) # dump voltage traces to ./res/
    spiketimeout("out.spk")      # dump spike times
    #if (nhost > 1):
    finish()                     # proper parallel shutdown
|
research-team/robot-dream
|
Nociception/parallelsimulation.py
|
Python
|
mit
| 3,866
|
[
"NEURON"
] |
c31047456f443338442f72d64a19c411f8cd19d5843e4836872c051def3ba33f
|
#!/usr/bin/env python
from traits.api import HasTraits, List, Instance, Bool, Str, File, Dict
from traitsui.api import View, Item, VGroup, HGroup, Group, \
RangeEditor, TableEditor, Handler, Include,HSplit, EnumEditor, HSplit, Action, \
CheckListEditor, ObjectColumn
from ..streamlines.track_dataset import TrackDataset
from ..streamlines.track_dataset import TrackDataset
from mayavi.tools.mlab_scene_model import MlabSceneModel
from .traited_query import Scan
from .local_data import get_local_data
import numpy as np
import os
from collections import defaultdict
import dsi2.config
def dictmatch(qdict, ddict):
    """Return True when every (key, value) pair in qdict is present in ddict.

    Missing keys in ddict compare against the empty string, so a query
    value of "" matches an absent key. `.items()` is used instead of the
    Python-2-only `.iteritems()`; it behaves identically here and also
    runs on Python 3.
    """
    return all(ddict.get(key, "") == val for key, val in qdict.items())
# TableEditor used by TrackDataSource.traits_view: one row per loaded
# track dataset, exposing editable Scan properties; editing a row opens
# the dataset's "import_view".
track_dataset_table = TableEditor(
    columns =
    [ ObjectColumn(name="properties.scan_id",editable=True),
      ObjectColumn(name="properties.study",editable=True),
      ObjectColumn(name="properties.scan_group",editable=True),
      ObjectColumn(name="properties.reconstruction",editable=True),
    ],
    auto_size = True,
    edit_view="import_view"
)
class TrackDataSource(HasTraits):
    """Aggregates TrackDataset objects and queries them as a group.

    Holds a list of objects sporting a .tracks_at_ijk() function and
    manages the per-subject cache of streamline label arrays.
    """
    # Holds a list of objects sporting
    # a .tracks_at_ijk() function.
    track_datasets = List
    track_dataset_properties = List(Instance(Scan))
    #scene3d=Instance(MlabSceneModel)
    interactive=Bool(False)
    atlas_name = Str("None")
    # if the atlas_name gets changed and new vectors need to be loaded,
    # only do it before a query is requested
    needs_atlas_update = Bool(False)
    json_source = File("")
    # one row per dataset; each row is a list of dicts holding label
    # parameters plus a "data" array of streamline labels
    label_cache = List(List(Dict))

    def __init__(self,**traits):
        super(TrackDataSource,self).__init__(**traits)
        # if track_datasets is not passed explicitly
        if not self.track_datasets:
            # Load from a json_source
            if self.json_source:
                self.track_datasets = [ d.get_track_dataset() for d in \
                             get_local_data(self.json_source) ]
        # grab the properties from each loaded TrackDataset
        self.track_dataset_properties = \
            [tds.properties for tds in self.track_datasets]

    def get_subjects(self):
        # unique subject ids across all loaded datasets, sorted
        return sorted(list(set(
            [ds.subject_id for ds in self.track_dataset_properties])))

    def set_render_tracks(self,visibility):
        # toggle streamline rendering on every dataset at once
        for tds in self.track_datasets:
            tds.render_tracks = visibility

    def within_between_subjects_triangle_slice(self):
        # Index pairs over the upper triangle of the dataset-by-dataset
        # comparison matrix, split into same-subject (within) and
        # different-subject (between) pairs.
        props = self.track_dataset_properties
        withins = []
        betweens = []
        for a,b in np.array( np.triu_indices(len(self),1) ).T:
            # never compare a scan with itself
            if props[a].scan_id == props[b].scan_id: continue
            if props[a].subject_id == props[b].subject_id:
                withins.append((a,b))
            else:
                betweens.append((a,b))
        return tuple(np.array(withins).T), tuple(np.array(betweens).T)

    def __len__(self):
        return len(self.track_datasets)

    def query_ijk(self,ijk,every=0):
        # subset each dataset to the streamlines crossing the ijk voxels
        if self.needs_atlas_update: self.update_atlas()
        return [ tds.subset(tds.get_tracks_by_ijks(ijk),every=every) \
                  for tds in self.track_datasets ]

    def query_connection_id(self,connection_id,every=0):
        """
        Subsets the track datasets so that only streamlines labeled as
        `region_pair_id`
        """
        if self.needs_atlas_update: self.update_atlas()
        return [ tds.subset(tds.get_tracks_by_connection_id(connection_id),every=every) \
                  for tds in self.track_datasets ]

    def change_atlas(self, query_specs):
        """ Sets the .connections for each TrackDataset to be loaded from the path
        specified in its properties.atlases
        """
        print "\t+ Setting new atlas in TrackDataSource"
        new_labels = []
        for tds, cache in zip(self.track_datasets,self.label_cache):
            # exactly one cached label set has to match the query
            match = [lbl["data"] for lbl in cache if dictmatch(query_specs,lbl)]
            if not len(match) ==1:
                raise ValueError("Query did not return exactly one match")
            # ATTACHES LABELS TO THE `.connections` ATTRIBUTE OF EACH TDS
            tds.set_connections(match[0])
            new_labels += match
        return new_labels

    def load_label_data(self):
        """Loads every label array from disk and fills label_cache.

        Returns a dict mapping atlas name -> {parameter: sorted values}
        for the parameters that vary across datasets.
        """
        # What are the unique atlas names?
        atlases = set([])
        for prop in self.track_dataset_properties:
            atlases.update([d.name for d in prop.track_label_items])
        atlases = list(atlases)
        print "\t+ Found %d unique atlases" % len(atlases)
        # HORRIBLE.
        # collect, per atlas, every value each labeling parameter takes
        label_lut = {}
        for atlas_name in atlases:
            label_lut[atlas_name] = defaultdict(set)
            for tds_props in self.track_dataset_properties:
                # loop over label sources
                for label_source in tds_props.track_label_items:
                    # If they're for this atlas, append the parameter value
                    if label_source.name == atlas_name:
                        # For each parameter
                        for prop, propval in label_source.parameters.iteritems():
                            if prop == "notes": continue
                            label_lut[atlas_name][prop].update( (propval,) )
        # keep only the parameters that actually take several values
        varying_properties = {}
        for atlas_name in atlases:
            varying_properties[atlas_name] = {}
            for propname, propvals in label_lut[atlas_name].iteritems():
                # Are there multiple values this property can take on?
                if len(propvals) <= 1: continue
                varying_properties[atlas_name][propname] = sorted(list(propvals))
        #print varying_properties
        dsi2.config.logger.debug("%s", varying_properties)
        # Put in a look-up
        self.label_cache = [] # one row for each subject
        for props in self.track_dataset_properties:
            subj_cache = []
            # one row in the cache for each item
            for label_item in props.track_label_items:
                subj_lut = {"name":label_item.name}
                subj_lut.update(label_item.parameters)
                # the actual per-streamline labels, loaded from disk
                subj_lut['data'] = \
                    np.load(
                        os.path.join(props.pkl_dir,label_item.numpy_path)
                    ).astype(np.uint64)
                subj_cache.append(subj_lut)
            self.label_cache.append(subj_cache)
        # Make sure all the graphml paths are the same and
        #self.graphml_cache = {}
        #for atlas, vary_props in varying_properties.iteritems():
        #    self.graphml_cache[atlas] = {}
        #    for vprop,possible_values in vary_props.iteritems():
        #        self.graphml_cache[atlas][
        #    if not prop in self.graphml_cache[atlas].keys():
        #        self.graphml_cache[atlas][prop] =
        return varying_properties

    traits_view = View(
        Group(
            Item("json_source"),
            Group(
                Item("track_datasets", editor=track_dataset_table),
                orientation="horizontal",
                show_labels=False
            ),
            orientation="vertical"
        ),
        resizable=True,
        width=900,
        height=500
    )
|
mattcieslak/DSI2
|
dsi2/database/track_datasource.py
|
Python
|
gpl-3.0
| 7,393
|
[
"Mayavi"
] |
a9c6a5febcb2942c17cc7ac4c12fde0378bf6d324fb2d04e650e7afc7902e1e6
|
from bayesflare import Lightcurve
import bayesflare as pf
from math import *
import numpy as np
from random import random
__all__ = ["SimLightcurve", "simulate_single"]
class SimLightcurve(Lightcurve):
    """
    Contains methods to simulate various light curves; builds upon the framework of the :class:`Lightcurve` class.

    Parameters
    ----------
    dt : float, optional
        The time separating each data point, in seconds. Defaults to 1765.55929 seconds; the spacing in Kepler Q1
        long cadence data.
    length : float, optional
        The length of the light curve, in days. Defaults to 33.5 days, the length of a Kepler Q1 light curve.
    sigma : float, optional
        The standard deviation of the noise to be simulated. Defaults to 0.5.
    mean : float, optional
        The mean of the noise to be simulated. Defaults to 1.
    cadence : {'long', 'short'}, optional
        The Kepler cadence which is being simulated. Defaults to 'long'.
    """

    def __init__(self, dt=1765.55929, length=33.5, sigma=0.5, mean=1, cadence='long'):
        # random parameters of the low-frequency sinusoidal variation
        self.phi = random()
        self.frq = 0.000005*random()
        self.va = 15*random()
        self.sdt = dt
        self.length = length
        self.sigma = sigma
        self.mean = mean
        self.cadence = cadence
        self.lc = []
        # Noise-free signal store. This used to be a class-level list, so
        # every SimLightcurve instance shared (and kept appending to) the
        # same `original` data; it is now per-instance.
        # NOTE(review): self.ts and self.le are assumed to come from the
        # Lightcurve base class -- confirm they are not shared class-level
        # lists with the same problem.
        self.original = []
        self.generate_curve()

    def __str__(self):
        return "<BayesFlare Simulated Lightcurve with s="+str(self.sigma)+">"

    def snr(self):
        """
        Attempts to calculate the signal-to-noise ratio of the light curve.

        Returns
        -------
        float
            The estimated signal-to-noise ratio (SNR).
        """
        signalarea = np.sum(self.original)
        noise = pf.estimate_noise(self)[0]
        return np.sqrt( signalarea**2 / noise**2)

    def generate_curve(self):
        """
        Generates the light curve according to the parameters provided to the initiator.
        """
        hours = 3600
        days = 86400
        x = np.arange(0, self.length, self.sdt) # create the time stamps
        z = np.zeros_like(x)                    # create the data array
        # add low frequency variation
        z = self.va * np.sin(2*np.pi*self.frq*x + self.phi);
        # Add Gaussian noise
        self.lc.append(pf.addNoise(z, self.mean, self.sigma))
        self.ts.append(x)
        self.le.append(np.zeros_like(x))
        self.original.append(np.zeros_like(x))
        self.combine()
        self.detrend()

    def inject_model(self, model, instance):
        """
        Injects a model into the light curve.

        Parameters
        ----------
        model : BayesFlare Model instance
            An object describing the model to be injected.
        instance : int
            The specific model instance (i.e. combination of parameters) to be injected.

        Returns
        -------
        BayesFlare LightCurve
            The light curve containing an injected model.
        """
        return pf.inject_model(self, model, instance)

    def detrend(self, nbins=101, order=3):
        """
        Detrends the simulated light curve using the Savitsky-Golay filter.

        Parameters
        ----------
        nbins : int
            The number of bins used for the filter window width.
        order : int
            The polynomial order of the filter.
        """
        self.clc = (self.clc - pf.savitzky_golay(self.clc, nbins, order))
def simulate_single( dt = 1765.55929, length=33.5, sigma=0.5, mean=1, amp = 1.5):
    """
    Produce a timeseries of simulated data containing randomly
    generated noise, a low-frequency sinusoidal variation and a single
    flare at the centre of the time series.

    Parameters
    ----------
    dt : float, optional
        The sample time of the required data in seconds.
        Default is 1765.55929, the sample time of the quarter 1
        *Kepler* data
    length : float, optional
        The number of days long the required data should be.
        Default is 33.5, the length of the quarter 1 *Kepler*
        data
    sigma : float, optional
        The standard deviation of the noise in the time series.
        Default is 0.5
    mean : float, optional
        The mean of the noise in the time series.
        The default is 1.
    amp : float, optional
        The amplitude of the injected flare. Default is 1.5.

    Returns
    -------
    SimLightcurve
        The light curve containing the noisy data; its ``original``
        attribute holds the flare signal without noise.

    See also
    --------
    simulate, simulate_single_chunks
    """
    hours = 3600
    days = 86400
    # NOTE: the original re-assigned dt = 1765.55929 here, which silently
    # ignored the caller's value; the parameter is now honoured.
    x = np.arange(0, length*days, dt) # create the time stamps
    z = np.zeros_like(x)              # create the data array
    o = np.zeros_like(x)              # create clean data array
    # add low frequency variation
    va = 15*random()        # amplitude of low frequency variation
    phi = random()          # initial phase of low frequency variation
    frq = 0.000005*random() # frequency of variation (Hz)
    z = va * np.sin(2*np.pi*frq*x + phi)
    # add random flare: two decay constants with tau2 >= tau1
    tau1 = np.floor(random()*10)/2
    tau2 = 0
    while tau2 < tau1:
        tau2 = np.floor(random()*10)/2
    tau = [tau1*hours, tau2*hours] # decay consts of flare
    t0 = x[len(x) // 2]            # flare peak at the centre of the series
    z += pf.flare(amp, tau, x, t0)
    o += pf.flare(amp, tau, x, t0)
    # Add Gaussian noise
    z = pf.addNoise(z, mean, sigma)
    x = x/86400  # convert the time stamps to days
    ze = np.zeros_like(x)
    a = SimLightcurve()
    a.sigma = sigma
    a.ts.append(x)
    a.lc.append(z)
    a.le.append(ze)
    a.original.append(o)
    a.combine()
    return a
|
BayesFlare/bayesflare
|
bayesflare/simulate/simulate.py
|
Python
|
gpl-2.0
| 5,965
|
[
"Gaussian"
] |
8f1b83e725593d5944471e55c7bad8328414e6bede0d238ac676e0a5a35230a5
|
# encoding: utf-8
# Copyright (c) Marnik Bercx
from monty.json import MSONable
from cage.core import Cage
from matplotlib.pyplot import subplots
import json
import math
import os
import warnings
import matplotlib.pyplot as plt
import pymatgen as pmg
import pymatgen.io.nwchem as nw
import numpy as np
import cage.utils as utils
"""
Tools to set up and run calculations to study energy landscapes, specifically
for Cage molecules.
"""
__author__ = "Marnik Bercx"
__version__ = "0.1"
__maintainer__ = "Marnik Bercx"
__email__ = "marnik.bercx@uantwerpen.be"
__status__ = "alpha"
__date__ = "16 JUN 2017"
class Landscape(MSONable):
"""
A selection of points representing a line, area or volume.
"""
def __init__(self, points):
"""
Initializes a Landscape from the points that it consists of.
Args:
points (list): List of (3,) numpy.ndarray cartesian coordinates of the
points in the landscape.
"""
if type(points) is list:
self._points = points
elif type(points) is tuple:
self._points = list(points)
else:
raise TypeError("Provided points are not formatted as a List or Tuple.")
def __add__(self, other):
"""
Add two Landscapes to each other into a new Landscape.
Args:
other (cage.landscape.Landscape): Landscape to which the current
Landscape should be added.
Returns:
Landscape: Sum of the two landscapes, i.e. a landscape that consists
of the points of the two landscapes combined.
"""
points = self.points + other.points
return Landscape(points)
    @property
    def points(self):
        """
        All of the points of the Landscape.

        Returns:
            list: List of (3,) numpy.ndarray coordinates of the points
                in the landscape.
        """
        return self._points
    @property
    def center(self):
        """
        Center of the points of the Landscape.

        Returns:
            numpy.ndarray: (3,) shaped array of the coordinates of the
                mathematical center (mean position) of the points of the
                landscape.
        """
        # element-wise sum of the coordinate arrays divided by the count
        return sum(self.points) / len(self.points)
def change_center(self, center):
"""
Change the center of the landscape. This simply translates all the
coordinates so the new mathematical center is the one provided by
the user.
Args:
center (numpy.ndarray): (3,) shaped array that is the new center of
the landscape.
"""
new_points = []
for point in self.points:
new_points.append(point - self.center + center)
self._points = new_points
def extend_by_rotation(self, axis, density=10.0, remove_endline=False,
distance_tol=1e-3):
"""
Extends the landscape using an axis vector and turning all the vertices
in the landscape around the origin by a value and direction determined
by the axis vector.
Args:
axis (numpy.ndarray): (3,) shaped array that represents the axis around
which the landscape is rotated. The length of the vector is equal
to the total rotation angle in radians.
density (float): Density of grid points along the rotation angle.
Defined in #grid points per radians.
remove_endline (bool): Do not include the final rotation angle grid
points.
distance_tol (float): Minimum distance between two points in
the landscape.
Returns:
None
"""
# TODO Extend this method
# so it also allows rotations around other points than origin
# Find the rotation matrices
rotation_matrices = []
total_angle = np.linalg.norm(axis)
npoints = int(total_angle / np.pi * density)
if remove_endline:
for i in range(npoints - 1):
angle = (i + 1) / npoints * total_angle
rotation_matrices.append(rotation_matrix(axis, angle))
else:
for i in range(npoints):
angle = (i + 1) / npoints * total_angle
rotation_matrices.append(rotation_matrix(axis, angle))
# Add all the points TODO This might be done more quickly
points = self.points.copy()
for matrix in rotation_matrices:
for point in self.points:
newpoint = point.dot(matrix)
distance = np.linalg.norm(point - newpoint)
if distance > distance_tol:
points.append(newpoint)
self._points = points.copy()
def extend_by_translation(self, vector, density=10):
"""
Extends the Landscape by translating the points along a certain vector.
Args:
vector (numpy.ndarray): Translation vector of the extension.
density (float): Density of the grid.
"""
pass # TODO
def extend_from_point(self, point, extension, density=10):
"""
Extends the Landscape by scaling the points from a specific point or
origin, i.e. by homothetic transformations.
Args:
point:
extension:
density:
"""
pass # TODO
def as_dict(self):
"""
A JSON serializable dictionary of the Landscape.
Returns:
"""
d = {"points": self.points}
return d
@classmethod
def from_file(cls, filename, fmt="json"):
"""
Load a Landscape from a file.
Args:
filename:
fmt:
Returns:
"""
pass
@classmethod
def from_vertices(cls, vertices, num=10):
"""
Define a landscape by providing the vertices (end points in the case of
a line).
Args:
vertices (list): List of (3,) shaped numpy arrays that are the
vertices of the Landscape.
num (int): Number of points in the landscape.
# TODO switch to density?
Returns:
Landscape: Landscape defined by the vertices.
"""
# Calculate the points of the Landscape depending on the number of
# vertices
if len(vertices) == 1:
raise IOError("Number of vertices must be at least two.")
elif len(vertices) == 2:
if np.linalg.norm(vertices[1] - vertices[0]) < 1e-3:
raise IOError("Vertices are too close to each other.")
else:
vector = vertices[1] - vertices[0]
length = np.linalg.norm(vector)
unitvector = vector / length
npoints = num # int(length * num + 1)
mesh_distance = length / npoints
points = []
for i in range(npoints):
points.append(vertices[0] + i * mesh_distance * unitvector)
else:
raise NotImplementedError(
"Higher dimensions than 1 not implemented yet.")
return Landscape(points)
@classmethod
def create_sphere(cls, radius, center=np.array([0, 0, 0]),
axis=np.array([0, 0, 1]), density=15):
"""
Set up a spherical landscape with a specified radius.
Args:
radius (float): Radius of the spherical landscape.
center (3x1 numpy.array): Coordinates of the center of the
spherical landscape.
axis (3x1 numpy.array): Rotational axis which is used to
construct the landscape.
density (float): Grid density of the landscape, provided in
number of grid points per radians.
Returns:
(Landscape): Spherical Landscape object as specified by the user.
"""
starting_point = radius * unit_vector(axis)
sphere_landscape = Landscape([starting_point, ])
if angle_between(np.array([1, 0, 0]), axis) < 1e-2:
phi_axis = unit_vector(
np.cross(np.array([0, 1, 0]), starting_point)
)
else:
phi_axis = unit_vector(
np.cross(np.array([1, 0, 0]), starting_point)
)
sphere_landscape.extend_by_rotation(
axis=phi_axis * math.pi,
density=density)
sphere_landscape.extend_by_rotation(
axis=unit_vector(axis) * 2 * math.pi,
density=density
)
sphere_landscape.change_center(center)
return sphere_landscape
class LandscapeAnalyzer(MSONable):
    """
    An analyzer class for interpreting data from calculations on a landscape.
    """

    def __init__(self, data, datapoints=(), software="nwchem"):
        """
        Initialize an instance of LandscapeAnalyzer. This method is rarely
        used directly. Usually a LandscapeAnalyzer is initialized from a
        directory or file.

        Args:
            data (list): List of per-calculation output dictionaries.
            datapoints: Processed (coordinate, energy) data, typically a
                numpy structured array produced by analyze_cation_energies.
            software (str): Software package the output came from.
        """
        self._data = data
        self._software = software
        self._datapoints = datapoints

    @property
    def data(self):
        # Raw per-calculation output dictionaries.
        return self._data

    @property
    def software(self):
        # Name of the DFT package that produced the data.
        return self._software

    @property
    def datapoints(self):
        # Processed landscape datapoints (empty until analysis is run).
        return self._datapoints

    @classmethod
    def from_data(cls, directory, output_file='result.out', software='nwchem'):
        """
        Looks through all subdirectories of the provided directory and
        extracts all output.

        Args:
            directory (str): Path to the directory in which the data is stored.
            output_file (str): Filename of the output file.
            software (str): Software package that was used for the DFT
                calculations. Currently only supports nwchem.

        Returns:
            LandscapeAnalyzer

        Raises:
            IOError: If ``directory`` does not exist.
            NotImplementedError: For software other than nwchem.
        """
        # TODO Make subdirectory finder recursive?
        # Currently the method only checks in the immediate subdirectories
        # of the directory provided by the user. It might be useful to
        # extend this to subdirectories further down the tree. However,
        # maybe it's best to stick to calculating landscape properties in
        # the same directory.

        # Check if the directory exists
        if not os.path.isdir(directory):
            raise IOError("Directory " + directory + " not found.")
        # Find all the subdirectories in the specified directory
        dir_list = [d for d in os.listdir(directory)
                    if os.path.isdir(os.path.join(directory, d))]
        # Get all of the output of the calculations in the directory
        output = []
        if software == 'nwchem':
            # NOTE(review): the loop variable shadows the ``directory``
            # argument, so os.path.join(directory, directory, output_file)
            # joins each subdirectory name with *itself* instead of with the
            # parent directory — this looks like a bug; confirm the intended
            # path is os.path.join(<parent>, <subdir>, output_file).
            for directory in dir_list:
                try:
                    out = nw.NwOutput(
                        os.path.join(directory, directory, output_file)
                    )
                except FileNotFoundError:
                    print('Did not find output file in directory ' + directory)
                except IndexError:
                    print('Did not find output in ' + output_file +
                          ' in directory ' + directory)
                else:
                    # Check if the output has an error
                    if out.data[-1]['has_error']:
                        print(
                            'Error found in output in directory ' + directory)
                    else:
                        # Zero task time suggests the run died mid-task.
                        if out.data[-1]['task_time'] == 0:
                            print('No timing data found for final task. '
                                  'Calculation might not have completed '
                                  'successfully.')
                        output.append(out)
                        print('Grabbed output in directory ' + directory)
            # TODO This currently only considers the final NwChem task.
            # Not a very general approach, but maybe the most sensible?
            data = [out.data[-1] for out in output]
        else:
            raise NotImplementedError("Only NwChem is currently supported.")
        return LandscapeAnalyzer(data, software=software)

    def analyze_cation_energies(self, coordinates, reference=None,
                                cation="Li"):
        """
        Extract the total energies for all the calculations, and connect
        them with the proper coordinates. Currently supports the following
        landscapes for analysis:

        Interfacet wedges: 2D landscapes that connect two Facets via a wedge.
        Polar coordinates are used to plot this landscape, and the program
        needs a reference facet to determine an appropriate angle. In case the
        reference is not provided, the method will attempt to find the closest
        facet to the cation in the first data point.

        Spherical landscapes: 2D landscapes that correspond to spheres around
        the cage. Spherical coordinates are used to plot this landscape,
        and the program needs a reference axis to determine the angles
        properly. This axis corresponds to the one used to construct the
        landscape, using the Landscape.create_sphere() method.

        Args:
            coordinates (str): Type of coordinates to use for reconstructing
                the landscape coordinates of each datapoint ("polar" or
                "spherical").
            reference: Reference Facet (polar) or axis (spherical) used to
                define the angles.
            cation (str): Element symbol of the cation to track.
        """
        datapoints = []
        dtype = None
        if coordinates == "polar":
            facet = reference
            dtype = [('Distance', float), ('Angle', float), ('Energy', float)]
            # If a facet is not provided, try to find the closest one to the
            # first ion coordinate data point. This might not always work.
            if reference is None:
                warnings.warn(
                    "No Facet was provided. Since the facet is important for "
                    "defining the coordinates of the landscape, the program "
                    "will automatically determine the closest facet to the "
                    "cation in the first datapoint."
                )
                cage_init = Cage.from_molecule(self.data[0]['molecules'][0])
                init_cation_coords = [
                    site.coords for site in cage_init.sites
                    if site.specie == pmg.Element(cation)][-1]
                facet_init = cage_init.facets[0]
                # NOTE(review): ``facet_init`` is never updated inside this
                # loop, so every facet is compared against facets[0] only and
                # the *last* facet closer than facets[0] wins — not
                # necessarily the closest. Also, if no facet beats facets[0],
                # ``facet`` remains None and the distance/angle code below
                # will fail. Confirm whether facets[0] should be assigned to
                # ``facet`` and updated in the loop.
                for cage_facet in cage_init.facets:
                    if utils.distance(cage_facet.center, init_cation_coords) \
                            < utils.distance(facet_init.center,
                                             init_cation_coords):
                        facet = cage_facet
            for data in self.data:
                # Extract the cartesian coordinates of the cation
                cage = Cage.from_molecule(data["molecules"][0])
                cation_coords = [site.coords for site in cage.sites
                                 if site.specie == pmg.Element(cation)]
                # Check to see how many cations are found in the structure
                if len(cation_coords) == 0:
                    # If no cation is found, raise an error
                    raise ValueError("Requested cation not found in molecule.")
                elif len(cation_coords) == 1:
                    cation_coords = cation_coords[0]
                else:
                    # Take the last cation, this one is usually the one that
                    # is part of the landscape
                    cation_coords = cation_coords[-1]
                # Determine the polar coordinates with the facet center as a
                # reference axis for the angle
                r = np.linalg.norm(cation_coords - cage.anion_center)
                theta = angle_between(facet.center, cation_coords)
                # Fold angles above 90 degrees back into [0, pi/2].
                if theta > math.pi / 2:
                    theta = math.pi - theta
                coordinate = [r, theta]
                energy_final = data['energies'][-1]
                coordinate.append(energy_final)
                datapoints.append(coordinate)
        if coordinates == "spherical":
            axis = reference
            dtype = [('Theta', float), ('Phi', float), ('Energy', float)]
            if reference is None:
                raise IOError("No reference axis was provided for the "
                              "analysis of the landscape energies. The "
                              "program currently has no recourse for "
                              "determining this axis automatically.")
            # Find a suitable perpendicular axis
            cage_init = Cage.from_molecule(self.data[0]['molecules'][0])
            i = 0
            phi_axis = None
            # Use the first site that is not (nearly) perpendicular to the
            # reference axis to define the phi = 0 direction.
            while i < len(cage_init) and phi_axis is None:
                v = cage_init.sites[i].coords
                if axis.dot(cage_init.sites[i].coords) > 1e-2:
                    phi_axis = unit_vector(
                        perpendicular_part(v, axis)
                    )
                i += 1
            for data in self.data:
                # Extract the cartesian coordinates of the cation
                cage = Cage.from_molecule(data["molecules"][0])
                cation_coords = [site.coords for site in cage.sites
                                 if site.specie == pmg.Element(cation)]
                # Check to see how many cations are found in the structure
                if len(cation_coords) == 0:
                    # If no cation is found, raise an error
                    raise ValueError("Requested cation not found in molecule.")
                elif len(cation_coords) == 1:
                    cation_coords = cation_coords[0]
                else:
                    # Take the last cation, this one is usually the one that
                    # is part of the landscape
                    cation_coords = cation_coords[-1]
                # Determine the spherical coordinates with the reference axis
                # for the angles
                theta = angle_between(axis, cation_coords)
                phi = angle_between(
                    perpendicular_part(cation_coords, axis), phi_axis
                )
                # Disambiguate phi's sign using the third orthogonal axis, so
                # phi covers the full [0, 2*pi) range.
                if angle_between(np.cross(phi_axis, axis), cation_coords) \
                        > math.pi / 2:
                    phi = 2 * math.pi - phi
                coordinate = [theta, phi]
                energy_final = data['energies'][-1]
                coordinate.append(energy_final)
                datapoints.append(coordinate)
        # Pack the datapoints into a numpy structured array.
        data_tuples = [tuple(point) for point in datapoints]
        darray = np.array(data_tuples, dtype=dtype)
        self._datapoints = darray

    def flip_coordinates(self, coord_name):
        """
        Flip the coordinate values for a chosen coordinate, i.e. map each
        value v to (max - v).

        :param coord_name: Field name in the datapoints structured array.
        :return: None
        """
        if len(self._datapoints) == 0:
            raise ValueError('No processed data present.')
        data = self._datapoints
        data[coord_name] = data[coord_name].max() - data[coord_name]
        self._datapoints = data

    def plot_energies(self, dimension, coordinates="polar", style='trisurf'):
        """
        Plot the energy landscape from the datapoints.

        Args:
            dimension (int): 1 for a line plot, 2 for a surface plot.
            coordinates (str): Coordinate type passed to
                analyze_cation_energies if no datapoints exist yet.
            style (str): 'trisurf' or 'pcolor' (2D only).

        Returns:
            None

        NOTE(review): this method indexes the 'Distance' and 'Angle' fields,
        so it only works for *polar* datapoints; spherical data
        ('Theta'/'Phi') would fail here. It also mutates the stored
        datapoints in place via the rounding below. Confirm.
        """
        if len(self._datapoints) == 0:
            self.analyze_cation_energies(coordinates=coordinates)
        data = self.datapoints
        data['Distance'] = np.round(data['Distance'], 5)
        data = np.sort(data, order=['Distance', 'Angle'])
        if dimension == 1:
            plt.figure()
            plt.xlabel('Distance (Angstrom)')
            plt.ylabel('Energy (eV)')
            plt.plot(data['Distance'], data['Energy'])
            plt.show()
        if dimension == 2:
            if style == 'trisurf':
                fig = plt.figure()
                ax = fig.add_subplot(111, projection='3d')
                ax.plot_trisurf(data['Distance'], data['Angle'],
                                data['Energy'], linewidth=0.2,
                                antialiased=True)
                plt.show()
            elif style == 'pcolor':
                # Find the number of radii and angles
                # (assumes the sorted data forms a complete regular grid).
                r_init = data['Distance'][0]
                nangles = 1
                while abs(data['Distance'][nangles] - r_init) < 1e-5:
                    nangles += 1
                nradii = int(len(data) / nangles)
                # Get the right format for the data
                radii = data['Distance'].reshape(nradii, nangles) # [::nradii]
                angles = data['Angle'].reshape(nradii, nangles) # [:nangles]
                energy = data['Energy'].reshape(nradii, nangles)
                # Plot — NOTE(review): ``subplots`` is used unqualified here;
                # presumably matplotlib.pyplot.subplots is imported at the
                # top of the module — confirm.
                fig, ax = subplots()
                p = ax.pcolor(angles, radii, energy, vmin=energy.min(),
                              vmax=energy.mean())
                cb = fig.colorbar(p)
                plt.xlabel('Angle (radians)')
                plt.ylabel('Distance (Angstrom)')
                plt.show()

    def as_dict(self):
        """
        Return a dictionary representing the LandscapeAnalyzer instance.

        :return: dict with "@module", "@class", "data" and "datapoints" keys.
        """
        d = {"@module": self.__class__.__module__,
             "@class": self.__class__.__name__}
        data_list = []
        for chunk in self.data:
            data_dict = chunk.copy()
            # Convert pymatgen Structures/Molecules to their dict form so the
            # result is (mostly) JSON serializable.
            if data_dict['structures']:
                struc_dicts = []
                for structure in data_dict['structures']:
                    struc_dicts.append(structure.as_dict())
                data_dict['structures'] = struc_dicts
            if data_dict['molecules']:
                mol_dicts = []
                for molecule in data_dict['molecules']:
                    mol_dicts.append(molecule.as_dict())
                data_dict['molecules'] = mol_dicts
            data_list.append(data_dict)
        d["data"] = data_list
        # NOTE(review): datapoints is a numpy structured array, which json
        # cannot serialize directly.
        d["datapoints"] = self.datapoints
        # TODO MAKE THIS WORK datapoints after analysis
        return d

    @classmethod
    def from_dict(cls, d):
        """
        Initialize the LandscapeAnalyzer from a dictionary. Not implemented.

        :param d: Dictionary as produced by as_dict().
        :return: LandscapeAnalyzer
        """
        pass

    @classmethod
    def from_string(cls):
        """
        Initialize a LandscapeAnalyzer from a string. Not implemented.

        :return: LandscapeAnalyzer
        """
        pass

    @classmethod
    def from_file(cls, filename, fmt='json'):
        """
        Initialize an instance of LandscapeAnalyzer from a file written by
        ``to()``.

        :return: LandscapeAnalyzer
        """
        data = []
        if fmt == "json":
            with open(filename, "r") as f:
                file_data = json.load(f)
            for chunk in file_data["data"]:
                data_dict = chunk.copy()
                # Rebuild pymatgen objects from their dict representations.
                if data_dict['structures']:
                    structures = []
                    for struc_dict in data_dict['structures']:
                        structures.append(pmg.Structure.from_dict(struc_dict))
                    data_dict['structures'] = structures
                if data_dict['molecules']:
                    molecules = []
                    for mol_dict in data_dict['molecules']:
                        molecules.append(pmg.Molecule.from_dict(mol_dict))
                    data_dict['molecules'] = molecules
                data.append(data_dict)
        else:
            raise NotImplementedError("Only json format is currently "
                                      "supported.")
        return LandscapeAnalyzer(data, datapoints=file_data["datapoints"])

    def to(self, filename, fmt="json"):
        """
        Write the landscape to a file for quick reading.

        :return: None
        """
        if fmt == "json":
            with open(filename, "w") as f:
                json.dump(self.as_dict(), f)
            return
        else:
            raise NotImplementedError("Only json format is currently "
                                      "supported.")
        # TODO Add support for .yaml
def perpendicular_part(v1, v2):
    """ Returns the projection of v1 on the plane perpendicular to v2. """
    # Subtract the component of v1 that is parallel to v2.
    parallel_component = (v1.dot(v2) / np.linalg.norm(v2) ** 2) * v2
    return v1 - parallel_component
# Functions stolen from SO
def unit_vector(vector):
    """ Returns the unit vector of the vector. """
    length = np.linalg.norm(vector)
    return vector / length
def angle_between(v1, v2):
    """
    Returns the angle in radians between vectors 'v1' and 'v2'::
    """
    # Normalize both vectors, then clip the cosine into [-1, 1] so floating
    # point round-off cannot push arccos out of its domain.
    cos_theta = np.dot(v1 / np.linalg.norm(v1), v2 / np.linalg.norm(v2))
    return np.arccos(np.clip(cos_theta, -1.0, 1.0))
def rotation_matrix(axis, theta):
    """
    Return the rotation matrix associated with clockwise rotation about
    the given axis by theta radians (Euler-Rodrigues formula).
    """
    # Normalize the axis; its original length carries no information here.
    n = np.asarray(axis)
    n = n / math.sqrt(np.dot(n, n))
    half_angle = theta / 2.0
    a = math.cos(half_angle)
    b, c, d = n * math.sin(half_angle)
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    row0 = [aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)]
    row1 = [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)]
    row2 = [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]
    return np.array([row0, row1, row2])
|
mbercx/cage
|
cage/landscape.py
|
Python
|
mit
| 25,423
|
[
"NWChem",
"pymatgen"
] |
f8089587425b4aa8afd37e3bc569809c206eddb1bc3f4c5bd338b3bba736e8ea
|
#! /usr/bin/env python
import os, sys, optparse
import PEATSA.Core as Core
import PEATSA.Core.Matrix
import MySQLdb
import matplotlib.pyplot as plt
import numpy as np
"""
Requires ProteinComplexTool_execute.py to do calculations!
"""
class ProteinComplexTool:
    """Driver for PEATSA stability/binding calculations on protein complexes.

    Splits a PDB complex into chains with MDAnalysis, launches stability
    calculations on the complex and its parts (via
    ProteinComplexTool_execute.py over mpirun) and compares the resulting
    dG/ddG values stored in a local MySQL database.

    Legacy Python 2 code.
    """

    def __init__(self):
        return

    def DeltaStability(self,inputFile, mutationList, configurationFile, workingDirectory, outputDirectory):
        '''Calculates the stability difference between a protein and set of mutants

        Parameters:
            inputFile: A PDB file of the protein
            mutationList: A list of Data.MutationSet instances. Each represents a mutant of the protein.
            configurationFile: The location of a proteinDesignTool.conf file - defaults to home directory.
            workingDirectory: Where the calculation will be run.
            outputDirectory: Where the results will be written.

        Returns
            A Data.DataSet instance containing one matrix, stabilityResults.
            Each row of this matrix corresponds to a mutant defined in the mutationList argument.'''
        #Create the ProteinDesignTool instance
        tool = Core.ProteinDesignTool.ProteinDesignTool(configurationFile,
                workingDirectory=workingDirectory,
                pdbFile=inputFile,
                outputDirectory=outputDirectory,
                removeHeterogens=True)
        #The above cleans the pdb file and copies it to the working directory.
        #Use this pdb from now on.
        inputFile = tool.pdbFile
        #Create the mutants
        mutantCollection = Core.Data.MutantCollection(pdbFile=inputFile,mutationList=mutationList,location=outputDirectory,temporary=True)
        #Run stability calculation
        #The results are added to the ProteinDesignTool instance's dataDirectory attribute
        #This is an instance of Data.DataSet class
        tool.runStabilityCalculation(mutantFiles=mutantCollection.mutantFiles())
        #Clean up - Deletes files copied to the working directory for Uffbaps
        tool.cleanUp()
        return tool.dataDirectory

    def remALT(self,pdb):
        '''Removes alternative residues from the working pdb. Replaces the working pdb.'''
        import Protool
        x = Protool.structureIO()
        x.readpdb('%s.pdb' % (pdb))
        x.RemoveALT()
        # Rewrite in place, leaving HETATM records out.
        x.writepdb('%s.pdb' % (pdb), dont_write_HETATMS=1)
        print "[ProteinComplexTool] Alternative Residues removed."

    def splitter(self,pdbDir,pdb,reactions_list,cur,db):
        ''' Takes the reaction list provided by the user, ensures there is no duplication
        in the list and converts the list into the command required by MDAnalysis to split
        the pdb. If the user does not provide a list, it was automatically split the pdb
        into constituent chains.

        Returns a list of the chains split from the pdb.'''
        import string
        if reactions_list == ['']:
            # Empty user input: look up all chains of the pdb in the database
            # and split each one.
            # NOTE(review): ``environment`` is not defined anywhere in this
            # module — this call would raise NameError if this branch is
            # reached. Probably leftover debugging; confirm.
            environment.output(cur.execute("SELECT DISTINCT Chain_ID from pdb where PDB_ID = '%s';" % (pdb)))
            a = cur.fetchall() # fetch results
            print 'a', a
            expr=[]
            chains = [i[0] for i in a]
            for i in chains:
                s=["segid "+i]
                expr.append(s)
            b = str(a) # convert from tuple to string
            exclude = set(string.punctuation) # set of punctuation characters
            b = ''.join(ch for ch in b if ch not in exclude) # remove punctuation from b
            # Collapse the remaining chain letters into one string, e.g. 'ABC'.
            e = ''.join(b.split(' '))
            self._do_split(pdbDir, pdb, expr, e)
            return e
        else:
            # User-specified components, e.g. ['AB', 'C'] -> selections of one
            # or more segids per component.
            expr1=[]
            for c in reactions_list:
                if len(c)>1:
                    s = ["segid "+i for i in c]
                    expr1.append(s)
                else:
                    expr1.append(["segid "+c])
            self._do_split(pdbDir,pdb, expr1, reactions_list)
            return reactions_list

    def _do_split(self,pdbDir,pdb, expr, e):
        '''Write one PDB file per selection expression, named <pdb>_<chains>.pdb.'''
        import MDAnalysis
        u = MDAnalysis.Universe(pdbDir, permissive=False)
        for i in range(len(expr)):
            Z = u.selectAtoms(*expr[i])
            Z.write('%s_%s.pdb' % (pdb,e[i]))
            print '[ProteinComplexTool] Extracted chain(s) %s from %s' % (e[i], pdb)

    def displayResults(self,pdb,split_list,comp_list,mutList,reaction,cur,db):
        ''' Reads the results from the MySQL db based on the split_list. The ddG binding is calculated here
        by subtracting the dGs of all the extracted chains (reactants) from the complex (products). The ddGs
        are then stored in a new table named based on the reaction list and scan type.
        Automatic output is generated by ddG(multichain complex, line graph) or dG (single chain complex,
        bar chart)'''
        width=0.5
        # First entry of split_list is treated as the full complex.
        cur.execute("SELECT * FROM results_%s_%s;" % (split_list[0], mutList))
        complexResults = cur.fetchall()
        mutations = [i[0] for i in complexResults] # Mutation list
        complexScores = [i[1] for i in complexResults] # dG scores of pdb complex
        count = len(mutations) # Number of calcs
        ind = np.arange(count)
        """
        nums = []
        for y in muts:
            nums.append(''.join(i for i in y if i.isdigit()))
        """
        if len(split_list)>1: # For binding calcs, no matter in what order chains were split
            chainResults = []
            for i in split_list[1:]:
                cur.execute("select * from results_%s_%s;" % (i,mutList))
                chainResults.append(cur.fetchall())
            chainScores = [i[1] for y in chainResults for i in y] # dG scores of chains split from pdb
            ddG = []
            cur.execute("create table if not exists ddG_%s_%s(mutation VARCHAR(10), ddG FLOAT);" % (pdb, comp_list))
            for i in range(len(complexScores)):
                ddG.append(complexScores[i] - chainScores[i]) # ddG = dG complex - sum of dG of split chains
            for i in range(len(mutations)):
                print "ddG %s %s" % (mutations[i], ddG[i])
                cur.execute("insert into ddG_%s_%s (mutation, ddG) VALUES (%s%s%s, %s%s%s);" % (pdb, comp_list, '"', mutations[i], '"', '"',ddG[i],'"'))
            #plt.bar(ind+(width/2), ddG, width,color='b')
            #plt.axhline(linewidth=2, color='r')
            plt.figure(num=None, figsize=(16, 14), dpi=80)
            # Scatter complex dG vs chain dG, colour-coded by the chain letter
            # prefix of the mutation label.
            for i in range(len(mutations)):
                if mutations[i].startswith('A'):
                    plt.scatter(chainScores[i], complexScores[i], s=25,c='b',marker='o', linewidths=None)
                    #plt.text(chainScores[i], complexScores[i], mutations[i])
                if mutations[i].startswith('B'):
                    plt.scatter(chainScores[i], complexScores[i], s=25,c='r',marker='o', linewidths=None)
                    #plt.text(chainScores[i], complexScores[i], mutations[i])
                if mutations[i].startswith('C'):
                    plt.scatter(chainScores[i], complexScores[i], s=25,c='g',marker='o', linewidths=None)
                    #plt.text(chainScores[i], complexScores[i], mutations[i])
                if mutations[i].startswith('D'):
                    plt.scatter(chainScores[i], complexScores[i], s=25,c='y',marker='o', linewidths=None)
                    #plt.text(chainScores[i], complexScores[i], mutations[i])
            # Diagonal reference line (y = x).
            plt.plot([min(complexScores)*3,max(complexScores)*1.1],[min(complexScores)*3,max(complexScores)*1.1], 'r-')
            plt.xlabel("dG kJ/mol of the chains")
            plt.ylabel("dG kJ/mol of the complex")
            #plt.xlabel("Position of Mutation")
            #plt.ylabel("ddG kJ/mol")
            #plt.title("ddG Binding calculations for ALA scan of %s %s" % (split_list[0], reaction))
            plt.title("dG stability for ALA scan of %s %s complex vs chains" % (split_list[0], reaction))
        else:
            # Single-chain complex: plain dG bar chart.
            for i in range(len(mutations)):
                print " dG %s, %s" % (mutations[i], complexScores[i])
            plt.bar(ind,complexScores,width,color='r')
            plt.title("dG Stability calculations for ALA scan of %s %s" % (split_list[0], reaction))
        #plt.xticks(ind+(width/2), mutations, rotation=90, fontsize=8)
        #plt.figure(num=None, figsize=(8, 6), dpi=80)
        plt.show()
        sys.exit()

    def run_calcs(self, split_list, mutList, numProc):
        '''Launch one MPI calculation per split pdb file.'''
        # NOTE(review): the mutList and numProc parameters are ignored — the
        # body reads ``opts``, which is local to main(), so this raises
        # NameError unless opts is made global. Probably should use the
        # parameters; confirm.
        for i in split_list:
            w_pdb = os.path.join(os.getcwd(),'%s.pdb' % (i))
            os.system("mpirun -np %d python ProteinComplexTool_execute.py -p %s -d %s -m %s" %(opts.numProc, i, w_pdb, opts.mutList))
        print "[ProteinComplexTool] Calculations completed."
def main():
    """Command-line entry point: parse options, connect to MySQL, split the
    complex into components, run the calculations and display results.

    Legacy Python 2 code. NOTE(review): the stripped-source indentation was
    reconstructed during review — confirm block nesting against the original.
    """
    import pypar
    run = ProteinComplexTool()
    # Option to select pdb, config file, working dir etc..
    parser = optparse.OptionParser()
    # PDB option
    parser.add_option("-p", "--pdb",
                      help="Choose all or a pdb id",
                      dest="pdb", default ="all")
    # Configuration File
    parser.add_option("-c", "--configurationFile",
                      help="Location of configuration file",
                      dest="configFile", default="/home/satnam/proteinDesignTool.conf")
    # Output Directory
    parser.add_option("-o", "--outputDirectory",
                      help="Location of output directory",
                      dest="outputDir", default=os.getcwd())
    # Working Directory
    parser.add_option("-w", "--workingDirectory",
                      help="Location of working directory",
                      dest="workingDir", default=os.getcwd())
    # Mutation List or ALA scan option
    parser.add_option("-m", "--mutationList",
                      help="Location of mutation list file",
                      dest="mutList", default="ALA")
    # Choose option for user-defined calculations
    parser.add_option("-u", "--userCalcs",
                      help="Choose True or False if you would like to specifiy the calculations, otherwise each chain will be split",
                      dest="userCalcOpt", default='True')
    # Show Results
    # NOTE(review): default is the boolean True, but the value is compared
    # below against the *string* 'True' — the default therefore never shows
    # results. Confirm intended default.
    parser.add_option("-s", "--showResults",
                      help="Shows previous results? True or False. If they don't exist, they will be calculated.",
                      dest="showResults", default=True)
    # Delete results from database
    parser.add_option("-d", "--deleteResults",
                      help="Deletes all results for the specified pdb from the database. Default False.",
                      dest="deleteResults", default='False')
    # Number of CPUs for execute script
    parser.add_option("-n", "--numProc",
                      help="Number of Processors allocated for execute script.",
                      dest="numProc", default=8)
    (opts, args) = parser.parse_args()
    db = MySQLdb.connect(host="localhost",
                         user = "root",
                         passwd = "samsung",
                         db = "sat")
    cur = db.cursor()
    # Connect to the database and show PDBs
    cur.execute("SELECT VERSION()")
    ver = cur.fetchone()
    print "[ProteinComplexTool] MySQLdb connection successful!"
    print "[ProteinComplexTool] MySQL server version: %s" % ver[0]
    cur.execute("SELECT distinct PDB_ID from pdb;")
    a = cur.fetchall()
    b = ','.join([i[0] for i in a])
    print "[ProteinComplexTool] PDBs in local db"
    print b
    # PDB filename and directory handling
    pdb = opts.pdb
    pdbfile = ''.join((pdb,'.pdb'))
    pdbDir = os.path.join(opts.outputDir,pdbfile)
    # Delete tables already in database
    if opts.deleteResults != 'False':
        cur.execute("SHOW tables like 'results_%s%s';" % (pdb, '%'))
        drop_tables=cur.fetchall()
        for i in drop_tables:
            for y in i:
                cur.execute("DROP TABLE %s;" % (y))
        print "[ProteinComplexTool] Results for %s deleted" % (pdb)
    else:
        pass
    # Query database and display entity and chain information of pdb.
    # NOTE(review): (pdb) is not a tuple — MySQLdb expects a parameter
    # sequence; should likely be (pdb,). Confirm.
    cur.execute("SELECT distinct Entity_ID, Chain_ID, Chain_name, type from pdb where PDB_ID = %s;", (pdb))
    entity = [] # entities in the pdbfile
    chains = []
    for i in cur.fetchall():
        print "Entity: %s, Chain Name: %s, Type: %s, Chain ID: %s" % (i[0], i[2], i[3], i[1])
        entity.append(i[0])
        chains.append(i[1])
    entity.sort()
    # Remove alternative residues
    run.remALT(pdb)
    # User defined interactions. If left blank will extract each chain from pdb.
    if opts.userCalcOpt == 'True':
        print "[ProteinComplexTool] What components are consumed (enter chain IDs in the form AB+C+D):"
        reactants = sys.stdin.readline()
        reactants = reactants.rstrip("\n")
        print "[ProteinComplexTool] What products are produced (enter chain IDs in the form ABC+D):"
        products = sys.stdin.readline()
        products = products.rstrip("\n")
    # NOTE(review): if -u is not 'True', ``reactants``/``products`` are never
    # bound and the lines below raise NameError. Confirm intended behaviour.
    if reactants == '':
        reactants = '+'.join(chains)
    else:
        pass
    # NOTE(review): the reaction string is built before the empty-products
    # fallback below, so a blank products input shows an empty right side.
    reaction = reactants+'-->'+products
    print reaction
    if products == '':
        products = ''.join(chains)
    else:
        pass
    reactants_list = reactants.split('+')
    products_list = products.split('+')
    # Split the pdb into chains, returns chains that have been split (A,B etc)
    split_reactants = run.splitter(pdbDir,pdb,reactants_list,cur,db)
    split_products = run.splitter(pdbDir,pdb,products_list,cur,db)
    comp_list = split_products + split_reactants
    comp_list = '_'.join(comp_list)
    split_list = []
    split_list_products = []
    split_list_reactants = []
    for i in split_reactants:
        s = pdb+'_'+i
        split_list_reactants.append(s)
    for i in split_products:
        s = pdb+'_'+i
        split_list_products.append(s)
    # Comp_list, used for naming of ddG table - pdb_prods_reacts_mut-type
    comp_list = comp_list +'_'+os.path.split(opts.mutList)[1]
    # Ensuring no duplicate reactions are run, especially when user leaves input blank.
    splitlist = split_list_products + split_list_reactants
    for i in splitlist:
        if i not in split_list:
            split_list.append(i)
    # Display results (results are (re)calculated first in both branches)
    if opts.showResults == 'True':
        run.run_calcs(split_list, opts.mutList, opts.numProc)
        run.displayResults(pdb, split_list, comp_list, opts.mutList, reaction, cur, db)
    else:
        run.run_calcs(split_list, opts.mutList, opts.numProc)
    """
    # Runs calculations and send the names of the split fes to the execute script
    for i in split_list:
        w_pdb = os.path.join(os.getcwd(),'%s.pdb' % (i))
        os.system("mpirun -np %d python ProteinComplexTool_execute.py -p %s -d %s -m %s" %(opts.numProc, i, w_pdb, opts.mutList))
    print "[ProteinComplexTool] Calculations completed."
    #run.displayResults(pdb, split_list, comp_list, reaction, cur, db)
    """
# Standard script entry point.
if __name__ == '__main__':
    main()
|
dmnfarrell/peat
|
sandbox/ProteinComplexTool_pypar_.py
|
Python
|
mit
| 15,460
|
[
"MDAnalysis"
] |
ae4cb4e8f9a354f73b072211ced23b49f53920b77abbbde791c699359af57fba
|
#!/usr/bin/env python
#encoding=utf8
#Copyright [2014] [Wei Zhang]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
###################################################################
# Date: 2014/4/10 #
# Call Mean Regularized Multi-task Learning with LR model #
###################################################################
import csv, json, sys, argparse
sys.path.append("../")
import data_io
#from multiTaskLR import MeanRegularizedMultiTaskLR
from multiTaskLR1 import MeanRegularizedMultiTaskLR
# Load project-wide path configuration.
# NOTE(review): the file handle returned by open() is never closed.
settings = json.loads(open("../../SETTINGS.json").read())
def main():
    """Parse the dataset choice, train the mean-regularized multi-task LR
    model and save it to the configured model file.

    Command line: python train.py -t 0|1
    (0 = validation training set, 1 = training set for submission).

    Legacy Python 2 code.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', type=int, action='store',
            dest='target', help='for validation or test dataset')
    # Manual arity check; argparse itself would accept other forms.
    if len(sys.argv) != 3:
        print 'Command e.g.: python train.py -t 0(1)'
        sys.exit(1)
    para = parser.parse_args()
    if para.target == 0:
        features_targets = [entry for entry in csv.reader(open(settings["MTLR_TRAIN_FILE"]))]
    elif para.target == 1:
        features_targets = [entry for entry in csv.reader(open(settings["MTLR_TRAIN_FILE_FOR_SUBMIT"]))]
    else:
        print 'Invalid train data target choice...'
        sys.exit(1)
    # Row layout: [id1, id2, feature..., label]
    features = [map(float, entry[2:-1]) for entry in features_targets]
    pairs = [map(int, entry[:2]) for entry in features_targets]
    # NOTE(review): entry[-1] is a string, so map(int, entry[-1]) maps over
    # its individual characters — this only works for single-character
    # labels; int(entry[-1]) may have been intended. Confirm.
    targets = [map(int, entry[-1]) for entry in features_targets]
    classifier = MeanRegularizedMultiTaskLR(C=0.1,
            tol=0.0001,
            intercept_scaling=1,
            lr=0.02,
            eta=0.1,
            field_for_model_num=2,
            max_niters=200,
            confidence=20,
            para_init="gaussian")
    classifier.fit(pairs, features, targets)
    data_io.save_model(classifier, settings["MTLR_MODEL_FILE"])

if __name__ == "__main__":
    main()
|
anthonylife/TaobaoCompetition2014
|
src/MTL-LR/train.py
|
Python
|
gpl-2.0
| 2,562
|
[
"Gaussian"
] |
6bb01d5b743c5847aad64e2a36f2a951a747583029416faad62e09c675dea861
|
"""
vtkImageImportFromArray: a NumPy front-end to vtkImageImport
Load a python array into a vtk image.
To use this class, you must have NumPy installed (http://numpy.scipy.org/)
Methods:
GetOutput() -- connect to VTK image pipeline
SetArray() -- set the array to load in
Convert python 'Int' to VTK_UNSIGNED_SHORT:
(python doesn't support unsigned short, so this might be necessary)
SetConvertIntToUnsignedShort(yesno)
ConvertIntToUnsignedShortOn()
ConvertIntToUnsignedShortOff()
Methods from vtkImageImport:
(if you don't set these, sensible defaults will be used)
SetDataExtent()
SetDataSpacing()
SetDataOrigin()
"""
import numpy
from vtk import vtkImageImport
from vtk.util.vtkConstants import *
class vtkImageImportFromArray:
    """NumPy front-end to vtkImageImport: load a NumPy array into a VTK
    image pipeline.

    Methods:
        GetOutput()/GetOutputPort() -- connect to the VTK image pipeline
        SetArray(a)                 -- set the array to load in
        SetConvertIntToUnsignedShort(yesno) / ConvertIntToUnsignedShortOn()
        / ConvertIntToUnsignedShortOff() -- convert int32 data to
        VTK_UNSIGNED_SHORT (python doesn't support unsigned short natively)
        SetDataExtent()/SetDataSpacing()/SetDataOrigin() -- forwarded to
        the underlying vtkImageImport (sensible defaults otherwise)
    """
    def __init__(self):
        self.__import = vtkImageImport()
        self.__ConvertIntToUnsignedShort = False
        self.__Array = None
    # type dictionary: maps numpy dtype.char to a VTK scalar type.
    # Note that python doesn't support unsigned integers properly!
    # Complex types ('F'/'D') are imported as pairs of float/double
    # components (handled in SetArray below).
    __typeDict = {'b':VTK_CHAR,             # int8
                  'B':VTK_UNSIGNED_CHAR,    # uint8
                  'h':VTK_SHORT,            # int16
                  'H':VTK_UNSIGNED_SHORT,   # uint16
                  'i':VTK_INT,              # int32
                  'I':VTK_UNSIGNED_INT,     # uint32
                  'l':VTK_LONG,             # int64
                  'L':VTK_UNSIGNED_LONG,    # uint64
                  'f':VTK_FLOAT,            # float32
                  'd':VTK_DOUBLE,           # float64
                  'F':VTK_FLOAT,            # complex64: 2 float components
                  'D':VTK_DOUBLE,           # complex128: 2 double components
                  }
    # convert 'Int32' to 'unsigned short'
    def SetConvertIntToUnsignedShort(self, yesno):
        self.__ConvertIntToUnsignedShort = yesno
    def GetConvertIntToUnsignedShort(self):
        return self.__ConvertIntToUnsignedShort
    def ConvertIntToUnsignedShortOn(self):
        self.__ConvertIntToUnsignedShort = True
    def ConvertIntToUnsignedShortOff(self):
        self.__ConvertIntToUnsignedShort = False
    # get the output
    def GetOutputPort(self):
        return self.__import.GetOutputPort()
    # get the output
    def GetOutput(self):
        return self.__import.GetOutput()
    # import an array
    def SetArray(self, imArray):
        """Load *imArray* (3-D volume, or 4-D with components in the last
        axis) into the underlying vtkImageImport.

        The extent is set so that the array axes map to (z, y, x).
        """
        self.__Array = imArray
        numComponents = 1
        dim = imArray.shape
        if (len(dim) == 4):
            numComponents = dim[3]
            dim = (dim[0], dim[1], dim[2])
        typecode = imArray.dtype.char
        ar_type = self.__typeDict[typecode]
        if (typecode == 'F' or typecode == 'D'):
            # complex data: import real/imaginary parts as extra components
            # (now reachable -- 'F'/'D' were previously missing from
            # __typeDict, so this branch could never be hit).
            numComponents = numComponents * 2
        if (self.__ConvertIntToUnsignedShort and typecode == 'i'):
            # BUG FIX: cast to uint16 (was int16) so the buffer really
            # contains unsigned shorts; int16 cannot represent >= 32768.
            imString = imArray.astype(numpy.uint16).tostring()
            ar_type = VTK_UNSIGNED_SHORT
        else:
            # BUG FIX: serialize once here (previously the array was
            # converted to a string twice, wasting time and memory).
            imString = imArray.tostring()
        self.__import.CopyImportVoidPointer(imString, len(imString))
        self.__import.SetDataScalarType(ar_type)
        self.__import.SetNumberOfScalarComponents(numComponents)
        extent = self.__import.GetDataExtent()
        self.__import.SetDataExtent(extent[0], extent[0] + dim[2] - 1,
                                    extent[2], extent[2] + dim[1] - 1,
                                    extent[4], extent[4] + dim[0] - 1)
        self.__import.SetWholeExtent(extent[0], extent[0] + dim[2] - 1,
                                     extent[2], extent[2] + dim[1] - 1,
                                     extent[4], extent[4] + dim[0] - 1)
        # BUG FIX: removed an unconditional SetDataScalarTypeToUnsignedShort()
        # call here -- it clobbered the scalar type chosen above for every
        # non-uint16 input (e.g. float volumes were misread as uint16).
    def GetArray(self):
        return self.__Array
    def GetImport(self):
        return self.__import
    # a whole bunch of methods copied from vtkImageImport
    def SetDataExtent(self, extent):
        self.__import.SetDataExtent(extent)
    def GetDataExtent(self):
        return self.__import.GetDataExtent()
    def SetDataSpacing(self, spacing):
        self.__import.SetDataSpacing(spacing)
    def GetDataSpacing(self):
        return self.__import.GetDataSpacing()
    def SetDataOrigin(self, origin):
        self.__import.SetDataOrigin(origin)
    def GetDataOrigin(self):
        return self.__import.GetDataOrigin()
|
chapering/PyVolRender
|
imageImportFromArray.py
|
Python
|
gpl-2.0
| 4,251
|
[
"VTK"
] |
ad26c82923f09f6ed79f73cbc6640d2967c26236053bbe5e2540525450715049
|
"""Streaming pickle implementation for efficiently serializing and
de-serializing an iterable (e.g., list)
Created on 2010-06-19 by Philip Guo
http://code.google.com/p/streaming-pickle/
Modified by Brian Thorne 2013 to add base64 encoding to support
python3 bytearray and the like.
"""
import base64
from pickle import dumps, loads
import unittest
import tempfile
def s_dump(iterable_to_pickle, file_obj):
    """Serialize every element of *iterable_to_pickle* to *file_obj*.

    *file_obj* must be opened in binary write mode; one base64-encoded
    pickle record per element is written via s_dump_elt().
    """
    for item in iterable_to_pickle:
        s_dump_elt(item, file_obj)
def s_dump_elt(elt_to_pickle, file_obj):
    """Write one element to *file_obj* (binary write mode) as a single
    base64-encoded pickle record."""
    file_obj.write(base64.b64encode(dumps(elt_to_pickle)))
    # A blank line terminates the record; base64 output contains no
    # newlines of its own, so the separator is unambiguous.
    file_obj.write(b'\n\n')
def s_load(file_obj):
    """Load contents from *file_obj*, returning a generator that yields one
    element at a time.

    *file_obj* must be opened in binary mode and contain base64-encoded
    pickle records separated by blank lines (the format written by
    s_dump / s_dump_elt).
    """
    cur_elt = []
    for line in file_obj:
        if line == b'\n':
            # Blank line: the accumulated lines form one complete record.
            encoded_elt = b''.join(cur_elt)
            try:
                pickled_elt = base64.b64decode(encoded_elt)
                elt = loads(pickled_elt)
            except EOFError:
                print("EOF found while unpickling data")
                print(pickled_elt)
                # BUG FIX: raising StopIteration inside a generator is
                # converted to RuntimeError by PEP 479 (Python 3.7+);
                # terminate the generator with a plain return instead.
                return
            cur_elt = []
            yield elt
        else:
            cur_elt.append(line)
class TestStreamingPickle(unittest.TestCase):
    """Round-trip tests for the streaming-pickle helpers."""

    def setUp(self):
        pass

    def testSimpleList(self):
        """Dump a mixed-type list to a temp file and read it back."""
        data = [1, 2, 3, 4, None, b'test', '\n', '\x00', 3, b'\n\n\n\n', 5, 7, 9, 11, "hello", bytearray([2, 4, 4])]
        with tempfile.TemporaryFile() as f:
            s_dump(data, f)
            # rewind the temporary file before reading it back
            f.seek(0)
            count = 0
            for count, element in enumerate(s_load(f)):
                self.assertEqual(data[count], element)
            # every element must have round-tripped
            self.assertEqual(count, len(data) - 1)


if __name__ == "__main__":
    unittest.main()
|
mjirik/lisa
|
lisa/extern/sPickle/sPickle.py
|
Python
|
bsd-3-clause
| 2,170
|
[
"Brian"
] |
d2c9ad31d9165ea3b71beda6488dba6ec23a1fde32c473522f088db041e309dc
|
#!/bin/python3
# coding: utf-8
"""
A Sphinx extension that enables watermarks for HTML output.
https://github.com/kallimachos/sphinxmark
Copyright 2021 Brian Moss
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
from shutil import copy
from bottle import TEMPLATE_PATH, template
from PIL import Image, ImageDraw, ImageFont
from sphinx.application import Sphinx
from sphinx.environment import BuildEnvironment
from sphinx.util import logging
LOG = logging.getLogger(__name__)
def buildcss(app: Sphinx, buildpath: str, imagefile: str) -> str:
    """Render the watermark CSS into *buildpath* and return its file name.

    Defaults (body div, vertical repeat, centered, scrolling) are
    overridden by the corresponding sphinxmark_* config values.
    """
    div = app.config.sphinxmark_div if app.config.sphinxmark_div != "default" else "body"
    repeat = "no-repeat" if app.config.sphinxmark_repeat is False else "repeat-y"
    attachment = "fixed" if app.config.sphinxmark_fixed is True else "scroll"
    position = "center"
    border = app.config.sphinxmark_border
    if border in ("left", "right"):
        # side-banner variant
        css = template("border", div=div, image=imagefile, side=border)
    else:
        # tiled background-watermark variant
        css = template(
            "watermark",
            div=div,
            image=imagefile,
            repeat=repeat,
            position=position,
            attachment=attachment,
        )
    LOG.debug(f"[sphinxmark] Template: {css}")
    cssname = "sphinxmark.css"
    Path(buildpath, cssname).write_text(css)
    return cssname
def createimage(app: Sphinx, srcdir: Path, buildpath: Path) -> str:
    """Render the sphinxmark_text string to a rotated, semi-transparent PNG.

    The image is sized by sphinxmark_text_width/_spacing, drawn with the
    bundled arial.ttf, and saved into *buildpath*. Returns the generated
    file name (textmark_<text>.png).
    """
    text = app.config.sphinxmark_text
    # draw transparent background
    width = app.config.sphinxmark_text_width
    height = app.config.sphinxmark_text_spacing
    img = Image.new("RGBA", (width, height), (255, 255, 255, 0))
    d = ImageDraw.Draw(img)
    # set font
    fontfile = str(Path(srcdir, "arial.ttf"))
    font = ImageFont.truetype(fontfile, app.config.sphinxmark_text_size)
    # set x y location for text
    # NOTE(review): ImageDraw.textsize was removed in Pillow 10; this code
    # requires Pillow < 10 -- consider migrating to textbbox().
    xsize, ysize = d.textsize(text, font)
    LOG.debug("[sphinxmark] x = " + str(xsize) + "\ny = " + str(ysize))
    x = (width / 2) - (xsize / 2)
    y = (height / 2) - (ysize / 2)
    # add text to image
    color = app.config.sphinxmark_text_color
    d.text((x, y), text, font=font, fill=color)
    # set opacity: paste a copy of the image over itself using a uniform
    # alpha channel, which fades only the drawn pixels
    img2 = img.copy()
    img2.putalpha(app.config.sphinxmark_text_opacity)
    img.paste(img2, img)
    # rotate image
    img = img.rotate(app.config.sphinxmark_text_rotation)
    # save image
    imagefile = f"textmark_{text}.png"
    imagepath = Path(buildpath, imagefile)
    img.save(imagepath, "PNG")
    LOG.debug(f"[sphinxmark] Image saved to: {imagepath}")
    return imagefile
def getimage(app: Sphinx) -> tuple:
    """Locate or generate the watermark image and copy it into _static.

    Returns a (buildpath, imagefile) tuple for buildcss().
    """
    # Make sure bottle can find the CSS templates shipped with sphinxmark.
    srcdir = Path(__file__).parent.resolve()
    TEMPLATE_PATH.append(srcdir)
    staticbase = "_static"
    buildpath = Path(app.outdir, staticbase)
    try:
        buildpath.mkdir()
    except OSError:
        # Already existing as a directory is fine; anything else is fatal.
        if not buildpath.is_dir():
            raise
    choice = app.config.sphinxmark_image
    if choice == "default":
        # Bundled draft watermark.
        imagefile = "watermark-draft.png"
        copy(Path(srcdir, imagefile), buildpath)
        LOG.debug(f"[sphinxmark] Using default image: {imagefile}")
    elif choice == "text":
        # Generated on the fly from sphinxmark_text.
        imagefile = createimage(app, srcdir, buildpath)
        LOG.debug(f"[sphinxmark] Image: {imagefile}")
    else:
        # User-supplied file looked up in the project's static path.
        imagefile = choice
        staticpath = app.config.html_static_path[0] if app.config.html_static_path else "_static"
        LOG.debug(f"[sphinxmark] static path: {staticpath}")
        imagepath = Path(str(app.confdir), staticpath, imagefile)
        LOG.debug(f"[sphinxmark] Imagepath: {imagepath}")
        try:
            copy(imagepath, buildpath)
        except FileNotFoundError:
            LOG.info(" fail")
            raise
    return (buildpath, imagefile)
def watermark(app: Sphinx, env: BuildEnvironment) -> None:
    """Add the watermark CSS to the build when sphinxmark_enable is set."""
    if app.config.sphinxmark_enable is not True:
        return
    LOG.info("adding watermark...", nonl=True)
    try:
        buildpath, imagefile = getimage(app)
        cssname = buildcss(app, buildpath, imagefile)
        app.add_css_file(cssname)
        LOG.info(" done")
    except Exception as e:
        # A missing image/template should not abort the whole build.
        LOG.warning(f"Failed to add watermark: {e}")
def setup(app: Sphinx) -> dict:
    """Configure setup for Sphinx extension.

    :param app: Sphinx application context.
    """
    # (name, default) pairs -- all registered with rebuild policy "html".
    config_defaults = (
        ("sphinxmark_enable", False),
        ("sphinxmark_div", "default"),
        ("sphinxmark_border", None),
        ("sphinxmark_repeat", True),
        ("sphinxmark_fixed", False),
        ("sphinxmark_image", "default"),
        ("sphinxmark_text", "default"),
        ("sphinxmark_text_color", (255, 0, 0)),
        ("sphinxmark_text_size", 100),
        ("sphinxmark_text_width", 1000),
        ("sphinxmark_text_opacity", 20),
        ("sphinxmark_text_spacing", 400),
        ("sphinxmark_text_rotation", 0),
    )
    for name, default in config_defaults:
        app.add_config_value(name, default, "html")
    app.connect("env-updated", watermark)
    return {
        "version": "0.2.1",
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
|
kallimachos/sphinxmark
|
sphinxmark/__init__.py
|
Python
|
apache-2.0
| 6,205
|
[
"Brian"
] |
cd9d8f00878b9e2cc7746f89f673ac0fc0008a4f7cc7f88f2d1421218aae0eb6
|
# -*- coding: utf-8 -*-
"""
SUNTANS bathymetry interpolation tools
Created on Fri Oct 05 11:24:10 2012
@author: mrayson
"""
from interpXYZ import Inputs, interpXYZ
import numpy as np
import sunpy
import matplotlib.pyplot as plt
from SUNTANS.sunpy import Grid
from trisearch import TriSearch
import time
# Example inputs
#infile = 'C:/Projects/GOMGalveston/DATA/Bathymetry/DEMs/NOAA_25m_UTM_DEM.nc'
#suntanspath = 'C:/Projects/GOMGalveston/MODELLING/GRIDS/GalvestonFine'
class DepthDriver(object):
    """
    Driver class for interpolating depth data onto a suntans grid
    """
    # Interpolation method
    interpmethod='idw' # 'nn', 'idw', 'kriging', 'griddata'
    # Type of plot
    plottype='mpl' # 'mpl', 'vtk2' or 'vtk3'
    # Interpolation options
    NNear=3
    p = 1.0 # power for inverse distance weighting
    # kriging options
    varmodel = 'spherical'
    nugget = 0.1
    sill = 0.8
    vrange = 250.0
    # Projection conversion info for input data
    convert2utm=False
    CS='NAD83'
    utmzone=15
    isnorth=True
    vdatum = 'MSL'
    # Smoothing options
    smooth=False
    smoothmethod='kriging' # Use kriging or idw for smoothing
    smoothnear=5 # No. of points to use for smoothing
    def __init__(self,depthfile,**kwargs):
        """Load the scattered depth samples from *depthfile*.

        Keyword arguments override any of the class-level defaults above.
        """
        self.__dict__.update(kwargs)
        # Parse the depth data into an object
        self.indata = Inputs(depthfile,convert2utm=self.convert2utm,CS=self.CS,utmzone=self.utmzone,\
            isnorth=self.isnorth,vdatum=self.vdatum)
    def __call__(self,suntanspath,depthmax=0.0,scalefac=-1.0, interpnodes=True):
        """Interpolate depths onto the grid in *suntanspath* and save them.

        depthmax    -- shallow cutoff: cells shallower than this are clipped
        scalefac    -- multiplier applied to interpolated values (-1 flips sign)
        interpnodes -- if True, interpolate at grid nodes and take the
                       per-cell max; otherwise interpolate straight to
                       cell centres
        Writes <suntanspath>/depths.dat-voro and produces a plot.
        """
        self.suntanspath=suntanspath
        # Initialise the interpolation points
        print 'Loading suntans grid points...'
        self.grd = sunpy.Grid(self.suntanspath)
        if interpnodes:
            print 'Interpolating depths onto nodes and taking min...'
            self.xy = np.column_stack((self.grd.xp,self.grd.yp))
        else:
            print 'Interpolating depths straight to cell centres...'
            self.xy = np.column_stack((self.grd.xv,self.grd.yv))
        # Initialise the Interpolation class
        print 'Building interpolant class...'
        self.F = interpXYZ(self.indata.XY,self.xy,method=self.interpmethod,NNear=self.NNear,\
            p=self.p,varmodel=self.varmodel,nugget=self.nugget,sill=self.sill,vrange=self.vrange)
        # Interpolate the data
        print 'Interpolating data...'
        dv = self.F(self.indata.Zin)*scalefac
        if interpnodes:
            # Cell depth = max over its node depths (despite the "min" in
            # the progress message above).
            self.grd.dv = np.zeros_like(self.grd.xv)
            for nn in range(self.grd.Nc):
                self.grd.dv[nn] = np.max(dv[self.grd.cells[nn,0:self.grd.nfaces[nn] ] ])
                #self.grd.dv[nn] = np.mean(dv[self.grd.cells[nn,0:self.grd.nfaces[nn] ] ])
        else:
            self.grd.dv = dv
        # Smooth
        if self.smooth:
            self.smoothDepths()
        # Cap the maximum depth
        ind = self.grd.dv<=depthmax
        self.grd.dv[ind]=depthmax
        # Write the depths to file
        print 'Writing depths.dat...'
        self.grd.saveBathy(suntanspath+'/depths.dat-voro')
        # NOTE(review): %-formatting binds before '+', so this prints
        # "Data saved to <path>./depths.dat-voro" -- cosmetic only.
        print 'Data saved to %s.'%suntanspath+'/depths.dat-voro'
        # Plot
        if self.plottype=='mpl':
            self.plot()
        elif self.plottype=='vtk2':
            self.plotvtk()
        elif self.plottype=='vtk3':
            self.plotvtk3D()
        print 'Finished depth interpolation.'
    def smoothDepths(self):
        """
        Smooth the data by running an interpolant over the model grid points
        """
        print 'Smoothing the data...'
        Fsmooth =interpXYZ(self.xy,self.xy,method=self.smoothmethod,NNear=self.smoothnear,vrange=self.vrange)
        self.grd.dv = Fsmooth(self.grd.dv)
    def plot(self):
        """
        Plot using matplotlib
        """
        fig=plt.figure()
        self.grd.plot(cmap=plt.cm.gist_earth)
        outfile = self.suntanspath+'/depths.png'
        fig.savefig(outfile,dpi=150)
        print 'Figure saved to %s.'%outfile
    def plotvtk(self):
        """
        2D plot using the vtk libraries
        """
        self.grd.plotvtk()
        outfile = self.suntanspath+'/depths.png'
        self.grd.fig.scene.save(outfile)
        print 'Figure saved to %s.'%outfile
    def plotvtk3D(self):
        """
        3D plot using the vtk libraries
        """
        from tvtk.api import tvtk
        from mayavi import mlab
        # Plot the data on a 3D grid: re-evaluate the interpolant at the
        # grid nodes and use (vertically exaggerated) depth as elevation.
        xy = np.column_stack((self.grd.xp,self.grd.yp))
        dvp = self.F(xy)
        vertexag = 50.0
        points = np.column_stack((self.grd.xp,self.grd.yp,dvp*vertexag))
        tri_type = tvtk.Triangle().cell_type
        #tet_type = tvtk.Tetra().cell_type
        ug = tvtk.UnstructuredGrid(points=points)
        ug.set_cells(tri_type, self.grd.cells)
        ug.cell_data.scalars = self.grd.dv
        ug.cell_data.scalars.name = 'depths'
        f=mlab.gcf()
        f.scene.background = (0.,0.,0.)
        d = mlab.pipeline.add_dataset(ug)
        h=mlab.pipeline.surface(d,colormap='gist_earth')
        mlab.colorbar(object=h,orientation='vertical')
        mlab.view(0,0)
        outfile = self.suntanspath+'/depths.png'
        f.scene.save(outfile)
        print 'Figure saved to %s.'%outfile
        #mlab.show()
class AverageDepth(Grid):
    """
    Returns the average of all depths inside each cells
    """
    # Projection conversion info for input data
    convert2utm=False
    CS='NAD83'
    utmzone=15
    isnorth=True
    vdatum = 'MSL'
    def __init__(self,suntanspath,**kwargs):
        """Load the grid from *suntanspath*; kwargs override the defaults above."""
        self.__dict__.update(kwargs)
        Grid.__init__(self,suntanspath)
        # Initialise the trisearch object
        self.tsearch = TriSearch(self.xp,self.yp,self.cells)
    def __call__(self,depthfile,**kwargs):
        """Locate which grid cell each depth sample in *depthfile* falls in.

        NOTE(review): the per-cell averaging promised by the class
        docstring is not implemented -- `cells` is computed but never
        used or returned; confirm whether this method is unfinished.
        """
        self.__dict__.update(kwargs)
        # Parse the depth data into an object
        self.indata = Inputs(depthfile,convert2utm=False,CS=self.CS,utmzone=self.utmzone,\
            isnorth=self.isnorth,vdatum=self.vdatum)
        tic = time.clock()
        print 'Performing triangle search...'
        cells = self.tsearch(self.indata.XY[:,0],self.indata.XY[:,1])
        toc = time.clock()
        print 'Search time: %f seconds.'%(toc-tic)
def adjust_channel_depth(grd,shpfile,lcmax=500.):
    """
    Adjusts the depths of a suntans grid object using a line shapefile.

    The shapefile must have an attribute called "contour". Cell depths are
    blended toward each line's contour value with a weight that decays
    linearly from 1 on the line to 0 at distance *lcmax*.
    """
    from shapely import geometry, speedups
    from maptools import readShpPointLine
    if speedups.available:
        speedups.enable()
    print 'Adjusting depths in channel regions with a shapefile...'
    # Load the shapefile
    xyline,contour = readShpPointLine(shpfile,FIELDNAME='contour')
    # Load all of the points into shapely type geometry
    # Distance method won't work with numpy array
    #P = geometry.asPoint(xy)
    P = [geometry.Point(grd.xv[i],grd.yv[i]) for i in range(grd.Nc)]
    L=[]
    for ll in xyline:
        L.append(geometry.asLineString(ll))
    nlines = len(L)
    # One weight column per line, one row per grid cell.
    weight_all = np.zeros((grd.Nc,nlines))
    for n in range(nlines):
        print 'Calculating distance from line %d...'%n
        dist = [L[n].distance(P[i]) for i in range(grd.Nc)]
        dist = np.array(dist)
        # Calculate the weight from the distance: 1 on the line,
        # linear ramp down to 0 at lcmax, 0 beyond.
        weight = -dist/lcmax+1.
        weight[dist>=lcmax]=0.
        weight_all[:,n] = weight
    # Now go through and re-calculate the depths as a weighted blend of
    # the original bathymetry and each line's contour value.
    dv = grd.dv*(1-weight_all.sum(axis=-1))
    for n in range(nlines):
        dv += weight_all[:,n]*contour[n]
    grd.dv=dv
    return grd
|
UT-CWE/Hyospy
|
Hyospy_ensemble/lib/SUNTANS/SUNTANS/sundepths.py
|
Python
|
mit
| 8,258
|
[
"Mayavi",
"VTK"
] |
d3b07f10ccef49cafddd0dd4cd277c677f5c522b326cae2338c8a10ef6958f8f
|
"""
Define common steps for instructor dashboard acceptance tests.
"""
# pylint: disable=C0111
# pylint: disable=W0621
from __future__ import absolute_import
from lettuce import world, step
from nose.tools import assert_in # pylint: disable=E0611
from courseware.tests.factories import StaffFactory, InstructorFactory
@step(u'Given I am "([^"]*)" for a course')
def i_am_staff_or_instructor(step, role):  # pylint: disable=unused-argument
    """Create a test course and a staff/instructor user, then log in.

    `role` must be either "instructor" or "staff"; the created user is
    stored on `world` under that name.
    """
    assert_in(role, ['instructor', 'staff'])
    # Clear existing courses to avoid conflicts
    world.clear_courses()
    # Create a new course
    course = world.CourseFactory.create(
        org='edx',
        number='999',
        display_name='Test Course'
    )
    world.course_id = 'edx/999/Test_Course'
    world.role = 'instructor'
    if role == 'instructor':
        # Make & register an instructor for the course
        world.instructor = InstructorFactory(course=course.location)
        user = world.instructor
    else:
        world.role = 'staff'
        # Make & register a staff member
        world.staff = StaffFactory(course=course.location)
        user = world.staff
    world.enroll_user(user, world.course_id)
    world.log_in(
        username=user.username,
        password='test',
        email=user.email,
        name=user.profile.name
    )
def go_to_section(section_name):
    """Open the beta instructor dashboard and switch to *section_name*.

    Valid names: course_info, membership, student_admin, data_download,
    analytics, send_email.
    """
    world.visit('/courses/edx/999/Test_Course')
    world.css_click('a[href="/courses/edx/999/Test_Course/instructor"]')
    # Opt in to the beta dashboard, then pick the requested tab.
    world.css_click('div.beta-button-wrapper>a')
    world.css_click('a[data-section="{0}"]'.format(section_name))
@step(u'I click "([^"]*)"')
def click_a_button(step, button):  # pylint: disable=unused-argument
    """Click a named button on the data download section of the dashboard."""
    # Buttons that only need a single click, keyed by step text.
    simple_buttons = {
        "Grading Configuration": 'input[name="dump-gradeconf"]',
        "List enrolled students' profile information": 'input[name="list-profiles"]',
    }
    if button == "Generate Grade Report":
        # Go to the data download section of the instructor dash
        go_to_section("data_download")
        # Click generate grade report button
        world.css_click('input[name="calculate-grades-csv"]')
        # Expect to see a message that grade report is being generated
        expected_msg = "Your grade report is being generated! You can view the status of the generation task in the 'Pending Instructor Tasks' section."
        world.wait_for_visible('#grade-request-response')
        assert_in(
            expected_msg, world.css_text('#grade-request-response'),
            msg="Could not find grade report generation success message."
        )
    elif button in simple_buttons:
        # Go to the data download section of the instructor dash
        go_to_section("data_download")
        world.css_click(simple_buttons[button])
    else:
        raise ValueError("Unrecognized button option " + button)
|
TangXT/GreatCatMOOC
|
lms/djangoapps/instructor/features/common.py
|
Python
|
agpl-3.0
| 3,515
|
[
"VisIt"
] |
705d40226167e6920c5bab097e6a7acb6ce8ec7ba0f6098b60ab798e62b1c5c3
|
import os
import time
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit as u
import mdtraj.reporters
import sys

# ---- Simulation configuration --------------------------------------------
code = "2evn"                          # PDB code of the equilibrated system
which_forcefield = "amber99sbildn.xml"
which_water = 'tip3p-fb.xml'
platform_name = "CUDA"
timestep = 2.0 * u.femtoseconds
cutoff = 0.95 * u.nanometers           # nonbonded (PME) cutoff
output_frequency = 25000               # steps between saved frames/log lines
n_steps = 500000000
temperature = 300.
pressure = 1.0 * u.atmospheres

# Replica index, taken from the command line; file names are keyed on it.
rank = int(sys.argv[1])
time.sleep(rank)  # This makes sure that no two jobs run at the same time for RNG purposes.
pdb_filename = "./%s_equil.pdb" % code
dcd_filename = "./Trajectories/%s_%d.dcd" % (code, rank)
log_filename = "./Trajectories/%s_%d.log" % (code, rank)

# Save only the first chain (the solute) in the trajectory output.
traj = mdtraj.load(pdb_filename)
top, bonds = traj.top.to_dataframe()
atom_indices = top.index[top.chainID == 0].values

# Build the OpenMM system: PME electrostatics, constrained H bonds,
# Langevin thermostat plus Monte Carlo barostat (NPT ensemble).
pdb = app.PDBFile(pdb_filename)
topology = pdb.topology
positions = pdb.positions
ff = app.ForceField(which_forcefield, which_water)
platform = mm.Platform.getPlatformByName(platform_name)
system = ff.createSystem(topology, nonbondedMethod=app.PME, nonbondedCutoff=cutoff, constraints=app.HBonds)
integrator = mm.LangevinIntegrator(temperature, 1.0 / u.picoseconds, timestep)
system.addForce(mm.MonteCarloBarostat(pressure, temperature, 25))
simulation = app.Simulation(topology, system, integrator, platform=platform)
simulation.context.setPositions(positions)
simulation.context.setVelocitiesToTemperature(temperature)
print("Using platform %s" % simulation.context.getPlatform().getName())

# Never overwrite an existing trajectory for this replica.
if os.path.exists(dcd_filename):
    sys.exit()
simulation.reporters.append(mdtraj.reporters.DCDReporter(dcd_filename, output_frequency, atomSubset=atom_indices))
simulation.reporters.append(app.StateDataReporter(open(log_filename, 'w'), output_frequency, step=True, time=True, speed=True))
simulation.step(n_steps)
|
hainm/open-forcefield-group
|
nmr/2EVN/code/production.py
|
Python
|
gpl-2.0
| 1,817
|
[
"MDTraj",
"OpenMM"
] |
0049ea058c4a8b6488443d80dc9640c70f26ef29d0499d3d67bb58ca5336ac85
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Abinit Post Process Application
author: Martin Alexandre
last edited: May 2014
"""
import sys,os,time,commands
import string, math
#GUI
import gui.graph as Graph
import gui.conv as Conv
#Utility
import utility.writeHIST as Write
import utility.analysis as Analysis
try:
from PyQt4 import Qt,QtGui,QtCore
except:
pass;
from numpy import *
#----------------------------------------------------------------#
#--------------------------NETCDF WRITER-------------------------#
#----------------------------------------------------------------#
class winNetcdf(QtGui.QWidget):
    """PyQt4 dialog for exporting an Abinit HIST file via writeHIST.

    Shows a name field plus Save/Close buttons; Save delegates to
    utility.writeHIST.writeHIST().
    """
    # Shared periodic-table helper (class attribute).
    PTOE = Analysis.PeriodicTableElement()
    def __init__(self, file, parent = None,name =''):
        # file : HIST data source handed straight to writeHIST on save
        # name : window-title prefix and default export name
        self.file = file
        self.name = name
        # ni/nf: first/last step indices written by save()
        self.ni = 1
        self.nf = 2
        self.initUI(parent)
        self.raise_()
    def initUI(self, parent):
        """Build the fixed-size window and wire up the buttons."""
        #-----------------Creation of the windows----------------------------#
        QtGui.QWidget.__init__(self, parent)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.setWindowTitle(self.name + ' Save Netcdf')
        self.setFixedSize(600, 450)
        self.center()
        self.layout = QtGui.QGridLayout()
        self.setLayout(self.layout)
        self.lbl1 = QtGui.QLabel(" Name :", self)
        self.lbl1.setFixedWidth(95)
        self.lname = QtGui.QLineEdit()
        self.lname.setFixedWidth(200)
        self.pbClose = QtGui.QPushButton("Close")
        self.pbClose.setFixedSize(70,20)
        self.connect(self.pbClose,QtCore.SIGNAL("clicked()"),QtCore.SLOT('close()'))
        self.pbSave = QtGui.QPushButton("Save")
        self.pbSave.setFixedSize(70,20)
        self.connect(self.pbSave,QtCore.SIGNAL("clicked()"),self.save)
        self.layout.addWidget(self.lbl1    , 1, 0, 1, 1, QtCore.Qt.AlignRight)
        self.layout.addWidget(self.lname   , 1, 1, 1, 1, QtCore.Qt.AlignCenter)
        self.layout.addWidget(self.pbClose , 7, 0, 1, 2, QtCore.Qt.AlignCenter)
        self.layout.addWidget(self.pbSave  , 7, 1, 1, 4, QtCore.Qt.AlignCenter)
        self.show()
    #------------------------------------------------------------------------#
    def save(self):
        # Export steps [ni, nf] of self.file under the stored name.
        Write.writeHIST(self.file,self.name,self.ni,self.nf)
    def close(self):
        # NOTE(review): self.graphMSD is never assigned anywhere in this
        # class, so this del raises AttributeError; confirm intent.
        del self.graphMSD
        del self
    def closeEvent(self, event):
        # Best-effort cleanup on window close; failures are ignored.
        try:
            del self.graphMSD
        except:
            pass
        try:
            del self
        except:
            pass
    def center(self):
        """Move the window to the centre of the screen."""
        screen = QtGui.QDesktopWidget().screenGeometry()
        size = self.geometry()
        self.move((screen.width()-size.width())/2, (screen.height()-size.height())/2)
|
jmbeuken/abinit
|
scripts/post_processing/appa/gui/netcdf.py
|
Python
|
gpl-3.0
| 2,757
|
[
"ABINIT",
"NetCDF"
] |
217c262cdde23c84a858fa0e96c23565a1307bf94cf72971b32a8bdcc070e5c9
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 20:20:03 2018
@author: BallBlueMeercat
"""
import time
import dnest4
import numpy as np
import numpy.random as rng
import pickle
#from numba import jitclass, int32
import datasim
import results
import tools
#from scipy.special import erf
# Sampler run-length preset: slow = 1, medium = 2, long = 3
# (selects the sampler.sample(...) settings further down).
speed = 1
# Sigma of the noise on data.
sigma = 0.07
dataname = 'mag_z_LCDM_1000_sigma_'+str(sigma)
# Load the synthetic supernova data: magnitudes and matching redshifts.
mag, zpicks = results.load('./data', dataname)
#@jitclass([('dummy', int32)])
class Model(object):
    """
    Specify the model in Python.

    DNest4 model with two parameters: m (uniform prior on [0, 1]) and
    gamma (uniform on [g_min, g_max]).

    NOTE(review): relies on the module-level globals g_min, g_max, key,
    zpicks, mag and sigma being defined before sampling starts.
    """
    def __init__(self):
        """
        Parameter values *are not* stored inside the class
        """
        pass
    def from_prior(self):
        """
        Unlike in C++, this must *return* a numpy array of parameters.
        """
        m = rng.rand()
        g = 1E3*rng.rand()
        # Wrap the initial gamma draw into the allowed range.
        g = dnest4.wrap(g, g_min, g_max)
        return np.array([m, g])
    def perturb(self, params):
        """
        Unlike in C++, this takes a numpy array of parameters as input,
        and modifies it in-place. The return value is still logH.
        """
        logH = 0.0
        # Pick one of the two parameters at random to perturb.
        which = rng.randint(2)
        # Note the difference between dnest4.wrap in Python and
        # DNest4::wrap in C++. The former *returns* the wrapped value.
        if which == 0:
            # Perturb m in log space (keeps it positive).
            log_m = np.log(params[which])
            log_m += dnest4.randh()
            log_m = dnest4.wrap(log_m, 0.0, 1.0)
            params[which] = np.exp(log_m)
        elif which == 1:
            g = params[which]
            g += dnest4.randh()
            g = dnest4.wrap(g, g_min, g_max)
            params[which] = g
        return logH
    def log_likelihood(self, params):
        """
        Gaussian sampling distribution.
        """
        m, g = params
        theta = {'m':m,'gamma':g}
        model = datasim.magn(theta, zpicks, key)
        var = sigma**2.0
        return -0.5*np.sum((mag-model)**2.0 /var +0.5*np.log(2.0*np.pi*var))
    # def randh(self):
    #     """
    #     Generate from the heavy-tailed distribution.
    #     """
    #     a = np.random.randn()
    #     b = np.random.rand()
    #     t = a/np.sqrt(-np.log(b))
    #     n = np.random.randn()
    #     return 10.0**(1.5 - 3*np.abs(t))*n
    #
    # def wrap(self, x, a, b):
    #     assert b > a
    #     return (x - a)%(b - a) + a
# Create a model object and a sampler
model = Model()
sampler = dnest4.DNest4Sampler(model,
                               backend=dnest4.backends.CSVBackend(".",
                                                                  sep=" "))
# Candidate interaction models; only the uncommented entries are sampled.
firstderivs_functions = [
    # 'late_intxde'
    # ,'heaviside_late_int'
    # ,'late_int'
    # ,'expgamma'
    # ,'txgamma'
    # ,'zxgamma'
    # ,'gamma_over_z'
    # ,'zxxgamma'
    # ,'gammaxxz'
    ## ,'rdecay_m' # nan field
    # ,'rdecay_de'
    ## ,'rdecay_mxde' # nan field
    # ,'rdecay'
    ## ,'interacting' # nan field
    'LCDM'
    ]
for key in firstderivs_functions:
    # Per-model prior range for gamma (read by Model via module globals).
    if key == 'rdecay':
        g_min = -10
        g_max = 0
    elif key == 'late_int' or key =='heaviside_late_int' or key=='late_intxde':
        g_min = -1.45
        g_max = 0.2
    elif key == 'interacting':
        g_min = -1.45
        g_max = 1.45
    elif key == 'expgamma':
        g_min = -25
        g_max = 25
    elif key == 'zxxgamma' or key == 'gammaxxz':
        g_min = 0
        g_max = 10
    else:
        g_min = -10
        g_max = 10
    # Run-length presets selected by the module-level `speed` flag.
    if speed == 3:
        # LONG Set up the sampler. The first argument is max_num_levels
        gen = sampler.sample(max_num_levels=30, num_steps=1000,
                             new_level_interval=10000, num_per_step=10000,
                             thread_steps=100, num_particles=5,
                             lam=10, beta=100, seed=1234)
    elif speed == 2:
        # MEDIUM num_per_step can be down to a few thousand
        gen = sampler.sample(max_num_levels=30, num_steps=1000,
                             new_level_interval=1000, num_per_step=1000,
                             thread_steps=100, num_particles=5,
                             lam=10, beta=100, seed=1234)
    elif speed == 1:
        # SHORT
        gen = sampler.sample(max_num_levels=1, num_steps=100,
                             new_level_interval=100, num_per_step=100,
                             thread_steps=10, num_particles=5,
                             lam=10, beta=100, seed=1234)
    # import cProfile, pstats, io
    # pr = cProfile.Profile()
    # pr.enable()
    ti = time.time()
    # Do the sampling (one iteration here = one particle save)
    for i, sample in enumerate(gen):
        # print("# Saved {k} particles.".format(k=(i+1)))
        pass
    tf = time.time()
    # pr.disable()
    # s = io.StringIO()
    # sortby = 'cumulative'
    # ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
    # ps.print_stats()
    # print (s.getvalue())
    dnest_time = tools.timer('Bfactor', ti, tf)
    print('testing =',key)
    print('data =', dataname)
    print('sigma =', sigma)
    # Run the postprocessing; info = (log(Z), information, ...)
    info = dnest4.postprocess()
    if speed > 1:
        # Write a short human-readable summary of the run.
        f = open('brief.txt','w')
        f.write(dnest_time +'\n'
                +'model = '+key +'\n'
                +'data = '+ dataname +'\n'
                +'sigma = '+str(sigma) +'\n'
                +'log(Z) = '+str(info[0]) +'\n'
                +'Information = '+str(info[1]) +'\n'
                +'speed = '+str(speed))
        f.close()
    # Persist the marginal likelihood for later model comparison.
    pickle.dump(info[0], open('evidence.p', 'wb'))
    # Moving output .txt files into a run specific folder.
    results.relocate('evidence.p', speed, key)
    results.relocate('levels.txt', speed, key)
    results.relocate('posterior_sample.txt', speed, key)
    results.relocate('sample_info.txt', speed, key)
    results.relocate('sample.txt', speed, key)
    results.relocate('sampler_state.txt', speed, key)
    results.relocate('weights.txt', speed, key)
    results.relocate('brief.txt', speed, key)
    results.relocate('plot_1.pdf', speed, key)
    results.relocate('plot_2.pdf', speed, key)
    results.relocate('plot_3.pdf', speed, key)
#import six
#import sys
## Run the postprocessing to get marginal likelihood and generate posterior
#samples logZdnest4, infogaindnest4, plot = dnest4.postprocess()
#
#postsamples = np.loadtxt('posterior_sample.txt')
#
#print(six.u('Marginalised evidence is {}'.format(logZdnest4)))
#
#print('Number of posterior samples is {}'.format(postsamples.shape[0]))
#
## plot posterior samples (if corner.py is installed)
#try:
# import matplotlib as mpl
# mpl.use("Agg") # force Matplotlib backend to Agg
# import corner # import corner.py
#except ImportError:
# sys.exit(1)
#
#m = 0.3
#g=0
#fig = corner.corner(postsamples, labels=[r"$m$", r"$c$"], truths=[m, g])
#fig.savefig('DNest4.png')
# LCDM
#log(Z) = -1622866.8534441872
#Information = 14.078678027261049 nats.
#Effective sample size = 129.22232212112772
#time 297min 50s
#log(Z) = -1622866.790641218
#Information = 13.905435690656304 nats.
#Effective sample size = 167.73507536834273
#time 34 min
#rdecay
#log(Z) = -1622866.8177826053
#Information = 13.970533838961273 nats.
#Effective sample size = 85.54638980461822
#Sampling time: 37min 5s
############ 0.01 sigma data
#Hdecay
#Sampling time: 38min 57s
#log(Z) = -1158842.6212481956
#Information = 26.434626991627738 nats.
#Effective sample size = 116.96489141639181
#edecay
#Sampling time: 45min 57s
#log(Z) = -49925.259544267705
#Information = 19.683044903278642 nats.
#Effective sample size = 162.7283801030449
#LCDM
#Sampling time: 31min 52s
#log(Z) = -1622866.7230921672
#Information = 13.870062695583329 nats.
#Effective sample size = 178.67158154325102
############ 0.1 sigma data
#Hdecay
#Sampling time: 26min 26s
#data = mag_z_LCDM_1000_sigma_0.1
#sigma = 0.1
#log(Z) = -11392.938034458695
#Information = 16.85219457607309 nats.
#Effective sample size = 216.9365844057018
#rdecay
#Sampling time: 25min 4s
#data = mag_z_LCDM_1000_sigma_0.1
#sigma = 0.1
#log(Z) = -16069.573635539238
#Information = 8.730470507740392 nats.
#Effective sample size = 172.4071834775586
#LCDM
#Sampling time: 23min 45s
#data = mag_z_LCDM_1000_sigma_0.1
#sigma = 0.1
#log(Z) = -16070.356294581907
#Information = 9.449718869756907 nats.
#Effective sample size = 142.47418654118337
|
lefthandedroo/Cosmo-models
|
zprev versions/Bfactor_bad_prior.py
|
Python
|
mit
| 8,675
|
[
"Gaussian"
] |
58572b51c3e58696a462bc4ef3b15fa8aa2f50b7605fb1d43620ef3b5fed9e07
|
# Authors: Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
# Many of the computations in this code were derived from Matti Hämäläinen's
# C code.
from copy import deepcopy
from functools import partial
from gzip import GzipFile
import os
import os.path as op
import numpy as np
from scipy import sparse, linalg
from .io.constants import FIFF
from .io.meas_info import create_info, Info
from .io.tree import dir_tree_find
from .io.tag import find_tag, read_tag
from .io.open import fiff_open
from .io.write import (start_block, end_block, write_int,
write_float_sparse_rcs, write_string,
write_float_matrix, write_int_matrix,
write_coord_trans, start_file, end_file, write_id)
from .io.pick import channel_type, _picks_to_idx
from .bem import read_bem_surfaces
from .fixes import _get_img_fdata
from .surface import (read_surface, _create_surf_spacing, _get_ico_surface,
_tessellate_sphere_surf, _get_surf_neighbors,
_normalize_vectors, _triangle_neighbors, mesh_dist,
complete_surface_info, _compute_nearest, fast_cross_3d,
_CheckInside)
from .utils import (get_subjects_dir, check_fname, logger, verbose, fill_doc,
_ensure_int, check_version, _get_call_line, warn,
_check_fname, _check_path_like, has_nibabel, _check_sphere,
_validate_type, _check_option, _is_numeric, _pl, _suggest,
object_size, sizeof_fmt)
from .parallel import parallel_func, check_n_jobs
from .transforms import (invert_transform, apply_trans, _print_coord_trans,
combine_transforms, _get_trans,
_coord_frame_name, Transform, _str_to_frame,
_ensure_trans, read_ras_mni_t)
def read_freesurfer_lut(fname=None):
    """Read a Freesurfer-formatted LUT.

    Parameters
    ----------
    fname : str | None
        The filename. Can be None to read the standard Freesurfer LUT.

    Returns
    -------
    atlas_ids : dict
        Mapping from label names to IDs.
    colors : dict
        Mapping from label names to colors.
    """
    lut = _get_lut(fname)
    names = lut['name']
    # One RGBA row per label, as floats
    rgba = np.array([lut['R'], lut['G'], lut['B'], lut['A']], float).T
    atlas_ids = {name: id_ for name, id_ in zip(names, lut['id'])}
    colors = {name: row for name, row in zip(names, rgba)}
    return atlas_ids, colors
def _get_lut(fname=None):
    """Get a FreeSurfer LUT.

    Loads the color lookup table file as a dict of parallel arrays keyed
    by column name ('id', 'name', 'R', 'G', 'B', 'A').
    """
    _validate_type(fname, ('path-like', None), 'fname')
    if fname is None:
        fname = op.join(op.dirname(__file__), 'data', 'FreeSurferColorLUT.txt')
    _check_fname(fname, 'read', must_exist=True)
    # Column names and the numpy dtype each column is stored with
    dtype = [('id', '<i8'), ('name', 'U'),
             ('R', '<i8'), ('G', '<i8'), ('B', '<i8'), ('A', '<i8')]
    columns = {key: [] for key, _ in dtype}
    with open(fname, 'r') as fid:
        for raw in fid:
            raw = raw.strip()
            # Skip blank lines and comments
            if not raw or raw.startswith('#'):
                continue
            parts = raw.split()
            if len(parts) != len(dtype):
                raise RuntimeError(f'LUT is improperly formatted: {fname}')
            for (key, _), value in zip(dtype, parts):
                columns[key].append(value)
    lut = {key: np.array(columns[key], dtype=kind) for key, kind in dtype}
    assert len(lut['name']) > 0
    return lut
def _get_lut_id(lut, label):
"""Convert a label to a LUT ID number."""
assert isinstance(label, str)
mask = (lut['name'] == label)
assert mask.sum() == 1
return lut['id'][mask]
_src_kind_dict = {
'vol': 'volume',
'surf': 'surface',
'discrete': 'discrete',
}
class SourceSpaces(list):
    """Represent a list of source space.

    Currently implemented as a list of dictionaries containing the source
    space information

    Parameters
    ----------
    source_spaces : list
        A list of dictionaries containing the source space information.
    info : dict
        Dictionary with information about the creation of the source space
        file. Has keys 'working_dir' and 'command_line'.

    Attributes
    ----------
    info : dict
        Dictionary with information about the creation of the source space
        file. Has keys 'working_dir' and 'command_line'.
    """

    def __init__(self, source_spaces, info=None):  # noqa: D102
        # First check the types is actually a valid config
        _validate_type(source_spaces, list, 'source_spaces')
        super(SourceSpaces, self).__init__(source_spaces)  # list
        self.kind  # will raise an error if there is a problem
        if info is None:
            self.info = dict()
        else:
            self.info = dict(info)

    @property
    def kind(self):
        """Kind of the source spaces: 'surface', 'volume', 'discrete' or 'mixed'."""
        types = list()
        for si, s in enumerate(self):
            _validate_type(s, dict, 'source_spaces[%d]' % (si,))
            types.append(s.get('type', None))
            _check_option('source_spaces[%d]["type"]' % (si,),
                          types[-1], ('surf', 'discrete', 'vol'))
        if all(k == 'surf' for k in types[:2]):
            # Surfaces are only allowed as the leading (<=2) source spaces
            surf_check = 2
            if len(types) == 2:
                kind = 'surface'
            else:
                kind = 'mixed'
        else:
            surf_check = 0
            if all(k == 'discrete' for k in types):
                kind = 'discrete'
            else:
                kind = 'volume'
        if any(k == 'surf' for k in types[surf_check:]):
            raise RuntimeError('Invalid source space with kinds %s' % (types,))
        return kind

    @verbose
    def plot(self, head=False, brain=None, skull=None, subjects_dir=None,
             trans=None, verbose=None):
        """Plot the source space.

        Parameters
        ----------
        head : bool
            If True, show head surface.
        brain : bool | str
            If True, show the brain surfaces. Can also be a str for
            surface type (e.g., 'pial', same as True). Default is None,
            which means 'white' for surface source spaces and False otherwise.
        skull : bool | str | list of str | list of dict | None
            Whether to plot skull surface. If string, common choices would be
            'inner_skull', or 'outer_skull'. Can also be a list to plot
            multiple skull surfaces. If a list of dicts, each dict must
            contain the complete surface info (such as you get from
            :func:`mne.make_bem_model`). True is an alias of 'outer_skull'.
            The subjects bem and bem/flash folders are searched for the 'surf'
            files. Defaults to None, which is False for surface source spaces,
            and True otherwise.
        subjects_dir : str | None
            Path to SUBJECTS_DIR if it is not set in the environment.
        trans : str | 'auto' | dict | None
            The full path to the head<->MRI transform ``*-trans.fif`` file
            produced during coregistration. If trans is None, an identity
            matrix is assumed. This is only needed when the source space is in
            head coordinates.
        %(verbose_meth)s

        Returns
        -------
        fig : instance of mayavi.mlab.Figure
            The figure.
        """
        from .viz import plot_alignment
        surfaces = list()
        bem = None
        if brain is None:
            brain = 'white' if any(ss['type'] == 'surf'
                                   for ss in self) else False
        if isinstance(brain, str):
            surfaces.append(brain)
        elif brain:
            surfaces.append('brain')
        if skull is None:
            skull = False if self.kind == 'surface' else True
        if isinstance(skull, str):
            surfaces.append(skull)
        elif skull is True:
            surfaces.append('outer_skull')
        elif skull is not False:  # list
            if isinstance(skull[0], dict):  # bem
                skull_map = {FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'inner_skull',
                             FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer_skull',
                             FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer_skin'}
                for this_skull in skull:
                    surfaces.append(skull_map[this_skull['id']])
                bem = skull
            else:  # list of str
                for surf in skull:
                    surfaces.append(surf)
        if head:
            surfaces.append('head')
        if self[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
            coord_frame = 'head'
            if trans is None:
                raise ValueError('Source space is in head coordinates, but no '
                                 'head<->MRI transform was given. Please '
                                 'specify the full path to the appropriate '
                                 '*-trans.fif file as the "trans" parameter.')
        else:
            coord_frame = 'mri'
        info = create_info(0, 1000., 'eeg')
        return plot_alignment(
            info, trans=trans, subject=self._subject,
            subjects_dir=subjects_dir, surfaces=surfaces,
            coord_frame=coord_frame, meg=(), eeg=False, dig=False, ecog=False,
            bem=bem, src=self
        )

    def __getitem__(self, *args, **kwargs):
        """Get an item, wrapping list slices back into SourceSpaces."""
        out = super().__getitem__(*args, **kwargs)
        if isinstance(out, list):
            out = SourceSpaces(out)
        return out

    def __repr__(self):  # noqa: D105
        ss_repr = []
        extra = []
        for si, ss in enumerate(self):
            ss_type = ss['type']
            r = _src_kind_dict[ss_type]
            if ss_type == 'vol':
                if 'seg_name' in ss:
                    r += " (%s)" % (ss['seg_name'],)
                else:
                    r += ", shape=%s" % (ss['shape'],)
            elif ss_type == 'surf':
                r += (" (%s), n_vertices=%i" % (_get_hemi(ss)[0], ss['np']))
            r += ', n_used=%i' % (ss['nuse'],)
            if si == 0:
                extra += ['%s coords'
                          % (_coord_frame_name(int(ss['coord_frame'])))]
            ss_repr.append('<%s>' % r)
        subj = self._subject
        if subj is not None:
            extra += ['subject %r' % (subj,)]
        sz = object_size(self)
        if sz is not None:
            extra += [f'~{sizeof_fmt(sz)}']
        return "<SourceSpaces: [%s] %s>" % (
            ', '.join(ss_repr), ', '.join(extra))

    @property
    def _subject(self):
        # All source spaces of one instance share the same subject
        return self[0].get('subject_his_id', None)

    def __add__(self, other):
        """Combine source spaces."""
        out = self.copy()
        out += other
        return SourceSpaces(out)

    def copy(self):
        """Make a copy of the source spaces.

        Returns
        -------
        src : instance of SourceSpaces
            The copied source spaces.
        """
        return deepcopy(self)

    def __deepcopy__(self, memodict):
        """Make a deepcopy."""
        # don't copy read-only views (saves a ton of mem for split-vol src)
        info = deepcopy(self.info, memodict)
        ss = list()
        for s in self:
            for key in ('rr', 'nn'):
                if key in s:
                    arr = s[key]
                    id_ = id(arr)
                    if id_ not in memodict:
                        if not arr.flags.writeable:
                            # register the array itself so deepcopy reuses it
                            memodict[id_] = arr
            ss.append(deepcopy(s, memodict))
        return SourceSpaces(ss, info)

    def save(self, fname, overwrite=False):
        """Save the source spaces to a fif file.

        Parameters
        ----------
        fname : str
            File to write.
        overwrite : bool
            If True, the destination file (if it exists) will be overwritten.
            If False (default), an error will be raised if the file exists.
        """
        write_source_spaces(fname, self, overwrite)

    @verbose
    def export_volume(self, fname, include_surfaces=True,
                      include_discrete=True, dest='mri', trans=None,
                      mri_resolution=False, use_lut=True, overwrite=False,
                      verbose=None):
        """Export source spaces to nifti or mgz file.

        Parameters
        ----------
        fname : str
            Name of nifti or mgz file to write.
        include_surfaces : bool
            If True, include surface source spaces.
        include_discrete : bool
            If True, include discrete source spaces.
        dest : 'mri' | 'surf'
            If 'mri' the volume is defined in the coordinate system of the
            original T1 image. If 'surf' the coordinate system of the
            FreeSurfer surface is used (Surface RAS).
        trans : dict, str, or None
            Either a transformation filename (usually made using mne_analyze)
            or an info dict (usually opened using read_trans()). If string, an
            ending of ``.fif`` or ``.fif.gz`` will be assumed to be in FIF
            format, any other ending will be assumed to be a text file with a
            4x4 transformation matrix (like the ``--trans`` MNE-C option.
            Must be provided if source spaces are in head coordinates and
            include_surfaces and mri_resolution are True.
        mri_resolution : bool | str
            If True, the image is saved in MRI resolution
            (e.g. 256 x 256 x 256), and each source region (surface or
            segmentation volume) filled in completely. If "sparse", only a
            single voxel in the high-resolution MRI is filled in for each
            source point.

            .. versionchanged:: 0.21.0
               Support for "sparse" was added.
        use_lut : bool
            If True, assigns a numeric value to each source space that
            corresponds to a color on the freesurfer lookup table.
        overwrite : bool
            If True, overwrite the file if it exists.

            .. versionadded:: 0.19
        %(verbose_meth)s

        Notes
        -----
        This method requires nibabel.
        """
        _check_fname(fname, overwrite)
        _validate_type(mri_resolution, (bool, str), 'mri_resolution')
        if isinstance(mri_resolution, str):
            _check_option('mri_resolution', mri_resolution, ["sparse"],
                          extra='when mri_resolution is a string')
        else:
            mri_resolution = bool(mri_resolution)
        fname = str(fname)
        # import nibabel or raise error
        try:
            import nibabel as nib
        except ImportError:
            raise ImportError('This function requires nibabel.')
        # Check coordinate frames of each source space
        coord_frames = np.array([s['coord_frame'] for s in self])
        # Raise error if trans is not provided when head coordinates are used
        # and mri_resolution and include_surfaces are true
        if (coord_frames == FIFF.FIFFV_COORD_HEAD).all():
            coords = 'head'  # all sources in head coordinates
            if mri_resolution and include_surfaces:
                if trans is None:
                    raise ValueError('trans containing mri to head transform '
                                     'must be provided if mri_resolution and '
                                     'include_surfaces are true and surfaces '
                                     'are in head coordinates')
            elif trans is not None:
                logger.info('trans is not needed and will not be used unless '
                            'include_surfaces and mri_resolution are True.')
        elif (coord_frames == FIFF.FIFFV_COORD_MRI).all():
            coords = 'mri'  # all sources in mri coordinates
            if trans is not None:
                logger.info('trans is not needed and will not be used unless '
                            'sources are in head coordinates.')
        # Raise error if all sources are not in the same space, or sources are
        # not in mri or head coordinates
        else:
            raise ValueError('All sources must be in head coordinates or all '
                             'sources must be in mri coordinates.')
        # use lookup table to assign values to source spaces
        logger.info('Reading FreeSurfer lookup table')
        # read the lookup table
        lut = _get_lut()
        # Setup a dictionary of source types
        src_types = dict(volume=[], surface_discrete=[])
        # Populate dictionary of source types
        for src in self:
            # volume sources
            if src['type'] == 'vol':
                src_types['volume'].append(src)
            # surface and discrete sources
            elif src['type'] in ('surf', 'discrete'):
                src_types['surface_discrete'].append(src)
            else:
                raise ValueError('Unrecognized source type: %s.' % src['type'])
        # Raise error if there are no volume source spaces
        if len(src_types['volume']) == 0:
            raise ValueError('Source spaces must contain at least one volume.')
        # Get shape, inuse array and interpolation matrix from volume sources
        src = src_types['volume'][0]
        aseg_data = None
        if mri_resolution:
            # read the mri file used to generate volumes
            if mri_resolution is True:
                aseg_data = _get_img_fdata(nib.load(src['mri_file']))
            # get the voxel space shape
            shape3d = (src['mri_width'], src['mri_depth'],
                       src['mri_height'])
        else:
            # get the volume source space shape
            # read the shape in reverse order
            # (otherwise results are scrambled)
            shape3d = src['shape']
        # calculate affine transform for image (MRI_VOXEL to RAS)
        if mri_resolution:
            # MRI_VOXEL to MRI transform
            transform = src['vox_mri_t']
        else:
            # MRI_VOXEL to MRI transform
            # NOTE: 'src' indicates downsampled version of MRI_VOXEL
            transform = src['src_mri_t']
        # Figure out how to get from our input source space to output voxels
        fro_dst_t = invert_transform(transform)
        # NOTE(review): this rebinding shadows the ``dest`` parameter
        # ('mri' | 'surf') with a coordinate-frame id, so the
        # ``dest == 'mri'`` comparison near the end of this method can
        # never compare against the user's choice -- confirm against
        # upstream before changing.
        dest = transform['to']
        if coords == 'head':
            head_mri_t = _get_trans(trans, 'head', 'mri')[0]
            fro_dst_t = combine_transforms(head_mri_t, fro_dst_t, 'head', dest)
        # Fill in the volumes
        img = np.zeros(shape3d)
        for ii, vs in enumerate(src_types['volume']):
            # read the lookup table value for segmented volume
            if 'seg_name' not in vs:
                raise ValueError('Volume sources should be segments, '
                                 'not the entire volume.')
            # find the color value for this volume
            use_id = 1.
            if mri_resolution is True or use_lut:
                id_ = _get_lut_id(lut, vs['seg_name'])
                if use_lut:
                    use_id = id_
            if mri_resolution == 'sparse':
                idx = apply_trans(fro_dst_t, vs['rr'][vs['vertno']])
                idx = tuple(idx.round().astype(int).T)
            elif mri_resolution is True:  # fill the represented vol
                # get the values for this volume
                idx = (aseg_data == id_)
            else:
                assert mri_resolution is False
                idx = vs['inuse'].reshape(shape3d, order='F').astype(bool)
            img[idx] = use_id
        # loop through the surface and discrete source spaces
        # get the surface names (assumes left, right order. may want
        # to add these names during source space generation
        for src in src_types['surface_discrete']:
            val = 1
            if src['type'] == 'surf':
                if not include_surfaces:
                    continue
                if use_lut:
                    surf_name = {
                        FIFF.FIFFV_MNE_SURF_LEFT_HEMI: 'Left',
                        FIFF.FIFFV_MNE_SURF_RIGHT_HEMI: 'Right',
                    }[src['id']] + '-Cerebral-Cortex'
                    val = _get_lut_id(lut, surf_name)
            else:
                assert src['type'] == 'discrete'
                if not include_discrete:
                    continue
                if use_lut:
                    logger.info('Discrete sources do not have values on '
                                'the lookup table. Defaulting to 1.')
            # convert vertex positions from their native space
            # (either HEAD or MRI) to MRI_VOXEL space
            if mri_resolution is True:
                use_rr = src['rr']
            else:
                assert mri_resolution is False or mri_resolution == 'sparse'
                use_rr = src['rr'][src['vertno']]
            srf_vox = apply_trans(fro_dst_t['trans'], use_rr)
            # convert to numeric indices
            ix_, iy_, iz_ = srf_vox.T.round().astype(int)
            # clip indices outside of volume space
            # (bugfix: a stray trailing comma previously made ``ix`` a
            # 1-tuple instead of an ndarray)
            ix = np.clip(ix_, 0, shape3d[0] - 1)
            iy = np.clip(iy_, 0, shape3d[1] - 1)
            iz = np.clip(iz_, 0, shape3d[2] - 1)
            # compare original and clipped indices
            n_diff = ((ix_ != ix) | (iy_ != iy) | (iz_ != iz)).sum()
            # generate use warnings for clipping
            if n_diff > 0:
                warn(f'{n_diff} {src["type"]} vertices lay outside of volume '
                     f'space. Consider using a larger volume space.')
            # get surface id or use default value
            # update image to include surface voxels
            img[ix, iy, iz] = val
        if dest == 'mri':
            # combine with MRI to RAS transform
            # NOTE(review): ``vs`` here is the last volume source left over
            # from the loop above -- presumably all volumes share the same
            # 'mri_ras_t'; verify before relying on this.
            transform = combine_transforms(
                transform, vs['mri_ras_t'],
                transform['from'], vs['mri_ras_t']['to'])
        # now setup the affine for volume image
        affine = transform['trans'].copy()
        # make sure affine converts from m to mm
        affine[:3] *= 1e3
        # setup image for file
        if fname.endswith(('.nii', '.nii.gz')):  # save as nifit
            # setup the nifti header
            hdr = nib.Nifti1Header()
            hdr.set_xyzt_units('mm')
            # save the nifti image
            img = nib.Nifti1Image(img, affine, header=hdr)
        elif fname.endswith('.mgz'):  # save as mgh
            # convert to float32 (float64 not currently supported)
            img = img.astype('float32')
            # save the mgh image
            img = nib.freesurfer.mghformat.MGHImage(img, affine)
        else:
            raise ValueError('Unrecognized file extension')
        # write image to file
        nib.save(img, fname)
def _add_patch_info(s):
"""Patch information in a source space.
Generate the patch information from the 'nearest' vector in
a source space. For vertex in the source space it provides
the list of neighboring vertices in the high resolution
triangulation.
Parameters
----------
s : dict
The source space.
"""
nearest = s['nearest']
if nearest is None:
s['pinfo'] = None
s['patch_inds'] = None
return
logger.info(' Computing patch statistics...')
indn = np.argsort(nearest)
nearest_sorted = nearest[indn]
steps = np.where(nearest_sorted[1:] != nearest_sorted[:-1])[0] + 1
starti = np.r_[[0], steps]
stopi = np.r_[steps, [len(nearest)]]
pinfo = list()
for start, stop in zip(starti, stopi):
pinfo.append(np.sort(indn[start:stop]))
s['pinfo'] = pinfo
# compute patch indices of the in-use source space vertices
patch_verts = nearest_sorted[steps - 1]
s['patch_inds'] = np.searchsorted(patch_verts, s['vertno'])
logger.info(' Patch information added...')
@verbose
def _read_source_spaces_from_tree(fid, tree, patch_stats=False, verbose=None):
    """Read every source space stored under a FIF tree.

    Parameters
    ----------
    fid : file descriptor
        An open file descriptor.
    tree : dict
        The FIF tree structure if source is a file id.
    patch_stats : bool, optional (default False)
        Calculate and add cortical patch statistics to the surfaces.
    %(verbose)s

    Returns
    -------
    src : SourceSpaces
        The source spaces.
    """
    space_nodes = dir_tree_find(tree, FIFF.FIFFB_MNE_SOURCE_SPACE)
    if not space_nodes:
        raise ValueError('No source spaces found')
    spaces_read = list()
    for node in space_nodes:
        logger.info(' Reading a source space...')
        space = _read_one_source_space(fid, node)
        logger.info(' [done]')
        if patch_stats:
            _complete_source_space_info(space)
        spaces_read.append(space)
    logger.info(' %d source spaces read' % len(space_nodes))
    return SourceSpaces(spaces_read)
@verbose
def read_source_spaces(fname, patch_stats=False, verbose=None):
    """Read the source spaces from a FIF file.

    Parameters
    ----------
    fname : str
        The name of the file, which should end with -src.fif or
        -src.fif.gz.
    patch_stats : bool, optional (default False)
        Calculate and add cortical patch statistics to the surfaces.
    %(verbose)s

    Returns
    -------
    src : SourceSpaces
        The source spaces.

    See Also
    --------
    write_source_spaces, setup_source_space, setup_volume_source_space
    """
    # be more permissive on read than write (fwd/inv can contain src)
    endings = ('-src.fif', '-src.fif.gz',
               '_src.fif', '_src.fif.gz',
               '-fwd.fif', '-fwd.fif.gz',
               '_fwd.fif', '_fwd.fif.gz',
               '-inv.fif', '-inv.fif.gz',
               '_inv.fif', '_inv.fif.gz')
    check_fname(fname, 'source space', endings)
    ff, tree, _ = fiff_open(fname)
    with ff as fid:
        src = _read_source_spaces_from_tree(fid, tree, patch_stats=patch_stats,
                                            verbose=verbose)
        src.info['fname'] = fname
        # Recover environment info (working dir / command line), if stored
        env_nodes = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
        if env_nodes:
            env = env_nodes[0]
            for p in range(env['nent']):
                entry = env['directory'][p]
                tag = read_tag(fid, entry.pos)
                if entry.kind == FIFF.FIFF_MNE_ENV_WORKING_DIR:
                    src.info['working_dir'] = tag.data
                elif entry.kind == FIFF.FIFF_MNE_ENV_COMMAND_LINE:
                    src.info['command_line'] = tag.data
    return src
def _read_one_source_space(fid, this):
    """Read one source space from an open FIF file.

    Parameters
    ----------
    fid : file descriptor
        An open FIF file descriptor.
    this : dict
        The FIF tree node of a single FIFFB_MNE_SOURCE_SPACE block.

    Returns
    -------
    res : dict
        The source space, with keys such as 'id', 'type', 'rr', 'nn',
        'tris', 'inuse', 'vertno', 'nearest', 'dist', etc.
    """
    res = dict()
    # Source space id (falls back to "unknown" when the tag is absent)
    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_ID)
    if tag is None:
        res['id'] = int(FIFF.FIFFV_MNE_SURF_UNKNOWN)
    else:
        res['id'] = int(tag.data)
    # Source space type: surface, volume, or discrete
    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE)
    if tag is None:
        raise ValueError('Unknown source space type')
    else:
        src_type = int(tag.data)
        if src_type == FIFF.FIFFV_MNE_SPACE_SURFACE:
            res['type'] = 'surf'
        elif src_type == FIFF.FIFFV_MNE_SPACE_VOLUME:
            res['type'] = 'vol'
        elif src_type == FIFF.FIFFV_MNE_SPACE_DISCRETE:
            res['type'] = 'discrete'
        else:
            raise ValueError('Unknown source space type (%d)' % src_type)
    # Volume-only metadata: voxel grid, transforms, parent MRI info
    if res['type'] == 'vol':
        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS)
        if tag is not None:
            res['shape'] = tuple(tag.data)
        tag = find_tag(fid, this, FIFF.FIFF_COORD_TRANS)
        if tag is not None:
            res['src_mri_t'] = tag.data
        parent_mri = dir_tree_find(this, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
        if len(parent_mri) == 0:
            # MNE 2.7.3 (and earlier) didn't store necessary information
            # about volume coordinate translations. Although there is a
            # FFIF_COORD_TRANS in the higher level of the FIFF file, this
            # doesn't contain all the info we need. Safer to return an
            # error unless a user really wants us to add backward compat.
            raise ValueError('Can not find parent MRI location. The volume '
                             'source space may have been made with an MNE '
                             'version that is too old (<= 2.7.3). Consider '
                             'updating and regenerating the inverse.')
        mri = parent_mri[0]
        # Pick up voxel->MRI and MRI->RAS transforms from the MRI block
        for d in mri['directory']:
            if d.kind == FIFF.FIFF_COORD_TRANS:
                tag = read_tag(fid, d.pos)
                trans = tag.data
                if trans['from'] == FIFF.FIFFV_MNE_COORD_MRI_VOXEL:
                    res['vox_mri_t'] = tag.data
                if trans['to'] == FIFF.FIFFV_MNE_COORD_RAS:
                    res['mri_ras_t'] = tag.data
        tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR)
        if tag is not None:
            res['interpolator'] = tag.data
            # An empty sparse matrix means no usable interpolator was stored
            if tag.data.data.size == 0:
                del res['interpolator']
        else:
            logger.info("Interpolation matrix for MRI not found.")
        tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE)
        if tag is not None:
            res['mri_file'] = tag.data
        tag = find_tag(fid, mri, FIFF.FIFF_MRI_WIDTH)
        if tag is not None:
            res['mri_width'] = int(tag.data)
        tag = find_tag(fid, mri, FIFF.FIFF_MRI_HEIGHT)
        if tag is not None:
            res['mri_height'] = int(tag.data)
        tag = find_tag(fid, mri, FIFF.FIFF_MRI_DEPTH)
        if tag is not None:
            res['mri_depth'] = int(tag.data)
        tag = find_tag(fid, mri, FIFF.FIFF_MNE_FILE_NAME)
        if tag is not None:
            res['mri_volume_name'] = tag.data
        # Neighbor lists are stored flattened; split them by per-vertex counts
        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS)
        if tag is not None:
            nneighbors = tag.data
            tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS)
            offset = 0
            neighbors = []
            for n in nneighbors:
                neighbors.append(tag.data[offset:offset + n])
                offset += n
            res['neighbor_vert'] = neighbors
        # For segmented volumes the comment holds the segment name
        tag = find_tag(fid, this, FIFF.FIFF_COMMENT)
        if tag is not None:
            res['seg_name'] = tag.data
    # Number of vertices (mandatory)
    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
    if tag is None:
        raise ValueError('Number of vertices not found')
    res['np'] = int(tag.data)
    # Number of triangles (two possible tags; default to 0)
    tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NTRI)
    if tag is None:
        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI)
        if tag is None:
            res['ntri'] = 0
        else:
            res['ntri'] = int(tag.data)
    else:
        res['ntri'] = tag.data
    tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
    if tag is None:
        raise ValueError('Coordinate frame information not found')
    res['coord_frame'] = tag.data[0]
    # Vertices, normals, and triangles
    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS)
    if tag is None:
        raise ValueError('Vertex data not found')
    res['rr'] = tag.data.astype(np.float64)  # double precision for mayavi
    if res['rr'].shape[0] != res['np']:
        raise ValueError('Vertex information is incorrect')
    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
    if tag is None:
        raise ValueError('Vertex normals not found')
    res['nn'] = tag.data.copy()
    if res['nn'].shape[0] != res['np']:
        raise ValueError('Vertex normal information is incorrect')
    if res['ntri'] > 0:
        # Triangulation can live under either of two tags
        tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_TRIANGLES)
        if tag is None:
            tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES)
            if tag is None:
                raise ValueError('Triangulation not found')
            else:
                res['tris'] = tag.data - 1  # index start at 0 in Python
        else:
            res['tris'] = tag.data - 1  # index start at 0 in Python
        if res['tris'].shape[0] != res['ntri']:
            raise ValueError('Triangulation information is incorrect')
    else:
        res['tris'] = None
    # Which vertices are active
    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE)
    if tag is None:
        res['nuse'] = 0
        res['inuse'] = np.zeros(res['nuse'], dtype=np.int64)
        res['vertno'] = None
    else:
        res['nuse'] = int(tag.data)
        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION)
        if tag is None:
            raise ValueError('Source selection information missing')
        res['inuse'] = tag.data.astype(np.int64).T
        if len(res['inuse']) != res['np']:
            raise ValueError('Incorrect number of entries in source space '
                             'selection')
        res['vertno'] = np.where(res['inuse'])[0]
    # Use triangulation
    tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI)
    tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES)
    if tag1 is None or tag2 is None:
        res['nuse_tri'] = 0
        res['use_tris'] = None
    else:
        res['nuse_tri'] = tag1.data
        res['use_tris'] = tag2.data - 1  # index start at 0 in Python
    # Patch-related information
    tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST)
    tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST)
    if tag1 is None or tag2 is None:
        res['nearest'] = None
        res['nearest_dist'] = None
    else:
        res['nearest'] = tag1.data
        res['nearest_dist'] = tag2.data.T
    _add_patch_info(res)
    # Distances (stored as one triangle of the symmetric matrix)
    tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST)
    tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT)
    if tag1 is None or tag2 is None:
        res['dist'] = None
        res['dist_limit'] = None
    else:
        res['dist'] = tag1.data
        res['dist_limit'] = tag2.data
        # Add the upper triangle
        res['dist'] = res['dist'] + res['dist'].T
    if (res['dist'] is not None):
        logger.info(' Distance information added...')
    # Subject this source space was created for, if recorded
    tag = find_tag(fid, this, FIFF.FIFF_SUBJ_HIS_ID)
    if tag is None:
        res['subject_his_id'] = None
    else:
        res['subject_his_id'] = tag.data
    return res
@verbose
def _complete_source_space_info(this, verbose=None):
    """Add derived triangle info (centers, normals, areas) to a surface."""
    # Main triangulation
    logger.info(' Completing triangulation info...')
    this['tri_area'] = np.zeros(this['ntri'])
    v1, v2, v3 = (this['rr'][this['tris'][:, k], :] for k in range(3))
    this['tri_cent'] = (v1 + v2 + v3) / 3.0
    this['tri_nn'] = fast_cross_3d((v2 - v1), (v3 - v1))
    # Area is half the cross-product magnitude
    this['tri_area'] = _normalize_vectors(this['tri_nn']) / 2.0
    logger.info('[done]')
    # Selected triangles
    logger.info(' Completing selection triangulation info...')
    if this['nuse_tri'] > 0:
        v1, v2, v3 = (this['rr'][this['use_tris'][:, k], :] for k in range(3))
        this['use_tri_cent'] = (v1 + v2 + v3) / 3.0
        this['use_tri_nn'] = fast_cross_3d((v2 - v1), (v3 - v1))
        this['use_tri_area'] = np.linalg.norm(this['use_tri_nn'], axis=1) / 2.
    logger.info('[done]')
def find_source_space_hemi(src):
    """Return the hemisphere id for a source space.

    Parameters
    ----------
    src : dict
        The source space to investigate.

    Returns
    -------
    hemi : int
        Deduced hemisphere id.
    """
    # A negative summed x-coordinate indicates the left hemisphere
    on_left = src['rr'][:, 0].sum() < 0
    return int(FIFF.FIFFV_MNE_SURF_LEFT_HEMI if on_left
               else FIFF.FIFFV_MNE_SURF_RIGHT_HEMI)
def label_src_vertno_sel(label, src):
    """Find vertex numbers and indices from label.

    Parameters
    ----------
    label : Label
        Source space label.
    src : dict
        Source space.

    Returns
    -------
    vertices : list of length 2
        Vertex numbers for lh and rh.
    src_sel : array of int (len(idx) = len(vertices[0]) + len(vertices[1]))
        Indices of the selected vertices in source space.
    """
    if src[0]['type'] != 'surf':
        # Bugfix: this exception used to be *returned* instead of raised,
        # silently handing callers an Exception instance as the result.
        raise Exception('Labels are only supported with surface source '
                        'spaces')
    vertno = [src[0]['vertno'], src[1]['vertno']]
    if label.hemi == 'lh':
        vertno_sel = np.intersect1d(vertno[0], label.vertices)
        src_sel = np.searchsorted(vertno[0], vertno_sel)
        vertno[0] = vertno_sel
        vertno[1] = np.array([], int)
    elif label.hemi == 'rh':
        vertno_sel = np.intersect1d(vertno[1], label.vertices)
        # Offset by the lh vertex count so indices address the stacked array
        src_sel = np.searchsorted(vertno[1], vertno_sel) + len(vertno[0])
        vertno[0] = np.array([], int)
        vertno[1] = vertno_sel
    elif label.hemi == 'both':
        vertno_sel_lh = np.intersect1d(vertno[0], label.lh.vertices)
        src_sel_lh = np.searchsorted(vertno[0], vertno_sel_lh)
        vertno_sel_rh = np.intersect1d(vertno[1], label.rh.vertices)
        src_sel_rh = np.searchsorted(vertno[1], vertno_sel_rh) + len(vertno[0])
        src_sel = np.hstack((src_sel_lh, src_sel_rh))
        vertno = [vertno_sel_lh, vertno_sel_rh]
    else:
        raise Exception("Unknown hemisphere type")
    return vertno, src_sel
def _get_vertno(src):
return [s['vertno'] for s in src]
###############################################################################
# Write routines
@verbose
def _write_source_spaces_to_fid(fid, src, verbose=None):
    """Write the source spaces to an open FIF file descriptor.

    Parameters
    ----------
    fid : file descriptor
        An open file descriptor.
    src : list
        The list of source spaces.
    %(verbose)s
    """
    for space in src:
        logger.info(' Write a source space...')
        # Each source space lives in its own FIF block
        start_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
        _write_one_source_space(fid, space, verbose)
        end_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
        logger.info(' [done]')
    logger.info(' %d source spaces written' % len(src))
@verbose
def write_source_spaces(fname, src, overwrite=False, verbose=None):
    """Write source spaces to a file.

    Parameters
    ----------
    fname : str
        The name of the file, which should end with -src.fif or
        -src.fif.gz.
    src : SourceSpaces
        The source spaces (as returned by read_source_spaces).
    overwrite : bool
        If True, the destination file (if it exists) will be overwritten.
        If False (default), an error will be raised if the file exists.
    %(verbose)s

    See Also
    --------
    read_source_spaces
    """
    check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz',
                                        '_src.fif', '_src.fif.gz'))
    _check_fname(fname, overwrite=overwrite)
    fid = start_file(fname)
    start_block(fid, FIFF.FIFFB_MNE)
    if src.info:
        # Record provenance (working dir / command line) when available
        start_block(fid, FIFF.FIFFB_MNE_ENV)
        write_id(fid, FIFF.FIFF_BLOCK_ID)
        for key, kind in (('working_dir', FIFF.FIFF_MNE_ENV_WORKING_DIR),
                          ('command_line', FIFF.FIFF_MNE_ENV_COMMAND_LINE)):
            value = src.info.get(key, None)
            if value:
                write_string(fid, kind, value)
        end_block(fid, FIFF.FIFFB_MNE_ENV)
    _write_source_spaces_to_fid(fid, src, verbose)
    end_block(fid, FIFF.FIFFB_MNE)
    end_file(fid)
def _write_one_source_space(fid, this, verbose=None):
    """Write one source space dict to an already-opened FIF block.

    Parameters
    ----------
    fid : file descriptor
        An open file descriptor positioned inside a source-space block.
    this : dict
        The source space to serialize.
    verbose : bool | str | int | None
        Accepted for call symmetry with the other writers; not used here.
    """
    # map the string 'type' to the corresponding FIF constant
    if this['type'] == 'surf':
        src_type = FIFF.FIFFV_MNE_SPACE_SURFACE
    elif this['type'] == 'vol':
        src_type = FIFF.FIFFV_MNE_SPACE_VOLUME
    elif this['type'] == 'discrete':
        src_type = FIFF.FIFFV_MNE_SPACE_DISCRETE
    else:
        raise ValueError('Unknown source space type (%s)' % this['type'])
    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE, src_type)
    # negative id means "unset" and is simply not written
    if this['id'] >= 0:
        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_ID, this['id'])
    data = this.get('subject_his_id', None)
    if data:
        write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, data)
    write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, this['coord_frame'])
    # point locations and normals
    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, this['np'])
    write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS, this['rr'])
    write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS, this['nn'])
    # Which vertices are active
    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION, this['inuse'])
    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE, this['nuse'])
    if this['ntri'] > 0:
        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI, this['ntri'])
        # triangle indices are stored 1-based in FIF files
        write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES,
                         this['tris'] + 1)
    if this['type'] != 'vol' and this['use_tris'] is not None:
        # Use triangulation
        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI, this['nuse_tri'])
        write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES,
                         this['use_tris'] + 1)
    if this['type'] == 'vol':
        neighbor_vert = this.get('neighbor_vert', None)
        if neighbor_vert is not None:
            # ragged neighbor lists are flattened; per-vertex counts are
            # stored alongside so they can be re-split on read
            nneighbors = np.array([len(n) for n in neighbor_vert])
            neighbors = np.concatenate(neighbor_vert)
            write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS, nneighbors)
            write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS, neighbors)
        write_coord_trans(fid, this['src_mri_t'])
        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS, this['shape'])
        # parent-MRI metadata (transforms, interpolator, dimensions)
        start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
        write_coord_trans(fid, this['mri_ras_t'])
        write_coord_trans(fid, this['vox_mri_t'])
        mri_volume_name = this.get('mri_volume_name', None)
        if mri_volume_name is not None:
            write_string(fid, FIFF.FIFF_MNE_FILE_NAME, mri_volume_name)
        mri_width, mri_height, mri_depth, nvox = _src_vol_dims(this)
        interpolator = this.get('interpolator')
        if interpolator is None:
            # no interpolator computed: store an all-zero sparse placeholder
            # with the correct shape
            interpolator = sparse.csr_matrix((nvox, this['np']))
        write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR,
                               interpolator)
        if 'mri_file' in this and this['mri_file'] is not None:
            write_string(fid, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE,
                         this['mri_file'])
        write_int(fid, FIFF.FIFF_MRI_WIDTH, mri_width)
        write_int(fid, FIFF.FIFF_MRI_HEIGHT, mri_height)
        write_int(fid, FIFF.FIFF_MRI_DEPTH, mri_depth)
        end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
    # Patch-related information
    if this['nearest'] is not None:
        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST, this['nearest'])
        write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST,
                           this['nearest_dist'])
    # Distances
    if this['dist'] is not None:
        # Save only upper triangular portion of the matrix
        dists = this['dist'].copy()
        dists = sparse.triu(dists, format=dists.format)
        write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST, dists)
        write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT,
                           this['dist_limit'])
    # Segmentation data
    if this['type'] == 'vol' and ('seg_name' in this):
        # Save the name of the segment
        write_string(fid, FIFF.FIFF_COMMENT, this['seg_name'])
##############################################################################
# Head to MRI volume conversion
@verbose
def head_to_mri(pos, subject, mri_head_t, subjects_dir=None,
                verbose=None):
    """Convert pos from head coordinate system to MRI ones.

    This function converts to MRI RAS coordinates and not to surface
    RAS.

    Parameters
    ----------
    pos : array, shape (n_pos, 3)
        The coordinates (in m) in head coordinate system.
    %(subject)s
    mri_head_t : instance of Transform
        MRI<->Head coordinate transformation.
    %(subjects_dir)s
    %(verbose)s

    Returns
    -------
    coordinates : array, shape (n_pos, 3)
        The MRI RAS coordinates (in mm) of pos.

    Notes
    -----
    This function requires nibabel.
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    # force the transform into the head -> MRI (surface RAS) direction
    head_mri_t = _ensure_trans(mri_head_t, 'head', 'mri')
    # the surface-RAS -> scanner-RAS transform comes from the T1 header
    t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
    mri_ras_t = _read_mri_info(t1_fname)[2]
    head_ras_t = combine_transforms(head_mri_t, mri_ras_t, 'head', 'ras')
    return 1e3 * apply_trans(head_ras_t, pos)  # mm
##############################################################################
# Surface to MNI conversion
@verbose
def vertex_to_mni(vertices, hemis, subject, subjects_dir=None, verbose=None):
    """Convert the array of vertices for a hemisphere to MNI coordinates.

    Parameters
    ----------
    vertices : int, or list of int
        Vertex number(s) to convert.
    hemis : int, or list of int
        Hemisphere(s) the vertices belong to.
    %(subject)s
    subjects_dir : str, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    %(verbose)s

    Returns
    -------
    coordinates : array, shape (n_vertices, 3)
        The MNI coordinates (in mm) of the vertices.
    """
    # accept scalars for convenience; remember to unwrap the result
    singleton = not isinstance(vertices, (list, np.ndarray))
    if singleton:
        vertices = [vertices]
    if not isinstance(hemis, (list, np.ndarray)):
        hemis = [hemis] * len(vertices)
    if len(hemis) != len(vertices):
        raise ValueError('hemi and vertices must match in length')
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    surfs = [op.join(subjects_dir, subject, 'surf', '%s.white' % h)
             for h in ['lh', 'rh']]
    # read surface locations in MRI space
    rr = [read_surface(s)[0] for s in surfs]
    # take point locations in MRI space and convert to MNI coordinates
    xfm = read_talxfm(subject, subjects_dir)
    xfm['trans'][:3, 3] *= 1000.  # m->mm
    data = np.array([rr[h][v, :] for h, v in zip(hemis, vertices)])
    if singleton:
        data = data[0]
    return apply_trans(xfm['trans'], data)
##############################################################################
# Volume to MNI conversion
@verbose
def head_to_mni(pos, subject, mri_head_t, subjects_dir=None,
                verbose=None):
    """Convert pos from head coordinate system to MNI ones.

    Parameters
    ----------
    pos : array, shape (n_pos, 3)
        The coordinates (in m) in head coordinate system.
    %(subject)s
    mri_head_t : instance of Transform
        MRI<->Head coordinate transformation.
    %(subjects_dir)s
    %(verbose)s

    Returns
    -------
    coordinates : array, shape (n_pos, 3)
        The MNI coordinates (in mm) of pos.

    Notes
    -----
    This function requires nibabel.
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    # head -> MRI (surface RAS), then MRI -> MNI Talairach
    head_mri_t = _ensure_trans(mri_head_t, 'head', 'mri')
    mri_mni_t = read_talxfm(subject, subjects_dir)
    head_mni_t = combine_transforms(head_mri_t, mri_mni_t, 'head', 'mni_tal')
    return apply_trans(head_mni_t, pos) * 1000.
@verbose
def read_talxfm(subject, subjects_dir=None, verbose=None):
    """Compute MRI-to-MNI transform from FreeSurfer talairach.xfm file.

    Parameters
    ----------
    %(subject)s
    %(subjects_dir)s
    %(verbose)s

    Returns
    -------
    mri_mni_t : instance of Transform
        The affine transformation from MRI to MNI space for the subject.
    """
    # Adapted from freesurfer m-files. Altered to deal with Norig
    # and Torig correctly
    subjects_dir = get_subjects_dir(subjects_dir)
    # Setup the RAS to MNI transform
    ras_mni_t = read_ras_mni_t(subject, subjects_dir)
    ras_mni_t['trans'][:3, 3] /= 1000.  # mm->m
    # talairach.xfm only maps scanner RAS ('ras') to MNI ('mni_tal'), but we
    # want FreeSurfer surface RAS ('mri') to MNI, so the mri->ras piece must
    # come from the MRI headers (Norig/Torig, i.e. vox_ras_t and vox_mri_t);
    # prefer orig.mgz, fall back to T1.mgz
    for volume in ('orig.mgz', 'T1.mgz'):
        path = op.join(subjects_dir, subject, 'mri', volume)
        if op.isfile(path):
            break
    else:
        raise IOError('mri not found: %s' % path)
    mri_ras_t = _read_mri_info(path)[2]
    return combine_transforms(mri_ras_t, ras_mni_t, 'mri', 'mni_tal')
def _read_mri_info(path, units='m', return_img=False):
    """Read voxel/surface-RAS/scanner-RAS transforms from an MRI volume.

    Parameters
    ----------
    path : str
        Path to the MRI volume (e.g., an .mgz file).
    units : 'm' | 'mm'
        Units for the scaling/translation entries of the returned transforms.
    return_img : bool
        If True, also return the loaded nibabel image (None when the
        fallback header reader was used).

    Returns
    -------
    vox_ras_t : Transform
        MRI voxel -> scanner RAS (non-zero origin) transform.
    vox_mri_t : Transform
        MRI voxel -> FreeSurfer surface RAS ('mri') transform.
    mri_ras_t : Transform
        Surface RAS ('mri') -> scanner RAS transform.
    dims : tuple
        Volume dimensions.
    zooms : tuple
        Voxel sizes (first three axes).
    """
    if has_nibabel():
        import nibabel
        mgz = nibabel.load(path)
        hdr = mgz.header
        # Norig and Torig in FreeSurfer terminology
        n_orig = hdr.get_vox2ras()
        t_orig = hdr.get_vox2ras_tkr()
        dims = hdr.get_data_shape()
        zooms = hdr.get_zooms()[:3]
    else:
        # fall back to the internal minimal .mgz header reader
        mgz = None
        hdr = _get_mgz_header(path)
        n_orig = hdr['vox2ras']
        t_orig = hdr['vox2ras_tkr']
        dims = hdr['dims']
        zooms = hdr['zooms']
    # extract the MRI_VOXEL to RAS (non-zero origin) transform
    vox_ras_t = Transform('mri_voxel', 'ras', n_orig)
    # extract the MRI_VOXEL to MRI transform
    vox_mri_t = Transform('mri_voxel', 'mri', t_orig)
    # construct the MRI to RAS (non-zero origin) transform
    mri_ras_t = combine_transforms(
        invert_transform(vox_mri_t), vox_ras_t, 'mri', 'ras')
    assert units in ('m', 'mm')
    if units == 'm':
        # mm -> m: scale the first three rows of the affines
        conv = np.array([[1e-3, 1e-3, 1e-3, 1]]).T
        # scaling and translation terms
        vox_ras_t['trans'] *= conv
        vox_mri_t['trans'] *= conv
        # just the translation term (rotation part is already unitless here)
        mri_ras_t['trans'][:, 3:4] *= conv
    out = (vox_ras_t, vox_mri_t, mri_ras_t, dims, zooms)
    if return_img:
        out += (mgz,)
    return out
###############################################################################
# Creation and decimation
@verbose
def _check_spacing(spacing, verbose=None):
    """Parse and validate the ``spacing`` parameter.

    Returns the spacing type ('ico', 'oct', 'all', or 'spacing'), its
    numeric value, the pre-loaded template surface (or None / the grid
    value), and a human-readable description string.
    """
    types = ('a string with values "ico#", "oct#", "all", or an int >= 2')
    space_err = ('"spacing" must be %s, got type %s (%r)'
                 % (types, type(spacing), spacing))
    if not isinstance(spacing, str):
        # numeric spacing: approximate distance-based subsampling in mm
        stype = 'spacing'
        sval = _ensure_int(spacing, 'spacing', types)
        if sval < 2:
            raise ValueError('spacing must be >= 2, got %d' % (sval,))
    elif spacing == 'all':
        stype, sval = 'all', ''
    elif spacing[:3] in ('ico', 'oct'):
        stype, sval = spacing[:3], spacing[3:]
        try:
            sval = int(sval)
        except Exception:
            raise ValueError('%s subdivision must be an integer, got %r'
                             % (stype, sval))
        # ico0 is valid, oct needs at least one subdivision
        lim = 0 if stype == 'ico' else 1
        if sval < lim:
            raise ValueError('%s subdivision must be >= %s, got %s'
                             % (stype, lim, sval))
    else:
        raise ValueError(space_err)
    if stype == 'all':
        logger.info('Include all vertices')
        ico_surf = None
        src_type_str = 'all'
    else:
        src_type_str = '%s = %s' % (stype, sval)
        if stype == 'ico':
            logger.info('Icosahedron subdivision grade %s' % sval)
            ico_surf = _get_ico_surface(sval)
        elif stype == 'oct':
            logger.info('Octahedron subdivision grade %s' % sval)
            ico_surf = _tessellate_sphere_surf(sval)
        else:
            logger.info('Approximate spacing %s mm' % sval)
            ico_surf = sval
    return stype, sval, ico_surf, src_type_str
@verbose
def setup_source_space(subject, spacing='oct6', surface='white',
                       subjects_dir=None, add_dist=True, n_jobs=1,
                       verbose=None):
    """Set up bilateral hemisphere surface-based source space with subsampling.

    Parameters
    ----------
    %(subject)s
    spacing : str
        The spacing to use. Can be ``'ico#'`` for a recursively subdivided
        icosahedron, ``'oct#'`` for a recursively subdivided octahedron,
        ``'all'`` for all points, or an integer to use approximate
        distance-based spacing (in mm).

        .. versionchanged:: 0.18
           Support for integers for distance-based spacing.
    surface : str
        The surface to use.
    %(subjects_dir)s
    add_dist : bool | str
        Add distance and patch information to the source space. This takes
        some time so precomputing it is recommended. Can also be 'patch' to
        only compute patch information (requires SciPy 1.3+).

        .. versionchanged:: 0.20
           Support for add_dist='patch'.
    %(n_jobs)s
        Ignored if ``add_dist=='patch'``.
    %(verbose)s

    Returns
    -------
    src : SourceSpaces
        The source space for each hemisphere.

    See Also
    --------
    setup_volume_source_space
    """
    # record the invocation so it can be stored in src.info['command_line']
    cmd = ('setup_source_space(%s, spacing=%s, surface=%s, '
           'subjects_dir=%s, add_dist=%s, verbose=%s)'
           % (subject, spacing, surface, subjects_dir, add_dist, verbose))
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    surfs = [op.join(subjects_dir, subject, 'surf', hemi + surface)
             for hemi in ['lh.', 'rh.']]
    for surf, hemi in zip(surfs, ['LH', 'RH']):
        if surf is not None and not op.isfile(surf):
            raise IOError('Could not find the %s surface %s'
                          % (hemi, surf))
    logger.info('Setting up the source space with the following parameters:\n')
    logger.info('SUBJECTS_DIR = %s' % subjects_dir)
    logger.info('Subject = %s' % subject)
    logger.info('Surface = %s' % surface)
    # parse/validate 'spacing' and pre-load the ico/oct template surface
    stype, sval, ico_surf, src_type_str = _check_spacing(spacing)
    logger.info('')
    del spacing
    logger.info('>>> 1. Creating the source space...\n')
    # mne_make_source_space ... actually make the source spaces
    src = []
    # pre-load ico/oct surf (once) for speed, if necessary
    if stype not in ('spacing', 'all'):
        logger.info('Doing the %shedral vertex picking...'
                    % (dict(ico='icosa', oct='octa')[stype],))
    for hemi, surf in zip(['lh', 'rh'], surfs):
        logger.info('Loading %s...' % surf)
        # Setup the surface spacing in the MRI coord frame
        if stype != 'all':
            logger.info('Mapping %s %s -> %s (%d) ...'
                        % (hemi, subject, stype, sval))
        s = _create_surf_spacing(surf, hemi, subject, stype, ico_surf,
                                 subjects_dir)
        logger.info('loaded %s %d/%d selected to source space (%s)'
                    % (op.split(surf)[1], s['nuse'], s['np'], src_type_str))
        src.append(s)
    logger.info('')  # newline after both subject types are run
    # Fill in source space info
    hemi_ids = [FIFF.FIFFV_MNE_SURF_LEFT_HEMI, FIFF.FIFFV_MNE_SURF_RIGHT_HEMI]
    for s, s_id in zip(src, hemi_ids):
        # Add missing fields
        s.update(dict(dist=None, dist_limit=None, nearest=None, type='surf',
                      nearest_dist=None, pinfo=None, patch_inds=None, id=s_id,
                      coord_frame=FIFF.FIFFV_COORD_MRI))
        s['rr'] /= 1000.0  # mm -> m (cf. the 1e-3 surface conversions below)
        # drop per-triangle geometry not needed in the final source space
        del s['tri_area']
        del s['tri_cent']
        del s['tri_nn']
        del s['neighbor_tri']
    # upconvert to object format from lists
    src = SourceSpaces(src, dict(working_dir=os.getcwd(), command_line=cmd))
    if add_dist:
        # dist_limit=0 means patch info only; np.inf means full distances
        dist_limit = 0. if add_dist == 'patch' else np.inf
        add_source_space_distances(src, dist_limit=dist_limit,
                                   n_jobs=n_jobs, verbose=verbose)
    # write out if requested, then return the data
    logger.info('You are now one step closer to computing the gain matrix')
    return src
def _check_mri(mri, subject, subjects_dir):
    """Resolve an MRI filename, falling back to the subject's mri directory."""
    _validate_type(mri, 'path-like', 'mri')
    if op.isfile(mri):
        return mri
    # not an existing path: try SUBJECTS_DIR/<subject>/mri/<mri>
    if subject is None:
        raise FileNotFoundError(
            'MRI file %r not found and no subject provided' % (mri,))
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    mri = op.join(subjects_dir, subject, 'mri', mri)
    if not op.isfile(mri):
        raise FileNotFoundError('MRI file %r not found' % (mri,))
    return mri
def _check_volume_labels(volume_label, mri, name='volume_label'):
    """Normalize ``volume_label`` into a dict mapping label name -> atlas id."""
    _validate_type(mri, 'path-like', 'mri when %s is not None' % (name,))
    mri = _check_fname(mri, overwrite='read', must_exist=True)
    if isinstance(volume_label, str):
        volume_label = [volume_label]
    _validate_type(volume_label, (list, tuple, dict), name)  # should be
    if not isinstance(volume_label, dict):
        # look the label names up in the FreeSurfer LUT; this only makes
        # sense for segmentation (aseg) volumes
        if not mri.endswith('aseg.mgz'):
            raise RuntimeError(
                'Must use a *aseg.mgz file unless %s is a dict, got %s'
                % (name, op.basename(mri)))
        lut, _ = read_freesurfer_lut()
        mapped = dict()
        for label in volume_label:
            if label not in lut:
                raise ValueError(
                    'Volume %r not found in file %s. Double check '
                    'FreeSurfer lookup table.%s'
                    % (label, mri, _suggest(label, lut)))
            mapped[label] = lut[label]
        volume_label = mapped
    for label, id_ in volume_label.items():
        _validate_type(label, str, 'volume_label keys')
        _validate_type(id_, 'int-like', 'volume_labels[%r]' % (label,))
    return {key: _ensure_int(val) for key, val in volume_label.items()}
@verbose
def setup_volume_source_space(subject=None, pos=5.0, mri=None,
                              sphere=None, bem=None,
                              surface=None, mindist=5.0, exclude=0.0,
                              subjects_dir=None, volume_label=None,
                              add_interpolator=True, sphere_units='m',
                              single_volume=False, verbose=None):
    """Set up a volume source space with grid spacing or discrete source space.

    Parameters
    ----------
    subject : str | None
        Subject to process. If None, the path to the MRI volume must be
        absolute to get a volume source space. If a subject name
        is provided the T1.mgz file will be found automatically.
        Defaults to None.
    pos : float | dict
        Positions to use for sources. If float, a grid will be constructed
        with the spacing given by ``pos`` in mm, generating a volume source
        space. If dict, pos['rr'] and pos['nn'] will be used as the source
        space locations (in meters) and normals, respectively, creating a
        discrete source space.

        .. note:: For a discrete source space (``pos`` is a dict),
                  ``mri`` must be None.
    mri : str | None
        The filename of an MRI volume (mgh or mgz) to create the
        interpolation matrix over. Source estimates obtained in the
        volume source space can then be morphed onto the MRI volume
        using this interpolator. If pos is a dict, this cannot be None.
        If subject name is provided, ``pos`` is a float or ``volume_label``
        are not provided then the ``mri`` parameter will default to 'T1.mgz'
        or ``aseg.mgz``, respectively, else it will stay None.
    sphere : ndarray, shape (4,) | ConductorModel | None
        Define spherical source space bounds using origin and radius given
        by (ox, oy, oz, rad) in ``sphere_units``.
        Only used if ``bem`` and ``surface`` are both None. Can also be a
        spherical ConductorModel, which will use the origin and radius.
        None (the default) uses a head-digitization fit.
    bem : str | None | ConductorModel
        Define source space bounds using a BEM file (specifically the inner
        skull surface) or a ConductorModel for a 1-layer or 3-layer BEM.
    surface : str | dict | None
        Define source space bounds using a FreeSurfer surface file. Can
        also be a dictionary with entries ``'rr'`` and ``'tris'``, such as
        those returned by :func:`mne.read_surface`.
    mindist : float
        Exclude points closer than this distance (mm) to the bounding surface.
    exclude : float
        Exclude points closer than this distance (mm) from the center of mass
        of the bounding surface.
    %(subjects_dir)s
    volume_label : str | dict | list | None
        Region(s) of interest to use. None (default) will create a single
        whole-brain source space. Otherwise, a separate source space will be
        created for each entry in the list or dict (str will be turned into
        a single-element list). If list of str, standard Freesurfer labels
        are assumed. If dict, should be a mapping of region names to atlas
        id numbers, allowing the use of other atlases.

        .. versionchanged:: 0.21.0
           Support for dict added.
    add_interpolator : bool
        If True and ``mri`` is not None, then an interpolation matrix
        will be produced.
    sphere_units : str
        Defaults to ``"m"``.

        .. versionadded:: 0.20
    single_volume : bool
        If True, multiple values of ``volume_label`` will be merged into a
        single source space instead of occupying multiple source spaces
        (one for each sub-volume), i.e., ``len(src)`` will be ``1`` instead of
        ``len(volume_label)``. This can help conserve memory and disk space
        when many labels are used.

        .. versionadded:: 0.21
    %(verbose)s

    Returns
    -------
    src : SourceSpaces
        A :class:`SourceSpaces` object containing one source space for each
        entry of ``volume_labels``, or a single source space if
        ``volume_labels`` was not specified.

    See Also
    --------
    setup_source_space

    Notes
    -----
    Volume source spaces are related to an MRI image such as T1 and allow to
    visualize source estimates overlaid on MRIs and to morph estimates
    to a template brain for group analysis. Discrete source spaces
    don't allow this. If you provide a subject name the T1 MRI will be
    used by default.

    When you work with a source space formed from a grid you need to specify
    the domain in which the grid will be defined. There are three ways
    of specifying this:
    (i) sphere, (ii) bem model, and (iii) surface.
    The default behavior is to use sphere model
    (``sphere=(0.0, 0.0, 0.0, 90.0)``) if ``bem`` or ``surface`` is not
    ``None`` then ``sphere`` is ignored.
    If you're going to use a BEM conductor model for forward model
    it is recommended to pass it here.

    To create a discrete source space, ``pos`` must be a dict, ``mri`` must be
    None, and ``volume_label`` must be None. To create a whole brain volume
    source space, ``pos`` must be a float and 'mri' must be provided.

    To create a volume source space from label, ``pos`` must be a float,
    ``volume_label`` must be provided, and 'mri' must refer to a .mgh or .mgz
    file with values corresponding to the freesurfer lookup-table (typically
    ``aseg.mgz``).
    """
    subjects_dir = get_subjects_dir(subjects_dir)
    _validate_type(
        volume_label, (str, list, tuple, dict, None), 'volume_label')
    if bem is not None and surface is not None:
        raise ValueError('Only one of "bem" and "surface" should be '
                         'specified')
    # resolve the default MRI: aseg when labels are requested, T1 for grids
    if mri is None and subject is not None:
        if volume_label is not None:
            mri = 'aseg.mgz'
        elif _is_numeric(pos):
            mri = 'T1.mgz'
    if mri is not None:
        mri = _check_mri(mri, subject, subjects_dir)
        if isinstance(pos, dict):
            raise ValueError('Cannot create interpolation matrix for '
                             'discrete source space, mri must be None if '
                             'pos is a dict')
    if volume_label is not None:
        volume_label = _check_volume_labels(volume_label, mri)
    assert volume_label is None or isinstance(volume_label, dict)
    sphere = _check_sphere(sphere, sphere_units=sphere_units)
    # triage bounding argument
    if bem is not None:
        logger.info('BEM : %s', bem)
    elif surface is not None:
        if isinstance(surface, dict):
            if not all(key in surface for key in ['rr', 'tris']):
                raise KeyError('surface, if dict, must have entries "rr" '
                               'and "tris"')
            # let's make sure we have geom info
            complete_surface_info(surface, copy=False, verbose=False)
            surf_extra = 'dict()'
        elif isinstance(surface, str):
            if not op.isfile(surface):
                raise IOError('surface file "%s" not found' % surface)
            surf_extra = surface
        logger.info('Boundary surface file : %s', surf_extra)
    else:
        logger.info('Sphere : origin at (%.1f %.1f %.1f) mm'
                    % (1000 * sphere[0], 1000 * sphere[1], 1000 * sphere[2]))
        logger.info(' radius : %.1f mm' % (1000 * sphere[3],))
    # triage pos argument
    if isinstance(pos, dict):
        if not all(key in pos for key in ['rr', 'nn']):
            raise KeyError('pos, if dict, must contain "rr" and "nn"')
        pos_extra = 'dict()'
    else:  # pos should be float-like
        try:
            pos = float(pos)
        except (TypeError, ValueError):
            raise ValueError('pos must be a dict, or something that can be '
                             'cast to float()')
    if not isinstance(pos, float):
        logger.info('Source location file : %s', pos_extra)
        logger.info('Assuming input in millimeters')
        logger.info('Assuming input in MRI coordinates')
    if isinstance(pos, float):
        logger.info('grid : %.1f mm' % pos)
        logger.info('mindist : %.1f mm' % mindist)
        pos /= 1000.0  # convert pos from mm to m
    if exclude > 0.0:
        logger.info('Exclude : %.1f mm' % exclude)
    vol_info = dict()
    if mri is not None:
        logger.info('MRI volume : %s' % mri)
        logger.info('')
        logger.info('Reading %s...' % mri)
        # voxel data itself is only needed when extracting labeled volumes
        vol_info = _get_mri_info_data(mri, data=volume_label is not None)
    exclude /= 1000.0  # convert exclude from mm to m
    logger.info('')
    # Explicit list of points
    if not isinstance(pos, float):
        # Make the grid of sources
        sp = [_make_discrete_source_space(pos)]
    else:
        # Load the brain surface as a template
        if isinstance(bem, str):
            # read bem surface in the MRI coordinate frame
            surf = read_bem_surfaces(bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN,
                                     verbose=False)
            logger.info('Loaded inner skull from %s (%d nodes)'
                        % (bem, surf['np']))
        elif bem is not None and bem.get('is_sphere') is False:
            # read bem surface in the MRI coordinate frame
            which = np.where([surf['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN
                              for surf in bem['surfs']])[0]
            if len(which) != 1:
                raise ValueError('Could not get inner skull surface from BEM')
            surf = bem['surfs'][which[0]]
            assert surf['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN
            if surf['coord_frame'] != FIFF.FIFFV_COORD_MRI:
                raise ValueError('BEM is not in MRI coordinates, got %s'
                                 % (_coord_frame_name(surf['coord_frame']),))
            logger.info('Taking inner skull from %s' % bem)
        elif surface is not None:
            if isinstance(surface, str):
                # read the surface in the MRI coordinate frame
                surf = read_surface(surface, return_dict=True)[-1]
            else:
                surf = surface
            logger.info('Loaded bounding surface from %s (%d nodes)'
                        % (surface, surf['np']))
            surf = deepcopy(surf)
            surf['rr'] *= 1e-3  # must be converted to meters
        else:  # Load an icosahedron and use that as the surface
            logger.info('Setting up the sphere...')
            surf = dict(R=sphere[3], r0=sphere[:3])
        # Make the grid of sources in MRI space
        sp = _make_volume_source_space(
            surf, pos, exclude, mindist, mri, volume_label,
            vol_info=vol_info, single_volume=single_volume)
    del sphere
    assert isinstance(sp, list)
    assert len(sp) == 1 if (volume_label is None or
                            single_volume) else len(volume_label)
    # Compute an interpolation matrix to show data in MRI_VOXEL coord frame
    if mri is not None:
        if add_interpolator:
            _add_interpolator(sp)
        elif sp[0]['type'] == 'vol':
            # If there is no interpolator, it's actually a discrete source
            # space
            sp[0]['type'] = 'discrete'
    # do some cleaning
    if volume_label is None and 'seg_name' in sp[0]:
        del sp[0]['seg_name']
    for s in sp:
        if 'vol_dims' in s:
            del s['vol_dims']
    # Save it
    sp = _complete_vol_src(sp, subject)
    return sp
def _complete_vol_src(sp, subject=None):
    """Fill in the fields a volume/discrete space leaves unset and wrap it."""
    for this_sp in sp:
        this_sp.update(dict(
            nearest=None, dist=None, use_tris=None, patch_inds=None,
            dist_limit=None, pinfo=None, ntri=0, nearest_dist=None,
            nuse_tri=0, tris=None, subject_his_id=subject))
    return SourceSpaces(sp, dict(working_dir=os.getcwd(), command_line='None'))
def _make_voxel_ras_trans(move, ras, voxel_size):
    """Make a transformation from MRI_VOXEL to MRI surface RAS (i.e. MRI)."""
    assert voxel_size.ndim == 1
    assert voxel_size.size == 3
    # scale the transposed direction cosines column-wise by the voxel sizes
    rot = ras.T * voxel_size[np.newaxis, :]
    assert rot.ndim == 2
    assert rot.shape[0] == 3
    assert rot.shape[1] == 3
    # assemble the homogeneous 4x4 affine explicitly
    trans = np.zeros((4, 4))
    trans[:3, :3] = rot
    trans[:3, 3] = move
    trans[3, 3] = 1.0
    return Transform('mri_voxel', 'mri', trans)
def _make_discrete_source_space(pos, coord_frame='mri'):
    """Use a discrete set of source locs/oris to make src space.

    Parameters
    ----------
    pos : dict
        Must have entries "rr" and "nn". Data should be in meters.
    coord_frame : str
        The coordinate frame in which the positions are given; default: 'mri'.
        The frame must be one defined in transforms.py:_str_to_frame

    Returns
    -------
    src : dict
        The source space.
    """
    # Check that coordinate frame is valid
    if coord_frame not in _str_to_frame:  # will fail if coord_frame not string
        raise KeyError('coord_frame must be one of %s, not "%s"'
                       % (list(_str_to_frame.keys()), coord_frame))
    coord_frame = _str_to_frame[coord_frame]  # now an int
    # process points (copy and cast)
    rr = np.array(pos['rr'], float)
    nn = np.array(pos['nn'], float)
    # Bug fix: the row counts of rr and nn must match (the original compared
    # nn.shape[0] to itself, which is always true, so mismatched inputs
    # slipped through and failed later in zip-like consumers).
    if not (rr.ndim == nn.ndim == 2 and rr.shape[0] == nn.shape[0] and
            rr.shape[1] == nn.shape[1]):
        raise RuntimeError('"rr" and "nn" must both be 2D arrays with '
                           'the same number of rows and 3 columns')
    npts = rr.shape[0]
    # normalize orientations in place; any zero-length normal is an error
    _normalize_vectors(nn)
    nz = np.sum(np.sum(nn * nn, axis=1) == 0)
    if nz != 0:
        raise RuntimeError('%d sources have zero length normal' % nz)
    logger.info('Positions (in meters) and orientations')
    logger.info('%d sources' % npts)
    # Ready to make the source space
    sp = dict(coord_frame=coord_frame, type='discrete', nuse=npts, np=npts,
              inuse=np.ones(npts, int), vertno=np.arange(npts), rr=rr, nn=nn,
              id=-1)
    return sp
def _import_nibabel(why='use MRI files'):
try:
import nibabel as nib
except ImportError as exp:
msg = 'nibabel is required to %s, got:\n%s' % (why, exp)
else:
msg = ''
if msg:
raise ImportError(msg)
return nib
def _mri_orientation(img, orientation):
    """Get MRI orientation information from an image.

    Parameters
    ----------
    img : instance of SpatialImage
        The MRI image.
    orientation : str
        Orientation that you want. Can be "axial", "saggital", or "coronal".

    Returns
    -------
    xyz : tuple, shape (3,)
        The dimension indices for X, Y, and Z.
    flips : tuple, shape (3,)
        Whether each dimension requires a flip.
    order : tuple, shape (3,)
        The resulting order of the data if the given ``xyz`` and ``flips``
        are used.

    Notes
    -----
    .. versionadded:: 0.21
    """
    import nibabel as nib
    _validate_type(img, nib.spatialimages.SpatialImage)
    _check_option('orientation', orientation, ('coronal', 'axial', 'sagittal'))
    # e.g. 'RAS', 'LPI', ... one code per data axis from the affine
    axcodes = ''.join(nib.orientations.aff2axcodes(img.affine))
    # an axis needs flipping when its code is the opposite of R/A/S;
    # computed BEFORE canonicalization below so the information survives
    flips = {o: (1 if o in axcodes else -1) for o in 'RAS'}
    # canonicalize the codes to R/A/S letters so they can be index()'d
    axcodes = axcodes.replace('L', 'R').replace('P', 'A').replace('I', 'S')
    # which anatomical letters each view wants, in display order
    order = dict(
        coronal=('R', 'S', 'A'),
        axial=('R', 'A', 'S'),
        sagittal=('A', 'S', 'R'),
    )[orientation]
    xyz = tuple(axcodes.index(c) for c in order)
    flips = tuple(flips[c] for c in order)
    return xyz, flips, order
def _get_mri_info_data(mri, data):
    """Read MRI transforms/dimensions (and optionally voxel data).

    Parameters
    ----------
    mri : str
        Path to the MRI volume.
    data : bool
        If True, also load the voxel data (requires nibabel) and the
        mri->voxel transform, as needed for atlas lookups.

    Returns
    -------
    out : dict
        Contains 'vox_mri_t', 'mri_ras_t', 'mri_width', 'mri_height',
        'mri_depth', 'mri_volume_name', and (when ``data``) 'mri_vox_t'
        and 'data'.
    """
    # Read the segmentation data using nibabel
    if data:
        _import_nibabel('load MRI atlas data')
    out = dict()
    _, out['vox_mri_t'], out['mri_ras_t'], dims, _, mgz = _read_mri_info(
        mri, return_img=True)
    # Bug fix: depth is the third dimension; the original wrote dims[1]
    # (the height) twice, which was only masked for cubic conformed volumes.
    out.update(
        mri_width=dims[0], mri_height=dims[1],
        mri_depth=dims[2], mri_volume_name=mri)
    if data:
        assert mgz is not None
        out['mri_vox_t'] = invert_transform(out['vox_mri_t'])
        out['data'] = np.asarray(mgz.dataobj)
    return out
def _get_atlas_values(vol_info, rr):
    """Look up atlas voxel values at positions given in the MRI frame."""
    # Transform MRI coordinates (where our surfaces live) to voxels
    rr_vox = apply_trans(vol_info['mri_vox_t'], rr)
    data = vol_info['data']
    # keep only points whose rounded voxel index lands inside the volume
    upper = np.array(data.shape, int) - 0.5
    good = ((rr_vox >= -.5) & (rr_vox < upper)).all(-1)
    values = np.full(rr.shape[0], np.nan)  # NaN marks out-of-volume points
    idx = np.round(rr_vox[good].T).astype(np.int64)
    values[good] = data[tuple(idx)]
    return values
def _make_volume_source_space(surf, grid, exclude, mindist, mri=None,
                              volume_labels=None, do_neighbors=True, n_jobs=1,
                              vol_info=None, single_volume=False):
    """Make a source space which covers the volume bounded by surf.

    Parameters
    ----------
    surf : dict
        Either a surface (has key 'rr') or a sphere (keys 'r0' and 'R')
        bounding the volume.
    grid : float
        Grid spacing (m).
    exclude : float
        Exclude points closer than this (m) to the center of mass.
    mindist : float
        Exclude points closer than this (mm) to the bounding surface.
    mri : str | None
        Volume file name stored with label-restricted source spaces.
    volume_labels : dict | None
        Mapping of volume label name -> atlas id used to restrict sources;
        None means use the whole brain.
    n_jobs : int
        Number of jobs for surface-distance filtering.
    do_neighbors : bool
        If True, build the 26-neighborhood and src->MRI transform info.
    vol_info : dict | None
        MRI volume info (see ``_get_mri_info_data``); None means empty.
    single_volume : bool
        If True, merge all volume labels into a single source space.

    Returns
    -------
    sps : list of dict
        The volume source space(s).
    """
    if vol_info is None:  # avoid a mutable default argument
        vol_info = {}
    # Figure out the grid size in the MRI coordinate frame
    if 'rr' in surf:
        mins = np.min(surf['rr'], axis=0)
        maxs = np.max(surf['rr'], axis=0)
        cm = np.mean(surf['rr'], axis=0)  # center of mass
        maxdist = np.linalg.norm(surf['rr'] - cm, axis=1).max()
    else:
        mins = surf['r0'] - surf['R']
        maxs = surf['r0'] + surf['R']
        cm = surf['r0'].copy()
        maxdist = surf['R']
    # Define the sphere which fits the surface
    logger.info('Surface CM = (%6.1f %6.1f %6.1f) mm'
                % (1000 * cm[0], 1000 * cm[1], 1000 * cm[2]))
    logger.info('Surface fits inside a sphere with radius %6.1f mm'
                % (1000 * maxdist))
    logger.info('Surface extent:')
    for c, mi, ma in zip('xyz', mins, maxs):
        logger.info('    %s = %6.1f ... %6.1f mm'
                    % (c, 1000 * mi, 1000 * ma))
    maxn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
                     np.floor(np.abs(m) / grid) - 1 for m in maxs], int)
    minn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
                     np.floor(np.abs(m) / grid) - 1 for m in mins], int)
    logger.info('Grid extent:')
    for c, mi, ma in zip('xyz', minn, maxn):
        logger.info('    %s = %6.1f ... %6.1f mm'
                    % (c, 1000 * mi * grid, 1000 * ma * grid))
    # Now make the initial grid
    ns = tuple(maxn - minn + 1)
    npts = np.prod(ns)
    nrow = ns[0]
    ncol = ns[1]
    nplane = nrow * ncol
    # x varies fastest, then y, then z (can use unravel to do this)
    rr = np.meshgrid(np.arange(minn[2], maxn[2] + 1),
                     np.arange(minn[1], maxn[1] + 1),
                     np.arange(minn[0], maxn[0] + 1), indexing='ij')
    x, y, z = rr[2].ravel(), rr[1].ravel(), rr[0].ravel()
    rr = np.array([x * grid, y * grid, z * grid]).T
    sp = dict(np=npts, nn=np.zeros((npts, 3)), rr=rr,
              inuse=np.ones(npts, bool), type='vol', nuse=npts,
              coord_frame=FIFF.FIFFV_COORD_MRI, id=-1, shape=ns)
    sp['nn'][:, 2] = 1.0
    assert sp['rr'].shape[0] == npts
    logger.info('%d sources before omitting any.', sp['nuse'])
    # Exclude infeasible points
    dists = np.linalg.norm(sp['rr'] - cm, axis=1)
    bads = np.where(np.logical_or(dists < exclude, dists > maxdist))[0]
    sp['inuse'][bads] = False
    sp['nuse'] -= len(bads)
    logger.info('%d sources after omitting infeasible sources not within '
                '%0.1f - %0.1f mm.',
                sp['nuse'], 1000 * exclude, 1000 * maxdist)
    if 'rr' in surf:
        _filter_source_spaces(surf, mindist, None, [sp], n_jobs)
    else:  # sphere
        vertno = np.where(sp['inuse'])[0]
        bads = (np.linalg.norm(sp['rr'][vertno] - surf['r0'], axis=-1) >=
                surf['R'] - mindist / 1000.)
        sp['nuse'] -= bads.sum()
        sp['inuse'][vertno[bads]] = False
        sp['vertno'] = np.where(sp['inuse'])[0]
        del vertno
    del surf
    logger.info('%d sources remaining after excluding the sources outside '
                'the surface and less than %6.1f mm inside.'
                % (sp['nuse'], mindist))
    # Restrict sources to volume of interest
    if volume_labels is None:
        sp['seg_name'] = 'the whole brain'
        sps = [sp]
    else:
        if not do_neighbors:
            raise RuntimeError('volume_label cannot be None unless '
                               'do_neighbors is True')
        sps = list()
        orig_sp = sp
        # reduce the sizes when we deepcopy
        for volume_label, id_ in volume_labels.items():
            # this saves us some memory: share (don't copy) rr and nn,
            # which are identical across all label-restricted spaces
            memodict = dict()
            for key in ('rr', 'nn'):
                if key in orig_sp:
                    arr = orig_sp[key]
                    memodict[id(arr)] = arr
            sp = deepcopy(orig_sp, memodict)
            good = _get_atlas_values(vol_info, sp['rr'][sp['vertno']]) == id_
            n_good = good.sum()
            logger.info('    Selected %d voxel%s from %s'
                        % (n_good, _pl(n_good), volume_label))
            # Update source info
            sp['inuse'][sp['vertno'][~good]] = False
            sp['vertno'] = sp['vertno'][good]
            sp['nuse'] = sp['inuse'].sum()
            sp['seg_name'] = volume_label
            sp['mri_file'] = mri
            sps.append(sp)
        del orig_sp
        assert len(sps) == len(volume_labels)
        # This will undo some of the work above, but the calculations are
        # pretty trivial so allow it
        if single_volume:
            for sp in sps[1:]:
                sps[0]['inuse'][sp['vertno']] = True
            sp = sps[0]
            sp['seg_name'] = '+'.join(s['seg_name'] for s in sps)
            sps = sps[:1]
            sp['vertno'] = np.where(sp['inuse'])[0]
            sp['nuse'] = len(sp['vertno'])
    del sp, volume_labels
    if not do_neighbors:
        return sps
    k = np.arange(npts)
    neigh = np.empty((26, npts), int)
    neigh.fill(-1)
    # Figure out each neighborhood:
    # 6-neighborhood first
    idxs = [z > minn[2], x < maxn[0], y < maxn[1],
            x > minn[0], y > minn[1], z < maxn[2]]
    offsets = [-nplane, 1, nrow, -1, -nrow, nplane]
    for n, idx, offset in zip(neigh[:6], idxs, offsets):
        n[idx] = k[idx] + offset
    # Then the rest to complete the 26-neighborhood
    # First the plane below
    idx1 = z > minn[2]
    idx2 = np.logical_and(idx1, x < maxn[0])
    neigh[6, idx2] = k[idx2] + 1 - nplane
    idx3 = np.logical_and(idx2, y < maxn[1])
    neigh[7, idx3] = k[idx3] + 1 + nrow - nplane
    idx2 = np.logical_and(idx1, y < maxn[1])
    neigh[8, idx2] = k[idx2] + nrow - nplane
    idx2 = np.logical_and(idx1, x > minn[0])
    idx3 = np.logical_and(idx2, y < maxn[1])
    neigh[9, idx3] = k[idx3] - 1 + nrow - nplane
    neigh[10, idx2] = k[idx2] - 1 - nplane
    idx3 = np.logical_and(idx2, y > minn[1])
    neigh[11, idx3] = k[idx3] - 1 - nrow - nplane
    idx2 = np.logical_and(idx1, y > minn[1])
    neigh[12, idx2] = k[idx2] - nrow - nplane
    idx3 = np.logical_and(idx2, x < maxn[0])
    neigh[13, idx3] = k[idx3] + 1 - nrow - nplane
    # Then the same plane
    idx1 = np.logical_and(x < maxn[0], y < maxn[1])
    neigh[14, idx1] = k[idx1] + 1 + nrow
    idx1 = x > minn[0]
    idx2 = np.logical_and(idx1, y < maxn[1])
    neigh[15, idx2] = k[idx2] - 1 + nrow
    idx2 = np.logical_and(idx1, y > minn[1])
    neigh[16, idx2] = k[idx2] - 1 - nrow
    idx1 = np.logical_and(y > minn[1], x < maxn[0])
    # Bug fix: this neighbor is in the *same* z-plane, so it must not carry
    # a -nplane offset (which would duplicate neighbor 13 in the plane
    # below and leave the 26-neighborhood incomplete).
    neigh[17, idx1] = k[idx1] + 1 - nrow
    # Finally one plane above
    idx1 = z < maxn[2]
    idx2 = np.logical_and(idx1, x < maxn[0])
    neigh[18, idx2] = k[idx2] + 1 + nplane
    idx3 = np.logical_and(idx2, y < maxn[1])
    neigh[19, idx3] = k[idx3] + 1 + nrow + nplane
    idx2 = np.logical_and(idx1, y < maxn[1])
    neigh[20, idx2] = k[idx2] + nrow + nplane
    idx2 = np.logical_and(idx1, x > minn[0])
    idx3 = np.logical_and(idx2, y < maxn[1])
    neigh[21, idx3] = k[idx3] - 1 + nrow + nplane
    neigh[22, idx2] = k[idx2] - 1 + nplane
    idx3 = np.logical_and(idx2, y > minn[1])
    neigh[23, idx3] = k[idx3] - 1 - nrow + nplane
    idx2 = np.logical_and(idx1, y > minn[1])
    neigh[24, idx2] = k[idx2] - nrow + nplane
    idx3 = np.logical_and(idx2, x < maxn[0])
    neigh[25, idx3] = k[idx3] + 1 - nrow + nplane
    # Omit unused vertices from the neighborhoods
    logger.info('Adjusting the neighborhood info.')
    r0 = minn * grid
    voxel_size = grid * np.ones(3)
    ras = np.eye(3)
    src_mri_t = _make_voxel_ras_trans(r0, ras, voxel_size)
    neigh_orig = neigh
    for sp in sps:
        # remove non source-space points
        neigh = neigh_orig.copy()
        neigh[:, np.logical_not(sp['inuse'])] = -1
        # remove these points from neigh
        old_shape = neigh.shape
        neigh = neigh.ravel()
        checks = np.where(neigh >= 0)[0]
        removes = np.logical_not(np.in1d(checks, sp['vertno']))
        neigh[checks[removes]] = -1
        neigh.shape = old_shape
        neigh = neigh.T
        # Thought we would need this, but C code keeps -1 vertices, so we will:
        # neigh = [n[n >= 0] for n in enumerate(neigh[vertno])]
        sp['neighbor_vert'] = neigh
        # Set up the volume data (needed for creating the interpolation matrix)
        sp['src_mri_t'] = src_mri_t
        sp['vol_dims'] = maxn - minn + 1
        for key in ('mri_width', 'mri_height', 'mri_depth', 'mri_volume_name',
                    'vox_mri_t', 'mri_ras_t'):
            if key in vol_info:
                sp[key] = vol_info[key]
    _print_coord_trans(sps[0]['src_mri_t'], 'Source space : ')
    for key in ('vox_mri_t', 'mri_ras_t'):
        if key in sps[0]:
            _print_coord_trans(sps[0][key], 'MRI volume : ')
    return sps
def _vol_vertex(width, height, jj, kk, pp):
return jj + width * kk + pp * (width * height)
def _get_mgz_header(fname):
"""Adapted from nibabel to quickly extract header info."""
if not fname.endswith('.mgz'):
raise IOError('Filename must end with .mgz')
header_dtd = [('version', '>i4'), ('dims', '>i4', (4,)),
('type', '>i4'), ('dof', '>i4'), ('goodRASFlag', '>i2'),
('delta', '>f4', (3,)), ('Mdc', '>f4', (3, 3)),
('Pxyz_c', '>f4', (3,))]
header_dtype = np.dtype(header_dtd)
with GzipFile(fname, 'rb') as fid:
hdr_str = fid.read(header_dtype.itemsize)
header = np.ndarray(shape=(), dtype=header_dtype,
buffer=hdr_str)
# dims
dims = header['dims'].astype(int)
dims = dims[:3] if len(dims) == 4 else dims
# vox2ras_tkr
delta = header['delta']
ds = np.array(delta, float)
ns = np.array(dims * ds) / 2.0
v2rtkr = np.array([[-ds[0], 0, 0, ns[0]],
[0, 0, ds[2], -ns[2]],
[0, -ds[1], 0, ns[1]],
[0, 0, 0, 1]], dtype=np.float32)
# ras2vox
d = np.diag(delta)
pcrs_c = dims / 2.0
Mdc = header['Mdc'].T
pxyz_0 = header['Pxyz_c'] - np.dot(Mdc, np.dot(d, pcrs_c))
M = np.eye(4, 4)
M[0:3, 0:3] = np.dot(Mdc, d)
M[0:3, 3] = pxyz_0.T
header = dict(dims=dims, vox2ras_tkr=v2rtkr, vox2ras=M,
zooms=header['delta'])
return header
def _src_vol_dims(s):
w, h, d = [s[f'mri_{key}'] for key in ('width', 'height', 'depth')]
return w, h, d, np.prod([w, h, d])
def _add_interpolator(sp):
    """Compute a sparse matrix to interpolate the data into an MRI volume.

    Operates in place on the list of source-space dicts ``sp``: each gets
    an ``'interpolator'`` CSR matrix of shape (n_mri_voxels, n_grid_points).
    """
    # extract transformation information from mri
    mri_width, mri_height, mri_depth, nvox = _src_vol_dims(sp[0])
    #
    # Convert MRI voxels from destination (MRI volume) to source (volume
    # source space subset) coordinates
    #
    combo_trans = combine_transforms(sp[0]['vox_mri_t'],
                                     invert_transform(sp[0]['src_mri_t']),
                                     'mri_voxel', 'mri_voxel')
    logger.info('Setting up volume interpolation ...')
    # Union of the in-use masks across all source spaces in the list
    inuse = np.zeros(sp[0]['np'], bool)
    for s_ in sp:
        np.logical_or(inuse, s_['inuse'], out=inuse)
    interp = _grid_interp(
        sp[0]['vol_dims'], (mri_width, mri_height, mri_depth),
        combo_trans['trans'], order=1, inuse=inuse)
    assert isinstance(interp, sparse.csr_matrix)
    # Compose the sparse matrices
    for si, s in enumerate(sp):
        if len(sp) == 1:  # no need to do these gymnastics
            this_interp = interp
        else:  # limit it rows that have any contribution from inuse
            # This is the same as the following, but more efficient:
            # any_ = np.asarray(
            #     interp[:, s['inuse'].astype(bool)].sum(1)
            #     )[:, 0].astype(bool)
            # Cumulative count of in-use column hits lets us test each CSR
            # row for any contribution without materializing a submatrix.
            any_ = np.zeros(interp.indices.size + 1, np.int64)
            any_[1:] = s['inuse'][interp.indices]
            np.cumsum(any_, out=any_)
            any_ = np.diff(any_[interp.indptr]) > 0
            assert any_.shape == (interp.shape[0],)
            # Rebuild indptr with the nonzero counts of excluded rows zeroed
            indptr = np.empty_like(interp.indptr)
            indptr[0] = 0
            indptr[1:] = np.diff(interp.indptr)
            indptr[1:][~any_] = 0
            np.cumsum(indptr, out=indptr)
            # Keep only data/indices belonging to the retained rows
            mask = np.repeat(any_, np.diff(interp.indptr))
            indices = interp.indices[mask]
            data = interp.data[mask]
            assert data.shape == indices.shape == (indptr[-1],)
            this_interp = sparse.csr_matrix(
                (data, indices, indptr), shape=interp.shape)
        s['interpolator'] = this_interp
        logger.info('    %d/%d nonzero values for %s'
                    % (len(s['interpolator'].data), nvox, s['seg_name']))
    logger.info('[done]')
def _grid_interp(from_shape, to_shape, trans, order=1, inuse=None):
    """Compute a grid-to-grid linear or nearest interpolation given.

    Returns a CSR matrix of shape (prod(to_shape), prod(from_shape)).
    """
    src_shape = np.array(from_shape, int)
    dst_shape = np.array(to_shape, int)
    trans = np.array(trans, np.float64)  # maps "to" voxels -> "from" voxels
    assert trans.shape == (4, 4) and np.array_equal(trans[3], [0, 0, 0, 1])
    assert src_shape.shape == dst_shape.shape == (3,)
    n_to, n_from = np.prod(dst_shape), np.prod(src_shape)
    if inuse is None:
        inuse = np.ones(n_from, bool)
    assert inuse.dtype == bool
    assert inuse.shape == (n_from,)
    data, indices, indptr = _grid_interp_jit(
        src_shape, dst_shape, trans, order, inuse)
    # Assemble the per-slice CSR pieces produced by the helper
    return sparse.csr_matrix(
        (np.concatenate(data), np.concatenate(indices), np.cumsum(indptr)),
        shape=(n_to, n_from))
# This is all set up to do jit, but it's actually slower!
def _grid_interp_jit(from_shape, to_shape, trans, order, inuse):
    """Build CSR pieces for grid-to-grid interpolation.

    Returns per-slice lists of ``data`` and ``indices`` arrays plus a
    per-row nonzero-count array ``indptr`` that the caller concatenates
    and cumsums into a ``csr_matrix`` (see ``_grid_interp``).
    """
    # Loop over slices to save (lots of) memory
    # Note that it is the slowest incrementing index
    # This is equivalent to using mgrid and reshaping, but faster
    assert order in (0, 1)
    data = list()
    indices = list()
    nvox = np.prod(to_shape)
    indptr = np.zeros(nvox + 1, np.int32)
    mri_width, mri_height, mri_depth = to_shape
    # Homogeneous coordinates of one destination slice (x varies fastest);
    # row 2 (the z coordinate) is filled in per-slice below
    r0__ = np.empty((4, mri_height, mri_width), np.float64)
    r0__[0, :, :] = np.arange(mri_width)
    r0__[1, :, :] = np.arange(mri_height).reshape(1, mri_height, 1)
    r0__[3, :, :] = 1
    r0_ = np.reshape(r0__, (4, mri_width * mri_height))
    width, height, _ = from_shape
    trans = np.ascontiguousarray(trans)
    maxs = (from_shape - 1).reshape(1, 3)
    for p in range(mri_depth):
        r0_[2] = p
        # Transform our vertices from their MRI space into our source space's
        # frame (this is labeled as FIFFV_MNE_COORD_MRI_VOXEL, but it's
        # really a subset of the entire volume!)
        r0 = (trans @ r0_)[:3].T
        if order == 0:
            # Nearest-neighbor: one weight-1 entry per in-bounds voxel
            rx = np.round(r0).astype(np.int32)
            keep = np.where(np.logical_and(np.all(rx >= 0, axis=1),
                                           np.all(rx <= maxs, axis=1)))[0]
            indptr[keep + p * mri_height * mri_width + 1] = 1
            indices.append(_vol_vertex(width, height, *rx[keep].T))
            data.append(np.ones(len(keep)))
            continue
        # Trilinear: find the lower corner of each enclosing source voxel;
        # -1 is allowed because the upper corner may still be in bounds
        rn = np.floor(r0).astype(np.int32)
        good = np.where(np.logical_and(np.all(rn >= -1, axis=1),
                                       np.all(rn <= maxs, axis=1)))[0]
        if len(good) == 0:
            continue
        rns = rn[good]
        r0s = r0[good]
        # Per-axis validity of the lower (jj/kk/pp) and upper (+1) corners
        jj_g, kk_g, pp_g = (rns >= 0).T
        jjp1_g, kkp1_g, ppp1_g = (rns < maxs).T  # same as rns + 1 <= maxs
        # now we take each MRI voxel *in this space*, and figure out how
        # to make its value the weighted sum of voxels in the volume source
        # space. This is a trilinear interpolation based on the
        # fact that we know we're interpolating from one volumetric grid
        # into another.
        jj = rns[:, 0]
        kk = rns[:, 1]
        pp = rns[:, 2]
        vss = np.empty((len(jj), 8), np.int32)
        jjp1 = jj + 1
        kkp1 = kk + 1
        ppp1 = pp + 1
        mask = np.empty((len(jj), 8), bool)
        # The 8 corners of the enclosing cube, with a mask marking corners
        # that actually lie inside the source grid
        vss[:, 0] = _vol_vertex(width, height, jj, kk, pp)
        mask[:, 0] = jj_g & kk_g & pp_g
        vss[:, 1] = _vol_vertex(width, height, jjp1, kk, pp)
        mask[:, 1] = jjp1_g & kk_g & pp_g
        vss[:, 2] = _vol_vertex(width, height, jjp1, kkp1, pp)
        mask[:, 2] = jjp1_g & kkp1_g & pp_g
        vss[:, 3] = _vol_vertex(width, height, jj, kkp1, pp)
        mask[:, 3] = jj_g & kkp1_g & pp_g
        vss[:, 4] = _vol_vertex(width, height, jj, kk, ppp1)
        mask[:, 4] = jj_g & kk_g & ppp1_g
        vss[:, 5] = _vol_vertex(width, height, jjp1, kk, ppp1)
        mask[:, 5] = jjp1_g & kk_g & ppp1_g
        vss[:, 6] = _vol_vertex(width, height, jjp1, kkp1, ppp1)
        mask[:, 6] = jjp1_g & kkp1_g & ppp1_g
        vss[:, 7] = _vol_vertex(width, height, jj, kkp1, ppp1)
        mask[:, 7] = jj_g & kkp1_g & ppp1_g
        # figure out weights for each vertex (fractional position in cube)
        xf = r0s[:, 0] - rns[:, 0].astype(np.float64)
        yf = r0s[:, 1] - rns[:, 1].astype(np.float64)
        zf = r0s[:, 2] - rns[:, 2].astype(np.float64)
        omxf = 1.0 - xf
        omyf = 1.0 - yf
        omzf = 1.0 - zf
        this_w = np.empty((len(good), 8), np.float64)
        this_w[:, 0] = omxf * omyf * omzf
        this_w[:, 1] = xf * omyf * omzf
        this_w[:, 2] = xf * yf * omzf
        this_w[:, 3] = omxf * yf * omzf
        this_w[:, 4] = omxf * omyf * zf
        this_w[:, 5] = xf * omyf * zf
        this_w[:, 6] = xf * yf * zf
        this_w[:, 7] = omxf * yf * zf
        # eliminate zeros
        mask[this_w <= 0] = False
        # eliminate rows where none of inuse are actually present
        row_mask = mask.copy()
        row_mask[mask] = inuse[vss[mask]]
        mask[~(row_mask.any(axis=-1))] = False
        # construct the parts we need
        indices.append(vss[mask])
        indptr[good + p * mri_height * mri_width + 1] = mask.sum(1)
        data.append(this_w[mask])
    return data, indices, indptr
def _pts_in_hull(pts, hull, tolerance=1e-12):
return np.all([np.dot(eq[:-1], pts.T) + eq[-1] <= tolerance
for eq in hull.equations], axis=0)
@verbose
def _filter_source_spaces(surf, limit, mri_head_t, src, n_jobs=1,
                          verbose=None):
    """Remove all source space points closer than a given limit (in mm).

    Operates in place on each source space in ``src``: points outside
    ``surf`` (typically the inner skull), and — when ``limit > 0`` — points
    closer than ``limit`` mm to it, are marked unused.
    """
    if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD and mri_head_t is None:
        raise RuntimeError('Source spaces are in head coordinates and no '
                           'coordinate transform was provided!')
    # How close are the source points to the surface?
    out_str = 'Source spaces are in '
    if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
        inv_trans = invert_transform(mri_head_t)
        out_str += 'head coordinates.'
    elif src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
        out_str += 'MRI coordinates.'
    else:
        out_str += 'unknown (%d) coordinates.' % src[0]['coord_frame']
    logger.info(out_str)
    out_str = 'Checking that the sources are inside the surface'
    if limit > 0.0:
        out_str += ' and at least %6.1f mm away' % (limit)
    logger.info(out_str + ' (will take a few...)')
    # fit a sphere to a surf quickly
    check_inside = _CheckInside(surf)
    # Check that the source is inside surface (often the inner skull)
    for s in src:
        vertno = np.where(s['inuse'])[0]  # can't trust s['vertno'] this deep
        # Convert all points here first to save time
        r1s = s['rr'][vertno]
        if s['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
            r1s = apply_trans(inv_trans['trans'], r1s)
        inside = check_inside(r1s, n_jobs)
        omit_outside = (~inside).sum()
        # vectorized nearest using BallTree (or cdist)
        omit_limit = 0
        if limit > 0.0:
            # only check "inside" points
            idx = np.where(inside)[0]
            check_r1s = r1s[idx]
            if check_inside.inner_r is not None:
                # ... and those that are at least inner_sphere + limit away
                mask = (np.linalg.norm(check_r1s - check_inside.cm, axis=-1) >=
                        check_inside.inner_r - limit / 1000.)
                idx = idx[mask]
                check_r1s = check_r1s[mask]
            dists = _compute_nearest(
                surf['rr'], check_r1s, return_dists=True, method='cKDTree')[1]
            close = (dists < limit / 1000.0)
            omit_limit = np.sum(close)
            inside[idx[close]] = False
        s['inuse'][vertno[~inside]] = False
        del vertno
        s['nuse'] -= (omit_outside + omit_limit)
        s['vertno'] = np.where(s['inuse'])[0]
        if omit_outside > 0:
            extras = [omit_outside]
            extras += ['s', 'they are'] if omit_outside > 1 else ['', 'it is']
            logger.info('    %d source space point%s omitted because %s '
                        'outside the inner skull surface.' % tuple(extras))
        if omit_limit > 0:
            extras = [omit_limit]
            # Bug fix: pluralization must depend on omit_limit here, not on
            # the unrelated omit_outside count.
            extras += ['s'] if omit_limit > 1 else ['']
            extras += [limit]
            logger.info('    %d source space point%s omitted because of the '
                        '%6.1f-mm distance limit.' % tuple(extras))
        # Adjust the patch inds as well if necessary
        if omit_limit + omit_outside > 0:
            _adjust_patch_info(s)
@verbose
def _adjust_patch_info(s, verbose=None):
    """Adjust patch information in place after vertex omission."""
    if s.get('patch_inds') is None:
        return  # no patch info to adjust
    if s['nearest'] is None:
        # This shouldn't happen, but if it does, we can probably come
        # up with a more clever solution
        raise RuntimeError('Cannot adjust patch information properly, '
                           'please contact the mne-python developers')
    _add_patch_info(s)
@verbose
def _ensure_src(src, kind=None, extra='', verbose=None):
    """Ensure we have a source space.

    Accepts either a path (read from disk) or a SourceSpaces instance and,
    when ``kind`` is given, validates (or reduces a mixed space to) that
    kind.
    """
    _check_option(
        'kind', kind, (None, 'surface', 'volume', 'mixed', 'discrete'))
    msg = 'src must be a string or instance of SourceSpaces%s' % (extra,)
    if _check_path_like(src):
        src = str(src)
        if not op.isfile(src):
            raise IOError('Source space file "%s" not found' % src)
        logger.info('Reading %s...' % src)
        src = read_source_spaces(src, verbose=False)
    if not isinstance(src, SourceSpaces):
        raise ValueError('%s, got %s (type %s)' % (msg, src, type(src)))
    if kind is None:
        return src
    # A mixed source space can be reduced to its surface or volume part
    if src.kind == 'mixed' and src.kind != kind:
        if kind == 'surface':
            src = src[:2]
        elif kind == 'volume':
            src = src[2:]
    if src.kind != kind:
        raise ValueError('Source space must contain %s type, got '
                         '%s' % (kind, src.kind))
    return src
def _ensure_src_subject(src, subject):
src_subject = src._subject
if subject is None:
subject = src_subject
if subject is None:
raise ValueError('source space is too old, subject must be '
'provided')
elif src_subject is not None and subject != src_subject:
raise ValueError('Mismatch between provided subject "%s" and subject '
'name "%s" in the source space'
% (subject, src_subject))
return subject
# Threshold above which add_source_space_distances warns that the
# computation will be very slow (10242 = ICO-5 vertices per hemisphere).
_DIST_WARN_LIMIT = 10242  # warn for anything larger than ICO-5
@verbose
def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None):
    """Compute inter-source distances along the cortical surface.

    This function will also try to add patch info for the source space.
    It will only occur if the ``dist_limit`` is sufficiently high that all
    points on the surface are within ``dist_limit`` of a point in the
    source space.

    Parameters
    ----------
    src : instance of SourceSpaces
        The source spaces to compute distances for.
    dist_limit : float
        The upper limit of distances to include (in meters).
        Note: if limit < np.inf, scipy > 0.13 (bleeding edge as of
        10/2013) must be installed. If 0, then only patch (nearest vertex)
        information is added.
    %(n_jobs)s
        Ignored if ``dist_limit==0.``.
    %(verbose)s

    Returns
    -------
    src : instance of SourceSpaces
        The original source spaces, with distance information added.
        The distances are stored in src[n]['dist'].
        Note: this function operates in-place.

    Notes
    -----
    This function can be memory- and CPU-intensive. On a high-end machine
    (2012) running 6 jobs in parallel, an ico-5 (10242 per hemi) source space
    takes about 10 minutes to compute all distances (``dist_limit = np.inf``).
    With ``dist_limit = 0.007``, computing distances takes about 1 minute.

    We recommend computing distances once per source space and then saving
    the source space to disk, as the computed distances will automatically be
    stored along with the source space data for future use.
    """
    from scipy.sparse.csgraph import dijkstra
    n_jobs = check_n_jobs(n_jobs)
    src = _ensure_src(src)
    dist_limit = float(dist_limit)
    if dist_limit < 0:
        raise ValueError('dist_limit must be non-negative, got %s'
                         % (dist_limit,))
    patch_only = (dist_limit == 0)
    if patch_only and not check_version('scipy', '1.3'):
        raise RuntimeError('scipy >= 1.3 is required to calculate patch '
                           'information only, consider upgrading SciPy or '
                           'using dist_limit=np.inf when running '
                           'add_source_space_distances')
    if src.kind != 'surface':
        raise RuntimeError('Currently all source spaces must be of surface '
                           'type')
    parallel, p_fun, _ = parallel_func(_do_src_distances, n_jobs)
    min_dists = list()
    min_idxs = list()
    msg = 'patch information' if patch_only else 'source space distances'
    logger.info('Calculating %s (limit=%s mm)...' % (msg, 1000 * dist_limit))
    max_n = max(s['nuse'] for s in src)
    if not patch_only and max_n > _DIST_WARN_LIMIT:
        warn('Computing distances for %d source space points (in one '
             'hemisphere) will be very slow, consider using add_dist=False'
             % (max_n,))
    for s in src:
        # Graph of mesh-edge distances between all surface vertices
        adjacency = mesh_dist(s['tris'], s['rr'])
        if patch_only:
            # Only need nearest source-space vertex per surface vertex
            min_dist, _, min_idx = dijkstra(
                adjacency, indices=s['vertno'],
                min_only=True, return_predecessors=True)
            min_dists.append(min_dist.astype(np.float32))
            min_idxs.append(min_idx)
            for key in ('dist', 'dist_limit'):
                s[key] = None
        else:
            # Chunked parallel computation of geodesic distances
            d = parallel(p_fun(adjacency, s['vertno'], r, dist_limit)
                         for r in np.array_split(np.arange(len(s['vertno'])),
                                                 n_jobs))
            # deal with indexing so we can add patch info
            min_idx = np.array([dd[1] for dd in d])
            min_dist = np.array([dd[2] for dd in d])
            midx = np.argmin(min_dist, axis=0)
            range_idx = np.arange(len(s['rr']))
            min_dist = min_dist[midx, range_idx]
            min_idx = min_idx[midx, range_idx]
            min_dists.append(min_dist)
            min_idxs.append(min_idx)
            # convert to sparse representation
            d = np.concatenate([dd[0] for dd in d]).ravel()  # already float32
            idx = d > 0
            d = d[idx]
            i, j = np.meshgrid(s['vertno'], s['vertno'])
            i = i.ravel()[idx]
            j = j.ravel()[idx]
            s['dist'] = sparse.csr_matrix(
                (d, (i, j)), shape=(s['np'], s['np']), dtype=np.float32)
            s['dist_limit'] = np.array([dist_limit], np.float32)
    # Let's see if our distance was sufficient to allow for patch info
    if not any(np.any(np.isinf(md)) for md in min_dists):
        # Patch info can be added!
        for s, min_dist, min_idx in zip(src, min_dists, min_idxs):
            s['nearest'] = min_idx
            s['nearest_dist'] = min_dist
            _add_patch_info(s)
    else:
        logger.info('Not adding patch information, dist_limit too small')
    return src
def _do_src_distances(con, vertno, run_inds, limit):
"""Compute source space distances in chunks."""
from scipy.sparse.csgraph import dijkstra
func = partial(dijkstra, limit=limit)
chunk_size = 20 # save memory by chunking (only a little slower)
lims = np.r_[np.arange(0, len(run_inds), chunk_size), len(run_inds)]
n_chunks = len(lims) - 1
# eventually we want this in float32, so save memory by only storing 32-bit
d = np.empty((len(run_inds), len(vertno)), np.float32)
min_dist = np.empty((n_chunks, con.shape[0]))
min_idx = np.empty((n_chunks, con.shape[0]), np.int32)
range_idx = np.arange(con.shape[0])
for li, (l1, l2) in enumerate(zip(lims[:-1], lims[1:])):
idx = vertno[run_inds[l1:l2]]
out = func(con, indices=idx)
midx = np.argmin(out, axis=0)
min_idx[li] = idx[midx]
min_dist[li] = out[midx, range_idx]
d[l1:l2] = out[:, vertno]
midx = np.argmin(min_dist, axis=0)
min_dist = min_dist[midx, range_idx]
min_idx = min_idx[midx, range_idx]
d[d == np.inf] = 0 # scipy will give us np.inf for uncalc. distances
return d, min_idx, min_dist
def get_volume_labels_from_aseg(mgz_fname, return_colors=False,
                                atlas_ids=None):
    """Return a list of names and colors of segmented volumes.

    Parameters
    ----------
    mgz_fname : str
        Filename to read. Typically aseg.mgz or some variant in the freesurfer
        pipeline.
    return_colors : bool
        If True returns also the labels colors.
    atlas_ids : dict | None
        A lookup table providing a mapping from region names (str) to ID values
        (int). Can be None to use the standard Freesurfer LUT.

        .. versionadded:: 0.21.0

    Returns
    -------
    label_names : list of str
        The names of segmented volumes included in this mgz file.
    label_colors : list of str
        The RGB colors of the labels included in this mgz file.

    See Also
    --------
    read_freesurfer_lut

    Notes
    -----
    .. versionchanged:: 0.21.0
       The label names are now sorted in the same order as their corresponding
       values in the MRI file.

    .. versionadded:: 0.9.0
    """
    import nibabel as nib
    atlas = nib.load(mgz_fname)
    data = np.asarray(atlas.dataobj)  # don't need float here
    # Unique atlas id values actually present in this volume
    want = np.unique(data)
    if atlas_ids is None:
        atlas_ids, colors = read_freesurfer_lut()
    elif return_colors:
        raise ValueError('return_colors must be False if atlas_ids are '
                         'provided')
    # restrict to the ones in the MRI, sorted by label name
    # NOTE: relies on dict preserving insertion order so that keep[ki]
    # (built from values()) lines up with enumerate(atlas_ids.keys())
    keep = np.in1d(list(atlas_ids.values()), want)
    keys = sorted((key for ki, key in enumerate(atlas_ids.keys()) if keep[ki]),
                  key=lambda x: atlas_ids[x])
    if return_colors:
        colors = [colors[k] for k in keys]
        out = keys, colors
    else:
        out = keys
    return out
# XXX this should probably be deprecated because it returns surface Labels,
# and probably isn't the way to go moving forward
# XXX this also assumes that the first two source spaces are surf without
# checking, which might not be the case (could be all volumes)
@fill_doc
def get_volume_labels_from_src(src, subject, subjects_dir):
    """Return a list of Label of segmented volumes included in the src space.

    Parameters
    ----------
    src : instance of SourceSpaces
        The source space containing the volume regions.
    %(subject)s
    subjects_dir : str
        Freesurfer folder of the subjects.

    Returns
    -------
    labels_aseg : list of Label
        List of Label of segmented volumes included in src space.
    """
    from . import Label
    from . import get_volume_labels_from_aseg
    # Read the aseg file
    aseg_fname = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
    if not op.isfile(aseg_fname):
        raise IOError('aseg file "%s" not found' % aseg_fname)
    all_labels_aseg = get_volume_labels_from_aseg(
        aseg_fname, return_colors=True)
    # Create a list of Label
    if len(src) < 2:
        raise ValueError('No vol src space in src')
    if any(np.any(s['type'] != 'vol') for s in src[2:]):
        raise ValueError('source spaces have to be of vol type')
    labels_aseg = list()
    for nr in range(2, len(src)):
        vertices = src[nr]['vertno']
        pos = src[nr]['rr'][src[nr]['vertno'], :]
        roi_str = src[nr]['seg_name']
        # Bug fix: previously ``color`` was left unbound when the first ROI
        # was missing from the LUT (NameError), and silently reused the
        # previous iteration's color for later misses. Default to None.
        color = None
        try:
            ind = all_labels_aseg[0].index(roi_str)
            color = np.array(all_labels_aseg[1][ind]) / 255
        except ValueError:
            pass  # ROI not in the LUT; keep color=None
        if 'left' in roi_str.lower():
            hemi = 'lh'
            roi_str = roi_str.replace('Left-', '') + '-lh'
        elif 'right' in roi_str.lower():
            hemi = 'rh'
            roi_str = roi_str.replace('Right-', '') + '-rh'
        else:
            hemi = 'both'
        label = Label(vertices=vertices, pos=pos, hemi=hemi,
                      name=roi_str, color=color,
                      subject=subject)
        labels_aseg.append(label)
    return labels_aseg
def _get_hemi(s):
    """Get a hemisphere from a given source space.

    Returns the hemisphere string, its index (0=lh, 1=rh), and the surf id.
    """
    if s['type'] != 'surf':
        raise RuntimeError('Only surface source spaces supported')
    surf_id = s['id']
    if surf_id == FIFF.FIFFV_MNE_SURF_LEFT_HEMI:
        return 'lh', 0, surf_id
    if surf_id == FIFF.FIFFV_MNE_SURF_RIGHT_HEMI:
        return 'rh', 1, surf_id
    raise ValueError('unknown surface ID %s' % surf_id)
def _get_vertex_map_nn(fro_src, subject_from, subject_to, hemi, subjects_dir,
                       to_neighbor_tri=None):
    """Get a nearest-neigbor vertex match for a given hemi src.

    The to_neighbor_tri can optionally be passed in to avoid recomputation
    if it's already available.
    """
    # adapted from mne_make_source_space.c, knowing accurate=False (i.e.
    # nearest-neighbor mode should be used)
    logger.info('Mapping %s %s -> %s (nearest neighbor)...'
                % (hemi, subject_from, subject_to))
    regs = [op.join(subjects_dir, s, 'surf', '%s.sphere.reg' % hemi)
            for s in (subject_from, subject_to)]
    reg_fro, reg_to = [read_surface(r, return_dict=True)[-1] for r in regs]
    if to_neighbor_tri is not None:
        reg_to['neighbor_tri'] = to_neighbor_tri
    if 'neighbor_tri' not in reg_to:
        reg_to['neighbor_tri'] = _triangle_neighbors(reg_to['tris'],
                                                     reg_to['np'])
    # Bug fix: use a bool array so that ``~morph_inuse`` below is a logical
    # negation. With an int array, ``~`` is bitwise NOT (~0 == -1, ~1 == -2,
    # both truthy), so the occupancy filter selected *every* neighbor and
    # allowed silent double occupation.
    morph_inuse = np.zeros(len(reg_to['rr']), bool)
    best = np.zeros(fro_src['np'], int)
    ones = _compute_nearest(reg_to['rr'], reg_fro['rr'][fro_src['vertno']])
    for v, one in zip(fro_src['vertno'], ones):
        # if it were actually a proper morph map, we would do this, but since
        # we know it's nearest neighbor list, we don't need to:
        # this_mm = mm[v]
        # one = this_mm.indices[this_mm.data.argmax()]
        if morph_inuse[one]:
            # Try the nearest neighbors
            neigh = _get_surf_neighbors(reg_to, one)  # on demand calc
            was = one
            one = neigh[np.where(~morph_inuse[neigh])[0]]
            if len(one) == 0:
                # Bug fix: formatting an empty array with %d raises a
                # TypeError; report the originally requested vertex instead.
                raise RuntimeError('vertex %d would be used multiple times.'
                                   % was)
            one = one[0]
            logger.info('Source space vertex moved from %d to %d because of '
                        'double occupation.' % (was, one))
        best[v] = one
        morph_inuse[one] = True
    return best
@verbose
def morph_source_spaces(src_from, subject_to, surf='white', subject_from=None,
                        subjects_dir=None, verbose=None):
    """Morph an existing source space to a different subject.

    .. warning:: This can be used in place of morphing source estimates for
                 multiple subjects, but there may be consequences in terms
                 of dipole topology.

    Parameters
    ----------
    src_from : instance of SourceSpaces
        Surface source spaces to morph.
    subject_to : str
        The destination subject.
    surf : str
        The brain surface to use for the new source space.
    subject_from : str | None
        The "from" subject. For most source spaces this shouldn't need
        to be provided, since it is stored in the source space itself.
    subjects_dir : str | None
        Path to SUBJECTS_DIR if it is not set in the environment.
    %(verbose)s

    Returns
    -------
    src : instance of SourceSpaces
        The morphed source spaces.

    Notes
    -----
    .. versionadded:: 0.10.0
    """
    # adapted from mne_make_source_space.c
    src_from = _ensure_src(src_from)
    subject_from = _ensure_src_subject(src_from, subject_from)
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    src_out = list()
    for fro in src_from:
        hemi, idx, id_ = _get_hemi(fro)
        to = op.join(subjects_dir, subject_to, 'surf', '%s.%s' % (hemi, surf,))
        logger.info('Reading destination surface %s' % (to,))
        to = read_surface(to, return_dict=True, verbose=False)[-1]
        complete_surface_info(to, copy=False)
        # Now we morph the vertices to the destination
        # The C code does something like this, but with a nearest-neighbor
        # mapping instead of the weighted one::
        #
        #     >>> mm = read_morph_map(subject_from, subject_to, subjects_dir)
        #
        # Here we use a direct NN calculation, since picking the max from the
        # existing morph map (which naively one might expect to be equivalent)
        # differs for ~3% of vertices.
        best = _get_vertex_map_nn(fro, subject_from, subject_to, hemi,
                                  subjects_dir, to['neighbor_tri'])
        # Drop derived surface info that no longer applies after remapping
        for key in ('neighbor_tri', 'tri_area', 'tri_cent', 'tri_nn',
                    'use_tris'):
            del to[key]
        # Remap the used vertices and triangles onto the destination surface
        to['vertno'] = np.sort(best[fro['vertno']])
        to['inuse'] = np.zeros(len(to['rr']), int)
        to['inuse'][to['vertno']] = True
        to['use_tris'] = best[fro['use_tris']]
        to.update(nuse=len(to['vertno']), nuse_tri=len(to['use_tris']),
                  nearest=None, nearest_dist=None, patch_inds=None, pinfo=None,
                  dist=None, id=id_, dist_limit=None, type='surf',
                  coord_frame=FIFF.FIFFV_COORD_MRI, subject_his_id=subject_to,
                  rr=to['rr'] / 1000.)
        src_out.append(to)
        logger.info('[done]\n')
    info = dict(working_dir=os.getcwd(), command_line=_get_call_line())
    return SourceSpaces(src_out, info=info)
@verbose
def _get_morph_src_reordering(vertices, src_from, subject_from, subject_to,
                              subjects_dir=None, verbose=None):
    """Get the reordering indices for a morphed source space.
    Parameters
    ----------
    vertices : list
        The vertices for the left and right hemispheres.
    src_from : instance of SourceSpaces
        The original source space.
    subject_from : str
        The source subject.
    subject_to : str
        The destination subject.
    %(subjects_dir)s
    %(verbose)s
    Returns
    -------
    data_idx : ndarray, shape (n_vertices,)
        The array used to reshape the data.
    from_vertices : list
        The right and left hemisphere vertex numbers for the "from" subject.
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    from_vertices = list()
    data_idxs = list()
    offset = 0  # running row offset: rh rows follow lh rows in the data
    for ii, hemi in enumerate(('lh', 'rh')):
        # Get the mapping from the original source space to the destination
        # subject's surface vertex numbers
        best = _get_vertex_map_nn(src_from[ii], subject_from, subject_to,
                                  hemi, subjects_dir)
        full_mapping = best[src_from[ii]['vertno']]
        # Tragically, we might not have all of our vertno left (e.g. because
        # some are omitted during fwd calc), so we must do some indexing magic:
        # From all vertices, a subset could be chosen by fwd calc:
        used_vertices = np.in1d(full_mapping, vertices[ii])
        from_vertices.append(src_from[ii]['vertno'][used_vertices])
        remaining_mapping = full_mapping[used_vertices]
        # If the surviving mapped vertices do not exactly reproduce the
        # requested destination vertices, the morph is inconsistent
        if not np.array_equal(np.sort(remaining_mapping), vertices[ii]) or \
                not np.in1d(vertices[ii], full_mapping).all():
            raise RuntimeError('Could not map vertices, perhaps the wrong '
                               'subject "%s" was provided?' % subject_from)
        # And our data have been implicitly remapped by the forced ascending
        # vertno order in source spaces
        implicit_mapping = np.argsort(remaining_mapping)  # happens to data
        data_idx = np.argsort(implicit_mapping)  # to reverse the mapping
        data_idx += offset  # hemisphere offset
        data_idxs.append(data_idx)
        offset += len(implicit_mapping)
    data_idx = np.concatenate(data_idxs)
    # this one is really just a sanity check for us, should never be violated
    # by users
    assert np.array_equal(np.sort(data_idx),
                          np.arange(sum(len(v) for v in vertices)))
    return data_idx, from_vertices
def _compare_source_spaces(src0, src1, mode='exact', nearest=True,
dist_tol=1.5e-3):
"""Compare two source spaces.
Note: this function is also used by forward/tests/test_make_forward.py
"""
from numpy.testing import (assert_allclose, assert_array_equal,
assert_equal, assert_, assert_array_less)
from scipy.spatial.distance import cdist
if mode != 'exact' and 'approx' not in mode: # 'nointerp' can be appended
raise RuntimeError('unknown mode %s' % mode)
for si, (s0, s1) in enumerate(zip(src0, src1)):
# first check the keys
a, b = set(s0.keys()), set(s1.keys())
assert_equal(a, b, str(a ^ b))
for name in ['nuse', 'ntri', 'np', 'type', 'id']:
a, b = s0[name], s1[name]
if name == 'id': # workaround for old NumPy bug
a, b = int(a), int(b)
assert_equal(a, b, name)
for name in ['subject_his_id']:
if name in s0 or name in s1:
assert_equal(s0[name], s1[name], name)
for name in ['interpolator']:
if name in s0 or name in s1:
assert name in s0, f'{name} in s1 but not s0'
assert name in s1, f'{name} in s1 but not s0'
n = np.prod(s0['interpolator'].shape)
diffs = (s0['interpolator'] - s1['interpolator']).data
if len(diffs) > 0 and 'nointerp' not in mode:
# 0.1%
assert_array_less(
np.sqrt(np.sum(diffs * diffs) / n), 0.001,
err_msg=f'{name} > 0.1%')
for name in ['nn', 'rr', 'nuse_tri', 'coord_frame', 'tris']:
if s0[name] is None:
assert_(s1[name] is None, name)
else:
if mode == 'exact':
assert_array_equal(s0[name], s1[name], name)
else: # 'approx' in mode
atol = 1e-3 if name == 'nn' else 1e-4
assert_allclose(s0[name], s1[name], rtol=1e-3, atol=atol,
err_msg=name)
for name in ['seg_name']:
if name in s0 or name in s1:
assert_equal(s0[name], s1[name], name)
# these fields will exist if patch info was added
if nearest:
for name in ['nearest', 'nearest_dist', 'patch_inds']:
if s0[name] is None:
assert_(s1[name] is None, name)
else:
atol = 0 if mode == 'exact' else 1e-6
assert_allclose(s0[name], s1[name],
atol=atol, err_msg=name)
for name in ['pinfo']:
if s0[name] is None:
assert_(s1[name] is None, name)
else:
assert_(len(s0[name]) == len(s1[name]), name)
for p1, p2 in zip(s0[name], s1[name]):
assert_(all(p1 == p2), name)
if mode == 'exact':
for name in ['inuse', 'vertno', 'use_tris']:
assert_array_equal(s0[name], s1[name], err_msg=name)
for name in ['dist_limit']:
assert_(s0[name] == s1[name], name)
for name in ['dist']:
if s0[name] is not None:
assert_equal(s1[name].shape, s0[name].shape)
assert_(len((s0['dist'] - s1['dist']).data) == 0)
else: # 'approx' in mode:
# deal with vertno, inuse, and use_tris carefully
for ii, s in enumerate((s0, s1)):
assert_array_equal(s['vertno'], np.where(s['inuse'])[0],
'src%s[%s]["vertno"] != '
'np.where(src%s[%s]["inuse"])[0]'
% (ii, si, ii, si))
assert_equal(len(s0['vertno']), len(s1['vertno']))
agreement = np.mean(s0['inuse'] == s1['inuse'])
assert_(agreement >= 0.99, "%s < 0.99" % agreement)
if agreement < 1.0:
# make sure mismatched vertno are within 1.5mm
v0 = np.setdiff1d(s0['vertno'], s1['vertno'])
v1 = np.setdiff1d(s1['vertno'], s0['vertno'])
dists = cdist(s0['rr'][v0], s1['rr'][v1])
assert_allclose(np.min(dists, axis=1), np.zeros(len(v0)),
atol=dist_tol, err_msg='mismatched vertno')
if s0['use_tris'] is not None: # for "spacing"
assert_array_equal(s0['use_tris'].shape, s1['use_tris'].shape)
else:
assert_(s1['use_tris'] is None)
assert_(np.mean(s0['use_tris'] == s1['use_tris']) > 0.99)
# The above "if s0[name] is not None" can be removed once the sample
# dataset is updated to have a source space with distance info
for name in ['working_dir', 'command_line']:
if mode == 'exact':
assert_equal(src0.info[name], src1.info[name])
else: # 'approx' in mode:
if name in src0.info:
assert_(name in src1.info, '"%s" missing' % name)
else:
assert_(name not in src1.info, '"%s" should not exist' % name)
def _set_source_space_vertices(src, vertices):
    """Restrict each source space in ``src`` to the given vertex lists."""
    assert len(src) == len(vertices)
    for space, verts in zip(src, vertices):
        space['vertno'] = np.array(verts)
        space['nuse'] = len(verts)
        # Rebuild the in-use mask from the new vertex selection
        space['inuse'].fill(0)
        space['inuse'][space['vertno']] = 1
        # Triangulation no longer matches the restricted selection
        space['use_tris'] = np.array([[]], int)
        space['nuse_tri'] = np.array([0])
        # This will fix 'patch_info' and 'pinfo'
        _adjust_patch_info(space, verbose=False)
    return src
def _get_src_nn(s, use_cps=True, vertices=None):
vertices = s['vertno'] if vertices is None else vertices
if use_cps and s.get('patch_inds') is not None:
nn = np.empty((len(vertices), 3))
for vp, p in enumerate(np.searchsorted(s['vertno'], vertices)):
assert(s['vertno'][p] == vertices[vp])
# Project out the surface normal and compute SVD
nn[vp] = np.sum(
s['nn'][s['pinfo'][s['patch_inds'][p]], :], axis=0)
nn /= linalg.norm(nn, axis=-1, keepdims=True)
else:
nn = s['nn'][vertices, :]
return nn
@verbose
def compute_distance_to_sensors(src, info, picks=None, trans=None,
                                verbose=None):
    """Compute distances between vertices and sensors.
    Parameters
    ----------
    src : instance of SourceSpaces
        The object with vertex positions for which to compute distances to
        sensors.
    info : instance of Info
        Measurement information with sensor positions to which distances shall
        be computed.
    %(picks_good_data)s
    %(trans_not_none)s
    %(verbose)s
    Returns
    -------
    depth : array of shape (n_vertices, n_channels)
        The Euclidean distances of source space vertices with respect to
        sensors.
    """
    from scipy.spatial.distance import cdist
    assert isinstance(src, SourceSpaces)
    _validate_type(info, (Info,), 'info')
    # Load the head<->MRI transform if necessary
    if src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
        src_trans, _ = _get_trans(trans, allow_none=False)
    else:
        src_trans = Transform('head', 'head')  # Identity transform
    # get vertex position in same coordinates as for sensors below
    # (builtin ``bool``: the ``np.bool`` alias was deprecated in NumPy 1.20
    # and removed in NumPy 1.24)
    src_pos = np.vstack([
        apply_trans(src_trans, s['rr'][s['inuse'].astype(bool)])
        for s in src
    ])
    # Select channels to be used for distance calculations
    picks = _picks_to_idx(info, picks, 'data', exclude=())
    # get sensor positions
    sensor_pos = []
    dev_to_head = None  # computed lazily, only if MEG channels are present
    for ch in picks:
        # MEG channels are in device coordinates, translate them to head
        if channel_type(info, ch) in ['mag', 'grad']:
            if dev_to_head is None:
                dev_to_head = _ensure_trans(info['dev_head_t'],
                                            'meg', 'head')
            sensor_pos.append(apply_trans(dev_to_head,
                                          info['chs'][ch]['loc'][:3]))
        else:
            sensor_pos.append(info['chs'][ch]['loc'][:3])
    sensor_pos = np.array(sensor_pos)
    depths = cdist(src_pos, sensor_pos)
    return depths
|
olafhauk/mne-python
|
mne/source_space.py
|
Python
|
bsd-3-clause
| 125,513
|
[
"Mayavi"
] |
d77b3b69e5e9a5a2061a0a4b4b292962a759b1927bead8704326a35776768bb1
|
# this lets me be lazy..starts the cloud up like I want from my json, and gives me a browser
# copies the jars for me, etc. Just hangs at the end for 10 minutes while I play with the browser
import unittest
import time,sys
sys.path.extend(['.','..','py'])
import h2o_cmd, h2o, h2o_hosts, h2o_glm
import h2o_browse as h2b
import os, csv, time, socket
# Column order for every row written by run_glms() to the glmbench_gaussian CSV
csv_header = ('time','nodes#','java_heap_GB','dataset','y','x','family','alpha','lambda','n_folds','nLines','nCols','dof','nullDev','resDev','aic','auc','iterations','model_time','model_iterations','val_time','val_iterations','lsm_time', 'wall_clock_secs')
# Dataset locations when running on EC2 (S3 buckets) ...
ec2_files = {
    'allstate':'s3n://h2o-datasets/allstate/train_set.zip',
    'airlines':'s3n://h2o-airlines-unpacked/allyears.csv'
}
# ... and when running against the local in-house HDFS cluster
local_files = {
    'allstate': 'hdfs://192.168.1.176/datasets/allstate/train_set.zip',
    'airlines':'hdfs://192.168.1.176/datasets/airlines_all.csv'
}
def is_ec2():
    """Return True when AWS credentials are present in the environment."""
    return os.environ.get('AWS_ACCESS_KEY_ID') is not None
def run_glms(file,configs):
    # Run one GLM per config dict against the parsed dataset and append one
    # timing/metrics row per run to the 'glmbench_gaussian' CSV.
    # NOTE: Python 2 print-statement syntax throughout this file.
    output = None
    # Append to an existing benchmark CSV; write the header only on creation
    if not os.path.exists('glmbench_gaussian'):
        output = open('glmbench_gaussian','w')
        output.write(','.join(csv_header)+'\n')
    else:
        output = open('glmbench_gaussian','a')
    csvWrt = csv.DictWriter(output, fieldnames=csv_header, restval=None, dialect='excel', extrasaction='ignore',delimiter=',')
    # header!
    # csvWrt.writerow(dict((fn,fn) for fn in csv_header))
    csvWrt.writeheader()
    try:
        java_heap_GB = h2o.nodes[0].java_heap_GB
        k = parse_file(file)
        # gives us some reporting on missing values, constant values, to see if we have x specified well
        # figures out everything from parseResult['destination_key']
        # needs y to avoid output column (which can be index or name)
        # assume all the configs have the same y..just check with the firs tone
        # NOTE(review): goodX is computed but never used below — confirm intent
        goodX = h2o_glm.goodXFromColumnInfo(y=configs[0]['y'], key=k, timeoutSecs=300)
        for kwargs in configs:
            start = time.time()
            res = h2o.nodes[0].GLM(k, timeoutSecs=6000000, pollTimeoutSecs=180, **kwargs)
            wall_clock_secs = time.time() - start
            glm = res['GLMModel']
            print "glm model time (milliseconds):", glm['model_time']
            print "glm validations[0] time (milliseconds):", glm['validations'][0]['val_time']
            print "glm lsm time (milliseconds):", glm['lsm_time']
            print 'glm computation time',res['computation_time']
            # NOTE(review): coefs and max_len are assigned but unused
            coefs = glm['coefficients']
            print 'wall clock in', wall_clock_secs, 'secs'
            max_len = 0
            val = glm['validations'][0]
            # Merge run parameters, model stats and validation stats into one
            # row; DictWriter ignores keys not in csv_header (extrasaction)
            row = {'time':time.asctime(),'nodes#':len(h2o.nodes)}
            row.update(kwargs)
            row.update(glm)
            row.update(val)
            row.update({'wall_clock_secs': wall_clock_secs})
            row.update({'java_heap_GB': java_heap_GB})
            csvWrt.writerow(row)
            h2o.nodes[0].remove_key(k)
    finally:
        output.close()
def parse_file(f):
    """Import *f* from HDFS into the cloud, parse it, and return the
    parsed destination key."""
    imported = h2o.nodes[0].import_hdfs(f)['succeeded'][0]
    parsed = h2o.nodes[0].parse(imported['key'],timeoutSecs=3600)
    return parsed['destination_key']
if __name__ == '__main__':
    h2o.parse_our_args()
    files = None
    # Pick dataset URIs and cloud-build strategy based on where we run
    if is_ec2():
        files = ec2_files
        h2o_hosts.build_cloud_with_hosts()
    else:
        files = local_files
        h2o_hosts.build_cloud_with_hosts(use_hdfs=True,base_port=54321)
    # want to ignore columns with missing values, since GLM throws away those rows, (won't analyze as many rows)
    # Distance, CRSEElapsedTime has some...I guess ignore
    # NOTE(review): the listing below documents airlines_all.csv; the 'x'
    # indices further down refer to these column numbers
    # column Year 0 type: int
    # column Month 1 type: int
    # column DayofMonth 2 type: int
    # column DayOfWeek 3 type: int
    # column DepTime 4 type: int num_missing_values: 2302136
    # column CRSDepTime 5 type: int
    # column ArrTime 6 type: int num_missing_values: 2584478
    # column CRSArrTime 7 type: int
    # column UniqueCarrier 8 type: enum enum_domain_size: 29
    # column FlightNum 9 type: int
    # column TailNum 10 type: int num_missing_values: 123534969
    # column ActualElapsedTime 11 type: int num_missing_values: 2587529
    # column CRSElapsedTime 12 type: int num_missing_values: 26234
    # column AirTime 13 type: int num_missing_values: 123534969
    # column ArrDelay 14 type: int num_missing_values: 2587529
    # column DepDelay 15 type: int num_missing_values: 2302136
    # column Origin 16 type: enum enum_domain_size: 347
    # column Dest 17 type: enum enum_domain_size: 352
    # column Distance 18 type: int num_missing_values: 202000
    # column TaxiIn 19 type: int num_missing_values: 123534969
    # column TaxiOut 20 type: int num_missing_values: 123534969
    # column Cancelled 21 type: int
    # column CancellationCode 22 type: enum enum_domain_size: 5 num_missing_values: 38955823
    # column Diverted 23 type: int
    # column CarrierDelay 24 type: int num_missing_values: 123534969
    # column WeatherDelay 25 type: int num_missing_values: 123534969
    # column NASDelay 26 type: int num_missing_values: 123534969
    # column SecurityDelay 27 type: int num_missing_values: 123534969
    # column LateAircraftDelay 28 type: int num_missing_values: 123534969
    # column IsArrDelayed 29 type: enum enum_domain_size: 2
    # column IsDepDelayed 30 type: enum enum_domain_size: 2
    # run allstate
    run_glms(files['allstate'],[{'y':'Claim_Amount','lambda':l,'alpha':a,'family':'gaussian','n_folds':1}
#        for l in (1e-4,1e-5)
#        for a in (1.0,0.5,0.0)])
        for l in [1e-4]
        for a in [0.5]])
    # was:
    # x = '0,1,2,3,4,5,6,7,8,9,12,16,17,18'
    x = '0,1,2,3,5,7,8,9,16,17'
    # run airlines
    run_glms(files['airlines'],[{'y':'IsArrDelayed','x':x,'lambda':l,'alpha':a,'family':'gaussian','n_folds':1,'case':1}
#        for l in (0.035,0.025,1e-2,5e-3,1e-3,5e-4,1e-4,5e-5,1e-5,1e-8)
#        for a in (1.0,0.5,0.0)])
        for l in [1e-4]
        for a in [0.5]])
    h2o.tear_down_cloud()
|
janezhango/BigDataMachineLearning
|
py/testdir_hosts/glm_bench_gaussian.py
|
Python
|
apache-2.0
| 6,075
|
[
"Gaussian"
] |
5f2b0357f771a609f0214f87270bc86ac55024c7713aa306a357f2f2610a4698
|
# Lettuce acceptance-test steps for Studio's Advanced Settings page.
#pylint: disable=C0111
#pylint: disable=W0621
from lettuce import world, step
from nose.tools import assert_false, assert_equal, assert_regexp_matches # pylint: disable=E0611
from common import type_in_codemirror, press_the_notification_button
# CSS selectors for the policy-key inputs and their JSON value editors
KEY_CSS = '.key input.policy-key'
VALUE_CSS = 'textarea.json'
# A policy entry known to exist in every new course, used as the test target
DISPLAY_NAME_KEY = "display_name"
DISPLAY_NAME_VALUE = '"Robot Super Course"'
@step('I select the Advanced Settings$')
def i_select_advanced_settings(step):
    # Open the course settings menu, then navigate straight to the page
    world.click_course_settings()
    # The click handlers are set up so that if you click <body>
    # the menu disappears. This means that if we're even a *little*
    # bit off on the last item ('Advanced Settings'), the menu
    # will close and the test will fail.
    # For this reason, we retrieve the link and visit it directly
    # This is what the browser *should* be doing, since it's just a native
    # link with no JavaScript involved.
    link_css = 'li.nav-course-settings-advanced a'
    world.wait_for_visible(link_css)
    link = world.css_find(link_css).first['href']
    world.visit(link)
@step('I am on the Advanced Course Settings page in Studio$')
def i_am_on_advanced_course_settings(step):
    # Composite step: create a fresh course, then open Advanced Settings
    step.given('I have opened a new course in Studio')
    step.given('I select the Advanced Settings')
@step(u'I edit the value of a policy key$')
def edit_the_value_of_a_policy_key(step):
    # Append a character to the display_name value WITHOUT saving
    type_in_codemirror(get_index_of(DISPLAY_NAME_KEY), 'X')
@step(u'I edit the value of a policy key and save$')
def edit_the_value_of_a_policy_key_and_save(step):
    change_display_name_value(step, '"foo"')
@step('I create a JSON object as a value for "(.*)"$')
def create_JSON_object(step, key):
    # Set the named policy key to a two-entry JSON object and save
    change_value(step, key, '{"key": "value", "key_2": "value_2"}')
@step('I create a non-JSON value not in quotes$')
def create_value_not_in_quotes(step):
    # An unquoted bare string; the server coerces it to a JSON string
    change_display_name_value(step, 'quote me')
@step('I see default advanced settings$')
def i_see_default_advanced_settings(step):
    # Test only a few of the existing properties (there are around 34 of them)
    assert_policy_entries(
        ["advanced_modules", DISPLAY_NAME_KEY, "show_calculator"], ["[]", DISPLAY_NAME_VALUE, "false"])
@step('the settings are alphabetized$')
def they_are_alphabetized(step):
    # Collect the key column in page order and compare against sorted order
    key_elements = world.css_find(KEY_CSS)
    all_keys = []
    for key in key_elements:
        all_keys.append(key.value)
    assert_equal(sorted(all_keys), all_keys, "policy keys were not sorted")
@step('it is displayed as formatted$')
def it_is_formatted(step):
    # Saved JSON objects are pretty-printed on reload
    assert_policy_entries(['discussion_topics'], ['{\n  "key": "value",\n  "key_2": "value_2"\n}'])
@step('I get an error on save$')
def error_on_save(step):
    assert_regexp_matches(world.css_text('#notification-error-description'), 'Incorrect setting format')
@step('it is displayed as a string')
def it_is_displayed_as_string(step):
    # Unquoted input comes back wrapped in JSON string quotes
    assert_policy_entries([DISPLAY_NAME_KEY], ['"quote me"'])
@step(u'the policy key value is unchanged$')
def the_policy_key_value_is_unchanged(step):
    assert_equal(get_display_name_value(), DISPLAY_NAME_VALUE)
@step(u'the policy key value is changed$')
def the_policy_key_value_is_changed(step):
    assert_equal(get_display_name_value(), '"foo"')
def assert_policy_entries(expected_keys, expected_values):
    # Verify that each expected key is present on the page and that its
    # displayed JSON value matches the expected one.
    for expected_key, expected_value in zip(expected_keys, expected_values):
        position = get_index_of(expected_key)
        assert_false(position == -1, "Could not find key: {key}".format(key=expected_key))
        actual_value = world.css_find(VALUE_CSS)[position].value
        assert_equal(
            expected_value, actual_value,
            "Expected {} to have value {} but found {}".format(
                expected_key, expected_value, actual_value)
        )
def get_index_of(expected_key):
    # Return the row index of the policy entry whose key matches, or -1.
    # Re-query by index on every iteration: holding on to the element
    # array can yield stale references.
    for idx in range(len(world.css_find(KEY_CSS))):
        if world.css_value(KEY_CSS, index=idx) == expected_key:
            return idx
    return -1
def get_display_name_value():
    # Read the currently displayed JSON value for the display_name key
    index = get_index_of(DISPLAY_NAME_KEY)
    return world.css_value(VALUE_CSS, index=index)
def change_display_name_value(step, new_value):
    change_value(step, DISPLAY_NAME_KEY, new_value)
def change_value(step, key, new_value):
    # Type the new value into the key's CodeMirror editor and save,
    # waiting for the save notification round-trip to finish
    type_in_codemirror(get_index_of(key), new_value)
    world.wait(0.5)
    press_the_notification_button(step, "Save")
    world.wait_for_ajax_complete()
|
TangXT/GreatCatMOOC
|
cms/djangoapps/contentstore/features/advanced_settings.py
|
Python
|
agpl-3.0
| 4,393
|
[
"VisIt"
] |
3f07b84cf0c96d4cef8d690703dc5a8bfdf49fa50cb28a7a4356eae8bcd1c222
|
#!/usr/bin/env python
#
# fa2lens - Extract length data from a fasta file
#
# Copyright (C) 2013, Jian-Long Huang
# Licensed under The MIT License
# http://opensource.org/licenses/MIT
#
# Author: Jian-Long Huang (jianlong@ntu.edu.tw)
# Version: 0.2
# Created: 2013.1.26
#
# Required:
# * Biopython: http://biopython.org
#
# Usage: fa2lens <input.fa> [options]
#
# Options:
# -s, --sep STR: seperator (default: newline)
# -o, --output STR: output file name. If this option is not specified, the script will generate
# one with unique identifier at current directory.
#
# File formats:
# * <input.fa>: fasta
import argparse
from Bio import SeqIO
from fhandle import name
def main():
    """Parse CLI arguments and write one length per FASTA record.

    Reads <input.fa>, computes the length of every sequence, and writes the
    lengths (joined by --sep) to the output file. When -o is omitted a
    unique output name is generated next to the current directory.
    """
    parser = argparse.ArgumentParser(description='fa2lens - Extract length data from a fasta file')
    parser.add_argument('input_file')
    parser.add_argument('-s', '--sep', dest='sep', default='\n',
                        help='seperator (default: newline)')
    parser.add_argument('-o', '--output', dest='output_file',
                        help='output file name. If this option is not specified, the script will generate '
                        'one with unique identifier at current directory.')
    args = parser.parse_args()
    if args.output_file is None:
        args.output_file = args.input_file + '_out_' + name.genid() + '.leng.txt'
    with open(args.input_file, 'r') as fin, open(args.output_file, 'w') as fw:
        # Stream the records: len(SeqRecord) is the sequence length, so
        # there is no need to materialize the whole file in memory as the
        # previous list(SeqIO.parse(...)) did.
        records = (str(len(record)) for record in SeqIO.parse(fin, 'fasta'))
        fw.write(args.sep.join(records))
        fw.flush()
if __name__ == '__main__':
    main()
|
jlhg/bdorpy
|
bdorpy/fa2lens.py
|
Python
|
mit
| 1,611
|
[
"Biopython"
] |
4bcc5597e32479d859db95047c911da598195f22f04d62ba3c9871450ff3f8e3
|
# $HeadURL$
"""
Encoding and decoding for dirac, Ids:
i -> int
I -> long
f -> float
b -> bool
s -> string
z -> datetime
n -> none
l -> list
t -> tuple
d -> dictionary
k -> DError
"""
__RCSID__ = "$Id$"
import types
import datetime
from DIRAC.Core.Utilities.DErrno import DError
# Sample datetime/date/time objects used only to capture their concrete
# types for encoder dispatch (Python 2 idiom)
_dateTimeObject = datetime.datetime.utcnow()
_dateTimeType = type( _dateTimeObject )
_dateType = type( _dateTimeObject.date() )
_timeType = type( _dateTimeObject.time() )
# Dispatch tables filled in below: python type -> encoder function, and
# wire-format type-tag character -> decoder function
g_dEncodeFunctions = {}
g_dDecodeFunctions = {}
#Encoding and decoding ints
# Wire format: "i<digits>e", e.g. 42 -> "i42e" (bencode-style)
def encodeInt( iValue, eList ):
  eList.extend( ( "i", str( iValue ), "e" ) )
def decodeInt( data, i ):
  # i points at the leading 'i'; returns ( value, index past the 'e' )
  i += 1
  end = data.index( 'e', i )
  value = int( data[i:end] )
  return ( value, end + 1 )
g_dEncodeFunctions[ types.IntType ] = encodeInt
g_dDecodeFunctions[ "i" ] = decodeInt
#Encoding and decoding longs
# Wire format: "I<digits>e" (Python 2 long; decoder uses the long() builtin)
def encodeLong( iValue, eList ):
  # corrected by KGG eList.extend( ( "l", str( iValue ), "e" ) )
  eList.extend( ( "I", str( iValue ), "e" ) )
def decodeLong( data, i ):
  i += 1
  end = data.index( 'e', i )
  value = long( data[i:end] )
  return ( value, end + 1 )
g_dEncodeFunctions[ types.LongType ] = encodeLong
g_dDecodeFunctions[ "I" ] = decodeLong
#Encoding and decoding floats
# Wire format: "f<float>e"; when str() produced scientific notation the
# exponent introduces a second 'e' ("f<mantissa>e<signed-exp>e"), detected
# by the '+'/'-' right after the first 'e'
def encodeFloat( iValue, eList ):
  eList.extend( ( "f", str( iValue ), "e" ) )
def decodeFloat( data, i ):
  i += 1
  end = data.index( 'e', i )
  if end + 1 < len( data ) and data[end + 1] in ( '+', '-' ):
    # scientific notation: reassemble mantissa * 10**exponent
    eI = end
    end = data.index( 'e', end + 1 )
    value = float( data[i:eI] ) * 10 ** int( data[eI + 1:end] )
  else:
    value = float( data[i:end] )
  return ( value, end + 1 )
g_dEncodeFunctions[ types.FloatType ] = encodeFloat
g_dDecodeFunctions[ "f" ] = decodeFloat
#Encoding and decoding booleand
# Wire format: "b1" for True, "b0" for False
def encodeBool( bValue, eList ):
  if bValue:
    eList.append( "b1" )
  else:
    eList.append( "b0" )
def decodeBool( data, i ):
  if data[ i + 1 ] == "0":
    return ( False, i + 2 )
  else:
    return ( True, i + 2 )
g_dEncodeFunctions[ types.BooleanType ] = encodeBool
g_dDecodeFunctions[ "b" ] = decodeBool
#Encoding and decoding strings
# Wire format: "s<length>:<bytes>" (length-prefixed, no terminator needed)
def encodeString( sValue, eList ):
  eList.extend( ( 's', str( len( sValue ) ), ':', sValue ) )
def decodeString( data, i ):
  i += 1
  colon = data.index( ":", i )
  value = int( data[ i : colon ] )
  colon += 1
  end = colon + value
  return ( data[ colon : end] , end )
g_dEncodeFunctions[ types.StringType ] = encodeString
g_dDecodeFunctions[ "s" ] = decodeString
#Encoding and decoding unicode strings
# Wire format: "u<byte-length>:<utf-8 bytes>"; the length counts encoded
# bytes, not characters (Python 2 unicode type)
def encodeUnicode( sValue, eList ):
  valueStr = sValue.encode( 'utf-8' )
  eList.extend( ( 'u', str( len( valueStr ) ), ':', valueStr ) )
def decodeUnicode( data, i ):
  i += 1
  colon = data.index( ":", i )
  value = int( data[ i : colon ] )
  colon += 1
  end = colon + value
  return ( unicode( data[ colon : end], 'utf-8' ) , end )
g_dEncodeFunctions[ types.UnicodeType ] = encodeUnicode
g_dDecodeFunctions[ "u" ] = decodeUnicode
#Encoding and decoding datetime
# Wire format: 'z' + subtype char ('a' datetime, 'd' date, 't' time)
# followed by the object's components encoded as a tuple
def encodeDateTime( oValue, eList ):
  if type( oValue ) == _dateTimeType:
    tDateTime = ( oValue.year, oValue.month, oValue.day, \
                  oValue.hour, oValue.minute, oValue.second, \
                  oValue.microsecond, oValue.tzinfo )
    eList.append( "za" )
    # corrected by KGG encode( tDateTime, eList )
    g_dEncodeFunctions[ type( tDateTime ) ]( tDateTime, eList )
  elif type( oValue ) == _dateType:
    tData = ( oValue.year, oValue.month, oValue. day )
    eList.append( "zd" )
    # corrected by KGG encode( tData, eList )
    g_dEncodeFunctions[ type( tData ) ]( tData, eList )
  elif type( oValue ) == _timeType:
    tTime = ( oValue.hour, oValue.minute, oValue.second, oValue.microsecond, oValue.tzinfo )
    eList.append( "zt" )
    # corrected by KGG encode( tTime, eList )
    g_dEncodeFunctions[ type( tTime ) ]( tTime, eList )
  else:
    raise Exception( "Unexpected type %s while encoding a datetime object" % str( type( oValue ) ) )
def decodeDateTime( data, i ):
  # Read the subtype char, decode the component tuple, then rebuild the
  # matching datetime/date/time object from it
  i += 1
  dataType = data[i]
  # corrected by KGG tupleObject, i = decode( data, i + 1 )
  tupleObject, i = g_dDecodeFunctions[ data[ i + 1 ] ]( data, i + 1 )
  if dataType == 'a':
    dtObject = datetime.datetime( *tupleObject )
  elif dataType == 'd':
    dtObject = datetime.date( *tupleObject )
  elif dataType == 't':
    dtObject = datetime.time( *tupleObject )
  else:
    raise Exception( "Unexpected type %s while decoding a datetime object" % dataType )
  return ( dtObject, i )
g_dEncodeFunctions[ _dateTimeType ] = encodeDateTime
g_dEncodeFunctions[ _dateType ] = encodeDateTime
g_dEncodeFunctions[ _timeType ] = encodeDateTime
g_dDecodeFunctions[ 'z' ] = decodeDateTime
#Encoding and decoding None
# Wire format: a single 'n'
def encodeNone( oValue, eList ):
  eList.append( "n" )
def decodeNone( data, i ):
  return ( None, i + 1 )
g_dEncodeFunctions[ types.NoneType ] = encodeNone
g_dDecodeFunctions[ 'n' ] = decodeNone
#Encode and decode a list
# Wire format: 'l' + encoded items + 'e'
def encodeList( lValue, eList ):
  eList.append( "l" )
  for uObject in lValue:
    g_dEncodeFunctions[ type( uObject ) ]( uObject, eList )
  eList.append( "e" )
def decodeList( data, i ):
  oL = []
  i += 1
  while data[ i ] != "e":
    ob, i = g_dDecodeFunctions[ data[ i ] ]( data, i )
    oL.append( ob )
  return( oL, i + 1 )
g_dEncodeFunctions[ types.ListType ] = encodeList
g_dDecodeFunctions[ "l" ] = decodeList
#Encode and decode a tuple
# Wire format: 't' + encoded items + 'e'; decoding reuses decodeList
def encodeTuple( lValue, eList ):
  eList.append( "t" )
  for uObject in lValue:
    g_dEncodeFunctions[ type( uObject ) ]( uObject, eList )
  eList.append( "e" )
def decodeTuple( data, i ):
  oL, i = decodeList( data, i )
  return ( tuple( oL ), i )
g_dEncodeFunctions[ types.TupleType ] = encodeTuple
g_dDecodeFunctions[ "t" ] = decodeTuple
#Encode and decode a dictionary
# Wire format: 'd' + (encoded key, encoded value)* + 'e'; keys are emitted
# in sorted order so equal dicts always produce identical encodings
def encodeDict( dValue, eList ):
  eList.append( "d" )
  for key in sorted( dValue ):
    g_dEncodeFunctions[ type( key ) ]( key, eList )
    g_dEncodeFunctions[ type( dValue[key] ) ]( dValue[key], eList )
  eList.append( "e" )
def decodeDict( data, i ):
  oD = {}
  i += 1
  while data[ i ] != "e":
    k, i = g_dDecodeFunctions[ data[ i ] ]( data, i )
    oD[ k ], i = g_dDecodeFunctions[ data[ i ] ]( data, i )
  return ( oD, i + 1 )
g_dEncodeFunctions[ types.DictType ] = encodeDict
g_dDecodeFunctions[ "d" ] = decodeDict
# Encoding and decoding DError
# Wire format: 'k' + encoded errno + encoded errmsg + encoded call stack + 'e'
def encodeDError( dErrorValue, eList ):
  eList.append( 'k' )
  g_dEncodeFunctions[type( dErrorValue.errno )]( dErrorValue.errno, eList )
  g_dEncodeFunctions[type( dErrorValue.errmsg )]( dErrorValue.errmsg, eList )
  g_dEncodeFunctions[type( dErrorValue._callStack )]( dErrorValue._callStack, eList )
  eList.append( 'e' )
def decodeDError( data, i ):
  i += 1
  errno, i = g_dDecodeFunctions[ data[i] ]( data, i )
  errmsg, i = g_dDecodeFunctions[ data[i] ]( data, i )
  callStack, i = g_dDecodeFunctions[ data[i] ]( data, i )
  de = DError( errno, errmsg )
  de._callStack = callStack
  return ( de, i + 1 )
# Register via a throwaway instance so we key on the DError class itself
de = DError(0)
g_dEncodeFunctions[ type(de) ] = encodeDError
g_dDecodeFunctions[ "k" ] = decodeDError
#Encode function
def encode( uObject ):
  """ Encode an object into its wire string (dispatch on concrete type). """
  try:
    eList = []
    #print "ENCODE FUNCTION : %s" % g_dEncodeFunctions[ type( uObject ) ]
    g_dEncodeFunctions[ type( uObject ) ]( uObject, eList )
    return "".join( eList )
  except Exception:
    raise
def decode( data ):
  """ Decode a wire string; returns ( object, charsConsumed ). """
  if not data:
    return data
  try:
    #print "DECODE FUNCTION : %s" % g_dDecodeFunctions[ sStream [ iIndex ] ]
    return g_dDecodeFunctions[ data[ 0 ] ]( data, 0 )
  except Exception:
    raise
# Round-trip smoke test (Python 2 print-statement syntax)
if __name__ == "__main__":
  gObject = {2:"3", True : ( 3, None ), 2.0 * 10 ** 20 : 2.0 * 10 ** -10 }
  print "Initial: %s" % gObject
  gData = encode( gObject )
  print "Encoded: %s" % gData
  print "Decoded: %s, [%s]" % decode( gData )
|
vmendez/DIRAC
|
Core/Utilities/DEncode.py
|
Python
|
gpl-3.0
| 7,767
|
[
"DIRAC"
] |
f99231715b3893ebbdcd8779d3f54fc3337f82dbd849b2f543c9ca868b14110c
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
"""Test of sayAll output."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
########################################################################
# We wait for the focus to be on a blank Firefox window.
#
sequence.append(WaitForWindowActivate(utils.firefoxFrameNames, None))
########################################################################
# Load the local blockquote test case.
#
sequence.append(KeyComboAction("<Control>l"))
sequence.append(WaitForFocus(acc_role=pyatspi.ROLE_ENTRY))
sequence.append(TypeAction(utils.htmlURLPrefix + "bug-591351-1.html"))
sequence.append(KeyComboAction("Return"))
sequence.append(WaitForDocLoad())
sequence.append(WaitForFocus("Pause test",
acc_role=pyatspi.ROLE_DOCUMENT_FRAME))
sequence.append(PauseAction(3000))
########################################################################
# Press Control+Home to move to the top.
#
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.AssertPresentationAction(
"Top of file",
["BRAILLE LINE: 'Hello world.'",
" VISIBLE: 'Hello world.', cursor=1",
"SPEECH OUTPUT: 'Hello world.",
"'"]))
########################################################################
# SayAll to the End.
#
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Add"))
sequence.append(utils.AssertPresentationAction(
"KP_Add to do a SayAll",
["SPEECH OUTPUT: 'Hello world.",
"",
"",
" I wonder what a bevezeto is. I should Google that. ",
"",
" Aha! It is the Hungarian word for \"Introduction\". Here is some proof link . I really think we need to get Attila to teach the Orca team some Hungarian. Maybe one (really easy) phrase per bug comment. separator Foo link'"]))
########################################################################
# Move to the location bar by pressing Control+L. When it has focus
# type "about:blank" and press Return to restore the browser to the
# conditions at the test's start.
#
sequence.append(KeyComboAction("<Control>l"))
sequence.append(WaitForFocus(acc_role=pyatspi.ROLE_ENTRY))
sequence.append(TypeAction("about:blank"))
sequence.append(KeyComboAction("Return"))
sequence.append(WaitForDocLoad())
# Just a little extra wait to let some events get through.
#
sequence.append(PauseAction(3000))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
h4ck3rm1k3/orca-sonar
|
test/keystrokes/firefox/sayAll_bug-591351-1.py
|
Python
|
lgpl-2.1
| 2,537
|
[
"ORCA"
] |
c6e58783221ec9b724585ce9ed54bc667ec9b385271841eb5d7ac9f3c990230f
|
#
import numpy as np
import healpy as hp
import astropy.io.fits as pyfits
from multiprocessing import Pool
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from quicksipManera import *
import fitsio
### ------------ A couple of useful conversions -----------------------
def zeropointToScale(zp):
    """Convert a photometric zero point to a linear flux scale (22.5 -> 1)."""
    return 10.**((zp - 22.5)/2.5)
def nanomaggiesToMag(nm):
    """Convert a flux in nanomaggies to an AB magnitude.

    Fix: the previous ``log(nm, 10.)`` relied on a bare ``log`` that is not
    imported by this module's visible imports (it only worked via a star
    import); ``np.log10`` is explicit and unambiguous.
    """
    return -2.5 * (np.log10(nm) - 9.)
def Magtonanomaggies(m):
    """Convert an AB magnitude to a flux in nanomaggies (inverse of above)."""
    return 10.**(-m/2.5+9.)
### ------------ SHARED CLASS: HARDCODED INPUTS GO HERE ------------------------
### Please, add here your own harcoded values if any, so other may use them
class mysample(object):
    """
    This class mantains the basic information of the sample
    to minimize hardcoded parameters in the test functions

    Everyone is meant to call mysample to obtain information like
         - path to ccd-annotated files  : ccds
         - catalog name                 : catalog
         - zero points                  : zp0
         - magnitude limits (recm)      : recm
         - photoz requirements          : phreq
         - extintion coefficient        : extc
         - extintion index              : be
         - predicted frac exposures     : FracExp

    Current Inputs are: survey, DR, band, localdir, verb
         survey: DECaLS, MZLS, BASS
         DR:     DR3, DR4
         band:   g, r, z
         localdir: output directory
         verb:   verbosity flag
    """

    def __init__(self, survey, DR, band, localdir, verb):
        """
        Initialize image survey, data release, band, output path.

        Raises RuntimeError for any unknown or inconsistent
        survey/DR/band combination (error-message typos of the original
        version fixed: 'DECAaLS', missing quote around MZLS,
        'inconsisent', 'Data Realease').
        """
        self.survey = survey
        self.DR = DR
        self.band = band
        self.localdir = localdir
        self.verbose = verb
        # Check bands
        if self.band != 'g' and self.band != 'r' and self.band != 'z':
            raise RuntimeError("Band seems wrong options are 'g' 'r' 'z'")
        # Check surveys
        if self.survey != 'DECaLS' and self.survey != 'BASS' and self.survey != 'MZLS':
            raise RuntimeError("Survey seems wrong options are 'DECaLS' 'BASS' 'MZLS'")
        # Annotated CCD paths (NERSC project filesystem)
        if self.DR == 'DR3':
            inputdir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr3/'
            self.ccds = inputdir + 'ccds-annotated-decals.fits.gz'
            self.catalog = 'DECaLS_DR3'
            if self.survey != 'DECaLS':
                raise RuntimeError("Survey name seems inconsistent")
        elif self.DR == 'DR4':
            inputdir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr4/'
            if band == 'g' or band == 'r':
                # self.ccds = inputdir+'ccds-annotated-dr4-90prime.fits.gz'
                self.ccds = inputdir + 'ccds-annotated-bass.fits.gz'
                self.catalog = 'BASS_DR4'
                if self.survey != 'BASS':
                    raise RuntimeError("Survey name seems inconsistent")
            elif band == 'z':
                # self.ccds = inputdir+'ccds-annotated-dr4-mzls.fits.gz'
                self.ccds = inputdir + 'ccds-annotated-mzls.fits.gz'
                self.catalog = 'MZLS_DR4'
                if self.survey != 'MZLS':
                    raise RuntimeError("Survey name seems inconsistent")
            else:
                raise RuntimeError("Input sample band seems inconsistent")
        else:
            raise RuntimeError("Data Release seems wrong")
        # Predicted survey exposure fractions (index i = fraction of the
        # footprint covered by exactly i+1 exposures)
        if self.survey == 'DECaLS':
            # DECALS final survey will be covered by
            # 1, 2, 3, 4, and 5 exposures in the following fractions:
            self.FracExp = [0.02, 0.24, 0.50, 0.22, 0.02]
        elif self.survey == 'BASS':
            # BASS coverage fractions for 1,2,3,4,5,6 exposures are:
            self.FracExp = [0.0014, 0.0586, 0.8124, 0.1203, 0.0054, 0.0019]
        elif self.survey == 'MZLS':
            # For MzLS fill factors of 100% with a coverage of at least 1,
            # 99.5% with a coverage of at least 2, and 85% with a coverage of 3.
            self.FracExp = [0.005, 0.145, 0.85, 0, 0]
        else:
            raise RuntimeError("Survey seems to have wrong options for fraction of exposures ")
        # Per-band photometric constants: extinction index/coefficient,
        # nominal zero point, required depth and photo-z requirement
        if band == 'g':
            self.be = 1
            self.extc = 3.303  # /2.751
            self.zp0 = 25.08
            self.recm = 24.
            self.phreq = 0.01
        if band == 'r':
            self.be = 2
            self.extc = 2.285  # /2.751
            self.zp0 = 25.29
            self.recm = 23.4
            self.phreq = 0.01
        if band == 'z':
            self.be = 4
            self.extc = 1.263  # /2.751
            self.zp0 = 24.92
            self.recm = 22.5
            self.phreq = 0.02
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# ------------ VALIDATION TESTS ------------------------------------
# ------------------------------------------------------------------
# Note: part of the name of the function should startw with number valXpX
def val3p4c_depthfromIvar(sample):
    """
    Requirement V3.4
    90% filled to g=24, r=23.4 and z=22.5 and 95% and 98% at 0.3/0.6 mag shallower.
    Produces extinction correction magnitude maps for visual inspection
    MARCM stable version, improved from AJR quick hack
    This now included extinction from the exposures
    Uses quicksip subroutines from Boris, corrected
    for a bug I found for BASS and MzLS ccd orientation

    Parameters
    ----------
    sample : mysample
        Survey description (band, CCD catalogue path, extinction
        coefficient, output directory, ...).

    Returns
    -------
    str
        Path of the PNG depth map written to disk.
    """
    nside = 1024 # Resolution of output maps
    nsidesout = None # if you want full sky degraded maps to be written
    ratiores = 1 # Superresolution/oversampling ratio, simp mode doesn't allow anything other than 1
    mode = 1 # 1: fully sequential, 2: parallel then sequential, 3: fully parallel
    pixoffset = 0 # How many pixels are being removed on the edge of each CCD? 15 for DES.
    oversamp='1' # ratiores in string format
    nsideSTR='1024' # same as nside but in string format

    band = sample.band
    catalogue_name = sample.catalog
    fname = sample.ccds
    localdir = sample.localdir
    outroot = localdir
    extc = sample.extc

    #Read ccd file
    # NOTE(review): pyfits, np, sqrt and pi are assumed to come from the
    # "from quicksipManera import *" at the top of the file -- confirm.
    tbdata = pyfits.open(fname)[1].data

    # ------------------------------------------------------
    # Obtain indices of the good CCDs for this band
    auxstr='band_'+band
    sample_names = [auxstr]
    if(sample.DR == 'DR3'):
        # DR3 quality cut uses the blacklist flag
        inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True))
    elif(sample.DR == 'DR4'):
        # DR4 quality cut uses the bitmask instead
        inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['bitmask'] == 0))

    #Read data
    #obtain invnoisesq here, including extinction
    nmag = Magtonanomaggies(tbdata['galdepth']-extc*tbdata['EBV'])/5.
    ivar= 1./nmag**2.

    # What properties do you want mapped?
    # Each each tuple has [(quantity to be projected, weighting scheme, operation),(etc..)]
    propertiesandoperations = [ ('ivar', '', 'total'), ]

    # What properties to keep when reading the images?
    # Should at least contain propertiesandoperations and the image corners.
    # MARCM - actually no need for ra dec image corners.
    # Only needs ra0 ra1 ra2 ra3 dec0 dec1 dec2 dec3 only if fast track appropriate quicksip subroutines were implemented
    propertiesToKeep = [ 'filter', 'AIRMASS', 'FWHM','mjd_obs'] \
        + ['RA', 'DEC', 'crval1', 'crval2', 'crpix1', 'crpix2', 'cd1_1', 'cd1_2', 'cd2_1', 'cd2_2','width','height']

    # Create big table with all relevant properties.
    tbdata = np.core.records.fromarrays([tbdata[prop] for prop in propertiesToKeep] + [ivar], names = propertiesToKeep + [ 'ivar'])

    # Read the table, create Healtree, project it into healpix maps, and write these maps.
    # Done with Quicksip library, note it has quite a few hardcoded values (use new version by MARCM for BASS and MzLS)
    # project_and_write_maps_simp(mode, propertiesandoperations, tbdata, catalogue_name, outroot, sample_names, inds, nside)
    project_and_write_maps(mode, propertiesandoperations, tbdata, catalogue_name, outroot, sample_names, inds, nside, ratiores, pixoffset, nsidesout)

    # Read Haelpix maps from quicksip
    prop='ivar'
    op='total'
    vmin=21.0
    vmax=24.0

    fname2=localdir+catalogue_name+'/nside'+nsideSTR+'_oversamp'+oversamp+'/'+\
        catalogue_name+'_band_'+band+'_nside'+nsideSTR+'_oversamp'+oversamp+'_'+prop+'__'+op+'.fits.gz'
    f = fitsio.read(fname2)

    # HEALPIX DEPTH MAPS
    # convert ivar to depth
    import healpy as hp
    from healpix import pix2ang_ring,thphi2radec

    ral = []
    decl = []
    val = f['SIGNAL']
    pix = f['PIXEL']

    # Obtain values to plot
    if (prop == 'ivar'):
        myval = []
        mylabel='depth'
        below=0
        for i in range(0,len(val)):
            # total ivar of the coadded flux -> 5-sigma magnitude depth
            depth=nanomaggiesToMag(sqrt(1./val[i]) * 5.)
            if(depth < vmin):
                below=below+1
            else:
                myval.append(depth)
                th,phi = hp.pix2ang(int(nside),pix[i])
                ra,dec = thphi2radec(th,phi)
                ral.append(ra)
                decl.append(dec)

    npix=len(f)

    print 'Area is ', npix/(float(nside)**2.*12)*360*360./pi, ' sq. deg.'
    print below, 'of ', npix, ' pixels are not plotted as their ', mylabel,' < ', vmin
    print 'Within the plot, min ', mylabel, '= ', min(myval), ' and max ', mylabel, ' = ', max(myval)

    # Plot depth
    from matplotlib import pyplot as plt
    import matplotlib.cm as cm

    map = plt.scatter(ral,decl,c=myval, cmap=cm.rainbow,s=2., vmin=vmin, vmax=vmax, lw=0,edgecolors='none')
    cbar = plt.colorbar(map)
    plt.xlabel('r.a. (degrees)')
    plt.ylabel('declination (degrees)')
    plt.title('Map of '+ mylabel +' for '+catalogue_name+' '+band+'-band')
    plt.xlim(0,360)
    plt.ylim(-30,90)
    mapfile=localdir+mylabel+'_'+band+'_'+catalogue_name+str(nside)+'.png'
    print 'saving plot to ', mapfile
    plt.savefig(mapfile)
    plt.close()
    #plt.show()
    #cbar.set_label(r'5$\sigma$ galaxy depth', rotation=270,labelpad=1)
    #plt.xscale('log')

    return mapfile
def val3p4b_maghist_pred(sample,ndraw=1e5, nbin=100, vmin=21.0, vmax=25.0):
"""
Requirement V3.4
90% filled to g=24, r=23.4 and z=22.5 and 95% and 98% at 0.3/0.6 mag shallower.
MARCM
Makes histogram of predicted magnitudes
by MonteCarlo from exposures converving fraction of number of exposures
This produces the histogram for Dustin's processed galaxy depth
"""
import fitsio
from matplotlib import pyplot as plt
from numpy import zeros,array
from random import random
# Check fraction of number of exposures adds to 1.
if( abs(sum(sample.FracExp) - 1.0) > 1e-5 ):
raise ValueError("Fration of number of exposures don't add to one")
# Survey inputs
rel = sample.DR
catalogue_name = sample.catalog
band = sample.band
be = sample.be
zp0 = sample.zp0
recm = sample.recm
verbose = sample.verbose
f = fitsio.read(sample.ccds)
#read in magnitudes including extinction
counts2014 = 0
counts20 = 0
nl = []
for i in range(0,len(f)):
year = int(f[i]['date_obs'].split('-')[0])
if (year <= 2014): counts2014 = counts2014 + 1
if f[i]['dec'] < -20 : counts20 = counts20 + 1
if(sample.DR == 'DR3'):
if f[i]['filter'] == sample.band and f[i]['photometric'] == True and f[i]['blacklist_ok'] == True :
magext = f[i]['galdepth'] - f[i]['decam_extinction'][be]
nmag = Magtonanomaggies(magext)/5. #total noise
nl.append(nmag)
if(sample.DR == 'DR4'):
if f[i]['filter'] == sample.band and f[i]['photometric'] == True and f[i]['bitmask'] == 0 :
magext = f[i]['galdepth'] - f[i]['decam_extinction'][be]
nmag = Magtonanomaggies(magext)/5. #total noise
nl.append(nmag)
ng = len(nl)
print "-----------"
if(verbose) : print "Number of objects = ", len(f)
if(verbose) : print "Counts before or during 2014 = ", counts2014
if(verbose) : print "Counts with dec < -20 = ", counts20
print "Number of objects in the sample = ", ng
#Monte Carlo to predict magnitudes histogram
ndrawn = 0
nbr = 0
NTl = []
n = 0
for indx, f in enumerate(sample.FracExp,1) :
Nexp = indx # indx starts at 1 bc argument on enumearate :-), thus is the number of exposures
nd = int(round(ndraw * f))
ndrawn=ndrawn+nd
for i in range(0,nd):
detsigtoti = 0
for j in range(0,Nexp):
ind = int(random()*ng)
detsig1 = nl[ind]
detsigtoti += 1./detsig1**2.
detsigtot = sqrt(1./detsigtoti)
m = nanomaggiesToMag(detsigtot * 5.)
if m > recm: # pass requirement
nbr += 1.
NTl.append(m)
n += 1.
# Run some statistics
NTl=np.array(NTl)
mean = sum(NTl)/float(len(NTl))
std = sqrt(sum(NTl**2.)/float(len(NTl))-mean**2.)
NTl.sort()
if len(NTl)/2. != len(NTl)/2:
med = NTl[len(NTl)/2+1]
else:
med = (NTl[len(NTl)/2+1]+NTl[len(NTl)/2])/2.
print "Total images drawn with either 1,2,3,4,5 exposures", ndrawn
print "Mean = ", mean, "; Median = ", med ,"; Std = ", std
print 'percentage better than requirements = '+str(nbr/float(ndrawn))
# Prepare historgram
minN = max(min(NTl),vmin)
maxN = max(NTl)+.0001
hl = zeros((nbin)) # histogram counts
lowcounts=0
for i in range(0,len(NTl)):
bin = int(nbin*(NTl[i]-minN)/(maxN-minN))
if(bin >= 0) :
hl[bin] += 1
else:
lowcounts +=1
Nl = [] # x bin centers
for i in range(0,len(hl)):
Nl.append(minN+i*(maxN-minN)/float(nbin)+0.5*(maxN-minN)/float(nbin))
NTl = array(NTl)
print "min,max depth = ",min(NTl), max(NTl)
print "counts below ", minN, " = ", lowcounts
#### Ploting histogram
fname=sample.localdir+'validationplots/'+sample.catalog+sample.band+'_pred_exposures.png'
print "saving histogram plot in", fname
#--- pdf version ---
#from matplotlib.backends.backend_pdf import PdfPages
#pp = PdfPages(fname)
plt.clf()
plt.plot(Nl,hl,'k-')
plt.xlabel(r'5$\sigma$ '+sample.band+ ' depth')
plt.ylabel('# of images')
plt.title('MC combined exposure depth '+str(mean)[:5]+r'$\pm$'+str(std)[:4]+r', $f_{\rm pass}=$'+str(nbr/float(ndrawn))[:5]+'\n '+catalogue_name)
#plt.xscale('log') # --- pdf ---
plt.savefig(fname) #pp.savefig()
plt.close #pp.close()
return fname
# -------------------------------------------------------------------
# -------------------------------------------------------------------
# OLD STUFF
# -------------------------------------------------------------------
# Hard-coded locations used by the legacy (pre-mysample) helper functions below.
# NOTE(review): 'dir' shadows the builtin and '$HOME' is not expanded by
# Python; it appears unused by the functions below -- confirm before use.
dir = '$HOME/' # obviously needs to be changed
#inputdir = '/project/projectdirs/cosmo/data/legacysurvey/dr3/' # where I get my data
# Default annotated-CCD input location (often overridden inside the functions)
inputdir= '/global/projecta/projectdirs/cosmo/work/dr4/'
localdir = '/global/homes/m/manera/DESI/validation-outputs/' #place for local DESI stuff
#extmap = np.loadtxt('/global/homes/m/manera/DESI/validation-outputs/healSFD_r_256_fullsky.dat') # extintion map remove it
### Plotting facilities
def plotPhotometryMap(band,vmin=0.0,vmax=1.0,mjdmax='',prop='zptvar',op='min',rel='DR0',survey='surveyname',nside='1024',oversamp='1'):
import fitsio
from matplotlib import pyplot as plt
import matplotlib.cm as cm
from numpy import zeros,array
import healpix
from healpix import pix2ang_ring,thphi2radec
import healpy as hp
# Survey inputs
mjdw=mjdmax
if rel == 'DR2':
fname =inputdir+'decals-ccds-annotated.fits'
catalogue_name = 'DECaLS_DR2'+mjdw
if rel == 'DR3':
inputdir = '/project/projectdirs/cosmo/data/legacysurvey/dr3/' # where I get my data
fname =inputdir+'ccds-annotated-decals.fits.gz'
catalogue_name = 'DECaLS_DR3'+mjdw
if rel == 'DR4':
inputdir= '/global/projecta/projectdirs/cosmo/work/dr4/'
if (band == 'g' or band == 'r'):
fname=inputdir+'ccds-annotated-dr4-90prime.fits.gz'
catalogue_name = '90prime_DR4'+mjdw
if band == 'z' :
fname = inputdir+'ccds-annotated-dr4-mzls.fits.gz'
catalogue_name = 'MZLS_DR4'+mjdw
fname=localdir+catalogue_name+'/nside'+nside+'_oversamp'+oversamp+'/'+catalogue_name+'_band_'+band+'_nside'+nside+'_oversamp'+oversamp+'_'+prop+'__'+op+'.fits.gz'
f = fitsio.read(fname)
ral = []
decl = []
val = f['SIGNAL']
pix = f['PIXEL']
# -------------- plot of values ------------------
if( prop=='zptvar' and opt == 'min' ):
print 'Plotting min zpt rms'
myval = []
for i in range(0,len(val)):
myval.append(1.086 * np.sqrt(val[i])) #1.086 converts dm into d(flux)
th,phi = hp.pix2ang(int(nside),pix[i])
ra,dec = thphi2radec(th,phi)
ral.append(ra)
decl.append(dec)
mylabel = 'min-zpt-rms-flux'
vmin = 0.0 #min(myval)
vmax = 0.03 #max(myval)
npix = len(myval)
below = 0
print 'Min and Max values of ', mylabel, ' values is ', min(myval), max(myval)
print 'Number of pixels is ', npix
print 'Number of pixels offplot with ', mylabel,' < ', vmin, ' is', below
print 'Area is ', npix/(float(nside)**2.*12)*360*360./pi, ' sq. deg.'
map = plt.scatter(ral,decl,c=myval, cmap=cm.rainbow,s=2., vmin=vmin, vmax=vmax, lw=0,edgecolors='none')
cbar = plt.colorbar(map)
plt.xlabel('r.a. (degrees)')
plt.ylabel('declination (degrees)')
plt.title('Map of '+ mylabel +' for '+catalogue_name+' '+band+'-band')
plt.xlim(0,360)
plt.ylim(-30,90)
plt.savefig(localdir+mylabel+'_'+band+'_'+catalogue_name+str(nside)+'.png')
plt.close()
# -------------- plot of status in udgrade maps of 1.406 deg pix size ------------------
#Bands inputs
if band == 'g':
phreq = 0.01
if band == 'r':
phreq = 0.01
if band == 'z':
phreq = 0.02
# Obtain values to plot
if( prop=='zptvar' and opt == 'min' ):
nside2 = 64 # 1.40625 deg per pixel
npix2 = hp.nside2npix(nside2)
myreq = np.zeros(npix2) # 0 off footprint, 1 at least one pass requirement, -1 none pass requirement
ral = np.zeros(npix2)
decl = np.zeros(npix2)
mylabel = 'photometric-pixels'
print 'Plotting photometric requirement'
for i in range(0,len(val)):
th,phi = hp.pix2ang(int(nside),pix[i])
ipix = hp.ang2pix(nside2,th,phi)
dF= 1.086 * (sqrt(val[i])) # 1.086 converts d(magnitudes) into d(flux)
if(dF < phreq):
myreq[ipix]=1
else:
if(myreq[ipix] == 0): myreq[ipix]=-1
for i in range(0,len(myreq)):
th,phi = hp.pix2ang(int(nside2),pix[i])
ra,dec = thphi2radec(th,phi)
ral[i] = ra
decl[i] = dec
#myval = np.zeros(npix2)
#mycount = np.zeros(pix2)
#myval[ipix] += dF
#mycount[ipix] += 1.
below=sum( x for x in myreq if x < phreq)
print 'Number of pixels offplot with ', mylabel,' < ', phreq, ' is', below
vmin = min(myreq)
vmax = max(myreq)
map = plt.scatter(ral,decl,c=myreq, cmap=cm.rainbow,s=5., vmin=vmin, vmax=vmax, lw=0,edgecolors='none')
cbar = plt.colorbar(map)
plt.xlabel('r.a. (degrees)')
plt.ylabel('declination (degrees)')
plt.title('Map of '+ mylabel +' for '+catalogue_name+' '+band+'-band')
plt.xlim(0,360)
plt.ylim(-30,90)
plt.savefig(localdir+mylabel+'_'+band+'_'+catalogue_name+str(nside)+'.png')
plt.close()
#plt.show()
#cbar.set_label(r'5$\sigma$ galaxy depth', rotation=270,labelpad=1)
#plt.xscale('log')
return True
def plotPropertyMap(band,vmin=21.0,vmax=24.0,mjdmax='',prop='ivar',op='total',survey='surveyname',nside='1024',oversamp='1'):
    """Plot a quicksip property map; currently only prop='ivar' is handled,
    converting total inverse variance to 5-sigma magnitude depth.

    Reads the quicksip healpix map file named from (survey, band, nside,
    oversamp, prop, op), writes a PNG into ``localdir`` and returns True.
    ``nside``/``oversamp`` are strings because they build file names.
    """
    import fitsio
    from matplotlib import pyplot as plt
    import matplotlib.cm as cm
    from numpy import zeros,array
    import healpix
    from healpix import pix2ang_ring,thphi2radec
    import healpy as hp

    fname=localdir+survey+mjdmax+'/nside'+nside+'_oversamp'+oversamp+'/'+survey+mjdmax+'_band_'+band+'_nside'+nside+'_oversamp'+oversamp+'_'+prop+'__'+op+'.fits.gz'
    f = fitsio.read(fname)

    ral = []
    decl = []
    val = f['SIGNAL']
    pix = f['PIXEL']

    # Obtain values to plot
    if (prop == 'ivar'):
        myval = []
        mylabel='depth'
        print 'Converting ivar to depth.'
        print 'Plotting depth'
        below=0
        for i in range(0,len(val)):
            # total ivar of the coadded flux -> 5-sigma magnitude depth
            depth=nanomaggiesToMag(sqrt(1./val[i]) * 5.)
            if(depth < vmin):
                below=below+1
            else:
                myval.append(depth)
                th,phi = hp.pix2ang(int(nside),pix[i])
                ra,dec = thphi2radec(th,phi)
                ral.append(ra)
                decl.append(dec)

    npix=len(f)
    print 'Min and Max values of ', mylabel, ' values is ', min(myval), max(myval)
    print 'Number of pixels is ', npix
    print 'Number of pixels offplot with ', mylabel,' < ', vmin, ' is', below
    print 'Area is ', npix/(float(nside)**2.*12)*360*360./pi, ' sq. deg.'

    map = plt.scatter(ral,decl,c=myval, cmap=cm.rainbow,s=2., vmin=vmin, vmax=vmax, lw=0,edgecolors='none')
    cbar = plt.colorbar(map)
    plt.xlabel('r.a. (degrees)')
    plt.ylabel('declination (degrees)')
    plt.title('Map of '+ mylabel +' for '+survey+' '+band+'-band')
    plt.xlim(0,360)
    plt.ylim(-30,90)
    plt.savefig(localdir+mylabel+'_'+band+'_'+survey+str(nside)+'.png')
    plt.close()
    #plt.show()
    #cbar.set_label(r'5$\sigma$ galaxy depth', rotation=270,labelpad=1)
    #plt.xscale('log')
    return True
def depthfromIvar(band,rel='DR3',survey='survename'):
    """Legacy (pre-mysample) version of val3p4c_depthfromIvar: project galaxy
    depth (from galdepth + extinction) into a healpix map with quicksip and
    plot it via plotPropertyMap.  Returns True."""
    # ------------------------------------------------------
    # MARCM stable version, improved from AJR quick hack
    # This now included extinction from the exposures
    # Uses quicksip subroutines from Boris
    # (with a bug I corrected for BASS and MzLS ccd orientation)
    # Produces depth maps from Dustin's annotated files
    # ------------------------------------------------------
    nside = 1024 # Resolution of output maps
    nsidesout = None # if you want full sky degraded maps to be written
    ratiores = 1 # Superresolution/oversampling ratio, simp mode doesn't allow anything other than 1
    mode = 1 # 1: fully sequential, 2: parallel then sequential, 3: fully parallel
    pixoffset = 0 # How many pixels are being removed on the edge of each CCD? 15 for DES.
    mjd_max = 10e10
    mjdw = ''

    # Survey inputs (DR2 uses the module-level 'inputdir')
    if rel == 'DR2':
        fname =inputdir+'decals-ccds-annotated.fits'
        catalogue_name = 'DECaLS_DR2'+mjdw
    if rel == 'DR3':
        inputdir = '/project/projectdirs/cosmo/data/legacysurvey/dr3/' # where I get my data
        fname =inputdir+'ccds-annotated-decals.fits.gz'
        catalogue_name = 'DECaLS_DR3'+mjdw
    if rel == 'DR4':
        inputdir= '/global/projecta/projectdirs/cosmo/work/dr4/'
        if (band == 'g' or band == 'r'):
            fname=inputdir+'ccds-annotated-dr4-90prime.fits.gz'
            catalogue_name = '90prime_DR4'+mjdw
        if band == 'z' :
            fname = inputdir+'ccds-annotated-dr4-mzls.fits.gz'
            catalogue_name = 'MZLS_DR4'+mjdw

    #Bands inputs (extinction index and coefficient)
    if band == 'g':
        be = 1
        extc = 3.303 #/2.751
    if band == 'r':
        be = 2
        extc = 2.285 #/2.751
    if band == 'z':
        be = 4
        extc = 1.263 #/2.751

    # Where to write the maps ? Make sure directory exists.
    outroot = localdir

    tbdata = pyfits.open(fname)[1].data
    # ------------------------------------------------------
    # Obtain indices
    if band == 'g':
        sample_names = ['band_g']
        indg = np.where((tbdata['filter'] == 'g') & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True))
        inds = indg #redundant
    if band == 'r':
        sample_names = ['band_r']
        indr = np.where((tbdata['filter'] == 'r') & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True))
        inds = indr #redundant
    if band == 'z':
        sample_names = ['band_z']
        indz = np.where((tbdata['filter'] == 'z') & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True))
        inds = indz # redundant

    #Read data
    #obtain invnoisesq here, including extinction
    nmag = Magtonanomaggies(tbdata['galdepth']-extc*tbdata['EBV'])/5.
    ivar= 1./nmag**2.

    # What properties do you want mapped?
    # Each each tuple has [(quantity to be projected, weighting scheme, operation),(etc..)]
    propertiesandoperations = [ ('ivar', '', 'total'), ]

    # What properties to keep when reading the images?
    #Should at least contain propertiesandoperations and the image corners.
    # MARCM - actually no need for ra dec image corners.
    # Only needs ra0 ra1 ra2 ra3 dec0 dec1 dec2 dec3 only if fast track appropriate quicksip subroutines were implemented
    propertiesToKeep = [ 'filter', 'AIRMASS', 'FWHM','mjd_obs'] \
        + ['RA', 'DEC', 'crval1', 'crval2', 'crpix1', 'crpix2', 'cd1_1', 'cd1_2', 'cd2_1', 'cd2_2','width','height']

    # Create big table with all relevant properties.
    tbdata = np.core.records.fromarrays([tbdata[prop] for prop in propertiesToKeep] + [ivar], names = propertiesToKeep + [ 'ivar'])

    # Read the table, create Healtree, project it into healpix maps, and write these maps.
    # Done with Quicksip library, note it has quite a few hardcoded values (use new version by MARCM for BASS and MzLS)
    # project_and_write_maps_simp(mode, propertiesandoperations, tbdata, catalogue_name, outroot, sample_names, inds, nside)
    project_and_write_maps(mode, propertiesandoperations, tbdata, catalogue_name, outroot, sample_names, inds, nside, ratiores, pixoffset, nsidesout)

    # ----- plot depth map -----
    prop='ivar'
    plotPropertyMap(band,survey=catalogue_name,prop=prop)

    return True
def plotMaghist_pred(band,FracExp=[0,0,0,0,0],ndraw = 1e5,nbin=100,rel='DR3',vmin=21.0):
    """Legacy (pre-mysample) version of val3p4b_maghist_pred: Monte-Carlo
    prediction of the combined-exposure depth histogram, written as a PDF.
    Returns True."""
    # MARCM Makes histogram of predicted magnitudes
    # by MonteCarlo from exposures converving fraction of number of exposures
    # This produces the histogram for Dustin's processed galaxy depth
    # NOTE(review): FracExp has a mutable default list; it is never mutated
    # here so this is harmless, but callers should pass their own list.
    import fitsio
    from matplotlib import pyplot as plt
    from numpy import zeros,array
    from random import random

    # Check fraction of number of exposures adds to 1.
    if( abs(sum(FracExp) - 1.0) > 1e-5 ):
        print sum(FracExp)
        raise ValueError("Fration of number of exposures don't add to one")

    # Survey inputs
    mjdw=''
    if rel == 'DR2':
        fname =inputdir+'decals-ccds-annotated.fits'
        catalogue_name = 'DECaLS_DR2'+mjdw
    if rel == 'DR3':
        inputdir = '/project/projectdirs/cosmo/data/legacysurvey/dr3/' # where I get my data
        fname =inputdir+'ccds-annotated-decals.fits.gz'
        catalogue_name = 'DECaLS_DR3'+mjdw
    if rel == 'DR4':
        #inputdir= '/global/projecta/projectdirs/cosmo/work/dr4/'
        inputdir='/project/projectdirs/cosmo/data/legacysurvey/dr4'
        if (band == 'g' or band == 'r'):
            fname=inputdir+'ccds-annotated-bass.fits.gz'
            catalogue_name='BASS_DR4'+mjdw
            #fname=inputdir+'ccds-annotated-dr4-90prime.fits.gz'
            #catalogue_name = '90prime_DR4'+mjdw
        if band == 'z' :
            #fname = inputdir+'ccds-annotated-dr4-mzls.fits.gz'
            fname = inputdir+'ccds-annotated-mzls.fits.gz'
            catalogue_name = 'MZLS_DR4'+mjdw

    # Bands info (extinction index, zero point, required depth)
    if band == 'g':
        be = 1
        zp0 = 25.08
        recm = 24.
    if band == 'r':
        be = 2
        zp0 = 25.29
        recm = 23.4
    if band == 'z':
        be = 4
        zp0 = 24.92
        recm = 22.5

    f = fitsio.read(fname)

    #read in magnitudes including extinction
    counts2014 =0
    n = 0
    nl = []
    for i in range(0,len(f)):
        DS = 0
        year = int(f[i]['date_obs'].split('-')[0])
        if (year <= 2014): counts2014=counts2014+1
        if year > 2014:
            DS = 1 #enforce 2015 data
        if f[i]['filter'] == band:
            if DS == 1:
                n += 1
                if f[i]['dec'] > -20 and f[i]['photometric'] == True and f[i]['blacklist_ok'] == True :
                    magext = f[i]['galdepth'] - f[i]['decam_extinction'][be]
                    nmag = Magtonanomaggies(magext)/5. #total noise
                    nl.append(nmag)
    ng = len(nl)
    print "-----------"
    print "Number of objects with DS=1", n
    print "Number of objects in the sample", ng
    print "Counts before or during 2014", counts2014

    #Monte Carlo to predict magnitudes histogram
    ndrawn = 0
    nbr = 0
    NTl = []
    # NOTE(review): 'f' is rebound here from the FITS table to the exposure
    # fraction, and 'n' is reused as a drawn-image counter.
    for indx, f in enumerate(FracExp,1) :
        Nexp = indx # indx starts at 1 bc argument on enumearate :-), thus is the number of exposures
        nd = int(round(ndraw * f))
        ndrawn=ndrawn+nd
        for i in range(0,nd):
            detsigtoti = 0
            # stack Nexp randomly-drawn single-exposure noises in ivar
            for j in range(0,Nexp):
                ind = int(random()*ng)
                detsig1 = nl[ind]
                detsigtoti += 1./detsig1**2.
            detsigtot = sqrt(1./detsigtoti)
            m = nanomaggiesToMag(detsigtot * 5.)
            if m > recm: # pass requirement
                nbr += 1.
            NTl.append(m)
            n += 1.

    # Run some statistics
    NTl=np.array(NTl)
    mean = sum(NTl)/float(len(NTl))
    std = sqrt(sum(NTl**2.)/float(len(NTl))-mean**2.)
    NTl.sort()
    # Python 2 integer division selects the median index here
    if len(NTl)/2. != len(NTl)/2:
        med = NTl[len(NTl)/2+1]
    else:
        med = (NTl[len(NTl)/2+1]+NTl[len(NTl)/2])/2.
    print "Mean ", mean
    print "Median ", med
    print "Std ", std
    print 'percentage better than requirements '+str(nbr/float(ndrawn))

    # Prepare historgram
    minN = max(min(NTl),vmin)
    maxN = max(NTl)+.0001
    hl = zeros((nbin)) # histogram counts
    lowcounts=0
    for i in range(0,len(NTl)):
        bin = int(nbin*(NTl[i]-minN)/(maxN-minN))
        if(bin >= 0) :
            hl[bin] += 1
        else:
            lowcounts +=1

    Nl = [] # x bin centers
    for i in range(0,len(hl)):
        Nl.append(minN+i*(maxN-minN)/float(nbin)+0.5*(maxN-minN)/float(nbin))
    NTl = array(NTl)

    #### Ploting histogram
    print "Plotting the histogram now"
    print "min,max depth ",min(NTl), max(NTl)
    print "counts below ", vmin, "are ", lowcounts

    from matplotlib.backends.backend_pdf import PdfPages
    plt.clf()
    pp = PdfPages(localdir+'validationplots/'+catalogue_name+band+'_pred_exposures.pdf')
    plt.plot(Nl,hl,'k-')
    plt.xlabel(r'5$\sigma$ '+band+ ' depth')
    plt.ylabel('# of images')
    plt.title('MC combined exposure depth '+str(mean)[:5]+r'$\pm$'+str(std)[:4]+r', $f_{\rm pass}=$'+str(nbr/float(ndrawn))[:5]+'\n '+catalogue_name)
    #plt.xscale('log')
    pp.savefig()
    pp.close()
    return True
def photometricReq(band,rel='DR3',survey='survename'):
    """Project zero-point calibration statistics into healpix maps.

    For the selected band/release, computes per-CCD zero-point variance
    (CCDPHRMS**2 / CCDNMATCH), its inverse, and a CCD counter, and writes
    quicksip maps for ('zptvar' total/min, 'nccd' total, 'zptivar' total)
    into ``localdir``.  Returns True.
    """
    # ------------------------------------------------------
    # ------------------------------------------------------
    nside = 1024 # Resolution of output maps
    nsidesout = None # if you want full sky degraded maps to be written
    ratiores = 1 # Superresolution/oversampling ratio, simp mode doesn't allow anything other than 1
    mode = 1 # 1: fully sequential, 2: parallel then sequential, 3: fully parallel
    pixoffset = 0 # How many pixels are being removed on the edge of each CCD? 15 for DES.
    mjd_max = 10e10
    mjdw = ''

    # Survey inputs (DR2 uses the module-level 'inputdir')
    if rel == 'DR2':
        fname =inputdir+'decals-ccds-annotated.fits'
        catalogue_name = 'DECaLS_DR2'+mjdw
    if rel == 'DR3':
        inputdir = '/project/projectdirs/cosmo/data/legacysurvey/dr3/' # where I get my data
        fname =inputdir+'ccds-annotated-decals.fits.gz'
        catalogue_name = 'DECaLS_DR3'+mjdw
    if rel == 'DR4':
        inputdir= '/global/projecta/projectdirs/cosmo/work/dr4/'
        if (band == 'g' or band == 'r'):
            fname=inputdir+'ccds-annotated-dr4-90prime.fits.gz'
            catalogue_name = '90prime_DR4'+mjdw
        if band == 'z' :
            fname = inputdir+'ccds-annotated-dr4-mzls.fits.gz'
            catalogue_name = 'MZLS_DR4'+mjdw

    # Where to write the maps ? Make sure directory exists.
    outroot = localdir

    tbdata = pyfits.open(fname)[1].data
    # ------------------------------------------------------
    # Obtain indices
    if band == 'g':
        sample_names = ['band_g']
        #indg = np.where((tbdata['filter'] == 'g') & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True))
        indg = np.where((tbdata['filter'] == 'g') & (tbdata['blacklist_ok'] == True))
        #indg = np.where((tbdata['filter'] == 'g') )
        inds = indg #redundant
    if band == 'r':
        sample_names = ['band_r']
        #indr = np.where((tbdata['filter'] == 'r') & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True))
        indr = np.where((tbdata['filter'] == 'r') & (tbdata['blacklist_ok'] == True))
        #indr = np.where((tbdata['filter'] == 'r') )
        inds = indr #redundant
    if band == 'z':
        sample_names = ['band_z']
        #indz = np.where((tbdata['filter'] == 'z') & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True))
        indz = np.where((tbdata['filter'] == 'z') & (tbdata['blacklist_ok'] == True))
        #indz = np.where((tbdata['filter'] == 'z') )
        inds = indz # redundant

    #Read data
    #obtain invnoisesq here, including extinction
    zptvar = tbdata['CCDPHRMS']**2/tbdata['CCDNMATCH']
    zptivar = 1./zptvar
    nccd = np.ones(len(tbdata))

    # What properties do you want mapped?
    # Each each tuple has [(quantity to be projected, weighting scheme, operation),(etc..)]
    # BUGFIX: removed the stray call "quicksipVerbose(sample.verbose)" --
    # this function has no 'sample' argument, so that line raised NameError
    # (copy-paste from the class-based val3p4c version).
    propertiesandoperations = [ ('zptvar', '', 'total') , ('zptvar','','min') , ('nccd','','total') , ('zptivar','','total')]

    # What properties to keep when reading the images?
    #Should at least contain propertiesandoperations and the image corners.
    # MARCM - actually no need for ra dec image corners.
    # Only needs ra0 ra1 ra2 ra3 dec0 dec1 dec2 dec3 only if fast track appropriate quicksip subroutines were implemented
    propertiesToKeep = [ 'filter', 'AIRMASS', 'FWHM','mjd_obs'] \
        + ['RA', 'DEC', 'crval1', 'crval2', 'crpix1', 'crpix2', 'cd1_1', 'cd1_2', 'cd2_1', 'cd2_2','width','height']

    # Create big table with all relevant properties.
    tbdata = np.core.records.fromarrays([tbdata[prop] for prop in propertiesToKeep] + [zptvar,zptivar,nccd], names = propertiesToKeep + [ 'zptvar','zptivar','nccd'])

    # Read the table, create Healtree, project it into healpix maps, and write these maps.
    # Done with Quicksip library, note it has quite a few hardcoded values (use new version by MARCM for BASS and MzLS)
    # project_and_write_maps_simp(mode, propertiesandoperations, tbdata, catalogue_name, outroot, sample_names, inds, nside)
    project_and_write_maps(mode, propertiesandoperations, tbdata, catalogue_name, outroot, sample_names, inds, nside, ratiores, pixoffset, nsidesout)

    # ----- plot depth map -----
    #prop='ivar'
    #plotPropertyMap(band,survey=catalogue_name,prop=prop)

    return True
# ***********************************************************************
# ***********************************************************************
# --- run depth maps
#band='r'
#depthfromIvar(band,rel='DR3')
#
#band='g'
#depthfromIvar(band,rel='DR3')
#
#band='z'
#depthfromIvar(band,rel='DR3')
#band='r'
#depthfromIvar(band,rel='DR4')
#band='g'
#depthfromIvar(band,rel='DR4')
#band='z'
#depthfromIvar(band,rel='DR4')
# DECALS (DR3) the final survey will be covered by
# 1, 2, 3, 4, and 5 exposures in the following fractions:
#FracExp=[0.02,0.24,0.50,0.22,0.02]
#print "DECaLS depth histogram r-band"
#band='r'
#plotMaghist_pred(band,FracExp=FracExp,ndraw = 1e5,nbin=100,rel='DR3')
#print "DECaLS depth histogram g-band"
#band='g'
#plotMaghist_pred(band,FracExp=FracExp,ndraw = 1e5,nbin=100,rel='DR3')
#print "DECaLS depth histogram z-band"
#band='z'
#plotMaghist_pred(band,FracExp=FracExp,ndraw = 1e5,nbin=100,rel='DR3')
# For BASS (DR4) the coverage fractions for 1,2,3,4,5 exposures are:
#FracExp=[0.0014,0.0586,0.8124,0.1203,0.0054,0.0019]
#print "BASS depth histogram r-band"
#band='r'
#plotMaghist_pred(band,FracExp=FracExp,ndraw = 1e5,nbin=100,rel='DR4')
#print "BASS depth histogram g-band"
#band='g'
#plotMaghist_pred(band,FracExp=FracExp,ndraw = 1e5,nbin=100,rel='DR4')
# For MzLS fill factors of 100% with a coverage of at least 1,
# 99.5% with a coverage of at least 2, and 85% with a coverage of 3.
#FracExp=[0.005,0.145,0.85,0,0]
#print "MzLS depth histogram z-band"
#band='z'
#plotMaghist_pred(band,FracExp=FracExp,ndraw = 1e5,nbin=100,rel='DR4')
# --- run histogram deph peredictions
# prova
#photometricReq('g',rel='DR3',survey='survename')
#photometricReq('r',rel='DR3',survey='survename')
#photometricReq('z',rel='DR3',survey='survename')
#photometricReq('g',rel='DR4',survey='survename')
#photometricReq('r',rel='DR4',survey='survename')
#photometricReq('z',rel='DR4',survey='survename')
#prop = 'zptvar'
#opt = 'min'
#rel = 'DR3'
#band = 'g'
#plotPhotometryMap(band,prop=prop,op=opt,rel=rel)
#band = 'r'
#plotPhotometryMap(band,prop=prop,op=opt,rel=rel)
#band = 'z'
#plotPhotometryMap(band,prop=prop,op=opt,rel=rel)
#
#rel = 'DR4'
#band = 'g'
#plotPhotometryMap(band,prop=prop,op=opt,rel=rel)
#band = 'r'
#plotPhotometryMap(band,prop=prop,op=opt,rel=rel)
#band = 'z'
#plotPhotometryMap(band,prop=prop,op=opt,rel=rel)
|
legacysurvey/pipeline
|
validationtests/DESIccdManera.py
|
Python
|
gpl-2.0
| 38,903
|
[
"Galaxy"
] |
b246960abd393e9b2282d3789c29aa1730bc8bd625e3f2e35610a4445c702f89
|
# -*- coding: utf-8 -*-
"""
sphinx.pycode.nodes
~~~~~~~~~~~~~~~~~~~
Parse tree node implementations.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class BaseNode(object):
    """
    Common base class for parse tree nodes, both terminal (Leaf) and
    nonterminal (Node).  Provides equality plumbing and tree-navigation
    helpers; subclasses must implement ``_eq``.
    """

    # set by the owning Node when this node is adopted as a child
    parent = None

    def _eq(self, other):
        """Compare against another node of the same class (subclass hook)."""
        raise NotImplementedError

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self._eq(other)

    def __ne__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return not self._eq(other)

    # nodes are mutable containers and therefore unhashable
    __hash__ = None

    def get_prev_sibling(self):
        """Return previous child in parent's children, or None."""
        if self.parent is None:
            return None
        siblings = self.parent.children
        for idx, node in enumerate(siblings):
            if node is self:
                return siblings[idx - 1] if idx > 0 else None
        return None

    def get_next_sibling(self):
        """Return next child in parent's children, or None."""
        if self.parent is None:
            return None
        siblings = self.parent.children
        for idx, node in enumerate(siblings):
            if node is self:
                if idx + 1 < len(siblings):
                    return siblings[idx + 1]
                return None
        return None

    def get_prev_leaf(self):
        """Return the leaf node that precedes this node in the parse tree."""
        if self.parent is None:
            return None
        prev = self.get_prev_sibling()
        if prev is None:
            # no earlier sibling: the previous leaf precedes our parent
            return self.parent.get_prev_leaf()
        # descend to the right-most leaf under the previous sibling
        node = prev
        while not isinstance(node, Leaf):
            if not node.children:
                return None
            node = node.children[-1]
        return node

    def get_next_leaf(self):
        """Return self if leaf, otherwise the leaf node that succeeds this
        node in the parse tree.
        """
        current = self
        while not isinstance(current, Leaf):
            assert current.children
            current = current.children[0]
        return current

    def get_lineno(self):
        """Return the line number which generated the invocant node."""
        return self.get_next_leaf().lineno

    def get_prefix(self):
        """Return the prefix of the next leaf node."""
        # only leaf nodes carry a prefix
        return self.get_next_leaf().prefix
class Node(BaseNode):
    """Parse tree node for nonterminal symbols.

    Holds a numeric symbol ``type`` and an ordered list of child nodes,
    and adopts each child by pointing its ``parent`` at this node.
    """
    def __init__(self, type, children, context=None):
        # Nonterminal type codes are >= 256 (terminals live below 256).
        self.type = type
        self.children = list(children)
        # Adopt every child so sibling/leaf navigation can walk upwards.
        for child in self.children:
            child.parent = self
    def __repr__(self):
        return '%s(%s, %r)' % (type(self).__name__, self.type, self.children)
    def __str__(self):
        """Reproduce the original source text of this subtree exactly."""
        return ''.join(str(child) for child in self.children)
    def _eq(self, other):
        # Equality ignores context; only symbol type and children matter.
        return self.type == other.type and self.children == other.children
    # Convenience protocol methods: the node can be indexed, iterated
    # and measured directly instead of going through ``.children``.
    def __getitem__(self, index):
        return self.children[index]
    def __iter__(self):
        return iter(self.children)
    def __len__(self):
        return len(self.children)
class Leaf(BaseNode):
    """Parse tree node for terminal symbols (tokens)."""
    # Class-level defaults, overridden per instance when a context is given.
    prefix = ''  # whitespace and comments preceding this token in the input
    lineno = 0   # line where this token starts in the input
    column = 0   # column where this token starts in the input
    def __init__(self, type, value, context=None):
        # Terminal type codes are below 256.
        self.type = type
        self.value = value
        if context is not None:
            # context is (prefix, (lineno, column)) from the tokenizer.
            self.prefix, (self.lineno, self.column) = context
    def __repr__(self):
        return '%s(%r, %r, %r)' % (type(self).__name__,
                                   self.type, self.value, self.prefix)
    def __str__(self):
        """Reproduce the original source text of this token exactly."""
        return self.prefix + str(self.value)
    def _eq(self, other):
        """Equality ignores position and prefix; only type and value matter."""
        return self.type == other.type and self.value == other.value
def convert(grammar, raw_node):
    """Convert a raw ``(type, value, context, children)`` tuple into a
    Node (nonterminal) or Leaf (terminal) instance.

    Nonterminals with exactly one child are collapsed: the lone child is
    returned directly instead of being wrapped in a new Node.
    """
    type, value, context, children = raw_node
    is_nonterminal = bool(children) or type in grammar.number2symbol
    if not is_nonterminal:
        return Leaf(type, value, context=context)
    # Collapse trivial one-child chains to keep the tree shallow.
    if len(children) == 1:
        return children[0]
    return Node(type, children, context=context)
def nice_repr(node, number2name, prefix=False):
    """Return a readable representation of *node*, mapping numeric node
    types to symbolic names via *number2name*.

    When ``prefix`` is true, each leaf also shows its whitespace prefix.
    """
    def _repr(node):
        if isinstance(node, Leaf):
            return "%s(%r)" % (number2name[node.type], node.value)
        inner = ', '.join(_repr(child) for child in node.children)
        return "%s(%s)" % (number2name[node.type], inner)
    def _prepr(node):
        if isinstance(node, Leaf):
            return "%s(%r, %r)" % (number2name[node.type],
                                   node.prefix, node.value)
        inner = ', '.join(_prepr(child) for child in node.children)
        return "%s(%s)" % (number2name[node.type], inner)
    formatter = _prepr if prefix else _repr
    return formatter(node)
class NodeVisitor(object):
    """Generic parse tree walker with per-symbol visit methods.

    Subclasses implement ``visit_<symbolname>`` methods; nodes without a
    matching handler fall back to :meth:`generic_visit`, which recurses
    into the children of nonterminal nodes.
    """
    def __init__(self, number2name, *args):
        # Maps numeric node types to the symbol names used for dispatch.
        self.number2name = number2name
        self.init(*args)
    def init(self, *args):
        """Subclass initialization hook; the default does nothing."""
        pass
    def visit(self, node):
        """Dispatch *node* to its ``visit_<name>`` handler."""
        handler = getattr(self, 'visit_' + self.number2name[node.type],
                          self.generic_visit)
        return handler(node)
    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node."""
        if isinstance(node, Node):
            for child in node:
                self.visit(child)
|
havard024/prego
|
venv/lib/python2.7/site-packages/sphinx/pycode/nodes.py
|
Python
|
mit
| 6,387
|
[
"VisIt"
] |
fc38fbd08a8799f6a6167e186fdecfa58028b90510ba06fedb02d8049b722c80
|
"""
This script tests the SPMP2 module.
"""
import numpy as np
from frankenstein import sgscf, mp
from frankenstein.tools.pyscf_utils import get_pymol
import pytest
@pytest.mark.parametrize("geom, basis, spin_proj, ngrid, eref",
    [
        ("geom/c2h4.zmat", "6-31g", 0, 6, -0.1269558397)
    ])
def test_spmp2(geom, basis, spin_proj, ngrid, eref):
    """Run RHF, then spin-projected HF followed by MP2, and check the
    resulting correlation energy against the reference value *eref*."""
    mol = get_pymol(geom, basis, verbose=3)
    # Reference RHF supplies the initial orbitals and the stored integrals.
    mf = sgscf.RHF(mol)
    mf.kernel()
    guess_orbitals = [mf.mo_coeff.copy(), mf.mo_coeff.copy()]
    # Spin-projected HF reuses the RHF two-electron integrals.
    spmf = sgscf.SPHF(mol)
    spmf._eri = mf._eri
    spmf.spin_proj = spin_proj
    spmf.ngrid = ngrid
    spmf.guess_mix = 0.8
    spmf.kernel(mo_coeff0=guess_orbitals)
    spmf.do_mp2(frozen=True)
    assert np.allclose(spmf.e_corr, eref)
|
hongzhouye/frankenstein
|
tests/spmp2_test.py
|
Python
|
bsd-3-clause
| 748
|
[
"PyMOL"
] |
79a4fbadceebb5167f7c358e37471a6f82509c33baad0bdd66f7ae383f3f45f5
|
# pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from lettuce.django import django_url
@step('I register for the course "([^"]*)"$')
def i_register_for_the_course(_step, course):
    """Visit the course's about page and click the register link."""
    about_url = django_url('courses/%s/about' % world.scenario_dict['COURSE'].id.to_deprecated_string())
    world.browser.visit(about_url)
    world.css_click('section.intro a.register')
    # A successful registration lands on the student dashboard.
    assert world.is_css_present('section.container.dashboard')
@step('I register to audit the course$')
def i_register_to_audit_the_course(_step):
    """Register for the course, choosing the audit enrollment track."""
    about_url = django_url('courses/%s/about' % world.scenario_dict['COURSE'].id.to_deprecated_string())
    world.browser.visit(about_url)
    world.css_click('section.intro a.register')
    # Pick the audit mode on the track-selection page.
    world.browser.find_by_name("audit_mode").click()
    assert world.is_css_present('section.container.dashboard')
@step(u'I should see an empty dashboard message')
def i_should_see_empty_dashboard(_step):
    """The dashboard shows its empty-state message when no courses exist."""
    assert world.is_css_present('section.empty-dashboard-message')
@step(u'I should( NOT)? see the course numbered "([^"]*)" in my dashboard$')
def i_should_see_that_course_in_my_dashboard(_step, doesnt_appear, course):
    """Assert the course link's presence — or absence when 'NOT' was captured."""
    link_css = 'section.my-courses a[href*="%s"]' % course
    if doesnt_appear:
        assert world.is_css_not_present(link_css)
    else:
        assert world.is_css_present(link_css)
@step(u'I unregister for the course numbered "([^"]*)"')
def i_unregister_for_that_course(_step, course):
    """Open the unenroll modal for the given course and confirm unregistration."""
    world.css_click(
        'section.info a[href*="#unenroll-modal"][data-course-number*="%s"]' % course)
    # Confirm in the modal dialog.
    world.css_click('section#unenroll-modal input[value="Unregister"]')
|
LICEF/edx-platform
|
lms/djangoapps/courseware/features/registration.py
|
Python
|
agpl-3.0
| 1,763
|
[
"VisIt"
] |
bb6141b2b402f08c25fb7e14cfd7300c601e18b6bea021a6417fd6b844e467d6
|
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def offset_gaussian():
    """Train a gaussian-distribution GBM with an offset column and compare
    init_f, MSE, and prediction statistics against reference values
    produced by R's gbm (see the commented R session below)."""
    # Connect to a pre-existing cluster
    insurance = h2o.import_file(pyunit_utils.locate("smalldata/glm_test/insurance.csv"))
    # Offset column is log(Holders), mirroring offset(log(Holders)) in the R fit.
    insurance["offset"] = insurance["Holders"].log()
    gbm = H2OGradientBoostingEstimator(ntrees=600,
                                       max_depth=1,
                                       min_rows=1,
                                       learn_rate=0.1,
                                       distribution="gaussian")
    # Predictors are the first three columns; response is Claims.
    gbm.train(x=list(range(3)), y="Claims", training_frame=insurance, offset_column="offset")
    predictions = gbm.predict(insurance)
    # Comparison result generated from R's gbm:
    # fit2 <- gbm(Claims ~ District + Group + Age+ offset(log(Holders)) , interaction.depth = 1,n.minobsinnode = 1,
    #   shrinkage = .1,bag.fraction = 1,train.fraction = 1,
    #   data = Insurance, distribution ="gaussian", n.trees = 600)
    # pg = predict(fit2, newdata = Insurance, type = "response", n.trees=600)
    # pr = pg - - log(Insurance$Holders)
    assert abs(44.33016 - gbm._model_json['output']['init_f']) < 1e-5, "expected init_f to be {0}, but got {1}". \
        format(44.33016, gbm._model_json['output']['init_f'])
    assert abs(1491.135 - gbm.mse()) < 1e-2, "expected mse to be {0}, but got {1}".format(1491.135, gbm.mse())
    assert abs(49.23438 - predictions.mean()[0]) < 1e-2, "expected prediction mean to be {0}, but got {1}". \
        format(49.23438, predictions.mean()[0])
    assert abs(-45.5720659304 - predictions.min()) < 1e-2, "expected prediction min to be {0}, but got {1}". \
        format(-45.5720659304, predictions.min())
    assert abs(207.387 - predictions.max()) < 1e-2, "expected prediction max to be {0}, but got {1}". \
        format(207.387, predictions.max())
# h2o pyunit convention: run through the standalone harness when executed
# directly; when imported by the test runner, call the test function itself.
if __name__ == "__main__":
    pyunit_utils.standalone_test(offset_gaussian)
else:
    offset_gaussian()
|
nilbody/h2o-3
|
h2o-py/tests/testdir_algos/gbm/pyunit_offset_gaussian_gbm.py
|
Python
|
apache-2.0
| 2,019
|
[
"Gaussian"
] |
9e9e2fae88312e9800011a5a73ab5f6d709bd196e79fdb8bf3c281c778b43eb0
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import collections
from .. import common
from . import command
def make_extension(**kwargs):
    """Factory entry point; MooseDocs calls this to instantiate the extension."""
    return ConfigExtension(**kwargs)
class ConfigExtension(command.CommandExtension):
    """
    Allows the configuration items of objects to be changed on a per-page basis.
    """
    @staticmethod
    def defaultConfig():
        # No extension-specific options; inherit the base configuration.
        config = command.CommandExtension.defaultConfig()
        return config
    def __init__(self, *args, **kwargs):
        command.CommandExtension.__init__(self, *args, **kwargs)
    def postRead(self, page, content):
        """Updates configuration items.

        Scans the raw page content for '!config <subcommand> ...' block
        commands and applies their settings before translation.
        """
        if content:
            for match in command.BlockInlineCommand.RE.finditer(content):
                if match.group('command') == 'config':
                    subcommand = match.group('subcommand')
                    _, settings = common.match_settings(dict(), match.group('settings'))
                    if subcommand == 'disable':
                        self.__configPageDisable(page, settings)
                    else:
                        # e.g. '!config renderer ...' updates page['__renderer__'].
                        page['__{}__'.format(subcommand)].update(settings)
    def extend(self, reader, renderer):
        self.requires(command)
        # Register both commands so the syntax is swallowed during rendering.
        self.addCommand(reader, ConfigCommand())
        self.addCommand(reader, ConfigPageActiveCommand())
    @staticmethod
    def __configPageDisable(page, settings):
        """Activate/deactivate based on extension."""
        _, ext = os.path.splitext(page.destination)
        # NOTE(review): eval() of a page-supplied setting executes arbitrary
        # code; ast.literal_eval would be safer — confirm settings are trusted.
        extensions = eval(settings.get('extensions'))
        if extensions and ext in extensions:
            page['active'] = False
class ConfigCommand(command.CommandComponent):
    """This does nothing but serves to hide the command syntax from outputting."""
    COMMAND = 'config'
    SUBCOMMAND = '*'  # match any subcommand; settings are applied in postRead
    PARSE_SETTINGS = False  # settings are parsed manually by ConfigExtension
    def createToken(self, parent, info, page):
        # Swallow the command: emit no tokens, return the parent unchanged.
        return parent
class ConfigPageActiveCommand(command.CommandComponent):
    """This does nothing but serves to hide the command syntax from outputting."""
    COMMAND = 'config'
    SUBCOMMAND = 'disable'  # handled by ConfigExtension.__configPageDisable
    PARSE_SETTINGS = False
    @staticmethod
    def defaultSettings():
        settings = command.CommandComponent.defaultSettings()
        settings['extensions'] = ([], "If the output extension matches the page is disabled from " \
                                      "translation.")
        return settings
    def createToken(self, parent, info, page):
        # Swallow the command: emit no tokens, return the parent unchanged.
        return parent
|
harterj/moose
|
python/MooseDocs/extensions/config.py
|
Python
|
lgpl-2.1
| 2,751
|
[
"MOOSE"
] |
af2359d032bbf5d3b370037f80ce6bc91692c8ee0877948581ec4d68829f0881
|
import operator as op
from functools import partial
from itertools import permutations, combinations
import logging
import lib.const as C
import lib.visit as v
from ... import add_artifacts
from ... import util
from ... import sample
from ...encoder import add_ty_map
from ...meta import class_lookup
from ...meta.template import Template
from ...meta.clazz import Clazz, merge_flat
from ...meta.method import Method, sig_match, call_stt
from ...meta.field import Field
from ...meta.statement import Statement, to_statements
from ...meta.expression import Expression, to_expression, gen_E_gen
class Observer(object):
  @classmethod
  def find_obs(cls):
    # Predicate over annotations: matches the @ObserverPattern annotation.
    return lambda anno: anno.by_name(C.A.OBS)

  # to avoid name conflict, use fresh counter as suffix
  __cnt = 0
  @classmethod
  def fresh_cnt(cls):
    cls.__cnt = cls.__cnt + 1
    return cls.__cnt

  @classmethod
  def new_aux(cls, suffix=None):
    # Build a fresh aux class name; default suffix is the global counter.
    if not suffix:
      suffix = str(Observer.fresh_cnt())
    return u"{}{}".format(C.OBS.AUX, suffix)
  def __init__(self, smpls, obs_conf):
    """Record the samples and per-event observer configuration and
    initialize the maps that are filled in while visiting the template."""
    self._smpls = smpls
    # event classes that actually appear in the given samples
    evt_kinds = sample.evt_kinds(smpls)
    self._smpl_events = util.ffilter(map(class_lookup, evt_kinds))
    self._obs_conf = obs_conf
    self._tmpl = None
    self._eq = None
    self._cur_mtd = None
    # classes that are involved in this pattern
    self._clss = {} # { E1: [C1, D1], E2: [C2, D1], ... }
    # event name to aux class name
    self._evts = {} # { E1: Aux1, E2: Aux2, ... }
    # class name to aux class names
    self._auxs = {} # { C1: [Aux1], D1: [Aux1, Aux2], C2: [Aux2], ... }
    # (subjectCall) methods to aux class name
    self._subj_mtds = {} # { M1: [Aux1], M2: [Aux1, Aux2], ... }
@v.on("node")
def visit(self, node):
"""
This is the generic method to initialize the dynamic dispatcher
"""
# find possible classes for @Subject and @Observer
# so as to build self._clss and self._auxs
# at this point, assume those are annotated with @ObserverPattern(E+)
def find_clss_involved_w_anno_evt(self, tmpl):
for cls in util.flatten_classes(tmpl.classes, "inners"):
if not util.exists(Observer.find_obs(), cls.annos): continue
# ignore interface without implementers
if cls.is_itf and not cls.subs:
logging.debug("ignore {} due to no implementers".format(cls.name))
continue
events = util.find(Observer.find_obs(), cls.annos).events
for event in events:
cls_e = class_lookup(event)
if not cls_e: continue
for cls_smpl_e in self._smpl_events:
if cls_smpl_e <= cls_e: # subtype appears in the samples
util.mk_or_append(self._clss, event, cls)
for event in self._clss.keys():
# if # of candidates is less than 2, ignore that event
if len(self._clss[event]) < 2:
logging.debug("ignore {} {}".format(event, self._clss[event]))
del self._clss[event]
del tmpl.events[event]
continue
aux_name = Observer.new_aux(event)
tmpl.obs_auxs[aux_name] = self._clss[event]
self._evts[event] = aux_name
logging.debug("{}: {} {}".format(event, aux_name, self._clss[event]))
for cls in self._clss[event]:
util.mk_or_append(self._auxs, cls.name, aux_name)
if cls.outer:
util.mk_or_append(self._auxs, unicode(repr(cls)), aux_name)
# find possible classes for @Subject and @Observer
# so as to build self._clss and self._auxs
# at this point, assume those are annotated with @ObserverPattern
def find_clss_involved_w_anno(self, tmpl):
logging.debug("target events: {}".format(self._smpl_events))
for cls in util.flatten_classes(tmpl.classes, "inners"):
if not util.exists(Observer.find_obs(), cls.annos): continue
# ignore interface without implementers
if cls.is_itf and not cls.subs:
logging.debug("ignore {} due to no implementers".format(cls.name))
continue
involved_clss = map(class_lookup, cls.param_typs)
for cls_e in self._smpl_events:
for cls_i in involved_clss:
if cls_i and cls_e <= cls_i:
util.mk_or_append(self._clss, cls_e.name, cls)
for event in self._clss.keys():
# if # of candidates is less than 2, ignore that event
if len(self._clss[event]) < 2:
logging.debug("ignore {} {}".format(event, self._clss[event]))
del self._clss[event]
del tmpl.events[event]
continue
aux_name = Observer.new_aux(event)
tmpl.obs_auxs[aux_name] = self._clss[event]
self._evts[event] = aux_name
logging.debug("{}: {} {}".format(event, aux_name, self._clss[event]))
for cls in self._clss[event]:
util.mk_or_append(self._auxs, cls.name, aux_name)
if cls.outer:
util.mk_or_append(self._auxs, unicode(repr(cls)), aux_name)
# find possible classes for @Subject and @Observer
# so as to build self._clss and self._auxs
# at this point, annotations are no longer used
def find_clss_involved_wo_anno(self, tmpl):
event = "AWTEvent"
self._clss[event] = []
for cls in util.flatten_classes(tmpl.classes, "inners"):
if not util.exists(Observer.find_obs(), cls.annos): continue
# ignore interface without implementers
if cls.is_itf and not cls.subs:
logging.debug("ignore {} due to no implementers".format(cls.name))
continue
util.mk_or_append(self._clss, event, cls)
for e in tmpl.events.keys():
if e != event:
del tmpl.events[e]
aux_name = Observer.new_aux(event)
tmpl.obs_auxs[aux_name] = self._clss[event]
self._evts[event] = aux_name
logging.debug("{}: {} {}".format(event, aux_name, self._clss[event]))
for cls in self._clss[event]:
util.mk_or_append(self._auxs, cls.name, aux_name)
if cls.outer:
util.mk_or_append(self._auxs, unicode(repr(cls)), aux_name)
  # subtype based lookup
  @staticmethod
  def subtype_lookup(dic, ty):
    """Look up *ty* in *dic*; on a miss, retry with its interfaces and
    then its superclass, returning None when nothing matches."""
    _ty = util.sanitize_ty(ty)
    if _ty in dic: return dic[_ty]
    cls = class_lookup(ty)
    if not cls: return None
    # Try implemented interfaces first, then walk up the superclass chain.
    if cls.itfs:
      for itf in cls.itfs:
        res = Observer.subtype_lookup(dic, itf)
        if res: return res
    if cls.sup: return Observer.subtype_lookup(dic, cls.sup)
    return None

  # find the corresponding aux type based on subtypes
  def find_aux(self, ty):
    return Observer.subtype_lookup(self._auxs, ty)
## @ObserverPattern(E)
## class C { ... }
## class D { ... void update(E obj2); ... }
## class E { ... T gettype(); ...}
## =>
## class C { @Subject(D, E, update) ... }
## class D { @Observer ... }
## class E { @Event ... }
@staticmethod
def check_rule1(aux, conf):
rule = Method(clazz=aux, mods=[C.mod.ST, C.mod.HN], name=u"checkRule1")
body = u"""
assert {aux.subject} != {aux.observer};
""".format(**locals())
if conf[0] < 2:
body += u"""
assert subcls(belongsTo({aux.update}), {aux.observer});
assert 1 == (argNum({aux.update}));
assert subcls({aux.event}, argType({aux.update}, 0));
""".format(**locals())
else:
body += u"""
assert subcls(belongsTo({aux.eventtype}), {aux.event});
assert 0 == (argNum({aux.eventtype}));
""".format(**locals())
for i in xrange(conf[0]):
aux_up = getattr(aux, "update_"+str(i))
body += u"""
assert subcls(belongsTo({aux_up}), {aux.observer});
assert 1 == (argNum({aux_up}));
assert subcls({aux.event}, argType({aux_up}, 0));
""".format(**locals())
for i, j in combinations(range(conf[0]), 2):
aux_up_i = getattr(aux, "update_"+str(i))
aux_up_j = getattr(aux, "update_"+str(j))
body += u"assert {aux_up_i} != {aux_up_j};".format(**locals())
rule.body = to_statements(rule, body)
aux.add_mtds([rule])
## @Subject(D, E, update)
## void M1(D obj1){}
## void M2(D obj2){}
## void M3(E obj3){}
## =>
## List<D> _obs;
## void M1(D obj1) { @Attach(obj1, _obs) }
## void M2(D obj2) { @Detach(obj2, _obs) }
## void M3(E obj3) { @Handle(D, update, obj3, _obs) }
@staticmethod
def check_rule2(aux, conf):
rule = Method(clazz=aux, mods=[C.mod.ST, C.mod.HN], name=u"checkRule2")
body = u""
if conf[1] > 0:
body += u"""
assert subcls(belongsTo({aux.attach}), {aux.subject});
assert 1 == (argNum({aux.attach}));
assert subcls({aux.observer}, argType({aux.attach}, 0));
""".format(**locals())
if conf[2] > 0:
body += u"""
assert subcls(belongsTo({aux.detach}), {aux.subject});
assert 1 == (argNum({aux.detach}));
assert subcls({aux.observer}, argType({aux.detach}, 0));
""".format(**locals())
if conf[1] > 0 and conf[2] > 0:
body += u"""
assert {aux.attach} != {aux.detach};
""".format(**locals())
def handle_related(aux, hdl):
constraints = u"""
assert subcls(belongsTo({hdl}), {aux.subject});
assert 1 == (argNum({hdl}));
assert subcls({aux.event}, argType({hdl}, 0));
""".format(**locals())
if conf[1] > 0:
constraints += u"""
assert {hdl} != {aux.attach};
""".format(**locals())
if conf[2] > 0:
constraints += u"""
assert {hdl} != {aux.detach};
""".format(**locals())
return constraints
if conf[0] < 2:
body += handle_related(aux, aux.handle)
else:
for i in xrange(conf[0]):
aux_hdl = getattr(aux, "handle_"+str(i))
body += handle_related(aux, aux_hdl)
for i, j in combinations(range(conf[0]), 2):
aux_hdl_i = getattr(aux, "handle_"+str(i))
aux_hdl_j = getattr(aux, "handle_"+str(j))
body += u"assert {aux_hdl_i} != {aux_hdl_j};".format(**locals())
rule.body = to_statements(rule, body)
aux.add_mtds([rule])
  # assume candidate methods will be neither <init> nor static
  # and have at least one parameter whose type is of interest (if any)
  def is_candidate_mtd(self, aux, mtd):
    if mtd.is_init or mtd.is_static: return False
    for (ty, _) in mtd.params:
      cls_ty = class_lookup(ty)
      if not cls_ty: continue
      # parameter type is a candidate class or the event class (or a supertype)
      for cls in aux.subs + [aux.evt]:
        if cls_ty <= cls: return True
      # events are allowed to be downcasted
      if aux.evt <= cls_ty: return True
    return False

  # retrieve candidate methods
  def get_candidate_mtds(self, aux, cls):
    """Collect methods of *cls* (or its implementers, for an interface)
    that pass the is_candidate_mtd filter."""
    mtds = cls.mtds
    # if it's an interface with implementers
    if cls.is_itf and cls.subs:
      # collect all sub-classes
      subss = util.flatten_classes(cls.subs, "subs")
      # filter out sub-interfaces (e.g., Action < ActionListener)
      subss, _ = util.partition(lambda c: c.is_class, subss)
      # then collect actual methods from those sub-classes
      mtds = util.flatten(map(op.attrgetter("mtds"), subss))
    return filter(partial(self.is_candidate_mtd, aux), mtds)
  # common params for methods in Aux...
  @staticmethod
  def mtd_params(aux):
    # Shared signature: (receiver aux, aux argument, event argument).
    aname, ename = aux.name, aux.evt.name
    rcv = u'_'.join(["rcv", aname])
    return [(aname, rcv), (aname, u"arg"), (ename, u"evt")]

  # restrict call stack for the given method via a global counter
  @staticmethod
  def limit_depth(aux, mtd, depth):
    """Wrap *mtd*'s body with a depth-counter field so that recursion
    deeper than *depth* returns early (null for non-void methods)."""
    fname = mtd.name + "_depth"
    z = to_expression(u"0")
    d = Field(clazz=aux, mods=C.PRST, typ=C.J.i, name=fname, init=z)
    aux.add_flds([d])
    ret = u"return" if mtd.typ == C.J.v else u"return null"
    prologue = to_statements(mtd, u"""
      if ({fname} > {depth}) {ret};
      {fname} = {fname} + 1;
    """.format(**locals()))
    epilogue = to_statements(mtd, u"""
      {fname} = {fname} - 1;
    """.format(**locals()))
    mtd.body = prologue + mtd.body + epilogue
# event type getter
def egetter(self, aux, clss):
aname, ename = aux.name, aux.evt.name
rcv = u'_'.join(["rcv", ename])
params = [(C.J.i, u"mtd_id"), (ename, rcv)]
egetter = Method(clazz=aux, mods=C.PBST, typ=u"Object", params=params, name=u"egetter")
def switch( (cls, other) ):
mtds = cls.mtds
for mtd in mtds: util.mk_or_append(self._subj_mtds, repr(mtd), aux)
logging.debug("{}.{}, {}, {}, {}".format(aux.name, egetter.name, repr(cls), repr(other), mtds))
def invoke(mtd):
if mtd.typ == u"void": return u''
cls = mtd.clazz
# if there is no implementer for this method in interface, ignore it
if cls.is_itf and not cls.subs: return u''
#actual_params = [(other.name, u"arg")] + [params[-1]]
#args = u", ".join(sig_match(mtd.params, actual_params))
call = u"return rcv_{}.{}();".format(ename, mtd.name)
return u"if (mtd_id == {mtd.id}) {{ {call} }}".format(**locals())
invocations = util.ffilter(map(invoke, mtds))
return u"\nelse ".join(invocations)
tests = util.ffilter([switch((aux.evt, aux.evt))])
egetter.body = to_statements(egetter, u"\nelse ".join(tests))
Observer.limit_depth(aux, egetter, 2)
aux.add_mtds([egetter])
setattr(aux, "egetter", egetter)
# a method that simulates reflection
def reflect(self, aux, clss, conf):
params = [(C.J.i, u"mtd_id")] + Observer.mtd_params(aux)
reflect = Method(clazz=aux, mods=C.PBST, params=params, name=u"reflect")
def switch( (cls, other) ):
mtds = self.get_candidate_mtds(aux, cls)
for mtd in mtds: util.mk_or_append(self._subj_mtds, repr(mtd), aux)
logging.debug("{}.{}, {}, {}, {}".format(aux.name, reflect.name, repr(cls), repr(other), mtds))
def invoke(mtd):
cls = mtd.clazz
# if there is no implementer for this method in interface, ignore it
if cls.is_itf and not cls.subs: return u''
actual_params = [(other.name, u"arg")] + [params[-1]]
args = u", ".join(sig_match(mtd.params, actual_params))
casted_rcv = u"({})rcv_{}".format(mtd.clazz.name, aux.name)
call = u"({}).{}({});".format(casted_rcv, mtd.name, args)
return u"if (mtd_id == {mtd.id}) {{ {call} }}".format(**locals())
invocations = util.ffilter(map(invoke, mtds))
body = u"\nelse ".join(invocations)
if conf[0] >= 2:
hdl, mtd = getattr(aux, "handle"), getattr(aux, "mtd_handle")
args = u", ".join(sig_match(mtd.params, params))
call = u"{}.{}({});".format(aux.name, mtd.name, args)
body += u"\nelse if (mtd_id == {hdl}) {{ {call} }}".format(**locals())
return body
tests = util.ffilter(map(switch, permutations(clss, 2)))
reflect.body = to_statements(reflect, u"\nelse ".join(tests))
depth = 3 if conf[0] >= 2 else 2
Observer.limit_depth(aux, reflect, depth)
aux.add_mtds([reflect])
setattr(aux, "reflect", reflect)
  # add a list of @Observer, along with an initializing statement
  @staticmethod
  def add_obs(aux, clss):
    """Add the observer-list field to *aux* and append its initialization
    to every constructor of the concrete candidate classes."""
    typ = u"{}<{}>".format(C.J.LST, C.J.OBJ)
    obs = Field(clazz=aux, typ=typ, name=C.OBS.obs)
    aux.add_flds([obs])
    setattr(aux, "obs", obs)
    tmp = '_'.join([C.OBS.tmp, aux.name])
    for cls in clss:
      # interfaces have no constructors to initialize
      if cls.is_itf: continue
      for mtd in cls.inits:
        body = u"""
          {0} {1} = ({0})this;
          {1}.{2} = new {3}();
        """.format(aux.name, tmp, C.OBS.obs, typ)
        mtd.body.extend(to_statements(mtd, body))

  # attach code
  @staticmethod
  def attach(aux):
    # attachCode: adds the argument to the receiver's observer list.
    params = Observer.mtd_params(aux)
    attach = Method(clazz=aux, mods=C.PBST, params=params, name=u"attachCode")
    add = u"rcv_{}.{}.add(arg);".format(aux.name, C.OBS.obs)
    attach.body = to_statements(attach, add)
    aux.add_mtds([attach])
    setattr(aux, "mtd_attach", attach)

  # detach code
  @staticmethod
  def detach(aux):
    # detachCode: removes the argument from the receiver's observer list.
    params = Observer.mtd_params(aux)
    detach = Method(clazz=aux, mods=C.PBST, params=params, name=u"detachCode")
    rm = u"rcv_{}.{}.remove(arg);".format(aux.name, C.OBS.obs)
    detach.body = to_statements(detach, rm)
    aux.add_mtds([detach])
    setattr(aux, "mtd_detach", detach)
# upper-level handle code
@staticmethod
def sub_handle(aux, idx):
params = Observer.mtd_params(aux)
handle = Method(clazz=aux, mods=C.PBST, params=params, name=u"subHandleCode")
cnt = Observer.fresh_cnt()
aname = aux.name
reflect = u"reflect" #getattr(aux, "reflect").name
loop = u"""
if (evt instanceof {aux.evt.name}) {{
List<Object> obs{cnt} = rcv_{aname}._obs;
for ({aname} o : obs{cnt}) {{
{aname}.{reflect}({aux.update}_{idx}, o, rcv_{aname}, ({aux.evt.name})evt);
}}
}}
""".format(**locals())
handle.body = to_statements(handle, loop)
aux.add_mtds([handle])
setattr(aux, "mtd_sub_handle", handle)
# handle code
@staticmethod
def handle(aux, conf):
ename = aux.evt.name
params = Observer.mtd_params(aux)
handle = Method(clazz=aux, mods=C.PBST, params=params, name=u"handleCode")
reflect = u"reflect" #getattr(aux, "reflect").name
if conf[0] >= 2: egetter = getattr(aux, "egetter").name
aname = aux.name
args = u", ".join(map(lambda (ty, nm): nm, params))
def handle_body(aux, role):
aname, evtname = aux.name, aux.evt.name
cnt = Observer.fresh_cnt()
loop = u"""
if (evt instanceof {evtname}) {{
List<Object> obs{cnt} = rcv_{aname}._obs;
for ({aname} o : obs{cnt}) {{
{aname}.reflect({role}, o, rcv_{aname}, ({evtname})evt);
}}
}}
""".format(**locals())
return loop
def handle_mtd(i):
handle_i = Method(clazz=aux, mods=C.PBST, params=params, name=u"handleCode_{i}".format(**locals()))
body_i = handle_body(aux, getattr(aux, "update_"+str(i)))
handle_i.body = to_statements(handle_i, body_i)
setattr(aux, "mtd_handle_"+str(i), handle_i)
return handle_i
if conf[0] < 2:
cnt = Observer.__cnt
body = handle_body(aux, aux.update)
handle.body = to_statements(handle, body)
else:
aux.add_mtds(map(handle_mtd, range(conf[0])))
evt_cls = class_lookup(aux.evt.name)
const_flds = []
if evt_cls.inners:
for inner in evt_cls.inners:
const_flds.extend(filter(lambda f: f.is_final and f.is_static, inner.flds))
evtyp = evt_cls.inners[0].name
evt_id = getattr(aux, u"eventtype")
get_type = u"""
{evtyp} et = {aname}.{egetter}({evt_id}, evt);
""".format(**locals())
def handle_switch(i):
evt_cls = class_lookup(aux.evt.name)
evtyp = evt_cls.inners[0].name
aname = aux.name
reflect = u"reflect" #getattr(aux, "reflect").name
cns_typ = '.'.join([aux.evt.name, evtyp, const_flds[i].name])
hdl_id = getattr(aux, u"handle_"+unicode(i))
params = Observer.mtd_params(aux)
args = u", ".join(map(lambda (ty, nm): nm, params))
return u"""
if (et == {cns_typ}) {aname}.{reflect}({hdl_id}, {args});
""".format(**locals())
choose = u"\nelse ".join(map(handle_switch, range(len(const_flds))))
handle.body = to_statements(handle, get_type + choose)
aux.add_mtds([handle])
setattr(aux, "mtd_handle", handle)
# add a role variable for the handle method
if conf[0] >= 2:
c_to_e = lambda c: to_expression(unicode(c))
new_fld = Field(clazz=aux, mods=[C.mod.ST], typ=C.J.i, name=getattr(aux, C.OBS.H), init=c_to_e(handle.id))
aux.add_flds([new_fld])
# attach/detach/handle will be dispatched here
@staticmethod
def subjectCall(aux, conf):
params = [(C.J.i, u"mtd_id")] + Observer.mtd_params(aux)
one = Method(clazz=aux, mods=C.PBST, params=params, name=u"subjectCall")
def switch(role):
aname = aux.name
args = ", ".join(map(lambda (ty, nm): nm, params[1:]))
v = getattr(aux, role)
f = getattr(aux, "mtd_"+role).name
return u"if (mtd_id == {v}) {aname}.{f}({args});".format(**locals())
roles = [C.OBS.H]
if conf[0] >= 2: map(lambda i: roles.append('_'.join([C.OBS.H, str(i)])), range(conf[0]))
if conf[1] > 0: roles.append(C.OBS.A)
if conf[2] > 0: roles.append(C.OBS.D)
one.body = to_statements(one, u'\n'.join(map(switch, roles)))
Observer.limit_depth(aux, one, 2)
aux.add_mtds([one])
setattr(aux, "one", one)
##
## generate an aux type for @Subject and @Observer
##
def gen_aux_cls(self, event, conf, clss):
aux_name = self._evts[event]
aux = merge_flat(aux_name, clss)
aux.mods = [C.mod.PB]
aux.subs = clss # virtual relations; to find proper methods
setattr(aux, "evt", class_lookup(event))
def extend_itf(cls):
_clss = [cls]
if cls.is_itf and cls.subs: _clss.extend(cls.subs)
return _clss
ext_clss = util.rm_dup(util.flatten(map(extend_itf, clss)))
# add a list of @Observer into candidate classes
self.add_obs(aux, ext_clss)
# set role variables
def set_role(role):
setattr(aux, role, '_'.join([role, aux.name]))
for r in C.obs_roles:
if r == C.OBS.H or r == C.OBS.U:
if conf[0] < 2: set_role(r)
else:
set_role(r)
map(lambda i: set_role('_'.join([r, str(i)])), range(conf[0]))
elif r == C.OBS.A:
if conf[1] > 0: set_role(r)
elif r == C.OBS.D:
if conf[2] > 0: set_role(r)
else:
set_role(r)
# add fields that stand for non-deterministic rule choices
def aux_fld(init, ty, nm):
if hasattr(aux, nm): nm = getattr(aux, nm)
return Field(clazz=aux, mods=[C.mod.ST], typ=ty, name=nm, init=init)
hole = to_expression(C.T.HOLE)
aux_int = partial(aux_fld, hole, C.J.i)
c_to_e = lambda c: to_expression(unicode(c))
# if explicitly annotated, use those concrete event names
if self._tmpl.is_event_annotated:
ev_init = c_to_e(aux.evt.id)
role_var_evt = aux_fld(ev_init, C.J.i, C.OBS.EVT)
else: # o.w., introduce a role variable for event
role_var_evt = aux_int(C.OBS.EVT)
aux.add_flds([role_var_evt])
## range check
gen_range = lambda ids: gen_E_gen(map(c_to_e, util.rm_dup(ids)))
get_id = op.attrgetter("id")
# range check for classes
cls_vars = [C.OBS.OBSR, C.OBS.SUBJ]
cls_ids = map(get_id, clss)
cls_init = gen_range(cls_ids)
aux_int_cls = partial(aux_fld, cls_init, C.J.i)
aux.add_flds(map(aux_int_cls, cls_vars))
# range check for methods
mtd_vars = []
if conf[1] > 0: mtd_vars.append(C.OBS.A)
if conf[2] > 0: mtd_vars.append(C.OBS.D)
for r in [C.OBS.H, C.OBS.U]:
if conf[0] < 2: mtd_vars.append(r)
else: map(lambda i: mtd_vars.append('_'.join([r, str(i)])), range(conf[0]))
mtds = util.flatten(map(partial(self.get_candidate_mtds, aux), clss))
mtd_ids = map(get_id, mtds)
mtd_init = gen_range(mtd_ids)
aux_int_mtd = partial(aux_fld, mtd_init, C.J.i)
aux.add_flds(map(aux_int_mtd, mtd_vars))
# range check for event type getter
if conf[0] >= 2:
evt_mtds = filter(lambda m: class_lookup(m.typ) in aux.evt.inners, aux.evt.mtds)
evt_mtd_init = gen_range(map(get_id, evt_mtds))
aux.add_flds([aux_fld(evt_mtd_init, C.J.i, C.OBS.EVTTYP)])
## rules regarding non-deterministic rewritings
Observer.check_rule1(aux, conf)
Observer.check_rule2(aux, conf)
if conf[0] >= 2: self.egetter(aux, clss)
Observer.handle(aux, conf)
Observer.attach(aux)
Observer.detach(aux)
Observer.subjectCall(aux, conf)
self.reflect(aux, clss, conf)
add_artifacts([aux.name])
return aux
  # add an event queue
  @staticmethod
  def add_event_queue(cls):
    """Add and initialize a Queue<event> field named _evt_queue on *cls*."""
    eq_typ = u"Queue<{}>".format(C.GUI.EVT)
    eq_name = u"_evt_queue"
    eq = Field(clazz=cls, typ=eq_typ, name=eq_name)
    cls.add_flds([eq])
    setattr(cls, "eq", eq)
    cls.init_fld(eq)
@v.when(Template)
def visit(self, node):
    # Template-level entry point: if the template uses events, generate one
    # AuxObserver class per event kind and wire up the global event queue.
    self._tmpl = node
    if not node.events: return
    self._eq = class_lookup(C.GUI.QUE)
    # build mappings from event kinds to involved classes
    if self._tmpl.is_event_annotated:
        self.find_clss_involved_w_anno_evt(node)
    else:
        self.find_clss_involved_w_anno(node)
    # introduce AuxObserver$n$ for @Subject and @Observer
    for event in self._clss:
        clss = self._clss[event]
        node.add_classes([self.gen_aux_cls(event, self._obs_conf[event], clss)])
    # add an event queue
    Observer.add_event_queue(self._eq)
    # add type conversion mappings
    # only the first aux class per key is registered in the type map
    trimmed_auxs = {}
    for k in self._auxs: trimmed_auxs[k] = self._auxs[k][0]
    add_ty_map(trimmed_auxs)
@v.when(Clazz)
def visit(self, node): pass  # classes need no per-node rewriting here
@v.when(Field)
def visit(self, node): pass  # fields need no per-node rewriting here
@v.when(Method)
def visit(self, node):
    # Method-level visitor: synthesizes bodies for the EventQueue special
    # methods and injects subjectCall forwarding into @Attach/@Detach/@Handle
    # candidate methods.
    self._cur_mtd = node
    # special methods
    if node.clazz.name == C.GUI.QUE:
        eq = self._eq
        # EventQueue.dispatchEvent
        if node.name == "dispatchEvent":
            _, evt = node.params[0]
            switches = u''
            # one "else if" branch per event kind known to the template
            for event, i in self._tmpl.events.iteritems():
                if event not in self._clss: continue
                cls_h = class_lookup(self._evts[event])
                cls_h_name = cls_h.name
                reflect = cls_h.reflect.name
                hdl = '.'.join([cls_h_name, cls_h.handle])
                cond_call = u"""
                else if ({evt}_k == {i}) {{
                  {cls_h_name} rcv_{i} = ({cls_h_name}){evt}.getSource();
                  //{cls_h_name} rcv_{i} = ({cls_h_name}){evt}._source;
                  {cls_h_name}.{reflect}({hdl}, rcv_{i}, null, ({event}){evt});
                }}
                """.format(**locals())
                switches += cond_call
            # kind == -1 is reserved for InvocationEvent (see invokeLater)
            body = u"""
            if ({evt} == null) return;
            int {evt}_k = {evt}.kind;
            if ({evt}_k == -1) {{ // InvocationEvent
              InvocationEvent ie = (InvocationEvent){evt};
              ie.dispatch();
            }} {switches}
            """.format(**locals())
            node.body = to_statements(node, body)
        # EventQueue.getNextEvent
        elif node.name == "getNextEvent":
            body = u"if (this != null) return ({}){}.remove(); else return null;".format(node.typ, eq.eq.name)
            node.body = to_statements(node, body)
        # EventQueue.postEvent
        elif node.name == "postEvent":
            _, evt = node.params[0]
            body = u"if (this != null) {}.add({});".format(eq.eq.name, evt)
            node.body = to_statements(node, body)
        # EventQueue.invokeLater
        elif node.name == "invokeLater":
            _, r = node.params[0]
            root_evt = C.GUI.EVT
            body = u"""
            Toolkit t = Toolkit.getDefaultToolkit();
            {root_evt} evt = new InvocationEvent(null, {r});
            evt.kind = -1; // kinds of usual events start at 0
            EventQueue q = t.getSystemEventQueue();
            q.postEvent(evt);
            """.format(**locals())
            node.body = to_statements(node, body)
    # NOTE: deprecated (use adapter pattern)
    #elif node.clazz.name == C.GUI.IVK:
    #  # InvocationEvent.dispatch
    #  if node.name == "dispatch":
    #    fld = node.clazz.fld_by_typ(C.J.RUN)
    #    body = u"{}.run();".format(fld.name)
    #    node.body = to_statements(node, body)
    # for methods that are candidates of @Attach/@Detach/@Handle
    if node.clazz.is_itf: return
    if repr(node) in self._subj_mtds:
        cname = node.clazz.name
        for aux in self._subj_mtds[repr(node)]:
            logging.debug("{}.{} => {}.subjectCall".format(cname, node.name, aux.name))
            # prepend the receiver for instance methods
            if node.is_static: params = node.params
            else: params = [(cname, C.J.THIS)] + node.params
            one_params = [(C.J.i, unicode(node.id))]
            for (ty, nm) in params:
                cls_ty = class_lookup(ty)
                # downcast AWTEvent to actual event
                if aux.evt <= cls_ty:
                    one_params.append( (aux.evt.name, nm) )
                elif self.find_aux(ty):
                    one_params.append( (aux.name, nm) )
                else:
                    one_params.append( (ty, nm) )
            # prepend the forwarding call to the existing body
            body = u"{};".format(call_stt(aux.one, one_params))
            node.body = to_statements(node, body) + node.body
@v.when(Statement)
def visit(self, node): return [node]  # statements pass through unchanged
## @React
## =>
## AWTEvent e = q.getNextEvent();
## q.dispatchEvent(e);
# NOTE: assume @React is in @Harness only; and then use variable q there
@v.when(Expression)
def visit(self, node):
    # Replace a bare @React annotation expression with the two-statement
    # fetch-and-dispatch sequence shown above; everything else is untouched.
    if node.kind == C.E.ANNO:
        _anno = node.anno
        if _anno.name == C.A.REACT:
            logging.debug("reducing: {}".format(str(_anno)))
            # fresh suffix keeps generated local names unique per expansion
            suffix = Observer.fresh_cnt()
            body = u"""
            {1} evt{0} = q.getNextEvent();
            q.dispatchEvent(evt{0});
            """.format(suffix, C.GUI.EVT)
            return to_statements(self._cur_mtd, body)
    return node
|
plum-umd/pasket
|
pasket/rewrite/gui/observer.py
|
Python
|
mit
| 28,595
|
[
"VisIt"
] |
c3541e9211f85ae7248d7e6e1c6a940803be597869ccd207ec9558cfd25047fb
|
# -*- coding: utf-8 -*-
# LICENCE
#
# This File is part of the Webbouqueteditor plugin
# and licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported
# License if not stated otherwise in a files head. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc-sa/3.0/ or send a letter to Creative
# Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
from __future__ import print_function
from Plugins.Extensions.OpenWebif.controllers.i18n import _
from enigma import eServiceReference, eServiceCenter, eDVBDB
from Components.Sources.Source import Source
from Screens.ChannelSelection import MODE_TV #,service_types_tv, MODE_RADIO
from Components.config import config
from os import remove, path, popen
from Screens.InfoBar import InfoBar
from ServiceReference import ServiceReference
from Components.ParentalControl import parentalControl
from re import compile as re_compile
from Components.NimManager import nimmanager
class BouquetEditor(Source):
    """OpenWebif web component for editing enigma2 bouquets.

    One instance handles one command kind (selected via the ``func``
    constructor argument); handleCommand() dispatches to the matching
    method and stores its (success, message) tuple in ``self.result``.
    """
    # command ids, one per supported bouquet-editing operation
    ADD_BOUQUET = 0
    REMOVE_BOUQUET = 1
    MOVE_BOUQUET = 2
    ADD_SERVICE_TO_BOUQUET = 3
    REMOVE_SERVICE = 4
    MOVE_SERVICE = 5
    ADD_PROVIDER_TO_BOUQUETLIST = 6
    ADD_SERVICE_TO_ALTERNATIVE = 7
    REMOVE_ALTERNATIVE_SERVICES = 8
    TOGGLE_LOCK = 9
    BACKUP = 10
    RESTORE = 11
    RENAME_SERVICE = 12
    ADD_MARKER_TO_BOUQUET = 13
    IMPORT_BOUQUET = 14
    # where backup archives are written / read
    BACKUP_PATH = "/tmp" # nosec
    BACKUP_FILENAME = "webbouqueteditor_backup.tar"
def __init__(self, session, func=ADD_BOUQUET):
    """Bind this editor source to *session* for command kind *func*."""
    Source.__init__(self)
    self.session = session
    self.func = func
    self.command = None
    self.bouquet_rootstr = ""
    # placeholder result until handleCommand() has run
    self.result = (False, "one two three four unknown command")
def handleCommand(self, cmd):
    """Dispatch *cmd* to the handler selected by self.func.

    The handler's (success, message) tuple is stored in self.result.
    """
    print("[WebComponents.BouquetEditor] handleCommand with cmd = ", cmd)
    dispatch = {
        self.ADD_BOUQUET: self.addToBouquet,
        self.MOVE_BOUQUET: self.moveBouquet,
        self.MOVE_SERVICE: self.moveService,
        self.REMOVE_BOUQUET: self.removeBouquet,
        self.REMOVE_SERVICE: self.removeService,
        self.ADD_SERVICE_TO_BOUQUET: self.addServiceToBouquet,
        self.ADD_PROVIDER_TO_BOUQUETLIST: self.addProviderToBouquetlist,
        self.ADD_SERVICE_TO_ALTERNATIVE: self.addServiceToAlternative,
        self.REMOVE_ALTERNATIVE_SERVICES: self.removeAlternativeServices,
        self.TOGGLE_LOCK: self.toggleLock,
        self.BACKUP: self.backupFiles,
        self.RESTORE: self.restoreFiles,
        self.RENAME_SERVICE: self.renameService,
        self.ADD_MARKER_TO_BOUQUET: self.addMarkerToBouquet,
        self.IMPORT_BOUQUET: self.importBouquet,
    }
    handler = dispatch.get(self.func)
    if handler is not None:
        self.result = handler(cmd)
    else:
        self.result = (False, _("one two three four unknown command"))
def addToBouquet(self, param):
    """Create a new empty bouquet named param["name"] (TV unless mode says radio)."""
    print("[WebComponents.BouquetEditor] addToBouquet with param = ", param)
    bName = param["name"]
    if bName is None:
        return (False, _("No bouquet name given!"))
    # default to TV when no (or an empty) mode is supplied
    mode = MODE_TV
    if param.get("mode") is not None:
        mode = int(param["mode"])
    return self.addBouquet(bName, mode, None)
def addBouquet(self, bName, mode, services):
    # Create userbouquet.<id>.tv/.radio, register it in the root bouquet
    # list and, if *services* is given, fill it with those references.
    # Returns a (success, message) tuple.
    if config.usage.multibouquet.value:
        mutableBouquetList = self.getMutableBouquetList(mode)
        if mutableBouquetList:
            if mode == MODE_TV:
                bName += " (TV)"
                sref = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET \"userbouquet.%s.tv\" ORDER BY bouquet' % (self.buildBouquetID(bName, "userbouquet.", mode))
            else:
                bName += " (Radio)"
                sref = '1:7:2:0:0:0:0:0:0:0:FROM BOUQUET \"userbouquet.%s.radio\" ORDER BY bouquet' % (self.buildBouquetID(bName, "userbouquet.", mode))
            new_bouquet_ref = eServiceReference(sref)
            # addService() follows the enigma2 convention: 0 means success
            if not mutableBouquetList.addService(new_bouquet_ref):
                mutableBouquetList.flushChanges()
                eDVBDB.getInstance().reloadBouquets()
                mutableBouquet = self.getMutableList(new_bouquet_ref)
                if mutableBouquet:
                    mutableBouquet.setListName(bName)
                    if services is not None:
                        for service in services:
                            # non-zero return means the add failed
                            if mutableBouquet.addService(service):
                                print("add", service.toString(), "to new bouquet failed")
                    mutableBouquet.flushChanges()
                    self.setRoot(self.bouquet_rootstr)
                    return (True, _("Bouquet %s created.") % bName)
                else:
                    return (False, _("Get mutable list for new created bouquet failed!"))
            else:
                return (False, _("Bouquet %s already exists.") % bName)
        else:
            return (False, _("Bouquetlist is not editable!"))
    else:
        return (False, _("Multi-Bouquet is not enabled!"))
def addProviderToBouquetlist(self, param):
    """Create a bouquet containing every service of the given provider."""
    print("[WebComponents.BouquetEditor] addProviderToBouquet with param = ", param)
    refstr = param["sProviderRef"]
    if refstr is None:
        return (False, _("No provider given!"))
    mode = MODE_TV
    if param.get("mode") is not None:
        mode = int(param["mode"])
    provider = ServiceReference(eServiceReference(refstr))
    services = eServiceCenter.getInstance().list(provider.ref)
    # 'R' content yields the raw service references of the provider
    return self.addBouquet(provider.getServiceName(), mode,
                           services and services.getContent('R', True))
def removeBouquet(self, param):
    """Remove a bouquet (or alternatives list) and delete its file.

    param keys: sBouquetRef (required), mode, and BouquetRefRoot (set only
    when an alternatives bouquet is being removed from its parent).
    Returns a (success, message) tuple.
    """
    print("[WebComponents.BouquetEditor] removeBouquet with param = ", param)
    refstr = sref = param["sBouquetRef"]
    if refstr is None:
        return (False, _("No bouquet name given!"))
    mode = MODE_TV  # init
    if "mode" in param and param["mode"] is not None:
        mode = int(param["mode"])
    if "BouquetRefRoot" in param:
        bouquet_root = param["BouquetRefRoot"]  # only when removing alternative
    else:
        bouquet_root = None
    # extract the physical file name from the 'FROM BOUQUET "..."' clause
    pos = refstr.find('FROM BOUQUET "')
    filename = None
    if pos != -1:
        refstr = refstr[pos + 14:]
        pos = refstr.find('"')
        if pos != -1:
            filename = '/etc/enigma2/' + refstr[:pos]  # FIXMEEE !!! HARDCODED /etc/enigma2
    ref = eServiceReference(sref)
    bouquetName = self.getName(ref)
    if not bouquetName:
        bouquetName = filename
    if bouquet_root:
        mutableList = self.getMutableList(eServiceReference(bouquet_root))
    else:
        mutableList = self.getMutableBouquetList(mode)
    if ref.valid() and mutableList is not None:
        # removeService() returns 0 on success (enigma2 convention)
        if not mutableList.removeService(ref):
            mutableList.flushChanges()
            self.setRoot(self.bouquet_rootstr)
        else:
            return (False, _("Bouquet %s removed failed.") % filename)
    else:
        # NOTE(review): "sevicerefence" typo kept — it is a translated msgid
        return (False, _("Bouquet %s removed failed, sevicerefence or mutable list is not valid.") % filename)
    try:
        if filename is not None:
            if not path.exists(filename + '.del'):
                remove(filename)
        # always report success here, even when there was no file to delete
        # (fixes a fall-through that returned None for file-less bouquets)
        return (True, _("Bouquet %s deleted.") % bouquetName)
    except OSError:
        return (False, _("Error: Bouquet %s could not deleted, OSError.") % filename)
def moveBouquet(self, param):
    """Move a bouquet to *position* within the root bouquet list.

    param keys: sBouquetRef (required), mode, position (required).
    Returns a (success, message) tuple.
    """
    print("[WebComponents.BouquetEditor] moveBouquet with param = ", param)
    sBouquetRef = param["sBouquetRef"]
    if sBouquetRef is None:
        return (False, _("No bouquet name given!"))
    mode = MODE_TV  # init
    if "mode" in param and param["mode"] is not None:
        mode = int(param["mode"])
    position = None
    if "position" in param and param["position"] is not None:
        position = int(param["position"])
    if position is None:
        return (False, _("No position given!"))
    # build the reference up front: the original only created it in the
    # success branch, so the failure message crashed with a NameError
    ref = eServiceReference(sBouquetRef)
    mutableBouquetList = self.getMutableBouquetList(mode)
    if mutableBouquetList is not None:
        mutableBouquetList.moveService(ref, position)
        mutableBouquetList.flushChanges()
        self.setRoot(self.bouquet_rootstr)
        return (True, _("Bouquet %s moved.") % self.getName(ref))
    else:
        return (False, _("Bouquet %s can not be moved.") % self.getName(ref))
def removeService(self, param):
    # Remove a service from the bouquet given by sBouquetRef. A service
    # whose reference is a group (an alternatives list) is removed via
    # removeBouquet() so its file is deleted as well.
    print("[WebComponents.BouquetEditor] removeService with param = ", param)
    sBouquetRef = param["sBouquetRef"]
    if sBouquetRef is None:
        return (False, _("No bouquet given!"))
    sRef = None
    if "sRef" in param:
        if param["sRef"] is not None:
            sRef = param["sRef"]
    if sRef is None:
        return (False, _("No service given!"))
    ref = eServiceReference(sRef)
    if ref.flags & eServiceReference.isGroup: # check if service is an alternative, if so delete it with removeBouquet
        new_param = {}
        new_param["sBouquetRef"] = sRef
        new_param["mode"] = None # of no interest when passing BouquetRefRoot
        new_param["BouquetRefRoot"] = sBouquetRef
        returnValue = self.removeBouquet(new_param)
        if returnValue[0]:
            return (True, _("Service %s removed.") % self.getName(ref))
    else:
        bouquetRef = eServiceReference(sBouquetRef)
        mutableBouquetList = self.getMutableList(bouquetRef)
        if mutableBouquetList is not None:
            # removeService() returns 0 on success
            if not mutableBouquetList.removeService(ref):
                mutableBouquetList.flushChanges()
                self.setRoot(sBouquetRef)
                return (True, _("Service %s removed from bouquet %s.") % (self.getName(ref), self.getName(bouquetRef)))
    return (False, _("Service %s can not be removed.") % self.getName(ref))
def moveService(self, param):
    """Move a service to *position* inside the bouquet given by sBouquetRef."""
    print("[WebComponents.BouquetEditor] moveService with param = ", param)
    sBouquetRef = param["sBouquetRef"]
    if sBouquetRef is None:
        return (False, _("No bouquet given!"))
    sRef = param.get("sRef")
    if sRef is None:
        return (False, _("No service given!"))
    raw_position = param.get("position")
    if raw_position is None:
        return (False, _("No position given!"))
    position = int(raw_position)
    mutableBouquetList = self.getMutableList(eServiceReference(sBouquetRef))
    if mutableBouquetList is None:
        return (False, _("Service can not be moved."))
    ref = eServiceReference(sRef)
    mutableBouquetList.moveService(ref, position)
    mutableBouquetList.flushChanges()
    self.setRoot(sBouquetRef)
    return (True, _("Service %s moved.") % self.getName(ref))
def addServiceToBouquet(self, param):
    # Add a service (or, via sRefUrl + Name, an IPTV stream) to the bouquet
    # given by sBouquetRef, optionally before sRefBefore.
    print("[WebComponents.BouquetEditor] addService with param = ", param)
    sBouquetRef = param["sBouquetRef"]
    if sBouquetRef is None:
        return (False, _("No bouquet given!"))
    sRef = None
    if "sRef" in param:
        if param["sRef"] is not None:
            sRef = param["sRef"]
    sRefUrl = False
    sName = None
    if "Name" in param:
        if param["Name"] is not None:
            sName = param["Name"]
    if sRef is None and "sRefUrl" in param:
        # check IPTV
        # an IPTV entry needs both a URL and a display name
        if param["sRefUrl"] is not None and sName is not None:
            sRef = param["sRefUrl"]
            sRefUrl = True
    elif sRef is None:
        return (False, _("No service given!"))
    sRefBefore = eServiceReference()
    if "sRefBefore" in param:
        if param["sRefBefore"] is not None:
            sRefBefore = eServiceReference(param["sRefBefore"])
    bouquetRef = eServiceReference(sBouquetRef)
    mutableBouquetList = self.getMutableList(bouquetRef)
    if mutableBouquetList is not None:
        if sRefUrl:
            # type 4097 = GStreamer/IPTV playback service
            ref = eServiceReference(4097, 0, sRef)
        else:
            ref = eServiceReference(sRef)
        if sName:
            ref.setName(sName)
        # addService() returns 0 on success
        if not mutableBouquetList.addService(ref, sRefBefore):
            mutableBouquetList.flushChanges()
            self.setRoot(sBouquetRef)
            return (True, _("Service %s added.") % self.getName(ref))
        else:
            bouquetName = self.getName(bouquetRef)
            return (False, _("Service %s already exists in bouquet %s.") % (self.getName(ref), bouquetName))
    return (False, _("This service can not be added."))
def addMarkerToBouquet(self, param):
    # Insert a marker (named, type 64) or a spacer (param "SP", type 832)
    # into the bouquet given by sBouquetRef, optionally before sRefBefore.
    print("[WebComponents.BouquetEditor] addMarkerToBouquet with param = ", param)
    sBouquetRef = param["sBouquetRef"]
    if sBouquetRef is None:
        return (False, _("No bouquet given!"))
    name = None
    if "Name" in param:
        if param["Name"] is not None:
            name = param["Name"]
    if name is None:
        # without a name only a spacer ("SP") is allowed
        if "SP" not in param:
            return (False, _("No marker-name given!"))
    sRefBefore = eServiceReference()
    if "sRefBefore" in param:
        if param["sRefBefore"] is not None:
            sRefBefore = eServiceReference(param["sRefBefore"])
    bouquet_ref = eServiceReference(sBouquetRef)
    mutableBouquetList = self.getMutableList(bouquet_ref)
    # bump the numeric id until addService() accepts the reference
    cnt = 0
    while mutableBouquetList:
        if name is None:
            service_str = '1:832:D:%d:0:0:0:0:0:0:' % cnt
        else:
            service_str = '1:64:%d:0:0:0:0:0:0:0::%s' % (cnt, name)
        ref = eServiceReference(service_str)
        if not mutableBouquetList.addService(ref, sRefBefore):
            mutableBouquetList.flushChanges()
            self.setRoot(sBouquetRef)
            return (True, _("Marker added."))
        cnt += 1
    return (False, _("Internal error!"))
def renameService(self, param):
    # Rename a bouquet/alternatives list (directly via setListName) or a
    # plain service (by removing it and re-adding it with the new name).
    sRef = None
    if "sRef" in param:
        if param["sRef"] is not None:
            sRef = param["sRef"]
    if sRef is None:
        return (False, _("No service given!"))
    sName = None
    if "newName" in param:
        if param["newName"] is not None:
            sName = param["newName"]
    if sName is None:
        return (False, _("No new servicename given!"))
    sBouquetRef = None
    if "sBouquetRef" in param:
        if param["sBouquetRef"] is not None:
            sBouquetRef = param["sBouquetRef"]
    cur_ref = eServiceReference(sRef)
    if cur_ref.flags & eServiceReference.mustDescent:
        # bouquets or alternatives can be renamed with setListName directly
        mutableBouquetList = self.getMutableList(cur_ref)
        if mutableBouquetList:
            mutableBouquetList.setListName(sName)
            mutableBouquetList.flushChanges()
            if sBouquetRef: # BouquetRef is given when renaming alternatives
                self.setRoot(sBouquetRef)
            else:
                mode = MODE_TV # mode is given when renaming bouquet
                if "mode" in param:
                    if param["mode"] is not None:
                        mode = int(param["mode"])
                if mode == MODE_TV:
                    bouquet_rootstr = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet'
                else:
                    # NOTE(review): radio root also uses type '1:7:1' here (and in
                    # getMutableBouquetList) while addBouquet uses '1:7:2' — confirm intended
                    bouquet_rootstr = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.radio" ORDER BY bouquet'
                self.setRoot(bouquet_rootstr)
            return (True, _("Bouquet renamed successfully."))
    else: # service
        # services can not be renamed directly, so delete the current and add it again with new servicename
        sRefBefore = None
        if "sRefBefore" in param:
            if param["sRefBefore"] is not None:
                sRefBefore = param["sRefBefore"]
        new_param = {}
        new_param["sBouquetRef"] = sBouquetRef
        new_param["sRef"] = sRef
        new_param["Name"] = sName
        new_param["sRefBefore"] = sRefBefore
        returnValue = self.removeService(new_param)
        if returnValue[0]:
            returnValue = self.addServiceToBouquet(new_param)
            if returnValue[0]:
                return (True, _("Service renamed successfully."))
    return (False, _("Service can not be renamed."))
def addServiceToAlternative(self, param):
    # Add sRef as an alternative of sCurrentRef. If sCurrentRef is not yet
    # an alternatives group, an alternatives bouquet is created first and
    # substituted for it in the parent bouquet (sBouquetRef).
    sBouquetRef = param["sBouquetRef"]
    if sBouquetRef is None:
        # NOTE(review): untranslated message — every sibling wraps these in _()
        return (False, "No bouquet given!")
    sRef = None
    if "sRef" in param:
        if param["sRef"] is not None:
            sRef = param["sRef"] # service to add to the alternative
    if sRef is None:
        return (False, _("No service given!"))
    sCurrentRef = param["sCurrentRef"] # alternative service
    if sCurrentRef is None:
        return (False, _("No current service given!"))
    cur_ref = eServiceReference(sCurrentRef)
    # check if service is already an alternative
    if not (cur_ref.flags & eServiceReference.isGroup):
        # sCurrentRef is not an alternative service yet, so do this and add itself to new alternative liste
        mode = MODE_TV # init
        if "mode" in param:
            if param["mode"] is not None:
                mode = int(param["mode"])
        mutableBouquetList = self.getMutableList(eServiceReference(sBouquetRef))
        if mutableBouquetList:
            cur_service = ServiceReference(cur_ref)
            name = cur_service.getServiceName()
            # type 134 marks an alternatives bouquet
            if mode == MODE_TV:
                sref = '1:134:1:0:0:0:0:0:0:0:FROM BOUQUET \"alternatives.%s.tv\" ORDER BY bouquet' % (self.buildBouquetID(name, "alternatives.", mode))
            else:
                sref = '1:134:2:0:0:0:0:0:0:0:FROM BOUQUET \"alternatives.%s.radio\" ORDER BY bouquet' % (self.buildBouquetID(name, "alternatives.", mode))
            new_ref = eServiceReference(sref)
            # insert the group where the service was, then drop the service
            if not mutableBouquetList.addService(new_ref, cur_ref):
                mutableBouquetList.removeService(cur_ref)
                mutableBouquetList.flushChanges()
                eDVBDB.getInstance().reloadBouquets()
                mutableAlternatives = self.getMutableList(new_ref)
                if mutableAlternatives:
                    mutableAlternatives.setListName(name)
                    if mutableAlternatives.addService(cur_ref):
                        print("add", cur_ref.toString(), "to new alternatives failed")
                    mutableAlternatives.flushChanges()
                    self.setRoot(sBouquetRef)
                    sCurrentRef = sref # currentRef is now an alternative (bouquet)
                else:
                    return (False, _("Get mutable list for new created alternative failed!"))
            else:
                return (False, _("Alternative %s created failed.") % name)
        else:
            return (False, _("Bouquetlist is not editable!"))
    # add service to alternative-bouquet
    new_param = {}
    new_param["sBouquetRef"] = sCurrentRef
    new_param["sRef"] = sRef
    returnValue = self.addServiceToBouquet(new_param)
    if returnValue[0]:
        cur_ref = eServiceReference(sCurrentRef)
        cur_service = ServiceReference(cur_ref)
        name = cur_service.getServiceName()
        service_ref = ServiceReference(sRef)
        service_name = service_ref.getServiceName()
        return (True, _("Added %s to alternative service %s.") % (service_name, name))
    else:
        return returnValue
def removeAlternativeServices(self, param):
    # Dissolve an alternatives group: promote its first member back into the
    # parent bouquet, then delete the alternatives bouquet via removeBouquet().
    print("[WebComponents.BouquetEditor] removeAlternativeServices with param = ", param)
    sBouquetRef = param["sBouquetRef"]
    if sBouquetRef is None:
        return (False, _("No bouquet given!"))
    sRef = None
    if "sRef" in param:
        if param["sRef"] is not None:
            sRef = param["sRef"]
    if sRef is None:
        return (False, _("No service given!"))
    cur_ref = eServiceReference(sRef)
    # check if service is an alternative
    if cur_ref.flags & eServiceReference.isGroup:
        cur_service = ServiceReference(cur_ref)
        # NOTE(review): local name `list` shadows the builtin
        list = cur_service.list()
        first_in_alternative = list and list.getNext()
        if first_in_alternative:
            mutableBouquetList = self.getMutableList(eServiceReference(sBouquetRef))
            if mutableBouquetList is not None:
                # non-zero return means the insert failed
                if mutableBouquetList.addService(first_in_alternative, cur_service.ref):
                    print("couldn't add first alternative service to current root")
            else:
                print("couldn't edit current root")
        else:
            print("remove empty alternative list")
    else:
        return (False, _("Service is not an alternative."))
    new_param = {}
    new_param["sBouquetRef"] = sRef
    new_param["mode"] = None # of no interest when passing BouquetRefRoot
    new_param["BouquetRefRoot"] = sBouquetRef
    returnValue = self.removeBouquet(new_param)
    if returnValue[0]:
        self.setRoot(sBouquetRef)
        return (True, _("All alternative services deleted."))
    else:
        return returnValue
def toggleLock(self, param):
    """Toggle the parental-control lock of a service or bouquet.

    Requires parental control to be configured; when a setup PIN is active,
    param["password"] must match it. Returns a (success, message) tuple.
    """
    if not config.ParentalControl.configured.value:
        return (False, _("Parent Control is not activated."))
    sRef = None
    if "sRef" in param:
        if param["sRef"] is not None:
            sRef = param["sRef"]
    if sRef is None:
        return (False, _("No service given!"))
    if "setuppinactive" in list(config.ParentalControl.dict().keys()) and config.ParentalControl.setuppinactive.value:
        password = None
        if "password" in param:
            if param["password"] is not None:
                password = param["password"]
        if password is None:
            return (False, _("No Parent Control Setup Pin given!"))
        else:
            if password.isdigit():
                if int(password) != config.ParentalControl.setuppin.value:
                    return (False, _("Parent Control Setup Pin is wrong!"))
            else:
                return (False, _("Parent Control Setup Pin is wrong!"))
    cur_ref = eServiceReference(sRef)
    protection = parentalControl.getProtectionLevel(cur_ref.toCompareString())
    if protection:
        parentalControl.unProtectService(cur_ref.toCompareString())
    else:
        parentalControl.protectService(cur_ref.toCompareString())
    if cur_ref.flags & eServiceReference.mustDescent:
        serviceType = "Bouquet"
    else:
        serviceType = "Service"
    # Build the status text. A generic fallback is always assigned first so
    # the return below can never hit an UnboundLocalError (the original left
    # protectionText unset on some whitelist paths).
    if protection:
        protectionText = _("%s %s is locked.") % (serviceType, self.getName(cur_ref))
        if config.ParentalControl.type.value == "blacklist" and sRef in parentalControl.blacklist:
            # blacklist maps sref -> list of entry types ("SERVICE"/"BOUQUET");
            # the original tested membership of the *bool* `sRef in blacklist`,
            # which raised a TypeError — TODO confirm against Components.ParentalControl
            if "SERVICE" in parentalControl.blacklist[sRef]:
                protectionText = _("Service %s is locked.") % self.getName(cur_ref)
            elif "BOUQUET" in parentalControl.blacklist[sRef]:
                protectionText = _("Bouquet %s is locked.") % self.getName(cur_ref)
    else:
        protectionText = _("%s %s is unlocked.") % (serviceType, self.getName(cur_ref))
        if hasattr(parentalControl, "whitelist") and sRef in parentalControl.whitelist:
            if "SERVICE" in parentalControl.whitelist[sRef]:
                protectionText = _("Service %s is unlocked.") % self.getName(cur_ref)
            elif "BOUQUET" in parentalControl.whitelist[sRef]:
                protectionText = _("Bouquet %s is unlocked.") % self.getName(cur_ref)
    return (True, protectionText)
def backupFiles(self, param):
    """Create a tar archive of all bouquet/service settings under BACKUP_PATH.

    *param* is the desired archive base name (falls back to BACKUP_FILENAME).
    Returns (True, tar file name) or (False, error message).
    """
    filename = param
    if not filename:
        filename = self.BACKUP_FILENAME
    # sanitize the user-supplied name; this also keeps the shell command
    # below free of metacharacters
    invalidCharacters = re_compile(r'[^A-Za-z0-9_. ]+|^\.|\.$|^ | $|^$')
    tarFilename = "%s.tar" % invalidCharacters.sub('_', filename)
    backupFilename = path.join(self.BACKUP_PATH, tarFilename)
    if path.exists(backupFilename):
        remove(backupFilename)
    # marker file lets restoreFiles() verify the archive origin.
    # The original used `f = open(...); if f:` — open() never returns a falsy
    # handle (it raises instead), so the else-branch was unreachable and the
    # handle leaked; a with-statement closes it deterministically.
    checkfile = path.join(self.BACKUP_PATH, '.webouquetedit')
    with open(checkfile, 'w') as f:
        f.write('created with WebBouquetEditor')
    files = [checkfile]
    files.append("/etc/enigma2/bouquets.tv")
    files.append("/etc/enigma2/bouquets.radio")
    # files.append("/etc/enigma2/userbouquet.favourites.tv")
    # files.append("/etc/enigma2/userbouquet.favourites.radio")
    files.append("/etc/enigma2/lamedb")
    for xml in ("/etc/tuxbox/cables.xml", "/etc/tuxbox/terrestrial.xml", "/etc/tuxbox/satellites.xml", "/etc/tuxbox/atsc.xml", "/etc/enigma2/lamedb5"):
        if path.exists(xml):
            files.append(xml)
    if config.ParentalControl.configured.value:
        if config.ParentalControl.type.value == "blacklist":
            files.append("/etc/enigma2/blacklist")
        else:
            files.append("/etc/enigma2/whitelist")
    files += self.getPhysicalFilenamesFromServicereference(eServiceReference('1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet'))
    files += self.getPhysicalFilenamesFromServicereference(eServiceReference('1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.radio" ORDER BY bouquet'))
    tarFiles = ""
    for arg in files:
        if not path.exists(arg):
            return (False, _("Error while preparing backup file, %s does not exists.") % arg)
        tarFiles += "%s " % arg
    # readlines() blocks until tar has finished writing the archive
    popen("tar cvf %s %s" % (backupFilename, tarFiles)).readlines()  # nosec
    remove(checkfile)
    return (True, tarFilename)
def getPhysicalFilenamesFromServicereference(self, ref):
    """Recursively collect the bouquet file paths referenced below *ref*."""
    files = []
    serviceHandler = eServiceCenter.getInstance()
    services = serviceHandler.list(ref)
    servicelist = services and services.getContent("S", True)
    # guard against a failed listing: the original iterated None/False here
    # and crashed with a TypeError
    for service in (servicelist or []):
        sref = service
        pos = sref.find('FROM BOUQUET "')
        filename = None
        if pos != -1:
            sref = sref[pos + 14:]
            pos = sref.find('"')
            if pos != -1:
                filename = '/etc/enigma2/' + sref[:pos]  # FIXMEEE !!! HARDCODED /etc/enigma2
                files.append(filename)
                # sub-bouquets may reference further files
                files += self.getPhysicalFilenamesFromServicereference(eServiceReference(service))
    return files
def restoreFiles(self, param):
    # Restore a backup archive previously produced by backupFiles().
    # NOTE(review): *param* (the tar path) is interpolated unescaped into
    # shell commands below — if it can originate from an HTTP client this is
    # a shell-injection risk; confirm the caller sanitizes it.
    tarFilename = param
    backupFilename = tarFilename # path.join(self.BACKUP_PATH, tarFilename)
    if path.exists(backupFilename):
        check_tar = False
        # list the archive and require the marker file written by backupFiles()
        lines = popen('tar -tf %s' % backupFilename).readlines() # nosec
        for line in lines:
            pos = line.find('tmp/.webouquetedit')
            if pos != -1:
                check_tar = True
                break
        if check_tar:
            eDVBDB.getInstance().removeServices()
            # delete the current bouquet files before unpacking the archive
            files = []
            files += self.getPhysicalFilenamesFromServicereference(eServiceReference('1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet'))
            files += self.getPhysicalFilenamesFromServicereference(eServiceReference('1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.radio" ORDER BY bouquet'))
            for bouquetfiles in files:
                if path.exists(bouquetfiles):
                    remove(bouquetfiles)
            lines = popen('tar xvf %s -C / --exclude tmp/.webouquetedit' % backupFilename).readlines() # nosec
            # reload tuner + service databases from the restored files
            nimmanager.readTransponders()
            eDVBDB.getInstance().reloadServicelist()
            eDVBDB.getInstance().reloadBouquets()
            # refresh the on-screen service list, keeping the current selection
            infoBarInstance = InfoBar.instance
            if infoBarInstance is not None:
                servicelist = infoBarInstance.servicelist
                root = servicelist.getRoot()
                currentref = servicelist.getCurrentSelection()
                servicelist.setRoot(root)
                servicelist.setCurrentSelection(currentref)
            remove(backupFilename)
            return (True, _("Bouquet-settings were restored successfully"))
        else:
            return (False, _("Error, %s was not created with WebBouquetEditor...") % backupFilename)
    else:
        return (False, _("Error, %s does not exists, restore is not possible...") % backupFilename)
def getMutableBouquetList(self, mode):
    # Return a mutable edit handle for the root bouquet list of *mode*,
    # remembering its root sref in self.bouquet_rootstr for setRoot().
    if mode == MODE_TV:
        self.bouquet_rootstr = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet'
    else:
        # NOTE(review): radio root also uses type '1:7:1' while addBouquet
        # builds radio bouquets with '1:7:2' — confirm intended
        self.bouquet_rootstr = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.radio" ORDER BY bouquet'
    return self.getMutableList(eServiceReference(self.bouquet_rootstr))
def getMutableList(self, ref):
    """Return a mutable edit handle (startEdit) for the list behind *ref*."""
    return eServiceCenter.getInstance().list(ref).startEdit()
def setRoot(self, bouquet_rootstr):
    # Refresh the on-screen channel list, but only if it is currently showing
    # the bouquet we just modified; the current selection is preserved.
    infoBarInstance = InfoBar.instance
    if infoBarInstance is not None:
        servicelist = infoBarInstance.servicelist
        root = servicelist.getRoot()
        if bouquet_rootstr == root.toString():
            currentref = servicelist.getCurrentSelection()
            servicelist.setRoot(root)
            servicelist.setCurrentSelection(currentref)
def buildBouquetID(self, str, prefix, mode):
    """Derive a unique, filesystem-safe bouquet id from a display name.

    Lowercases the name, maps everything outside [a-z0-9] to '_' and, if
    /etc/enigma2/<prefix><id><suffix> already exists, appends the first
    free numeric suffix.
    """
    # NOTE(review): the parameter name shadows the builtin `str`; kept
    # unchanged for interface compatibility
    name = ''.join(
        c if ('a' <= c <= 'z' or '0' <= c <= '9') else '_'
        for c in str.lower())
    # check if file is unique
    suffix = ".tv" if mode == MODE_TV else ".radio"
    filename = '/etc/enigma2/' + prefix + name + suffix
    if path.exists(filename):
        i = 1
        while path.exists("/etc/enigma2/%s%s_%d%s" % (prefix, name, i, suffix)):
            i += 1
        name = "%s_%d" % (name, i)
    return name
def getName(self, ref):
    """Return the display name for *ref*, or '' when no info is available."""
    info = eServiceCenter.getInstance().info(ref)
    return info.getName(ref) if info else ""
def importBouquet(self, param):
    """Import a bouquet from a JSON payload.

    param["json"][0] must decode to an object with keys filename, mode
    (0 = TV, 1 = radio), overwrite (1 = replace file) and lines (the bouquet
    file content). Returns [success, message].
    """
    if not config.usage.multibouquet.value:
        return [False, _("Multi-Bouquet is not enabled!")]
    import json
    ret = [False, 'json format error']
    try:
        bqimport = json.loads(param["json"][0])
        filename = bqimport["filename"]
        _mode = bqimport["mode"]
        overwrite = bqimport["overwrite"]
        lines = bqimport["lines"]
    except (ValueError, KeyError):
        return ret
    mode = MODE_TV
    if _mode == 1:
        # The original assigned MODE_RADIO here, but that name is commented
        # out of the imports above and raised a NameError. Its value in
        # Screens.ChannelSelection is 1, so use the literal; only
        # `mode == MODE_TV` is ever tested downstream.
        mode = 1
    fullfilename = '/etc/enigma2/' + filename
    if mode == MODE_TV:
        sref = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET \"%s\" ORDER BY bouquet' % (filename)
    else:
        sref = '1:7:2:0:0:0:0:0:0:0:FROM BOUQUET \"%s\" ORDER BY bouquet' % (filename)
    if not path.exists(fullfilename):
        # register the new bouquet in the root list before writing its file
        new_bouquet_ref = eServiceReference(str(sref))
        mutableBouquetList = self.getMutableBouquetList(mode)
        mutableBouquetList.addService(new_bouquet_ref)
        mutableBouquetList.flushChanges()
    file_mode = 'w' if overwrite == 1 else 'a'
    try:
        # with-statement closes the handle; the original leaked it and its
        # `if f:` error branch was unreachable (open() raises, never None)
        with open(fullfilename, file_mode) as f:
            for line in lines:
                f.write(line)
                f.write("\n")
    except IOError:
        return [False, 'error creating bouquet file']
    eDVBDB.getInstance().reloadBouquets()
    return [True, 'bouquet added']
|
E2OpenPlugins/e2openplugin-OpenWebif
|
plugin/controllers/BouquetEditor.py
|
Python
|
gpl-3.0
| 28,113
|
[
"VisIt"
] |
7fa14312816064af8c56ada9a84f50bd0a4e074ecb3db0cd1406d1f1babb93a0
|
import json, os, time
from solver.commonSolver import CommonSolver
from logic.helpers import Pickup
from utils.utils import PresetLoader
from solver.conf import Conf
from solver.out import Out
from solver.comeback import ComeBack
from utils.parameters import easy, medium, hard, harder, hardcore, mania, infinity
from utils.parameters import Knows, isKnows, Settings
from logic.logic import Logic
from utils.objectives import Objectives
import utils.log
class StandardSolver(CommonSolver):
# given a rom and parameters returns the estimated difficulty
def __init__(self, rom, presetFileName, difficultyTarget, pickupStrategy, itemsForbidden=None, type='console',
             firstItemsLog=None, extStatsFilename=None, extStatsStep=None, displayGeneratedPath=False,
             outputFileName=None, magic=None, checkDuplicateMajor=False, vcr=False, runtimeLimit_s=0):
    """Set up a non-interactive solver run for *rom*.

    itemsForbidden defaults to an empty list (the original used a mutable
    default argument, which is shared across calls); passing [] explicitly
    still behaves identically.
    """
    self.interactive = False
    self.checkDuplicateMajor = checkDuplicateMajor
    if vcr:  # was `vcr == True`; truthiness test is equivalent and idiomatic
        from utils.vcr import VCR
        self.vcr = VCR(rom, 'solver')
    else:
        self.vcr = None
    # for compatibility with some common methods of the interactive solver
    self.mode = 'standard'
    self.log = utils.log.get('Solver')
    self.setConf(difficultyTarget, pickupStrategy,
                 itemsForbidden if itemsForbidden is not None else [],
                 displayGeneratedPath)
    # optional CSV log of the first pickup of each item
    self.firstLogFile = None
    if firstItemsLog is not None:
        self.firstLogFile = open(firstItemsLog, 'w')
        self.firstLogFile.write('Item;Location;Area\n')
    self.extStatsFilename = extStatsFilename
    self.extStatsStep = extStatsStep
    # can be called from command line (console) or from web site (web)
    self.type = type
    self.output = Out.factory(self.type, self)
    self.outputFileName = outputFileName
    self.objectives = Objectives()
    self.loadRom(rom, magic=magic)
    self.presetFileName = presetFileName
    self.loadPreset(self.presetFileName)
    self.pickup = Pickup(Conf.itemsPickup)
    self.comeBack = ComeBack(self)
    self.runtimeLimit_s = runtimeLimit_s
    self.startTime = time.process_time()
def setConf(self, difficultyTarget, pickupStrategy, itemsForbidden, displayGeneratedPath):
Conf.difficultyTarget = difficultyTarget
Conf.itemsPickup = pickupStrategy
Conf.displayGeneratedPath = displayGeneratedPath
Conf.itemsForbidden = itemsForbidden
def solveRom(self):
self.lastAP = self.startLocation
self.lastArea = self.startArea
(self.difficulty, self.itemsOk) = self.computeDifficulty()
if self.firstLogFile is not None:
self.firstLogFile.close()
(self.knowsUsed, self.knowsKnown, knowsUsedList) = self.getKnowsUsed()
if self.vcr != None:
self.vcr.dump()
if self.extStatsFilename != None:
self.computeExtStats()
firstMinor = {'Missile': False, 'Super': False, 'PowerBomb': False}
locsItems = {}
for loc in self.visitedLocations:
if loc.itemName in firstMinor and firstMinor[loc.itemName] == False:
locsItems[loc.Name] = loc.itemName
firstMinor[loc.itemName] = True
import utils.db as db
with open(self.extStatsFilename, 'a') as extStatsFile:
db.DB.dumpExtStatsSolver(self.difficulty, knowsUsedList, self.solverStats, locsItems, self.extStatsStep, extStatsFile)
self.output.out()
return self.difficulty
def computeExtStats(self):
# avgLocs: avg number of available locs, the higher the value the more open is a seed
# open[1-4]4: how many location you have to visit to open 1/4, 1/2, 3/4, all locations.
# gives intel about prog item repartition.
self.solverStats = {}
self.solverStats['avgLocs'] = int(sum(self.nbAvailLocs)/len(self.nbAvailLocs))
derivative = []
for i in range(len(self.nbAvailLocs)-1):
d = self.nbAvailLocs[i+1] - self.nbAvailLocs[i]
derivative.append(d)
sumD = sum([d for d in derivative if d != -1])
(sum14, sum24, sum34, sum44) = (sumD/4, sumD/2, sumD*3/4, sumD)
(open14, open24, open34, open44) = (-1, -1, -1, -1)
sumD = 0
for (i, d) in enumerate(derivative, 1):
if d == -1:
continue
sumD += d
if sumD >= sum14 and open14 == -1:
open14 = i
continue
if sumD >= sum24 and open24 == -1:
open24 = i
continue
if sumD >= sum34 and open34 == -1:
open34 = i
continue
if sumD >= sum44 and open44 == -1:
open44 = i
break
self.solverStats['open14'] = open14 if open14 != -1 else 0
self.solverStats['open24'] = open24 if open24 != -1 else 0
self.solverStats['open34'] = open34 if open34 != -1 else 0
self.solverStats['open44'] = open44 if open44 != -1 else 0
def getRemainMajors(self):
return [loc for loc in self.majorLocations if loc.difficulty.bool == False and loc.itemName not in ['Nothing', 'NoEnergy']]
def getRemainMinors(self):
if self.majorsSplit == 'Full':
return None
else:
return [loc for loc in self.minorLocations if loc.difficulty.bool == False and loc.itemName not in ['Nothing', 'NoEnergy']]
def getSkippedMajors(self):
return [loc for loc in self.majorLocations if loc.difficulty.bool == True and loc.itemName not in ['Nothing', 'NoEnergy']]
def getUnavailMajors(self):
return [loc for loc in self.majorLocations if loc.difficulty.bool == False and loc.itemName not in ['Nothing', 'NoEnergy']]
def getDiffThreshold(self):
target = Conf.difficultyTarget
threshold = target
epsilon = 0.001
if target <= easy:
threshold = medium - epsilon
elif target <= medium:
threshold = hard - epsilon
elif target <= hard:
threshold = harder - epsilon
elif target <= harder:
threshold = hardcore - epsilon
elif target <= hardcore:
threshold = mania - epsilon
return threshold
def getKnowsUsed(self):
knowsUsed = []
for loc in self.visitedLocations:
knowsUsed += loc.difficulty.knows
# get unique knows
knowsUsed = list(set(knowsUsed))
knowsUsedCount = len(knowsUsed)
# get total of known knows
knowsKnownCount = len([knows for knows in Knows.__dict__ if isKnows(knows) and getattr(Knows, knows).bool == True])
knowsKnownCount += len([hellRun for hellRun in Settings.hellRuns if Settings.hellRuns[hellRun] is not None])
return (knowsUsedCount, knowsKnownCount, knowsUsed)
def tryRemainingLocs(self):
# use preset which knows every techniques to test the remaining locs to
# find which technique could allow to continue the seed
locations = self.majorLocations if self.majorsSplit == 'Full' else self.majorLocations + self.minorLocations
# instanciate a new smbool manager to reset the cache
from logic.smboolmanager import SMBoolManagerPlando as SMBoolManager
self.smbm = SMBoolManager()
presetFileName = os.path.expanduser('~/RandomMetroidSolver/standard_presets/solution.json')
presetLoader = PresetLoader.factory(presetFileName)
presetLoader.load()
self.smbm.createKnowsFunctions()
self.areaGraph.getAvailableLocations(locations, self.smbm, infinity, self.lastAP)
return [loc for loc in locations if loc.difficulty.bool == True]
|
theonlydude/RandomMetroidSolver
|
solver/standardSolver.py
|
Python
|
mit
| 7,846
|
[
"VisIt"
] |
181ef660ee1eec7e9082989be965636f8598414e659fb32d0da947a6cd3e6f2e
|
from numpy.testing import assert_array_equal, assert_allclose, assert_raises
import numpy as np
from skimage.data import camera
from skimage.util import random_noise, img_as_float
def test_set_seed():
    """Two calls with the same seed must produce identical noisy images."""
    fixed_seed = 42
    image = camera()
    first = random_noise(image, seed=fixed_seed)
    assert_array_equal(first, random_noise(image, seed=fixed_seed))
def test_salt():
    """Salt noise must only set pixels to 1.0, at roughly the requested rate."""
    seed = 42
    cam = img_as_float(camera())
    cam_noisy = random_noise(cam, seed=seed, mode='salt', amount=0.15)
    # Pixels that differ from the original were hit by salt.
    saltmask = cam != cam_noisy
    # Ensure all changes are to 1.0
    assert_allclose(cam_noisy[saltmask], np.ones(saltmask.sum()))
    # Ensure approximately correct amount of noise was added
    # (can be below 0.15 because some pixels may already equal 1.0).
    proportion = float(saltmask.sum()) / (cam.shape[0] * cam.shape[1])
    assert 0.11 < proportion <= 0.15
def test_salt_p1():
    """amount=1 must convert every single pixel to salt (value 1)."""
    source = np.random.rand(2, 3)
    salted = random_noise(source, mode='salt', amount=1)
    assert_array_equal(salted, [[1, 1, 1], [1, 1, 1]])
def test_singleton_dim():
    """Ensure images where size of a given dimension is 1 work correctly."""
    image = np.random.rand(1, 20)
    noisy = random_noise(image, mode='salt', amount=0.1, seed=42)
    # 10% of 20 pixels == 2 salted pixels expected with this fixed seed.
    assert np.sum(noisy == 1) == 2
def test_pepper():
    """Pepper noise must only set pixels to the minimum (0, or -1 if signed)."""
    seed = 42
    cam = img_as_float(camera())
    data_signed = cam * 2. - 1.  # Same image, on range [-1, 1]
    cam_noisy = random_noise(cam, seed=seed, mode='pepper', amount=0.15)
    peppermask = cam != cam_noisy
    # Ensure all changes are to 0.0 (pepper), never to any other value.
    assert_allclose(cam_noisy[peppermask], np.zeros(peppermask.sum()))
    # Ensure approximately correct amount of noise was added
    proportion = float(peppermask.sum()) / (cam.shape[0] * cam.shape[1])
    assert 0.11 < proportion <= 0.15
    # Check to make sure pepper gets added properly to signed images
    # (pepper on signed data should produce -1, not 0).
    orig_zeros = (data_signed == -1).sum()
    cam_noisy_signed = random_noise(data_signed, seed=seed, mode='pepper',
                                    amount=.15)
    proportion = (float((cam_noisy_signed == -1).sum() - orig_zeros) /
                  (cam.shape[0] * cam.shape[1]))
    assert 0.11 < proportion <= 0.15
def test_salt_and_pepper():
    """s&p noise must produce only 0/1 pixels in the requested salt:pepper ratio."""
    seed = 42
    cam = img_as_float(camera())
    # salt_vs_pepper=0.25 -> expect roughly 1 part salt to 3 parts pepper.
    cam_noisy = random_noise(cam, seed=seed, mode='s&p', amount=0.15,
                             salt_vs_pepper=0.25)
    saltmask = np.logical_and(cam != cam_noisy, cam_noisy == 1.)
    peppermask = np.logical_and(cam != cam_noisy, cam_noisy == 0.)
    # Ensure all changes are to 0. or 1.
    assert_allclose(cam_noisy[saltmask], np.ones(saltmask.sum()))
    assert_allclose(cam_noisy[peppermask], np.zeros(peppermask.sum()))
    # Ensure approximately correct amount of noise was added
    proportion = float(
        saltmask.sum() + peppermask.sum()) / (cam.shape[0] * cam.shape[1])
    assert 0.11 < proportion <= 0.18
    # Verify the relative amount of salt vs. pepper is close to expected
    assert 0.18 < saltmask.sum() / float(peppermask.sum()) < 0.33
def test_gaussian():
    """Gaussian noise must honour the requested mean and variance."""
    seed = 42
    data = np.zeros((128, 128)) + 0.5
    data_gaussian = random_noise(data, seed=seed, var=0.01)
    # Sample variance of the noisy image should be near the requested var.
    assert 0.008 < data_gaussian.var() < 0.012
    data_gaussian = random_noise(data, seed=seed, mean=0.3, var=0.015)
    # Mean shift of ~0.3 relative to the 0.5 base image.
    assert 0.28 < data_gaussian.mean() - 0.5 < 0.32
    assert 0.012 < data_gaussian.var() < 0.018
def test_localvar():
    """localvar mode must apply per-pixel variances and reject invalid ones."""
    seed = 42
    data = np.zeros((128, 128)) + 0.5
    # Four quadrants, each with a different requested noise variance.
    local_vars = np.zeros((128, 128)) + 0.001
    local_vars[:64, 64:] = 0.1
    local_vars[64:, :64] = 0.25
    local_vars[64:, 64:] = 0.45
    data_gaussian = random_noise(data, mode='localvar', seed=seed,
                                 local_vars=local_vars, clip=False)
    # Measured variance per quadrant must track the requested value.
    assert 0. < data_gaussian[:64, :64].var() < 0.002
    assert 0.095 < data_gaussian[:64, 64:].var() < 0.105
    assert 0.245 < data_gaussian[64:, :64].var() < 0.255
    assert 0.445 < data_gaussian[64:, 64:].var() < 0.455
    # Ensure local variance bounds checking works properly
    # (all-zero variances are rejected).
    bad_local_vars = np.zeros_like(data)
    assert_raises(ValueError, random_noise, data, mode='localvar', seed=seed,
                  local_vars=bad_local_vars)
    # Negative variances are rejected too.
    bad_local_vars += 0.1
    bad_local_vars[0, 0] = -1
    assert_raises(ValueError, random_noise, data, mode='localvar', seed=seed,
                  local_vars=bad_local_vars)
def test_speckle():
    """Speckle noise must equal data + data * N(mean, var), clipped to [0, 1]."""
    seed = 42
    data = np.zeros((128, 128)) + 0.1
    # Reproduce the exact noise the implementation draws with this seed.
    np.random.seed(seed=seed)
    noise = np.random.normal(0.1, 0.02 ** 0.5, (128, 128))
    expected = np.clip(data + data * noise, 0, 1)
    data_speckle = random_noise(data, mode='speckle', seed=seed, mean=0.1,
                                var=0.02)
    assert_allclose(expected, data_speckle)
def test_poisson():
    """Poisson noise must match a reference draw, with and without clipping."""
    seed = 42
    data = camera()  # 512x512 grayscale uint8
    cam_noisy = random_noise(data, mode='poisson', seed=seed)
    cam_noisy2 = random_noise(data, mode='poisson', seed=seed, clip=False)
    # Reference: Poisson draw at 256 discrete levels, rescaled to [0, 1]
    # (mirrors the implementation for 8-bit input).
    np.random.seed(seed=seed)
    expected = np.random.poisson(img_as_float(data) * 256) / 256.
    assert_allclose(cam_noisy, np.clip(expected, 0., 1.))
    assert_allclose(cam_noisy2, expected)
def test_clip_poisson():
    """clip=True must bound Poisson output to the input's value range."""
    seed = 42
    data = camera()  # 512x512 grayscale uint8
    data_signed = img_as_float(data) * 2. - 1.  # Same image, on range [-1, 1]
    # Signed and unsigned, clipped
    cam_poisson = random_noise(data, mode='poisson', seed=seed, clip=True)
    cam_poisson2 = random_noise(data_signed, mode='poisson', seed=seed,
                                clip=True)
    assert (cam_poisson.max() == 1.) and (cam_poisson.min() == 0.)
    assert (cam_poisson2.max() == 1.) and (cam_poisson2.min() == -1.)
    # Signed and unsigned, unclipped
    # (values may exceed the nominal range when not clipped).
    cam_poisson = random_noise(data, mode='poisson', seed=seed, clip=False)
    cam_poisson2 = random_noise(data_signed, mode='poisson', seed=seed,
                                clip=False)
    assert (cam_poisson.max() > 1.15) and (cam_poisson.min() == 0.)
    assert (cam_poisson2.max() > 1.3) and (cam_poisson2.min() == -1.)
def test_clip_gaussian():
    """clip=True must bound Gaussian output to the input's value range."""
    seed = 42
    data = camera()  # 512x512 grayscale uint8
    data_signed = img_as_float(data) * 2. - 1.  # Same image, on range [-1, 1]
    # Signed and unsigned, clipped
    cam_gauss = random_noise(data, mode='gaussian', seed=seed, clip=True)
    cam_gauss2 = random_noise(data_signed, mode='gaussian', seed=seed,
                              clip=True)
    assert (cam_gauss.max() == 1.) and (cam_gauss.min() == 0.)
    assert (cam_gauss2.max() == 1.) and (cam_gauss2.min() == -1.)
    # Signed and unsigned, unclipped
    # (thresholds below are empirical values for this fixed seed).
    cam_gauss = random_noise(data, mode='gaussian', seed=seed, clip=False)
    cam_gauss2 = random_noise(data_signed, mode='gaussian', seed=seed,
                              clip=False)
    assert (cam_gauss.max() > 1.22) and (cam_gauss.min() < -0.36)
    assert (cam_gauss2.max() > 1.219) and (cam_gauss2.min() < -1.337)
def test_clip_speckle():
    """clip=True must bound speckle output to the input's value range."""
    seed = 42
    data = camera()  # 512x512 grayscale uint8
    data_signed = img_as_float(data) * 2. - 1.  # Same image, on range [-1, 1]
    # Signed and unsigned, clipped
    cam_speckle = random_noise(data, mode='speckle', seed=seed, clip=True)
    cam_speckle2 = random_noise(data_signed, mode='speckle', seed=seed,
                                clip=True)
    assert (cam_speckle.max() == 1.) and (cam_speckle.min() == 0.)
    assert (cam_speckle2.max() == 1.) and (cam_speckle2.min() == -1.)
    # Signed and unsigned, unclipped
    # (thresholds below are empirical values for this fixed seed).
    cam_speckle = random_noise(data, mode='speckle', seed=seed, clip=False)
    cam_speckle2 = random_noise(data_signed, mode='speckle', seed=seed,
                                clip=False)
    assert (cam_speckle.max() > 1.219) and (cam_speckle.min() == 0.)
    assert (cam_speckle2.max() > 1.219) and (cam_speckle2.min() < -1.306)
def test_bad_mode():
    """An unrecognized noise mode string must raise KeyError."""
    blank = np.zeros((64, 64))
    assert_raises(KeyError, random_noise, blank, 'perlin')
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    np.testing.run_module_suite()
|
paalge/scikit-image
|
skimage/util/tests/test_random_noise.py
|
Python
|
bsd-3-clause
| 8,005
|
[
"Gaussian"
] |
73bad0fff841ebbc21b507d21fe6d92e716e89403a31e117e8d03d3aff1f72f4
|
# Copyright 2004 by James Casbon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Code to deal with COMPASS output, a program for profile/profile comparison.
Compass is described in:
Sadreyev R, Grishin N. COMPASS: a tool for comparison of multiple protein
alignments with assessment of statistical significance. J Mol Biol. 2003 Feb
7;326(1):317-36.
Tested with COMPASS 1.24.
Functions:
read Reads a COMPASS file containing one COMPASS record
parse Iterates over records in a COMPASS file.
Classes:
Record One result of a COMPASS file
DEPRECATED CLASSES:
_Scanner Scan compass results
_Consumer Consume scanner events
RecordParser Parse one compass record
Iterator Iterate through a number of compass records
"""
import re
def read(handle):
    """Read a single COMPASS record from *handle* and return a Record.

    Raises ValueError if the handle is empty or ends in the middle of a
    record.

    NOTE(review): uses the Python 2-only ``handle.next()`` iterator call;
    ``next(handle)`` would work on both 2 and 3.
    """
    record = None
    try:
        # Header section: names, threshold, lengths, profile widths, scores --
        # one line each, in this exact order.
        line = handle.next()
        record = Record()
        __read_names(record, line)
        line = handle.next()
        __read_threshold(record, line)
        line = handle.next()
        __read_lengths(record, line)
        line = handle.next()
        __read_profilewidth(record, line)
        line = handle.next()
        __read_scores(record, line)
    except StopIteration:
        if not record:
            # Stream ended before anything was read: no record at all.
            raise ValueError("No record found in handle")
        else:
            # Stream ended inside the header: truncated record.
            raise ValueError("Unexpected end of stream.")
    # Alignment section: repeated triples of (query, positives, hit) lines,
    # separated by blank lines, until the stream ends.
    for line in handle:
        if is_blank_line(line):
            continue
        __read_query_alignment(record, line)
        try:
            line = handle.next()
            __read_positive_alignment(record, line)
            line = handle.next()
            __read_hit_alignment(record, line)
        except StopIteration:
            raise ValueError("Unexpected end of stream.")
    return record
def parse(handle):
    """Iterate over the COMPASS records in *handle*, yielding Record objects.

    A new record begins at each "Ali1:" line; the generator terminates when
    the stream is exhausted.

    NOTE(review): uses the Python 2-only ``handle.next()`` iterator call.
    """
    record = None
    try:
        line = handle.next()
    except StopIteration:
        # Empty handle: nothing to yield.
        return
    while True:
        try:
            # Header section of the current record (fixed line order).
            record = Record()
            __read_names(record, line)
            line = handle.next()
            __read_threshold(record, line)
            line = handle.next()
            __read_lengths(record, line)
            line = handle.next()
            __read_profilewidth(record, line)
            line = handle.next()
            __read_scores(record, line)
        except StopIteration:
            raise ValueError("Unexpected end of stream.")
        # Alignment section: read until the next record's "Ali1:" line or EOF.
        for line in handle:
            if not line.strip():
                continue
            if "Ali1:" in line:
                # Start of the next record: emit the current one and loop,
                # re-using `line` as the next record's header line.
                yield record
                break
            __read_query_alignment(record, line)
            try:
                line = handle.next()
                __read_positive_alignment(record, line)
                line = handle.next()
                __read_hit_alignment(record, line)
            except StopIteration:
                raise ValueError("Unexpected end of stream.")
        else:
            # for-loop exhausted without break: stream ended -> last record.
            yield record
            break
class Record(object):
    """One COMPASS comparison result.

    Ali1 one is the query, Ali2 the hit.
    """
    def __init__(self):
        # Profile identifiers.
        self.query = ''
        self.hit = ''
        # Filtering parameter and per-profile statistics.
        self.gap_threshold = 0
        self.query_length = 0
        self.query_filtered_length = 0
        self.query_nseqs = 0
        self.query_neffseqs = 0
        self.hit_length = 0
        self.hit_filtered_length = 0
        self.hit_nseqs = 0
        self.hit_neffseqs = 0
        # Alignment score and significance (-1 == not parsed).
        self.sw_score = 0
        self.evalue = -1
        # Alignment coordinates and aligned strings ('=' marks gaps).
        self.query_start = -1
        self.hit_start = -1
        self.query_aln = ''
        self.hit_aln = ''
        self.positives = ''

    def query_coverage(self):
        """Return the length of the query covered in alignment"""
        return len(self.query_aln.replace("=", ""))

    def hit_coverage(self):
        """Return the length of the hit covered in the alignment"""
        return len(self.hit_aln.replace("=", ""))
# Everything below is private
# Pre-compiled patterns for each line type of a COMPASS record, keyed by the
# parser helper that uses them. The "align" patterns skip a fixed 15-column
# prefix (sequence label + coordinate field) before capturing the alignment.
__regex = {"names": re.compile("Ali1:\s+(\S+)\s+Ali2:\s+(\S+)\s+"),
           "threshold": re.compile("Threshold of effective gap content in columns: (\S+)"),
           "lengths": re.compile("length1=(\S+)\s+filtered_length1=(\S+)\s+length2=(\S+)\s+filtered_length2=(\S+)"),
           "profilewidth": re.compile("Nseqs1=(\S+)\s+Neff1=(\S+)\s+Nseqs2=(\S+)\s+Neff2=(\S+)"),
           "scores": re.compile("Smith-Waterman score = (\S+)\s+Evalue = (\S+)"),
           "start": re.compile("(\d+)"),
           "align": re.compile("^.{15}(\S+)"),
           "positive_alignment": re.compile("^.{15}(.+)"),
           }
def __read_names(record, line):
    """
    Ali1: 60456.blo.gz.aln Ali2: allscop//14984.blo.gz.aln
          ------query-----        -------hit-------------
    """
    if not "Ali1:" in line:
        raise ValueError("Line does not contain 'Ali1:':\n%s" % line)
    # Store the query (Ali1) and hit (Ali2) profile names on the record.
    m = __regex["names"].search(line)
    record.query = m.group(1)
    record.hit = m.group(2)
def __read_threshold(record,line):
    # Parse "Threshold of effective gap content in columns: <float>".
    if not line.startswith("Threshold"):
        raise ValueError("Line does not start with 'Threshold':\n%s" % line)
    m = __regex["threshold"].search(line)
    record.gap_threshold = float(m.group(1))
def __read_lengths(record, line):
    # Parse "length1=.. filtered_length1=.. length2=.. filtered_length2=..".
    if not line.startswith("length1="):
        raise ValueError("Line does not start with 'length1=':\n%s" % line)
    m = __regex["lengths"].search(line)
    record.query_length = int(m.group(1))
    record.query_filtered_length = float(m.group(2))
    record.hit_length = int(m.group(3))
    record.hit_filtered_length = float(m.group(4))
def __read_profilewidth(record, line):
    # Parse "Nseqs1=.. Neff1=.. Nseqs2=.. Neff2=.." (profile depths).
    if not "Nseqs1" in line:
        raise ValueError("Line does not contain 'Nseqs1':\n%s" % line)
    m = __regex["profilewidth"].search(line)
    record.query_nseqs = int(m.group(1))
    record.query_neffseqs = float(m.group(2))
    record.hit_nseqs = int(m.group(3))
    record.hit_neffseqs = float(m.group(4))
def __read_scores(record, line):
    # Parse "Smith-Waterman score = .. Evalue = ..". Unlike the other header
    # parsers, a regex miss is tolerated here and yields sentinel values.
    if not line.startswith("Smith-Waterman"):
        raise ValueError("Line does not start with 'Smith-Waterman':\n%s" % line)
    m = __regex["scores"].search(line)
    if m:
        record.sw_score = int(m.group(1))
        record.evalue = float(m.group(2))
    else:
        record.sw_score = 0
        record.evalue = -1.0
def __read_query_alignment(record, line):
    # The first number on the line (if any) is the query start coordinate;
    # it is overwritten on each alignment chunk.
    m = __regex["start"].search(line)
    if m:
        record.query_start = int(m.group(1))
    # Append the aligned sequence (after the fixed 15-column prefix).
    m = __regex["align"].match(line)
    assert m!=None, "invalid match"
    record.query_aln += m.group(1)
def __read_positive_alignment(record, line):
    # Append the match/positive markers line (may contain spaces, so the
    # pattern captures everything after the fixed 15-column prefix).
    m = __regex["positive_alignment"].match(line)
    assert m!=None, "invalid match"
    record.positives += m.group(1)
def __read_hit_alignment(record, line):
    # Mirror of __read_query_alignment for the hit side of the alignment.
    m = __regex["start"].search(line)
    if m:
        record.hit_start = int(m.group(1))
    m = __regex["align"].match(line)
    assert m!=None, "invalid match"
    record.hit_aln += m.group(1)
# Everything below is deprecated
from Bio import File
from Bio.ParserSupport import *
import Bio
class _Scanner:
    """Reads compass output and generate events (DEPRECATED)"""
    # Event-based scanner kept only for backward compatibility; new code
    # should use the module-level read()/parse() functions.
    def __init__(self):
        import warnings
        warnings.warn("Bio.Compass._Scanner is deprecated; please use the read() and parse() functions in this module instead", Bio.BiopythonDeprecationWarning)
    def feed(self, handle, consumer):
        """Feed in COMPASS ouput"""
        # Wrap the handle so lines can be pushed back during scanning.
        if isinstance(handle, File.UndoHandle):
            pass
        else:
            handle = File.UndoHandle(handle)
        assert isinstance(handle, File.UndoHandle), \
               "handle must be an UndoHandle"
        if handle.peekline():
            self._scan_record(handle, consumer)
    def _scan_record(self,handle,consumer):
        # Scan the fixed header sequence, then the alignment block.
        self._scan_names(handle, consumer)
        self._scan_threshold(handle, consumer)
        self._scan_lengths(handle,consumer)
        self._scan_profilewidth(handle, consumer)
        self._scan_scores(handle,consumer)
        self._scan_alignment(handle,consumer)
    def _scan_names(self,handle,consumer):
        """
        Ali1: 60456.blo.gz.aln Ali2: allscop//14984.blo.gz.aln
        """
        read_and_call(handle, consumer.names, contains="Ali1:")
    def _scan_threshold(self,handle, consumer):
        """
        Threshold of effective gap content in columns: 0.5
        """
        read_and_call(handle, consumer.threshold, start="Threshold")
    def _scan_lengths(self,handle, consumer):
        """
        length1=388     filtered_length1=386    length2=145     filtered_length2=137
        """
        read_and_call(handle, consumer.lengths, start="length1=")
    def _scan_profilewidth(self,handle,consumer):
        """
        Nseqs1=399      Neff1=12.972    Nseqs2=1        Neff2=6.099
        """
        read_and_call(handle, consumer.profilewidth, contains="Nseqs1")
    def _scan_scores(self,handle, consumer):
        """
        Smith-Waterman score = 37       Evalue = 5.75e+02
        """
        read_and_call(handle, consumer.scores, start="Smith-Waterman")
    def _scan_alignment(self,handle, consumer):
        """
        QUERY   2    LSDRLELVSASEIRKLFDIAAGMKDVISLGIGEPDFDTPQHIKEYAKEALDKGLTHYGPN
                     ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        QUERY   2    LSDRLELVSASEIRKLFDIAAGMKDVISLGIGEPDFDTPQHIKEYAKEALDKGLTHYGPN
        QUERY        IGLLELREAIAEKLKKQNGIEADPKTEIMVLLGANQAFLMGLSAFLKDGEEVLIPTPAFV
                     ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        QUERY        IGLLELREAIAEKLKKQNGIEADPKTEIMVLLGANQAFLMGLSAFLKDGEEVLIPTPAFV
        """
        # Alignment chunks come in (query, positives, hit) line triples,
        # possibly separated by blank lines, until end of stream.
        while 1:
            line = handle.readline()
            if not line:
                break
            if is_blank_line(line):
                continue
            else:
                consumer.query_alignment(line)
                read_and_call(handle, consumer.positive_alignment)
                read_and_call(handle, consumer.hit_alignment)
class _Consumer:
    """Build a Record from _Scanner events (DEPRECATED)."""
    # all regular expressions used -- compile only once
    _re_names = re.compile("Ali1:\s+(\S+)\s+Ali2:\s+(\S+)\s+")
    _re_threshold = \
        re.compile("Threshold of effective gap content in columns: (\S+)")
    _re_lengths = \
        re.compile("length1=(\S+)\s+filtered_length1=(\S+)\s+length2=(\S+)"
                   + "\s+filtered_length2=(\S+)")
    _re_profilewidth = \
        re.compile("Nseqs1=(\S+)\s+Neff1=(\S+)\s+Nseqs2=(\S+)\s+Neff2=(\S+)")
    _re_scores = re.compile("Smith-Waterman score = (\S+)\s+Evalue = (\S+)")
    _re_start = re.compile("(\d+)")
    _re_align = re.compile("^.{15}(\S+)")
    _re_positive_alignment = re.compile("^.{15}(.+)")
    def __init__(self):
        import warnings
        warnings.warn("Bio.Compass._Consumer is deprecated; please use the read() and parse() functions in this module instead", Bio.BiopythonDeprecationWarning)
        # self.data holds the Record under construction (created in names()).
        self.data = None
    def names(self, line):
        """
        Ali1: 60456.blo.gz.aln Ali2: allscop//14984.blo.gz.aln
              ------query-----        -------hit-------------
        """
        # A names line starts a fresh record.
        self.data = Record()
        m = self.__class__._re_names.search(line)
        self.data.query = m.group(1)
        self.data.hit = m.group(2)
    def threshold(self,line):
        m = self.__class__._re_threshold.search(line)
        self.data.gap_threshold = float(m.group(1))
    def lengths(self,line):
        m = self.__class__._re_lengths.search(line)
        self.data.query_length = int(m.group(1))
        self.data.query_filtered_length = float(m.group(2))
        self.data.hit_length = int(m.group(3))
        self.data.hit_filtered_length = float(m.group(4))
    def profilewidth(self,line):
        m = self.__class__._re_profilewidth.search(line)
        self.data.query_nseqs = int(m.group(1))
        self.data.query_neffseqs = float(m.group(2))
        self.data.hit_nseqs = int(m.group(3))
        self.data.hit_neffseqs = float(m.group(4))
    def scores(self, line):
        # Tolerate a malformed scores line: fall back to sentinel values.
        m = self.__class__._re_scores.search(line)
        if m:
            self.data.sw_score = int(m.group(1))
            self.data.evalue = float(m.group(2))
        else:
            self.data.sw_score = 0
            self.data.evalue = -1.0
    def query_alignment(self, line):
        # First number on the line (if present) is the query start coordinate.
        m = self.__class__._re_start.search(line)
        if m:
            self.data.query_start = int(m.group(1))
        m = self.__class__._re_align.match(line)
        assert m!=None, "invalid match"
        self.data.query_aln = self.data.query_aln + m.group(1)
    def positive_alignment(self,line):
        m = self.__class__._re_positive_alignment.match(line)
        assert m!=None, "invalid match"
        self.data.positives = self.data.positives + m.group(1)
    def hit_alignment(self,line):
        # Mirror of query_alignment for the hit side.
        m = self.__class__._re_start.search(line)
        if m:
            self.data.hit_start = int(m.group(1))
        m = self.__class__._re_align.match(line)
        assert m!=None, "invalid match"
        self.data.hit_aln = self.data.hit_aln + m.group(1)
class RecordParser(AbstractParser):
    """Parses compass results into a Record object (DEPRECATED).
    """
    def __init__(self):
        import warnings
        warnings.warn("Bio.Compass._RecordParser is deprecated; please use the read() and parse() functions in this module instead", Bio.BiopythonDeprecationWarning)
        self._scanner = _Scanner()
        self._consumer = _Consumer()
    def parse(self, handle):
        """Parse one record from *handle* and return the resulting Record."""
        # Ensure the handle supports pushback before feeding the scanner.
        if isinstance(handle, File.UndoHandle):
            uhandle = handle
        else:
            uhandle = File.UndoHandle(handle)
        self._scanner.feed(uhandle, self._consumer)
        return self._consumer.data
class Iterator(object):
    """Iterate through a file of compass results (DEPRECATED)."""
    def __init__(self, handle):
        import warnings
        warnings.warn("Bio.Compass.Iterator is deprecated; please use the parse() function in this module instead", Bio.BiopythonDeprecationWarning)
        self._uhandle = File.UndoHandle(handle)
        self._parser = RecordParser()
    def next(self):
        """Return the next Record, or None when the stream is exhausted."""
        # Accumulate lines until the next record's "Ali1" header (which is
        # pushed back for the following call) or end of file.
        lines = []
        while 1:
            line = self._uhandle.readline()
            if not line:
                break
            if line[0:4] == "Ali1" and lines:
                self._uhandle.saveline(line)
                break
            lines.append(line)
        if not lines:
            return None
        # Re-parse the collected chunk as a standalone record.
        data = ''.join(lines)
        return self._parser.parse(File.StringHandle(data))
|
LyonsLab/coge
|
bin/last_wrapper/Bio/Compass/__init__.py
|
Python
|
bsd-2-clause
| 14,957
|
[
"Biopython"
] |
6306e215b3df74a9ea1a3362263b9017bac851dafc4b58662542c5f4aad1ecca
|
"""
Copyright (C) since 2013 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
io.py
~~~~~
Functions to read and save model results.
"""
import os
import xarray as xr
from calliope._version import __version__
from calliope import exceptions
def read_netcdf(path):
    """Load a Calliope model dataset from a NetCDF file and return it.

    Warns (without failing) when the file was written by a different
    Calliope version than the one currently running.
    """
    with xr.open_dataset(path) as dataset:
        # Pull everything into memory so the file handle can be released.
        dataset.load()

    saved_version = dataset.attrs.get("calliope_version", False)
    if saved_version and str(saved_version) not in __version__:
        exceptions.warn(
            "This model data was created with Calliope version {}, "
            "but you are running {}. Proceed with caution!".format(
                saved_version, __version__
            )
        )

    # FIXME some checks for consistency
    # use check_dataset from the checks module
    # also check the old checking from 0.5.x
    return dataset
def save_netcdf(model_data, path, model=None):
    """Write *model_data* to a NetCDF4 file at *path*.

    If *model* is given and has a ``_model_run``, a YAML dump of the model
    run (and debug data, if present) is embedded in the file attributes.
    The dataset's in-memory attrs are restored afterwards regardless of
    success or failure.
    """
    encoding = {k: {"zlib": True, "complevel": 4} for k in model_data.data_vars}

    # Work on a copy of the attrs so the caller's dataset is untouched.
    original_model_data_attrs = model_data.attrs
    model_data_attrs = model_data.attrs.copy()

    if model is not None and hasattr(model, "_model_run"):
        # Attach _model_run and _debug_data to _model_data
        model_run_to_save = model._model_run.copy()
        for k in ["timeseries_data", "timesteps"]:  # Can't be serialised!
            model_run_to_save.pop(k, None)
        model_data_attrs["_model_run"] = model_run_to_save.to_yaml()
        if hasattr(model, "_debug_data"):
            model_data_attrs["_debug_data"] = model._debug_data.to_yaml()

    # Convert boolean attrs to ints (NetCDF attrs cannot store booleans).
    bool_attrs = [k for k, v in model_data_attrs.items() if isinstance(v, bool)]
    for k in bool_attrs:
        model_data_attrs[k] = int(model_data_attrs[k])

    # Convert None attrs to 'None' (NetCDF attrs cannot store None).
    none_attrs = [k for k, v in model_data_attrs.items() if v is None]
    for k in none_attrs:
        model_data_attrs[k] = "None"

    # Convert `object` dtype coords to string
    # FIXME: remove once xarray issue https://github.com/pydata/xarray/issues/2404 is resolved
    for k, v in model_data.coords.items():
        if v.dtype == "O":
            model_data[k] = v.astype("<U{}".format(max([len(i.item()) for i in v])))

    try:
        # Temporarily swap in the serialisable attrs for the write.
        model_data.attrs = model_data_attrs
        model_data.to_netcdf(path, format="netCDF4", encoding=encoding)
        model_data.close()  # Force-close NetCDF file after writing
    finally:  # Revert model_data.attrs back
        model_data.attrs = original_model_data_attrs
def save_csv(model_data, path, dropna=True):
    """
    If termination condition was not optimal, filters inputs only, and
    warns that results will not be saved.
    """
    # Deliberately fails if *path* already exists (exist_ok=False) so an
    # existing export is never silently overwritten.
    os.makedirs(path, exist_ok=False)

    # a MILP model which optimises to within the MIP gap, but does not fully
    # converge on the LP relaxation, may return as 'feasible', not 'optimal'
    if "termination_condition" not in model_data.attrs or model_data.attrs[
        "termination_condition"
    ] in ["optimal", "feasible"]:
        data_vars = model_data.data_vars
    else:
        # Non-optimal run: keep only input variables (is_result == 0).
        data_vars = model_data.filter_by_attrs(is_result=0).data_vars
        exceptions.warn(
            "Model termination condition was not optimal, saving inputs only."
        )

    # One CSV file per variable, named inputs_<var>.csv / results_<var>.csv.
    for var in data_vars:
        in_out = "results" if model_data[var].attrs["is_result"] else "inputs"
        out_path = os.path.join(path, "{}_{}.csv".format(in_out, var))
        series = model_data[var].to_series()
        if dropna:
            series = series.dropna()
        series.to_csv(out_path, header=True)
def save_lp(model, path):
    """Write the model's backend representation to an LP file at *path*.

    Only supported for the pyomo backend; builds the backend model first
    if it has not been built yet.

    Raises IOError for any non-pyomo backend.
    """
    if model.run_config["backend"] != "pyomo":
        raise IOError("Only the pyomo backend can save to LP.")

    # Build without solving if the backend model does not exist yet.
    if not hasattr(model, "_backend_model"):
        model.run(build_only=True)

    model._backend_model.write(
        path, format="lp", io_options={"symbolic_solver_labels": True}
    )
|
calliope-project/calliope
|
calliope/core/io.py
|
Python
|
apache-2.0
| 4,033
|
[
"NetCDF"
] |
935c1273bc0273a1f933ff0a7aa5001c5468d05c0f51b8227cf820913c39ed1c
|
"""
Acceptance tests for the teams feature.
"""
import json
import random
import time
from dateutil.parser import parse
import ddt
from nose.plugins.attrib import attr
from uuid import uuid4
from unittest import skip
from ..helpers import EventsTestMixin, UniqueCourseTest
from ...fixtures import LMS_BASE_URL
from ...fixtures.course import CourseFixture
from ...fixtures.discussion import (
Thread,
MultipleThreadFixture
)
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.course_info import CourseInfoPage
from ...pages.lms.learner_profile import LearnerProfilePage
from ...pages.lms.tab_nav import TabNavPage
from ...pages.lms.teams import (
TeamsPage,
MyTeamsPage,
BrowseTopicsPage,
BrowseTeamsPage,
TeamManagementPage,
EditMembershipPage,
TeamPage
)
from ...pages.common.utils import confirm_prompt
TOPICS_PER_PAGE = 12
class TeamsTabBase(EventsTestMixin, UniqueCourseTest):
    """Base class for Teams Tab tests"""
    def setUp(self):
        # Page objects shared by all Teams tests.
        super(TeamsTabBase, self).setUp()
        self.tab_nav = TabNavPage(self.browser)
        self.course_info_page = CourseInfoPage(self.browser, self.course_id)
        self.teams_page = TeamsPage(self.browser, self.course_id)
    def create_topics(self, num_topics):
        """Create `num_topics` test topics."""
        return [{u"description": i, u"name": i, u"id": i} for i in map(str, xrange(num_topics))]
    def create_teams(self, topic, num_teams, time_between_creation=0):
        """Create `num_teams` teams belonging to `topic`."""
        teams = []
        for i in xrange(num_teams):
            team = {
                'course_id': self.course_id,
                'topic_id': topic['id'],
                'name': 'Team {}'.format(i),
                'description': 'Description {}'.format(i),
                'language': 'aa',
                'country': 'AF'
            }
            # Create the team directly through the REST API rather than UI.
            response = self.course_fixture.session.post(
                LMS_BASE_URL + '/api/team/v0/teams/',
                data=json.dumps(team),
                headers=self.course_fixture.headers
            )
            # Sadly, this sleep is necessary in order to ensure that
            # sorting by last_activity_at works correctly when running
            # in Jenkins.
            time.sleep(time_between_creation)
            teams.append(json.loads(response.text))
        return teams
    def create_membership(self, username, team_id):
        """Assign `username` to `team_id`."""
        response = self.course_fixture.session.post(
            LMS_BASE_URL + '/api/team/v0/team_membership/',
            data=json.dumps({'username': username, 'team_id': team_id}),
            headers=self.course_fixture.headers
        )
        return json.loads(response.text)
    def set_team_configuration(self, configuration, enroll_in_course=True, global_staff=False):
        """
        Sets team configuration on the course and calls auto-auth on the user.
        """
        #pylint: disable=attribute-defined-outside-init
        self.course_fixture = CourseFixture(**self.course_info)
        if configuration:
            self.course_fixture.add_advanced_settings(
                {u"teams_configuration": {u"value": configuration}}
            )
        self.course_fixture.install()

        enroll_course_id = self.course_id if enroll_in_course else None
        # Log in (and optionally enroll) via auto-auth; stores user details.
        #pylint: disable=attribute-defined-outside-init
        self.user_info = AutoAuthPage(self.browser, course_id=enroll_course_id, staff=global_staff).visit().user_info
        self.course_info_page.visit()
    def verify_teams_present(self, present):
        """
        Verifies whether or not the teams tab is present. If it should be present, also
        checks the text on the page (to ensure view is working).
        """
        if present:
            self.assertIn("Teams", self.tab_nav.tab_names)
            self.teams_page.visit()
            self.assertEqual(self.teams_page.active_tab(), 'browse')
        else:
            self.assertNotIn("Teams", self.tab_nav.tab_names)
    def verify_teams(self, page, expected_teams):
        """Verify that the list of team cards on the current page match the expected teams in order."""
        def assert_team_equal(expected_team, team_card_name, team_card_description):
            """
            Helper to assert that a single team card has the expected name and
            description.
            """
            self.assertEqual(expected_team['name'], team_card_name)
            self.assertEqual(expected_team['description'], team_card_description)

        team_card_names = page.team_names
        team_card_descriptions = page.team_descriptions
        # Python 2 map() with a 3-ary function zips the three lists together.
        map(assert_team_equal, expected_teams, team_card_names, team_card_descriptions)
    def verify_my_team_count(self, expected_number_of_teams):
        """ Verify the number of teams shown on "My Team". """

        # We are doing these operations on this top-level page object to avoid reloading the page.
        self.teams_page.verify_my_team_count(expected_number_of_teams)
    def only_team_events(self, event):
        """Filter out all non-team events."""
        return event['event_type'].startswith('edx.team.')
@ddt.ddt
@attr('shard_5')
class TeamsTabTest(TeamsTabBase):
    """
    Tests verifying when the Teams tab is present.
    """

    def _navigate_to_team_route(self, route, topic, team):
        """
        Point the browser at the teams URL fragment `route`, formatted with
        the given topic's and team's ids, and wait for the resulting AJAX
        requests to finish.

        Shared by the routing tests below so the fragment-stripping logic is
        implemented exactly once (it previously existed in two divergent
        copies).
        """
        # Get the base URL (the URL without any trailing fragment) so the
        # new route fully replaces any existing one.
        url = self.browser.current_url.split('#')[0]
        self.browser.get(
            '{url}#{route}'.format(
                url=url,
                route=route.format(
                    topic_id=topic['id'],
                    team_id=team['id']
                )
            )
        )
        self.teams_page.wait_for_ajax()

    def test_teams_not_enabled(self):
        """
        Scenario: teams tab should not be present if no team configuration is set
        Given I am enrolled in a course without team configuration
        When I view the course info page
        Then I should not see the Teams tab
        """
        self.set_team_configuration(None)
        self.verify_teams_present(False)

    def test_teams_not_enabled_no_topics(self):
        """
        Scenario: teams tab should not be present if team configuration does not specify topics
        Given I am enrolled in a course with no topics in the team configuration
        When I view the course info page
        Then I should not see the Teams tab
        """
        self.set_team_configuration({u"max_team_size": 10, u"topics": []})
        self.verify_teams_present(False)

    def test_teams_not_enabled_not_enrolled(self):
        """
        Scenario: teams tab should not be present if student is not enrolled in the course
        Given there is a course with team configuration and topics
        And I am not enrolled in that course, and am not global staff
        When I view the course info page
        Then I should not see the Teams tab
        """
        self.set_team_configuration(
            {u"max_team_size": 10, u"topics": self.create_topics(1)},
            enroll_in_course=False
        )
        self.verify_teams_present(False)

    def test_teams_enabled(self):
        """
        Scenario: teams tab should be present if user is enrolled in the course and it has team configuration
        Given I am enrolled in a course with team configuration and topics
        When I view the course info page
        Then I should see the Teams tab
        And the correct content should be on the page
        """
        self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(1)})
        self.verify_teams_present(True)

    def test_teams_enabled_global_staff(self):
        """
        Scenario: teams tab should be present if user is not enrolled in the course, but is global staff
        Given there is a course with team configuration
        And I am not enrolled in that course, but am global staff
        When I view the course info page
        Then I should see the Teams tab
        And the correct content should be on the page
        """
        self.set_team_configuration(
            {u"max_team_size": 10, u"topics": self.create_topics(1)},
            enroll_in_course=False,
            global_staff=True
        )
        self.verify_teams_present(True)

    @ddt.data(
        'topics/{topic_id}',
        'topics/{topic_id}/search',
        'teams/{topic_id}/{team_id}/edit-team',
        'teams/{topic_id}/{team_id}'
    )
    def test_unauthorized_error_message(self, route):
        """Ensure that an error message is shown to the user if they attempt
        to take an action which makes an AJAX request while not signed
        in.
        """
        topics = self.create_topics(1)
        topic = topics[0]
        self.set_team_configuration(
            {u'max_team_size': 10, u'topics': topics},
            global_staff=True
        )
        team = self.create_teams(topic, 1)[0]
        self.teams_page.visit()
        # Expire the session so the next AJAX request is unauthorized.
        self.browser.delete_cookie('sessionid')
        self._navigate_to_team_route(route, topic, team)
        self.assertEqual(
            self.teams_page.warning_message,
            u"Your request could not be completed. Reload the page and try again."
        )

    @ddt.data(
        ('browse', '.topics-list'),
        # TODO: find a reliable way to match the "My Teams" tab
        # ('my-teams', 'div.teams-list'),
        ('teams/{topic_id}/{team_id}', 'div.discussion-module'),
        ('topics/{topic_id}/create-team', 'div.create-team-instructions'),
        ('topics/{topic_id}', '.teams-list'),
        ('not-a-real-route', 'div.warning')
    )
    @ddt.unpack
    def test_url_routing(self, route, selector):
        """Ensure that navigating to a URL route correctly updates the page
        content.
        """
        topics = self.create_topics(1)
        topic = topics[0]
        self.set_team_configuration({
            u'max_team_size': 10,
            u'topics': topics
        })
        team = self.create_teams(topic, 1)[0]
        self.teams_page.visit()
        self._navigate_to_team_route(route, topic, team)
        # The expected content for the route should be both present and visible.
        self.assertTrue(self.teams_page.q(css=selector).present)
        self.assertTrue(self.teams_page.q(css=selector).visible)
@attr('shard_5')
class MyTeamsTest(TeamsTabBase):
    """
    Tests for the "My Teams" tab of the Teams page.
    """
    def setUp(self):
        super(MyTeamsTest, self).setUp()
        # A single topic is enough here: these tests only vary the user's
        # team memberships, not the topic list.
        self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
        self.set_team_configuration({'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]})
        self.my_teams_page = MyTeamsPage(self.browser, self.course_id)
        # Analytics event that every visit to "My Teams" is expected to emit.
        self.page_viewed_event = {
            'event_type': 'edx.team.page_viewed',
            'event': {
                'page_name': 'my-teams',
                'topic_id': None,
                'team_id': None
            }
        }
    def test_not_member_of_any_teams(self):
        """
        Scenario: Visiting the My Teams page when user is not a member of any team should not display any teams.
        Given I am enrolled in a course with a team configuration and a topic but am not a member of a team
        When I visit the My Teams page
        And I should see no teams
        And I should see a message that I belong to no teams.
        """
        with self.assert_events_match_during(self.only_team_events, expected_events=[self.page_viewed_event]):
            self.my_teams_page.visit()
        self.assertEqual(len(self.my_teams_page.team_cards), 0, msg='Expected to see no team cards')
        self.assertEqual(
            self.my_teams_page.q(css='.page-content-main').text,
            [u'You are not currently a member of any team.']
        )
    def test_member_of_a_team(self):
        """
        Scenario: Visiting the My Teams page when user is a member of a team should display the teams.
        Given I am enrolled in a course with a team configuration and a topic and am a member of a team
        When I visit the My Teams page
        Then I should see a pagination header showing the number of teams
        And I should see all the expected team cards
        And I should not see a pagination footer
        """
        teams = self.create_teams(self.topic, 1)
        # Make the test user a member so the team appears on "My Teams".
        self.create_membership(self.user_info['username'], teams[0]['id'])
        with self.assert_events_match_during(self.only_team_events, expected_events=[self.page_viewed_event]):
            self.my_teams_page.visit()
        self.verify_teams(self.my_teams_page, teams)
@attr('shard_5')
@ddt.ddt
class BrowseTopicsTest(TeamsTabBase):
    """
    Tests for the Browse tab of the Teams page.
    """
    def setUp(self):
        super(BrowseTopicsTest, self).setUp()
        self.topics_page = BrowseTopicsPage(self.browser, self.course_id)
    @ddt.data(('name', False), ('team_count', True))
    @ddt.unpack
    def test_sort_topics(self, sort_order, reverse):
        """
        Scenario: the user should be able to sort the list of topics by name or team count
        Given I am enrolled in a course with team configuration and topics
        When I visit the Teams page
        And I browse topics
        Then I should see a list of topics for the course
        When I choose a sort order
        Then I should see the paginated list of topics in that order
        """
        topics = self.create_topics(TOPICS_PER_PAGE + 1)
        self.set_team_configuration({u"max_team_size": 100, u"topics": topics})
        # Give each topic a distinct team count (0..N-1), assigned in a
        # shuffled order so the sort result cannot pass by accident.
        for i, topic in enumerate(random.sample(topics, len(topics))):
            self.create_teams(topic, i)
            topic['team_count'] = i
        self.topics_page.visit()
        self.topics_page.sort_topics_by(sort_order)
        topic_names = self.topics_page.topic_names
        self.assertEqual(len(topic_names), TOPICS_PER_PAGE)
        # Compare against the first page of the locally-sorted topic list.
        self.assertEqual(
            topic_names,
            [t['name'] for t in sorted(topics, key=lambda t: t[sort_order], reverse=reverse)][:TOPICS_PER_PAGE]
        )
    def test_sort_topics_update(self):
        """
        Scenario: the list of topics should remain sorted after updates
        Given I am enrolled in a course with team configuration and topics
        When I visit the Teams page
        And I browse topics and choose a sort order
        Then I should see the paginated list of topics in that order
        When I create a team in one of those topics
        And I return to the topics list
        Then I should see the topics in the correct sorted order
        """
        topics = self.create_topics(3)
        self.set_team_configuration({u"max_team_size": 100, u"topics": topics})
        self.topics_page.visit()
        self.topics_page.sort_topics_by('team_count')
        # Pick the last-listed topic; after creating a team in it, the test
        # asserts it moves to the front of the team_count ordering.
        topic_name = self.topics_page.topic_names[-1]
        topic = [t for t in topics if t['name'] == topic_name][0]
        self.topics_page.browse_teams_for_topic(topic_name)
        browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, topic)
        self.assertTrue(browse_teams_page.is_browser_on_page())
        browse_teams_page.click_create_team_link()
        create_team_page = TeamManagementPage(self.browser, self.course_id, topic)
        create_team_page.value_for_text_field(field_id='name', value='Team Name', press_enter=False)
        create_team_page.value_for_textarea_field(
            field_id='description',
            value='Team description.'
        )
        create_team_page.submit_form()
        team_page = TeamPage(self.browser, self.course_id)
        self.assertTrue(team_page.is_browser_on_page)
        team_page.click_all_topics()
        self.assertTrue(self.topics_page.is_browser_on_page())
        self.topics_page.wait_for_ajax()
        self.assertEqual(topic_name, self.topics_page.topic_names[0])
    def test_list_topics(self):
        """
        Scenario: a list of topics should be visible in the "Browse" tab
        Given I am enrolled in a course with team configuration and topics
        When I visit the Teams page
        And I browse topics
        Then I should see a list of topics for the course
        """
        self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(2)})
        self.topics_page.visit()
        self.assertEqual(len(self.topics_page.topic_cards), 2)
        self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-2 out of 2 total'))
        # With fewer topics than a page, no pagination controls should show.
        self.assertFalse(self.topics_page.pagination_controls_visible())
        self.assertFalse(self.topics_page.is_previous_page_button_enabled())
        self.assertFalse(self.topics_page.is_next_page_button_enabled())
    def test_topic_pagination(self):
        """
        Scenario: a list of topics should be visible in the "Browse" tab, paginated 12 per page
        Given I am enrolled in a course with team configuration and topics
        When I visit the Teams page
        And I browse topics
        Then I should see only the first 12 topics
        """
        self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(20)})
        self.topics_page.visit()
        self.assertEqual(len(self.topics_page.topic_cards), TOPICS_PER_PAGE)
        self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-12 out of 20 total'))
        self.assertTrue(self.topics_page.pagination_controls_visible())
        self.assertFalse(self.topics_page.is_previous_page_button_enabled())
        self.assertTrue(self.topics_page.is_next_page_button_enabled())
    def test_go_to_numbered_page(self):
        """
        Scenario: topics should be able to be navigated by page number
        Given I am enrolled in a course with team configuration and topics
        When I visit the Teams page
        And I browse topics
        And I enter a valid page number in the page number input
        Then I should see that page of topics
        """
        # 25 topics = 3 pages (12/12/1); page 3 holds a single topic.
        self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(25)})
        self.topics_page.visit()
        self.topics_page.go_to_page(3)
        self.assertEqual(len(self.topics_page.topic_cards), 1)
        self.assertTrue(self.topics_page.is_previous_page_button_enabled())
        self.assertFalse(self.topics_page.is_next_page_button_enabled())
    def test_go_to_invalid_page(self):
        """
        Scenario: browsing topics should not respond to invalid page numbers
        Given I am enrolled in a course with team configuration and topics
        When I visit the Teams page
        And I browse topics
        And I enter an invalid page number in the page number input
        Then I should stay on the current page
        """
        # 13 topics = 2 pages, so page 3 is out of range.
        self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(13)})
        self.topics_page.visit()
        self.topics_page.go_to_page(3)
        self.assertEqual(self.topics_page.get_current_page_number(), 1)
    def test_page_navigation_buttons(self):
        """
        Scenario: browsing topics should not respond to invalid page numbers
        Given I am enrolled in a course with team configuration and topics
        When I visit the Teams page
        And I browse topics
        When I press the next page button
        Then I should move to the next page
        When I press the previous page button
        Then I should move to the previous page
        """
        self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(13)})
        self.topics_page.visit()
        self.topics_page.press_next_page_button()
        self.assertEqual(len(self.topics_page.topic_cards), 1)
        self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 13-13 out of 13 total'))
        self.topics_page.press_previous_page_button()
        self.assertEqual(len(self.topics_page.topic_cards), TOPICS_PER_PAGE)
        self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-12 out of 13 total'))
    def test_topic_description_truncation(self):
        """
        Scenario: excessively long topic descriptions should be truncated so
        as to fit within a topic card.
        Given I am enrolled in a course with a team configuration and a topic
        with a long description
        When I visit the Teams page
        And I browse topics
        Then I should see a truncated topic description
        """
        initial_description = "A" + " really" * 50 + " long description"
        self.set_team_configuration(
            {u"max_team_size": 1, u"topics": [{"name": "", "id": "", "description": initial_description}]}
        )
        self.topics_page.visit()
        truncated_description = self.topics_page.topic_descriptions[0]
        # The visible text must be a shorter, ellipsized prefix of the original.
        self.assertLess(len(truncated_description), len(initial_description))
        self.assertTrue(truncated_description.endswith('...'))
        self.assertIn(truncated_description.split('...')[0], initial_description)
    def test_go_to_teams_list(self):
        """
        Scenario: Clicking on a Topic Card should take you to the
        teams list for that Topic.
        Given I am enrolled in a course with a team configuration and a topic
        When I visit the Teams page
        And I browse topics
        And I click on the arrow link to view teams for the first topic
        Then I should be on the browse teams page
        """
        topic = {u"name": u"Example Topic", u"id": u"example_topic", u"description": "Description"}
        self.set_team_configuration(
            {u"max_team_size": 1, u"topics": [topic]}
        )
        self.topics_page.visit()
        self.topics_page.browse_teams_for_topic('Example Topic')
        browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, topic)
        self.assertTrue(browse_teams_page.is_browser_on_page())
        self.assertEqual(browse_teams_page.header_name, 'Example Topic')
        self.assertEqual(browse_teams_page.header_description, 'Description')
    def test_page_viewed_event(self):
        """
        Scenario: Visiting the browse topics page should fire a page viewed event.
        Given I am enrolled in a course with a team configuration and a topic
        When I visit the browse topics page
        Then my browser should post a page viewed event
        """
        topic = {u"name": u"Example Topic", u"id": u"example_topic", u"description": "Description"}
        self.set_team_configuration(
            {u"max_team_size": 1, u"topics": [topic]}
        )
        events = [{
            'event_type': 'edx.team.page_viewed',
            'event': {
                'page_name': 'browse',
                'topic_id': None,
                'team_id': None
            }
        }]
        with self.assert_events_match_during(self.only_team_events, expected_events=events):
            self.topics_page.visit()
@attr('shard_5')
@ddt.ddt
class BrowseTeamsWithinTopicTest(TeamsTabBase):
    """
    Tests for browsing Teams within a Topic on the Teams page.
    """
    # Number of team cards the UI shows per page.
    TEAMS_PAGE_SIZE = 10
    def setUp(self):
        super(BrowseTeamsWithinTopicTest, self).setUp()
        self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
        self.max_team_size = 10
        self.set_team_configuration({
            'course_id': self.course_id,
            'max_team_size': self.max_team_size,
            'topics': [self.topic]
        })
        self.browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
        self.topics_page = BrowseTopicsPage(self.browser, self.course_id)
    def teams_with_default_sort_order(self, teams):
        """Return a list of teams sorted according to the default ordering
        (last_activity_at, with a secondary sort by open slots).
        """
        # NOTE(review): the secondary key used here is membership size,
        # descending (i.e. fewest open slots first) — confirm this matches
        # the server's tie-breaking order.
        return sorted(
            sorted(teams, key=lambda t: len(t['membership']), reverse=True),
            key=lambda t: parse(t['last_activity_at']).replace(microsecond=0),
            reverse=True
        )
    def verify_page_header(self):
        """Verify that the page header correctly reflects the current topic's name and description."""
        self.assertEqual(self.browse_teams_page.header_name, self.topic['name'])
        self.assertEqual(self.browse_teams_page.header_description, self.topic['description'])
    def verify_search_header(self, search_results_page, search_query):
        """Verify that the page header correctly reflects the current search query."""
        self.assertEqual(search_results_page.header_name, 'Team Search')
        self.assertEqual(
            search_results_page.header_description,
            'Showing results for "{search_query}"'.format(search_query=search_query)
        )
    def verify_on_page(self, teams_page, page_num, total_teams, pagination_header_text, footer_visible):
        """
        Verify that we are on the correct team list page.
        Arguments:
            teams_page (BaseTeamsPage): The teams page object that should be the current page.
            page_num (int): The one-indexed page number that we expect to be on
            total_teams (list): An unsorted list of all the teams for the
                current topic
            pagination_header_text (str): Text we expect to see in the
                pagination header.
            footer_visible (bool): Whether we expect to see the pagination
                footer controls.
        """
        sorted_teams = self.teams_with_default_sort_order(total_teams)
        self.assertTrue(teams_page.get_pagination_header_text().startswith(pagination_header_text))
        self.verify_teams(
            teams_page,
            sorted_teams[(page_num - 1) * self.TEAMS_PAGE_SIZE:page_num * self.TEAMS_PAGE_SIZE]
        )
        self.assertEqual(
            teams_page.pagination_controls_visible(),
            footer_visible,
            # Parentheses are required around the conditional: without them
            # the '+' bound tighter and the message collapsed to just
            # 'invisible' whenever footer_visible was falsy.
            msg='Expected paging footer to be ' + ('visible' if footer_visible else 'invisible')
        )
    @ddt.data(
        ('open_slots', 'last_activity_at', True),
        ('last_activity_at', 'open_slots', True)
    )
    @ddt.unpack
    def test_sort_teams(self, sort_order, secondary_sort_order, reverse):
        """
        Scenario: the user should be able to sort the list of teams by open slots or last activity
        Given I am enrolled in a course with team configuration and topics
        When I visit the Teams page
        And I browse teams within a topic
        Then I should see a list of teams for that topic
        When I choose a sort order
        Then I should see the paginated list of teams in that order
        """
        teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1)
        # Fill teams with varying numbers of members (in shuffled order) so
        # open slots and last activity differ per team.
        for i, team in enumerate(random.sample(teams, len(teams))):
            for _ in range(i):
                user_info = AutoAuthPage(self.browser, course_id=self.course_id).visit().user_info
                self.create_membership(user_info['username'], team['id'])
            team['open_slots'] = self.max_team_size - i
            # Parse last activity date, removing microseconds because
            # the Django ORM does not support them. Will be fixed in
            # Django 1.8.
            team['last_activity_at'] = parse(team['last_activity_at']).replace(microsecond=0)
        # Re-authenticate as staff after creating users
        AutoAuthPage(
            self.browser,
            course_id=self.course_id,
            staff=True
        ).visit()
        self.browse_teams_page.visit()
        self.browse_teams_page.sort_teams_by(sort_order)
        team_names = self.browse_teams_page.team_names
        self.assertEqual(len(team_names), self.TEAMS_PAGE_SIZE)
        # Stable two-key sort: apply the secondary key first, then the primary.
        sorted_teams = [
            team['name']
            for team in sorted(
                sorted(teams, key=lambda t: t[secondary_sort_order], reverse=reverse),
                key=lambda t: t[sort_order],
                reverse=reverse
            )
        ][:self.TEAMS_PAGE_SIZE]
        self.assertEqual(team_names, sorted_teams)
    def test_default_sort_order(self):
        """
        Scenario: the list of teams should be sorted by last activity by default
        Given I am enrolled in a course with team configuration and topics
        When I visit the Teams page
        And I browse teams within a topic
        Then I should see a list of teams for that topic, sorted by last activity
        """
        self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1)
        self.browse_teams_page.visit()
        self.assertEqual(self.browse_teams_page.sort_order, 'last activity')
    def test_no_teams(self):
        """
        Scenario: Visiting a topic with no teams should not display any teams.
        Given I am enrolled in a course with a team configuration and a topic
        When I visit the Teams page for that topic
        Then I should see the correct page header
        And I should see a pagination header showing no teams
        And I should see no teams
        And I should see a button to add a team
        And I should not see a pagination footer
        """
        self.browse_teams_page.visit()
        self.verify_page_header()
        self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
        self.assertEqual(len(self.browse_teams_page.team_cards), 0, msg='Expected to see no team cards')
        self.assertFalse(
            self.browse_teams_page.pagination_controls_visible(),
            msg='Expected paging footer to be invisible'
        )
    def test_teams_one_page(self):
        """
        Scenario: Visiting a topic with fewer teams than the page size should
        all those teams on one page.
        Given I am enrolled in a course with a team configuration and a topic
        When I visit the Teams page for that topic
        Then I should see the correct page header
        And I should see a pagination header showing the number of teams
        And I should see all the expected team cards
        And I should see a button to add a team
        And I should not see a pagination footer
        """
        teams = self.teams_with_default_sort_order(
            self.create_teams(self.topic, self.TEAMS_PAGE_SIZE, time_between_creation=1)
        )
        self.browse_teams_page.visit()
        self.verify_page_header()
        self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 1-10 out of 10 total'))
        self.verify_teams(self.browse_teams_page, teams)
        self.assertFalse(
            self.browse_teams_page.pagination_controls_visible(),
            msg='Expected paging footer to be invisible'
        )
    def test_teams_navigation_buttons(self):
        """
        Scenario: The user should be able to page through a topic's team list
        using navigation buttons when it is longer than the page size.
        Given I am enrolled in a course with a team configuration and a topic
        When I visit the Teams page for that topic
        Then I should see the correct page header
        And I should see that I am on the first page of results
        When I click on the next page button
        Then I should see that I am on the second page of results
        And when I click on the previous page button
        Then I should see that I am on the first page of results
        """
        teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1, time_between_creation=1)
        self.browse_teams_page.visit()
        self.verify_page_header()
        self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 11 total', True)
        self.browse_teams_page.press_next_page_button()
        self.verify_on_page(self.browse_teams_page, 2, teams, 'Showing 11-11 out of 11 total', True)
        self.browse_teams_page.press_previous_page_button()
        self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 11 total', True)
    def test_teams_page_input(self):
        """
        Scenario: The user should be able to page through a topic's team list
        using the page input when it is longer than the page size.
        Given I am enrolled in a course with a team configuration and a topic
        When I visit the Teams page for that topic
        Then I should see the correct page header
        And I should see that I am on the first page of results
        When I input the second page
        Then I should see that I am on the second page of results
        When I input the first page
        Then I should see that I am on the first page of results
        """
        teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 10, time_between_creation=1)
        self.browse_teams_page.visit()
        self.verify_page_header()
        self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 20 total', True)
        self.browse_teams_page.go_to_page(2)
        self.verify_on_page(self.browse_teams_page, 2, teams, 'Showing 11-20 out of 20 total', True)
        self.browse_teams_page.go_to_page(1)
        self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 20 total', True)
    def test_browse_team_topics(self):
        """
        Scenario: User should be able to navigate to "browse all teams" and "search team description" links.
        Given I am enrolled in a course with teams enabled
        When I visit the Teams page for a topic
        Then I should see the correct page header
        And I should see the link to "browse teams in other topics"
        When I should navigate to that link
        Then I should see the topic browse page
        """
        self.browse_teams_page.visit()
        self.verify_page_header()
        self.browse_teams_page.click_browse_all_teams_link()
        self.assertTrue(self.topics_page.is_browser_on_page())
    @skip("Skip until TNL-3198 (searching teams makes two AJAX requests) is resolved")
    def test_search(self):
        """
        Scenario: User should be able to search for a team
        Given I am enrolled in a course with teams enabled
        When I visit the Teams page for that topic
        And I search for 'banana'
        Then I should see the search result page
        And the search header should be shown
        And 0 results should be shown
        And my browser should fire a page viewed event for the search page
        """
        # Note: all searches will return 0 results with the mock search server
        # used by Bok Choy.
        search_text = 'banana'
        self.create_teams(self.topic, 5)
        self.browse_teams_page.visit()
        events = [{
            'event_type': 'edx.team.searched',
            'event': {
                'search_text': search_text,
                'topic_id': self.topic['id'],
                'number_of_results': 0
            }
        }, {
            'event_type': 'edx.team.page_viewed',
            'event': {
                'page_name': 'search-teams',
                'topic_id': self.topic['id'],
                'team_id': None
            }
        }]
        with self.assert_events_match_during(self.only_team_events, expected_events=events):
            search_results_page = self.browse_teams_page.search(search_text)
        self.verify_search_header(search_results_page, search_text)
        self.assertTrue(search_results_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
    def test_page_viewed_event(self):
        """
        Scenario: Visiting the browse page should fire a page viewed event.
        Given I am enrolled in a course with a team configuration and a topic
        When I visit the Teams page
        Then my browser should post a page viewed event for the teams page
        """
        self.create_teams(self.topic, 5)
        events = [{
            'event_type': 'edx.team.page_viewed',
            'event': {
                'page_name': 'single-topic',
                'topic_id': self.topic['id'],
                'team_id': None
            }
        }]
        with self.assert_events_match_during(self.only_team_events, expected_events=events):
            self.browse_teams_page.visit()
@attr('shard_5')
class TeamFormActions(TeamsTabBase):
    """
    Base class for create, edit, and delete team.
    """
    # Description entered into the create/edit form by fill_create_or_edit_form.
    TEAM_DESCRIPTION = 'The Avengers are a fictional team of superheroes.'
    # Topic that subclasses configure their course's team settings around.
    topic = {'name': 'Example Topic', 'id': 'example_topic', 'description': 'Description'}
    # Team name entered into the create/edit form by fill_create_or_edit_form.
    TEAMS_NAME = 'Avengers'
    def setUp(self):
        super(TeamFormActions, self).setUp()
        self.team_management_page = TeamManagementPage(self.browser, self.course_id, self.topic)
    def verify_page_header(self, title, description, breadcrumbs):
        """
        Verify that the page header correctly reflects the
        create team header, description and breadcrumb.
        """
        self.assertEqual(self.team_management_page.header_page_name, title)
        self.assertEqual(self.team_management_page.header_page_description, description)
        self.assertEqual(self.team_management_page.header_page_breadcrumbs, breadcrumbs)
    def verify_and_navigate_to_create_team_page(self):
        """Navigates to the create team page and verifies."""
        # NOTE: self.browse_teams_page is expected to be created by the
        # subclass's setUp (see CreateTeamTest).
        self.browse_teams_page.click_create_team_link()
        self.verify_page_header(
            title='Create a New Team',
            description='Create a new team if you can\'t find an existing team to join, '
                        'or if you would like to learn with friends you know.',
            breadcrumbs='All Topics {topic_name}'.format(topic_name=self.topic['name'])
        )
    def verify_and_navigate_to_edit_team_page(self):
        """Navigates to the edit team page and verifies."""
        # self.team_page and self.team are provided by subclasses, hence the
        # pylint disable below.
        # pylint: disable=no-member
        self.assertEqual(self.team_page.team_name, self.team['name'])
        self.assertTrue(self.team_page.edit_team_button_present)
        self.team_page.click_edit_team_button()
        self.team_management_page.wait_for_page()
        # Edit page header.
        self.verify_page_header(
            title='Edit Team',
            description='If you make significant changes, make sure you notify '
                        'members of the team before making these changes.',
            breadcrumbs='All Topics {topic_name} {team_name}'.format(
                topic_name=self.topic['name'],
                team_name=self.team['name']
            )
        )
    def verify_team_info(self, name, description, location, language):
        """Verify the team information on team page."""
        # self.team_page is provided by subclasses, hence the pylint disable.
        # pylint: disable=no-member
        self.assertEqual(self.team_page.team_name, name)
        self.assertEqual(self.team_page.team_description, description)
        self.assertEqual(self.team_page.team_location, location)
        self.assertEqual(self.team_page.team_language, language)
    def fill_create_or_edit_form(self):
        """Fill the create/edit team form fields with appropriate values."""
        self.team_management_page.value_for_text_field(
            field_id='name',
            value=self.TEAMS_NAME,
            press_enter=False
        )
        self.team_management_page.value_for_textarea_field(
            field_id='description',
            value=self.TEAM_DESCRIPTION
        )
        self.team_management_page.value_for_dropdown_field(field_id='language', value='English')
        self.team_management_page.value_for_dropdown_field(field_id='country', value='Pakistan')
    def verify_all_fields_exist(self):
        """
        Verify the fields for create/edit page.
        """
        self.assertEqual(
            self.team_management_page.message_for_field('name'),
            'A name that identifies your team (maximum 255 characters).'
        )
        self.assertEqual(
            self.team_management_page.message_for_textarea_field('description'),
            'A short description of the team to help other learners understand '
            'the goals or direction of the team (maximum 300 characters).'
        )
        self.assertEqual(
            self.team_management_page.message_for_field('country'),
            'The country that team members primarily identify with.'
        )
        self.assertEqual(
            self.team_management_page.message_for_field('language'),
            'The language that team members primarily use to communicate with each other.'
        )
@ddt.ddt
class CreateTeamTest(TeamFormActions):
"""
Tests for creating a new Team within a Topic on the Teams page.
"""
    def setUp(self):
        super(CreateTeamTest, self).setUp()
        # Configure the course around the shared `topic` class attribute and
        # start every test from that topic's team list page.
        self.set_team_configuration({'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]})
        self.browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
        self.browse_teams_page.visit()
    def test_user_can_see_create_team_page(self):
        """
        Scenario: The user should be able to see the create team page via teams list page.
        Given I am enrolled in a course with a team configuration and a topic
        When I visit the Teams page for that topic
        Then I should see the Create Team page link on bottom
        And When I click create team link
        Then I should see the create team page.
        And I should see the create team header
        And I should also see the help messages for fields.
        """
        # Navigation and header checks are done inside the helper; here we
        # additionally verify the per-field help messages.
        self.verify_and_navigate_to_create_team_page()
        self.verify_all_fields_exist()
    def test_user_can_see_error_message_for_missing_data(self):
        """
        Scenario: The user should be able to see error message in case of missing required field.
        Given I am enrolled in a course with a team configuration and a topic
        When I visit the Create Team page for that topic
        Then I should see the Create Team header and form
        And When I click create team button without filling required fields
        Then I should see the error message and highlighted fields.
        """
        self.verify_and_navigate_to_create_team_page()
        # Submit the form empty to trigger validation of the required fields.
        self.team_management_page.submit_form()
        self.assertEqual(
            self.team_management_page.validation_message_text,
            'Check the highlighted fields below and try again.'
        )
        # Both required fields (name and description) should be flagged.
        self.assertTrue(self.team_management_page.error_for_field(field_id='name'))
        self.assertTrue(self.team_management_page.error_for_field(field_id='description'))
    def test_user_can_see_error_message_for_incorrect_data(self):
        """
        Scenario: The user should be able to see error message in case of increasing length for required fields.
        Given I am enrolled in a course with a team configuration and a topic
        When I visit the Create Team page for that topic
        Then I should see the Create Team header and form
        When I add text > than 255 characters for name field
        And I click Create button
        Then I should see the error message for exceeding length.
        """
        self.verify_and_navigate_to_create_team_page()
        # Fill the name field with >255 characters to see validation message.
        self.team_management_page.value_for_text_field(
            field_id='name',
            value='EdX is a massive open online course (MOOC) provider and online learning platform. '
                  'It hosts online university-level courses in a wide range of disciplines to a worldwide '
                  'audience, some at no charge. It also conducts research into learning based on how '
                  'people use its platform. EdX was created for students and institutions that seek to'
                  'transform themselves through cutting-edge technologies, innovative pedagogy, and '
                  'rigorous courses. More than 70 schools, nonprofits, corporations, and international'
                  'organizations offer or plan to offer courses on the edX website. As of 22 October 2014,'
                  'edX has more than 4 million users taking more than 500 courses online.',
            press_enter=False
        )
        self.team_management_page.submit_form()
        self.assertEqual(
            self.team_management_page.validation_message_text,
            'Check the highlighted fields below and try again.'
        )
        # Only the over-length name field should be flagged.
        self.assertTrue(self.team_management_page.error_for_field(field_id='name'))
    def test_user_can_create_new_team_successfully(self):
        """
        Scenario: The user should be able to create new team.
        Given I am enrolled in a course with a team configuration and a topic
        When I visit the Create Team page for that topic
        Then I should see the Create Team header and form
        When I fill all the fields present with appropriate data
        And I click Create button
        Then I expect analytics events to be emitted
        And I should see the page for my team
        And I should see the message that says "You are member of this team"
        And the new team should be added to the list of teams within the topic
        And the number of teams should be updated on the topic card
        And if I switch to "My Team", the newly created team is displayed
        """
        # Log in as a (non-staff) enrolled learner before creating the team.
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        self.browse_teams_page.visit()
        self.verify_and_navigate_to_create_team_page()
        self.fill_create_or_edit_form()
        # Creating a team should emit both a team-created event and a
        # learner-added event for the creator's automatic membership.
        expected_events = [
            {
                'event_type': 'edx.team.created'
            },
            {
                'event_type': 'edx.team.learner_added',
                'event': {
                    'add_method': 'added_on_create',
                }
            }
        ]
        with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
            self.team_management_page.submit_form()
        # Verify that the page is shown for the new team
        team_page = TeamPage(self.browser, self.course_id)
        team_page.wait_for_page()
        self.assertEqual(team_page.team_name, self.TEAMS_NAME)
        self.assertEqual(team_page.team_description, self.TEAM_DESCRIPTION)
        self.assertEqual(team_page.team_user_membership_text, 'You are a member of this team.')
        # Verify the new team was added to the topic list
        self.teams_page.click_specific_topic("Example Topic")
        self.teams_page.verify_topic_team_count(1)
        self.teams_page.click_all_topics()
        self.teams_page.verify_team_count_in_first_topic(1)
        # Verify that if one switches to "My Team" without reloading the page, the newly created team is shown.
        self.verify_my_team_count(1)
    def test_user_can_cancel_the_team_creation(self):
        """
        Scenario: The user should be able to cancel the creation of new team.
        Given I am enrolled in a course with a team configuration and a topic
        When I visit the Create Team page for that topic
        Then I should see the Create Team header and form
        When I click Cancel button
        Then I should see teams list page without any new team.
        And if I switch to "My Team", it shows no teams
        """
        # Sanity check: no teams exist before we open the create form.
        self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
        self.verify_and_navigate_to_create_team_page()
        self.team_management_page.cancel_team()
        # Cancelling must return to the browse page with the count unchanged.
        self.assertTrue(self.browse_teams_page.is_browser_on_page())
        self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
        self.teams_page.click_all_topics()
        self.teams_page.verify_team_count_in_first_topic(0)
        self.verify_my_team_count(0)
    def test_page_viewed_event(self):
        """
        Scenario: Visiting the create team page should fire a page viewed event.
        Given I am enrolled in a course with a team configuration and a topic
        When I visit the create team page
        Then my browser should post a page viewed event
        """
        # team_id is None because no team exists yet on the create page.
        events = [{
            'event_type': 'edx.team.page_viewed',
            'event': {
                'page_name': 'new-team',
                'topic_id': self.topic['id'],
                'team_id': None
            }
        }]
        with self.assert_events_match_during(self.only_team_events, expected_events=events):
            self.verify_and_navigate_to_create_team_page()
@ddt.ddt
class DeleteTeamTest(TeamFormActions):
    """
    Tests for deleting teams.
    """

    def setUp(self):
        super(DeleteTeamTest, self).setUp()
        # Log in as global staff so the delete controls are available.
        self.set_team_configuration(
            {'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
            global_staff=True
        )
        self.team = self.create_teams(self.topic, num_teams=1)[0]
        self.team_page = TeamPage(self.browser, self.course_id, team=self.team)
        # Need to have a membership to confirm it gets deleted as well.
        self.create_membership(self.user_info['username'], self.team['id'])
        self.team_page.visit()

    def test_cancel_delete(self):
        """
        Scenario: The user should be able to cancel the Delete Team dialog
        Given I am staff user for a course with a team
        When I visit the Team profile page
        Then I should see the Edit Team button
        And When I click edit team button
        Then I should see the Delete Team button
        When I click the delete team button
        And I cancel the prompt
        And I refresh the page
        Then I should still see the team
        """
        self.delete_team(cancel=True)
        self.assertTrue(self.team_management_page.is_browser_on_page())
        # Refresh to prove the team survived server-side, not just in the UI.
        self.browser.refresh()
        self.team_management_page.wait_for_page()
        self.assertEqual(
            ' '.join(('All Topics', self.topic['name'], self.team['name'])),
            self.team_management_page.header_page_breadcrumbs
        )

    @ddt.data('Moderator', 'Community TA', 'Administrator', None)
    def test_delete_team(self, role):
        """
        Scenario: The user should be able to see and navigate to the delete team page.
        Given I am staff user for a course with a team
        When I visit the Team profile page
        Then I should see the Edit Team button
        And When I click edit team button
        Then I should see the Delete Team button
        When I click the delete team button
        And I confirm the prompt
        Then I should see the browse teams page
        And the team should not be present
        """
        # If role is None, remain logged in as global staff
        if role is not None:
            AutoAuthPage(
                self.browser,
                course_id=self.course_id,
                staff=False,
                roles=role
            ).visit()
            self.team_page.visit()
        self.delete_team(require_notification=False)
        browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
        self.assertTrue(browse_teams_page.is_browser_on_page())
        self.assertNotIn(self.team['name'], browse_teams_page.team_names)

    def delete_team(self, **kwargs):
        """
        Delete a team. Passes `kwargs` to `confirm_prompt`.
        Expects edx.team.deleted event to be emitted, with correct course_id.
        Also expects edx.team.learner_removed event to be emitted for the
        membership that is removed as a part of the delete operation.
        """
        self.team_page.click_edit_team_button()
        self.team_management_page.wait_for_page()
        self.team_management_page.delete_team_button.click()
        if 'cancel' in kwargs and kwargs['cancel'] is True:
            # Cancelling emits no events, so no event assertions are made.
            confirm_prompt(self.team_management_page, **kwargs)
        else:
            expected_events = [
                {
                    'event_type': 'edx.team.deleted',
                    'event': {
                        'team_id': self.team['id']
                    }
                },
                {
                    'event_type': 'edx.team.learner_removed',
                    'event': {
                        'team_id': self.team['id'],
                        'remove_method': 'team_deleted',
                        'user_id': self.user_info['user_id']
                    }
                }
            ]
            with self.assert_events_match_during(
                    event_filter=self.only_team_events, expected_events=expected_events
            ):
                confirm_prompt(self.team_management_page, **kwargs)

    def test_delete_team_updates_topics(self):
        """
        Scenario: Deleting a team should update the team count on the topics page
        Given I am staff user for a course with a team
        And I delete a team
        When I navigate to the browse topics page
        Then the team count for the deleted team's topic should be updated
        """
        self.delete_team(require_notification=False)
        BrowseTeamsPage(self.browser, self.course_id, self.topic).click_all_topics()
        topics_page = BrowseTopicsPage(self.browser, self.course_id)
        self.assertTrue(topics_page.is_browser_on_page())
        self.teams_page.verify_topic_team_count(0)
@ddt.ddt
class EditTeamTest(TeamFormActions):
    """
    Tests for editing the team.
    """

    def setUp(self):
        super(EditTeamTest, self).setUp()
        # Log in as global staff so the edit controls are available.
        self.set_team_configuration(
            {'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
            global_staff=True
        )
        self.team = self.create_teams(self.topic, num_teams=1)[0]
        self.team_page = TeamPage(self.browser, self.course_id, team=self.team)
        self.team_page.visit()

    def test_staff_can_navigate_to_edit_team_page(self):
        """
        Scenario: The user should be able to see and navigate to the edit team page.
        Given I am staff user for a course with a team
        When I visit the Team profile page
        Then I should see the Edit Team button
        And When I click edit team button
        Then I should see the edit team page
        And I should see the edit team header
        And I should also see the help messages for fields
        """
        self.verify_and_navigate_to_edit_team_page()
        self.verify_all_fields_exist()

    def test_staff_can_edit_team_successfully(self):
        """
        Scenario: The staff should be able to edit team successfully.
        Given I am staff user for a course with a team
        When I visit the Team profile page
        Then I should see the Edit Team button
        And When I click edit team button
        Then I should see the edit team page
        And an analytics event should be fired
        When I edit all the fields with appropriate data
        And I click Update button
        Then I should see the page for my team with updated data
        """
        # Initial values come from create_teams (country AF, language aa).
        self.verify_team_info(
            name=self.team['name'],
            description=self.team['description'],
            location='Afghanistan',
            language='Afar'
        )
        self.verify_and_navigate_to_edit_team_page()
        self.fill_create_or_edit_form()

        # One edx.team.changed event is expected per modified field,
        # each carrying the old and new values.
        expected_events = [
            {
                'event_type': 'edx.team.changed',
                'event': {
                    'team_id': self.team['id'],
                    'field': 'country',
                    'old': 'AF',
                    'new': 'PK',
                    'truncated': [],
                }
            },
            {
                'event_type': 'edx.team.changed',
                'event': {
                    'team_id': self.team['id'],
                    'field': 'name',
                    'old': self.team['name'],
                    'new': self.TEAMS_NAME,
                    'truncated': [],
                }
            },
            {
                'event_type': 'edx.team.changed',
                'event': {
                    'team_id': self.team['id'],
                    'field': 'language',
                    'old': 'aa',
                    'new': 'en',
                    'truncated': [],
                }
            },
            {
                'event_type': 'edx.team.changed',
                'event': {
                    'team_id': self.team['id'],
                    'field': 'description',
                    'old': self.team['description'],
                    'new': self.TEAM_DESCRIPTION,
                    'truncated': [],
                }
            },
        ]

        with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
            self.team_management_page.submit_form()

        self.team_page.wait_for_page()
        self.verify_team_info(
            name=self.TEAMS_NAME,
            description=self.TEAM_DESCRIPTION,
            location='Pakistan',
            language='English'
        )

    def test_staff_can_cancel_the_team_edit(self):
        """
        Scenario: The user should be able to cancel the editing of team.
        Given I am staff user for a course with a team
        When I visit the Team profile page
        Then I should see the Edit Team button
        And When I click edit team button
        Then I should see the edit team page
        Then I should see the Edit Team header
        When I click Cancel button
        Then I should see team page page without changes.
        """
        self.verify_team_info(
            name=self.team['name'],
            description=self.team['description'],
            location='Afghanistan',
            language='Afar'
        )
        self.verify_and_navigate_to_edit_team_page()
        # Fill the form and then cancel: no field may be persisted.
        self.fill_create_or_edit_form()
        self.team_management_page.cancel_team()
        self.team_page.wait_for_page()
        self.verify_team_info(
            name=self.team['name'],
            description=self.team['description'],
            location='Afghanistan',
            language='Afar'
        )

    def test_student_cannot_see_edit_button(self):
        """
        Scenario: The student should not see the edit team button.
        Given I am student for a course with a team
        When I visit the Team profile page
        Then I should not see the Edit Team button
        """
        # Re-authenticate as a plain (non-staff, non-privileged) learner.
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        self.team_page.visit()
        self.assertFalse(self.team_page.edit_team_button_present)

    @ddt.data('Moderator', 'Community TA', 'Administrator')
    def test_discussion_privileged_user_can_edit_team(self, role):
        """
        Scenario: The user with specified role should see the edit team button.
        Given I am user with privileged role for a course with a team
        When I visit the Team profile page
        Then I should see the Edit Team button
        """
        kwargs = {
            'course_id': self.course_id,
            'staff': False
        }
        if role is not None:
            kwargs['roles'] = role

        AutoAuthPage(self.browser, **kwargs).visit()
        self.team_page.visit()
        self.teams_page.wait_for_page()
        self.assertTrue(self.team_page.edit_team_button_present)

        self.verify_team_info(
            name=self.team['name'],
            description=self.team['description'],
            location='Afghanistan',
            language='Afar'
        )
        # Privileged discussion roles can also perform the edit itself.
        self.verify_and_navigate_to_edit_team_page()
        self.fill_create_or_edit_form()
        self.team_management_page.submit_form()
        self.team_page.wait_for_page()
        self.verify_team_info(
            name=self.TEAMS_NAME,
            description=self.TEAM_DESCRIPTION,
            location='Pakistan',
            language='English'
        )

    def test_page_viewed_event(self):
        """
        Scenario: Visiting the edit team page should fire a page viewed event.
        Given I am enrolled in a course with a team configuration and a topic
        When I visit the edit team page
        Then my browser should post a page viewed event
        """
        events = [{
            'event_type': 'edx.team.page_viewed',
            'event': {
                'page_name': 'edit-team',
                'topic_id': self.topic['id'],
                'team_id': self.team['id']
            }
        }]
        with self.assert_events_match_during(self.only_team_events, expected_events=events):
            self.verify_and_navigate_to_edit_team_page()
@ddt.ddt
class EditMembershipTest(TeamFormActions):
    """
    Tests for administrating from the team membership page
    """

    def setUp(self):
        super(EditMembershipTest, self).setUp()
        # Log in as global staff so the membership controls are available.
        self.set_team_configuration(
            {'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
            global_staff=True
        )
        self.team_management_page = TeamManagementPage(self.browser, self.course_id, self.topic)
        self.team = self.create_teams(self.topic, num_teams=1)[0]
        # Make sure a user exists on this team so we can edit the membership.
        self.create_membership(self.user_info['username'], self.team['id'])
        self.edit_membership_page = EditMembershipPage(self.browser, self.course_id, self.team)
        self.team_page = TeamPage(self.browser, self.course_id, team=self.team)

    def edit_membership_helper(self, role, cancel=False):
        """
        Helper for common functionality in edit membership tests.

        Checks for all relevant assertions about membership being removed,
        including verify edx.team.learner_removed events are emitted.

        Arguments:
            role (str or None): discussion role to authenticate as; None
                stays logged in as the global staff user from setUp.
            cancel (bool): if True, dismiss the removal dialog and assert
                the membership survives; otherwise confirm and assert the
                removal event fires and the member list is empty.
        """
        if role is not None:
            AutoAuthPage(
                self.browser,
                course_id=self.course_id,
                staff=False,
                roles=role
            ).visit()
        self.team_page.visit()
        self.team_page.click_edit_team_button()
        self.team_management_page.wait_for_page()
        self.assertTrue(
            self.team_management_page.membership_button_present
        )
        self.team_management_page.click_membership_button()
        self.edit_membership_page.wait_for_page()
        self.edit_membership_page.click_first_remove()
        if cancel:
            # Cancelling the dialog must leave the membership intact.
            self.edit_membership_page.cancel_delete_membership_dialog()
            self.assertEqual(self.edit_membership_page.team_members, 1)
        else:
            # Confirming must emit a learner_removed event attributed to an
            # admin-initiated removal, and empty the member list.
            expected_events = [
                {
                    'event_type': 'edx.team.learner_removed',
                    'event': {
                        'team_id': self.team['id'],
                        'remove_method': 'removed_by_admin',
                        'user_id': self.user_info['user_id']
                    }
                }
            ]
            with self.assert_events_match_during(
                    event_filter=self.only_team_events, expected_events=expected_events
            ):
                self.edit_membership_page.confirm_delete_membership_dialog()
            self.assertEqual(self.edit_membership_page.team_members, 0)
        # Fix: is_browser_on_page is a method; the original asserted the bound
        # method object itself, which is always truthy. Call it so the check
        # actually verifies we remained on the membership page (the sibling
        # call sites in this file invoke it with parentheses as well).
        self.assertTrue(self.edit_membership_page.is_browser_on_page())

    @ddt.data('Moderator', 'Community TA', 'Administrator', None)
    def test_remove_membership(self, role):
        """
        Scenario: The user should be able to remove a membership
        Given I am staff user for a course with a team
        When I visit the Team profile page
        Then I should see the Edit Team button
        And When I click edit team button
        Then I should see the Edit Membership button
        And When I click the edit membership button
        Then I should see the edit membership page
        And When I click the remove button and confirm the dialog
        Then my membership should be removed, and I should remain on the page
        """
        self.edit_membership_helper(role, cancel=False)

    @ddt.data('Moderator', 'Community TA', 'Administrator', None)
    def test_cancel_remove_membership(self, role):
        """
        Scenario: The user should be able to remove a membership
        Given I am staff user for a course with a team
        When I visit the Team profile page
        Then I should see the Edit Team button
        And When I click edit team button
        Then I should see the Edit Membership button
        And When I click the edit membership button
        Then I should see the edit membership page
        And When I click the remove button and cancel the dialog
        Then my membership should not be removed, and I should remain on the page
        """
        self.edit_membership_helper(role, cancel=True)
@attr('shard_5')
@ddt.ddt
class TeamPageTest(TeamsTabBase):
    """Tests for viewing a specific team"""

    SEND_INVITE_TEXT = 'Send this link to friends so that they can join too.'

    def setUp(self):
        super(TeamPageTest, self).setUp()
        self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}

    def _set_team_configuration_and_membership(
            self,
            max_team_size=10,
            membership_team_index=0,
            visit_team_index=0,
            create_membership=True,
            another_user=False):
        """
        Set team configuration.

        Arguments:
            max_team_size (int): number of users a team can have
            membership_team_index (int): index of team user will join
            visit_team_index (int): index of team user will visit
            create_membership (bool): whether to create membership or not
            another_user (bool): another user to visit a team
        """
        #pylint: disable=attribute-defined-outside-init
        self.set_team_configuration(
            {'course_id': self.course_id, 'max_team_size': max_team_size, 'topics': [self.topic]}
        )
        # Two teams are created so membership and visit targets can differ.
        self.teams = self.create_teams(self.topic, 2)

        if create_membership:
            self.create_membership(self.user_info['username'], self.teams[membership_team_index]['id'])

        if another_user:
            AutoAuthPage(self.browser, course_id=self.course_id).visit()

        self.team_page = TeamPage(self.browser, self.course_id, self.teams[visit_team_index])

    def setup_thread(self):
        """
        Create and return a thread for this test's discussion topic.
        """
        thread = Thread(
            id="test_thread_{}".format(uuid4().hex),
            commentable_id=self.teams[0]['discussion_topic_id'],
            body="Dummy text body."
        )
        thread_fixture = MultipleThreadFixture([thread])
        thread_fixture.push()
        return thread

    def setup_discussion_user(self, role=None, staff=False):
        """Set this test's user to have the given role in its
        discussions. Role is one of 'Community TA', 'Moderator',
        'Administrator', or 'Student'.
        """
        kwargs = {
            'course_id': self.course_id,
            'staff': staff
        }
        if role is not None:
            kwargs['roles'] = role

        #pylint: disable=attribute-defined-outside-init
        self.user_info = AutoAuthPage(self.browser, **kwargs).visit().user_info

    def verify_teams_discussion_permissions(self, should_have_permission):
        """Verify that the teams discussion component is in the correct state
        for the test user. If `should_have_permission` is True, assert that
        the user can see controls for posting replies, voting, editing, and
        deleting. Otherwise, assert that those controls are hidden.
        """
        thread = self.setup_thread()
        self.team_page.visit()
        self.assertEqual(self.team_page.discussion_id, self.teams[0]['discussion_topic_id'])
        discussion = self.team_page.discussion_page
        self.assertTrue(discussion.is_browser_on_page())
        self.assertTrue(discussion.is_discussion_expanded())
        self.assertEqual(discussion.get_num_displayed_threads(), 1)
        self.assertTrue(discussion.has_thread(thread['id']))
        # Same checks either way; only the expected presence flips.
        assertion = self.assertTrue if should_have_permission else self.assertFalse
        assertion(discussion.q(css='.post-header-actions').present)
        assertion(discussion.q(css='.add-response').present)
        assertion(discussion.q(css='.new-post-btn').present)

    def test_discussion_on_my_team_page(self):
        """
        Scenario: Team Page renders a discussion for a team to which I belong.
        Given I am enrolled in a course with a team configuration, a topic,
        and a team belonging to that topic of which I am a member
        When the team has a discussion with a thread
        And I visit the Team page for that team
        Then I should see a discussion with the correct discussion_id
        And I should see the existing thread
        And I should see controls to change the state of the discussion
        """
        self._set_team_configuration_and_membership()
        self.verify_teams_discussion_permissions(True)

    @ddt.data(True, False)
    def test_discussion_on_other_team_page(self, is_staff):
        """
        Scenario: Team Page renders a team discussion for a team to which I do
        not belong.
        Given I am enrolled in a course with a team configuration, a topic,
        and a team belonging to that topic of which I am not a member
        When the team has a discussion with a thread
        And I visit the Team page for that team
        Then I should see a discussion with the correct discussion_id
        And I should see the team's thread
        And I should not see controls to change the state of the discussion
        """
        self._set_team_configuration_and_membership(create_membership=False)
        self.setup_discussion_user(staff=is_staff)
        self.verify_teams_discussion_permissions(False)

    @ddt.data('Moderator', 'Community TA', 'Administrator')
    def test_discussion_privileged(self, role):
        # Privileged discussion roles keep full controls even as non-members.
        self._set_team_configuration_and_membership(create_membership=False)
        self.setup_discussion_user(role=role)
        self.verify_teams_discussion_permissions(True)

    def assert_team_details(self, num_members, is_member=True, max_size=10):
        """
        Verifies that user can see all the information, present on detail page according to their membership status.

        Arguments:
            num_members (int): number of users in a team
            is_member (bool) default True: True if request user is member else False
            max_size (int): number of users a team can have
        """
        self.assertEqual(
            self.team_page.team_capacity_text,
            self.team_page.format_capacity_text(num_members, max_size)
        )
        self.assertEqual(self.team_page.team_location, 'Afghanistan')
        self.assertEqual(self.team_page.team_language, 'Afar')
        self.assertEqual(self.team_page.team_members, num_members)
        if num_members > 0:
            self.assertTrue(self.team_page.team_members_present)
        else:
            self.assertFalse(self.team_page.team_members_present)
        if is_member:
            # Members see membership text plus the Leave Team / New Post controls.
            self.assertEqual(self.team_page.team_user_membership_text, 'You are a member of this team.')
            self.assertTrue(self.team_page.team_leave_link_present)
            self.assertTrue(self.team_page.new_post_button_present)
        else:
            self.assertEqual(self.team_page.team_user_membership_text, '')
            self.assertFalse(self.team_page.team_leave_link_present)
            self.assertFalse(self.team_page.new_post_button_present)

    def test_team_member_can_see_full_team_details(self):
        """
        Scenario: Team member can see full info for team.
        Given I am enrolled in a course with a team configuration, a topic,
        and a team belonging to that topic of which I am a member
        When I visit the Team page for that team
        Then I should see the full team detail
        And I should see the team members
        And I should see my team membership text
        And I should see the language & country
        And I should see the Leave Team and Invite Team
        """
        self._set_team_configuration_and_membership()
        self.team_page.visit()
        self.assert_team_details(
            num_members=1,
        )

    def test_other_users_can_see_limited_team_details(self):
        """
        Scenario: Users who are not member of this team can only see limited info for this team.
        Given I am enrolled in a course with a team configuration, a topic,
        and a team belonging to that topic of which I am not a member
        When I visit the Team page for that team
        Then I should not see full team detail
        And I should see the team members
        And I should not see my team membership text
        And I should not see the Leave Team and Invite Team links
        """
        self._set_team_configuration_and_membership(create_membership=False)
        self.team_page.visit()
        self.assert_team_details(is_member=False, num_members=0)

    def test_user_can_navigate_to_members_profile_page(self):
        """
        Scenario: User can navigate to profile page via team member profile image.
        Given I am enrolled in a course with a team configuration, a topic,
        and a team belonging to that topic of which I am a member
        When I visit the Team page for that team
        Then I should see profile images for the team members
        When I click on the first profile image
        Then I should be taken to the user's profile page
        And I should see the username on profile page
        """
        self._set_team_configuration_and_membership()
        self.team_page.visit()
        learner_name = self.team_page.first_member_username
        self.team_page.click_first_profile_image()
        learner_profile_page = LearnerProfilePage(self.browser, learner_name)
        learner_profile_page.wait_for_page()
        learner_profile_page.wait_for_field('username')
        self.assertTrue(learner_profile_page.field_is_visible('username'))

    def test_join_team(self):
        """
        Scenario: User can join a Team if not a member already..
        Given I am enrolled in a course with a team configuration, a topic,
        and a team belonging to that topic
        And I visit the Team page for that team
        Then I should see Join Team button
        And I should not see New Post button
        When I click on Join Team button
        Then there should be no Join Team button and no message
        And an analytics event should be emitted
        And I should see the updated information under Team Details
        And I should see New Post button
        And if I switch to "My Team", the team I have joined is displayed
        """
        self._set_team_configuration_and_membership(create_membership=False)
        teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
        teams_page.visit()
        teams_page.view_first_team()
        self.assertTrue(self.team_page.join_team_button_present)
        expected_events = [
            {
                'event_type': 'edx.team.learner_added',
                'event': {
                    'add_method': 'joined_from_team_view'
                }
            }
        ]
        with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
            self.team_page.click_join_team_button()
        self.assertFalse(self.team_page.join_team_button_present)
        self.assertFalse(self.team_page.join_team_message_present)
        self.assert_team_details(num_members=1, is_member=True)

        # Verify that if one switches to "My Team" without reloading the page, the newly joined team is shown.
        self.teams_page.click_all_topics()
        self.verify_my_team_count(1)

    def test_already_member_message(self):
        """
        Scenario: User should see `You are already in a team` if user is a
        member of other team.
        Given I am enrolled in a course with a team configuration, a topic,
        and a team belonging to that topic
        And I am already a member of a team
        And I visit a team other than mine
        Then I should see `You are already in a team` message
        """
        # Join team 0 but visit team 1.
        self._set_team_configuration_and_membership(membership_team_index=0, visit_team_index=1)
        self.team_page.visit()
        self.assertEqual(self.team_page.join_team_message, 'You already belong to another team.')
        self.assert_team_details(num_members=0, is_member=False)

    def test_team_full_message(self):
        """
        Scenario: User should see `Team is full` message when team is full.
        Given I am enrolled in a course with a team configuration, a topic,
        and a team belonging to that topic
        And team has no space left
        And I am not a member of any team
        And I visit the team
        Then I should see `Team is full` message
        """
        # Fill the single slot with the first user, then visit as another user.
        self._set_team_configuration_and_membership(
            create_membership=True,
            max_team_size=1,
            membership_team_index=0,
            visit_team_index=0,
            another_user=True
        )
        self.team_page.visit()
        self.assertEqual(self.team_page.join_team_message, 'This team is full.')
        self.assert_team_details(num_members=1, is_member=False, max_size=1)

    def test_leave_team(self):
        """
        Scenario: User can leave a team.
        Given I am enrolled in a course with a team configuration, a topic,
        and a team belonging to that topic
        And I am a member of team
        And I visit the team
        And I should not see Join Team button
        And I should see New Post button
        Then I should see Leave Team link
        When I click on Leave Team link
        Then user should be removed from team
        And an analytics event should be emitted
        And I should see Join Team button
        And I should not see New Post button
        And if I switch to "My Team", the team I have left is not displayed
        """
        self._set_team_configuration_and_membership()
        self.team_page.visit()
        self.assertFalse(self.team_page.join_team_button_present)
        self.assert_team_details(num_members=1)
        expected_events = [
            {
                'event_type': 'edx.team.learner_removed',
                'event': {
                    'remove_method': 'self_removal'
                }
            }
        ]
        with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
            self.team_page.click_leave_team_link()
        self.assert_team_details(num_members=0, is_member=False)
        self.assertTrue(self.team_page.join_team_button_present)

        # Verify that if one switches to "My Team" without reloading the page, the old team no longer shows.
        self.teams_page.click_all_topics()
        self.verify_my_team_count(0)

    def test_page_viewed_event(self):
        """
        Scenario: Visiting the team profile page should fire a page viewed event.
        Given I am enrolled in a course with a team configuration and a topic
        When I visit the team profile page
        Then my browser should post a page viewed event
        """
        self._set_team_configuration_and_membership()
        events = [{
            'event_type': 'edx.team.page_viewed',
            'event': {
                'page_name': 'single-team',
                'topic_id': self.topic['id'],
                'team_id': self.teams[0]['id']
            }
        }]
        with self.assert_events_match_during(self.only_team_events, expected_events=events):
            self.team_page.visit()
|
nanolearningllc/edx-platform-cypress-2
|
common/test/acceptance/tests/lms/test_teams.py
|
Python
|
agpl-3.0
| 80,479
|
[
"VisIt"
] |
ff0a785480564d8686c8cf907bf2db9138cb3d0df1ca95e3536ea95183e22c43
|
#!/usr/bin/env python
'''
Master loader for CANON October (Fall) Campaign 2020
'''

import os
import sys
from datetime import datetime

# Make the parent directory importable so the CANON package can be found
# when this script is run directly.
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir)

from CANON import CANONLoader
import timing

cl = CANONLoader('stoqs_canon_october2020', 'CANON - October 2020',
                 description='October 2020 shipless campaign in Monterey Bay (CN20F)',
                 x3dTerrains={
                     'https://stoqs.mbari.org/x3d/Monterey25_10x/Monterey25_10x_scene.x3d': {
                         'name': 'Monterey25_10x',
                         'position': '-2822317.31255 -4438600.53640 3786150.85474',
                         'orientation': '0.89575 -0.31076 -0.31791 1.63772',
                         'centerOfRotation': '-2711557.9403829873 -4331414.329506527 3801353.4691465236',
                         'VerticalExaggeration': '10',
                     },
                 },
                 grdTerrain=os.path.join(parentDir, 'Monterey25.grd')
                 )

# Campaign time window used for all platform loads below.
startdate = datetime(2020, 10, 4)
enddate = datetime(2020, 10, 29)

# default location of thredds and dods data:
cl.tdsBase = 'http://odss.mbari.org/thredds/'
cl.dodsBase = cl.tdsBase + 'dodsC/'

######################################################################
#  GLIDERS
######################################################################
# Glider data files from CeNCOOS thredds server
# L_662a updated parameter names in netCDF file
cl.l_662a_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/Line67/'
cl.l_662a_files = [ 'OS_Glider_L_662_20200615_TS.nc', ]
cl.l_662a_parms = ['temperature', 'salinity', 'fluorescence','oxygen']
cl.l_662a_startDatetime = startdate
cl.l_662a_endDatetime = enddate

# NPS_34 ##
cl.nps34_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/MBARI/'
cl.nps34_files = [ 'OS_Glider_NPS_G34_20201006_TS.nc' ]
cl.nps34_parms = ['TEMP', 'PSAL', 'FLU2', 'OXYG']
cl.nps34_startDatetime = startdate
cl.nps34_endDatetime = enddate

# NPS_29 ##
cl.nps29_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/MBARI/'
cl.nps29_files = [ 'OS_Glider_NPS_G29_20201006_TS.nc' ]
cl.nps29_parms = ['TEMP', 'PSAL', 'FLU2', 'OXYG']
cl.nps29_startDatetime = startdate
cl.nps29_endDatetime = enddate

######################################################################
#  Wavegliders
######################################################################
# WG Tex - All instruments combined into one file - one time coordinate
##cl.wg_tex_base = cl.dodsBase + 'CANON_september2013/Platforms/Gliders/WG_Tex/final/'
##cl.wg_tex_files = [ 'WG_Tex_all_final.nc' ]
##cl.wg_tex_parms = [ 'wind_dir', 'wind_spd', 'atm_press', 'air_temp', 'water_temp', 'sal', 'density', 'bb_470', 'bb_650', 'chl' ]
##cl.wg_tex_startDatetime = startdate
##cl.wg_tex_endDatetime = enddate

# WG Hansen - All instruments combined into one file - one time coordinate
cl.wg_Hansen_base = 'http://dods.mbari.org/opendap/data/waveglider/deployment_data/'
cl.wg_Hansen_files = [
                      'wgHansen/20201005/realTime/20201005.nc'
                     ]
cl.wg_Hansen_parms = [ 'wind_dir', 'avg_wind_spd', 'max_wind_spd', 'atm_press', 'air_temp', 'water_temp_float', 'sal_float', 'water_temp_sub',
                       'sal_sub', 'bb_470', 'bb_650', 'chl', 'beta_470', 'beta_650', 'pH', 'O2_conc_float','O2_conc_sub' ] # two ctds (_float, _sub), no CO2
cl.wg_Hansen_depths = [ 0 ]
cl.wg_Hansen_startDatetime = startdate
cl.wg_Hansen_endDatetime = enddate

# WG Tiny - All instruments combined into one file - one time coordinate
# NOTE(review): deployment directory is dated 20201006 but the file is
# 20201005.nc — confirm against the DODS server that this is intentional.
cl.wg_Tiny_base = 'http://dods.mbari.org/opendap/data/waveglider/deployment_data/'
cl.wg_Tiny_files = [
                      'wgTiny/20201006/realTime/20201005.nc'
                   ]
cl.wg_Tiny_parms = [ 'wind_dir', 'avg_wind_spd', 'max_wind_spd', 'atm_press', 'air_temp', 'water_temp', 'sal',  'bb_470', 'bb_650', 'chl',
                     'beta_470', 'beta_650', 'pCO2_water', 'pCO2_air', 'pH', 'O2_conc' ]
cl.wg_Tiny_depths = [ 0 ]
cl.wg_Tiny_startDatetime = startdate
cl.wg_Tiny_endDatetime = enddate

######################################################################
#  MOORINGS
######################################################################
cl.m1_base = 'http://dods.mbari.org/opendap/data/ssdsdata/deployments/m1/'
cl.m1_files = [
  '202008/OS_M1_20200825hourly_CMSTV.nc', ]
cl.m1_parms = [
  'eastward_sea_water_velocity_HR', 'northward_sea_water_velocity_HR',
  'SEA_WATER_SALINITY_HR', 'SEA_WATER_TEMPERATURE_HR', 'SW_FLUX_HR', 'AIR_TEMPERATURE_HR',
  'EASTWARD_WIND_HR', 'NORTHWARD_WIND_HR', 'WIND_SPEED_HR'
]
cl.m1_startDatetime = startdate
cl.m1_endDatetime = enddate

# Execute the load
cl.process_command_line()

# --test loads a fixed coarse stride; otherwise honor an explicit --stride.
if cl.args.test:
    cl.stride = 10
elif cl.args.stride:
    cl.stride = cl.args.stride

cl.loadM1()
##cl.loadL_662a()
cl.load_NPS29()
cl.load_NPS34()
cl.load_wg_Tiny()
cl.load_wg_Hansen()

# Problem with loading both temperature & salinity - for now load just one of them
#_cl.makai_base = 'http://dods.mbari.org/opendap/data/lrauv/makai/realtime/sbdlogs/2020/202010/'
#_cl.makai_files = ['20201008T014813/shore_i.nc']
#_cl.makai_parms = ['chlorophyll', 'temperature', 'salinity']
#_cl.loadLRAUV('makai', critSimpleDepthTime=0.1, build_attrs=False)

# Previously "fixed" load
#_cl.whoidhs_base = 'http://dods.mbari.org/opendap/data/lrauv/whoidhs/realtime/sbdlogs/2019/201906/'
#_cl.whoidhs_files = ['20190612T024430/shore_i.nc']
#_cl.whoidhs_parms = ['concentration_of_chromophoric_dissolved_organic_matter_in_sea_water', 'mass_concentration_of_chlorophyll_in_sea_water', ]
##cl.whoidhs_parms = ['concentration_of_chromophoric_dissolved_organic_matter_in_sea_water']
##cl.whoidhs_parms = ['mass_concentration_of_chlorophyll_in_sea_water', ]
#_cl.loadLRAUV('whoidhs', critSimpleDepthTime=0.1, build_attrs=False)

##cl.loadLRAUV('makai', startdate, enddate, critSimpleDepthTime=0.1, sbd_logs=True,
##             parameters=['chlorophyll', 'temperature'])
##cl.loadLRAUV('pontus', startdate, enddate, critSimpleDepthTime=0.1, sbd_logs=True,
##             parameters=['chlorophyll', 'temperature'])
cl.loadLRAUV('makai', startdate, enddate)
cl.loadLRAUV('pontus', startdate, enddate)
cl.loadDorado(startdate, enddate, build_attrs=True)
##cl.loadSubSamples()

# Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed
cl.addTerrainResources()

print("All Done.")
|
stoqs/stoqs
|
stoqs/loaders/CANON/loadCANON_october2020.py
|
Python
|
gpl-3.0
| 6,445
|
[
"NetCDF"
] |
4aaa242da3c7f15fa8aaa84ede8a3a805483526dbc9aa9038191d8e0c0219e41
|
import numpy as np
from galaxy_analysis.plot.plot_styles import *
import matplotlib.pyplot as plt
import yt
from yt.fields.api import ValidateParameter
from matplotlib.colors import LogNorm
from galaxy_analysis.analysis import Galaxy
from multiprocessing import Pool
from contextlib import closing
import itertools
import os
import glob
import sys
def plot_field(dsname, mass_field, paperstyle=False, show_colorbar=True):
    """Plot the disk-mass-fraction projection and a number-density projection
    for one dataset.

    Parameters
    ----------
    dsname : str
        Name of the Enzo output (e.g. "DD0100") passed to Galaxy.
    mass_field : str
        Name of the mass field (e.g. "C_Mass") whose disk fraction is plotted.
    paperstyle : bool
        If True, omit the time annotation on the fraction panel.
    show_colorbar : bool
        If True, attach a horizontal colorbar to each panel.

    Writes two PNGs into ./mixing_plots/ and returns None.
    """
    fontsize = 'large'
    field_to_plot = mass_field + "_disk_fraction"
    field_parameter = "total_field_mass"

    gal = Galaxy(dsname)

    # Fixed-resolution covering grid centered on the domain; dims chosen to
    # span the disk region at the finest grid level.
    dims = [700, 700, 56]
    pix = np.array(dims) / 2.0
    dist = pix * np.min(gal.df['dx'].to('pc'))
    LE = gal.ds.domain_center - dist
    cg = gal.ds.smoothed_covering_grid(level=int(np.max(gal.df['grid_level'])),
                                       left_edge=LE,
                                       dims=dims)

    # Normalization: total mass of `mass_field` currently in the disk.
    disk_mass = np.sum(gal.disk[mass_field].to('Msun').value)
    cg.set_field_parameter(field_parameter, disk_mass * yt.units.Msun)

    def _disk_mass_fraction(field, data):
        # Cell mass in `mass_field` divided by the total disk mass, passed
        # in as a field parameter so the normalization is per-dataset.
        if data.has_field_parameter(field_parameter):
            mtot = data.get_field_parameter(field_parameter)
            if hasattr(mtot, 'convert_to_units'):
                mtot = mtot.convert_to_units('Msun')
            else:
                mtot = mtot * yt.units.Msun
        else:
            raise ValueError("field parameter '%s' is not set" % field_parameter)
        return data[mass_field].convert_to_units('Msun') / mtot

    gal.ds.add_field(field_to_plot, function=_disk_mass_fraction,
                     units="", take_log=False, force_override=True,
                     validators=[ValidateParameter(field_parameter)])

    # Project along the z axis (axis 2): sum the fraction, max the density.
    axis = 2
    proj_data = np.sum(cg[field_to_plot], axis=axis)
    n_proj_data = np.max(cg['number_density'].value, axis=axis)

    if axis == 2:
        extent = [-dist[0], dist[0], -dist[1], dist[1]]
        fsize = (8, 8)
    elif axis == 0 or axis == 1:
        extent = [-dist[1], dist[1], -dist[2], dist[2]]
        proj_data = proj_data.T
        fsize = (12.5, 1)
        n_proj_data = n_proj_data.T

    # --- Panel 1: disk mass fraction ---
    pp = plt.imshow(proj_data, norm=LogNorm(), extent=extent)
    pp.set_clim(1.0E-10, 1.0E-1)
    pp.set_cmap("magma")
    pp.axes.yaxis.set_visible(False)
    pp.axes.xaxis.set_visible(False)
    pp.axes.set_frame_on(False)
    pp.figure.set_size_inches(fsize)
    pp.figure.patch.set_facecolor('black')

    # Annotate a 250 pc scale bar in the lower right corner.
    anncoord = (0.9, 0.1)
    dim = np.shape(proj_data)
    x = 0.5
    di = 250.0 / (1.8)  # scale-bar length in pixels (assumes 1.8 pc/pixel -- TODO confirm)
    pp.axes.plot([dim[0] * x, dim[0] * x + di],
                 [-dim[1] * 0.825] * 2, lw=3, color="white")
    pp.axes.annotate('250 pc',
                     xy=anncoord, xycoords='axes fraction',
                     xytext=anncoord, textcoords='axes fraction', color="white",
                     horizontalalignment='right', verticalalignment='top', fontsize=fontsize)

    start = 220.0  # time offset (Myr) subtracted from the dataset's current time
    anncoord2 = (0.225, 0.96)
    if not paperstyle:
        pp.axes.annotate("%.1f Myr" % (gal.ds.current_time.to('Myr').value - start),
                         xy=anncoord2, xycoords='axes fraction',
                         xytext=anncoord2, textcoords='axes fraction', color="white",
                         horizontalalignment='right', verticalalignment='top', fontsize=fontsize)

    outname = "./mixing_plots/" + dsname + "_" + mass_field + "_disk_fraction.png"
    if show_colorbar:
        cax = plt.colorbar(orientation="horizontal")
        # fix: a first cticks list was immediately shadowed in the original;
        # only the list actually used is kept.
        cticks = [1.0E-9, 1.0E-7, 1.0E-5, 1.0E-3, 1.0E-1]
        cax.set_ticks(cticks)
        cax.set_ticklabels([r"10$^{%2i}$" % (np.log10(x)) for x in cticks])
        cax.set_label("Tracer Fraction (Arbitrary Units)")
    plt.savefig(outname, bbox_inches="tight", pad_inches=0.0)
    plt.close()

    # --- Panel 2: number density ---
    pp = plt.imshow(n_proj_data, norm=LogNorm(),
                    extent=extent)
    pp.set_clim(1.0E-4, 1.0E3)
    pp.axes.yaxis.set_visible(False)
    pp.axes.xaxis.set_visible(False)
    pp.axes.set_frame_on(False)
    pp.figure.set_size_inches(fsize)
    pp.figure.patch.set_facecolor('black')

    # Same 250 pc scale bar on the density panel.
    anncoord = (0.9, 0.1)
    dim = np.shape(n_proj_data)
    x = 0.5
    di = 250.0 / (1.8)
    pp.axes.plot([dim[0] * x, dim[0] * x + di],
                 [-dim[1] * 0.825] * 2, lw=3, color="white")
    pp.axes.annotate('250 pc',
                     xy=anncoord, xycoords='axes fraction',
                     xytext=anncoord, textcoords='axes fraction', color="white",
                     horizontalalignment='right', verticalalignment='top', fontsize=fontsize)

    start = 220.0
    anncoord2 = (0.225, 0.96)
    # NOTE(review): unlike panel 1, the time stamp here is not gated on
    # `paperstyle` -- preserved as-is, confirm whether intentional.
    pp.axes.annotate("%.1f Myr" % (gal.ds.current_time.to('Myr').value - start),
                     xy=anncoord2, xycoords='axes fraction',
                     xytext=anncoord2, textcoords='axes fraction', color="white",
                     horizontalalignment='right', verticalalignment='top', fontsize=fontsize)

    outname = "./mixing_plots/" + dsname + "_" + "ndens.png"
    if show_colorbar:
        cax = plt.colorbar(orientation="horizontal")
        cticks = [1.0E-4, 1.0E-2, 1.0, 100.0]
        cax.set_ticks(cticks)
        cax.set_ticklabels([r"10$^{%2i}$" % (np.log10(x)) for x in cticks])
        cax.set_label(r"n (cm$^{-3}$)")
    plt.savefig(outname, bbox_inches="tight", pad_inches=0.0)
    # fix: the second figure was never closed, leaking a matplotlib figure
    # per call when this is run over many datasets.
    plt.close()

    return
if __name__ == "__main__":
    # CLI: script.py [mass_field [imin imax [di [nproc]]]]
    mass_field = "C_Mass"
    nproc = 1
    di = 1  # fix: previously undefined when only imin/imax were supplied (NameError)
    ds_list = None

    if len(sys.argv) >= 2:
        mass_field = str(sys.argv[1])
    if len(sys.argv) >= 4:
        imin = int(sys.argv[2])
        imax = int(sys.argv[3])
    else:
        # No explicit range: plot every DD???? output found in cwd.
        ds_list = np.sort(glob.glob('DD????'))
    if len(sys.argv) >= 5:
        di = int(sys.argv[4])
    if len(sys.argv) >= 6:
        nproc = int(sys.argv[5])

    if ds_list is None:
        ds_list = ["DD%04i" % (i) for i in np.arange(imin, imax, di)]

    if nproc > 1:
        def _parallel_loop(dsname):
            # Worker: one dataset per task; must be module-level picklable.
            plot_field(dsname, mass_field)
            return

        # Process the dataset list in batches of `nproc`; zip_longest pads the
        # final batch with None, which is filtered out below.
        for sub_list in itertools.zip_longest(*(iter(ds_list),) * nproc):
            sub_list = [s for s in sub_list if s is not None]
            reduced_nproc = np.min([len(sub_list), nproc])

            pool = Pool(reduced_nproc)
            results = pool.map_async(_parallel_loop, sub_list)
            pool.close()
            pool.join()
    else:
        for dsname in ds_list:
            plot_field(dsname, mass_field)
|
aemerick/galaxy_analysis
|
misc/mixing_experiment_plot.py
|
Python
|
mit
| 7,363
|
[
"Galaxy"
] |
e622f789c1d029318a8c0e2c606ea730a6a7eda51367de3de61296d101c3f287
|
# Modified from moose-example/synapse_tutorial.py ---
import moose
import random
import numpy as np
def test_synapse():
    """
    In this example we walk through creation of a vector of IntFire
    elements and setting up synaptic connection between them. Synapse on
    IntFire elements is an example of ElementField - elements that do not
    exist on their own, but only as part of another element. This example
    also illustrates various operations on `vec` objects and
    ElementFields.
    """
    size = 1024  # number of IntFire objects in a vec
    delayMin = 0
    delayMax = 4
    thresh = 0.8
    weightMax = 0.5

    # The above sets the constants we shall use in this example. Now we create
    # a vector of IntFire elements of size `size`.
    net = moose.IntFire("/network", size)

    # This creates a `vec` of `IntFire` elements of size 1024 and returns the
    # first `element`, i.e. "/network[0]".
    net = moose.element("/network[0]")

    # You need now to provide synaptic input to the network
    synh = moose.SimpleSynHandler("/network/synh", size)

    # These need to be connected to the nodes in the network
    moose.connect(synh, "activationOut", net, "activation", "OneToOne")

    # You can access the underlying vector of elements using the `vec` field on
    # any element. This is very useful for vectorized field access:
    _vm = [thresh / 2.0] * size
    net.vec.Vm = _vm
    assert np.allclose(net.vec.Vm, _vm)

    # The right part of the assigment creates a Python list of length `size`
    # with each element set to `thresh/2.0`, which is 0.4. You can index into
    # the `vec` to access individual elements' field:
    print(net.vec[1].Vm)
    assert net.vec[1].Vm == 0.4

    # `SimpleSynHandler` class has an `ElementField` called `synapse`. It is
    # just like a `vec` above in terms of field access, but by default its size
    # is 0.
    assert len(synh.synapse) == 0
    print(len(synh.synapse))

    # To actually create synapses, you can explicitly assign the `num` field of
    # this, or set the `numSynapses` field of the `IntFire` element. There are
    # some functions which can implicitly set the size of the `ElementField`.
    synh.numSynapses = 3
    assert len(synh.synapse) == 3
    print(len(synh.synapse))

    # Resizing is allowed; the field reflects the new size immediately.
    synh.numSynapses = 4
    print(synh.synapse, 111)
    assert len(synh.synapse) == 4, (4, len(synh.synapse))
    print(len(synh.synapse))

    # Now you can index into `net.synapse` as if it was an array.
    print("Before:", synh.synapse[0].delay)
    assert synh.synapse[0].delay == 0.0
    synh.synapse[0].delay = 1.0
    assert synh.synapse[0].delay == 1.0
    print("After:", synh.synapse[0].delay)

    # You could do the same vectorized assignment as with `vec` directly:
    syns = synh.synapse.vec
    syns.weight = [0.2] * len(syns)
    assert np.allclose(syns.weight, 0.2), syns.weight
    print(syns.weight)

    # You can create the synapses and assign the weights and delays using loops:
    for syn in synh.vec:
        syn.numSynapses = random.randint(1, 10)
        # create synapse fields with random size between 1 and 10, end points
        # included. Below is one (inefficient) way of setting the individual weights of
        # the elements in 'synapse'
        syns = syn.synapse
        for ii in range(len(syns)):
            syns[ii].weight = random.random() * weightMax
        # This is a more efficient way - rhs of `=` is list comprehension in
        # Python and rather fast.
        syns.delay = [
            delayMin + random.random() * delayMax for ii in range(len(syn.synapse))
        ]
        # An even faster way will be to use numpy.random.rand(size) which
        # produces array of random numbers uniformly distributed between 0 and
        # 1
        syns.delay = delayMin + np.random.rand(len(syn.synapse)) * delayMax

    # Now display the results, we use slice notation on `vec` to show the
    # values of delay and weight for the first 5 elements in `/network`
    for syn in synh.vec[:5]:
        print("Delays for synapses on ", syn.path, ":", syn.synapse.vec.delay)
        print("Weights for synapses on ", syn.path, ":", syn.synapse.vec.weight)


if __name__ == "__main__":
    test_synapse()
|
dilawar/moose-core
|
tests/core/test_synapse.py
|
Python
|
gpl-3.0
| 4,253
|
[
"MOOSE"
] |
8549be44643281b74f3525bd12f47d1c99bf2f8c2224a96be246723f31f7a456
|
# vi: ts=8 sts=4 sw=4 et
#
# model.py: the Draco model
#
# This file is part of Draco2. Draco2 is free software and is made available
# under the MIT license. Consult the file "LICENSE" that is distributed
# together with this file for the exact licensing terms.
#
# Draco2 is copyright (c) 1999-2007 by the Draco2 authors. See the file
# "AUTHORS" for a complete overview.
#
# $Revision: 1187 $
import threading
from draco2.database import DatabaseError
from draco2.model.transaction import Transaction
from draco2.model.schema import Schema
class ModelMetaClass(type):
    """Metaclass that calls Model._build() when a model is defined.

    `Model` itself lists `object` among its bases, so the `object not in
    bases` test below skips the build step for the base class and runs it
    for every strict subclass at class-definition time.
    """

    def __init__(self, name, bases, namespace):
        # fix: the third parameter was named `dict`, shadowing the builtin.
        # type machinery calls __init__ positionally, so the rename is
        # interface-compatible.
        type.__init__(self, name, bases, namespace)
        # Call for strict subclasses of Model only.
        if object not in bases:
            self._build()
class Model(object):
    """The Draco Model.
    Specific models are created by subclassing this class and providing
    the various class members.

    NOTE: this is Python 2 code (`__metaclass__` hook, py2 raise syntax
    at the bottom); it is preserved as-is.
    """

    # Python 2 metaclass hook: ModelMetaClass calls cls._build() for every
    # strict subclass at class-definition time.
    __metaclass__ = ModelMetaClass

    # Declarative members that concrete models override.
    name = None
    version = None
    entities = []
    relationships = []
    views = []
    transaction_factory = Transaction
    init_statements = []

    def __init__(self, database):
        """Create a new Draco model."""
        self.m_database = database
        # Thread-local storage: each thread keeps its own map of named
        # transactions (created lazily in transaction()).
        self.m_tsd = threading.local()
        self.m_lock = threading.Lock()
        self.m_anonid = 0  # counter used to generate anonymous transaction names

    @classmethod
    def _create(cls, api):
        """Factory method."""
        model = cls(api.database)
        return model

    def _finalize(self):
        """Close all open transactions."""
        try:
            transactions = self.m_tsd.transactions.values()
        except AttributeError:
            # No transaction was ever created on this thread.
            transactions = []
        for tnx in transactions:
            tnx._finalize()
        self.m_tsd.__dict__.clear()

    @classmethod
    def _build(cls):
        """Build the model. A model has to be built once, before it
        can be instantiated."""
        # Imported here to avoid import cycles at module load time.
        from draco2.model.check import CheckVisitor
        from draco2.model.build import BuildVisitor
        checker = CheckVisitor()
        checker.visit(cls)
        builder = BuildVisitor()
        builder.visit(cls)

    def database(self):
        """Return the model's database manager."""
        return self.m_database

    def _get_name(self):
        """Return a new anonymous name."""
        # Lock protects the shared counter against concurrent callers.
        self.m_lock.acquire()
        try:
            anonid = self.m_anonid
            self.m_anonid += 1
        finally:
            self.m_lock.release()
        name = 'anon/%d' % anonid
        return name

    def transaction(self, name=None):
        """Create a (named) transaction.
        If name is given and a transaction with the same name already exists,
        that transaction is returned. If no transaction with the specified
        name exists, or if no name is given, a new transaction is returned.
        """
        if name is None:
            name = self._get_name()
        self.m_lock.acquire()
        try:
            try:
                tnx = self.m_tsd.transactions[name]
            except AttributeError:
                # First transaction on this thread: create the per-thread map.
                self.m_tsd.transactions = {}
            except KeyError:
                # Map exists but no transaction under this name yet.
                pass
            else:
                return tnx
            tnx = self.transaction_factory(self)
            self.m_tsd.transactions[name] = tnx
        finally:
            self.m_lock.release()
        return tnx

    def schema(self):
        """Return a schema object for this model."""
        return Schema(self)

    def object(self, name):
        """Return the entity or relationship `name'."""
        for en in self.entities:
            if en.name == name:
                return en
        for re in self.relationships:
            if re.name == name:
                return re
        raise KeyError, 'No such object: %s' % name
|
geertj/draco2
|
draco2/model/model.py
|
Python
|
mit
| 3,882
|
[
"VisIt"
] |
1337d5d13cbade816f027f22e71a1184d82c52fcfa72aa7dbdf16c431698bb0a
|
# suppyrrs.py ---
#
# Filename: suppyrrs.py
# Description:
# Author: subhasis ray
# Maintainer:
# Created: Fri Aug 7 13:59:30 2009 (+0530)
# Version:
# Last-Updated: Fri Oct 21 17:12:04 2011 (+0530)
# By: Subhasis Ray
# Update #: 605
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary: Superficial Regular Spiking Pyramidal Cells of layer 2/3
# From Traub et all, 2005
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
from datetime import datetime
import config
import trbutil
import moose
from cell import *
from capool import CaPool
class SupPyrRS(TraubCell):
    """Superficial Pyramidal Regular Spiking cell (layer 2/3, Traub et al. 2005)."""

    # Channel reversal potentials (V) and the Ca pool time constant (s)
    # passed to the prototype reader below.
    chan_params = {
        'ENa': 50e-3,
        'EK': -95e-3,
        'ECa': 125e-3,
        'EAR': -35e-3,
        'EGABA': -81e-3,
        'TauCa': 20e-3
    }
    # Channels whose conductance depends on [Ca2+].
    ca_dep_chans = ['KAHP', 'KC']
    # Number of compartments in the prototype morphology.
    num_comp = 74
    # Index of the compartment used as the presynaptic recording site.
    presyn = 72
    proto_file = "SupPyrRS.p"
    # level maps level number to the set of compartments belonging to it
    level = None
    # depth stores a map between level number and the depth of the compartments.
    depth = {
        1: 850.0 * 1e-6,
        2: 885.0 * 1e-6,
        3: 920.0 * 1e-6,
        4: 955.0 * 1e-6,
        5: 825.0 * 1e-6,
        6: 775.0 * 1e-6,
        7: 725.0 * 1e-6,
        8: 690.0 * 1e-6,
        9: 655.0 * 1e-6,
        10: 620.0 * 1e-6,
        11: 585.0 * 1e-6,
        12: 550.0 * 1e-6
    }
    # NOTE: evaluated at class-definition time -- reads the .p file once and
    # shares the prototype across all instances.
    prototype = TraubCell.read_proto(proto_file, "SupPyrRS", level_dict=level, depth_dict=depth, params=chan_params)

    def __init__(self, *args):
        # start = datetime.now()
        TraubCell.__init__(self, *args)
        # Override the soma Ca pool time constant for this cell type.
        soma_ca_pool = moose.CaConc(self.soma.path + '/CaPool')
        soma_ca_pool.tau = 100e-3
        # end = datetime.now()
        # delta = end - start
        # config.BENCHMARK_LOGGER.info('created cell in: %g s' % (delta.days * 86400 + delta.seconds + delta.microseconds * 1e-6))

    def _topology(self):
        # Topology now comes from the prototype file; building by hand is gone.
        raise Exception, 'Deprecated'

    def _setup_passive(self):
        # Passive properties now come from the prototype file.
        raise Exception, 'Deprecated'

    def _setup_channels(self):
        """Set up connections between compartment and channels, and Ca pool"""
        raise Exception, 'Deprecated'

    @classmethod
    def test_single_cell(cls):
        """Simulates a single superficial pyramidal regular spiking
        cell and plots the Vm and [Ca2+]"""
        config.LOGGER.info("/**************************************************************************")
        config.LOGGER.info(" *")
        config.LOGGER.info(" * Simulating a single cell: %s" % (cls.__name__))
        config.LOGGER.info(" *")
        config.LOGGER.info(" **************************************************************************/")
        sim = Simulation(cls.__name__)
        mycell = SupPyrRS(SupPyrRS.prototype, sim.model.path + "/SupPyrRS")
        config.LOGGER.info('Created cell: %s' % (mycell.path))
        # Record Vm at the presynaptic compartment and inject a current
        # pulse into the soma.
        vm_table = mycell.comp[SupPyrRS.presyn].insertRecorder('Vm_suppyrrs', 'Vm', sim.data)
        pulsegen = mycell.soma.insertPulseGen('pulsegen', sim.model, firstLevel=3e-10, firstDelay=50e-3, firstWidth=50e-3)
        sim.schedule()
        if mycell.has_cycle():
            # NOTE(review): "CICRUIT" typo in the runtime log string left
            # as-is (changing it would alter runtime output).
            config.LOGGER.warning("WARNING!! CYCLE PRESENT IN CICRUIT.")
        t1 = datetime.now()
        sim.run(200e-3)
        t2 = datetime.now()
        delta = t2 - t1
        if config.has_pylab:
            # Convert to mV and overlay the reference NEURON trace if present.
            mus_vm = config.pylab.array(vm_table) * 1e3
            mus_t = linspace(0, sim.simtime * 1e3, len(mus_vm))
            try:
                nrn_vm = config.pylab.loadtxt('../nrn/mydata/Vm_deepLTS.plot')
                nrn_t = nrn_vm[:, 0]
                nrn_vm = nrn_vm[:, 1]
                config.pylab.plot(nrn_t, nrn_vm, 'y-', label='nrn vm')
            except IOError:
                print 'NEURON Data not available.'
            config.pylab.plot(mus_t, mus_vm, 'g-.', label='mus vm')
            config.pylab.legend()
            config.pylab.show()
# test main --
# NOTE(review): imports placed at file bottom, presumably to avoid circular
# imports with the cell/simulation modules -- confirm before moving to top.
from simulation import Simulation
import pylab
from subprocess import call

if __name__ == "__main__":
    SupPyrRS.test_single_cell()
#
# suppyrrs.py ends here
|
BhallaLab/moose-thalamocortical
|
DEMOS/pymoose/traub2005/py/suppyrRS.py
|
Python
|
lgpl-2.1
| 4,890
|
[
"MOOSE",
"NEURON"
] |
3e43603d3765e3b4fe374bc60bb538f28680fc1964d287e9bcdd6c5eef97d6ca
|
from django.conf import settings
from django.contrib import admin
from django.urls.base import reverse
from django.urls.exceptions import NoReverseMatch
from edc_model_admin.model_admin_audit_fields_mixin import audit_fieldset_tuple,\
audit_fields
from edc_visit_schedule.fieldsets import visit_schedule_fieldset_tuple,\
visit_schedule_fields
class CrfModelAdminMixin:
    """ModelAdmin mixin for CRF models that carry a ForeignKey to your
    visit model(s).

    On init, the visit FK is appended to ``list_display`` and the search
    fields / list filters are extended with visit-model lookups.
    """

    date_hierarchy = 'report_datetime'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Show the visit FK as the final list_display column.
        self.list_display = tuple(self.list_display) + (self.visit_model_attr,)
        self.extend_search_fields()
        self.extend_list_filter()

    @property
    def visit_model(self):
        """Visit model class declared on the CRF model."""
        return self.model.visit_model()

    @property
    def visit_model_attr(self):
        """Name of the FK field on the CRF model pointing at the visit model."""
        return self.model.visit_model_attr()

    def extend_search_fields(self):
        """Add a subject-identifier lookup through the visit FK."""
        extra = '{}__appointment__subject_identifier'.format(self.visit_model_attr)
        fields = set(self.search_fields)
        fields.add(extra)
        self.search_fields = tuple(fields)

    def extend_list_filter(self):
        """Extends list filter with additional values from the visit
        model.
        """
        prefix = self.visit_model_attr
        suffixes = (
            '__report_datetime',
            '__reason',
            '__appointment__appt_status',
            '__appointment__visit_code',
        )
        self.list_filter = tuple(self.list_filter) + tuple(
            prefix + suffix for suffix in suffixes)

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Restrict the visit FK choices to the visit passed in the querystring;
        # with no querystring value, offer no choices at all.
        if db_field.name == self.visit_model_attr:
            visit_id = request.GET.get(self.visit_model_attr)
            if visit_id:
                kwargs["queryset"] = self.visit_model.objects.filter(
                    id__exact=visit_id)
            else:
                kwargs["queryset"] = self.visit_model.objects.none()
        return super().formfield_for_foreignkey(db_field, request, **kwargs)
class VisitModelAdminMixin:
    """ModelAdmin subclass for models with a ForeignKey to
    'appointment', such as your visit model(s).

    In the child ModelAdmin class set the following attributes,
    for example:

        visit_attr = 'maternal_visit'
        dashboard_type = 'maternal'
    """

    date_hierarchy = 'report_datetime'

    fieldsets = (
        (None, {
            'fields': [
                'appointment',
                'report_datetime',
                'reason',
                'reason_missed',
                'reason_unscheduled',
                'reason_unscheduled_other',
                'info_source',
                'info_source_other',
                'comments'
            ]}),
        visit_schedule_fieldset_tuple,
        audit_fieldset_tuple
    )

    radio_fields = {
        'reason': admin.VERTICAL,
        'reason_unscheduled': admin.VERTICAL,
        'reason_missed': admin.VERTICAL,
        'info_source': admin.VERTICAL,
        'require_crfs': admin.VERTICAL}

    list_display = ['appointment', 'subject_identifier', 'report_datetime',
                    'reason',
                    'study_status', 'require_crfs', 'created',
                    'modified', 'user_created',
                    'user_modified', ]

    search_fields = ['id', 'reason', 'appointment__visit_code',
                     'appointment__subject_identifier']

    list_filter = [
        'report_datetime',
        'appointment__visit_code',
        'appointment__visit_code_sequence',
        'reason',
        'require_crfs',
        'created',
        'modified',
        'user_created',
        'user_modified',
        'hostname_created']

    def subject_identifier(self, obj=None):
        """list_display column: subject identifier from the appointment."""
        return obj.appointment.subject_identifier

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Limit the appointment FK choices to the appointment passed in the
        # querystring (empty queryset when the pk is absent or unmatched).
        if db_field.name == 'appointment':
            kwargs["queryset"] = db_field.related_model.objects.filter(
                pk=request.GET.get('appointment'))
        return super().formfield_for_foreignkey(
            db_field, request, **kwargs)

    def get_readonly_fields(self, request, obj=None):
        # Audit and visit-schedule fields are never user-editable.
        fields = super().get_readonly_fields(request, obj=obj)
        fields = fields + audit_fields + visit_schedule_fields
        return fields

    def view_on_site(self, obj):
        """Link the changelist's "view on site" to the subject dashboard;
        fall back to the default when the dashboard URL cannot be reversed."""
        dashboard_url_name = settings.DASHBOARD_URL_NAMES.get(
            'subject_dashboard_url')
        try:
            return reverse(
                dashboard_url_name, kwargs=dict(
                    subject_identifier=obj.subject_identifier,
                    appointment=str(obj.appointment.id)))
        except NoReverseMatch:
            return super().view_on_site(obj)
class CareTakerFieldsAdminMixin:
    """ModelAdmin mixin declaring the caretaker/information-provider
    fields; consuming admins are expected to merge ``mixin_fields`` and
    ``radio_fields_mixin`` into their own options.
    """

    mixin_fields = [
        'information_provider',
        'information_provider_other',
        'is_present',
        'survival_status',
        'last_alive_date',
        'comments']

    radio_fields_mixin = {
        'is_present': admin.VERTICAL,
        'survival_status': admin.VERTICAL,
    }
|
botswana-harvard/edc-visit-tracking
|
edc_visit_tracking/modeladmin_mixins.py
|
Python
|
gpl-2.0
| 5,335
|
[
"VisIt"
] |
f2c46fba5e6f507863bb394f3e2e50bf8972137ccb365c72272225b3ab66d4d8
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.