text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
import time
import os
#------------------------------------------------------------------------
#
# gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.plug.menu import StringOption, MediaOption, NumberOption
from gramps.gen.utils.file import media_path_full
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.docgen import (FontStyle, ParagraphStyle,
FONT_SANS_SERIF, PARA_ALIGN_CENTER)
#------------------------------------------------------------------------
#
# SimpleBookTitle
#
#------------------------------------------------------------------------
class SimpleBookTitle(Report):
    """Report that generates a title page for a book."""

    def __init__(self, database, options, user):
        """
        Create SimpleBookTitle object that produces the report.

        The arguments are:

        database - the GRAMPS database instance
        options  - instance of the Options class for this report
        user     - a gen.user.User() instance

        The options menu supplies (class variables):

        title    - Title string.
        subtitle - Subtitle string.
        imgid    - Gramps ID of the media object to use as an image.
        imgsize  - Size for the image.
        footer   - Footer string.
        """
        Report.__init__(self, database, options, user)
        self._user = user
        menu = options.menu

        def opt(name):
            # shortcut: fetch the current value of a named menu option
            return menu.get_option_by_name(name).get_value()

        self.title_string = opt('title')
        self.subtitle_string = opt('subtitle')
        self.footer_string = opt('footer')
        self.object_id = opt('imgid')
        self.image_size = opt('imgsize')

    def _write_line(self, style_name, text):
        """Emit *text* as a single paragraph using the given style."""
        self.doc.start_paragraph(style_name)
        self.doc.write_text(text)
        self.doc.end_paragraph()

    def write_report(self):
        """Generate the title page: title, subtitle, optional image, footer."""
        self._write_line('SBT-Title', self.title_string)
        self._write_line('SBT-Subtitle', self.subtitle_string)
        if self.object_id:
            media = self.database.get_object_from_gramps_id(self.object_id)
            filename = media_path_full(self.database, media.get_path())
            if os.path.exists(filename):
                if self.image_size:
                    size = self.image_size
                else:
                    # 0 means "fit to page": stay inside the usable area
                    size = min(0.8 * self.doc.get_usable_width(),
                               0.7 * self.doc.get_usable_height())
                self.doc.add_media_object(filename, 'center', size, size)
            else:
                self._user.warn(_('Could not add photo to page'),
                                _('File %s does not exist') % filename)
        self._write_line('SBT-Footer', self.footer_string)
#------------------------------------------------------------------------
#
# SimpleBookTitleOptions
#
#------------------------------------------------------------------------
class SimpleBookTitleOptions(MenuReportOptions):
    """
    Defines options and provides handling interface.
    """

    def __init__(self, name, dbase):
        self.__db = dbase
        MenuReportOptions.__init__(self, name, dbase)

    def add_menu_options(self, menu):
        """Register the five user-configurable options of this report."""
        category = _("Report Options")

        option = StringOption(_('book|Title'), _('Title of the Book'))
        option.set_help(_("Title string for the book."))
        menu.add_option(category, "title", option)

        option = StringOption(_('Subtitle'), _('Subtitle of the Book'))
        option.set_help(_("Subtitle string for the book."))
        menu.add_option(category, "subtitle", option)

        # default footer: "Copyright <current year> <researcher name>"
        now = time.localtime(time.time())
        researcher_name = self.__db.get_researcher().get_name()
        default_footer = _('Copyright %(year)d %(name)s') % {
            'year': now[0], 'name': researcher_name}
        option = StringOption(_('Footer'), default_footer)
        option.set_help(_("Footer string for the page."))
        menu.add_option(category, "footer", option)

        option = MediaOption(_('Image'))
        option.set_help(_("Gramps ID of the media object to use as an image."))
        menu.add_option(category, "imgid", option)

        option = NumberOption(_('Image Size'), 0, 0, 20, 0.1)
        option.set_help(_("Size of the image in cm. A value of 0 indicates "
                          "that the image should be fit to the page."))
        menu.add_option(category, "imgsize", option)

    def make_default_style(self, default_style):
        """Make the default output style for the Simple Book Title report."""
        # (style name, font size, bold, header level, description)
        style_specs = (
            ("SBT-Title", 16, 1, 1,
             _('The style used for the title of the page.')),
            ("SBT-Subtitle", 14, 0, 2,
             _('The style used for the subtitle.')),
            ("SBT-Footer", 10, 0, 2,
             _('The style used for the footer.')),
        )
        for style_name, size, bold, level, description in style_specs:
            font = FontStyle()
            font.set(face=FONT_SANS_SERIF, size=size, bold=bold, italic=1)
            para = ParagraphStyle()
            para.set_font(font)
            para.set_header_level(level)
            para.set_alignment(PARA_ALIGN_CENTER)
            para.set(pad=0.5)
            para.set_description(description)
            default_style.add_paragraph_style(style_name, para)
| pmghalvorsen/gramps_branch | gramps/plugins/textreport/simplebooktitle.py | Python | gpl-2.0 | 7,539 | [
"Brian"
] | ba7451dfe328e7c82ad67bfaf19a589ef598e8078e9ec001fefd3f7828cdcaf8 |
import numpy as np
from mayavi import mlab
from BDSpace.Curve import ParametricCurve
from BDSpaceVis.space import SpaceView
class CurveView(SpaceView):
    """Mayavi view of a BDSpace ParametricCurve.

    The curve is sampled by ``generate_points`` and rendered with
    ``mlab.plot3d`` as a tube whose radius is ``thickness``.
    """

    def __init__(self, fig, curve, scale=1, color=None, opacity=None, edge_visible=False,
                 cs_visible=True, surface_visible=True, wireframe=False, resolution=20, thickness=None):
        """
        :param fig: mayavi figure to draw into
        :param curve: ParametricCurve to visualize
        :param resolution: sampling density passed to generate_points
        :param thickness: tube radius; None lets mayavi choose a default
        """
        assert isinstance(curve, ParametricCurve)
        self.resolution = resolution
        self.edge_visible = edge_visible
        # BUGFIX: the thickness argument was previously discarded
        # (self.thickness was unconditionally reset to None); honor it.
        # draw_surface already supports a pre-set radius via its else-branch.
        self.thickness = None if thickness is None else float(thickness)
        points, dims = generate_points(curve, self.resolution)
        super(CurveView, self).__init__(fig, curve, scale=scale, color=color, opacity=opacity,
                                        points=points, dims=dims,
                                        cs_visible=cs_visible, surface_visible=surface_visible, wireframe=wireframe)

    def set_resolution(self, resolution):
        """Re-sample the curve at a new resolution and redraw."""
        self.resolution = resolution
        points, dims = generate_points(self.space, resolution)
        self.set_points(points, dims)
        self.draw()

    def get_thickness(self):
        """Return the current tube radius, or None if nothing is drawn yet."""
        if self.surface is not None:
            return self.surface.parent.parent.filter.radius

    def set_thickness(self, thickness):
        """
        Sets the thickness of the curve line changing mayavi tube radius
        :param thickness: float number between 0.0 and 1e299
        """
        if isinstance(thickness, (float, int)):
            self.thickness = float(thickness)
            try:
                self.surface.parent.parent.filter.radius = self.thickness
                self.draw()
            except AttributeError:
                # surface not created yet; the radius is applied on next draw
                pass

    def set_edge_visible(self, edge_visible=True):
        """Toggle edge rendering of the tube surface and redraw."""
        self.edge_visible = edge_visible
        self.draw()

    def draw_surface(self):
        """Create or update the mlab tube surface from the current points."""
        if self.surface_visible:
            if self.points is not None:
                coordinate_system = self.space.basis_in_global_coordinate_system()
                curve_points = np.asarray(coordinate_system.to_parent(self.points))
                if self.surface is None:
                    mlab.figure(self.fig, bgcolor=self.fig.scene.background)
                    if self.thickness is None:
                        # let mlab pick the default radius, then remember it
                        self.surface = mlab.plot3d(curve_points[:, 0], curve_points[:, 1], curve_points[:, 2],
                                                   color=self.color)
                        self.thickness = self.get_thickness()
                    else:
                        self.surface = mlab.plot3d(curve_points[:, 0], curve_points[:, 1], curve_points[:, 2],
                                                   color=self.color, tube_radius=self.thickness)
                else:
                    # update the existing pipeline in place: rebuild the
                    # polyline connectivity (segments i -> i+1)
                    n_pts = len(curve_points) - 1
                    lines = np.zeros((n_pts, 2), 'l')
                    lines[:, 0] = np.arange(0, n_pts - 0.5, 1, 'l')
                    lines[:, 1] = np.arange(1, n_pts + 0.5, 1, 'l')
                    data = self.surface.parent.parent.parent.parent.data
                    data.set(lines=None)
                    data.set(points=curve_points)
                    data.set(lines=lines)
                    self.surface.parent.parent.parent.parent.name = self.space.name
                    self.surface.parent.parent.filter.radius = self.thickness
                self.surface.actor.property.color = self.color
                self.surface.actor.property.edge_visibility = self.edge_visible
                self.surface.actor.property.edge_color = self.color
                if self.wireframe:
                    self.surface.actor.property.representation = 'wireframe'
                else:
                    self.surface.actor.property.representation = 'surface'
                if self.opacity is not None:
                    self.surface.actor.property.opacity = self.opacity
        else:
            if self.surface is not None:
                self.surface.remove()
                self.surface = None
def generate_points(curve, resolution=20):
    """Sample a ParametricCurve at a density derived from its parameter span.

    :param curve: ParametricCurve instance to sample
    :param resolution: number of samples per pi of parameter range (min 2)
    :return: (points, dims) — sampled points and dims=None (a curve has no
             2-D grid shape; kept for SpaceView API symmetry)
    """
    assert isinstance(curve, ParametricCurve)
    num_points = angular_resolution(abs(curve.stop - curve.start), resolution)
    # BUGFIX: dtype was np.float, an alias deprecated in NumPy 1.20 and
    # removed in 1.24 (AttributeError); the builtin float is equivalent.
    t = np.linspace(curve.start, curve.stop, num=num_points, endpoint=True, dtype=float)
    points = curve.generate_points(t)
    return points, None
def angular_resolution(angle, resolution):
    """Return the number of sample points for an angular span.

    Scales *resolution* samples per pi of *angle*, never fewer than 2
    (a curve segment needs at least its two endpoints).
    """
    sample_count = int(angle / np.pi * resolution)
    return sample_count if sample_count >= 2 else 2
| bond-anton/Space_visualization | BDSpaceVis/curves.py | Python | apache-2.0 | 4,454 | [
"Mayavi"
] | cb0351ba8b71bab33d528479cc6221f6854536f896fe7c3c64c1f7072b88df4a |
import visit as v
from .. import JAVA_LANG
from .. import PRIMITIVES
from ..utils import utils
from ..node import Node
from ..compilationunit import CompilationUnit
from ..importdeclaration import ImportDeclaration
from ..body.classorinterfacedeclaration import ClassOrInterfaceDeclaration
from ..body.fielddeclaration import FieldDeclaration
from ..body.variabledeclarator import VariableDeclarator
from ..body.variabledeclaratorid import VariableDeclaratorId
from ..body.methoddeclaration import MethodDeclaration
from ..body.constructordeclaration import ConstructorDeclaration
from ..body.emptymemberdeclaration import EmptyMemberDeclaration
from ..body.axiomdeclaration import AxiomDeclaration
from ..body.axiomparameter import AxiomParameter
from ..stmt.blockstmt import BlockStmt
from ..stmt.ifstmt import IfStmt
from ..stmt.expressionstmt import ExpressionStmt
from ..expr.nameexpr import NameExpr
from ..expr.variabledeclarationexpr import VariableDeclarationExpr
from ..expr.binaryexpr import BinaryExpr
from ..expr.integerliteralexpr import IntegerLiteralExpr
from ..expr.methodcallexpr import MethodCallExpr
from ..expr.fieldaccessexpr import FieldAccessExpr
from ..expr.objectcreationexpr import ObjectCreationExpr
from ..type.primitivetype import PrimitiveType
from ..type.voidtype import VoidType
from ..type.referencetype import ReferenceType
# https://docs.oracle.com/javase/specs/jls/se8/html/jls-6.html#jls-6.3
class SymtabGen(object):
    """AST visitor that attaches a symbol table (``symtab`` dict) to every
    node of a parsed Java compilation unit, following the JLS scoping rules
    referenced above.

    NOTE: this is Python 2 code — it relies on ``xrange`` and on ``map``
    being eager (the many bare ``map(...)`` calls are executed for their
    side effects and would do nothing under Python 3).

    Dispatch is done with the ``visit`` library's ``@v.on``/``@v.when``
    decorators: each ``visit`` overload below handles one node type.
    """

    # Node types that never get a symbol table of their own.
    NONSYM = [PrimitiveType, VoidType, IntegerLiteralExpr]

    def __init__(self, **kwargs):
        # lib=True additionally injects the implicit java.lang imports
        # (see the CompilationUnit overload below).
        self._lib = kwargs.get('lib', True)

    @v.on("node")
    def visit(self, node):
        """
        This is the generic method that initializes the
        dynamic dispatcher.
        """

    def new_symtab(self, n, cp=False):
        # Give node *n* a symbol table inherited from its parent.
        # cp=True takes a copy so the node can add names without leaking
        # them into the parent scope; cp=False shares the parent's dict.
        if n.symtab: return
        if n.parentNode.symtab:
            n.symtab = n.parentNode.symtab.copy() if cp else n.parentNode.symtab
        elif not n.symtab: n.symtab = {}

    @v.when(Node)
    def visit(self, node):
        # Fallback for node types without a dedicated overload: inherit the
        # parent's table and recurse into the children.
        if type(node) in self.NONSYM: return
        self.new_symtab(node)
        map(lambda n: n.accept(self), node.childrenNodes)
        # print "Unimplemented node:", node

    @v.when(CompilationUnit)
    def visit(self, node):
        # The scope of a top level type is all type declarations in the package in
        # which the top level type is declared.
        if self.lib:
            for i in JAVA_LANG: # add in java.lang which is import by default
                nm = i.split('.')
                # Build a QualifiedNameExpr AST fragment for "java.lang.<nm>"
                qn = {
                    u'@t': u'QualifiedNameExpr',
                    u'name': nm[-1],
                    u'qualifier': {
                        u'@t': u'QualifiedNameExpr',
                        u'name': u'lang',
                        u'qualifier': {
                            u'name': u'java',},},
                }
                node.imports.append(ImportDeclaration({u'@t':u'ImportDeclaration',u'name':qn, u'implicit': True}))
        for i in node.imports: node.symtab.update({str(i):i})
        # All top-level types are visible to each other.
        d = dict([v for v in map(lambda t: (t.name,t), node.types)])
        for ty in node.types:
            # '_cu_' lets any type reach back to its compilation unit.
            ty.symtab.update({u'_cu_':node})
            if self.lib:
                for i in node.imports: ty.symtab.update({str(i).split('.')[-1]:i})
            ty.symtab.update(d)
            ty.accept(self)

    # body/
    @v.when(ClassOrInterfaceDeclaration)
    def visit(self, node):
        # The scope of a declaration of a member m declared in or inherited by
        # a class type C is the entire body of C, including any nested type declarations.
        self.new_symtab(node, cp=True)
        # if type(node.parentNode) == ClassOrInterfaceDeclaration:
        #     node.parentNode.symtab.update({str(node):node})
        node.symtab.update({node.name:node})
        if node.name == u'Object': node.parentNode.symtab.update({node.name:node})
        [node.symtab.update({n.name:n}) for n in node.extendsList if n.name not in node.symtab]
        [node.symtab.update({n.name:n}) for n in node.implementsList if n.name not in node.symtab]
        [node.symtab.update({n.name:n}) for n in node.typeParameters if n.name not in node.symtab]
        node.members = filter(lambda n: not isinstance(n, EmptyMemberDeclaration), node.members)
        # Fields and nested types are keyed by name; methods by signature.
        map(lambda n: node.symtab.update({n.name:n} if isinstance(n, FieldDeclaration) or \
                                         isinstance(n, ClassOrInterfaceDeclaration) else \
                                         {n.sig():n}), node.members)
        map(lambda n: n.accept(self), node.members)

    @v.when(MethodDeclaration)
    def visit(self, node):
        # The scope of a formal parameter of a method is the entire body of the method
        self.new_symtab(node, cp=True)
        # node.parentNode.symtab.update({str(node):node})
        node.symtab.update({node.sig():node})
        if str(node.typee) not in PRIMITIVES and str(node.typee) not in node.symtab:
            node.symtab.update({str(node.typee):node.typee})
        # somethign is weird here. shouldnt have to visit idd and parameters
        map(lambda p: p.idd.accept(self), node.parameters)
        map(lambda p: p.accept(self), node.parameters)
        map(lambda t: node.symtab.update({t.name:t}), node.typeParameters)
        map(lambda p: p.idd.symtab.update(node.symtab), node.parameters)
        # map(lambda c: c.accept(self), node.childrenNodes)
        if node.body: node.body.accept(self)
        # Methods of anonymous classes get a mangled, unique name registered
        # on the enclosing (named) type found via utils.anon_nm.
        if type(node.parentNode) == ObjectCreationExpr:
            target = node.symtab.get(utils.anon_nm(node).name)
            target.symtab.update({str(node):node})
            node.name = '{}_{}_{}'.format(str(node), node.parentNode.typee, target.name)
            target.symtab.update({str(node):node})

    @v.when(AxiomDeclaration)
    def visit(self, node):
        self.new_symtab(node, cp=True)
        # print '*'*10, str(node)
        # print 'axiomdeclaration:', str(node), node.name
        # print node.symtab
        node.parentNode.symtab.update({node.sig():node})
        if str(node.typee) not in PRIMITIVES and str(node.typee) not in node.symtab:
            node.symtab.update({str(node.typee):node.typee})
        # somethign is weird here. shouldnt have to visit idd and parameters
        # for p in node.parameters:
        #     if p.idd: p.idd.accept(self)
        #     if p.method: p.method.accept(self)
        map(lambda p: p.accept(self), node.parameters)
        for p in node.parameters:
            if p.idd:
                p.idd.symtab.update(node.symtab)
                # node.symtab = dict(p.idd.symtab.items() + node.symtab.items())
            # Catch args that are actually Axiom Declarations
            if p.method:
                p.method.symtab.update(node.symtab)
                node.symtab = dict(p.method.symtab.items() + node.symtab.items())
        if node.body:
            node.body.accept(self)
        # print node.symtab
        # print '*'*10, str(node)

    @v.when(AxiomParameter)
    def visit(self, node):
        self.new_symtab(node)
        # print '--'*8
        # print 'axiomparameter:', node.name
        # print node.symtab
        node.typee.accept(self)
        # A parameter carries either a plain identifier or a nested method.
        if node.idd:
            node.idd.accept(self)
        else:
            node.method.accept(self)
        # print 'axiomparameter:', node.name
        # print node.symtab
        # print '--'*8

    @v.when(ConstructorDeclaration)
    def visit(self, node):
        # The scope of a formal parameter of a constructor is the entire body of the constructor
        self.new_symtab(node, cp=True)
        node.parentNode.symtab.update({str(node):node})
        node.symtab.update({str(node):node})
        map(lambda p: p.idd.accept(self), node.parameters)
        map(lambda p: p.accept(self), node.parameters)
        map(lambda p: p.idd.symtab.update(node.symtab), node.parameters)
        if node.body: node.body.accept(self)

    @v.when(FieldDeclaration)
    def visit(self, node):
        self.new_symtab(node, cp=True)
        node.symtab.update({node.name:node})
        node.variable.accept(self)

    @v.when(VariableDeclarator)
    def visit(self, node):
        self.new_symtab(node)
        # Array variables get a synthetic 'length' field so that
        # later name resolution of "arr.length" succeeds.
        if isinstance(node.typee, ReferenceType) and node.typee.arrayCount > 0:
            fd = FieldDeclaration({u"@t": u"FieldDeclaration",
                                   u"variables": {
                                       u"@e": [{u"@t": u"VariableDeclarator",
                                                u"id": {u"name": u"length",},
                                                u'init': {u'@t': u'IntegerLiteralExpr',
                                                          u'value': u'0',},},]},
                                   u"type": {u"@t": u"PrimitiveType",
                                             u"type": {"name": "Int"},},})
            node.symtab.update({u'length':fd})
            if isinstance(node.parentNode, FieldDeclaration):
                node.parentNode.symtab.update({u'length':fd})
        node.symtab.update({node.name:node})
        if node.init: node.init.accept(self)

    @v.when(VariableDeclaratorId)
    def visit(self, node): self.new_symtab(node)

    # stmt/
    @v.when(BlockStmt)
    def visit(self, node):
        self.new_symtab(node, cp=True)
        # Each statement sees the names declared by the statements before it:
        # chain copies of the symbol table through the block.
        stlen = len(node.stmts)
        if stlen > 0:
            node.stmts[0].accept(self)
            for i in xrange(1, stlen):
                node.stmts[i].symtab = node.stmts[i-1].symtab.copy()
                node.stmts[i].accept(self)

    @v.when(IfStmt)
    def visit(self, node):
        self.new_symtab(node)
        if node.condition: node.condition.accept(self)
        # then/else branches each get their own copied scope
        if node.thenStmt:
            self.new_symtab(node, cp=True)
            node.thenStmt.accept(self)
        if node.elseStmt:
            self.new_symtab(node, cp=True)
            node.elseStmt.accept(self)

    @v.when(ExpressionStmt)
    def visit(self, node):
        self.new_symtab(node)
        map(lambda n: n.accept(self), node.childrenNodes)

    # expr/
    @v.when(FieldAccessExpr)
    def visit(self, node):
        self.new_symtab(node, cp=True)
        map(lambda n: n.accept(self), node.childrenNodes)

    @v.when(MethodCallExpr)
    def visit(self, node):
        self.new_symtab(node, cp=True)
        map(lambda n: n.accept(self), node.childrenNodes)

    @v.when(VariableDeclarationExpr)
    def visit(self, node):
        self.new_symtab(node)
        map(lambda v: v.accept(self), node.childrenNodes)
        # map(lambda v: v.accept(self), node.varss)

    @v.when(BinaryExpr)
    def visit(self, node):
        self.new_symtab(node)
        map(lambda n: n.accept(self), node.childrenNodes)

    @v.when(NameExpr)
    def visit(self, node):
        self.new_symtab(node)

    # True when implicit java.lang imports are injected (set in __init__).
    @property
    def lib(self): return self._lib

    @lib.setter
    def lib(self, v): self._lib = v
| plum-umd/java-sketch | jskparser/ast/visit/symtabgen.py | Python | mit | 10,916 | [
"VisIt"
] | 0a5174727d1de86b55bf55a45af878e75078a39917fa50351bad189331144ca2 |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Tim Moore
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from xml.etree import cElementTree
import numpy as np
from mdtraj.formats.registry import FormatRegistry
from mdtraj.utils import ilen, import_, ensure_type
from mdtraj.core.element import virtual_site
__all__ = ['load_hoomdxml']
@FormatRegistry.register_loader('.hoomdxml')
def load_hoomdxml(filename, top=None):
    """Load a single conformation from an HOOMD-Blue XML file.

    For more information on this file format, see:
    http://codeblue.umich.edu/hoomd-blue/doc/page_xml_file_format.html
    Notably, all node names and attributes are in all lower case.
    HOOMD-Blue does not contain residue and chain information explicitly.
    For this reason, chains will be found by looping over all the bonds and
    finding what is bonded to what.
    Each chain consisists of exactly one residue.

    Parameters
    ----------
    filename : path-like
        The path on disk to the XML file
    top : None
        This argumet is ignored

    Returns
    -------
    trajectory : md.Trajectory
        The resulting trajectory, as an md.Trajectory object, with corresponding
        Topology.

    Notes
    -----
    This function requires the NetworkX python package.
    """
    # local imports avoid a circular dependency with mdtraj.core
    from mdtraj.core.trajectory import Trajectory
    from mdtraj.core.topology import Topology
    topology = Topology()
    # NOTE(review): xml.etree.cElementTree was removed in Python 3.9; the
    # module-level import should migrate to xml.etree.ElementTree — confirm
    # the supported Python versions.
    tree = cElementTree.parse(filename)
    config = tree.getroot().find('configuration')
    position = config.find('position')
    bond = config.find('bond')
    atom_type = config.find('type')  # MDTraj calls this "name"
    box = config.find('box')
    box.attrib = dict((key.lower(), val) for key, val in box.attrib.items())
    # be generous for case of box attributes
    lx = float(box.attrib['lx'])
    ly = float(box.attrib['ly'])
    lz = float(box.attrib['lz'])
    try:
        xy = float(box.attrib['xy'])
        xz = float(box.attrib['xz'])
        yz = float(box.attrib['yz'])
    except (ValueError, KeyError):
        # orthorhombic box: missing/unparsable tilt factors default to zero
        xy = 0.0
        xz = 0.0
        yz = 0.0
    # unit cell built from HOOMD box lengths (lx, ly, lz) and tilt factors
    # (xy, xz, yz); assumes the HOOMD triclinic convention — TODO confirm
    # the row/column layout matches Trajectory.unitcell_vectors expectations
    unitcell_vectors = np.array([[[lx, xy*ly, xz*lz],
                                  [0.0, ly, yz*lz],
                                  [0.0, 0.0, lz ]]])
    positions, types = [], {}
    # first splitlines() entry is the text before the first newline -> skip it
    for pos in position.text.splitlines()[1:]:
        positions.append((float(pos.split()[0]),
                          float(pos.split()[1]),
                          float(pos.split()[2])))
    for idx, atom_name in enumerate(atom_type.text.splitlines()[1:]):
        types[idx] = str(atom_name.split()[0])
    if len(types) != len(positions):
        raise ValueError('Different number of types and positions in xml file')
    # ignore the bond type
    if hasattr(bond, 'text'):
        bonds = [(int(b.split()[1]), int(b.split()[2])) for b in bond.text.splitlines()[1:]]
        chains = _find_chains(bonds)
    else:
        chains = []
        bonds = []
    # Relate the first index in the bonded-group to mdtraj.Residue
    bonded_to_residue = {}
    for i, _ in enumerate(types):
        bonded_group = _in_chain(chains, i)
        if bonded_group is not None:
            # one chain + one residue per bonded group, created on first sight
            if bonded_group[0] not in bonded_to_residue:
                t_chain = topology.add_chain()
                t_residue = topology.add_residue('A', t_chain)
                bonded_to_residue[bonded_group[0]] = t_residue
            topology.add_atom(types[i], virtual_site,
                              bonded_to_residue[bonded_group[0]])
        if bonded_group is None:
            # unbonded atom: gets its own chain/residue
            t_chain = topology.add_chain()
            t_residue = topology.add_residue('A', t_chain)
            topology.add_atom(types[i], virtual_site, t_residue)
    for bond in bonds:
        atom1, atom2 = bond[0], bond[1]
        topology.add_bond(topology.atom(atom1), topology.atom(atom2))
    traj = Trajectory(xyz=np.array(positions), topology=topology)
    traj.unitcell_vectors = unitcell_vectors
    return traj
def _find_chains(bond_list):
    """Given a set of bonds, find unique molecules, with the assumption that
    there are no bonds between separate chains (i.e., only INTRAmolecular
    bonds), which also implies that each atom can be in exactly one chain.

    Parameters
    ----------
    bond_list : list of (int, int)
        The list of bonds

    Returns
    -------
    chains : list of list of int
        List of atoms in each chain

    Notes
    -----
    This function requires the NetworkX python package.
    """
    nx = import_('networkx')
    bond_list = np.asarray(bond_list)
    # Each connected component of the bond graph is one molecule/chain.
    molecules = nx.Graph()
    molecules.add_nodes_from(set(bond_list.flatten()))
    molecules.add_edges_from(bond_list)
    # (removed an unused local `chains = []`; fixed the malformed
    # "Returns _______" docstring heading)
    return [sorted(component) for component in nx.connected_components(molecules)]
def _in_chain(chains, atom_index):
"""Check if an item is in a list of lists"""
for chain in chains:
if atom_index in chain:
return chain
return None
| rmcgibbo/mdtraj | mdtraj/formats/hoomdxml.py | Python | lgpl-2.1 | 5,880 | [
"HOOMD-blue",
"MDTraj"
] | 5e36b03fd862d9d888b0c365f950e217efd9df2d218687313d2c4e7733e40917 |
# -------------------------------------------------------------------------
# Name: globals
# Purpose:
#
# Author: burekpe
#
# Created: 16/05/2016
# Copyright: (c) burekpe 2016
# This program comes with ABSOLUTELY NO WARRANTY
# This is free software, and you are welcome to redistribute it under certain conditions
# run cwatm 1 -w for details
# -------------------------------------------------------------------------
import getopt
import os.path
import sys
import ctypes
import numpy.ctypeslib as npct
import numpy as np
# for detecting on which system it is running
import platform
from cwatm.management_modules.messages import *
def globalclear():
    """Empty every module-level state container so a fresh model run starts
    without leftovers from a previous run.

    Only the *contents* are removed — each container keeps its identity, so
    modules holding a reference to these dicts/lists keep seeing the shared
    object.  The containers themselves are (re)created by the module-level
    code below this function.
    """
    settingsfile.clear()
    maskinfo.clear()
    modelSteps.clear()
    xmlstring.clear()
    geotrans.clear()
    versioning.clear()
    timestepInit.clear()
    binding.clear()
    option.clear()
    metaNetcdfVar.clear()
    inputcounter.clear()
    flagmeteo.clear()
    meteofiles.clear()
    initCondVarValue.clear()
    initCondVar.clear()
    dateVar.clear()
    outDir.clear()
    outMap.clear()
    outTss.clear()
    outsection.clear()
    reportTimeSerieAct.clear()
    reportMapsAll.clear()
    reportMapsSteps.clear()
    reportMapsEnd.clear()
    ReportSteps.clear()
    FilterSteps.clear()
    EnsMembers.clear()
    nrCores.clear()
    outputDir.clear()
    maskmapAttr.clear()
    bigmapAttr.clear()
    metadataNCDF.clear()
    domain.clear()
    indexes.clear()
# ---------------------------------------------------------------------------
# Module-level shared state. CWatM keeps configuration, mask/map attributes,
# output bookkeeping and the compiled routing library as module globals that
# are initialized here at import time and emptied by globalclear().
# NOTE: 'global' statements at module level are no-ops in Python; they are
# kept as documentation of which names are shared across modules.
# ---------------------------------------------------------------------------
global settingsfile
settingsfile = []
global maskinfo,zeromap,modelSteps,xmlstring,geotrans
# noinspection PyRedeclaration
maskinfo = {}
modelSteps = []
xmlstring = []
geotrans = []
global binding, option, FlagName, Flags, ReportSteps, FilterSteps, EnsMembers, outputDir
global MMaskMap, maskmapAttr, bigmapAttr, cutmap, cutmapGlobal, cutmapFine, cutmapVfine, metadataNCDF
global timestepInit
global metaNetcdfVar
global inputcounter
global versioning
global meteofiles, flagmeteo
versioning = {}
timestepInit =[]
binding = {}
option = {}
metaNetcdfVar = {}
inputcounter = {}
flagmeteo ={}
meteofiles = {}
# Initial conditions
global initCondVar,initCondVarValue
initCondVarValue = []
initCondVar = []
#date variable
global dateVar
# noinspection PyRedeclaration
dateVar = {}
# Output variables
global outDir, outsection, outputTyp
global outMap, outTss
global outputTypMap,outputTypTss, outputTypTss2
outDir = {}
outMap = {}
outTss = {}
outsection = []
outputTypMap = ['daily', 'monthtot','monthavg', 'monthend', 'monthmid','annualtot','annualavg','annualend','totaltot','totalavg','totalend','once','12month']
outputTypTss = ['daily', 'monthtot','monthavg', 'monthend','annualtot','annualavg','annualend','totaltot','totalavg']
outputTypTss2 = ['tss', 'areasum','areaavg']
reportTimeSerieAct = {}
reportMapsAll = {}
reportMapsSteps = {}
reportMapsEnd = {}
MMaskMap = 0
ReportSteps = {}
FilterSteps = []
EnsMembers = []
nrCores = []
outputDir = []
maskmapAttr = {}
bigmapAttr = {}
# [row-start, row-end, col-start, col-end] cutouts of the model domain
cutmap = [0, 1, 0, 1]
cutmapGlobal = [0, 1, 0, 1]
cutmapFine = [0, 1, 0, 1]
cutmapVfine = [0, 1, 0, 1]
cdfFlag = [0, 0, 0,0,0,0,0]  # flag for netcdf output for all, steps and end, monthly (steps), yearly(steps), monthly , yearly
metadataNCDF = {}
# groundwater modflow
global domain, indexes
domain = {}
indexes = {}
global timeMes,timeMesString, timeMesSum
timeMes=[]
timeMesString = []  # name of the time measure - filled in dynamic
timeMesSum = []  # time measure of hydrological modules
global coverresult
coverresult = [False,0]
# -------------------------
global platform1
platform1 = platform.uname()[0]
# ----------------------------------
FlagName = ['quiet', 'veryquiet', 'loud',
            'checkfiles', 'noheader', 'printtime','warranty']
Flags = {'quiet': False, 'veryquiet': False, 'loud': False,
         'check': False, 'noheader': False, 'printtime': False, 'warranty': False, 'use': False,
         'test': False}
# CWatM requires a 64-bit interpreter: the bundled routing libraries are
# 64-bit builds, so refuse to start on a 32-bit Python.
python_bit = ctypes.sizeof(ctypes.c_voidp) * 8
#print "Running under platform: ", platform1
if python_bit < 64:
    msg = "Error 301: The Python version used is not a 64 bit version! Python " + str(python_bit) + "bit"
    raise CWATMError(msg)
# select the compiled kinematic-routing library for the host platform
path_global = os.path.dirname(__file__)
if platform1 == "Windows":
    dll_routing = os.path.join(os.path.split(path_global)[0],"hydrological_modules","routing_reservoirs","t5.dll")
elif platform1 == "CYGWIN_NT-6.1":
    # CYGWIN_NT-6.1 - compiled with cygwin
    dll_routing = os.path.join(os.path.split(path_global)[0],"hydrological_modules","routing_reservoirs","t5cyg.so")
else:
    print("Linux\n")
    dll_routing = os.path.join(os.path.split(path_global)[0],"hydrological_modules","routing_reservoirs","t5_linux.so")
#dll_routing = "C:/work2/test1/t4.dll"
lib2 = ctypes.cdll.LoadLibrary(dll_routing)
# setup the return typs and argument types
# input type for the cos_doubles function
# must be a double array, with single dimension that is contiguous
array_1d_double = npct.ndpointer(dtype=np.double, ndim=1, flags='CONTIGUOUS')
array_2d_int = npct.ndpointer(dtype=np.int64, ndim=2)
array_1d_int = npct.ndpointer(dtype=np.int64, ndim=1)
#array_1d_int16 = npct.ndpointer(dtype=np.int16, ndim=1, flags='CONTIGUOUS')
#array_2d_int32 = npct.ndpointer(dtype=np.int32, ndim=2, flags='CONTIGUOUS')
array_2d_double = npct.ndpointer(dtype=np.double, ndim=2, flags='CONTIGUOUS')
lib2.ups.restype = None
lib2.ups.argtypes = [array_1d_int, array_1d_int, array_1d_double, ctypes.c_int]
lib2.dirID.restype = None
lib2.dirID.argtypes = [array_2d_int, array_2d_int, array_2d_int, ctypes.c_int,ctypes.c_int]
#lib2.repairLdd1.argtypes = [ array_2d_int, ctypes.c_int,ctypes.c_int]
lib2.repairLdd1.argtypes = [ array_2d_int, ctypes.c_int,ctypes.c_int]
lib2.repairLdd2.restype = None
lib2.repairLdd2.argtypes = [ array_1d_int, array_1d_int, array_1d_int, ctypes.c_int]
lib2.kinematic.restype = None
#lib2.kinematic.argtypes = [array_1d_double,array_1d_double, array_1d_int, array_1d_int, array_1d_int, array_1d_double, ctypes.c_double, ctypes.c_double,ctypes.c_double, ctypes.c_double, ctypes.c_int]
# qold q dirdown diruplen dirupid Qnew alpha beta deltaT deltaX size
lib2.kinematic.argtypes = [array_1d_double,array_1d_double, array_1d_int, array_1d_int, array_1d_int, array_1d_double, array_1d_double, ctypes.c_double,ctypes.c_double, array_1d_double, ctypes.c_int]
lib2.runoffConc.restype = None
lib2.runoffConc.argtypes = [array_2d_double,array_1d_double,array_1d_double,array_1d_double,ctypes.c_int, ctypes.c_int]
def globalFlags(setting, arg, settingsfile, Flags):
    """
    Read flags - according to the flags the output is adjusted
    quiet, veryquiet, loud, checkfiles, noheader, printtime, warranty

    :param setting: name of the settings file (recorded in *settingsfile*)
    :param arg: command line arguments passed when calling cwatm
    :param settingsfile: global list that receives *setting*
    :param Flags: global flag dictionary, updated in place
    """
    # remember the settings file name in the shared global list
    settingsfile.append(setting)
    try:
        opts, _ = getopt.getopt(arg, 'qvlchtw', FlagName)
    except getopt.GetoptError:
        # unknown option: signal the caller to print usage information
        Flags['use'] = True
        return
    # map each accepted short/long option to its entry in Flags
    flag_of = {
        '-q': 'quiet',     '--quiet': 'quiet',
        '-v': 'veryquiet', '--veryquiet': 'veryquiet',
        '-l': 'loud',      '--loud': 'loud',
        '-c': 'check',     '--checkfiles': 'check',
        '-h': 'noheader',  '--noheader': 'noheader',
        '-t': 'printtime', '--printtime': 'printtime',
        '-w': 'warranty',  '--warranty': 'warranty',
    }
    for opt_name, _value in opts:
        Flags[flag_of[opt_name]] = True
    # if testing from pytest
    if "pytest" in sys.modules:
        Flags['test'] = True
| CWatM/CWatM | cwatm/management_modules/globals.py | Python | gpl-3.0 | 7,714 | [
"NetCDF"
] | 467ce1b57f6f971bfdf6a20b454b274ab9b9d685c48600f78c671a5460b22fe0 |
# Create your views here.
import json
import os
import re
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from pymatgen import Composition, Element
from matgendb.query_engine import QueryEngine
from matgendb import dbconfig
import bson
import datetime
from django.utils.encoding import force_unicode
from django.core.serializers.json import DjangoJSONEncoder
# Module-level query engine.  Stays None (and every view degrades to an
# error response) unless the MGDB_CONFIG environment variable holds a JSON
# database configuration.
qe = None
mgdb_config = os.environ.get("MGDB_CONFIG", "")
if mgdb_config:
    config = json.loads(mgdb_config)
    # Fall back to anonymous access when no (read-only) credentials resolve.
    if not dbconfig.normalize_auth(config, readonly_first=True):
        config["user"] = config["password"] = None
    qe = QueryEngine(host=config["host"], port=config["port"],
                     database=config["database"], user=config["user"],
                     password=config["password"],
                     collection=config["collection"],
                     aliases_config=config.get("aliases_config", None))
def index(request, rest_query):
    """Serve ``GET /<task_id>[/<dotted.property.path>]`` as JSON.

    An empty path lists every task_id; otherwise the first path segment is
    the task_id and any remaining segments form one dotted property name.
    Errors come back as a 400 with an ``error`` field.
    """
    if request.method != "GET":
        return None  # mirror the original fall-through for non-GET requests
    if qe is None:
        return HttpResponseBadRequest(
            json.dumps({"error": "no database configured"}),
            mimetype="application/json")
    try:
        query_path = rest_query.strip("/")
        if not query_path:
            hits = list(qe.query(criteria={}, properties=["task_id"]))
        else:
            parts = query_path.split("/")
            wanted = [".".join(parts[1:])] if len(parts) > 1 else None
            hits = list(qe.query(criteria={"task_id": int(parts[0])},
                                 properties=wanted))
        return HttpResponse(json.dumps(hits, cls=MongoJSONEncoder),
                            mimetype="application/json")
    except Exception as ex:
        return HttpResponseBadRequest(
            json.dumps({"error": str(ex)}, cls=MongoJSONEncoder),
            mimetype="application/json")
@csrf_exempt
def query(request):
    """POST search endpoint.

    Expects form fields ``criteria`` (either a raw JSON Mongo query, or a
    whitespace-separated list of task ids / formulas / chemical systems),
    ``properties`` ("*" for all), and ``limit``.  Returns a JSON dict with
    ``valid_response``, ``results`` and ``properties``.
    """
    if request.method == 'POST':
        try:
            critstr = request.POST["criteria"].strip()
            # A braced string is taken verbatim as a JSON Mongo query.
            if re.match("^{.*}$", critstr):
                criteria = json.loads(critstr)
            else:
                # Otherwise classify each token: digits -> task id,
                # formula-like -> pretty_formula, dash-joined symbols ->
                # chemical system (symbols normalized and sorted).
                toks = critstr.split()
                tids = []
                formulas = []
                chemsys = []
                for tok in toks:
                    if re.match("^\d+$", tok):
                        tids.append(int(tok))
                    elif re.match("^[\w\(\)]+$", tok):
                        comp = Composition(tok)
                        formulas.append(comp.reduced_formula)
                    elif re.match("^[A-Za-z\-]+$", tok):
                        syms = [Element(sym).symbol
                                for sym in tok.split("-")]
                        syms.sort()
                        chemsys.append("-".join(syms))
                    else:
                        raise ValueError("{} not understood".format(tok))
                criteria = []
                if tids:
                    criteria.append({"task_id": {"$in": tids}})
                if formulas:
                    criteria.append({"pretty_formula": {"$in": formulas}})
                if chemsys:
                    criteria.append({"chemsys": {"$in": chemsys}})
                # NOTE(review): if no token produced a criterion (empty
                # critstr), criteria[0] raises an uncaught IndexError ->
                # HTTP 500 -- confirm whether that is intended.
                criteria = {"$or": criteria} if len(criteria) > 1 else \
                    criteria[0]
            properties = request.POST["properties"]
            if properties == "*":
                properties = None
            else:
                properties = properties.split()
            limit = int(request.POST["limit"])
        except ValueError as ex:
            d = {"valid_response": False,
                 "error": "Bad criteria / properties: {}".format(str(ex))}
            return HttpResponse(
                json.dumps(d), mimetype="application/json")
        results = list(qe.query(criteria=criteria,
                                properties=properties, limit=limit))
        # With "*" requested, report the property names actually returned.
        if properties is None and len(results) > 0:
            properties = list(results[0].keys())
        d = {"valid_response": True, "results": results,
             "properties": properties}
        #print("@@ criteria: {}, result: {}".format(criteria, d))
        return HttpResponse(json.dumps(d, cls=MongoJSONEncoder),
                            mimetype="application/json")
    return HttpResponseBadRequest(
        json.dumps({"error": "Bad response method. POST should be used."},
                   cls=MongoJSONEncoder),
        mimetype="application/json")
class MongoJSONEncoder(DjangoJSONEncoder):
    """JSON encoder that also understands Mongo/BSON-specific objects.

    BSON ObjectIds are rendered as unicode strings, and datetimes via
    ``str()`` instead of DjangoJSONEncoder's ECMA-262 format.
    """
    def default(self, obj):
        # ObjectId has no native JSON representation; use its string form.
        if isinstance(obj, bson.objectid.ObjectId):
            return force_unicode(obj)
        # Plain str() for datetimes, bypassing the Django formatting.
        if isinstance(obj, datetime.datetime):
            return str(obj)
        return super(MongoJSONEncoder, self).default(obj)
| migueldiascosta/pymatgen-db | matgendb/webui/rest/views.py | Python | mit | 4,998 | [
"pymatgen"
] | 8fad959086dd869093bee9e329fbe2d7d77c0e93cf9fc6584b6d4a29898ad806 |
import numpy as np
import os
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
import matplotlib.pyplot as plt
import time
from datetime import datetime
from matplotlib.dates import date2num, num2date
import pyroms
import pyroms_toolbox
import _remapping
class nctime(object):
    """Bare attribute container for netCDF time metadata (long_name, units);
    filled in by remap_bdry and handed to nc_create_roms_bdry_file."""
    pass
def remap_bdry(src_file, src_varname, src_grd, dst_grd, dxy=20, cdepth=0, kk=2, dst_dir='./'):
    """Remap one HYCOM field onto the four open boundaries of a ROMS grid
    and write the result into a new ROMS boundary netCDF file.

    :param src_file: path of the source HYCOM netCDF file
    :param src_varname: one of 'ssh', 'temp' or 'salt'
    :param src_grd: source (HYCOM) grid object
    :param dst_grd: destination ROMS grid object
    :param dxy: horizontal flooding parameter passed to flood_fast
    :param cdepth: flooding depth parameter passed to flood_fast
    :param kk: flooding parameter passed to flood_fast
    :param dst_dir: directory where the boundary file is written

    NOTE(review): the SCRIP weight file names are hard-coded for the
    GLBa0.08 -> ARCTIC2 grid pair -- confirm before reusing on other grids.
    Only the 'ssh' case returns a value (the horizontally remapped field).
    """
    print src_file
    # get time
    nctime.long_name = 'time'
    nctime.units = 'days since 1900-01-01 00:00:00'
    # create boundary file
    # NOTE(review): dst_file[:-3] assumes the source name ends in '.nc'.
    dst_file = src_file.rsplit('/')[-1]
    dst_file = dst_dir + dst_file[:-3] + '_' + src_varname + '_bdry_' + dst_grd.name + '.nc'
    print '\nCreating boundary file', dst_file
    if os.path.exists(dst_file) is True:
        os.remove(dst_file)
    pyroms_toolbox.nc_create_roms_bdry_file(dst_file, dst_grd, nctime)
    # open boundary file
    nc = netCDF.Dataset(dst_file, 'a', format='NETCDF3_64BIT')
    #load var
    cdf = netCDF.Dataset(src_file)
    src_var = cdf.variables[src_varname]
    time = cdf.variables['ocean_time'][0]
    print time
    #get missing value
    spval = src_var._FillValue
    # re-bind src_var to the first time record (a plain masked array)
    src_var = cdf.variables[src_varname][0]
    # determine variable dimension
    ndim = len(src_var.shape)
    # per-variable metadata: names, dimensions, long_names and field
    # attributes for the four boundary variables
    if src_varname == 'ssh':
        pos = 't'
        Cpos = 'rho'
        z = src_grd.z_t
        Mp, Lp = dst_grd.hgrid.mask_rho.shape
        wts_file = 'remap_weights_GLBa0.08_to_ARCTIC2_bilinear_t_to_rho.nc'
        dst_varname = 'zeta'
        dimensions = ('ocean_time', 'eta_rho', 'xi_rho')
        long_name = 'free-surface'
        dst_varname_north = 'zeta_north'
        dimensions_north = ('ocean_time', 'xi_rho')
        long_name_north = 'free-surface north boundary condition'
        field_north = 'zeta_north, scalar, series'
        dst_varname_south = 'zeta_south'
        dimensions_south = ('ocean_time', 'xi_rho')
        long_name_south = 'free-surface south boundary condition'
        field_south = 'zeta_south, scalar, series'
        dst_varname_east = 'zeta_east'
        dimensions_east = ('ocean_time', 'eta_rho')
        long_name_east = 'free-surface east boundary condition'
        field_east = 'zeta_east, scalar, series'
        dst_varname_west = 'zeta_west'
        dimensions_west = ('ocean_time', 'eta_rho')
        long_name_west = 'free-surface west boundary condition'
        field_west = 'zeta_west, scalar, series'
        units = 'meter'
    elif src_varname == 'temp':
        pos = 't'
        Cpos = 'rho'
        z = src_grd.z_t
        Mp, Lp = dst_grd.hgrid.mask_rho.shape
        wts_file = 'remap_weights_GLBa0.08_to_ARCTIC2_bilinear_t_to_rho.nc'
        dst_varname = 'temperature'
        dst_varname_north = 'temp_north'
        dimensions_north = ('ocean_time', 's_rho', 'xi_rho')
        long_name_north = 'potential temperature north boundary condition'
        field_north = 'temp_north, scalar, series'
        dst_varname_south = 'temp_south'
        dimensions_south = ('ocean_time', 's_rho', 'xi_rho')
        long_name_south = 'potential temperature south boundary condition'
        field_south = 'temp_south, scalar, series'
        dst_varname_east = 'temp_east'
        dimensions_east = ('ocean_time', 's_rho', 'eta_rho')
        long_name_east = 'potential temperature east boundary condition'
        field_east = 'temp_east, scalar, series'
        dst_varname_west = 'temp_west'
        dimensions_west = ('ocean_time', 's_rho', 'eta_rho')
        long_name_west = 'potential temperature west boundary condition'
        field_west = 'temp_west, scalar, series'
        units = 'Celsius'
    elif src_varname == 'salt':
        pos = 't'
        Cpos = 'rho'
        z = src_grd.z_t
        Mp, Lp = dst_grd.hgrid.mask_rho.shape
        wts_file = 'remap_weights_GLBa0.08_to_ARCTIC2_bilinear_t_to_rho.nc'
        dst_varname = 'salinity'
        dst_varname_north = 'salt_north'
        dimensions_north = ('ocean_time', 's_rho', 'xi_rho')
        long_name_north = 'salinity north boundary condition'
        field_north = 'salt_north, scalar, series'
        dst_varname_south = 'salt_south'
        dimensions_south = ('ocean_time', 's_rho', 'xi_rho')
        long_name_south = 'salinity south boundary condition'
        field_south = 'salt_south, scalar, series'
        dst_varname_east = 'salt_east'
        dimensions_east = ('ocean_time', 's_rho', 'eta_rho')
        long_name_east = 'salinity east boundary condition'
        field_east = 'salt_east, scalar, series'
        dst_varname_west = 'salt_west'
        dimensions_west = ('ocean_time', 's_rho', 'eta_rho')
        long_name_west = 'salinity west boundary condition'
        field_west = 'salt_west, scalar, series'
        units = 'PSU'
    else:
        raise ValueError, 'Undefined src_varname'
    if ndim == 3:
        # build intermediate zgrid
        zlevel = -z[::-1,0,0]
        nzlevel = len(zlevel)
        dst_zcoord = pyroms.vgrid.z_coordinate(dst_grd.vgrid.h, zlevel, nzlevel)
        dst_grdz = pyroms.grid.ROMS_Grid(dst_grd.name+'_Z', dst_grd.hgrid, dst_zcoord)
    # create variable in boudary file
    print 'Creating variable', dst_varname_north
    nc.createVariable(dst_varname_north, 'f8', dimensions_north, fill_value=spval)
    nc.variables[dst_varname_north].long_name = long_name_north
    nc.variables[dst_varname_north].units = units
    nc.variables[dst_varname_north].field = field_north
    #nc.variables[dst_varname_north]._FillValue = spval
    print 'Creating variable', dst_varname_south
    nc.createVariable(dst_varname_south, 'f8', dimensions_south, fill_value=spval)
    nc.variables[dst_varname_south].long_name = long_name_south
    nc.variables[dst_varname_south].units = units
    nc.variables[dst_varname_south].field = field_south
    #nc.variables[dst_varname_south]._FillValue = spval
    print 'Creating variable', dst_varname_east
    nc.createVariable(dst_varname_east, 'f8', dimensions_east, fill_value=spval)
    nc.variables[dst_varname_east].long_name = long_name_east
    nc.variables[dst_varname_east].units = units
    nc.variables[dst_varname_east].field = field_east
    #nc.variables[dst_varname_east]._FillValue = spval
    print 'Creating variable', dst_varname_west
    nc.createVariable(dst_varname_west, 'f8', dimensions_west, fill_value=spval)
    nc.variables[dst_varname_west].long_name = long_name_west
    nc.variables[dst_varname_west].units = units
    nc.variables[dst_varname_west].field = field_west
    #nc.variables[dst_varname_west]._FillValue = spval
    # remapping
    print 'remapping', dst_varname, 'from', src_grd.name, \
          'to', dst_grd.name
    print 'time =', time
    if ndim == 3:
        # flood the grid
        print 'flood the grid'
        src_varz = pyroms_toolbox.Grid_HYCOM.flood_fast(src_var, src_grd, pos=pos, spval=spval, \
                                dxy=dxy, cdepth=cdepth, kk=kk)
    else:
        src_varz = src_var
    # horizontal interpolation using scrip weights
    print 'horizontal interpolation using scrip weights'
    dst_varz = pyroms.remapping.remap(src_varz, wts_file, spval=spval)
    if ndim == 3:
        # vertical interpolation from standard z level to sigma
        print 'vertical interpolation from standard z level to sigma'
        dst_var_north = pyroms.remapping.z2roms(dst_varz[::-1, Mp-1:Mp, :], \
                          dst_grdz, dst_grd, Cpos=Cpos, spval=spval, \
                          flood=False, irange=(0,Lp), jrange=(Mp-1,Mp))
        dst_var_south = pyroms.remapping.z2roms(dst_varz[::-1, 0:1, :], \
                          dst_grdz, dst_grd, Cpos=Cpos, spval=spval, \
                          flood=False, irange=(0,Lp), jrange=(0,1))
        dst_var_east = pyroms.remapping.z2roms(dst_varz[::-1, :, Lp-1:Lp], \
                          dst_grdz, dst_grd, Cpos=Cpos, spval=spval, \
                          flood=False, irange=(Lp-1,Lp), jrange=(0,Mp))
        dst_var_west = pyroms.remapping.z2roms(dst_varz[::-1, :, 0:1], \
                          dst_grdz, dst_grd, Cpos=Cpos, spval=spval, \
                          flood=False, irange=(0,1), jrange=(0,Mp))
    else:
        # 2-D field: the boundaries are just the edge rows/columns
        dst_var_north = dst_varz[-1, :]
        dst_var_south = dst_varz[0, :]
        dst_var_east = dst_varz[:, -1]
        dst_var_west = dst_varz[:, 0]
    # write data in destination file
    print 'write data in destination file'
    nc.variables['ocean_time'][0] = time
    nc.variables[dst_varname_north][0] = np.squeeze(dst_var_north)
    nc.variables[dst_varname_south][0] = np.squeeze(dst_var_south)
    nc.variables[dst_varname_east][0] = np.squeeze(dst_var_east)
    nc.variables[dst_varname_west][0] = np.squeeze(dst_var_west)
    # close file
    nc.close()
    cdf.close()
    # only the free-surface case hands the remapped field back to the caller
    if src_varname == 'ssh':
        return dst_varz
| dcherian/pyroms | examples/Arctic_HYCOM/remap_bdry.py | Python | bsd-3-clause | 8,826 | [
"NetCDF"
] | 857a214a5c3e3e9dba44582745896bad18424162ac468f38c7458745e83c9f88 |
"""
====================================================================
K-means clustering and vector quantization (:mod:`scipy.cluster.vq`)
====================================================================
Provides routines for k-means clustering, generating code books
from k-means models, and quantizing vectors by comparing them with
centroids in a code book.
.. autosummary::
:toctree: generated/
whiten -- Normalize a group of observations so each feature has unit variance
vq -- Calculate code book membership of a set of observation vectors
kmeans -- Performs k-means on a set of observation vectors forming k clusters
kmeans2 -- A different implementation of k-means with more methods
-- for initializing centroids
Background information
======================
The k-means algorithm takes as input the number of clusters to
generate, k, and a set of observation vectors to cluster. It
returns a set of centroids, one for each of the k clusters. An
observation vector is classified with the cluster number or
centroid index of the centroid closest to it.
A vector v belongs to cluster i if it is closer to centroid i than
any other centroids. If v belongs to i, we say centroid i is the
dominating centroid of v. The k-means algorithm tries to
minimize distortion, which is defined as the sum of the squared distances
between each observation vector and its dominating centroid.
The minimization is achieved by iteratively reclassifying
the observations into clusters and recalculating the centroids until
a configuration is reached in which the centroids are stable. One can
also define a maximum number of iterations.
Since vector quantization is a natural application for k-means,
information theory terminology is often used. The centroid index
or cluster index is also referred to as a "code" and the table
mapping codes to centroids and vice versa is often referred as a
"code book". The result of k-means, a set of centroids, can be
used to quantize vectors. Quantization aims to find an encoding of
vectors that reduces the expected distortion.
All routines expect obs to be a M by N array where the rows are
the observation vectors. The codebook is a k by N array where the
i'th row is the centroid of code word i. The observation vectors
and centroids have the same feature dimension.
As an example, suppose we wish to compress a 24-bit color image
(each pixel is represented by one byte for red, one for blue, and
one for green) before sending it over the web. By using a smaller
8-bit encoding, we can reduce the amount of data by two
thirds. Ideally, the colors for each of the 256 possible 8-bit
encoding values should be chosen to minimize distortion of the
color. Running k-means with k=256 generates a code book of 256
codes, which fills up all possible 8-bit sequences. Instead of
sending a 3-byte value for each pixel, the 8-bit centroid index
(or code word) of the dominating centroid is transmitted. The code
book is also sent over the wire so each 8-bit code can be
translated back to a 24-bit pixel value representation. If the
image of interest was of an ocean, we would expect many 24-bit
blues to be represented by 8-bit codes. If it was an image of a
human face, more flesh tone colors would be represented in the
code book.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from collections import deque
from scipy._lib._util import _asarray_validated
from scipy._lib.six import xrange
from scipy.spatial.distance import cdist
from . import _vq
__docformat__ = 'restructuredtext'
__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2']
class ClusterError(Exception):
    """Raised by the k-means routines on unrecoverable conditions, e.g. an
    empty cluster when ``missing='raise'``."""
    pass
def whiten(obs, check_finite=True):
    """
    Normalize a group of observations on a per feature basis.

    Each column (feature) of `obs` is divided by its standard deviation
    across all rows (observations) so that every feature has unit
    variance.  Columns whose standard deviation is zero are left
    unchanged, and a RuntimeWarning is emitted for them.

    Parameters
    ----------
    obs : ndarray
        Each row of the array is an observation; the columns are the
        features seen during each observation.
    check_finite : bool, optional
        Whether to check that the input contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the input contains infinities or
        NaNs.  Default: True

    Returns
    -------
    result : ndarray
        `obs` with each column scaled by its standard deviation.
    """
    obs = _asarray_validated(obs, check_finite=check_finite)
    scale = obs.std(axis=0)
    constant = scale == 0
    if constant.any():
        # Avoid a divide-by-zero: constant columns pass through unchanged.
        scale[constant] = 1.0
        warnings.warn("Some columns have standard deviation zero. "
                      "The values of these columns will not change.",
                      RuntimeWarning)
    return obs / scale
def vq(obs, code_book, check_finite=True):
    """
    Assign codes from a code book to observations.

    Each row of the M by N `obs` array is compared against the centroids
    in `code_book` and receives the index of the closest one.  Features
    should normally have unit variance (see `whiten`).

    Parameters
    ----------
    obs : ndarray
        Each row is an observation; columns are the features.
    code_book : ndarray
        Usually generated by k-means; each row holds one code (centroid).
    check_finite : bool, optional
        Whether to check that the inputs contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) on infinities or NaNs.  Default: True

    Returns
    -------
    code : ndarray
        Length-M array with the code book index of each observation.
    dist : ndarray
        Distance between each observation and its nearest code.
    """
    obs = _asarray_validated(obs, check_finite=check_finite)
    code_book = _asarray_validated(code_book, check_finite=check_finite)
    common = np.common_type(obs, code_book)
    # Single/double precision goes through the fast C implementation;
    # everything else falls back to the pure-Python version.
    if np.issubdtype(common, np.float64) or np.issubdtype(common, np.float32):
        return _vq.vq(obs.astype(common, copy=False),
                      code_book.astype(common, copy=False))
    return py_vq(obs, code_book, check_finite=False)
def py_vq(obs, code_book, check_finite=True):
    """Pure-Python version of the vq algorithm.

    Computes the Euclidean distance between every observation and every
    code in `code_book`, and assigns each observation to its nearest code.

    Parameters
    ----------
    obs : ndarray
        Rank-2 array, one observation per row (a rank-1 array is treated
        as a column of scalar observations).
    code_book : ndarray
        Code book in the same layout as `obs`, with the same number of
        features (columns).
    check_finite : bool, optional
        Whether to check that the inputs contain only finite numbers.
        Default: True

    Returns
    -------
    code : ndarray
        code[i] is the index of the code closest to observation i.
    min_dist : ndarray
        min_dist[i] is the distance from observation i to its code.

    Notes
    -----
    Slower than the C implementation, but works for all input dtypes.
    """
    obs = _asarray_validated(obs, check_finite=check_finite)
    code_book = _asarray_validated(code_book, check_finite=check_finite)
    if obs.ndim != code_book.ndim:
        raise ValueError("Observation and code_book should have the same rank")
    if obs.ndim == 1:
        # Promote scalar observations to single-feature column vectors.
        obs = obs[:, np.newaxis]
        code_book = code_book[:, np.newaxis]
    dists = cdist(obs, code_book)
    codes = dists.argmin(axis=1)
    return codes, dists[np.arange(len(codes)), codes]
# py_vq2 was equivalent to py_vq
# NOTE(review): numpy removed np.deprecate in NumPy 2.0; on newer NumPy this
# alias raises AttributeError at import time -- confirm the supported NumPy
# range before upgrading.
py_vq2 = np.deprecate(py_vq, old_name='py_vq2', new_name='py_vq')
def _kmeans(obs, guess, thresh=1e-5):
""" "raw" version of k-means.
Returns
-------
code_book
the lowest distortion codebook found.
avg_dist
the average distance a observation is from a code in the book.
Lower means the code_book matches the data better.
See Also
--------
kmeans : wrapper around k-means
Examples
--------
Note: not whitened in this example.
>>> from numpy import array
>>> from scipy.cluster.vq import _kmeans
>>> features = array([[ 1.9,2.3],
... [ 1.5,2.5],
... [ 0.8,0.6],
... [ 0.4,1.8],
... [ 1.0,1.0]])
>>> book = array((features[0],features[2]))
>>> _kmeans(features,book)
(array([[ 1.7 , 2.4 ],
[ 0.73333333, 1.13333333]]), 0.40563916697728591)
"""
code_book = np.asarray(guess)
diff = np.inf
prev_avg_dists = deque([diff], maxlen=2)
while diff > thresh:
# compute membership and distances between obs and code_book
obs_code, distort = vq(obs, code_book, check_finite=False)
prev_avg_dists.append(distort.mean(axis=-1))
# recalc code_book as centroids of associated obs
code_book, has_members = _vq.update_cluster_means(obs, obs_code,
code_book.shape[0])
code_book = code_book[has_members]
diff = prev_avg_dists[0] - prev_avg_dists[1]
return code_book, prev_avg_dists[1]
def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True):
    """
    Performs k-means on a set of observation vectors forming k clusters.

    The k-means algorithm adjusts the classification of the observations
    into clusters and updates the cluster centroids until the position of
    the centroids is stable over successive iterations.  Stability is
    measured by comparing the absolute value of the change in the average
    Euclidean distance between the observations and their centroids
    against a threshold.

    Parameters
    ----------
    obs : ndarray
        Each row of the M by N array is an observation vector.  The
        columns are the features seen during each observation.
        The features must be whitened first with the `whiten` function.
    k_or_guess : int or ndarray
        The number of centroids to generate.  A code is assigned to
        each centroid, which is also the row index of the centroid
        in the code_book matrix generated.
        The initial k centroids are chosen by randomly selecting
        observations from the observation matrix.  Alternatively,
        passing a k by N array specifies the initial k centroids.
    iter : int, optional
        The number of times to run k-means, returning the codebook
        with the lowest distortion.  Ignored if initial centroids are
        specified with an array.  This is not the number of iterations
        of the k-means algorithm itself.
    thresh : float, optional
        Terminates the k-means algorithm if the change in distortion
        since the last iteration is less than or equal to thresh.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers.  Disabling may give a performance gain, but may result
        in problems (crashes, non-termination) on infinities or NaNs.
        Default: True

    Returns
    -------
    codebook : ndarray
        A k by N array of k centroids, representing the lowest
        distortion seen, not necessarily the global minimum.
    distortion : float
        The mean (non-squared) Euclidean distance between the
        observations passed and the centroids generated.

    See Also
    --------
    kmeans2 : a different implementation of k-means clustering.
    whiten : must be called prior to passing an observation matrix.

    Examples
    --------
    >>> from scipy.cluster.vq import kmeans, whiten
    >>> features = np.array([[1.9, 2.3], [1.5, 2.5], [0.8, 0.6]])
    >>> codebook, distortion = kmeans(whiten(features), 2)
    """
    obs = _asarray_validated(obs, check_finite=check_finite)
    if iter < 1:
        raise ValueError("iter must be at least 1, got %s" % iter)

    # Determine whether a count (scalar) or an initial guess (array) was
    # passed; an explicit guess makes the run deterministic and `iter` moot.
    if not np.isscalar(k_or_guess):
        guess = _asarray_validated(k_or_guess, check_finite=check_finite)
        if guess.size < 1:
            raise ValueError("Asked for 0 clusters. Initial book was %s" %
                             guess)
        return _kmeans(obs, guess, thresh=thresh)

    # k_or_guess is a scalar, now verify that it's an integer
    k = int(k_or_guess)
    if k != k_or_guess:
        raise ValueError("If k_or_guess is a scalar, it must be an integer.")
    if k < 1:
        raise ValueError("Asked for %d clusters." % k)

    # initialize best distance value to a large value
    best_dist = np.inf
    # `range` replaces the removed scipy._lib.six.xrange shim; behavior is
    # identical (simple iteration) on both Python 2 and 3.
    for i in range(iter):
        # the initial code book is randomly selected from observations
        guess = _kpoints(obs, k)
        book, dist = _kmeans(obs, guess, thresh=thresh)
        if dist < best_dist:
            best_book = book
            best_dist = dist
    return best_book, best_dist
def _kpoints(data, k):
"""Pick k points at random in data (one row = one observation).
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 are assumed to describe one
dimensional data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
"""
idx = np.random.choice(data.shape[0], size=k, replace=False)
return data[idx]
def _krandinit(data, k):
    """Returns k samples of a random variable which parameters depend on data.
    More precisely, it returns k observations sampled from a Gaussian random
    variable which mean and covariances are the one estimated from data.
    Parameters
    ----------
    data : ndarray
        Expect a rank 1 or 2 array. Rank 1 are assumed to describe one
        dimensional data, rank 2 multidimensional data, in which case one
        row is one observation.
    k : int
        Number of samples to generate.
    """
    mu = data.mean(axis=0)
    if data.ndim == 1:
        # 1-D data: scale standard normal draws by the data's std deviation.
        cov = np.cov(data)
        x = np.random.randn(k)
        x *= np.sqrt(cov)
    elif data.shape[1] > data.shape[0]:
        # initialize when the covariance matrix is rank deficient
        # (more features than observations): sample in the row space of the
        # centered data via its SVD instead of a Cholesky factor.
        _, s, vh = np.linalg.svd(data - mu, full_matrices=False)
        x = np.random.randn(k, s.size)
        sVh = s[:, None] * vh / np.sqrt(data.shape[0] - 1)
        x = x.dot(sVh)
    else:
        # full-rank case: color standard normal draws with the Cholesky
        # factor of the estimated covariance.
        cov = np.atleast_2d(np.cov(data, rowvar=False))
        # k rows, d cols (one row = one obs)
        # Generate k sample of a random variable ~ Gaussian(mu, cov)
        x = np.random.randn(k, mu.size)
        x = x.dot(np.linalg.cholesky(cov).T)
    x += mu
    return x
# Dispatch table mapping kmeans2's `minit` keyword to an init routine.
_valid_init_meth = {'random': _krandinit, 'points': _kpoints}
def _missing_warn():
"""Print a warning when called."""
warnings.warn("One of the clusters is empty. "
"Re-run kmeans with a different initialization.")
def _missing_raise():
"""raise a ClusterError when called."""
raise ClusterError("One of the clusters is empty. "
"Re-run kmeans with a different initialization.")
# Dispatch table mapping kmeans2's `missing` keyword to the empty-cluster handler.
_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
            missing='warn', check_finite=True):
    """
    Classify a set of observations into k clusters using the k-means algorithm.

    The algorithm attempts to minimize the Euclidean distance between
    observations and centroids. Several initialization methods are
    included.

    Parameters
    ----------
    data : ndarray
        A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
        'M' array of 'M' one-dimensional observations.
    k : int or ndarray
        The number of clusters to form as well as the number of
        centroids to generate. If `minit` initialization string is
        'matrix', or if a ndarray is given instead, it is
        interpreted as initial cluster to use instead.
    iter : int, optional
        Number of iterations of the k-means algorithm to run. Note
        that this differs in meaning from the iters parameter to
        the kmeans function.
    thresh : float, optional
        (not used yet)
    minit : str, optional
        Method for initialization. Available methods are 'random',
        'points', and 'matrix':
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        'points': choose k observations (rows) at random from data for
        the initial centroids.
        'matrix': interpret the k parameter as a k by M (or length k
        array for one-dimensional data) array of initial centroids.
    missing : str, optional
        Method to deal with empty clusters. Available methods are
        'warn' and 'raise':
        'warn': give a warning and continue.
        'raise': raise an ClusterError and terminate the algorithm.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True

    Returns
    -------
    centroid : ndarray
        A 'k' by 'N' array of centroids found at the last iteration of
        k-means.
    label : ndarray
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    """
    if int(iter) < 1:
        raise ValueError("Invalid iter (%s), "
                         "must be a positive integer." % iter)
    try:
        miss_meth = _valid_miss_meth[missing]
    except KeyError:
        raise ValueError("Unknown missing method %r" % (missing,))

    data = _asarray_validated(data, check_finite=check_finite)
    if data.ndim == 1:
        d = 1
    elif data.ndim == 2:
        d = data.shape[1]
    else:
        raise ValueError("Input of rank > 2 is not supported.")

    if data.size < 1:
        raise ValueError("Empty input is not supported.")

    # If k is not a single value it should be compatible with data's shape
    if minit == 'matrix' or not np.isscalar(k):
        code_book = np.array(k, copy=True)
        if data.ndim != code_book.ndim:
            raise ValueError("k array doesn't match data rank")
        nc = len(code_book)
        if data.ndim > 1 and code_book.shape[1] != d:
            raise ValueError("k array doesn't match data dimension")
    else:
        nc = int(k)
        if nc < 1:
            raise ValueError("Cannot ask kmeans2 for %d clusters"
                             " (k was %s)" % (nc, k))
        elif nc != k:
            warnings.warn("k was not an integer, was converted.")
        try:
            init_meth = _valid_init_meth[minit]
        except KeyError:
            raise ValueError("Unknown init method %r" % (minit,))
        else:
            code_book = init_meth(data, k)

    # `range` replaces the removed scipy._lib.six.xrange shim; behavior is
    # identical (simple iteration) on both Python 2 and 3.
    for _ in range(iter):
        # Compute the nearest neighbor for each obs using the current code book
        label = vq(data, code_book)[0]
        # Update the code book by computing centroids
        new_code_book, has_members = _vq.update_cluster_means(data, label, nc)
        if not has_members.all():
            miss_meth()
            # Set the empty clusters to their previous positions
            new_code_book[~has_members] = code_book[~has_members]
        code_book = new_code_book

    return code_book, label
| gfyoung/scipy | scipy/cluster/vq.py | Python | bsd-3-clause | 24,205 | [
"Gaussian"
] | a912401d2998e32ef0661828a2213cba32478f0d9bc8f3de3d35a2cbc5d09994 |
#!/usr/bin/env python
# To do:
# - Let user specify the parser class on the command line.
# - Let user specify a sequence file to BLAST on the net.
# - Script should help debug connection to NCBI website.
from __future__ import print_function
import os
import re
import sys
import getopt
import traceback
from Bio import ParserSupport
from Bio.Blast import NCBIStandalone, NCBIWWW
CONTEXT = 5 # show 5 lines of context around the error in the format file
USAGE = """%s [-h] [-v] [-p] [-n] [-o] <testfile>
This script helps diagnose problems with the BLAST parser.
OPTIONS:
-h Show this help file.
-v Verbose output.
-p <testfile> is a protein file.
-n <testfile> is a nucleotide file.
-o <testfile> is a BLAST output file.
""" % sys.argv[0]
class DebuggingConsumer:
    """Proxy consumer that forwards every parser event to a wrapped
    consumer while counting how many data lines have been handled.

    ``linenum`` holds the number of data events seen so far, which lets
    the caller pinpoint the input line that triggered a parser failure.
    """
    def __init__(self, decorated=None):
        # Fall back to a do-nothing consumer when none is supplied.
        if decorated is None:
            decorated = ParserSupport.AbstractConsumer()
        self.linenum = 0
        self.decorated = decorated
        self._prev_attr = None
    def _forward_section(self):
        # start_*/end_* events carry no data and do not advance linenum.
        getattr(self.decorated, self._prev_attr)()
    def _forward_line(self, data):
        # Ordinary events consume exactly one line of input.
        getattr(self.decorated, self._prev_attr)(data)
        self.linenum += 1
    def __getattr__(self, attr):
        # Remember which event was requested, then hand back the matching
        # forwarder; section markers take no argument, data events take one.
        self._prev_attr = attr
        if attr.startswith('start_') or attr.startswith('end_'):
            return self._forward_section
        return self._forward_line
def chomp(line):
    """Strip the trailing run of CR/LF characters from *line*."""
    # Equivalent to re.sub(r"[\r\n]*$", "", line): both remove only the
    # maximal trailing run of '\r'/'\n' characters.
    return line.rstrip("\r\n")
def choose_parser(outfile):
    """Pick a BLAST parser class based on the contents of *outfile*.

    HTML markup selects the WWW parser; PSI-BLAST round/convergence
    markers select the PSI-BLAST parser; anything else falls back to the
    plain standalone BLAST parser.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original opened the file and never closed it).
    with open(outfile) as handle:
        ldata = handle.read().lower()
    if "<html>" in ldata or "<pre>" in ldata:
        return NCBIWWW.BlastParser
    # NOTE(review): the original tested for "results from round)" with a
    # trailing ')' -- preserved as-is, but real PSI-BLAST output reads
    # "Results from round N"; confirm against actual output files.
    if "results from round)" in ldata or "converged!" in ldata:
        return NCBIStandalone.PSIBlastParser
    return NCBIStandalone.BlastParser
def test_blast_output(outfile):
    """Parse *outfile* and, on failure, try to pinpoint where the parser broke.

    Returns 0 on success, 1 when the failing frame cannot be attributed to
    the parser, 3 when the failure cannot be reproduced on a second pass.

    NOTE(review): after the except block this function reads
    sys.exc_info() at function scope -- that relies on Python 2 exception
    semantics (in Python 3 the exception is cleared when the except block
    exits); confirm before porting.  Several open() handles below are
    never explicitly closed.
    """
    # Try to auto-detect the format
    # `if 1:` leaves the manual parser selection branch permanently dead
    # (see the "NOT IMPLEMENTED YET" message below).
    if 1:
        print("No parser specified. I'll try to choose one for you based")
        print("on the format of the output file.")
        print("")
        parser_class = choose_parser(outfile)
        print("It looks like you have given output that should be parsed")
        print("with %s.%s. If I'm wrong, you can select the correct parser" %\
              (parser_class.__module__, parser_class.__name__))
        print("on the command line of this script (NOT IMPLEMENTED YET).")
    else:
        # Dead code: kept as a placeholder for user-specified parsers.
        raise NotImplementedError
        parser_class = NCBIWWW.BlastParser
        print("Using %s to parse the file." % parser_class.__name__)
    print("")
    scanner_class = parser_class()._scanner.__class__
    consumer_class = parser_class()._consumer.__class__
    # parser_class()._scanner.feed(
    # open(outfile), ParserSupport.TaggingConsumer())
    print("I'm going to run the data through the parser to see what happens...")
    parser = parser_class()
    try:
        rec = parser.parse_file(outfile)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception as x:
        # Save the message now; the traceback is re-read further below.
        exception_info = str(x)
        print("Dang, the parsing failed.")
    else:
        print("Parsing succeeded, no problems detected.")
        print("However, you should check to make sure the following scanner")
        print("trace looks reasonable.")
        print("")
        parser_class()._scanner.feed(
            open(outfile), ParserSupport.TaggingConsumer())
        return 0
    print("")
    print("Alright. Let me try and figure out where in the parser the")
    print("problem occurred...")
    # Walk the traceback from the innermost frame outwards and attribute
    # the failure to either the consumer or the scanner class.
    etype, value, tb = sys.exc_info()
    ftb = traceback.extract_tb(tb)
    ftb.reverse()
    class_found = None
    for err_file, err_line, err_function, err_text in ftb:
        if hasattr(consumer_class, err_function):
            class_found = consumer_class
            break
        elif hasattr(scanner_class, err_function):
            class_found = scanner_class
            break
    if class_found is None:
        print("Sorry, I could not pinpoint the error to the parser.")
        print("There's nothing more I can tell you.")
        print("Here's the traceback:")
        traceback.print_exception(etype, value, tb)
        return 1
    else:
        print("I found the problem in %s.%s.%s, line %d:" % \
              (class_found.__module__, class_found.__name__,
               err_function, err_line))
        print(" %s" % err_text)
        print("This output caused an %s to be raised with the" % etype)
        print("information %r." % exception_info)
        print("")
    print("Let me find the line in the file that triggers the problem...")
    # Re-run the scan with a counting consumer to locate the failing line.
    parser = parser_class()
    scanner, consumer = parser._scanner, parser._consumer
    consumer = DebuggingConsumer(consumer)
    try:
        scanner.feed(open(outfile), consumer)
    except etype as x:
        pass
    else:
        print("Odd, the exception disappeared! What happened?")
        return 3
    print("It's caused by line %d:" % consumer.linenum)
    lines = open(outfile).readlines()
    # Show CONTEXT lines of context on either side of the offending line.
    start, end = consumer.linenum - CONTEXT, consumer.linenum + CONTEXT + 1
    if start < 0:
        start = 0
    if end > len(lines):
        end = len(lines)
    ndigits = len(str(end))
    for linenum in range(start, end):
        line = chomp(lines[linenum])
        if linenum == consumer.linenum:
            prefix = '*'
        else:
            prefix = ' '
        s = "%s%*d %s" % (prefix, ndigits, linenum, line)
        s = s[:80]
        print(s)
    print("")
    if class_found == scanner_class:
        print("Problems in %s are most likely caused by changed formats." % \
              class_found.__name__)
        print("You can start to fix this by going to line %d in module %s." % \
              (err_line, class_found.__module__))
        print("Perhaps the scanner needs to be made more lenient by accepting")
        print("the changed format?")
        print("")
        if VERBOSITY <= 0:
            print("For more help, you can run this script in verbose mode")
            print("to see detailed information about how the scanner")
            print("identifies each line.")
        else:
            print("OK, let's see what the scanner's doing!")
            print("")
            print("*" * 20 + " BEGIN SCANNER TRACE " + "*" * 20)
            try:
                parser_class()._scanner.feed(
                    open(outfile), ParserSupport.TaggingConsumer())
            except etype as x:
                pass
            print("*" * 20 + " END SCANNER TRACE " + "*" * 20)
            print("")
    elif class_found == consumer_class:
        print("Problems in %s can be caused by two things:" % \
              class_found.__name__)
        print(" - The format of the line parsed by '%s' changed." % \
              err_function)
        print(" - The scanner misidentified the line.")
        print("Check to make sure '%s' should parse the line:" % \
              err_function)
        s = " %s" % chomp(lines[consumer.linenum])
        s = s[:80]
        print(s)
        print("If so, debug %s.%s. Otherwise, debug %s." % \
              (class_found.__name__, err_function, scanner_class.__name__))
# Global verbosity level; incremented once per -v flag on the command line.
VERBOSITY = 0
if __name__ == '__main__':
    try:
        optlist, args = getopt.getopt(sys.argv[1:], "hpnov")
    except getopt.error as x:
        sys.stderr.write("%s\n" % x)
        sys.exit(-1)
    # Exactly one positional argument: the file to diagnose.
    if len(args) != 1:
        sys.stderr.write(USAGE)
        sys.exit(-1)
    TESTFILE, = args
    if not os.path.exists(TESTFILE):
        sys.stderr.write("I could not find file: %s\n" % TESTFILE)
        sys.exit(-1)
    PROTEIN = NUCLEOTIDE = OUTPUT = None
    for opt, arg in optlist:
        if opt == '-h':
            print(USAGE)
            sys.exit(0)
        elif opt == '-p':
            PROTEIN = 1
        elif opt == '-n':
            NUCLEOTIDE = 1
        elif opt == '-o':
            OUTPUT = 1
        elif opt == '-v':
            VERBOSITY += 1
    # When zero or several of -p/-n/-o are given, silently default to -o
    # (the strict check below was deliberately disabled).
    if len([x for x in (PROTEIN, NUCLEOTIDE, OUTPUT) if x is not None]) != 1:
        OUTPUT = 1
        # sys.stderr.write("Exactly one of -p, -n, or -o should be specified.\n")
        # sys.exit(-1)
    if PROTEIN or NUCLEOTIDE:
        sys.stderr.write("-p and -n not implemented yet\n")
        sys.exit(-1)
    test_blast_output(TESTFILE)
| updownlife/multipleK | dependencies/biopython-1.65/Scripts/debug/debug_blast_parser.py | Python | gpl-2.0 | 8,273 | [
"BLAST"
] | d0128606c7f09ac82cb0ae25bf3ded274c17443a44c133e9828e9fa3da800f7e |
import numpy as np
from ase.data import atomic_numbers, chemical_symbols
from ase.units import Bohr
from gpaw.setup import Setups
from gpaw.xc import XC
from gpaw.mpi import world
# Rare-gas van der Waals radii in Angstrom (source encoded in the name:
# presumably Bondi, J. Phys. Chem. 68 (1964) -- confirm against the paper).
Bondi64jpc_vdWradii = {
    'He': 1.40,
    'Ne': 1.54,
    'Ar': 1.88,
    'Kr': 2.02,
    'Xe': 2.16
}
# Van der Waals Radii after
# Pekka Pyykko, Chem. Rev. 97 (1997) 597-636
# Table 2, units Angstrom
Pyykko97cr_vdWradii = {
    'Ne': 1.55,
    'Ar': 1.88,
    'Kr': 2.00,
    'Xe': 2.18,
    'Rn': 2.24
}
# Combined lookup table: Bondi values extended with Rn from Pyykko.
# Copy the Bondi dict so that adding 'Rn' does not mutate it (the
# original aliased the two names and silently modified the Bondi table).
collected_vdWradii = dict(Bondi64jpc_vdWradii)
collected_vdWradii['Rn'] = Pyykko97cr_vdWradii['Rn']
def vdWradii(symbols, xc):
    """Find the elements van der Waals radius.

    Method proposed in:
    Tkatchenko and Scheffler PRL 102 (2009) 073005

    symbols: sequence of chemical symbols (one per atom; duplicates allowed).
    xc: exchange-correlation functional, either an XC object or its name.

    The returned radii are given in Angstroms, one per entry in *symbols*.
    """
    # Reference elements: the rare gases with tabulated vdW radii.
    Z_rare_gas = [atomic_numbers[symbol] for symbol in Bondi64jpc_vdWradii]
    Z_rare_gas.append(atomic_numbers['Rn'])
    Z_rare_gas.sort()
    if isinstance(xc, str):
        xc = XC(xc)
    def get_density(Z):
        """Return density and radial grid from setup."""
        # load setup
        setups = Setups([Z], 'paw', {}, 2,
                        xc, world)
        setup = setups[0].data
        # create density
        n_g = setup.nc_g.copy()
        for f, phi_g in zip(setup.f_j, setup.phi_jg):
            n_g += f * phi_g**2
        return n_g, setup.rgd.r_g
    radii = []
    radius = {}  # cache: symbol -> radius, so each element is computed once
    for symbol in symbols:
        Z = atomic_numbers[symbol]
        if symbol not in radius:
            # find the rare gas of the elements row
            # (smallest rare-gas Z that is >= this element's Z)
            Zrg = None
            for Zr in Z_rare_gas:
                if Zrg is None and Z <= Zr:
                    Zrg = Zr
            n_g, r_g = get_density(Zrg)
            # find density at R
            R = collected_vdWradii[chemical_symbols[Zrg]] / Bohr
            # NOTE(review): assumes r_g[0] < R and that the density is
            # monotonically decreasing past ncut -- holds for physical
            # atomic densities, but n-1 would be -1 otherwise.
            n = 0
            while r_g[n] < R:
                n += 1
            # linear interpolation
            ncut = (n_g[n-1] +
                    (n_g[n] - n_g[n-1]) * (R - r_g[n-1]) / (r_g[n] - r_g[n-1]))
            # print "Z, Zrg, ncut", Z, Zrg, ncut
            # find own R at this density
            n_g, r_g = get_density(Z)
            n = 0
            while n_g[n] > ncut:
                n += 1
            # linear interpolation
            R = (r_g[n-1] +
                 (r_g[n] - r_g[n-1]) * (ncut - n_g[n-1]) / (n_g[n] - n_g[n-1]))
            radius[symbol] = R * Bohr  # convert back to Angstrom
        radii.append(radius[symbol])
    return radii
| robwarm/gpaw-symm | gpaw/analyse/vdwradii.py | Python | gpl-3.0 | 2,542 | [
"ASE",
"GPAW"
] | 135a191acc7ab9540cfe9f1b1e96ca161d316eaad921a544dd5913aca8eb6056 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Uses graphlib and topiclib to run partial sLDA
Copyright (C) 2011 Joseph Perla
GNU Affero General Public License. See <http://www.gnu.org/licenses/>.
"""
global final_output
from itertools import chain,izip
from functools import partial
try:
import numpypy as np
except ImportError:
import numpy as np
np.seterr(invalid='raise')
import graphlib
import topiclib
class PartialSupervisedLDAVars(graphlib.GraphVars):
    """
    Same as Supervised LDA,
    but only use Ks of the topics to multiply by eta to get y, the response variable.

    Holds the variational parameters (gamma, phi), model parameters
    (alpha, beta, eta, sigma_squared) and the training data for the
    partially supervised sLDA model.  Python 2 code (uses xrange and
    print statements).
    """
    def __init__(self, data=None, Ks=None, Kb=None):
        # data: optional 2-tuple (documents, y); Ks: number of supervised
        # topics (those tied to the response via eta); Kb: number of
        # background topics.
        self.documents = None
        self.alpha = None
        self.beta = None
        self.gamma = None
        self.phi = None
        self.eta = None
        self.sigma_squared = None
        self.is_initialized = False
        if data is not None:
            self.set_documents(data)
        if Ks is not None:
            self.initialize(Ks, Kb)
    def set_documents(self, data):
        """Accepts a 2-tuple of arrays of dictionaries.
        Each element in 2-tuple is an array of documents.
        Each document is a dictionary (sparse vector).
        Saves the data locally. Computes vocabulary.
        """
        documents,y = data
        assert len(documents) == len(y)
        self.documents = documents
        self.y = np.array(y)
        assert len(self.y.shape) == 1
        # Vocabulary size is inferred from the largest word id seen.
        self.vocab = max(chain(*[[w[0] for w in d] for d in self.documents]))
        self.W = self.vocab + 1
        self.D = len(documents)
        self.optimize_documents()
    def iterdocs(self):
        """Documents are computed in E-step in turn. Yields generator of documents.
        In this case, 2-tuples of (document,comment).
        """
        return izip(self.documents, self.y)
    def optimize_documents(self):
        """Converts the local documents from sparse representation into normal vector."""
        # OPTIMIZATION: turn all documents into arrays
        self.documents = [topiclib.doc_to_array(d) for d in self.documents]
    def initialize(self, Ks, Kb):
        """Accepts K number of topics in document.
        Initializes all of the hidden variable arrays now that it knows dimensions
        of topics, vocabulary, etc.
        """
        assert self.documents is not None
        assert Ks is not None
        assert Kb is not None
        K = Ks + Kb
        # give at least more documents than topics
        # so that it's not singular
        assert self.D > K
        self.K = K
        D = self.D
        W = self.W
        # "it suffices to fix alpha to uniform 1/K"
        # initialize to ones so that the topics are more evenly distributed
        # good for small datasets
        self.alpha = np.ones((K,)) * (1.0 / K)
        # Initialize the variational distribution q(beta|lambda)
        self.beta = topiclib.initialize_beta(K, W)
        document_Nds = self.num_words_per(self.documents)
        # One phi matrix per document: (words in doc) x K, uniform start.
        self.phi = [(np.ones((document_Nds[d], K))*(1.0/K)) for d in xrange(D)]
        self.gamma = np.ones((D, K)) * (1.0 / K)
        graphlib.initialize_random(self.gamma)
        # Only the Ks supervised topics get regression coefficients.
        self.eta = graphlib.random_normal(0, 2.0, (Ks,))
        self.sigma_squared = 0.5
        print 'eta start: {0}'.format(self.eta)
        self.is_initialized = True
    def to_dict(self):
        # Snapshot of the learned parameters (used for saving/inspection).
        return { 'eta': self.eta, 'sigma_squared': self.sigma_squared,
                 'beta': self.beta, 'gamma': self.gamma, 'phi': self.phi, }
def partial_slda_e_step(global_iterations, v):
    """Run the variational E-step over every (document, response) pair.

    Returns the total number of local iterations performed, as reported
    by the last per-document E-step call.
    """
    local_i = 0
    doc_index = 0
    for document, response in v.iterdocs():
        local_i = topiclib.partial_slda_E_step_for_doc(global_iterations,
                                                       local_i,
                                                       doc_index, document,
                                                       response,
                                                       v.alpha, v.beta,
                                                       v.gamma[doc_index],
                                                       v.phi[doc_index],
                                                       v.eta,
                                                       v.sigma_squared)
        doc_index += 1
    return local_i
def partial_slda_m_step(var):
    """Variational M-step: re-estimate global parameters beta, eta, sigma^2
    from the per-document variational parameters held in *var*."""
    ### M-step: ###
    print 'updating betas..'
    # update betaD for documents first
    topiclib.lda_recalculate_beta(var.documents, var.beta, var.phi)
    print 'eta sigma...'
    # update response variable gaussian global parameters
    # (eta is updated in place; the new noise variance is returned)
    var.sigma_squared = topiclib.partial_slda_recalculate_eta_sigma(var.eta, var.y, var.phi)
import slda
# Driver assembled from the generic variational-EM loop in graphlib:
# the E/M steps defined above, the global ELBO from topiclib, and the
# progress-printing callback borrowed from the full sLDA module.
run_partial_slda = partial(graphlib.run_variational_em,
                            e_step_func=partial_slda_e_step,
                            m_step_func=partial_slda_m_step,
                            global_elbo_func=topiclib.partial_slda_global_elbo,
                            print_func=slda.slda_print_func)
if __name__=='__main__':
    # documents are 2-tuples of document, comment
    # NOTE(review): noisy_test_data and test_data below are small inline
    # fixtures that are no longer used (the run uses the dataset read
    # from disk further down); kept for manual experimentation.
    noisy_test_data = ([
        [(1,1), (2,1), (3,3), (5,2),],
        [(0,1), (2,3), (3,1), (4,1),],
        [(1,2), (2,1), (4,2), (5,4),],
        [(5,1), (6,4), (7,1), (9,1),],
        [(5,2), (6,1), (7,2), (9,4),],
        [(5,1), (6,2), (7,2), (8,1),],
    ],[
        1.7,
        2.0,
        1.2,
        4.8,
        5,
        4.2
    ])
    test_data = (
        [
            [(0,1), (2,2), (3,1), (4,1),],
            [(0,1), (2,1), (3,2), (4,3),],
            [(0,1), (2,3), (3,3), (4,1),],
            [(5,1), (6,2), (8,1), (9,3),],
            [(5,1), (6,2), (8,1), (9,1),],
            [(5,2), (6,1), (8,1), (9,1),],
        ],
        [
            1.7,
            2.0,
            1.2,
            4.8,
            5,
            4.2,
        ])
    #var = SupervisedLDAVars(test_data, K=3)
    #var = SupervisedLDAVars(noisy_test_data, K=3)
    # use my big generated dataset
    # (n is the full dataset size; only the first 100 rows are used here)
    n = 9994
    labeled_documents = topiclib.read_sparse('data/synthbigtlc/labeled.dat')[:100]
    y = np.loadtxt('data/synthbigtlc/yL.npy')[:100]
    real_data = (labeled_documents, y)
    var = PartialSupervisedLDAVars(real_data, Ks=5, Kb=20)
    try:
        output = run_partial_slda(var)
    except Exception,e:
        # Drop into the post-mortem debugger instead of crashing, so the
        # variational state can be inspected interactively.
        print e
        import pdb; pdb.post_mortem()
| jperla/happynews | model/partial_slda.py | Python | agpl-3.0 | 6,446 | [
"Gaussian"
] | 6d0c141ae96a0762dac95367476667c7597136f5625571694a666e22c642605c |
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .theplatform import ThePlatformIE
from .adobepass import AdobePassIE
from ..compat import compat_urllib_parse_urlparse
from ..utils import (
find_xpath_attr,
lowercase_escape,
smuggle_url,
unescapeHTML,
update_url_query,
int_or_none,
)
class NBCIE(AdobePassIE):
    """Extractor for nbc.com video pages (ThePlatform-backed, with
    optional Adobe Pass TV-provider authentication)."""
    _VALID_URL = r'https?://(?:www\.)?nbc\.com/(?:[^/]+/)+(?P<id>n?\d+)'
    _TESTS = [
        {
            'url': 'http://www.nbc.com/the-tonight-show/video/jimmy-fallon-surprises-fans-at-ben-jerrys/2848237',
            'info_dict': {
                'id': '2848237',
                'ext': 'mp4',
                'title': 'Jimmy Fallon Surprises Fans at Ben & Jerry\'s',
                'description': 'Jimmy gives out free scoops of his new "Tonight Dough" ice cream flavor by surprising customers at the Ben & Jerry\'s scoop shop.',
                'timestamp': 1424246400,
                'upload_date': '20150218',
                'uploader': 'NBCU-COM',
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.nbc.com/the-tonight-show/episodes/176',
            'info_dict': {
                'id': '176',
                'ext': 'flv',
                'title': 'Ricky Gervais, Steven Van Zandt, ILoveMakonnen',
                'description': 'A brand new episode of The Tonight Show welcomes Ricky Gervais, Steven Van Zandt and ILoveMakonnen.',
            },
            'skip': '404 Not Found',
        },
        {
            'url': 'http://www.nbc.com/saturday-night-live/video/star-wars-teaser/2832821',
            'info_dict': {
                'id': '2832821',
                'ext': 'mp4',
                'title': 'Star Wars Teaser',
                'description': 'md5:0b40f9cbde5b671a7ff62fceccc4f442',
                'timestamp': 1417852800,
                'upload_date': '20141206',
                'uploader': 'NBCU-COM',
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
            'skip': 'Only works from US',
        },
        {
            # This video has expired but with an escaped embedURL
            'url': 'http://www.nbc.com/parenthood/episode-guide/season-5/just-like-at-home/515',
            'only_matching': True,
        },
        {
            # HLS streams requires the 'hdnea3' cookie
            'url': 'http://www.nbc.com/Kings/video/goliath/n1806',
            'info_dict': {
                'id': '101528f5a9e8127b107e98c5e6ce4638',
                'ext': 'mp4',
                'title': 'Goliath',
                'description': 'When an unknown soldier saves the life of the King\'s son in battle, he\'s thrust into the limelight and politics of the kingdom.',
                'timestamp': 1237100400,
                'upload_date': '20090315',
                'uploader': 'NBCU-COM',
            },
            'params': {
                'skip_download': True,
            },
            'skip': 'Only works from US',
        }
    ]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Defer actual media extraction to the ThePlatform extractor; this
        # method only resolves the ThePlatform URL for the page.
        info = {
            '_type': 'url_transparent',
            'ie_key': 'ThePlatform',
            'id': video_id,
        }
        video_data = None
        # Newer pages embed a JSON PRELOAD blob with entity metadata.
        preload = self._search_regex(
            r'PRELOAD\s*=\s*({.+})', webpage, 'preload data', default=None)
        if preload:
            preload_data = self._parse_json(preload, video_id)
            path = compat_urllib_parse_urlparse(url).path.rstrip('/')
            entity_id = preload_data.get('xref', {}).get(path)
            video_data = preload_data.get('entities', {}).get(entity_id)
        if video_data:
            query = {
                'mbr': 'true',
                'manifest': 'm3u',
            }
            video_id = video_data['guid']
            title = video_data['title']
            # Entitled content requires an Adobe Pass authentication token.
            if video_data.get('entitlement') == 'auth':
                resource = self._get_mvpd_resource(
                    'nbcentertainment', title, video_id,
                    video_data.get('vChipRating'))
                query['auth'] = self._extract_mvpd_auth(
                    url, video_id, 'nbcentertainment', resource)
            theplatform_url = smuggle_url(update_url_query(
                'http://link.theplatform.com/s/NnzsPC/media/guid/2410887629/' + video_id,
                query), {'force_smil_url': True})
            info.update({
                'id': video_id,
                'title': title,
                'url': theplatform_url,
                'description': video_data.get('description'),
                'keywords': video_data.get('keywords'),
                'season_number': int_or_none(video_data.get('seasonNumber')),
                'episode_number': int_or_none(video_data.get('episodeNumber')),
                'series': video_data.get('showName'),
            })
        else:
            # Legacy pages: scrape the ThePlatform URL out of the markup.
            theplatform_url = unescapeHTML(lowercase_escape(self._html_search_regex(
                [
                    r'(?:class="video-player video-player-full" data-mpx-url|class="player" src)="(.*?)"',
                    r'<iframe[^>]+src="((?:https?:)?//player\.theplatform\.com/[^"]+)"',
                    r'"embedURL"\s*:\s*"([^"]+)"'
                ],
                webpage, 'theplatform url').replace('_no_endcard', '').replace('\\/', '/')))
            if theplatform_url.startswith('//'):
                theplatform_url = 'http:' + theplatform_url
            info['url'] = smuggle_url(theplatform_url, {'source_url': url})
        return info
class NBCSportsVPlayerIE(InfoExtractor):
    """Extractor for the vplayer.nbcsports.com embedded player; resolves
    the page to a ThePlatform URL."""
    _VALID_URL = r'https?://vplayer\.nbcsports\.com/(?:[^/]+/)+(?P<id>[0-9a-zA-Z_]+)'
    _TESTS = [{
        'url': 'https://vplayer.nbcsports.com/p/BxmELC/nbcsports_share/select/9CsDKds0kvHI',
        'info_dict': {
            'id': '9CsDKds0kvHI',
            'ext': 'flv',
            'description': 'md5:df390f70a9ba7c95ff1daace988f0d8d',
            'title': 'Tyler Kalinoski hits buzzer-beater to lift Davidson',
            'timestamp': 1426270238,
            'upload_date': '20150313',
            'uploader': 'NBCU-SPORTS',
        }
    }, {
        'url': 'http://vplayer.nbcsports.com/p/BxmELC/nbc_embedshare/select/_hqLjQ95yx8Z',
        'only_matching': True,
    }]
    @staticmethod
    def _extract_url(webpage):
        # Find a vplayer iframe embedded in an arbitrary page; returns the
        # iframe URL, or None when no embed is present.
        iframe_m = re.search(
            r'<iframe[^>]+src="(?P<url>https?://vplayer\.nbcsports\.com/[^"]+)"', webpage)
        if iframe_m:
            return iframe_m.group('url')
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The og:video meta tag holds the ThePlatform stream URL.
        theplatform_url = self._og_search_video_url(webpage)
        return self.url_result(theplatform_url, 'ThePlatform')
class NBCSportsIE(InfoExtractor):
    """Extractor for nbcsports.com article pages; delegates to the
    embedded NBCSportsVPlayer."""
    # Does not include https because its certificate is invalid
    _VALID_URL = r'https?://(?:www\.)?nbcsports\.com//?(?:[^/]+/)+(?P<id>[0-9a-z-]+)'
    _TEST = {
        'url': 'http://www.nbcsports.com//college-basketball/ncaab/tom-izzo-michigan-st-has-so-much-respect-duke',
        'info_dict': {
            'id': 'PHJSaFWbrTY9',
            'ext': 'flv',
            'title': 'Tom Izzo, Michigan St. has \'so much respect\' for Duke',
            'description': 'md5:ecb459c9d59e0766ac9c7d5d0eda8113',
            'uploader': 'NBCU-SPORTS',
            'upload_date': '20150330',
            'timestamp': 1427726529,
        }
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Hand the embedded vplayer iframe URL over to NBCSportsVPlayerIE.
        return self.url_result(
            NBCSportsVPlayerIE._extract_url(webpage), 'NBCSportsVPlayer')
class CSNNEIE(InfoExtractor):
    """Extractor for csnne.com (Comcast SportsNet New England) video
    pages; resolves to a ThePlatform stream URL."""
    _VALID_URL = r'https?://(?:www\.)?csnne\.com/video/(?P<id>[0-9a-z-]+)'
    _TEST = {
        'url': 'http://www.csnne.com/video/snc-evening-update-wright-named-red-sox-no-5-starter',
        'info_dict': {
            'id': 'yvBLLUgQ8WU0',
            'ext': 'mp4',
            'title': 'SNC evening update: Wright named Red Sox\' No. 5 starter.',
            'description': 'md5:1753cfee40d9352b19b4c9b3e589b9e3',
            'timestamp': 1459369979,
            'upload_date': '20160330',
            'uploader': 'NBCU-SPORTS',
        }
    }
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The stream URL lives in the twitter:player:stream meta tag.
        return {
            '_type': 'url_transparent',
            'ie_key': 'ThePlatform',
            'url': self._html_search_meta('twitter:player:stream', webpage),
            'display_id': display_id,
        }
class NBCNewsIE(ThePlatformIE):
    """Extractor for nbcnews.com, today.com and msnbc.com video pages.

    Two URL families: legacy numeric /video/ ids served via an XML feed,
    and mpx-id pages resolved through ThePlatform's feed service.
    """
    _VALID_URL = r'''(?x)https?://(?:www\.)?(?:nbcnews|today|msnbc)\.com/
        (?:video/.+?/(?P<id>\d+)|
        ([^/]+/)*(?:.*-)?(?P<mpx_id>[^/?]+))
        '''
    _TESTS = [
        {
            'url': 'http://www.nbcnews.com/video/nbc-news/52753292',
            'md5': '47abaac93c6eaf9ad37ee6c4463a5179',
            'info_dict': {
                'id': '52753292',
                'ext': 'flv',
                'title': 'Crew emerges after four-month Mars food study',
                'description': 'md5:24e632ffac72b35f8b67a12d1b6ddfc1',
            },
        },
        {
            'url': 'http://www.nbcnews.com/watch/nbcnews-com/how-twitter-reacted-to-the-snowden-interview-269389891880',
            'md5': 'af1adfa51312291a017720403826bb64',
            'info_dict': {
                'id': 'p_tweet_snow_140529',
                'ext': 'mp4',
                'title': 'How Twitter Reacted To The Snowden Interview',
                'description': 'md5:65a0bd5d76fe114f3c2727aa3a81fe64',
                'uploader': 'NBCU-NEWS',
                'timestamp': 1401363060,
                'upload_date': '20140529',
            },
        },
        {
            'url': 'http://www.nbcnews.com/feature/dateline-full-episodes/full-episode-family-business-n285156',
            'md5': 'fdbf39ab73a72df5896b6234ff98518a',
            'info_dict': {
                'id': '529953347624',
                'ext': 'mp4',
                'title': 'FULL EPISODE: Family Business',
                'description': 'md5:757988edbaae9d7be1d585eb5d55cc04',
            },
            'skip': 'This page is unavailable.',
        },
        {
            'url': 'http://www.nbcnews.com/nightly-news/video/nightly-news-with-brian-williams-full-broadcast-february-4-394064451844',
            'md5': '73135a2e0ef819107bbb55a5a9b2a802',
            'info_dict': {
                'id': 'nn_netcast_150204',
                'ext': 'mp4',
                'title': 'Nightly News with Brian Williams Full Broadcast (February 4)',
                'description': 'md5:1c10c1eccbe84a26e5debb4381e2d3c5',
                'timestamp': 1423104900,
                'uploader': 'NBCU-NEWS',
                'upload_date': '20150205',
            },
        },
        {
            'url': 'http://www.nbcnews.com/business/autos/volkswagen-11-million-vehicles-could-have-suspect-software-emissions-scandal-n431456',
            'md5': 'a49e173825e5fcd15c13fc297fced39d',
            'info_dict': {
                'id': 'x_lon_vwhorn_150922',
                'ext': 'mp4',
                'title': 'Volkswagen U.S. Chief:\xa0 We Have Totally Screwed Up',
                'description': 'md5:c8be487b2d80ff0594c005add88d8351',
                'upload_date': '20150922',
                'timestamp': 1442917800,
                'uploader': 'NBCU-NEWS',
            },
        },
        {
            'url': 'http://www.today.com/video/see-the-aurora-borealis-from-space-in-stunning-new-nasa-video-669831235788',
            'md5': '118d7ca3f0bea6534f119c68ef539f71',
            'info_dict': {
                'id': 'tdy_al_space_160420',
                'ext': 'mp4',
                'title': 'See the aurora borealis from space in stunning new NASA video',
                'description': 'md5:74752b7358afb99939c5f8bb2d1d04b1',
                'upload_date': '20160420',
                'timestamp': 1461152093,
                'uploader': 'NBCU-NEWS',
            },
        },
        {
            'url': 'http://www.msnbc.com/all-in-with-chris-hayes/watch/the-chaotic-gop-immigration-vote-314487875924',
            'md5': '6d236bf4f3dddc226633ce6e2c3f814d',
            'info_dict': {
                'id': 'n_hayes_Aimm_140801_272214',
                'ext': 'mp4',
                'title': 'The chaotic GOP immigration vote',
                'description': 'The Republican House votes on a border bill that has no chance of getting through the Senate or signed by the President and is drawing criticism from all sides.',
                'thumbnail': r're:^https?://.*\.jpg$',
                'timestamp': 1406937606,
                'upload_date': '20140802',
                'uploader': 'NBCU-NEWS',
            },
        },
        {
            'url': 'http://www.nbcnews.com/watch/dateline/full-episode--deadly-betrayal-386250819952',
            'only_matching': True,
        },
        {
            # From http://www.vulture.com/2016/06/letterman-couldnt-care-less-about-late-night.html
            'url': 'http://www.nbcnews.com/widget/video-embed/701714499682',
            'only_matching': True,
        },
    ]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        if video_id is not None:
            # Legacy numeric id: the site exposes an XML metadata feed.
            all_info = self._download_xml('http://www.nbcnews.com/id/%s/displaymode/1219' % video_id, video_id)
            info = all_info.find('video')
            return {
                'id': video_id,
                'title': info.find('headline').text,
                'ext': 'flv',
                'url': find_xpath_attr(info, 'media', 'type', 'flashVideo').text,
                'description': info.find('caption').text,
                'thumbnail': find_xpath_attr(info, 'media', 'type', 'thumbnail').text,
            }
        else:
            # "feature" and "nightly-news" pages use theplatform.com
            video_id = mobj.group('mpx_id')
            webpage = self._download_webpage(url, video_id)
            filter_param = 'byId'
            # The page embeds its video metadata as JSON under one of
            # several variable names depending on site/page vintage.
            bootstrap_json = self._search_regex(
                [r'(?m)(?:var\s+(?:bootstrapJson|playlistData)|NEWS\.videoObj)\s*=\s*({.+});?\s*$',
                 r'videoObj\s*:\s*({.+})', r'data-video="([^"]+)"',
                 r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);'],
                webpage, 'bootstrap json', default=None)
            if bootstrap_json:
                bootstrap = self._parse_json(
                    bootstrap_json, video_id, transform_source=unescapeHTML)
                info = None
                if 'results' in bootstrap:
                    info = bootstrap['results'][0]['video']
                elif 'video' in bootstrap:
                    info = bootstrap['video']
                elif 'msnbcVideoInfo' in bootstrap:
                    info = bootstrap['msnbcVideoInfo']['meta']
                elif 'msnbcThePlatform' in bootstrap:
                    info = bootstrap['msnbcThePlatform']['videoPlayer']['video']
                else:
                    info = bootstrap
                # Prefer the mpx guid/id from the metadata over the URL slug.
                if 'guid' in info:
                    video_id = info['guid']
                    filter_param = 'byGuid'
                elif 'mpxId' in info:
                    video_id = info['mpxId']
            return {
                '_type': 'url_transparent',
                'id': video_id,
                # http://feed.theplatform.com/f/2E2eJC/nbcnews also works
                'url': update_url_query('http://feed.theplatform.com/f/2E2eJC/nnd_NBCNews', {filter_param: video_id}),
                'ie_key': 'ThePlatformFeed',
            }
class NBCOlympicsIE(InfoExtractor):
    """Extractor for nbcolympics.com video pages; rewrites the embedded
    player iframe URL into a ThePlatform URL."""
    _VALID_URL = r'https?://www\.nbcolympics\.com/video/(?P<id>[a-z-]+)'
    _TEST = {
        # Geo-restricted to US
        'url': 'http://www.nbcolympics.com/video/justin-roses-son-leo-was-tears-after-his-dad-won-gold',
        'md5': '54fecf846d05429fbaa18af557ee523a',
        'info_dict': {
            'id': 'WjTBzDXx5AUq',
            'display_id': 'justin-roses-son-leo-was-tears-after-his-dad-won-gold',
            'ext': 'mp4',
            'title': 'Rose\'s son Leo was in tears after his dad won gold',
            'description': 'Olympic gold medalist Justin Rose gets emotional talking to the impact his win in men\'s golf has already had on his children.',
            'timestamp': 1471274964,
            'upload_date': '20160815',
            'uploader': 'NBCU-SPORTS',
        },
    }
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Video metadata is embedded in the page's Drupal settings JSON.
        drupal_settings = self._parse_json(self._search_regex(
            r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);',
            webpage, 'drupal settings'), display_id)
        iframe_url = drupal_settings['vod']['iframe_url']
        # The olympics player iframe is a thin proxy for ThePlatform.
        theplatform_url = iframe_url.replace(
            'vplayer.nbcolympics.com', 'player.theplatform.com')
        return {
            '_type': 'url_transparent',
            'url': theplatform_url,
            'ie_key': ThePlatformIE.ie_key(),
            'display_id': display_id,
        }
| Tithen-Firion/youtube-dl | youtube_dl/extractor/nbc.py | Python | unlicense | 17,491 | [
"Brian"
] | 73739aaec86ee3fcfa7dfd6d28317efedf32e97f3562ef48a42bc29df137c606 |
import numpy as np
import sklearn.base
class AppxGaussianProcessRegressor(sklearn.base.BaseEstimator,
                                   sklearn.base.RegressorMixin):
    """Approximate Gaussian process regression (GPR).

    Based on applying the Woodbury matrix identity to GPR according to
    https://github.com/chengsoonong/mclass-sky/issues/182

    Parameters
    ----------
    alpha : float, optional (default: 1e-10)
        Value added to the diagonal of the kernel matrix during fitting.
        Larger values correspond to increased noise level in the
        observations and reduce potential numerical issue during fitting.

    Attributes
    ----------
    weights_ : ndarray of shape (n_features,)
        Posterior mean of the linear weights, set by `fit`.
    uncertainty_ : ndarray of shape (n_features, n_features)
        Posterior covariance of the weights, set by `fit`.
    """

    def __init__(self, alpha=1e-10):
        # Store the constructor argument under its own name so that
        # get_params()/clone() (which introspect __init__) work; the
        # original only set ``alpha_`` which broke those sklearn APIs.
        self.alpha = alpha
        # Backwards-compatible alias kept for existing callers.
        self.alpha_ = alpha

    def fit(self, X, y):
        """Fit the approximate GP on training data X (n, d) and targets y (n,).

        Returns ``self`` per the scikit-learn estimator contract.
        """
        assert len(X.shape) == 2 and len(y.shape) == 1
        assert X.shape[0] == y.shape[0]
        XTX_alph = X.T @ X / self.alpha_
        XTy_alph = X.T @ y / self.alpha_
        eye = np.eye(XTX_alph.shape[0])
        # Woodbury identity: invert a (d, d) matrix instead of (n, n).
        woodbury = np.linalg.inv(eye + XTX_alph)
        self.uncertainty_ = eye - XTX_alph + XTX_alph @ woodbury @ XTX_alph
        self.weights_ = XTy_alph - XTX_alph @ woodbury @ XTy_alph
        return self  # sklearn convention: fit returns the estimator

    def predict(self, X, return_std=False, return_cov=False):
        """Predict targets for X; optionally return std or full covariance."""
        y_mean = X @ self.weights_
        if return_cov:
            # Predictive covariance of y_mean = X @ w is X Cov(w) X^T,
            # an (n, n) matrix.  The original computed X.T @ U @ X, which
            # has mismatched dimensions for non-square X.
            y_covariance = X @ self.uncertainty_ @ X.T
            return y_mean, y_covariance
        elif return_std:
            # Diagonal of X @ U @ X.T, vectorized instead of a Python
            # loop over rows (same values as the original per-row form).
            y_var = np.einsum('ij,jk,ik->i', X, self.uncertainty_, X)
            return y_mean, np.sqrt(y_var)
        else:
            return y_mean
| alasdairtran/mclearn | projects/jakub/appx_gaussian_processes/appx_gp.py | Python | bsd-3-clause | 2,116 | [
"Gaussian"
] | 2c1a2611088162a4637e0ad7a10c382f79c63f31707399b2d854c02454fc58da |
# Copyright (c) 2010 Howard Hughes Medical Institute.
# All rights reserved.
# Use is subject to Janelia Farm Research Campus Software Copyright 1.1 license terms.
# http://license.janelia.org/license/jfrc_copyright_1_1.html
from __future__ import with_statement # This isn't required in Python 2.6
import neuroptikon
import wx.glcanvas
from pydispatch import dispatcher
import osg, osgDB, osgGA, osgManipulator, osgText, osgViewer
from math import log, pi
import os.path, platform, sys, cPickle
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
import xml.etree.ElementTree as ElementTree
from gettext import gettext
from pick_handler import PickHandler
from dragger_cull_callback import DraggerCullCallback
from network.object import Object
from network.pathway import Pathway # pylint: disable=E0611,F0401
from network.arborization import Arborization
from network.stimulus import Stimulus
from network.neuron import Neuron
from network.object_list import ObjectList
from network.synapse import Synapse
from visible import Visible
import layout as layout_module
from shape import Shape
from library.texture import Texture
# Navigation modes
# Integer codes for the mouse-drag camera interaction mode of a Display.
# (The consuming code is outside this chunk; presumably PANNING drags the
# view and ROTATING orbits it -- confirm in Display's event handlers.)
PANNING_MODE = 0
ROTATING_MODE = 1
# TODO: DRAG_SELECTING_MODE = 2
# TODO: other modes?
class Display(wx.glcanvas.GLCanvas):
def __init__(self, parent, network = None, wxId = wx.ID_ANY):
    """
    Displays allow the visualization of networks.

    Each display can visualize any number of objects from a single network.  By default all objects added to the network are visualized but this can be disabled by setting the display's autoVisualize attribute to False.

    Multiple displays can visualize the same network at the same time.  By default the selection is synchronized between displays so selecting an object in one display will select the corresponding object in all other displays.  This can be disabled by calling setSynchronizeDisplays(False) on the network.

    You should never create an instance of this class directly.  Instances are automatically created when you open a new window either via File --> New Network or by calling displayNetwork() in a console or script.
    """

    # Request an OpenGL canvas: RGBA, double-buffered, 16-bit depth and
    # (optionally, per user preference) 4x multi-sampling for anti-aliasing.
    style = wx.WANTS_CHARS | wx.FULL_REPAINT_ON_RESIZE | wx.HSCROLL | wx.VSCROLL
    attribList = [wx.glcanvas.WX_GL_RGBA, wx.glcanvas.WX_GL_DOUBLEBUFFER]
    if neuroptikon.config.ReadBool('Smooth All Objects') and hasattr(wx.glcanvas, 'WX_GL_SAMPLE_BUFFERS'):
        attribList += [wx.glcanvas.WX_GL_SAMPLE_BUFFERS, 1, wx.glcanvas.WX_GL_SAMPLES, 4]
    attribList += [wx.glcanvas.WX_GL_DEPTH_SIZE, 16, 0, 0]
    wx.glcanvas.GLCanvas.__init__(self, parent, wxId, attribList = attribList, pos = wx.DefaultPosition, size = (200,200), style = style, name = "")
    self.glContext = wx.glcanvas.GLContext(self)

    # Attach to the network (if any) so it can keep this display in sync.
    self._name = None
    self.network = network
    if self.network is not None:
        self.network.addDisplay(self)
    self.displayRules = []
    self.autoVisualize = True

    # --- Visible bookkeeping and selection state ---
    self.visibles = {}                      # maps client keys to lists of Visible instances
    self._visibleIds = {}                   # maps display ids to Visible instances
    self.selectedVisibles = set()
    self.highlightedVisibles = set()
    self.animatedVisibles = set()
    self.selectConnectedVisibles = True

    # --- Appearance / labeling preferences ---
    self._showRegionNames = True
    self._showNeuronNames = False
    self._showNeuronNamesOnSelection = False
    self._printNeuronNamesOnSelection = False
    self._hideUnselectedNeurons = False
    self._hideSynapsesOnConnections = True
    self._labelsFloatOnTop = False
    self._showFlow = False
    self._highlightOnlyWithinSelection = False
    self._useGhosts = True
    self._ghostingOpacity = 0.15
    self._primarySelectionColor = (0, 0, 1, .4)
    self._secondarySelectionColor = (0, 0, 1, .2)
    self._visiblesSelectionColors = {}
    self._selectionHighlightDepth = 3

    # --- View state (2D ortho vs. 3D perspective) ---
    self.viewDimensions = 2
    self.console = None
    self._recomputeBounds = True
    self._recomputeBoundsScheduled = False
    # World-space bounding box of all visibles; refreshed by computeVisiblesBound().
    self.visiblesMin = [-100, -100, -100]
    self.visiblesMax = [100, 100, 100]
    self.visiblesCenter = [0, 0, 0]
    self.visiblesSize = [200, 200, 200]
    self._navigationMode = PANNING_MODE
    self._previous3DNavMode = ROTATING_MODE
    # 2D orthographic camera parameters; the 'xy' plane maps world x/y to screen x/y.
    self.orthoCenter = (0, 0)
    self.orthoViewPlane = 'xy'
    self.orthoXPlane = 0
    self.orthoYPlane = 1
    self.orthoZoom = 0
    self.zoomScale = 1

    # --- OpenSceneGraph scene graph root ---
    self.rootNode = osg.MatrixTransform()
    self.rootStateSet = self.rootNode.getOrCreateStateSet()
    self.rootNode.setMatrix(osg.Matrixd.identity())
    self.rootStateSet.setMode(osg.GL_NORMALIZE, osg.StateAttribute.ON )

    # Smaller wheel scale on Windows -- presumably wx reports larger wheel
    # deltas there; TODO confirm.
    if platform.system() == 'Windows':
        self.scrollWheelScale = 0.1
    else:
        self.scrollWheelScale = 1

    # TODO: only if pref set?
    # Not in osg 3.2.1?
    # osg.DisplaySettings.instance().setNumMultiSamples(4)

    # --- Viewer, camera manipulator and picking ---
    self.trackball = osgGA.TrackballManipulator()
    self._previousTrackballMatrix = None
    self._previousTrackballCenter = None
    self._pickHandler = PickHandler(self)
    self.viewer = osgViewer.Viewer()
    self.viewer.setThreadingModel(osgViewer.ViewerBase.SingleThreaded) # TODO: investigate multithreaded options
    self.viewer.addEventHandler(osgViewer.StatsHandler())
    self.viewer.setSceneData(self.rootNode)
    self.viewer.addEventHandler(self._pickHandler)
    light = self.viewer.getLight()
    light.setAmbient(osg.Vec4f(0.4, 0.4, 0.4, 1))
    light.setDiffuse(osg.Vec4f(0.5, 0.5, 0.5, 1))
    self.viewer.setLight(light)
    self._first3DView = True

    # Background color comes from the user's preferences (default 75% gray).
    self.backgroundColor = None
    clearColor = (neuroptikon.config.ReadFloat("Color/Background/Red", 0.75), \
                  neuroptikon.config.ReadFloat("Color/Background/Green", 0.75), \
                  neuroptikon.config.ReadFloat("Color/Background/Blue", 0.75), \
                  neuroptikon.config.ReadFloat("Color/Background/Alpha", 0.0))
    self.setBackgroundColor(clearColor)

    # --- wx event wiring ---
    self.Bind(wx.EVT_SIZE, self.onSize)
    self.Bind(wx.EVT_PAINT, self.onPaint)
    self.Bind(wx.EVT_ERASE_BACKGROUND, self.onEraseBackground)
    self.Bind(wx.EVT_KEY_DOWN, self.onKeyDown)
    self.Bind(wx.EVT_KEY_UP, self.onKeyUp)
    self.Bind(wx.EVT_MOUSE_EVENTS, self.onMouseEvent)   # TODO: factor this out into individual events
    self.Bind(wx.EVT_MOUSEWHEEL, self.onMouseWheel)
    self.Bind(wx.EVT_SCROLLWIN, self.onScroll)

    # --- Dragger (osgManipulator) state ---
    self.dragSelection = None
    self.draggerLOD = None
    self.simpleDragger = None
    self.compositeDragger = None
    self.activeDragger = None
    self.commandMgr = None
    self.draggerScale = 1.0
    self.draggerOffset = (0.0, 0.0, 0.0)

    # --- Selection behavior flags ---
    self.selectionShouldExtend = False
    self.findShortestPath = False
    self._selectedShortestPath = False
    self._useMouseOverSelecting = False
    self.hoverSelect = True
    self.hoverSelecting = False
    self.hoverSelected = False  # set to True if the current selection was made by hovering

    # Embed the OSG viewer in this wx canvas.
    width, height = self.GetClientSize()
    self.graphicsWindow = self.viewer.setUpViewerAsEmbeddedInWindow(0, 0, width, height)
    self.SetDropTarget(DisplayDropTarget(self))
    self._nextUniqueId = -1
    self._animationTimer = wx.Timer(self)
    self.Bind(wx.EVT_TIMER, self.onAnimate, self._animationTimer)
    self._suppressRefresh = False

    # --- Flow-animation GLSL shaders and their default uniforms ---
    # The shader sources live next to the app when running from source,
    # otherwise in the install root.
    if neuroptikon.runningFromSource:
        shaderDir = os.path.join(neuroptikon.rootDir, 'display')
    else:
        shaderDir = neuroptikon.rootDir
    with open(os.path.join(shaderDir, 'flow_shader.vert')) as f:
        flowVertexShader = f.read()
    with open(os.path.join(shaderDir, 'flow_shader.frag')) as f:
        flowFragmentShader = f.read()
    self.flowProgram = osg.Program()
    self.flowProgram.addShader(osg.Shader(osg.Shader.VERTEX, flowVertexShader))
    self.flowProgram.addShader(osg.Shader(osg.Shader.FRAGMENT, flowFragmentShader))
    # 'To' and 'From' variants cover flow animation in each direction along a path.
    self.defaultFlowColor = (1.0, 1.0, 1.0, 1.0)
    self.defaultFlowToColorUniform = osg.Uniform('flowToColor', osg.Vec4f(*self.defaultFlowColor))
    self.rootStateSet.addUniform(self.defaultFlowToColorUniform)
    self.defaultFlowFromColorUniform = osg.Uniform('flowFromColor', osg.Vec4f(*self.defaultFlowColor))
    self.rootStateSet.addUniform(self.defaultFlowFromColorUniform)
    self.defaultFlowSpacing = 0.4   # Distance between pulses
    self.defaultFlowToSpacingUniform = osg.Uniform('flowToSpacing', self.defaultFlowSpacing)
    self.rootStateSet.addUniform(self.defaultFlowToSpacingUniform)
    self.defaultFlowFromSpacingUniform = osg.Uniform('flowFromSpacing', self.defaultFlowSpacing)
    self.rootStateSet.addUniform(self.defaultFlowFromSpacingUniform)
    self.defaultFlowSpeed = 0.15    # Pulse speed
    self.defaultFlowToSpeedUniform = osg.Uniform('flowToSpeed', self.defaultFlowSpeed)
    self.rootStateSet.addUniform(self.defaultFlowToSpeedUniform)
    self.defaultFlowFromSpeedUniform = osg.Uniform('flowFromSpeed', self.defaultFlowSpeed)
    self.rootStateSet.addUniform(self.defaultFlowFromSpeedUniform)
    self.defaultFlowSpread = 0.9    # The pulse should cover 50% of the path
    self.defaultFlowToSpreadUniform = osg.Uniform('flowToSpread', self.defaultFlowSpread)
    self.rootStateSet.addUniform(self.defaultFlowToSpreadUniform)
    self.defaultFlowFromSpreadUniform = osg.Uniform('flowFromSpread', self.defaultFlowSpread)
    self.rootStateSet.addUniform(self.defaultFlowFromSpreadUniform)

    # React to our own selection/flow changes (e.g. to start/stop animation).
    dispatcher.connect(self._onSelectionOrShowFlowChanged, ('set', 'selection'), self)
    dispatcher.connect(self._onSelectionOrShowFlowChanged, ('set', 'showFlow'), self)

    self.lastUsedLayout = None
    self._closing = False
    self._visibleBeingAdded = None

    # Compass overlay (created on demand by setShowCompass()).
    self.compassCamera = None
    self._compassDrawables = {}
def _fromXMLElement(self, xmlElement):
    # Restore this display's state from a <Display> XML element previously
    # produced by _toXMLElement().  Refreshing is suppressed until the whole
    # element has been processed.  For several elements both a capitalized
    # and a lowercase spelling are tried -- presumably to support an older
    # file-format variant; TODO confirm which versions used which spelling.
    self._suppressRefresh = True

    name = xmlElement.findtext('Name')
    if name is not None:
        self.setName(name)

    # Background color.
    colorElement = xmlElement.find('BackgroundColor')
    if colorElement is None:
        colorElement = xmlElement.find('backgroundColor')
    if colorElement is not None:
        red = float(colorElement.get('r'))
        green = float(colorElement.get('g'))
        blue = float(colorElement.get('b'))
        alpha = float(colorElement.get('a'))
        self.setBackgroundColor((red, green, blue, alpha))

    # Default flow-animation appearance.
    flowAppearanceElement = xmlElement.find('DefaultFlowAppearance')
    if flowAppearanceElement is None:
        flowAppearanceElement = xmlElement.find('defaultFlowAppearance')
    if flowAppearanceElement is not None:
        colorElement = flowAppearanceElement.find('Color')
        if colorElement is None:
            colorElement = flowAppearanceElement.find('color')
        if colorElement is not None:
            red = float(colorElement.get('r'))
            green = float(colorElement.get('g'))
            blue = float(colorElement.get('b'))
            alpha = float(colorElement.get('a'))
            # NOTE(review): alpha is parsed but not passed to setDefaultFlowColor -- confirm intended.
            self.setDefaultFlowColor((red, green, blue))
        if flowAppearanceElement.get('spacing') is not None:
            self.setDefaultFlowSpacing(float(flowAppearanceElement.get('spacing')))
        if flowAppearanceElement.get('speed') is not None:
            self.setDefaultFlowSpeed(float(flowAppearanceElement.get('speed')))
        if flowAppearanceElement.get('spread') is not None:
            self.setDefaultFlowSpread(float(flowAppearanceElement.get('spread')))
        if self.defaultFlowSpacing == 1.0 and self.defaultFlowSpeed == 1.0 and self.defaultFlowSpread == 0.2:
            # Switch to new world-space relative defaults.
            self.setDefaultFlowSpacing(0.05)
            self.setDefaultFlowSpeed(0.05)

    visibleElements = xmlElement.findall('Visible')

    # Add all of the nodes
    for visibleElement in visibleElements:
        if visibleElement.find('Path') is None and visibleElement.find('path') is None:
            visible = Visible._fromXMLElement(visibleElement, self)
            if visible is None:
                raise ValueError, gettext('Could not create visualized item')
            self.addVisible(visible)

    # Add all of the paths (must be done after nodes are added)
    for visibleElement in visibleElements:
        if visibleElement.find('Path') is not None or visibleElement.find('path') is not None:
            visible = Visible._fromXMLElement(visibleElement, self)
            if visible is None:
                raise ValueError, gettext('Could not create visualized item')
            self.addVisible(visible)

    self.computeVisiblesBound()

    # View settings; boolean attributes are stored as strings.
    self.setViewDimensions(int(xmlElement.get('dimensions')))
    trueValues = ['true', 'True', 'TRUE', '1']
    if xmlElement.get('showRegionNames') is not None:
        self.setShowRegionNames(xmlElement.get('showRegionNames') in trueValues)
    if xmlElement.get('showNeuronNames') is not None:
        self.setShowNeuronNames(xmlElement.get('showNeuronNames') in trueValues)
    if xmlElement.get('showNeuronNamesOnSelection') is not None:
        self.setShowNeuronNamesOnSelection(xmlElement.get('showNeuronNamesOnSelection') in trueValues)
    if xmlElement.get('printNeuronNamesOnSelection') is not None:
        self.setPrintNeuronNamesOnSelection(xmlElement.get('printNeuronNamesOnSelection') in trueValues)
    if xmlElement.get('hideUnselectedNeurons') is not None:
        self.setHideUnselectedNeurons(xmlElement.get('hideUnselectedNeurons') in trueValues)
    if xmlElement.get('showFlow') is not None:
        self.setShowFlow(xmlElement.get('showFlow') in trueValues)
    if xmlElement.get('useGhosting') is not None:
        self.setUseGhosts(xmlElement.get('useGhosting') in trueValues)
    if xmlElement.get('ghostingOpacity') is not None:
        self.setGhostingOpacity(float(xmlElement.get('ghostingOpacity')))
    if xmlElement.get('useMouseOverSelecting') is not None:
        self._useMouseOverSelecting = xmlElement.get('useMouseOverSelecting') in trueValues
    if xmlElement.get('autoVisualize') is not None:
        self.autoVisualize = xmlElement.get('autoVisualize') in trueValues
    if xmlElement.get('labelsFloatOnTop') is not None:
        self.setLabelsFloatOnTop(xmlElement.get('labelsFloatOnTop') in trueValues)
    if xmlElement.get('selectionHighlightDepth') is not None:
        self.setSelectionHighlightDepth(int(xmlElement.get('selectionHighlightDepth')))
    if xmlElement.get('highlightOnlyWithinSelection') is not None:
        self.setHighlightOnlyWithinSelection(xmlElement.get('highlightOnlyWithinSelection') in trueValues)
    if xmlElement.get('showCompass') is not None:
        self.setShowCompass(xmlElement.get('showCompass') in trueValues)

    # Restore the selection from the comma-separated list of display ids.
    selectedVisibleIds = xmlElement.get('selectedVisibleIds')
    visiblesToSelect = []
    if selectedVisibleIds is not None:
        for visibleId in selectedVisibleIds.split(','):
            if visibleId.isdigit() and int(visibleId) in self._visibleIds:
                visiblesToSelect.append(self._visibleIds[int(visibleId)])
    self.selectVisibles(visiblesToSelect)

    # Re-enable refreshing and show everything that was loaded.
    self._suppressRefresh = False
    self._recomputeBounds = True
    if self.viewDimensions == 2:
        self.zoomToFit()
    else:
        self.resetView()
    self.Refresh()
def _toXMLElement(self, parentElement):
displayElement = ElementTree.SubElement(parentElement, 'Display')
if self._name:
ElementTree.SubElement(displayElement, 'Name').text = self._name
# Add the background color
colorElement = ElementTree.SubElement(displayElement, 'BackgroundColor')
colorElement.set('r', str(self.backgroundColor[0]))
colorElement.set('g', str(self.backgroundColor[1]))
colorElement.set('b', str(self.backgroundColor[2]))
colorElement.set('a', str(self.backgroundColor[3]))
# Add the default flow appearance
flowAppearanceElement = ElementTree.SubElement(displayElement, 'DefaultFlowAppearance')
colorElement = ElementTree.SubElement(flowAppearanceElement, 'Color')
colorElement.set('r', str(self.defaultFlowColor[0]))
colorElement.set('g', str(self.defaultFlowColor[1]))
colorElement.set('b', str(self.defaultFlowColor[2]))
colorElement.set('a', str(self.defaultFlowColor[3]))
flowAppearanceElement.set('spacing', str(self.defaultFlowSpacing))
flowAppearanceElement.set('speed', str(self.defaultFlowSpeed))
flowAppearanceElement.set('spread', str(self.defaultFlowSpread))
# Add the display rules
for displayRule in self.displayRules:
ruleElement = displayRule._toXMLElement(displayElement)
if ruleElement is None:
raise ValueError, gettext('Could not save display rule')
# Add the visibles
for visibles in self.visibles.itervalues():
for visible in visibles:
if visible.parent is None:
visibleElement = visible._toXMLElement(displayElement)
if visibleElement is None:
raise ValueError, gettext('Could not save visualized item')
displayElement.set('dimensions', str(self.viewDimensions))
displayElement.set('showRegionNames', 'true' if self._showRegionNames else 'false')
displayElement.set('showNeuronNames', 'true' if self._showNeuronNames else 'false')
displayElement.set('showNeuronNamesOnSelection', 'true' if self._showNeuronNamesOnSelection else 'false')
displayElement.set('hideUnselectedNeurons', 'true' if self._hideUnselectedNeurons else 'false')
displayElement.set('hideSynapsesOnConnections', 'true' if self._hideSynapsesOnConnections else 'false')
displayElement.set('showFlow', 'true' if self._showFlow else 'false')
displayElement.set('useGhosting', 'true' if self._useGhosts else 'false')
displayElement.set('ghostingOpacity', str(self._ghostingOpacity))
displayElement.set('useMouseOverSelecting', 'true' if self._useMouseOverSelecting else 'false')
displayElement.set('autoVisualize', 'true' if self.autoVisualize else 'false')
displayElement.set('labelsFloatOnTop', 'true' if self._labelsFloatOnTop else 'false')
displayElement.set('selectionHighlightDepth', str(self._selectionHighlightDepth))
displayElement.set('highlightOnlyWithinSelection', 'true' if self._highlightOnlyWithinSelection else 'false')
displayElement.set('showCompass', 'true' if self.isShowingCompass() else 'false')
selectedVisibleIds = []
for visible in self.selectedVisibles:
selectedVisibleIds.append(str(visible.displayId))
displayElement.set('selectedVisibleIds', ','.join(selectedVisibleIds))
return displayElement
def _toScriptFile(self, scriptFile, scriptRefs, displayRef, savingNetwork):
if self._name != None:
scriptFile.write(displayRef + '.setName(' + repr(self._name) + ')\n')
scriptFile.write(displayRef + '.setBackgroundColor((' + ', '.join([str(component) for component in self.backgroundColor]) + '))\n')
scriptFile.write(displayRef + '.setDefaultFlowColor(' + str(self.defaultFlowColor) + ')\n')
scriptFile.write(displayRef + '.setDefaultFlowSpacing(' + str(self.defaultFlowSpacing) + ')\n')
scriptFile.write(displayRef + '.setDefaultFlowSpeed(' + str(self.defaultFlowSpeed) + ')\n')
scriptFile.write(displayRef + '.setDefaultFlowSpread(' + str(self.defaultFlowSpread) + ')\n')
scriptFile.write(displayRef + '.setViewDimensions(' + str(self.viewDimensions) + ')\n')
scriptFile.write(displayRef + '.setShowCompass(' + str(self.isShowingCompass()) + ')\n')
scriptFile.write(displayRef + '.setShowRegionNames(' + str(self._showRegionNames) + ')\n')
scriptFile.write(displayRef + '.setShowNeuronNames(' + str(self._showNeuronNames) + ')\n')
scriptFile.write(displayRef + '.setShowNeuronNamesOnSelection(' + str(self._showNeuronNamesOnSelection) + ')\n')
scriptFile.write(displayRef + '.setPrintNeuronNamesOnSelection(' + str(self._showNeuronNamesOnSelection) + ')\n')
scriptFile.write(displayRef + '.setHideUnselectedNeurons(' + str(self._hideUnselectedNeurons) + ')\n')
scriptFile.write(displayRef + '.setHideSynapsesOnConnections(' + str(self._hideSynapsesOnConnections) + ')\n')
scriptFile.write(displayRef + '.setShowFlow(' + str(self._showFlow) + ')\n')
scriptFile.write(displayRef + '.setUseGhosts(' + str(self._useGhosts) + ')\n')
scriptFile.write(displayRef + '.setGhostingOpacity(' + str(self._ghostingOpacity) + ')\n')
scriptFile.write(displayRef + '.setUseMouseOverSelecting(' + str(self._useMouseOverSelecting) + ')\n')
scriptFile.write(displayRef + '.setLabelsFloatOnTop(' + str(self._labelsFloatOnTop) + ')\n')
scriptFile.write(displayRef + '.setSelectionHighlightDepth(' + str(self._selectionHighlightDepth) + ')\n')
scriptFile.write(displayRef + '.setHighlightOnlyWithinSelection(' + str(self._highlightOnlyWithinSelection) + ')\n')
scriptFile.write('\n')
# First visualize all of the nodes.
for visibles in self.visibles.itervalues():
for visible in visibles:
if not visible.isPath() and visible.parent is None and not isinstance(visible.client, Stimulus):
visible._toScriptFile(scriptFile, scriptRefs, displayRef, savingNetwork)
# Next visualize all of the connections between the nodes.
for visibles in self.visibles.itervalues():
for visible in visibles:
if visible.isPath():
visible._toScriptFile(scriptFile, scriptRefs, displayRef, savingNetwork)
objectRefs = []
visibleIds = []
for visible in self.selectedVisibles:
if visible.client:
objectRefs.append(scriptRefs[visible.client.networkId])
else:
visibleIds += [visible.displayId]
if any(objectRefs):
scriptFile.write(displayRef + '.selectObjects([' + ', '.join(objectRefs) + '])\n')
for visibleId in visibleIds:
scriptFile.write(displayRef + '.selectVisibles([' + displayRef + '.visibleWithId(' + visibleId + ')], extend = True)')
if self.viewDimensions == 2:
scriptFile.write('\n' + displayRef + '.zoomToFit()\n')
else:
scriptFile.write('\n' + displayRef + '.resetView()\n')
def setName(self, name):
    """Set the user-visible name of this display.

    Observers are notified with a ('set', 'name') dispatcher message only
    when the name actually changes.
    """
    if name == self._name:
        return
    self._name = name
    dispatcher.send(('set', 'name'), self)
def name(self):
    """Return the display's name as a string, or None if no (or an empty) name is set."""
    if self._name:
        return str(self._name)
    return None
def _generateUniqueId(self):
self._nextUniqueId += 1
return self._nextUniqueId
def setViewDimensions(self, dimensions):
    """ Set the number of dimension in which to visualize the network.

    The argument must be either 2 or 3.  Switching to 2D remembers the 3D
    trackball position and navigation mode so they can be restored when
    switching back to 3D.
    """
    if dimensions not in (2, 3):
        raise ValueError, 'The dimensions argument passed to setViewDimensions() must be 2 or 3.'

    if dimensions != self.viewDimensions:
        self.viewDimensions = dimensions
        width, height = self.GetClientSize()
        self._clearDragger()
        if self.viewDimensions == 2:
            # Save the 3D navigation state, drop the trackball manipulator and
            # switch to the orthographic projection.
            self._previous3DNavMode = self._navigationMode
            self.setNavigationMode(PANNING_MODE)
            self._previousTrackballMatrix = self.trackball.getMatrix()
            self._previousTrackballCenter = self.trackball.getCenter()
            self.viewer.setCameraManipulator(None)
            self.computeVisiblesBound()
            self._resetView()
        elif self.viewDimensions == 3:
            self.setNavigationMode(self._previous3DNavMode)
            # Hide the scroll bars before we get the size of the viewport.
            self.SetScrollbar(wx.HORIZONTAL, 0, width, width, True)
            self.SetScrollbar(wx.VERTICAL, 0, height, height, True)
            width, height = self.GetClientSize()
            self.graphicsWindow = self.viewer.setUpViewerAsEmbeddedInWindow(0, 0, width, height)
            self.viewer.getCamera().setProjectionMatrixAsPerspective(30.0, float(width)/height, 1.0, 1000.0)
            self.viewer.setCameraManipulator(self.trackball)
            if self._first3DView:
                self.resetView()
                self._first3DView = False
            else:
                # Restore the camera pose saved when we last left 3D.
                self.trackball.computeHomePosition()
                self.viewer.home()
                self.trackball.setByMatrix(self._previousTrackballMatrix)
                #self.trackball.setCenter(self._previousTrackballCenter)
        # Re-attach a manipulation dragger if exactly one draggable visible is selected.
        if len(self.selectedVisibles) == 1:
            visible = list(self.selectedVisibles)[0]
            if visible._isDraggable():
                self._addDragger(visible)
        # Call _updatePath on all path visibles so parallel edges are drawn correctly.
        for visibles in self.visibles.values():
            for visible in visibles:
                if visible.isPath():
                    visible._updatePath()
        self._updateCompassAxes()
        self.Refresh()
        dispatcher.send(('set', 'viewDimensions'), self)
def onViewIn2D(self, event_):
    # Menu/toolbar handler: switch the display to the 2D orthographic view.
    self.setViewDimensions(2)
def onViewIn3D(self, event_):
    # Menu/toolbar handler: switch the display to the 3D perspective view.
    self.setViewDimensions(3)
def setOrthoViewPlane(self, plane):
    """
    Set which plane should be viewed in 2D.

    The argument must be one of 'xy', 'xz' or 'zy'.
    """
    # World-coordinate axis indices used for the horizontal and vertical
    # screen axes of each supported view plane.
    planeAxes = {'xy': (0, 1), 'xz': (0, 2), 'zy': (1, 2)}
    if plane not in planeAxes:
        raise ValueError("The plane argument passed to setOrthoViewPlane() must be one of 'xy', 'xz' or 'zy'")
    if plane == self.orthoViewPlane:
        return
    self.orthoViewPlane = plane
    self.orthoXPlane, self.orthoYPlane = planeAxes[plane]
    self._resetView()
    # Call _updatePath on all path visibles so parallel edges are drawn correctly.
    for planeVisibles in self.visibles.values():
        for visible in planeVisibles:
            if visible.isPath():
                visible._updatePath()
    self._updateCompassAxes()
    self.Refresh()
    dispatcher.send(('set', 'orthoViewPlane'), self)
def setShowCompass(self, showCompass):
    # Show or hide the small X/Y/Z axis compass overlay in the corner of the
    # display.  The compass lives under its own POST_RENDER camera so it is
    # drawn on top of the scene without being affected by the main camera.

    def _addCompassAxis(geode, text, position):
        # Build one compass axis: a gray line from the origin toward
        # `position` plus a screen-aligned text label at its tip.
        # Returns the (line, label) drawables so they can be shown/hidden
        # individually later (see _updateCompassAxes).
        axis = osg.Geometry()
        axis.setVertexArray(Shape.vectorArrayFromList([(0.0, 0.0, 0.0), (position[0] * 0.75, position[1] * 0.75, position[2] * 0.75)]))
        axis.addPrimitiveSet(Shape.primitiveSetFromList(osg.PrimitiveSet.LINE_STRIP, range(2)))
        axis.setNormalArray(Shape.vectorArrayFromList([(0.0, 0.0, 0.0)]))
        axis.setNormalBinding(osg.Geometry.BIND_OVERALL)
        axis.setColorArray(Shape.vectorArrayFromList([(0.5, 0.5, 0.5)]))
        axis.setColorBinding(osg.Geometry.BIND_OVERALL)
        geode.addDrawable(axis)
        # Add the axis label.
        label = osgText.Text()
        label.setCharacterSizeMode(osgText.Text.SCREEN_COORDS)
        if Visible.labelFont is None:
            # No app font available; fall back to the default at a larger size.
            label.setCharacterSize(48.0)
        else:
            label.setFont(Visible.labelFont)
            label.setCharacterSize(18.0)
        label.setAxisAlignment(osgText.Text.SCREEN)
        label.setAlignment(osgText.Text.CENTER_CENTER)
        label.setColor(osg.Vec4(0.25, 0.25, 0.25, 1.0))
        label.setBackdropColor(osg.Vec4(0.75, 0.75, 0.75, 0.25))
        label.setBackdropType(osgText.Text.OUTLINE)
        label.setPosition(osg.Vec3(*position))
        label.setText(text)
        geode.addDrawable(label)
        return (axis, label)

    # Only act when the requested state differs from the current one.
    if showCompass != (self.compassCamera != None):
        if showCompass:
            # A separate overlay camera drawn after the main scene.
            self.compassCamera = osg.Camera()
            self.compassCamera.setProjectionMatrixAsPerspective(30.0, 1.0, 1.0, 10000.0)
            self.compassCamera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
            self.compassCamera.setViewMatrixAsLookAt(osg.Vec3d(0, 0, 5), osg.Vec3d(0, 0, 0), osg.Vec3d(0, 1, 0))
            self.compassCamera.setClearMask(osg.GL_DEPTH_BUFFER_BIT)
            self.compassCamera.setRenderOrder(osg.Camera.POST_RENDER)
            self.compassCamera.setAllowEventFocus(False)
            self.compassCamera.setViewport(0, 0, 50, 50)
            # Add the axes
            self._compassGeode = osg.Geode()
            self.compassTransform = osg.MatrixTransform()
            self.compassTransform.addChild(self._compassGeode)
            self.compassCamera.addChild(self.compassTransform)
            self._compassDrawables['X'] = _addCompassAxis(self._compassGeode, 'X', (1.0, 0.0, 0.0))
            self._compassDrawables['Y'] = _addCompassAxis(self._compassGeode, 'Y', (0.0, 1.0, 0.0))
            self._compassDrawables['Z'] = _addCompassAxis(self._compassGeode, 'Z', (0.0, 0.0, 1.0))
            self._updateCompassAxes()
            # Unlit, smoothed, alpha-blended lines/labels.
            stateSet = self._compassGeode.getOrCreateStateSet()
            stateSet.setMode(osg.GL_LIGHTING, osg.StateAttribute.OFF)
            stateSet.setMode(osg.GL_LINE_SMOOTH, osg.StateAttribute.ON)
            stateSet.setRenderingHint(osg.StateSet.TRANSPARENT_BIN)
            stateSet.setMode(osg.GL_BLEND, osg.StateAttribute.ON)
            self.rootNode.addChild(self.compassCamera)
        else:
            # Tear the compass down; isShowingCompass() keys off compassCamera.
            self.rootNode.removeChild(self.compassCamera)
            self._compassGeode = None
            self.compassCamera = None
        self.Refresh()
def isShowingCompass(self):
    """Return True if the compass overlay is currently shown.

    The compass camera only exists while the compass is visible (see
    setShowCompass), so its presence is the indicator.
    """
    # Identity comparison with None (PEP 8) instead of '!= None'.
    return self.compassCamera is not None
def _updateCompassAxes(self):
    # Show/hide the desired axes.
    # In 2D only the two axes of the current view plane are shown; in 3D all
    # three are.  Axes are toggled by adding/removing their drawables (the
    # line and its label) from the compass geode.
    if self.compassCamera:
        if self.viewDimensions == 2:
            if self.orthoViewPlane == 'xy':
                axesToShow = ['X', 'Y']
            elif self.orthoViewPlane == 'xz':
                axesToShow = ['X', 'Z']
            elif self.orthoViewPlane == 'zy':
                axesToShow = ['Y', 'Z']
        else:
            axesToShow = ['X', 'Y', 'Z']
        for axis in ['X', 'Y', 'Z']:
            for drawable in self._compassDrawables[axis]:
                if axis in axesToShow:
                    # Add only if not already present to avoid duplicates.
                    if not self._compassGeode.containsDrawable(drawable):
                        self._compassGeode.addDrawable(drawable)
                else:
                    if self._compassGeode.containsDrawable(drawable):
                        self._compassGeode.removeDrawable(drawable)
def _updateCompass(self):
    """Orient the compass geometry to match the current camera."""
    if self.viewDimensions == 2:
        # Each orthogonal view plane has a fixed rotation.
        planeRotations = {'xy': (0, (1, 0, 0)),
                          'xz': (-pi / 2.0, (1, 0, 0)),
                          'zy': (pi / 2.0, (0, 1, 0))}
        angle, axis = planeRotations[self.orthoViewPlane]
        rotation = osg.Quat(angle, osg.Vec3(*axis))
    else:
        # In 3D, counter-rotate against the trackball so the compass tracks the camera.
        rotation = self.trackball.getRotation().inverse()
    self.compassTransform.setMatrix(osg.Matrixd.rotate(rotation))
def setUseStereo(self, useStereo):
    """
    Set whether the visualization should be viewable through red/blue 3D glasses.

    The argument should be either True or False.
    """
    settings = self.viewer.getDisplaySettings()
    if not useStereo:
        # Only need to touch settings if any exist.
        if settings is not None:
            settings.setStereo(False)
    else:
        if settings is None:
            # Lazily create and install display settings the first time.
            settings = osg.DisplaySettings()
            self.viewer.setDisplaySettings(settings)
        settings.setStereo(True)
        settings.setStereoMode(osg.DisplaySettings.ANAGLYPHIC)
    self.Refresh()
def _resetView(self):
    # Rebuild the 2D orthographic projection and view matrices from the
    # current orthoCenter/orthoZoom, then sync the scroll bars.  No-op in 3D
    # (the trackball manipulator owns the camera there).
    if self.viewDimensions == 2:
        width, height = self.GetClientSize()
        # TODO: if self.orthoZoom just changed to 0 then width and height will be too small by assuming the scroll bars are still there.
        # Each 10 units of orthoZoom doubles the magnification.
        zoom = 2.0 ** (self.orthoZoom / 10.0)
        # The +20 padding presumably accounts for the scroll bars -- TODO confirm.
        self.viewer.getCamera().setProjectionMatrixAsOrtho2D(self.orthoCenter[0] - (width + 20) * self.zoomScale / 2.0 / zoom,
                                                             self.orthoCenter[0] + (width + 20) * self.zoomScale / 2.0 / zoom,
                                                             self.orthoCenter[1] - (height + 20) * self.zoomScale / 2.0 / zoom,
                                                             self.orthoCenter[1] + (height + 20) * self.zoomScale / 2.0 / zoom)
        # Position the camera just outside the bounding box of the visibles,
        # looking along the axis perpendicular to the chosen view plane.
        if self.orthoViewPlane == 'xy':
            self.viewer.getCamera().setViewMatrix(osg.Matrixd.translate(osg.Vec3d(0.0, 0.0, self.visiblesMin[2] - 2.0)))
        elif self.orthoViewPlane == 'xz':
            self.viewer.getCamera().setViewMatrix(osg.Matrixd.translate(osg.Vec3d(0.0, self.visiblesMax[1] + 2.0, 0.0)) * \
                                                  osg.Matrixd.rotate(osg.Quat(pi / -2.0, osg.Vec3d(1, 0, 0))))
        elif self.orthoViewPlane == 'zy':
            self.viewer.getCamera().setViewMatrix(osg.Matrixd.translate(osg.Vec3d(self.visiblesMax[0] + 2.0, 0.0, 0.0)) * \
                                                  osg.Matrixd.rotate(osg.Quat(pi / 2.0, osg.Vec3d(0, 1, 0))))
        # Scroll bar position reflects where orthoCenter sits within the
        # bounding box of all visibles; thumb size shrinks as zoom grows.
        self.SetScrollbar(wx.HORIZONTAL, (self.orthoCenter[0] - self.visiblesMin[0]) / self.visiblesSize[0] * width - width / zoom / 2.0, width / zoom, width, True)
        self.SetScrollbar(wx.VERTICAL, (self.visiblesMax[1] - self.orthoCenter[1]) / self.visiblesSize[1] * height - height / zoom / 2.0, height / zoom, height, True)
def computeVisiblesBound(self):
    """Recompute the world-space bounding box of all visibles.

    Updates visiblesMin/Max/Center/Size and the 2D zoomScale.  Does nothing
    unless _recomputeBounds has been flagged.  Path visibles contribute
    their mid-points as well as their own bounds.
    """
    if self._recomputeBounds:
        # This:
        #     boundingSphere = node.getBound()
        #     sphereCenter = boundingSphere.center()
        # computes a screwy center.  Because there's no camera?
        # Manually compute the bounding box instead.
        # TODO: figure out how to let the faster C++ code do this
        origBounds = (self.visiblesCenter, self.visiblesSize)
        self.visiblesMin = [100000, 100000, 100000]
        self.visiblesMax = [-100000, -100000, -100000]
        for visibles in self.visibles.values():
            for visible in visibles:
                x, y, z = visible.worldPosition()
                w, h, d = visible.worldSize()
                # Expand the box by the visible's extents on each axis.
                self.visiblesMin[0] = min(self.visiblesMin[0], x - w / 2.0)
                self.visiblesMax[0] = max(self.visiblesMax[0], x + w / 2.0)
                self.visiblesMin[1] = min(self.visiblesMin[1], y - h / 2.0)
                self.visiblesMax[1] = max(self.visiblesMax[1], y + h / 2.0)
                self.visiblesMin[2] = min(self.visiblesMin[2], z - d / 2.0)
                self.visiblesMax[2] = max(self.visiblesMax[2], z + d / 2.0)
                if visible.isPath():
                    # Paths can bow out beyond their endpoints; include the
                    # routing mid-points too.
                    for px, py, pz in visible.pathMidPoints():
                        self.visiblesMin[0] = min(self.visiblesMin[0], px)
                        self.visiblesMax[0] = max(self.visiblesMax[0], px)
                        self.visiblesMin[1] = min(self.visiblesMin[1], py)
                        self.visiblesMax[1] = max(self.visiblesMax[1], py)
                        self.visiblesMin[2] = min(self.visiblesMin[2], pz)
                        self.visiblesMax[2] = max(self.visiblesMax[2], pz)
        self.visiblesCenter = ((self.visiblesMin[0] + self.visiblesMax[0]) / 2.0, (self.visiblesMin[1] + self.visiblesMax[1]) / 2.0, (self.visiblesMin[2] + self.visiblesMax[2]) / 2.0)
        self.visiblesSize = (self.visiblesMax[0] - self.visiblesMin[0], self.visiblesMax[1] - self.visiblesMin[1], self.visiblesMax[2] - self.visiblesMin[2])
        self._recomputeBounds = False

        if origBounds != (self.visiblesCenter, self.visiblesSize):
            # The size of the glow effect is based on the bounding box of the whole display.
            # This is expensive so only do it if something actually changed.
            # (.values() instead of .itervalues(), matching the loop above.)
            for visibles in self.visibles.values():
                for visible in visibles:
                    visible._updateGlow()

        # World units per pixel needed to fit everything, with a 10 px margin.
        width, height = self.GetClientSize()
        xZoom = self.visiblesSize[self.orthoXPlane] / (width - 10.0)
        yZoom = self.visiblesSize[self.orthoYPlane] / (height - 10.0)
        self.zoomScale = max(xZoom, yZoom)
def centerView(self):
    """
    Deprecated -- use zoomToFit() (2D) or resetView() (3D) instead.
    """
    action = self.zoomToFit if self.viewDimensions == 2 else self.resetView
    action()
def resetView(self):
    """
    Reset the view point of the 3D view to the default distance and rotation.

    Has no effect while the display is in 2D mode.
    """
    if self.viewDimensions != 3:
        return
    self.trackball.setNode(self.rootNode)
    self.trackball.computeHomePosition()
    self.viewer.home()
    self.trackball.setRotation(osg.Quat(0, 0, 0, 1))
    self.Refresh()
def zoomToFit(self):
    """
    Change the magnification of the 2D view so that all objects are visible.

    Has no effect while the display is in 3D mode.
    """
    if self.viewDimensions != 2:
        return
    self.computeVisiblesBound()
    # Center on the bounding box and drop back to the base zoom level.
    self.orthoCenter = (self.visiblesCenter[self.orthoXPlane], self.visiblesCenter[self.orthoYPlane])
    self.orthoZoom = 0
    self._resetView()
    self.Refresh()
def zoomToSelection(self):
    """
    Change the magnification of the 2D view so that all selected or highlighted objects are visible.

    Does nothing when no objects are selected, highlighted or animated
    (previously this raised from log() of a non-positive zoom).
    """
    focusVisibles = self.selectedVisibles.union(self.highlightedVisibles).union(self.animatedVisibles)
    if not focusVisibles:
        return
    minX, maxX = (1e300, -1e300)
    minY, maxY = (1e300, -1e300)
    for visible in focusVisibles:
        worldPos = visible.worldPosition()
        worldSize = visible.worldSize()
        minX = min(minX, worldPos[0] - worldSize[0] / 2.0)
        maxX = max(maxX, worldPos[0] + worldSize[0] / 2.0)
        minY = min(minY, worldPos[1] - worldSize[1] / 2.0)
        maxY = max(maxY, worldPos[1] + worldSize[1] / 2.0)
    self.orthoCenter = ((minX + maxX) / 2.0, (minY + maxY) / 2.0)
    width, height = self.GetClientSize()
    # Guard against a zero-extent selection (a single point-sized visible)
    # which would otherwise divide by zero.
    xZoom = (width - 20) * self.zoomScale / max(maxX - minX, 1e-6)
    yZoom = (height - 20) * self.zoomScale / max(maxY - minY, 1e-6)
    # orthoZoom is logarithmic: 10 units per doubling of magnification.
    self.orthoZoom = log(min(xZoom, yZoom), 2) * 10.0
    self._resetView()
    self.Refresh()
def _zoom(self, amount):
    # Zoom the view by `amount` steps (positive = in, negative = out).
    # 2D: adjust the logarithmic orthoZoom (10 units per doubling, clamped
    # at 0) and clamp orthoCenter so the view stays within scroll range.
    # 3D: move the trackball camera along its view axis.
    if self.viewDimensions == 2:
        self.orthoZoom += 10 * amount
        if self.orthoZoom < 0:
            self.orthoZoom = 0
        # Alter orthoCenter if the new zoom level will cause any visibles to fall outside the reach of the scroll bars.
        width, height = self.GetClientSize()
        zoom = 2 ** (self.orthoZoom / 10.0)
        # Mirror of the scroll-bar math in _resetView(): clamp the horizontal position...
        horScrollPos = (self.orthoCenter[0] - self.visiblesMin[0]) / self.visiblesSize[0] * width - width / zoom / 2.0
        maxHorScrollPos = width - width / zoom
        if horScrollPos < 0.0:
            self.orthoCenter = ((width / zoom / 2.0) / width * self.visiblesSize[0] + self.visiblesMin[0], self.orthoCenter[1])
        elif horScrollPos > maxHorScrollPos:
            self.orthoCenter = ((maxHorScrollPos + width / zoom / 2.0) / width * self.visiblesSize[0] + self.visiblesMin[0], self.orthoCenter[1])
        # ...and the vertical position (y axis is inverted relative to screen coordinates).
        vertScrollPos = (self.visiblesMax[1] - self.orthoCenter[1]) / self.visiblesSize[1] * height - height / zoom / 2.0
        maxVertScrollPos = height - height / zoom
        if vertScrollPos < 0.0:
            self.orthoCenter = (self.orthoCenter[0], self.visiblesMax[1] - (height / zoom / 2.0) * self.visiblesSize[1] / height)
        elif vertScrollPos > maxVertScrollPos:
            self.orthoCenter = (self.orthoCenter[0], self.visiblesMax[1] - (maxVertScrollPos + height / zoom / 2.0) * self.visiblesSize[1] / height)
    elif self.viewDimensions == 3:
        self.computeVisiblesBound()
        # Step the camera by 20% of the largest scene dimension per zoom step.
        self.trackball.setDistance(self.trackball.getDistance() - max(self.visiblesSize) * 0.2 * amount)
    self._resetView()
    self.Refresh()
def zoomIn(self):
    """
    Increase the magnification of the view by one standard step.
    """
    self._zoom(1.0)
def zoomOut(self):
    """
    Decrease the magnification of the view by one standard step.
    """
    self._zoom(-1.0)
def onScroll(self, event):
    # Map a scroll bar position back into a world-space ortho center.
    # This is the inverse of the scroll-position calculation performed in _resetView().
    width, height = self.GetClientSize()
    zoom = 2 ** (self.orthoZoom / 10.0)
    if event.GetOrientation() == wx.HORIZONTAL:
        # Reverse the calculation in _resetView():
        # pos = (self.orthoCenter[0] - self.visiblesMin[0]) / self.visiblesSize[0] * width - width / zoom / 2
        # pos + width / zoom / 2 = (self.orthoCenter[0] - self.visiblesMin[0]) / self.visiblesSize[0] * width
        # (pos + width / zoom / 2) * self.visiblesSize[0] / width = self.orthoCenter[0] - self.visiblesMin[0]
        self.orthoCenter = ((event.GetPosition() + width / zoom / 2.0) / width * self.visiblesSize[0] + self.visiblesMin[0], self.orthoCenter[1])
    else:
        # Reverse the calculation in _resetView():
        # pos = (self.visiblesMax[1] - self.orthoCenter[1]) / self.visiblesSize[1] * height - height / zoom / 2
        # pos + height / zoom / 2 = (self.visiblesMax[1] - self.orthoCenter[1]) / self.visiblesSize[1] * height
        # (pos + height / zoom / 2) * self.visiblesSize[1] / height = self.visiblesMax[1] - self.orthoCenter[1]
        self.orthoCenter = (self.orthoCenter[0], self.visiblesMax[1] - (event.GetPosition() + height / zoom / 2.0) * self.visiblesSize[1] / height)
    self._resetView()
    self.Refresh()
def setNavigationMode(self, mode):
    """
    Switch the display to the given navigation mode (no-op when already in that mode).
    """
    if mode == self._navigationMode:
        return
    self._navigationMode = mode
def navigationMode(self):
    """
    Return the display's current navigation mode.
    """
    return self._navigationMode
def shiftView(self, dx, dy):
    """
    Pan the view by the given screen-space delta.
    """
    if self.viewDimensions == 3:
        # 3D panning can be applied immediately.
        self._shiftView(dx, dy)
        return
    if self.orthoZoom > 0:
        # At least on the Mac the scroll bars don't update if set immediately.  Instead, queue the
        # update to happen after all current events have cleared.
        wx.CallAfter(self._shiftView, dx, dy)
def _shiftView(self, dx, dy):
    # Apply a pan of (dx, dy) pixels: in 2D by moving the ortho center,
    # in 3D by moving the trackball manipulator's center point.
    width, height = self.GetClientSize()
    if self.viewDimensions == 2:
        # Convert screen coordinates to world coordinates.
        # The 20 pixel inset matches the margin used when fitting the visibles to the window.
        dx = -dx / (width - 20.0) * width
        dy = -dy / (height - 20.0) * height
        zoom = 2.0 ** (self.orthoZoom / 10.0)
        self.orthoCenter = (self.orthoCenter[0] + dx * self.zoomScale / zoom, self.orthoCenter[1] + dy * self.zoomScale / zoom)
        self._resetView()
    else:
        # Mimic the panning code from OSG's trackball manipulator (in TrackballManipulator::calcMovement()).
        # It expects dx and dy to be normalized (-1.0 ... 1.0).
        dx /= width / 2.0
        dy /= height / 2.0
        scale = -0.3 * self.trackball.getDistance()
        rotation = osg.Matrixd()
        rotation.makeRotate(self.trackball.getRotation())
        shiftVector = osg.Vec3d(dx * scale, dy * scale, 0.0)
        center = self.trackball.getCenter()
        center += rotation.preMult(shiftVector)
        self.trackball.setCenter(center)
    self.Refresh()
def setBackgroundColor(self, color):
    """
    Set the background color of the entire display.
    
    The color argument must be a tuple or list of four numbers between 0.0 and 1.0
    giving the red, green, blue and alpha components.  For example:
    
    * (0.0, 0.0, 0.0, 1.0) -> black
    * (1.0, 0.0, 0.0, 1.0) -> red
    * (0.0, 1.0, 0.0, 1.0) -> green
    * (0.0, 0.0, 1.0, 1.0) -> blue
    * (1.0, 1.0, 1.0, 1.0) -> white
    * (1.0, 1.0, 1.0, 0.0) -> white, but clear if saved as image
    """
    if not isinstance(color, (list, tuple)) or len(color) != 4:
        raise ValueError('The color passed to setBackgroundColor() must be a tuple or list of four numbers.')
    if not all(isinstance(component, (int, float)) and 0.0 <= component <= 1.0 for component in color):
        raise ValueError('The components of the color passed to setBackgroundColor() must all be numbers between 0.0 and 1.0, inclusive.')
    if color != self.backgroundColor:
        # Push the new clear color into the OSG camera and let listeners know.
        self.viewer.getCamera().setClearColor(osg.Vec4f(color[0], color[1], color[2], color[3]))
        self.backgroundColor = color
        dispatcher.send(('set', 'backgroundColor'), self)
def setUseMouseOverSelecting(self, useIt):
    """
    Set whether objects should be automatically selected as the mouse passes over them.
    
    This setting is ignored if a manual selection is already in place.
    """
    if useIt == self._useMouseOverSelecting:
        return
    self._useMouseOverSelecting = useIt
    dispatcher.send(('set', 'useMouseOverSelecting'), self)
def useMouseOverSelecting(self):
    """
    Return whether objects are automatically selected as the mouse passes over them.
    """
    return self._useMouseOverSelecting
def onMouseEvent(self, event):
    # Forward wx mouse events to the OSG event queue, capturing modifier state used
    # later by the selection code (extend selection / find shortest path).
    if event.ButtonDown():
        self.selectionShouldExtend = event.CmdDown()
        self.findShortestPath = event.ShiftDown()
        self.graphicsWindow.getEventQueue().mouseButtonPress(event.GetX(), event.GetY(), event.GetButton())
    elif event.ButtonUp():
        self.graphicsWindow.getEventQueue().mouseButtonRelease(event.GetX(), event.GetY(), event.GetButton())
    elif event.Dragging():
        self.graphicsWindow.getEventQueue().mouseMotion(event.GetX(), event.GetY())
    elif event.Moving() and ((self._useMouseOverSelecting and self.hoverSelect) or self._visibleBeingAdded is not None):
        # Synthesize a left click so mouse-over selection (or placement of a visible
        # currently being added) tracks the pointer without a real button press.
        if self._visibleBeingAdded is None:
            self.hoverSelecting = True
        self.graphicsWindow.getEventQueue().mouseButtonPress(event.GetX(), event.GetY(), wx.MOUSE_BTN_LEFT)
        self.graphicsWindow.getEventQueue().mouseButtonRelease(event.GetX(), event.GetY(), wx.MOUSE_BTN_LEFT)
        self.Refresh()
    event.Skip()
def onMouseWheel(self, event):
    """
    Zoom in response to the scroll wheel; holding shift zooms ten times more finely.
    """
    divisor = 100.0 if event.ShiftDown() else 10.0
    self._zoom(event.GetWheelRotation() / divisor * self.scrollWheelScale)
    event.Skip()
def onEraseBackground(self, event):
    # Intentionally empty: OpenGL repaints the whole client area every frame, so
    # skipping the background erase avoids flicker (common wx + GL practice).
    pass
def onSize(self, event):
    # Propagate a wx resize to the OSG graphics window so the GL viewport matches
    # the new client area, then recompute the 2D view parameters.
    w, h = self.GetClientSize()
    if self.IsShownOnScreen():
        self.SetCurrent(self.glContext)
        if self.graphicsWindow.valid():
            self.graphicsWindow.getEventQueue().windowResize(0, 0, w, h)
            self.graphicsWindow.resized(0, 0, w, h)
        self._resetView()
    event.Skip()
def onPaint(self, event_):
    # A wx.PaintDC must be constructed during a paint event even though OSG does
    # the actual drawing; it marks the damaged region as handled.
    wx.PaintDC(self)
    if self.IsShownOnScreen(): #self.GetContext() != 0 and self.graphicsWindow.valid():
        self.SetCurrent(self.glContext)
        self.viewer.frame()
        self.SwapBuffers()
def onAnimate(self, event):
    """
    Timer callback that redraws the display so flow animations advance.
    """
    self.Refresh()
    event.Skip()
def _getConvertedKeyCode(self, event):
key = event.GetKeyCode()
if key >= ord('A') and key <= ord('Z'):
if not event.ShiftDown():
key += 32
return key
def onKeyDown(self, event):
    """
    Forward a key press to the OSG event queue using the converted key code.
    """
    self.graphicsWindow.getEventQueue().keyPress(self._getConvertedKeyCode(event))
    event.Skip()
def onKeyUp(self, event):
    """
    Forward a key release to the OSG event queue using the converted key code.
    """
    self.graphicsWindow.getEventQueue().keyRelease(self._getConvertedKeyCode(event))
    event.Skip()
def visiblesForObject(self, networkObject):
    """
    Return the list of :class:`visible proxies <Display.Visible.Visible>` for the given object
    or an empty list if the object is not visualized.
    """
    if not networkObject:
        return []
    entries = self.visibles.get(networkObject.networkId)
    # Return a copy so callers can mutate the result without affecting our bookkeeping.
    return list(entries) if entries is not None else []
def Refresh(self, *args, **keywordArgs): # pylint: disable=W0221
    # Wrapper around GLCanvas.Refresh that can be suppressed (e.g. during batch updates)
    # and that keeps the compass overlay in sync with the camera before redrawing.
    if not self._suppressRefresh:
        if self.compassCamera:
            self._updateCompass()
        wx.glcanvas.GLCanvas.Refresh(self, *args, **keywordArgs)
def _visibleChanged(self, signal):
if signal[1] in ('position', 'size', 'rotation', 'path', 'pathMidPoints'):
self._recomputeBounds = True
if not self._recomputeBoundsScheduled:
# Trigger a single recompute of the visibles bounds this pass through the event loop no matter how many visibles are updated.
wx.CallAfter(self._resetViewAfterVisiblesChanged)
self._recomputeBoundsScheduled = True
elif signal[1] in ('positionIsFixed', 'sizeIsFixed') and any(self.selectedVisibles):
self._clearDragger()
visible = list(self.selectedVisibles)[0]
if visible._isDraggable():
self._addDragger(visible)
self.Refresh()
if signal[1] not in ('glowColor'):
self.GetTopLevelParent().setModified(True)
def _resetViewAfterVisiblesChanged(self):
    # Deferred handler (queued via wx.CallAfter in _visibleChanged) that recomputes
    # the bounds of all visibles once per pass through the event loop.
    self.computeVisiblesBound()
    if self.orthoZoom == 0:
        # Fully zoomed out: keep the 2D view centered on the full set of visibles.
        self.orthoCenter = (self.visiblesCenter[self.orthoXPlane], self.visiblesCenter[self.orthoYPlane])
    self._resetView()
    # Allow the next geometry change to schedule another recompute.
    self._recomputeBoundsScheduled = False
def addVisible(self, visible, parentVisible = None):
    """
    Register a visible with the display and insert its node into the scene graph,
    optionally nesting it inside a parent visible.
    """
    # Visibles with no client object are grouped under the sentinel id -1.
    clientId = -1 if visible.client == None else visible.client.networkId
    self.visibles.setdefault(clientId, []).append(visible)
    self._visibleIds[visible.displayId] = visible
    if parentVisible is None:
        self.rootNode.addChild(visible.sgNode)
    else:
        parentVisible.addChildVisible(visible)
    # Track changes to the visible so bounds and draggers stay up to date.
    dispatcher.connect(self._visibleChanged, dispatcher.Any, visible)
def visibleWithId(self, visibleId):
    """
    Return the visible with the given display id, or None if no such visible exists.
    """
    return self._visibleIds.get(visibleId)
def close(self):
    # Mark the display as shutting down, then detach it from its network
    # (setNetwork(None) clears all visibles as a side effect).
    self._closing = True
    self.setNetwork(None)
def removeVisible(self, visible):
    """
    Remove the indicated :class:`visual proxy <Display.Visible.Visible>` from the visualization.
    
    If the object has any nested objects or connections then they will be removed as well.
    """
    if visible.displayId not in self._visibleIds:
        raise ValueError, 'The visible passed to removeVisible() is not part of the display.'
    # Remove any child visibles before removing this one.
    for childVisible in list(visible.children):
        self.removeVisible(childVisible)
    # Remove any dependent visibles before removing this one. (like an arborization before its region)
    for dependentVisible in list(visible.dependentVisibles):
        self.removeVisible(dependentVisible)
    # Remove the visible from the current selection if needed.
    # (extend = True toggles an already-selected visible out of the selection)
    if visible in self.selectedVisibles:
        self.selectVisibles([visible], extend = True)
    # Remove the visible's node from the scene graph.
    # (removing a node that is not a child of rootNode is harmless)
    if visible.parent:
        visible.parent.removeChildVisible(visible)
    self.rootNode.removeChild(visible.sgNode)
    # Remove any dependencies.
    dispatcher.disconnect(self._visibleChanged, dispatcher.Any, visible)
    if visible.isPath():
        # Detach the path from its end points so they no longer reference it.
        visible.setPathEndPoints(None, None)
    # Remove the visible from self._visibleIds and self.visibles.
    del self._visibleIds[visible.displayId]
    clientId = -1 if visible.client == None else visible.client.networkId
    visibles = self.visibles[clientId]
    visibles.remove(visible)
    if not any(visibles):
        del self.visibles[clientId]
    visible.display = None
    self.Refresh()
def visualizeObject(self, networkObject = None, orphanClass = None, **keywordArgs):
    """
    Create a visual representation of the :class:`object <network.object.Object>`.
    
    If you want to have a purely visual object that does not represent any object in the biological network then pass None.
    
    You can customize the visualization of the object by passing additional parameters.  The parameters that would be used for automatic visualization can be obtained by calling :meth:`defaultVisualizationParams() <network.object.Object.defaultVisualizationParams>` on the object.
    
    Returns the :class:`visible proxy <Display.Visible.Visible>` of the object.
    """
    # TODO: document the list of possible params somewhere.
    # TODO: replace this whole block with display rules.
    visible = Visible(self, networkObject)
    isStimulus = False
    # Start with the default params for this object, object class or dummy object and override with any supplied params.
    if orphanClass:
        visible.setOrphanClass(orphanClass)
        params = orphanClass._defaultVisualizationParams()
        if orphanClass == Stimulus:
            # A stimulus is rendered as a node plus an edge (path) leading to its target.
            edgeVisible = visible
            nodeVisible = Visible(self, None)
            target = keywordArgs['target']
            del keywordArgs['target']
            isStimulus = True
    elif networkObject:
        params = networkObject.defaultVisualizationParams()
    else:
        params = Object._defaultVisualizationParams()
    for key, value in keywordArgs.items():
        params[key] = value
    # Keep flow animation in sync with changes to the underlying network object.
    if isinstance(networkObject, Arborization):
        dispatcher.connect(self._arborizationChangedFlow, ('set', 'sendsOutput'), networkObject)
        dispatcher.connect(self._arborizationChangedFlow, ('set', 'receivesInput'), networkObject)
    elif isinstance(networkObject, Pathway):
        dispatcher.connect(self._pathwayChangedFlow, ('set', 'region1Projects'), networkObject)
        dispatcher.connect(self._pathwayChangedFlow, ('set', 'region2Projects'), networkObject)
    elif isinstance(networkObject, Stimulus):
        edgeVisible = visible
        nodeVisible = Visible(self, networkObject)
        target = networkObject.target
        isStimulus = True
    # Appearance params are applied to the primary (edge, for a stimulus) visible.
    if 'color' in params:
        visible.setColor(params['color'])
    if 'shape' in params:
        if isinstance(params['shape'], str):
            # Look up the shape class by name.
            shape = neuroptikon.shapeClass(params['shape'])()
        elif isinstance(params['shape'], type(self.__class__)):
            # A shape class was supplied; instantiate it.
            shape = params['shape']()
        else:
            shape = params['shape']
        visible.setShape(shape)
    if 'opacity' in params:
        visible.setOpacity(params['opacity'])
        if isStimulus:
            nodeVisible.setOpacity(params['opacity'])
    if 'sizeIsAbsolute' in params:
        visible.setSizeIsAbsolute(params['sizeIsAbsolute'])
    if 'texture' in params:
        visible.setTexture(params['texture'])
    if 'textureScale' in params:
        visible.setTextureScale(params['textureScale'])
    if 'weight' in params:
        visible.setWeight(params['weight'])
    # Label and position are applied to the node visible of a stimulus.
    if isStimulus:
        visible = nodeVisible
    if 'size' in params:
        visible.setSize(params['size'])
    if 'label' in params:
        visible.setLabel(params['label'])
    if 'labelColor' in params:
        visible.setLabelColor(params['labelColor'])
    if 'labelPosition' in params:
        visible.setLabelPosition(params['labelPosition'])
    if 'position' in params:
        visible.setPosition(params['position'])
    if 'positionIsFixed' in params:
        visible.setPositionIsFixed(params['positionIsFixed'])
    if 'rotation' in params:
        visible.setRotation(params['rotation'])
    if 'arrangedAxis' in params:
        visible.setArrangedAxis(params['arrangedAxis'])
    if 'arrangedSpacing' in params:
        visible.setArrangedSpacing(params['arrangedSpacing'])
    if 'arrangedWeight' in params:
        visible.setArrangedWeight(params['arrangedWeight'])
    if 'path' in params:
        # 'path' is a legacy alias for 'pathMidPoints'.
        params['pathMidPoints'] = params['path']
        del params['path']
    pathStart, pathEnd = params.get('pathEndPoints', (None, None))
    pathFlowsTo = params.get('flowTo', None)
    pathFlowsFrom = params.get('flowFrom', None)
    flowToColor = params.get('flowToColor', None)
    flowFromColor = params.get('flowFromColor', None)
    # The parent can be given as a network object (resolved to its visible) or a visible.
    parentObject = params.get('parent', None)
    if isinstance(parentObject, Object):
        parentVisibles = self.visiblesForObject(parentObject)
        parentVisible = parentVisibles[0] if len(parentVisibles) == 1 else None
    else:
        parentVisible = parentObject
    self.addVisible(visible, parentVisible)
    if isStimulus:
        # Resolve the target to a visible and wire up the stimulus edge.
        if isinstance(target, Object):
            targetVisibles = self.visiblesForObject(target)
            if len(targetVisibles) == 1:
                target = targetVisibles[0]
        if target is not None:
            edgeVisible.setPathEndPoints(nodeVisible, target)
            edgeVisible.setPathIsFixed(True)
            edgeVisible.setFlowTo(True)
            if flowToColor:
                edgeVisible.setFlowToColor(flowToColor)
            if self._showFlow:
                edgeVisible.animateFlow()
        nodeVisible.setShape(None)
        edgeVisible.setPositionIsFixed(True)
        self.addVisible(edgeVisible)
    else:
        if pathStart is not None and pathEnd is not None:
            # The path start and end can either be objects or visibles.
            if isinstance(pathStart, Object):
                pathStartVisibles = self.visiblesForObject(pathStart)
            else:
                pathStartVisibles = [pathStart]
            if isinstance(pathEnd, Object):
                pathEndVisibles = self.visiblesForObject(pathEnd)
            else:
                pathEndVisibles = [pathEnd]
            if len(pathStartVisibles) == 1 and len(pathEndVisibles) == 1:
                pathStartVisible = pathStartVisibles[0]
                pathEndVisible = pathEndVisibles[0]
                visible.setPathEndPoints(pathStartVisible, pathEndVisible)
                visible.setPathMidPoints(params.get('pathMidPoints', []))
                visible.setPathIsFixed(params.get('pathIsFixed', None))
                visible.setFlowTo(pathFlowsTo)
                if flowToColor:
                    visible.setFlowToColor(flowToColor)
                visible.setFlowFrom(pathFlowsFrom)
                if flowFromColor:
                    visible.setFlowFromColor(flowFromColor)
                if self._showFlow:
                    visible.animateFlow()
    # Adopt any already-visualized children.
    childObjects = params.get('children', [])
    for childObject in childObjects:
        subVisibles = self.visiblesForObject(childObject)
        if len(subVisibles) == 1:
            # TODO: what if the subVisible is already a child?
            self.rootNode.removeChild(subVisibles[0].sgNode)
            visible.addChildVisible(subVisibles[0])
    # The visible may be outside of the previously computed bounds.
    # Bug fix: the original assigned to a local variable "_recomputeBounds",
    # which had no effect; the instance flag is what _visibleChanged checks.
    self._recomputeBounds = True
    return visible
def removeObject(self, networkObject):
    """
    Remove the indicated :class:`network object <network.object.Object>` from the visualization,
    along with any nested objects or connections.
    """
    remaining = self.visiblesForObject(networkObject)
    while any(remaining):
        # Removing a visible may cascade to others, so re-query after each removal.
        self.removeVisible(remaining[0])
        remaining = self.visiblesForObject(networkObject)
def clear(self):
    """
    Remove every :class:`network object <network.object.Object>` from the visualization.
    """
    while any(self.visibles):
        # Repeatedly remove the first visible of the first client entry;
        # removeVisible() updates self.visibles as it goes.
        firstGroup = next(iter(self.visibles.values()))
        self.removeVisible(firstGroup[0])
def _arborizationChangedFlow(self, sender):
arborizationVis = self.visiblesForObject(sender)
if len(arborizationVis) == 1:
arborizationVis[0].setFlowTo(sender.sendsOutput)
arborizationVis[0].setFlowFrom(sender.receivesInput)
def _pathwayChangedFlow(self, sender):
pathwayVis = self.visiblesForObject(sender)
if len(pathwayVis) == 1:
pathwayVis[0].setFlowTo(sender.region1Projects)
pathwayVis[0].setFlowFrom(sender.region2Projects)
def setConsole(self, console):
    """
    Attach the console used by this display for textual output.
    """
    self.console = console
def setNetwork(self, network):
    """
    Attach this display to the given network (or detach it by passing None).
    
    Any previously displayed network is cleared first.  When autoVisualize is
    enabled, every top-level object of the new network is visualized (except
    synapses when hideSynapsesOnConnections() is on).
    """
    if network != self.network:
        if self.network != None:
            self.network.removeDisplay(self)
            # TBD: are there situations where you wouldn't want to clear anonymous visibles?
            self.clear()
        # TODO: anything else?
        self.network = network
        if network is not None:
            self.network.addDisplay(self)
            if self.autoVisualize:
                for networkObject in network.objects:
                    if not networkObject.parentObject():
                        # Bug fix: this condition previously referenced the undefined
                        # name "addedObject" (copied from _networkChanged), raising a
                        # NameError; it must test the loop variable "networkObject".
                        if not (isinstance(networkObject, Synapse) and self.hideSynapsesOnConnections()):
                            self.visualizeObject(networkObject)
            dispatcher.connect(receiver=self._networkChanged, signal=dispatcher.Any, sender=self.network)
        dispatcher.send(('set', 'network'), self)
def _networkChanged(self, affectedObjects=None, **arguments):
    # Dispatcher callback for changes in the displayed network: keeps the set of
    # visibles in sync with additions/deletions and the synapse-hiding setting.
    signal = arguments['signal']
    if signal == 'addition' and self.autoVisualize:
        for addedObject in affectedObjects:
            if not addedObject.parentObject():
                # TODO if object is synapse and not display synapse is on then don't add to visualize object
                if not (isinstance(addedObject, Synapse) and self.hideSynapsesOnConnections()):
                    self.visualizeObject(addedObject)
        self.Refresh()
    elif signal == 'deletion':
        for removedObject in affectedObjects:
            self.removeObject(removedObject)
    elif signal == 'hideSynapsesOnConnections':
        # If we hide/show synapses we need to add or delete them from visibles
        if self.autoVisualize:
            for networkObject in self.network.objects:
                if isinstance(networkObject, Synapse):
                    if self.hideSynapsesOnConnections():
                        self.removeObject(networkObject)
                    else:
                        self.visualizeObject(networkObject)
    else:
        pass # TODO: anything?
    # Any network change dirties the document.
    self.GetTopLevelParent().setModified(True)
def _neuronRegionChanged(self, sender):
# TODO: untested method
visible = self.visiblesForObject(sender)
if visible.parent is not None:
visible.parent.removeChildVisible(visible)
if sender.region is not None:
newParent = self.visiblesForObject(sender.region)
if newParent is not None:
newParent.addChildVisible(visible)
def setShowRegionNames(self, show):
    """
    Set whether the names of regions should be shown by default in the visualization.
    """
    if show == self._showRegionNames:
        return
    self._showRegionNames = show
    dispatcher.send(('set', 'showRegionNames'), self)
    self.Refresh()
def showRegionNames(self):
    """
    Return whether region names are shown by default in the visualization.
    """
    return self._showRegionNames
def setShowNeuronNames(self, show):
    """
    Set whether the names of neurons should be shown by default in the visualization.
    """
    if show == self._showNeuronNames:
        return
    self._showNeuronNames = show
    dispatcher.send(('set', 'showNeuronNames'), self)
    self.Refresh()
def showNeuronNames(self):
    """
    Return whether neuron names are shown by default in the visualization.
    """
    return self._showNeuronNames
def hideUnselectedNeurons(self):
    """
    Return whether unselected neurons are hidden when at least one item is selected.
    """
    return self._hideUnselectedNeurons
def setHideUnselectedNeurons(self, value):
    """
    Set whether to hide unselected neurons when at least one other item is selected.
    """
    if value == self._hideUnselectedNeurons:
        return
    self._hideUnselectedNeurons = value
    # NOTE(review): unlike the other settings this send() omits self as the sender —
    # confirm whether that is intentional before changing it.
    dispatcher.send(('set', 'hideUnselectedNeurons'))
    # Re-apply the selection so hidden/shown state updates immediately.
    self.selectVisibles(self.selectedVisibles, reselect=True)
    self.Refresh()
def hideSynapsesOnConnections(self):
    """
    Return whether synapses on connections are hidden in the visualization.
    """
    return self._hideSynapsesOnConnections
def setHideSynapsesOnConnections(self, value):
    """
    Set whether synapses on connections should be hidden in the visualization.
    """
    if value == self._hideSynapsesOnConnections:
        return
    self._hideSynapsesOnConnections = value
    # This signal shape intentionally differs from the ('set', ...) settings signals:
    # it is handled by _networkChanged() with the network as sender.
    dispatcher.send('hideSynapsesOnConnections', self.network)
    self.Refresh()
def setShowNeuronNamesOnSelection(self, show):
    """
    Set whether the names of neurons should be shown in the visualization when selected.
    """
    if show == self._showNeuronNamesOnSelection:
        return
    self._showNeuronNamesOnSelection = show
    dispatcher.send(('set', 'showNeuronNamesOnSelection'), self)
    self.Refresh()
def showNeuronNamesOnSelection(self):
    """
    Return whether neuron names are shown in the visualization when selected.
    """
    return self._showNeuronNamesOnSelection
def setPrintNeuronNamesOnSelection(self, show):
    """
    Set whether the names of neurons should be printed when selected.
    """
    if show == self._printNeuronNamesOnSelection:
        return
    self._printNeuronNamesOnSelection = show
    dispatcher.send(('set', 'printNeuronNamesOnSelection'), self)
    self.Refresh()
def printNeuronNamesOnSelection(self):
    """
    Return whether neuron names are printed when selected.
    """
    return self._printNeuronNamesOnSelection
def setLabelsFloatOnTop(self, floatLabels):
    """
    Set whether labels should be rendered on top of all other objects in the visualization.
    """
    if floatLabels == self._labelsFloatOnTop:
        return
    self._labelsFloatOnTop = floatLabels
    dispatcher.send(('set', 'labelsFloatOnTop'), self)
    self.Refresh()
def labelsFloatOnTop(self):
    """
    Return whether labels are rendered on top of all other objects in the visualization.
    """
    return self._labelsFloatOnTop
def setShowFlow(self, showFlow):
    """
    Set whether the flow of information should be shown for all objects in the visualization.
    """
    if showFlow == self._showFlow:
        return
    self._showFlow = showFlow
    dispatcher.send(('set', 'showFlow'), self)
def showFlow(self):
    """
    Return whether the flow of information is shown for all objects in the visualization.
    """
    return self._showFlow
def setSelectionHighlightDepth(self, depth):
    """
    Set how far away objects connected to the current selection should be highlighted.
    """
    if depth == self._selectionHighlightDepth:
        return
    self._selectionHighlightDepth = depth
    # Recompute the highlighted/animated sets with the new depth.
    self._onSelectionOrShowFlowChanged()
    dispatcher.send(('set', 'selectionHighlightDepth'), self)
def selectionHighlightDepth(self):
    """
    Return how far away objects connected to the current selection are highlighted.
    """
    return self._selectionHighlightDepth
def setHighlightOnlyWithinSelection(self, flag):
    """
    Set whether connections to objects outside of the selection should be highlighted
    when more than one object is selected.
    """
    if flag == self._highlightOnlyWithinSelection:
        return
    self._highlightOnlyWithinSelection = flag
    # Recompute the highlighted/animated sets with the new policy.
    self._onSelectionOrShowFlowChanged()
    dispatcher.send(('set', 'highlightOnlyWithinSelection'), self)
def highlightOnlyWithinSelection(self):
    """
    Return whether connections to objects outside of the selection are highlighted
    when more than one object is selected.
    """
    return self._highlightOnlyWithinSelection
def setUseGhosts(self, useGhosts):
    """
    Set whether unselected objects should be dimmed in the visualization.
    """
    if useGhosts == self._useGhosts:
        return
    self._useGhosts = useGhosts
    dispatcher.send(('set', 'useGhosts'), self)
    self.Refresh()
def useGhosts(self):
    """
    Return whether unselected objects are dimmed in the visualization.
    """
    return self._useGhosts
def setGhostingOpacity(self, opacity):
    """
    Set the opacity used to render unselected objects while ghosting is enabled.
    
    The opacity must be a number between 0.0 and 1.0, inclusive.
    """
    if not isinstance(opacity, (float, int)):
        raise TypeError('The value passed to setGhostingOpacity() must be a number.')
    if opacity < 0.0 or opacity > 1.0:
        raise ValueError('The value passed to setGhostingOpacity() must be between 0.0 and 1.0, inclusive.')
    if opacity != self._ghostingOpacity:
        self._ghostingOpacity = opacity
        dispatcher.send(('set', 'ghostingOpacity'), self)
        self.Refresh()
def ghostingOpacity(self):
    """
    Return the opacity used for unselected objects when ghosting is enabled.
    """
    return self._ghostingOpacity
def setLabel(self, networkObject, label):
    """
    Set the label that adorns the visualization of the indicated :class:`network object <network.object.Object>`.
    
    The label argument should be a string value or None to indicate that the object's abbreviation or name should be used.  To have no label pass an empty string.
    """
    if not isinstance(networkObject, (Object, Visible)) or (isinstance(networkObject, Object) and networkObject.network != self.network) or (isinstance(networkObject, Visible) and networkObject.display != self):
        raise ValueError, 'The object argument passed to setLabel() must be an object from the network being visualized by this display.'
    if not isinstance(label, (str, type(None))):
        raise TypeError, 'The label argument passed to setLabel() must be a string or None.'
    visible = None
    if isinstance(networkObject, Object):
        visibles = self.visiblesForObject(networkObject)
        if len(visibles) == 1:
            visible = visibles[0]
        elif isinstance(networkObject, Stimulus):
            # A stimulus has two visibles (node and path/edge); label the non-path (node) one.
            visible = visibles[0 if visibles[1].isPath() else 1]
    else:
        visible = networkObject
    if visible is not None:
        visible.setLabel(label)
def setLabelColor(self, networkObject, color):
    """
    Set the color of the label of the indicated :class:`network object <network.object.Object>`.
    
    The color argument should be a tuple or list of three values between 0.0 and 1.0 indicating the red, green and blue values of the color.  For example:
    
    * (0.0, 0.0, 0.0) -> black
    * (1.0, 0.0, 0.0) -> red
    * (0.0, 1.0, 0.0) -> green
    * (0.0, 0.0, 1.0) -> blue
    * (1.0, 1.0, 1.0) -> white
    
    Any alpha value should be set independently using :meth:`setVisibleOpacity <Display.Display.Display.setVisibleOpacity>`.
    """
    if not isinstance(networkObject, (Object, Visible)) or (isinstance(networkObject, Object) and networkObject.network != self.network) or (isinstance(networkObject, Visible) and networkObject.display != self):
        raise ValueError, 'The object argument passed to setLabelColor() must be an object from the network being visualized by this display .'
    if (not isinstance(color, (tuple, list)) or len(color) != 3 or
        not isinstance(color[0], (int, float)) or color[0] < 0.0 or color[0] > 1.0 or
        not isinstance(color[1], (int, float)) or color[1] < 0.0 or color[1] > 1.0 or
        not isinstance(color[2], (int, float)) or color[2] < 0.0 or color[2] > 1.0):
        raise ValueError, 'The color argument passed to setLabelColor() should be a tuple or list of three integer or floating point values between 0.0 and 1.0, inclusively.'
    visible = None
    if isinstance(networkObject, Object):
        visibles = self.visiblesForObject(networkObject)
        if len(visibles) == 1:
            visible = visibles[0]
        elif isinstance(networkObject, Stimulus):
            # A stimulus has two visibles (node and path/edge); color the non-path (node) label.
            visible = visibles[0 if visibles[1].isPath() else 1]
    else:
        visible = networkObject
    if visible is not None:
        visible.setLabelColor(color)
def setLabelPosition(self, networkObject, position):
    """
    Set the position of the label that adorns the visualization of the indicated :class:`network object <network.object.Object>`.
    
    The position argument should be a tuple or list indicating the position of the label.  The coordinates are local to the object which is usually a unit square centered at (0.0, 0.0).  For example:
    
    * (0.0, 0.0) -> label at center of object
    * (-0.5, -0.5) -> label at lower left corner of object
    * (0.0, 0.5) -> label centered at top of object
    """
    if not isinstance(networkObject, (Object, Visible)) or (isinstance(networkObject, Object) and networkObject.network != self.network) or (isinstance(networkObject, Visible) and networkObject.display != self):
        raise ValueError, 'The object argument passed to setLabelPosition() must be an object from the network being visualized by this display .'
    if not isinstance(position, (tuple, list)):
        raise TypeError, 'The position argument passed to setLabelPosition() must be a tuple or list of numbers.'
    for dim in position:
        if not isinstance(dim, (int, float)):
            raise TypeError, 'The components of the position argument passed to setLabelPosition() must be numbers.'
    visible = None
    if isinstance(networkObject, Object):
        visibles = self.visiblesForObject(networkObject)
        if len(visibles) == 1:
            visible = visibles[0]
        elif isinstance(networkObject, Stimulus):
            # A stimulus has two visibles (node and path/edge); position the non-path (node) label.
            visible = visibles[0 if visibles[1].isPath() else 1]
    else:
        visible = networkObject
    if visible is not None:
        visible.setLabelPosition(position)
def setVisiblePosition(self, networkObject, position = None, fixed = None):
    """
    Set the position of the :class:`network object <network.object.Object>` within the display or within its visual container.
    
    The position parameter should be a tuple or list of numbers.  When setting the position of an object within another the coordinates are relative to a unit cube centered at (0.0, 0.0, 0.0).
    
    The fixed parameter indicates whether the user should be given GUI controls to manipulate the position of the object.
    """
    if not isinstance(networkObject, (Object, Visible)) or (isinstance(networkObject, Object) and networkObject.network != self.network) or (isinstance(networkObject, Visible) and networkObject.display != self):
        raise ValueError, 'The object argument passed to setVisiblePosition() must be an object from the network being visualized by this display .'
    # The position is only validated when one was supplied; None leaves the position unchanged.
    if position != None:
        if not isinstance(position, (tuple, list)) or len(position) != 3:
            raise TypeError, 'The position argument passed to setVisiblePosition() must be a tuple or list of three numbers.'
        for dim in position:
            if not isinstance(dim, (int, float)):
                raise TypeError, 'The components of the position argument passed to setVisiblePosition() must be numbers.'
    visible = None
    if isinstance(networkObject, Object):
        visibles = self.visiblesForObject(networkObject)
        if len(visibles) == 1:
            visible = visibles[0]
        elif isinstance(networkObject, Stimulus):
            # A stimulus has two visibles (node and path/edge); position the non-path (node) one.
            visible = visibles[0 if visibles[1].isPath() else 1]
    else:
        visible = networkObject
    if visible is not None:
        if position is not None:
            visible.setPosition(position)
        if fixed is not None:
            visible.setPositionIsFixed(fixed)
def setVisibleRotation(self, networkObject, rotation):
    """
    Set the rotation of the given network object's visible, when it has exactly one.
    """
    matches = self.visiblesForObject(networkObject)
    if len(matches) == 1:
        matches[0].setRotation(rotation)
def setVisibleSize(self, networkObject, size = None, fixed=True, absolute=False):
    """
    Set the size of the :class:`network object <network.object.Object>` within the display or within its visual container.
    
    The size parameter should be a tuple or list of numbers, or None to leave the current size unchanged.  When setting the size of an object within another the coordinates are relative to a unit cube centered at (0.0, 0.0, 0.0).
    
    The fixed parameter indicates whether the user should be given GUI controls to manipulate the size of the object.
    
    The absolute parameter indicates whether the size should be considered relative to the entire display (True) or relative to the visual container (False).
    """
    if not isinstance(networkObject, (Object, Visible)) or (isinstance(networkObject, Object) and networkObject.network != self.network) or (isinstance(networkObject, Visible) and networkObject.display != self):
        raise ValueError('The object argument passed to setVisibleSize() must be an object from the network being visualized by this display .')
    # Bug fix: size defaults to None (meaning "leave the size alone", see below) but was
    # previously validated unconditionally, so calling with the default raised TypeError.
    # Guard the validation as setVisiblePosition() does.
    if size != None:
        if not isinstance(size, (tuple, list)):
            raise TypeError('The size argument passed to setVisibleSize() must be a tuple or list of numbers.')
        for dim in size:
            if not isinstance(dim, (int, float)):
                raise TypeError('The components of the size argument passed to setVisibleSize() must be numbers.')
    visible = None
    if isinstance(networkObject, Object):
        visibles = self.visiblesForObject(networkObject)
        if len(visibles) == 1:
            visible = visibles[0]
    else:
        visible = networkObject
    if visible is not None:
        if size is not None:
            visible.setSize(size)
        visible.setSizeIsFixed(fixed)
        visible.setSizeIsAbsolute(absolute)
def setVisibleColor(self, networkObject, color):
"""
Set the color of the indicated :class:`network object <network.object.Object>`.
The color argument should be a tuple or list of three values between 0.0 and 1.0 indicating the red, green and blue values of the color. For example:
* (0.0, 0.0, 0.0) -> black
* (1.0, 0.0, 0.0) -> red
* (0.0, 1.0, 0.0) -> green
* (0.0, 0.0, 1.0) -> blue
* (1.0, 1.0, 1.0) -> white
Any alpha value should be set independently using :meth:`setVisibleOpacity <Display.Display.Display.setVisibleOpacity>`.
"""
if not isinstance(networkObject, (Object, Visible)) or (isinstance(networkObject, Object) and networkObject.network != self.network) or (isinstance(networkObject, Visible) and networkObject.display != self):
raise TypeError, 'The object argument passed to setVisibleColor() must be an object from the network being visualized by this display.'
if (not isinstance(color, (tuple, list)) or len(color) != 3 or
not isinstance(color[0], (int, float)) or color[0] < 0.0 or color[0] > 1.0 or
not isinstance(color[1], (int, float)) or color[1] < 0.0 or color[1] > 1.0 or
not isinstance(color[2], (int, float)) or color[2] < 0.0 or color[2] > 1.0):
raise ValueError, 'The color argument should be a tuple or list of three integer or floating point values between 0.0 and 1.0, inclusively.'
visible = None
if isinstance(networkObject, Object):
visibles = self.visiblesForObject(networkObject)
if len(visibles) == 1:
visible = visibles[0]
elif isinstance(networkObject, Stimulus):
visible = visibles[0 if visibles[0].isPath() else 1]
else:
visible = networkObject
if visible is not None:
visible.setColor(color)
def setVisibleTexture(self, networkObject, texture, scale = 1.0):
"""
Set the :class:`texture <library.texture.Texture>` used to paint the surface of the visualized :class:`network object <network.object.Object>`.
>>> display.setVisibleTexture(region1, library.texture('Stripes'))
The texture parameter should be a :class:`texture <library.texture.Texture>` instance or None.
The scale parameter can be used to reduce or enlarge the texture relative to the visualized object.
"""
if not isinstance(networkObject, (Object, Visible)) or (isinstance(networkObject, Object) and networkObject.network != self.network) or (isinstance(networkObject, Visible) and networkObject.display != self):
raise TypeError, 'The object argument passed to setVisibleTexture() must be an object from the network being visualized by this display.'
if not isinstance(texture, (Texture, type(None))):
raise TypeError, 'The texture argument passed to setVisibleTexture() must be a texture from the library or None.'
if not isinstance(scale, (float, int)):
raise TypeError, 'The scale argument passed to setVisibleTexture() must be a number.'
visible = None
if isinstance(networkObject, Object):
visibles = self.visiblesForObject(networkObject)
if len(visibles) == 1:
visible = visibles[0]
elif isinstance(networkObject, Stimulus):
visible = visibles[0 if visibles[0].isPath() else 1]
else:
visible = networkObject
if visible is not None:
visible.setTexture(texture)
visible.setTextureScale(scale)
    def setVisibleShape(self, networkObject, shape):
        """
        Set the shape of the :class:`network object's <network.object.Object>` visualization.
        >>> display.setVisibleShape(neuron1, shapes['Ball'])
        >>> display.setVisibleShape(muscle1, shapes['Ring'](startAngle = 0.0, endAngle = pi))
        The shape parameter should be one of the classes in the shapes dictionary, an instance of one of the classes or None.
        """
        if isinstance(shape, str):
            # Mapping for pre-0.9.4 scripts.
            shapeNameMap = {'ball': 'Ball', 'box': 'Box', 'capsule': 'Capsule', 'cone': 'Cone', 'tube': 'Cylinder'}
            if shape in shapeNameMap:
                shape = shapeNameMap[shape]
            # Resolve the (possibly remapped) name to an actual shape class.
            shape = neuroptikon.shapeClass(shape)
        if not isinstance(networkObject, (Object, Visible)) or (isinstance(networkObject, Object) and networkObject.network != self.network) or (isinstance(networkObject, Visible) and networkObject.display != self):
            raise TypeError, 'The object argument passed to setVisibleShape() must be an object from the network being visualized by this display.'
        # Accept None, a Shape instance, or a class object that subclasses Shape.
        # NOTE(review): `type(shape) == type(self.__class__)` tests whether shape is itself
        # a class (same metaclass as this class) before calling issubclass; presumably this
        # guards against issubclass() raising on non-class values — confirm before changing.
        if shape != None and not isinstance(shape, Shape) and (not type(shape) == type(self.__class__) or not issubclass(shape, Shape)):
            raise TypeError, 'The shape parameter must be an instance of one of the classes in the shapes dictionary, an instance of one of the classes or None.'
        visible = None
        if isinstance(networkObject, Object):
            visibles = self.visiblesForObject(networkObject)
            if len(visibles) == 1:
                visible = visibles[0]
            elif isinstance(networkObject, Stimulus):
                # A stimulus maps to two visibles; take index 0 if it is the path, otherwise index 1.
                visible = visibles[0 if visibles[0].isPath() else 1]
        else:
            visible = networkObject
        if visible is not None:
            visible.setShape(shape)
def setVisibleOpacity(self, networkObject, opacity):
"""
Set the opacity of the :class:`network object's <network.object.Object>` visualization.
The opacity parameter should be a number from 0.0 (fully transparent) to 1.0 (fully opaque).
"""
if not isinstance(networkObject, (Object, Visible)) or (isinstance(networkObject, Object) and networkObject.network != self.network) or (isinstance(networkObject, Visible) and networkObject.display != self):
raise TypeError, 'The object argument passed to setVisibleOpacity() must be an object from the network being visualized by this display.'
if not isinstance(opacity, (int, float)) or opacity < 0.0 or opacity > 1.0:
raise ValueError, 'The opacity argument passed to setVisibleOpacity() must be an number between 0.0 and 1.0, inclusive.'
visible = None
if isinstance(networkObject, Object):
visibles = self.visiblesForObject(networkObject)
if len(visibles) == 1:
visible = visibles[0]
elif isinstance(networkObject, Stimulus):
visible = visibles[0 if visibles[0].isPath() else 1]
else:
visible = networkObject
if visible is not None:
visible.setOpacity(opacity)
def setVisibleWeight(self, networkObject, weight):
"""
Set the weight of the :class:`network object's <network.object.Object>` visualization.
The weight parameter should be a float value with 1.0 being a neutral weight. Currently this only applies to visualized connections.
"""
if not isinstance(networkObject, (Object, Visible)) or (isinstance(networkObject, Object) and networkObject.network != self.network) or (isinstance(networkObject, Visible) and networkObject.display != self):
raise TypeError, 'The object argument passed to setVisibleWeight() must be an object from the network being visualized by this display.'
if not isinstance(weight, (int, float)):
raise TypeError, 'The weight argument passed to setVisibleWeight() must be an number.'
visible = None
if isinstance(networkObject, Object):
visibles = self.visiblesForObject(networkObject)
if len(visibles) == 1:
visible = visibles[0]
elif isinstance(networkObject, Stimulus):
visible = visibles[0 if visibles[0].isPath() else 1]
else:
visible = networkObject
if visible is not None:
visible.setWeight(weight)
    def setVisiblePath(self, networkObject, startObject, endObject, midPoints = None, fixed = None):
        """
        Set the start and end points of a connecting :class:`object <network.object.Object>` and any additional mid-points.
        The start and end object should be from the same network and the mid-points should be a list of coordinates, e.g. [(0.1, 0.3), (0.1, 0.5), (0.2, 0.5)].
        If the start or end objects are moved, resized, etc. then the connecting object's visualization will be adjusted to maintain the connection.
        """
        if isinstance(startObject, list):
            # Versions 0.9.4 and prior put the midPoints first.
            # Rotate the three arguments so the modern (start, end, midPoints) order holds.
            swap = startObject
            startObject = endObject
            endObject = midPoints
            midPoints = swap
        # All three objects must belong to the network/display being visualized.
        if ((not isinstance(networkObject, (Object, Visible)) or (isinstance(networkObject, Object) and networkObject.network != self.network) or (isinstance(networkObject, Visible) and networkObject.display != self)) or
            not isinstance(startObject, (Object, Visible)) or (isinstance(startObject, Object) and startObject.network != self.network) or
            not isinstance(endObject, (Object, Visible)) or (isinstance(endObject, Object) and endObject.network != self.network)):
            raise ValueError, 'The object, startObject and endObject arguments passed to setVisiblePath() must be objects from the network being visualized by this display.'
        if midPoints != None:
            # Each mid-point must be a 2-D or 3-D coordinate made of numbers.
            if not isinstance(midPoints, (list, tuple)):
                raise TypeError, 'The midPoints argument passed to setVisiblePath() must be a list, a tuple or None.'
            for midPoint in midPoints:
                if not isinstance(midPoint, (list, tuple)) or len(midPoint) not in (2, 3):
                    raise ValueError, 'The mid-points passed to setVisiblePath() must be a list or tuple of numbers.'
                for midPointDim in midPoint:
                    if not isinstance(midPointDim, (int, float)):
                        raise ValueError, 'Each list or tuple mid-point passed to setVisiblePath() must contain only numbers.'
        if fixed != None:
            if not isinstance(fixed, bool):
                raise TypeError, 'The fixed argument passed to setVisiblePath() must be True, False or None'
        # Resolve the network object to its (single) visible proxy.
        visible = None
        if isinstance(networkObject, Object):
            visibles = self.visiblesForObject(networkObject)
            if len(visibles) == 1:
                visible = visibles[0]
            elif isinstance(networkObject, Stimulus):
                # A stimulus maps to two visibles; take index 0 if it is the path, otherwise index 1.
                visible = visibles[0 if visibles[0].isPath() else 1]
        else:
            visible = networkObject
        if visible is not None:
            # Resolve the endpoints to visibles as well; both must be visualized.
            if isinstance(startObject, Object):
                startVisibles = self.visiblesForObject(startObject)
                if len(startVisibles) != 1:
                    raise ValueError, 'The starting object of the path is not visualized.'
            else:
                startVisibles = [startObject]
            if isinstance(endObject, Object):
                endVisibles = self.visiblesForObject(endObject)
                if len(endVisibles) != 1:
                    raise ValueError, 'The ending object of the path is not visualized.'
            else:
                endVisibles = [endObject]
            visible.setPathEndPoints(startVisibles[0], endVisibles[0])
            if midPoints != None:
                visible.setPathMidPoints(midPoints)
            if fixed != None:
                visible.setPathIsFixed(fixed)
def setVisibleFlowTo(self, networkObject, show = True, color = None, spacing = None, speed = None, spread = None):
"""
Set the visualization style for the flow of information from the :class:`path object <network.object.Object>` start to its end.
The color argument should be a tuple containing red, green and blue values. For example:
* (0.0, 0.0, 0.0) -> black
* (1.0, 0.0, 0.0) -> red
* (0.0, 1.0, 0.0) -> green
* (0.0, 0.0, 1.0) -> blue
* (1.0, 1.0, 1.0) -> white
The spacing argument determines how far apart the pulses are placed and the speed argument determines how fast they move. Both arguments should be in world space coordinates.
The spread argument determines how far the tail of the pulse reaches, from 0.0 (no tail) to 1.0 (the tail reaches all the way to the next pulse).
"""
if not isinstance(networkObject, (Object, Visible)) or (isinstance(networkObject, Object) and networkObject.network != self.network) or (isinstance(networkObject, Visible) and networkObject.display != self):
raise TypeError, 'The object argument passed to setVisibleFlowTo() must be an object from the network being visualized by this display.'
visible = None
if isinstance(networkObject, Object):
visibles = self.visiblesForObject(networkObject)
if len(visibles) == 1:
visible = visibles[0]
elif isinstance(networkObject, Stimulus):
visible = visibles[0 if visibles[0].isPath() else 1]
else:
visible = networkObject
if visible is not None:
visible.setFlowTo(show)
if color is not None:
if len(color) == 3:
color = (color[0], color[1], color[2], 1.0)
visible.setFlowToColor(color)
if spacing is not None:
visible.setFlowToSpacing(spacing)
if speed is not None:
visible.setFlowToSpeed(speed)
if spread is not None:
visible.setFlowToSpread(spread)
def setVisibleFlowFrom(self, networkObject, show = True, color = None, spacing = None, speed = None, spread = None):
"""
Set the visualization style for the flow of information from the :class:`path object's <network.object.Object>` end back to its start.
The color argument should be a tuple containing red, green and blue values. For example:
* (0.0, 0.0, 0.0) -> black
* (1.0, 0.0, 0.0) -> red
* (0.0, 1.0, 0.0) -> green
* (0.0, 0.0, 1.0) -> blue
* (1.0, 1.0, 1.0) -> white
The spacing argument determines how far apart the pulses are placed and the speed argument determines how fast they move. Both arguments should be in world space coordinates.
The spread argument determines how far the tail of the pulse reaches, from 0.0 (no tail) to 1.0 (the tail reaches all the way to the next pulse).
"""
if not isinstance(networkObject, (Object, Visible)) or (isinstance(networkObject, Object) and networkObject.network != self.network) or (isinstance(networkObject, Visible) and networkObject.display != self):
raise TypeError, 'The object argument passed to setVisibleFlowFrom() must be an object from the network being visualized by this display.'
visible = None
if isinstance(networkObject, Object):
visibles = self.visiblesForObject(networkObject)
if len(visibles) == 1:
visible = visibles[0]
elif isinstance(networkObject, Stimulus):
visible = visibles[0 if visibles[0].isPath() else 1]
else:
visible = networkObject
if visible is not None:
visible.setFlowFrom(show)
if color is not None:
if len(color) == 3:
color = (color[0], color[1], color[2], 1.0)
visible.setFlowFromColor(color)
if spacing is not None:
visible.setFlowFromSpacing(spacing)
if speed is not None:
visible.setFlowFromSpeed(speed)
if spread is not None:
visible.setFlowFromSpread(color)
def setArrangedAxis(self, networkObject, axis = 'largest', recurse = False):
"""
Automatically arrange the visible children of the indicated :class:`network object <network.object.Object>` along the specified axis.
The axis value should be one of 'largest', 'X', 'Y', 'Z' or None. When 'largest' is indicated the children will be arranged along whichever axis is longest at any given time. Resizing the parent object therefore can change which axis is used.
If recurse is True then all descendants will have their axes set as well.
"""
if not isinstance(networkObject, (Object, Visible)) or (isinstance(networkObject, Object) and networkObject.network != self.network) or (isinstance(networkObject, Visible) and networkObject.display != self):
raise ValueError, 'The object argument passed to setArrangedAxis() must be an object from the network being visualized by this display .'
if axis not in [None, 'largest', 'X', 'Y', 'Z']:
raise ValueError, 'The axis argument passed to setArrangedAxis() must be one of \'largest\', \'X\', \'Y\', \'Z\' or None.'
visible = None
if isinstance(networkObject, Object):
visibles = self.visiblesForObject(networkObject)
if len(visibles) == 1:
visible = visibles[0]
else:
visible = networkObject
if visible is not None:
visible.setArrangedAxis(axis = axis, recurse = recurse)
def setArrangedSpacing(self, networkObject, spacing = .02, recurse = False):
"""
Set the visible spacing between the children of the indicated :class:`network object <network.object.Object>`.
The spacing is measured as a fraction of the whole. So a value of .02 uses 2% of the parent's size for the spacing between each object.
If recurse is True then all descendants will have their spacing set as well.
"""
if not isinstance(networkObject, (Object, Visible)) or (isinstance(networkObject, Object) and networkObject.network != self.network) or (isinstance(networkObject, Visible) and networkObject.display != self):
raise ValueError, 'The object argument passed to setArrangedSpacing() must be an object from the network being visualized by this display .'
if not isinstance(spacing, (int, float)):
raise TypeError, 'The spacing argument passed to setArrangedSpacing() must be an integer or floating point value.'
visible = None
if isinstance(networkObject, Object):
visibles = self.visiblesForObject(networkObject)
if len(visibles) == 1:
visible = visibles[0]
else:
visible = networkObject
if visible is not None:
visible.setArrangedSpacing(spacing = spacing, recurse = recurse)
def setArrangedWeight(self, networkObject, weight):
"""
Set the amount of its parent's space the indicated :class:`network object <network.object.Object>` should use compared to its siblings.
Larger weight values will result in more of the parent's space being used.
If recurse is True then all descendants will have their spacing set as well.
"""
if not isinstance(networkObject, (Object, Visible)) or (isinstance(networkObject, Object) and networkObject.network != self.network) or (isinstance(networkObject, Visible) and networkObject.display != self):
raise ValueError, 'The object argument passed to setArrangedWeight() must be an object from the network being visualized by this display .'
if not isinstance(weight, (int, float)):
raise TypeError, 'The weight argument passed to setArrangedWeight() must be an integer or floating point value.'
visible = None
if isinstance(networkObject, Object):
visibles = self.visiblesForObject(networkObject)
if len(visibles) == 1:
visible = visibles[0]
else:
visible = networkObject
if visible is not None:
visible.setArrangedWeight(weight)
def selectObjectsMatching(self, predicate):
matchingVisibles = []
for networkObject in self.network.objects:
if predicate.matches(networkObject):
for visible in self.visiblesForObject(networkObject):
matchingVisibles.append(visible)
self.selectVisibles(matchingVisibles)
def selectObjects(self, objects, extend = False, findShortestPath = False, color = None):
"""
Select the indicated :class:`network objects <network.object.Object>`.
If extend is True then the objects will be added to the current selection, otherwise the objects will replace the current selection.
If findShortestPath is True then the shortest path between the currently selected object(s)s and the indicated object(s) will be found and all will be selected.
"""
if not isinstance(objects, (list, tuple, set)):
raise TypeError, 'The objects argument passed to selectObjects must be a list, tuple or set.'
visibles = []
for networkObject in objects:
visibles.extend(self.visiblesForObject(networkObject))
if color:
for visible in visibles:
self._visiblesSelectionColors[visible] = color
self.selectVisibles(visibles, extend, findShortestPath)
def deselectObjects(self, objects):
"""
Deselect the indicated :class:`network objects <network.object.Object>`.
Objects will be deleted from the current selection.
"""
if not isinstance(objects, (list, tuple, set)):
raise TypeError, 'The objects argument passed to selectObjects must be a list, tuple or set.'
visibles = []
for networkObject in objects:
visibles.extend(self.visiblesForObject(networkObject))
self.deselectVisibles(visibles)
def deselectObject(self, networkObject):
"""
Deselect the indicated :class:`network objects <network.object.Object>`.
Objects will be deleted from the current selection.
"""
for visible in self.visiblesForObject(networkObject):
self.deselectVisibles([visible])
def selectObject(self, networkObject, extend = False, findShortestPath = False, color = None):
"""
Select the indicated :class:`network object <network.object.Object>`.
If extend is True then the object will be added to the current selection, otherwise the object will replace the current selection.
If findShortestPath is True then the shortest path between the currently selected object(s)s and the indicated object will be found and all will be selected.
"""
for visible in self.visiblesForObject(networkObject):
if color:
self._visiblesSelectionColors[visible] = color
self.selectVisibles([visible], extend, findShortestPath)
def objectIsSelected(self, networkObject):
"""
Return whether the indicated :class:`network object <network.object.Object>` is part of the current selection.
"""
for visible in self.visiblesForObject(networkObject):
if visible in self.selectedVisibles:
return True
return False
    def selectVisibles(self, visibles, extend = False, findShortestPath = False, fromclick=False, reselect=False):
        """
        Select the indicated :class:`visible proxies <display.visible.Visible>`.
        If extend is True then the visible will be added to the current selection, otherwise the visible will replace the current selection.
        If findShortestPath is True then the shortest path between the currently selected visible(s) and the indicated visible will be found and all will be selected.
        """
        # Start from the existing selection only when extending/path-finding and the
        # current selection wasn't merely a hover highlight.
        if (extend or findShortestPath) and not self.hoverSelected:
            newSelection = set(self.selectedVisibles)
        else:
            newSelection = set()
        # Clicks cannot select fully transparent (hidden) neurons.
        if self._hideUnselectedNeurons and fromclick == True and len(visibles):
            visibles = [visible for visible in visibles if visible.getCurrentOpacity() != 0]
        if findShortestPath:
            # Add the visibles that exist along the path to the selection.
            pathWasFound = False
            #TODO Slow
            for visible in visibles:
                for startVisible in self.selectedVisibles:
                    for pathObject in self.network.shortestPath(startVisible.client, visible.client):
                        for pathVisible in self.visiblesForObject(pathObject):
                            pathWasFound = True
                            # Propagate any custom selection color to the path members.
                            if visible in self._visiblesSelectionColors:
                                self._visiblesSelectionColors[pathVisible] = self._visiblesSelectionColors[visible]
                            newSelection.add(pathVisible)
            if not pathWasFound:
                # Audible feedback that no path exists.
                wx.Bell()
        elif extend and len(visibles) == 1 and visibles[0] in newSelection:
            # Remove the visible from the selection
            newSelection.remove(visibles[0])
        else:
            # Add the visibles to the new selection.
            for visible in visibles:
                # Select the root of the object if appropriate.
                # NOTE(review): this rebinds `visibles` inside the loop that iterates it;
                # presumably intentional so the root's visibles replace the clicked one,
                # but verify — iterating a rebound list relies on the iterator keeping
                # the original sequence.
                rootObject = visible.client.rootObject()
                if rootObject and not self.objectIsSelected(rootObject) and not self.visiblesForObject(rootObject)[0] in visibles:
                    visibles = self.visiblesForObject(rootObject)
                    # Highlight root object instead of visible
                    if visible in self._visiblesSelectionColors:
                        self._visiblesSelectionColors[visibles[0]] = self._visiblesSelectionColors[visible]
                        del self._visiblesSelectionColors[visible]
                if any(visibles):
                    visible = visibles[0]
                newSelection.add(visible)
        self._selectedShortestPath = findShortestPath
        # Only rebuild selection state when something actually changed (or a reselect
        # / hover-to-explicit transition forces it).
        if newSelection != self.selectedVisibles or (self.hoverSelected and not self.hoverSelecting) or reselect == True:
            self._clearDragger()
            self.selectedVisibles = newSelection
            if len(self.selectedVisibles) == 0:
                # There is no selection so hover selecting should be enabled.
                self.hoverSelecting = False
                self.hoverSelect = True
            elif not self.hoverSelecting:
                # An explicit selection has been made via the GUI or console.
                self.hoverSelect = False # disable hover selecting
                # TODO Dragging doesn't work so this just takes time
                if len(self.selectedVisibles) == 1:
                    pass
                    # Add a dragger to the selected visible.
                    # visible = list(self.selectedVisibles)[0]
                    # if visible._isDraggable():
                    #     self._addDragger(visible)
            # Notify observers (e.g. inspectors) that the selection changed.
            dispatcher.send(('set', 'selection'), self)
        self.hoverSelected = self.hoverSelecting
        self.hoverSelecting = False
        self.Refresh()
    def deselectVisibles(self, visibles):
        """
        Deselect the indicated :class:`visible proxies <display.visible.Visible>`.
        The visible will be deleted from the current selection.
        """
        # Build the reduced selection set without mutating the live one.
        newSelection = set(self.selectedVisibles)
        for visible in visibles:
            if visible in newSelection:
                newSelection.remove(visible)
        # Mirror of the state update in selectVisibles(): only rebuild when changed.
        if newSelection != self.selectedVisibles or (self.hoverSelected and not self.hoverSelecting):
            self._clearDragger()
            self.selectedVisibles = newSelection
            if len(self.selectedVisibles) == 0:
                # There is no selection so hover selecting should be enabled.
                self.hoverSelecting = False
                self.hoverSelect = True
            elif not self.hoverSelecting:
                # An explicit selection has been made via the GUI or console.
                self.hoverSelect = False # disable hover selecting
                # TODO Dragging doesn't work so this just takes time
                if len(self.selectedVisibles) == 1:
                    pass
                    # Add a dragger to the selected visible.
                    # visible = list(self.selectedVisibles)[0]
                    # if visible._isDraggable():
                    #     self._addDragger(visible)
            # Notify observers (e.g. inspectors) that the selection changed.
            dispatcher.send(('set', 'selection'), self)
        self.hoverSelected = self.hoverSelecting
        self.hoverSelecting = False
        self.Refresh()
    def selection(self):
        # Return the currently selected visibles wrapped in an ObjectList.
        return ObjectList(self.selectedVisibles)
def selectedObjects(self):
"""
Return the list of :class:`network objects <network.object.Object>` that are currently selected.
"""
selection = set()
for visible in self.selectedVisibles:
if visible.client is not None:
selection.add(visible.client)
return list(selection)
def selectAll(self):
"""
Select all :class:`network objects <network.object.Object>` in the visualization.
"""
visiblesToSelect = []
for visibles in self.visibles.itervalues():
for visible in visibles:
visiblesToSelect.append(visible)
self.selectVisibles(visiblesToSelect)
    def _onSelectionOrShowFlowChanged(self):
        # Update the highlighting, animation and ghosting based on the current selection.
        # TODO: this should all be handled by display rules
        # Suppress intermediate redraws while the whole highlight state is rebuilt.
        refreshWasSupressed = self._suppressRefresh
        self._suppressRefresh = True
        def _highlightObject(networkObject, originalObject = None):
            # Add all visibles of networkObject to the highlight (and, for paths,
            # animate) sets; returns True when anything new was added.
            # `visiblesToHighlight`/`visiblesToAnimate` are closed over from below.
            highlightedSomething = False
            # Highlight/animate all visibles for this object.
            # If root object's visible in colors, add this visible to colors too.
            originalColors = []
            if originalObject:
                originalVisibles = self.visiblesForObject(originalObject)
                originalColors = [o for o in originalVisibles if o in self._visiblesSelectionColors]
            for visible in self.visiblesForObject(networkObject):
                if visible.isPath():
                    if visible not in visiblesToAnimate:
                        visiblesToAnimate.add(visible)
                        visiblesToHighlight.add(visible)
                        highlightedSomething = True
                        if originalColors:
                            # Inherit the custom selection color of the originating object.
                            self._visiblesSelectionColors[visible] = self._visiblesSelectionColors[originalColors[0]]
                elif visible not in visiblesToHighlight:
                    visiblesToHighlight.add(visible)
                    highlightedSomething = True
                    if originalColors:
                        self._visiblesSelectionColors[visible] = self._visiblesSelectionColors[originalColors[0]]
            # Highlight to the root of the object if appropriate.
            # Walks up parents, recursing once per ancestor; stops at the first
            # ancestor that adds nothing new.
            networkObject = networkObject.parentObject()
            while networkObject:
                if _highlightObject(networkObject):
                    networkObject = networkObject.parentObject()
                else:
                    networkObject = None
            return highlightedSomething
        # TODO: selecting neuron X in Morphology.py doesn't highlight neurites
        def _highlightConnectedObjects(rootObjects, maxDepth, highlightWithinSelection):
            # Do a breadth-first search on the graph of objects.
            queue = [[rootObject] for rootObject in rootObjects]
            highlightedObjects = [rootObject.rootObject() for rootObject in rootObjects]
            # NOTE(review): this aliases highlightedObjects — every append to
            # visitedObjects below also grows highlightedObjects, so visited objects
            # become "highlight targets" for later paths.  Looks deliberate but
            # confirm; use list(highlightedObjects) if a copy was intended.
            visitedObjects = highlightedObjects
            while any(queue):
                curPath = queue.pop(0)
                curObject = curPath[-1]
                originalObject = curPath[0]
                visitedObjects.append(curObject)
                curObjectRoot = curObject.rootObject()
                # If we've reached a highlighted object or the maximum depth then highlight the objects in the current path.
                if curObjectRoot in highlightedObjects or (not highlightWithinSelection and len(curPath) == maxDepth + 1):
                    for pathObject in curPath:
                        _highlightObject(pathObject, originalObject)
                # If we haven't reached the maximum depth then add the next layer of connections to the end of the queue.
                if len(curPath) <= maxDepth:
                    for connectedObject in curObjectRoot.connections():
                        if connectedObject not in curPath and connectedObject.rootObject() not in curPath and connectedObject not in visitedObjects:
                            queue += [curPath + [connectedObject]]
        visiblesToHighlight = set()
        visiblesToAnimate = set()
        if self._selectedShortestPath or not self.selectConnectedVisibles:
            # Highlight each selected visible (and its immediate neighbors) directly.
            isSingleSelection = (len(self.selectedVisibles) == 1) or not self._highlightOnlyWithinSelection
            for selectedVisible in self.selectedVisibles:
                if isinstance(selectedVisible.client, Object):
                    _highlightObject(selectedVisible.client)
                else:
                    # The selected visible has no network counterpart so highlight/animate connected visibles purely based on connectivity in the visualization.
                    visiblesToHighlight.add(selectedVisible)
                    if selectedVisible.isPath() and (selectedVisible.flowTo() or selectedVisible.flowFrom()):
                        visiblesToAnimate.add(selectedVisible)
                        visiblesToHighlight.add(selectedVisible)
                if selectedVisible.isPath():
                    # Highlight the visibles at each end of the path.
                    if selectedVisible.flowTo() or selectedVisible.flowFrom():
                        visiblesToAnimate.add(selectedVisible)
                        visiblesToHighlight.add(selectedVisible)
                    [visiblesToHighlight.add(endPoint) for endPoint in selectedVisible.pathEndPoints()]
                elif self.selectConnectedVisibles and not self._selectedShortestPath:
                    # Animate paths connecting to this non-path visible and highlight the other end of the paths.
                    for pathVisible in selectedVisible.connectedPaths:
                        otherVis = pathVisible._pathCounterpart(selectedVisible)
                        if isSingleSelection or otherVis in self.selectedVisibles:
                            visiblesToAnimate.add(pathVisible)
                            visiblesToHighlight.add(pathVisible)
                            visiblesToHighlight.add(otherVis)
        else:
            # TODO: handle object-less visibles
            # SLOW for selecting object, no time for deselecting objects
            _highlightConnectedObjects(self.selectedObjects(), self._selectionHighlightDepth, len(self.selectedVisibles) > 1 and self._highlightOnlyWithinSelection)
        # With nothing selected, optionally animate every flowing path in the display.
        if len(self.selectedVisibles) == 0 and self._showFlow:
            for visibles in self.visibles.itervalues():
                for visible in visibles:
                    if visible.isPath() and (visible.flowTo() or visible.flowFrom()):
                        visiblesToAnimate.add(visible)
        # Turn off highlighting/animating for visibles that shouldn't have it anymore.
        for highlightedNode in self.highlightedVisibles:
            if highlightedNode not in visiblesToHighlight:
                highlightedNode.setGlowColor(None)
                if highlightedNode in self._visiblesSelectionColors:
                    del self._visiblesSelectionColors[highlightedNode]
        for animatedEdge in self.animatedVisibles:
            if animatedEdge not in visiblesToAnimate:
                animatedEdge.animateFlow(False)
                if animatedEdge in self._visiblesSelectionColors:
                    del self._visiblesSelectionColors[animatedEdge]
        # Highlight/animate the visibles that should have it now.
        selectedString = ""
        for visibleToHighlight in visiblesToHighlight:
            if visibleToHighlight in self.selectedVisibles:
                # Directly selected: primary color (or a per-visible override).
                if visibleToHighlight in self._visiblesSelectionColors:
                    visibleToHighlight.setGlowColor(self._visiblesSelectionColors[visibleToHighlight])
                else:
                    visibleToHighlight.setGlowColor(self._primarySelectionColor)
                visibleToHighlight._updateLabel()
                if isinstance(visibleToHighlight.client, Neuron) and visibleToHighlight.client.name:
                    selectedString += " " + visibleToHighlight.client.name + ","
            elif visibleToHighlight in self._visiblesSelectionColors:
                visibleToHighlight.setGlowColor(self._visiblesSelectionColors[visibleToHighlight])
            elif not self._useGhosts:
                # Indirectly highlighted: secondary color unless ghosting handles contrast.
                visibleToHighlight.setGlowColor(self._secondarySelectionColor)
            else:
                visibleToHighlight.setGlowColor(None)
        # Echo the names of selected neurons to the console, if enabled.
        if self._printNeuronNamesOnSelection and selectedString:
            self.console.run("print 'Selected:" + selectedString[:-1] + "'", False, False)
        # SLOW
        for visibleToAnimate in visiblesToAnimate:
            visibleToAnimate.animateFlow()
        self.highlightedVisibles = visiblesToHighlight
        self.animatedVisibles = visiblesToAnimate
        # SLOWISH not the main culprit
        if self._useGhosts:
            # Dim everything that isn't selected, highlighted or animated.
            for visibles in self.visibles.itervalues():
                for visible in visibles:
                    visible._updateOpacity()
        if any(self.animatedVisibles):
            # Start the animation timer and cap the frame rate at 60 fps.
            if not self._animationTimer.IsRunning():
                self._animationTimer.Start(1000.0 / 60.0)
        elif self._animationTimer.IsRunning():
            # Don't need to redraw automatically if nothing is animated.
            self._animationTimer.Stop()
        self._suppressRefresh = refreshWasSupressed
    def _addDragger(self, visible):
        # Attach an osgManipulator dragger to the given visible so the user can
        # translate it (and resize it when its size is not fixed).  The visible's
        # scene graph node is re-parented under a Selection node that the dragger
        # manipulates; _clearDragger() undoes all of this.
        
        if visible.parent is None:
            rootNode = self.rootNode
        else:
            rootNode = visible.parent.childGroup
        
        # Remember the visible's bound before re-parenting so the dragger LOD
        # node below can be centered on it.
        lodBound = visible.sgNode.getBound()
        
        # Re-parent the visible's node under a manipulator selection.
        rootNode.removeChild(visible.sgNode)
        self.dragSelection = osgManipulator.Selection()
        self.dragSelection.addChild(visible.sgNode)
        rootNode.addChild(self.dragSelection)
        
        self.compositeDragger = None
        # Screen-size threshold (in pixels) at which the LOD node switches from
        # the simple translate-only dragger to the composite resize dragger.
        pixelCutOff = 200.0
        if self.viewDimensions == 2:
            self.draggerScale = 1.0
            self.simpleDragger = osgManipulator.TranslatePlaneDragger()
            if not visible.sizeIsFixed():
                self.compositeDragger = osgManipulator.TabPlaneDragger()
            # Orient the dragger to the current orthogonal view plane and offset
            # it by the visible's size along the viewing axis (normalized by the
            # parent's world size when the visible's size is relative).
            if self.orthoViewPlane == 'xy':
                if visible.parent is None or not visible.sizeIsAbsolute():
                    self.draggerOffset = (0.0, 0.0, visible.size()[2])
                else:
                    self.draggerOffset = (0.0, 0.0, visible.size()[2] / visible.parent.worldSize()[2])
                    pixelCutOff /= visible.parent.worldSize()[0]
                draggerMatrix = osg.Matrixd.rotate(pi / 2.0, osg.Vec3d(1, 0, 0)) * \
                                visible.sgNode.getMatrix() * \
                                osg.Matrixd.translate(*self.draggerOffset)
            elif self.orthoViewPlane == 'xz':
                if visible.parent is None or not visible.sizeIsAbsolute():
                    self.draggerOffset = (0.0, visible.size()[1], 0.0)
                else:
                    self.draggerOffset = (0.0, visible.size()[1] / visible.parent.worldSize()[1], 0.0)
                    pixelCutOff /= visible.parent.worldSize()[0]
                draggerMatrix = visible.sgNode.getMatrix() * \
                                osg.Matrixd.translate(*self.draggerOffset)
            elif self.orthoViewPlane == 'zy':
                if visible.parent is None or not visible.sizeIsAbsolute():
                    self.draggerOffset = (visible.size()[0], 0.0, 0.0)
                else:
                    self.draggerOffset = (visible.size()[0] / visible.parent.worldSize()[0], 0.0, 0.0)
                    pixelCutOff /= visible.parent.worldSize()[1]
                draggerMatrix = osg.Matrixd.rotate(pi / 2.0, osg.Vec3d(1, 0, 0)) * \
                                osg.Matrixd.rotate(pi / 2.0, osg.Vec3d(0, 1, 0)) * \
                                visible.sgNode.getMatrix() * \
                                osg.Matrixd.translate(*self.draggerOffset)
        elif self.viewDimensions == 3:
            self.draggerOffset = (0.0, 0.0, 0.0)
            # Scale the 3D dragger slightly up so it is not hidden inside the visible.
            self.draggerScale = 1.02
            self.simpleDragger = osgManipulator.TranslateAxisDragger()
            if not visible.sizeIsFixed():
                self.compositeDragger = osgManipulator.TabBoxDragger()
            if visible.parent is not None and visible.sizeIsAbsolute():
                pixelCutOff /= visible.parent.worldSize()[0]
            draggerMatrix = osg.Matrixd.rotate(pi / 2.0, osg.Vec3d(1, 0, 0)) * \
                            osg.Matrixd.scale(self.draggerScale, self.draggerScale, self.draggerScale) * \
                            visible.sgNode.getMatrix()
        self.simpleDragger.setMatrix(draggerMatrix)
        self.simpleDragger.setupDefaultGeometry()
        self.commandMgr = osgManipulator.CommandManager()
        self.commandMgr.connect(self.simpleDragger, self.dragSelection)
        if visible.sizeIsFixed():
            # Fixed-size visibles only ever need the translate dragger.
            rootNode.addChild(self.simpleDragger)
            self.activeDragger = self.simpleDragger
        else:
            # Resizable visibles get both draggers under an LOD node: the simple
            # dragger when small on screen, the composite (resize) one otherwise.
            self.commandMgr.connect(self.compositeDragger, self.dragSelection)
            self.compositeDragger.setMatrix(draggerMatrix)
            self.compositeDragger.setupDefaultGeometry()
            self.draggerLOD = osg.LOD()
            self.draggerLOD.setRangeMode(osg.LOD.PIXEL_SIZE_ON_SCREEN)
            self.draggerLOD.addChild(self.simpleDragger, 0.0, pixelCutOff)
            self.draggerLOD.addChild(self.compositeDragger, pixelCutOff, 10000.0)
            self.draggerLOD.setCenter(lodBound.center())
            self.draggerLOD.setRadius(lodBound.radius())
            rootNode.addChild(self.draggerLOD)
            
            # TODO: This is a serious hack.  The existing picking code in PickHandler doesn't handle the dragger LOD correctly.  It always picks the composite dragger.  Cull callbacks are added here so that we can know which dragger was most recently rendered.
            self.activeDragger = None
            self.simpleDragger.setCullCallback(DraggerCullCallback(self, self.simpleDragger).__disown__())
            self.compositeDragger.setCullCallback(DraggerCullCallback(self, self.compositeDragger).__disown__())
        
        # TODO: observe the visible's 'positionIsFixed' attribute and add/remove the draggers as needed
def _visibleWasDragged(self):
# TODO: It would be nice to constrain dragging if the visible has a parent. "Resistance" would be added when the child reached the parent border so that dragging slowed or stopped but if dragged far enough the child could force its way through.
visible = list(self.selectedVisibles)[0]
if self.activeDragger is not None:
matrix = self.activeDragger.getMatrix()
position = matrix.getTrans()
size = matrix.getScale()
if visible.parent is None or not visible.sizeIsAbsolute():
parentSize = (1.0, 1.0, 1.0)
else:
parentSize = visible.parent.worldSize()
visible.setPosition((position.x() - self.draggerOffset[0], position.y() - self.draggerOffset[1], position.z() - self.draggerOffset[2]))
visible.setSize((size.x() * parentSize[0] / self.draggerScale, size.y() * parentSize[1] / self.draggerScale, size.z() * parentSize[2] / self.draggerScale))
visible._updateTransform()
    def _clearDragger(self):
        # Tear down the dragger machinery created by _addDragger() and move the
        # visible's scene graph node back under its original parent.  The order
        # of operations matters: the visible must be re-parented before the
        # final _visibleWasDragged() call, and cull callbacks must be cleared
        # before the draggers are released.
        if self.dragSelection != None:
            visible = list(self.selectedVisibles)[0]
            if visible.parent is None:
                rootNode = self.rootNode
            else:
                rootNode = visible.parent.childGroup
            
            self.commandMgr.disconnect(self.simpleDragger)
            if self.compositeDragger is not None:
                self.commandMgr.disconnect(self.compositeDragger)
            self.commandMgr = None
            
            # Re-parent the visible's node back under the root.
            self.dragSelection.removeChild(visible.sgNode)
            rootNode.removeChild(self.dragSelection)
            self.dragSelection = None
            rootNode.addChild(visible.sgNode)
            # Apply the final dragger transform to the visible.
            self._visibleWasDragged()
            
            # A dragger LOD node only exists for resizable visibles; otherwise
            # the simple dragger was added to the root directly.
            if self.draggerLOD is not None:
                rootNode.removeChild(self.draggerLOD)
            else:
                rootNode.removeChild(self.simpleDragger)
            self.simpleDragger.setCullCallback(None)
            self.simpleDragger = None
            if self.compositeDragger is not None:
                self.compositeDragger.setCullCallback(None)
                self.compositeDragger = None
            self.draggerLOD = None
def onLayout(self, event):
layoutClasses = self.GetTopLevelParent().layoutClasses
layoutId = event.GetId()
if layoutId in layoutClasses:
layout = layoutClasses[layoutId]()
self.lastUsedLayout = layout
else:
layout = None
self.performLayout(layout)
def autoLayout(self, method = None):
# Backwards compatibility method, new code should use performLayout() instead.
if (method == 'graphviz' or method is None) and self.viewDimensions == 2:
from Layouts.force_directed import ForceDirectedLayout
self.performLayout(ForceDirectedLayout())
elif (method == 'spectral' or method is None) and self.viewDimensions == 3:
from Layouts.spectral import SpectralLayout
self.performLayout(SpectralLayout())
def performLayout(self, layout = None, **kwargs):
""" Perform an automatic layout of the :class:`network objects <network.object.Object>` in the visualization.
>>> display.performLayout(layouts['Force Directed'])
The layout parameter should be one of the classes in layouts, an instance of one of the classes or None to re-execute the previous or default layout.
"""
if layout != None and not isinstance(layout, layout_module.Layout) and (not type(layout) == type(self.__class__) or not issubclass(layout, layout_module.Layout)):
raise TypeError, 'The layout parameter passed to performLayout() should be one of the classes in layouts, an instance of one of the classes or None.'
self.beginProgress('Laying out the network...')
try:
if layout == None:
# Fall back to the last layout used.
layout = self.lastUsedLayout
else:
# If a layout class was passed in then create a default instance.
if isinstance(layout, type(self.__class__)):
layout = layout(**kwargs)
if not layout.__class__.canLayoutDisplay(self):
raise ValueError, gettext('The supplied layout cannot be used.')
if layout == None or not layout.__class__.canLayoutDisplay(self): # pylint: disable=E1103
layouts = neuroptikon.scriptLocals()['layouts']
if 'Graphviz' in layouts:
layout = layouts['Graphviz'](**kwargs)
elif 'Force Directed' in layouts:
layout = layouts['Force Directed'](**kwargs)
elif 'Spectral' in layouts:
layout = layouts['Spectral'](**kwargs)
else:
# Pick the first layout class capable of laying out the display.
for layoutClass in layouts.itervalues():
if layoutClass.canLayoutDisplay(self):
layout = layoutClass(**kwargs)
break
refreshWasSuppressed = self._suppressRefresh
self._suppressRefresh = True
layout.layoutDisplay(self)
self.lastUsedLayout = layout
except:
(exceptionType, exceptionValue) = sys.exc_info()[0:2]
wx.MessageBox(str(exceptionValue) + ' (' + exceptionType.__name__ + ')', gettext('An error occurred while performing the layout:'), parent = self, style = wx.ICON_ERROR | wx.OK)
finally:
self._suppressRefresh = refreshWasSuppressed
if self.viewDimensions == 2:
self.zoomToFit()
else:
self.resetView()
self.endProgress()
def saveViewAsImage(self, path):
"""
Save a snapshot of the current visualization to an image file.
The path parameter should indicate where the snapshot should be saved. The extension included in the path will determine the format of the image. Currently, bmp, jpg, png and tiff extensions are supported.
If the background color of the display has an alpha value less than 1.0 then the image saved will have a transparent background for formats that support it.
"""
width, height = self.GetClientSize()
image = osg.Image()
self.SetCurrent(self.glContext)
image.readPixels(0, 0, width, height, osg.GL_RGBA, osg.GL_UNSIGNED_BYTE)
osgDB.writeImageFile(image, path)
def onSaveView(self, event_):
fileTypes = ['JPG', 'Microsoft BMP', 'PNG', 'TIFF']
fileExtensions = ['jpg', 'bmp', 'png', 'tiff']
wildcard = ''
for index in range(0, len(fileTypes)):
if wildcard != '':
wildcard += '|'
wildcard += fileTypes[index] + '|' + fileExtensions[index]
fileDialog = wx.FileDialog(None, gettext('Save As:'), '', '', wildcard, wx.SAVE | wx.FD_OVERWRITE_PROMPT)
if fileDialog.ShowModal() == wx.ID_OK:
extension = fileExtensions[fileDialog.GetFilterIndex()]
savePath = str(fileDialog.GetPath())
if not savePath.endswith('.' + extension):
savePath += '.' + extension
self.saveViewAsImage(savePath)
fileDialog.Destroy()
def setDefaultFlowColor(self, color):
"""
Set the default color of the pulses in paths showing the flow of information.
The color argument should be a tuple or list of three values between 0.0 and 1.0 indicating the red, green and blue values of the color. For example:
* (0.0, 0.0, 0.0) -> black
* (1.0, 0.0, 0.0) -> red
* (0.0, 1.0, 0.0) -> green
* (0.0, 0.0, 1.0) -> blue
* (1.0, 1.0, 1.0) -> white
"""
if not isinstance(color, (list, tuple)): # or len(color) != 3:
raise ValueError, 'The color passed to setDefaultFlowColor() must be a tuple or list of three numbers.'
for colorComponent in color:
if not isinstance(colorComponent, (int, float)) or colorComponent < 0.0 or colorComponent > 1.0:
raise ValueError, 'The components of the color passed to setDefaultFlowColor() must all be numbers between 0.0 and 1.0, inclusive.'
if len(color) == 3:
color = (color[0], color[1], color[2], 1.0)
if color != self.defaultFlowColor:
self.defaultFlowColor = color
vec4color = osg.Vec4f(color[0], color[1], color[2], color[3])
self.defaultFlowToColorUniform.set(vec4color)
self.defaultFlowFromColorUniform.set(vec4color)
dispatcher.send(('set', 'defaultFlowColor'), self)
def setDefaultFlowSpacing(self, spacing):
"""
Set the default spacing between pulses in paths showing the flow of information.
The spacing argument is measured in world-space coordinates.
"""
if not isinstance(spacing, (int, float)):
raise TypeError, 'The spacing passed to setDefaultFlowSpacing() must be a number.'
if spacing != self.defaultFlowSpacing:
self.defaultFlowSpacing = float(spacing)
self.defaultFlowToSpacingUniform.set(self.defaultFlowSpacing)
self.defaultFlowFromSpacingUniform.set(self.defaultFlowSpacing)
dispatcher.send(('set', 'defaultFlowSpacing'), self)
def setDefaultFlowSpeed(self, speed):
"""
Set the default speed of the pulses in paths showing the flow of information.
The speed argument is measured in world-space coordinates per second.
"""
if not isinstance(speed, (int, float)):
raise TypeError, 'The speed passed to setDefaultFlowSpeed() must be a number.'
if speed != self.defaultFlowSpeed:
self.defaultFlowSpeed = float(speed)
self.defaultFlowToSpeedUniform.set(self.defaultFlowSpeed)
self.defaultFlowFromSpeedUniform.set(self.defaultFlowSpeed)
dispatcher.send(('set', 'defaultFlowSpeed'), self)
def setDefaultFlowSpread(self, spread):
"""
Set the length of the pulse tails in paths showing the flow of information.
The spread argument should be a number from 0.0 (no tail) to 1.0 (tail extends all the way to the next pulse).
"""
if not isinstance(spread, (int, float)):
raise TypeError, 'The spread passed to setDefaultFlowSpread() must be a number.'
if spread != self.defaultFlowSpread:
self.defaultFlowSpread = float(spread)
self.defaultFlowToSpreadUniform.set(self.defaultFlowSpread)
self.defaultFlowFromSpreadUniform.set(self.defaultFlowSpread)
dispatcher.send(('set', 'defaultFlowSpread'), self)
    def beginProgress(self, message = None, visualDelay = 1.0):
        """
        Display a message that a lengthy task has begun.
        
        Each call to this method must be balanced by a call to :meth:`endProgress <display.display.Display.endProgress>`. Any number of :meth:`updateProgress <display.display.Display.updateProgress>` calls can be made in the interim. Calls to this method can be nested as long as the right number of :meth:`endProgress <display.display.Display.endProgress>` calls are made.
        
        The visualDelay argument indicates how many seconds to wait until the progress user interface is shown. This avoids flashing the interface open and closed for tasks that end up running quickly.
        """
        
        # The progress user interface is owned by the top-level frame; delegate.
        return self.GetTopLevelParent().beginProgress(message, visualDelay)
    def updateProgress(self, message = None, fractionComplete = None):
        """
        Update the message and/or completion fraction during a lengthy task.
        
        If the user has pressed the Cancel button then this method will return False and the task should be aborted.
        """
        
        # The progress user interface is owned by the top-level frame; delegate.
        return self.GetTopLevelParent().updateProgress(message, fractionComplete)
    def endProgress(self):
        """
        Indicate that the lengthy task has ended.
        
        Balances a previous :meth:`beginProgress <display.display.Display.beginProgress>` call.
        """
        
        # The progress user interface is owned by the top-level frame; delegate.
        return self.GetTopLevelParent().endProgress()
    def addObjectOfClass(self, objectClass):
        # Begin interactively adding a new network object of the given class by
        # visualizing a placeholder (no underlying object yet) with the class's
        # default visualization parameters.  The class is remembered on the
        # placeholder so objectClassBeingAdded() can report it.
        self._visibleBeingAdded = self.visualizeObject(None, **objectClass._defaultVisualizationParams())
        self._visibleBeingAdded.objectClass = objectClass
def objectClassBeingAdded(self):
return self._visibleBeingAdded.objectClass if self._visibleBeingAdded else None
class DisplayDropTarget(wx.PyDropTarget):
    # Drop target attached to a display that accepts ontology terms dragged in
    # (custom data format "Neuroptikon Ontology Term") and creates matching
    # regions in the displayed network.
    
    def __init__(self, display):
        wx.PyDropTarget.__init__(self)
        self.display = display
        
        # specify the type of data we will accept
        self.dropData = wx.CustomDataObject("Neuroptikon Ontology Term")
        self.SetDataObject(self.dropData)
    
    def OnData(self, x_, y_, dragType):
        # Handle a completed drop: unpickle the dragged term reference, resolve
        # it against the ontology library and create a region for it.
        if self.GetData():
            termData = self.dropData.GetData()
            # NOTE(review): cPickle.loads() on drag-and-drop payloads executes
            # arbitrary unpickling code; this is safe only as long as drags
            # originate from this application - confirm.
            termDict = cPickle.loads(termData)
            ontologyId = termDict['Ontology']
            termId = termDict['Term']
            ontology = neuroptikon.library.ontology(ontologyId)
            if ontology is not None:
                term = ontology[termId]
                if term is not None:
                    # Holding Alt during the drop also adds the term's sub-terms.
                    self.display.network.createRegion(ontologyTerm = term, addSubTerms = wx.GetKeyState(wx.WXK_ALT))
                    if len(self.display.visibles) == 1:
                        self.display.zoomToFit()
        return dragType
| JaneliaSciComp/Neuroptikon | Source/display/display.py | Python | bsd-3-clause | 143,164 | [
"NEURON"
] | fab38d122bc7f3bf3a98880f706b0b539c4a056f0b2dbec1d5c300df49da8405 |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
import logging
import os
from PyQt4 import QtCore, QtGui
from openlp.core.lib import ItemCapabilities, MediaManagerItem,MediaType, Registry, ServiceItem, ServiceItemContext, \
Settings, UiStrings, build_icon, check_item_selected, check_directory_exists, translate
from openlp.core.lib.ui import critical_error_message_box, create_horizontal_adjusting_combo_box
from openlp.core.ui import DisplayController, Display, DisplayControllerType
from openlp.core.ui.media import get_media_players, set_media_players
from openlp.core.utils import AppLocation, get_locale_key
log = logging.getLogger(__name__)
CLAPPERBOARD = ':/media/slidecontroller_multimedia.png'
VIDEO_ICON = build_icon(':/media/media_video.png')
AUDIO_ICON = build_icon(':/media/media_audio.png')
DVD_ICON = build_icon(':/media/media_video.png')
ERROR_ICON = build_icon(':/general/general_delete.png')
class MediaMediaItem(MediaManagerItem):
    """
    This is the custom media manager item for Media Slides.
    """
    log.info('%s MediaMediaItem loaded', __name__)

    def __init__(self, parent, plugin):
        """
        Set up the media manager item and its hidden preview display, and
        register for the plugin events it reacts to.
        """
        self.icon_path = 'images/image'
        self.background = False
        self.automatic = ''
        super(MediaMediaItem, self).__init__(parent, plugin)
        self.single_service_item = False
        self.has_search = True
        self.media_object = None
        # A dedicated (hidden) display controller is used to preview media and
        # to determine media lengths without touching the live display.
        self.display_controller = DisplayController(parent)
        self.display_controller.controller_layout = QtGui.QVBoxLayout()
        self.media_controller.register_controller(self.display_controller)
        self.media_controller.set_controls_visible(self.display_controller, False)
        self.display_controller.preview_display = Display(self.display_controller, False, self.display_controller)
        self.display_controller.preview_display.hide()
        self.display_controller.preview_display.setGeometry(QtCore.QRect(0, 0, 300, 300))
        self.display_controller.preview_display.screen = {'size': self.display_controller.preview_display.geometry()}
        self.display_controller.preview_display.setup()
        self.media_controller.setup_display(self.display_controller.preview_display, False)
        Registry().register_function('video_background_replaced', self.video_background_replaced)
        Registry().register_function('mediaitem_media_rebuild', self.rebuild_players)
        Registry().register_function('config_screen_changed', self.display_setup)
        # Allow DnD from the desktop
        self.list_view.activateDnD()

    def retranslateUi(self):
        """
        (Re)apply translated strings to this item's widgets and actions.
        """
        self.on_new_prompt = translate('MediaPlugin.MediaItem', 'Select Media')
        self.replace_action.setText(UiStrings().ReplaceBG)
        self.replace_action.setToolTip(UiStrings().ReplaceLiveBG)
        self.reset_action.setText(UiStrings().ResetBG)
        self.reset_action.setToolTip(UiStrings().ResetLiveBG)
        self.automatic = UiStrings().Automatic
        self.display_type_label.setText(translate('MediaPlugin.MediaItem', 'Use Player:'))
        self.rebuild_players()

    def required_icons(self):
        """
        Set which icons the media manager tab should show
        """
        MediaManagerItem.required_icons(self)
        self.has_file_icon = True
        self.has_new_icon = False
        self.has_edit_icon = False

    def add_list_view_to_toolbar(self):
        """
        Add the list view plus the "replace background" context action.
        """
        MediaManagerItem.add_list_view_to_toolbar(self)
        self.list_view.addAction(self.replace_action)

    def add_end_header_bar(self):
        """
        Build the trailing toolbar actions and the player-override widget.
        """
        # Replace backgrounds do not work at present so remove functionality.
        self.replace_action = self.toolbar.add_toolbar_action('replace_action', icon=':/slides/slide_blank.png',
            triggers=self.onReplaceClick)
        self.reset_action = self.toolbar.add_toolbar_action('reset_action', icon=':/system/system_close.png',
            visible=False, triggers=self.onResetClick)
        self.media_widget = QtGui.QWidget(self)
        self.media_widget.setObjectName('media_widget')
        self.display_layout = QtGui.QFormLayout(self.media_widget)
        self.display_layout.setMargin(self.display_layout.spacing())
        self.display_layout.setObjectName('display_layout')
        self.display_type_label = QtGui.QLabel(self.media_widget)
        self.display_type_label.setObjectName('display_type_label')
        self.display_type_combo_box = create_horizontal_adjusting_combo_box(
            self.media_widget, 'display_type_combo_box')
        self.display_type_label.setBuddy(self.display_type_combo_box)
        self.display_layout.addRow(self.display_type_label, self.display_type_combo_box)
        # Add the Media widget to the page layout.
        self.page_layout.addWidget(self.media_widget)
        self.display_type_combo_box.currentIndexChanged.connect(self.overridePlayerChanged)

    def overridePlayerChanged(self, index):
        """
        Update the media player override when the combo box selection changes.
        Index 0 is the "automatic" entry (no override); other indexes map to
        the enabled players in order.
        """
        player = get_media_players()[0]
        if index == 0:
            set_media_players(player)
        else:
            set_media_players(player, player[index-1])

    def onResetClick(self):
        """
        Called to reset the Live background with the media selected,
        """
        self.media_controller.media_reset(self.live_controller)
        self.reset_action.setVisible(False)

    def video_background_replaced(self):
        """
        Triggered by main display on change of serviceitem.
        """
        self.reset_action.setVisible(False)

    def onReplaceClick(self):
        """
        Called to replace Live background with the media selected.
        """
        if check_item_selected(self.list_view,
                translate('MediaPlugin.MediaItem', 'You must select a media file to replace the background with.')):
            item = self.list_view.currentItem()
            filename = item.data(QtCore.Qt.UserRole)
            if os.path.exists(filename):
                service_item = ServiceItem()
                service_item.title = 'webkit'
                service_item.processor = 'webkit'
                (path, name) = os.path.split(filename)
                service_item.add_from_command(path, name,CLAPPERBOARD)
                if self.media_controller.video(DisplayControllerType.Live, service_item, video_behind_text=True):
                    self.reset_action.setVisible(True)
                else:
                    critical_error_message_box(UiStrings().LiveBGError,
                        translate('MediaPlugin.MediaItem', 'There was no display item to amend.'))
            else:
                critical_error_message_box(UiStrings().LiveBGError,
                    translate('MediaPlugin.MediaItem',
                        'There was a problem replacing your background, the media file "%s" no longer exists.') % filename)

    def generate_slide_data(self, service_item, item=None, xml_version=False, remote=False,
            context=ServiceItemContext.Live):
        """
        Generate the slide data. Needs to be implemented by the plugin.
        """
        if item is None:
            item = self.list_view.currentItem()
            if item is None:
                return False
        filename = item.data(QtCore.Qt.UserRole)
        if not os.path.exists(filename):
            if not remote:
                # File is no longer present
                critical_error_message_box(
                    translate('MediaPlugin.MediaItem', 'Missing Media File'),
                    translate('MediaPlugin.MediaItem', 'The file %s no longer exists.') % filename)
            return False
        (path, name) = os.path.split(filename)
        service_item.title = name
        service_item.processor = self.display_type_combo_box.currentText()
        service_item.add_from_command(path, name, CLAPPERBOARD)
        # Only get start and end times if going to a service
        if context == ServiceItemContext.Service:
            # Start media and obtain the length
            if not self.media_controller.media_length(service_item):
                return False
        service_item.add_capability(ItemCapabilities.CanAutoStartForLive)
        service_item.add_capability(ItemCapabilities.RequiresMedia)
        if Settings().value(self.settings_section + '/media auto start') == QtCore.Qt.Checked:
            service_item.will_auto_start = True
        # force a non-existent theme
        service_item.theme = -1
        return True

    def initialise(self):
        """
        Populate the list view from the saved media file list and set up the
        thumbnail directory.
        """
        self.list_view.clear()
        self.list_view.setIconSize(QtCore.QSize(88, 50))
        self.servicePath = os.path.join(AppLocation.get_section_data_path(self.settings_section), 'thumbnails')
        check_directory_exists(self.servicePath)
        self.load_list(Settings().value(self.settings_section + '/media files'))
        self.populateDisplayTypes()

    def rebuild_players(self):
        """
        Rebuild the tab in the media manager when changes are made in the settings.
        """
        self.populateDisplayTypes()
        self.on_new_file_masks = translate('MediaPlugin.MediaItem', 'Videos (%s);;Audio (%s);;%s (*)') % (
            ' '.join(self.media_controller.video_extensions_list),
            ' '.join(self.media_controller.audio_extensions_list), UiStrings().AllFiles)

    def display_setup(self):
        """
        Reconnect the hidden preview display after the screen configuration changed.
        """
        self.media_controller.setup_display(self.display_controller.preview_display, False)

    def populateDisplayTypes(self):
        """
        Load the combobox with the enabled media players, allowing user to select a specific player if settings allow.
        """
        # block signals to avoid unnecessary overridePlayerChanged Signals while combo box creation
        self.display_type_combo_box.blockSignals(True)
        self.display_type_combo_box.clear()
        usedPlayers, overridePlayer = get_media_players()
        media_players = self.media_controller.media_players
        currentIndex = 0
        for player in usedPlayers:
            # load the drop down selection
            self.display_type_combo_box.addItem(media_players[player].original_name)
            if overridePlayer == player:
                # NOTE(review): this looks like it should be the index of the
                # just-added item rather than len() of the combo box - confirm.
                currentIndex = len(self.display_type_combo_box)
        if self.display_type_combo_box.count() > 1:
            # Prepend the "automatic" choice when more than one player is enabled.
            self.display_type_combo_box.insertItem(0, self.automatic)
            self.display_type_combo_box.setCurrentIndex(currentIndex)
        if overridePlayer:
            self.media_widget.show()
        else:
            self.media_widget.hide()
        self.display_type_combo_box.blockSignals(False)

    def on_delete_click(self):
        """
        Remove a media item from the list.
        """
        if check_item_selected(self.list_view,
                translate('MediaPlugin.MediaItem', 'You must select a media file to delete.')):
            row_list = [item.row() for item in self.list_view.selectedIndexes()]
            # Delete from the bottom up so earlier removals don't shift rows.
            row_list.sort(reverse=True)
            for row in row_list:
                self.list_view.takeItem(row)
            Settings().setValue(self.settings_section + '/media files', self.get_file_list())

    def load_list(self, media, target_group=None):
        """
        Fill the list view from the given file paths, choosing an icon per
        entry: error (missing file), audio, video, or DVD (non-file path).
        """
        # Sort the media by its filename considering language specific characters.
        media.sort(key=lambda filename: get_locale_key(os.path.split(str(filename))[1]))
        for track in media:
            track_info = QtCore.QFileInfo(track)
            if not os.path.exists(track):
                filename = os.path.split(str(track))[1]
                item_name = QtGui.QListWidgetItem(filename)
                item_name.setIcon(ERROR_ICON)
                item_name.setData(QtCore.Qt.UserRole, track)
            elif track_info.isFile():
                filename = os.path.split(str(track))[1]
                item_name = QtGui.QListWidgetItem(filename)
                if '*.%s' % (filename.split('.')[-1].lower()) in self.media_controller.audio_extensions_list:
                    item_name.setIcon(AUDIO_ICON)
                else:
                    item_name.setIcon(VIDEO_ICON)
                item_name.setData(QtCore.Qt.UserRole, track)
            else:
                filename = os.path.split(str(track))[1]
                item_name = QtGui.QListWidgetItem(filename)
                item_name.setIcon(build_icon(DVD_ICON))
                item_name.setData(QtCore.Qt.UserRole, track)
            item_name.setToolTip(track)
            self.list_view.addItem(item_name)

    def get_list(self, type=MediaType.Audio):
        """
        Return the saved media file paths filtered by media type (audio or
        video), sorted by locale-aware filename.
        """
        media = Settings().value(self.settings_section + '/media files')
        media.sort(key=lambda filename: get_locale_key(os.path.split(str(filename))[1]))
        extension = []
        if type == MediaType.Audio:
            extension = self.media_controller.audio_extensions_list
        else:
            extension = self.media_controller.video_extensions_list
        # Strip the leading '*' from each '*.ext' mask before matching.
        extension = [x[1:] for x in extension]
        media = [x for x in media if os.path.splitext(x)[1] in extension]
        return media

    def search(self, string, showError):
        """
        Case-insensitive substring search over the saved media filenames.
        Returns a list of [path, filename] pairs.
        """
        files = Settings().value(self.settings_section + '/media files')
        results = []
        string = string.lower()
        for file in files:
            filename = os.path.split(str(file))[1]
            if filename.lower().find(string) > -1:
                results.append([file, filename])
        return results
| marmyshev/bug_1117098 | openlp/plugins/media/lib/mediaitem.py | Python | gpl-2.0 | 15,387 | [
"Brian"
] | 7fb1ac897bc32df572a5b143734200977ea5e1fce0d0867c9c77c172cd3edb51 |
# Copyright (C) 2018 Henrique Pereira Coutada Miranda
# All rights reserved.
#
# This file is part of yambopy
#
"""
Scripts to manipulate Quantum Espresso input files
Also able to read output files in xml format (datafile.xml or datafile-schema.xml)
"""
import os
class qepyenv():
    """Package-wide defaults for qepy: Quantum ESPRESSO executable names,
    the bundled pseudopotential directory and the default convergence
    threshold."""
    PW = "pw.x"  # plane-wave SCF executable
    PH = "ph.x"  # phonon executable
    DYNMAT = "dynmat.x"  # dynamical matrix post-processing executable
    # Pseudopotentials shipped inside the package's data directory.
    PSEUDODIR = os.path.join(os.path.dirname(__file__),'data','pseudos')
    CONV_THR = 1e-8  # default convergence threshold
from .xml import *
from .bravais import *
from .pw import *
from .pwxml import *
from .projwfc import *
from .projwfcxml import *
from .ph import *
from .dynmat import *
from .matdyn import *
from .lattice import *
from .unfolding import *
from .unfoldingyambo import *
from .supercell import *
| alexmoratalla/yambopy | qepy/__init__.py | Python | bsd-3-clause | 729 | [
"Quantum ESPRESSO"
] | 1061a8e0642bb816067c3b113fad3d4f270a52af9d6263669e5cf46dc17f947f |
#built by Tcll5850
#inspired by Roo525
from data.COMMON import * #essentials
# Register this plugin with UMC: script version, the model and animation
# formats it activates for (MikuMikuDance .pmd models and .vmd animations)
# and the extra libraries it includes.
Header( 0.001, #Script Version (for updates)
    ('MikuMikuDance',['pmd']),#model activation
    ('MikuMikuDance',['vmd']),#anim activation
    ['']) #included libs
def ImportModel(T,C):
    # Parse a MikuMikuDance PMD model file using UMC's binary-reader DSL
    # (string/f32/u32/u16/u8 read from the current file; Set* build the model).
    # NOTE(review): the Vector helper below is defined but never called.
    def Vector(): return [f32(label=' -- X'),f32(label=' -- Y'),f32(label=' -- Z')]
    #--header--
    signature = string(3, label=' -- Signature') #'Pmd'
    if signature=='Pmd': #is the file valid?
        #continue if so
        version = f32(label=' -- Version')
        name = string(20,code='cp932',label=' -- Model Name').split('\x00')[0]
        comment = string(256,code='cp932',label=' -- Comment').split('\x00')[0]
        # Per-vertex data: position, normal, texture coordinate and four
        # trailing fields (presumably bone indices, skin weight and edge flag -
        # verify against the PMD format spec).
        V,N,U,u=[],[],[],[]
        for I in range(u32(label=' -- Vertex count')):
            V+=[[f32(label=' -- Vert_X'),
                f32(label=' -- Vert_Y'),
                f32(label=' -- Vert_Z')]]
            N+=[[f32(label=' -- Normal_X'),
                f32(label=' -- Normal_Y'),
                f32(label=' -- Normal_Z')]]
            U+=[[f32(label=' -- UV_S'),
                f32(label=' -- UV_T')]]
            u+=[[u16(label=' -- Unknown'),
                u16(label=' -- Unknown')],
                [u8(label=' -- Unknown'),
                u8(label=' -- Unknown')]]
        SetObject()
        #I had a problem with the other method not setting the vector data >_>
        SetVerts( V )
        SetNormals( N )
        SetPrimitive(UMC_TRIANGLES)
        # The face list is a flat run of u16 vertex indices, three per triangle.
        for tri in StructArr(['u16','u16','u16'],
                u32(label=' -- Triangle Count\n -- Triangle Data: [V1,V2,V3]')/3):
            SetFacepoint(tri[0],tri[0])
            SetFacepoint(tri[1],tri[1])
            SetFacepoint(tri[2],tri[2])
    else: print 'Invalid PMD file'
| Universal-Model-Converter/UMC3.0a | scripts/MMD_PMD.py | Python | mit | 1,915 | [
"VMD"
] | fe875787f447780b88dea4839309c6f9dc2015e29600f3a0f41e0e01ab6c35ea |
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from math import pi
import sys
from Bio.PDB import *
from AbstractPropertyMap import AbstractPropertyMap
__doc__="Half sphere exposure and coordination number calculation."
class _AbstractHSExposure(AbstractPropertyMap):
"""
Abstract class to calculate Half-Sphere Exposure (HSE).
The HSE can be calculated based on the CA-CB vector, or the pseudo CB-CA
vector based on three consecutive CA atoms. This is done by two separate
subclasses.
"""
def __init__(self, model, radius, offset, hse_up_key, hse_down_key,
angle_key=None):
"""
@param model: model
@type model: L{Model}
@param radius: HSE radius
@type radius: float
@param offset: number of flanking residues that are ignored in the calculation of the number of neighbors
@type offset: int
@param hse_up_key: key used to store HSEup in the entity.xtra attribute
@type hse_up_key: string
@param hse_down_key: key used to store HSEdown in the entity.xtra attribute
@type hse_down_key: string
@param angle_key: key used to store the angle between CA-CB and CA-pCB in
the entity.xtra attribute
@type angle_key: string
"""
assert(offset>=0)
# For PyMOL visualization
self.ca_cb_list=[]
ppb=CaPPBuilder()
ppl=ppb.build_peptides(model)
hse_map={}
hse_list=[]
hse_keys=[]
for pp1 in ppl:
for i in range(0, len(pp1)):
if i==0:
r1=None
else:
r1=pp1[i-1]
r2=pp1[i]
if i==len(pp1)-1:
r3=None
else:
r3=pp1[i+1]
# This method is provided by the subclasses to calculate HSE
result=self._get_cb(r1, r2, r3)
if result is None:
# Missing atoms, or i==0, or i==len(pp1)-1
continue
pcb, angle=result
hse_u=0
hse_d=0
ca2=r2['CA'].get_vector()
for pp2 in ppl:
for j in range(0, len(pp2)):
if pp1 is pp2 and abs(i-j)<=offset:
# neighboring residues in the chain are ignored
continue
ro=pp2[j]
if not is_aa(ro) or not ro.has_id('CA'):
continue
cao=ro['CA'].get_vector()
d=(cao-ca2)
if d.norm()<radius:
if d.angle(pcb)<(pi/2):
hse_u+=1
else:
hse_d+=1
res_id=r2.get_id()
chain_id=r2.get_parent().get_id()
# Fill the 3 data structures
hse_map[(chain_id, res_id)]=(hse_u, hse_d, angle)
hse_list.append((r2, (hse_u, hse_d, angle)))
hse_keys.append((chain_id, res_id))
# Add to xtra
r2.xtra[hse_up_key]=hse_u
r2.xtra[hse_down_key]=hse_d
if angle_key:
r2.xtra[angle_key]=angle
AbstractPropertyMap.__init__(self, hse_map, hse_keys, hse_list)
def _get_gly_cb_vector(self, residue):
    """
    Return a pseudo CB vector for a Gly residue.

    The pseudoCB vector is centered at the origin.
    CB coord=N coord rotated over -120 degrees
    along the CA-C axis.

    @param residue: the Gly residue
    @type residue: L{Residue}
    @return: pseudo CB vector (centered at the origin), or None if one
        of N, C, CA is missing
    """
    try:
        n_v=residue["N"].get_vector()
        c_v=residue["C"].get_vector()
        ca_v=residue["CA"].get_vector()
    except KeyError:
        # An atom is missing.  This used to be a bare "except:", which
        # also masked unrelated errors (even KeyboardInterrupt).
        return None
    # center at origin
    n_v=n_v-ca_v
    c_v=c_v-ca_v
    # rotation around c-ca over -120 deg
    rot=rotaxis(-pi*120.0/180.0, c_v)
    cb_at_origin_v=n_v.left_multiply(rot)
    # move back to ca position
    cb_v=cb_at_origin_v+ca_v
    # This is for PyMol visualization
    self.ca_cb_list.append((ca_v, cb_v))
    return cb_at_origin_v
class HSExposureCA(_AbstractHSExposure):
    """
    Class to calculate HSE based on the approximate CA-CB vectors,
    using three consecutive CA positions.
    """
    def __init__(self, model, radius=12, offset=0):
        """
        @param model: the model that contains the residues
        @type model: L{Model}
        @param radius: radius of the sphere (centred at the CA atom)
        @type radius: float
        @param offset: number of flanking residues that are ignored in the calculation of the number of neighbors
        @type offset: int
        """
        _AbstractHSExposure.__init__(self, model, radius, offset,
            'EXP_HSE_A_U', 'EXP_HSE_A_D', 'EXP_CB_PCB_ANGLE')

    def _get_cb(self, r1, r2, r3):
        """
        Calculate the approximate CA-CB direction for a central
        CA atom based on the two flanking CA positions, and the angle
        with the real CA-CB vector.

        The CA-CB vector is centered at the origin.

        @param r1, r2, r3: three consecutive residues
        @type r1, r2, r3: L{Residue}
        @return: (pseudo CB vector, angle with real CB vector or None),
            or None if a flanking residue or a CA atom is missing
        """
        if r1 is None or r3 is None:
            return None
        try:
            ca1=r1['CA'].get_vector()
            ca2=r2['CA'].get_vector()
            ca3=r3['CA'].get_vector()
        except KeyError:
            # A CA atom is missing.  This used to be a bare "except:",
            # which also masked unrelated errors.
            return None
        # center the two CA-CA directions at the middle CA
        d1=ca2-ca1
        d3=ca2-ca3
        d1.normalize()
        d3.normalize()
        # bisection: the bisector of the two CA-CA directions
        # approximates the CA->CB direction
        b=(d1+d3)
        b.normalize()
        # Add to ca_cb_list for drawing
        self.ca_cb_list.append((ca2, b+ca2))
        if r2.has_id('CB'):
            cb=r2['CB'].get_vector()
            cb_ca=cb-ca2
            cb_ca.normalize()
            angle=cb_ca.angle(b)
        elif r2.get_resname()=='GLY':
            cb_ca=self._get_gly_cb_vector(r2)
            if cb_ca is None:
                angle=None
            else:
                angle=cb_ca.angle(b)
        else:
            angle=None
        # vector b is centered at the origin!
        return b, angle

    def pcb_vectors_pymol(self, filename="hs_exp.py"):
        """
        Write a PyMol script that visualizes the pseudo CB-CA directions
        at the CA coordinates.

        @param filename: the name of the pymol script file
        @type filename: string
        """
        if len(self.ca_cb_list)==0:
            sys.stderr.write("Nothing to draw.\n")
            return
        # "with" guarantees the file is closed even if a write fails
        # (the old code leaked the handle on error).
        with open(filename, "w") as fp:
            fp.write("from pymol.cgo import *\n")
            fp.write("from pymol import cmd\n")
            fp.write("obj=[\n")
            fp.write("BEGIN, LINES,\n")
            fp.write("COLOR, %.2f, %.2f, %.2f,\n" % (1.0, 1.0, 1.0))
            for (ca, cb) in self.ca_cb_list:
                x,y,z=ca.get_array()
                fp.write("VERTEX, %.2f, %.2f, %.2f,\n" % (x,y,z))
                x,y,z=cb.get_array()
                fp.write("VERTEX, %.2f, %.2f, %.2f,\n" % (x,y,z))
            fp.write("END]\n")
            fp.write("cmd.load_cgo(obj, 'HS')\n")
class HSExposureCB(_AbstractHSExposure):
    """
    Class to calculate HSE based on the real CA-CB vectors.
    """
    def __init__(self, model, radius=12, offset=0):
        """
        @param model: the model that contains the residues
        @type model: L{Model}
        @param radius: radius of the sphere (centred at the CA atom)
        @type radius: float
        @param offset: number of flanking residues that are ignored in the calculation of the number of neighbors
        @type offset: int
        """
        _AbstractHSExposure.__init__(self, model, radius, offset,
            'EXP_HSE_B_U', 'EXP_HSE_B_D')

    def _get_cb(self, r1, r2, r3):
        """
        Method to calculate CB-CA vector.

        @param r1, r2, r3: three consecutive residues (only r2 is used)
        @type r1, r2, r3: L{Residue}
        @return: (CB-CA vector, 0.0), or None if the needed atoms are
            missing
        """
        if r2.get_resname()=='GLY':
            # Bug fix: _get_gly_cb_vector returns None when N/C/CA are
            # missing.  Previously (None, 0.0) was returned here, which
            # made the caller crash later on d.angle(None) instead of
            # skipping the residue.
            pcb=self._get_gly_cb_vector(r2)
            if pcb is None:
                return None
            return pcb, 0.0
        else:
            if r2.has_id('CB') and r2.has_id('CA'):
                vcb=r2['CB'].get_vector()
                vca=r2['CA'].get_vector()
                return (vcb-vca), 0.0
        return None
class ExposureCN(AbstractPropertyMap):
    """
    Coordination-number exposure: for every residue, count the CA atoms
    of other residues that lie within a given radius of its own CA.
    """
    def __init__(self, model, radius=12.0, offset=0):
        """
        A residue's exposure is defined as the number of CA atoms around
        that residues CA atom. A dictionary is returned that uses a L{Residue}
        object as key, and the residue exposure as corresponding value.

        @param model: the model that contains the residues
        @type model: L{Model}
        @param radius: radius of the sphere (centred at the CA atom)
        @type radius: float
        @param offset: number of flanking residues that are ignored in the calculation of the number of neighbors
        @type offset: int
        """
        assert(offset>=0)
        peptides=CaPPBuilder().build_peptides(model)
        cn_map={}
        cn_list=[]
        cn_keys=[]
        for pp1 in peptides:
            for i, res in enumerate(pp1):
                # skip hetero residues and residues without a CA atom
                if not is_aa(res) or not res.has_id('CA'):
                    continue
                ca_ref=res['CA']
                count=0
                for pp2 in peptides:
                    for j, other in enumerate(pp2):
                        # chain neighbors within 'offset' are excluded
                        if pp1 is pp2 and abs(i-j)<=offset:
                            continue
                        if not is_aa(other) or not other.has_id('CA'):
                            continue
                        # Atom subtraction yields the CA-CA distance
                        if (other['CA']-ca_ref)<radius:
                            count+=1
                res_id=res.get_id()
                chain_id=res.get_parent().get_id()
                # Fill the 3 data structures
                cn_map[(chain_id, res_id)]=count
                cn_list.append((res, count))
                cn_keys.append((chain_id, res_id))
                # Add to xtra
                res.xtra['EXP_CN']=count
        AbstractPropertyMap.__init__(self, cn_map, cn_keys, cn_list)
if __name__=="__main__":
import sys
p=PDBParser()
s=p.get_structure('X', sys.argv[1])
model=s[0]
# Neighbor sphere radius
RADIUS=13.0
OFFSET=0
hse=HSExposureCA(model, radius=RADIUS, offset=OFFSET)
for l in hse:
print l
print
hse=HSExposureCB(model, radius=RADIUS, offset=OFFSET)
for l in hse:
print l
print
hse=ExposureCN(model, radius=RADIUS, offset=OFFSET)
for l in hse:
print l
print
for c in model:
for r in c:
try:
print r.xtra['PCB_CB_ANGLE']
except:
pass
| dbmi-pitt/DIKB-Micropublication | scripts/mp-scripts/Bio/PDB/HSExposure.py | Python | apache-2.0 | 11,168 | [
"Biopython",
"PyMOL"
] | 0d7da8ecf944744d26d4144e93b558e279c2a41f6a059c56a0c7a8dfc777070b |
'''
This script describes how to use the *outliers* method to detect and
remove outliers prior to conditioning a *GaussianProcess*.
'''
import numpy as np
import matplotlib.pyplot as plt
import logging
from rbf.gproc import gpiso, gppoly
logging.basicConfig(level=logging.DEBUG)
np.random.seed(1)

y = np.linspace(-7.5, 7.5, 50) # observation points
x = np.linspace(-7.5, 7.5, 1000) # interpolation points
truth = np.exp(-0.3*np.abs(x))*np.sin(x) # true signal at interp. points
# form synthetic data
obs_sigma = np.full(50, 0.1) # noise standard deviation
noise = np.random.normal(0.0, obs_sigma)
noise[20], noise[25] = 2.0, 1.0 # add anomalously large noise
obs_mu = np.exp(-0.3*np.abs(y))*np.sin(y) + noise
# form prior Gaussian process: squared-exponential kernel plus a
# first-order polynomial trend
prior = gpiso('se', eps=1.0, var=1.0) + gppoly(1)
# find outliers which will be removed
toss = prior.outliers(y[:, None], obs_mu, obs_sigma, tol=4.0)
# condition with non-outliers
post = prior.condition(
    y[~toss, None],
    obs_mu[~toss],
    dcov=np.diag(obs_sigma[~toss]**2)
    )
post_mu, post_sigma = post(x[:, None])
# plot the results
fig, ax = plt.subplots(figsize=(6, 4))
ax.errorbar(y[~toss], obs_mu[~toss], obs_sigma[~toss], fmt='k.', capsize=0.0, label='inliers')
ax.errorbar(y[toss], obs_mu[toss], obs_sigma[toss], fmt='r.', capsize=0.0, label='outliers')
ax.plot(x, post_mu, 'b-', label='posterior mean')
ax.fill_between(x, post_mu-post_sigma, post_mu+post_sigma,
                color='b', alpha=0.2, edgecolor='none',
                label='posterior uncertainty')
ax.plot(x, truth, 'k-', label='true signal')
ax.legend(fontsize=10)
ax.set_xlim((-7.5, 7.5))
ax.grid(True)
fig.tight_layout()
plt.savefig('../figures/gproc.c.png')
plt.show()
| treverhines/RBF | docs/scripts/gproc.c.py | Python | mit | 1,709 | [
"Gaussian"
] | eae421cc45a40429a50d264ae59cf64da9cffae2e415a82f27e268f90fbc8f45 |
__author__ = "joanne cohn"
__email__ = "jcohn@berkeley.edu"
__version__= "1.1" #updated BWC M*(Mh) from newer version of paper
import numpy as N
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.cm as cm
import matplotlib.mlab as mlab
def comments():
    """
    Documentation-only helper: holds the usage notes, data-file list and
    references for this module.  It performs no computation.

    Please do email me if you have questions!
    Appendix of http://arxiv.org/abs/1609.03956 has information on how to run this program if more documentation is needed.

    generate 7 plots using runsuite(): below.
    4 are stellar mass functions:
    all, quiescent, star forming, and all on one page, compared to several observations described below
    (quiescent/star forming division at log sfr = -0.49 + (0.65+slopeval) (logM* - 10) +1.07 *(z-0.1)+shiftval, for slopeval=shiftval=0,
    Moustakas et al eq 2), although many of papers listed use UVJ.
    1 is stellar mass-sfr diagram [can be compared with e.g., Moustakas et al 2013, but not overplotted with it]
    1 is ssfr in 4 stellar mass bins* (no cut on ra, dec for this)
    1 is stellar mass to halo mass diagram for central galaxies, compared to Behroozi, Wechsler, Conroy 2013 and Moster, Naab, White 2013 fits
    Behroozi, Wechsler, Conroy 2013 use Mvir
    Moster, Naab, White 2013 use M200

    If you use this program, please reference the papers and people who measured
    all of these data!!
    They are listed below "%%%"

    USAGE:
    runsuite(zcen, "inputfile.dat",hval,omm,slopeval,shiftval, boxside,runname,delz,ramin,ramax,decmin,decmax):
    zcen is central redshift
    fname = "inputfile.dat" described below, can call it something else
    if you want. ascii text.
    hval = hubble constant
    omm = omega_matter (e.g. 0.31)
    slopeval = in sfr-M* bimodal diagram, **change in** slope of line to
    separate star-forming and quiescent from PRIMUS
    shiftval = change in shift of line between star forming and quiescent
    from PRIMUS
    PRIMUS star-forming and quiescent split by:
    log SFR = log sfrmin -0.49 + (0.65+slopeval) (logM* - 10) +1.07 *(z-0.1) + shiftval
    boxside = in Mpc/h for fixed time, any negative number if light cone
    runname = string, such as "run0"
    if lightcone, delz,ramin,ramax,decmin,decmax listed next.
    if fixed time these arguments (delz, ramin,ramax,decmin,decmax)
    are ignored and are not needed.

    files needed:
    from your simulation: requires "inputfile.dat" in the form of
    log10 m* (0) sfr (1), ra (2), dec (3), zred(4), ifsat (5) log10 m_halo (6)
    units:
    log10 M* [M_o]
    sfr units are per yr (not gyr)
    ra, dec the usual
    zred = redshift
    ifsat = 0 for central, 1 for sat
    m_halo = halo mass (Mvir, [M_o])

    Comparisons are made with data files, listed below and in this directory:
    --note that aside from (1),(5), (6), these were copied from tables and plots,
    please let me know if you find errors! thank you.
    1. moustakas_z%s.smf,
    provided at www.peterbehroozi.com/data.html, observational-data.tar.gz
    2. fig3_bwc_12.dat
    points from fig 3, Behroozi, Peter S., Wechsler, Risa H., Conroy, Charlie
    The Average Star Formation Histories of Galaxies in Dark Matter Halos from z=0-8
    3. smf_all_supergrid01.txt, smf_starforming_supergrid01.txt,
    smf_quiescent_supergrid01.txt
    Moustakas et al, 2013, table 4.
    4. smf_zfourge_%s_supergrid.txt, %s= all, starforming, quiescent,
    Tomczak et al 2014, table 1
    5. Vmax_NN*.dat, MaxLik**.dat,
    provided at
    http://cosmos.phy.tufts.edu/~danilo/MuzzinEtal2013/Muzzin_et_al._(2013).html
    paper is
    6. henriques_all.dat, henriques_quiescent.dat, henriques_starforming.dat:
    points from figs 2 and 7 of Henriques et al
    http://arxiv.org/abs/1410.0365, data provided at
    http://galformod.mpa-garching.mpg.de/public/LGalaxies/figures_and_data.php
    7. viper_sch_*.dat from Moutard et al, Moutard et al, 2016
    The VIPERS Multi-Lambda Survey. II
    Diving with massive galaxies in 22 square degrees since z = 1.5
    arxiv: 1602.05917 v3
    table 2.

    %%%%%%%%%%%%%%%%%%%%%%%%%%%
    Full references for papers:

    Behroozi, Wechsler, Conroy
    from papers http://arxiv.org/abs/1207.6105 , http://arxiv.org/abs/1209.3013
    The Average Star Formation Histories of Galaxies in Dark Matter Halos from z = 0-8
    2013, ApJ, 770, 57 and
    On the Lack of Evolution in Galaxy Star Formation Efficiency,
    2013 ApJ, 762, L31

    Henriques, Bruno M. B.; White, Simon D. M.; Thomas, Peter A.; Angulo, Raul; Guo, Qi; Lemson, Gerard; Springel, Volker; Overzier, Roderik
    Galaxy formation in the Planck cosmology - I. Matching the observed evolution of star formation rates, colours and stellar masses
    2015
    http://arxiv.org/abs/1410.0365
    MNRAS, 451, 2663
    data tables: http://galformod.mpa-garching.mpg.de/public/LGalaxies/figures_and_data.php

    Moster, Naab & White, 2013
    Galactic star formation and accretion histories from matching galaxies to dark matter haloes
    http://arxiv.org/abs/1205.5807
    MNRAS, 428, 3121

    Moustakas, John, et al,
    PRIMUS: Constraints on Star Formation Quenching and Galaxy Merging, and the Evolution of the Stellar Mass Function from z = 0-1
    http://arxiv.org/abs/1301.1688
    ApJ, 2013, 767, 50

    Moutard et al, 2016
    The VIPERS Multi-Lambda Survey. II
    Diving with massive galaxies in 22 square degrees since z = 1.5
    arxiv: 1602.05917 v3

    Adam Muzzin, Danilo Marchesini, Mauro Stefanon, Marijn Franx, Henry J. McCracken, Bo Milvang-Jensen, James S. Dunlop, J. P. U. Fynbo, Gabriel Brammer, Ivo Labbe, Pieter van Dokkum, 2013,
    The Evolution of the Stellar Mass Functions of Star-Forming and Quiescent Galaxies to z = 4 from the COSMOS/UltraVISTA Survey
    http://arxiv.org/abs/1303.4409
    2013, ApJ, 777, 18

    Tomczak et al, 2014,
    Galaxy Stellar Mass Functions from ZFOURGE/CANDELS: An Excess of Low-mass Galaxies since z = 2 and the Rapid Buildup of Quiescent Galaxies
    arXiv:1309.5972
    2014, ApJ, 783, 85
    A companion reference to Tomczak et al is the description of how the data/catalogs were put
    together for the survey, to appear in Straatman et al, submitted.

    %%%%%%%%%%%%%%%%%
    Many thanks to P. Behroozi, M. van Daalen, A. Gonzalez, L. Guzzo, B. Henriques, J. Moustakas, M. White for help in putting the data and
    plots together.
    %%%%%%%%%%%%

    model choices: BC03, Maraston (->BC03 0.14dexM*), Pegase, FSPS for sps model
    type of sf: ssp, burst (exponential), constant
    type of dust: Blanton-Roweis, Charlot-Fall, Calzetti
    A comprehensive analysis of uncertainties affecting the stellar mass-halo mass
    relation for 0<z<4,
    http://arxiv.org/abs/1001.0015
    imf: salpeter, kroupa, chabrier
    To rescale stellar masses from Chabrier or Kroupa to Salpeter IMF,
    we divide by constant factors 0.61 and 0.66, respectively. Madau and Dickinson, 2014, Cosmic Star Formation History, http://arxiv.org/abs/1403.0007, page 14
    See also conversions in Rodriguez-Puebla, A., Primack, JR., Avila-Reese, V., Faber, S.M., 2017, MNRAS, 470, 651, arxiv:1703.04542, Eq. 38,
    MBC03 = MBC07 + 0.13 = MP,0.1 - 0.05 =
    = MP,z + 0.03 = MM05 + 0.2 = MFSPS - 0.05.
    """
def chiofz(zval=0.45, omm=0.31):
    """
    Comoving distance to redshift zval, in Mpc/h, for a flat LCDM
    cosmology with matter density parameter omm (c/H0 = 2997.925 Mpc/h).
    Used for volumes when a region is specified with ra/dec.
    """
    npts = 300000
    # integrate d(1+z')/E(z') from 1 to 1+zval on a uniform grid
    one_plus_z = N.linspace(1, zval+1, npts)
    efunc = N.sqrt(omm*one_plus_z*one_plus_z*one_plus_z + (1-omm))
    step = one_plus_z[1]-one_plus_z[0]
    return 2997.925*N.trapz(1/efunc, dx=step)
def getsimstellar(zcen=0.45,addcolor=0,fname="galshort.dat",hval=0.67,omm=0.31,slopeval=0,shiftval=0,boxside=100,delz=0.02,ramin=-2,ramax=-2,decmin=2,decmax=-2,scatterval=0):
"""
usage:
getsimstellar(zval,addcolor,inputfil,hval,omm,slopeval,shiftval,boxside,delz,ra_min,ra_max,dec_min,dec_max)
zcen: central redshift for simulation
addcolor=0 all galaxies
addcolor=1 red
addcolor=2 blue
fname: galaxy data file, more below
hval = hubble constant (e.g. 0.67)
shiftval,slopeval =changes from PRIMUS SFR-M* active quiescent classification, set to 0 if want to be as simple as possible
omm = omega_matter (e.g. 0.31)
boxside: positive for periodic box, negative for light cone
boxside = side of box when periodic box
**or**
boxside<0 (e.g. -1) if light cone
delz = use z range zcen+delz > z < zcen-delz for light cone [ignored for per]
ra_min,ra_max,dec_min,dec_max :min/max ra and dec for light cone [ignored for per]
this is n(M) not N(>M)
gal units M_o
smf_*supergrid*01,03,10 used.
color log sfrmin -0.49 + (0.65+slopeval) (logM* - 10) +1.07 *(z-0.1) + shiftval
galaxy data file fname entries on each line, one per galaxy
example:
#a = 0.9947
# M*(0) [M_o], sfr(1) [M_o/yr],ra(2),dec(3),zred(4),ifsat(5),logmh(6)[M_o]
1.146e+01 4.831e-01 1. 1. 0.01 0 14.4675
9.696e+00 7.124e-03 1. 1. 0.01 1 11.2347
1.142e+01 1.355e-01 1. 1. 0.01 0 14.4215
8.386e+00 2.415e-03 1. 1. 0.01 1 9.5894
etc...
[boxside > 0, i.e. fixed time, give boxside in units of Mpc/h]
log10 mstellar (no h), sfr (M_o/Gyr)
[boxside < 0,use any boxside value < 0, lightcone]
log10 mstellar (no h), sfr (M_o/Gyr), ra, dec, redshift
"""
ff = open(fname)
gals = N.loadtxt(ff)
ff.close()
logstell = gals[:,0]
sfr = gals[:,1]
if (boxside < 0):
print "using light cone"
ra = gals[:,2]
dec = gals[:,3]
redz = gals[:,4]
#need ra, dec, redshift,delz
chimax = chiofz(zcen+delz,omm) #[Mpc/h]
chimin = chiofz(zcen-delz,omm) #[Mpc/h]
print "ramin,ramax, decmin,decmax %5.4f %5.4f %5.4f %5.4f \n"%(ramin,ramax,decmin,decmax)
angvol = -(N.cos((90-decmin)*N.pi/180) - N.cos((90-decmax)*N.pi/180))*(N.pi*(ramax-ramin)/180.)
chivol =(chimax*chimax*chimax - chimin*chimin*chimin)/3.
vol = chivol*angvol # in [Mpc/h]^3
# truncate galaxy sample to light cone
jj = N.nonzero((ra>ramin)&(ra<ramax)&(dec>decmin)&(dec<decmax)&
(redz<zcen+delz)&(redz>zcen-delz))[0]
sfr = sfr[jj]
logstell = logstell[jj]
redz = redz[jj]
if (boxside>0):
print "using periodic box, side %8.2f Mpc/h"%(boxside)
vol = boxside*boxside*boxside
redz = zcen
#units:
# want mpc not mpc/h
vol = vol/(hval*hval*hval)
## add random scatter as function of z
if (scatterval==1):
sigval = 0.07 +0.04*redz
logstell += N.random.normal(0,sigval,logstell.size)
jj = N.arange(logstell.size)
#note color cut is assuming h70's in units
if (addcolor>0):
#moustakas 2013 units to compare
#Moustakas box is in units of [Mpc/h70]^3, we have [Mpc/h]^3,
# so divide volume by (h/h70)^3 = (h/(h/0.7))^3 = 0.7^3
# Mstar is in units of [M_o/h70^2]
sfrtest = sfr**(hval/0.70)**2
logstelltest = logstell + 2.*N.log10(hval/0.70)
if (addcolor==1):
jj = N.nonzero(N.log10(sfrtest+1.e-16)<-0.49+(0.65+slopeval)*(logstelltest-10)+1.07*(redz-0.1)+shiftval)[0]
if (addcolor==2):
jj = N.nonzero(N.log10(sfrtest+1.e-16)>=-0.49+(0.65+slopeval)*(logstelltest-10)+1.07*(redz-0.1)+shiftval)[0]
logstell = logstell[jj]
nbin = 50
nhist,bins = N.histogram(logstell,nbin,range=(8.3,12.3))
bins += (bins[1]-bins[0])/2.
bins = N.delete(bins,nbin)
ngalact = nhist*1./(vol*(bins[1]-bins[0]))
galkind =("all","red","blue")
return(bins,ngalact,nhist.sum())
###
### different models
###
def getphim_bc03(zcen=0.25,addcolor=0):
"""
only for z>0.2
Behroozi,Wechsler,Conroy
from papers
http://arxiv.org/abs/1207.6105 , http://arxiv.org/abs/1209.3013
The Average Star Formation Histories of Galaxies in Dark Matter Halos from z = 0-8, 2013,ApJ,770,57
On the Lack of Evolution in Galaxy Star Formation Efficiency, 2013 ApJ, 762, L31
data from publicly available
behroozi-2013-data-compilation at www.peterbehroozi.com
Stellar Mass functions: smf_ms/moustakas*.smf
Columns: Log10(stellar mass) (Msun), Log10(ND) (1/Mpc^3/dex), Err+ (dex), Err- (dex)
*OR*
Columns: Log10(stellar mass) (Msun), ND (1/Mpc^3/dex), Err+ , Err-
In the latter case, the data files are marked with "#errors: linear".
Assumptions:BC03 SPS models, Chabrier (2003) IMF, Blanton & Roweis (kcorrect) dust modeling.
"""
if ((addcolor !=0)|(zcen>1)):
return(N.array([1,1]),N.array([1,1]),0.,0.,0.,0.)
znamelist = ("0.105","0.25","0.35","0.45","0.575","0.725","0.9")
zvals = N.array([0.01,0.2,0.3,0.4,0.5,0.65,0.8,1.0])
jj= N.nonzero(zcen>=zvals)[0]
if (jj.size==0):
return(N.array([1,1]),N.array([1,1]),0.,0.,0.,0.)
if (jj.size==1):
zmin =0.01
zmax = 0.2
if (jj.size>1):
jj = jj.max()
zmin =zvals[jj]
zmax = zvals[jj+1]
print "behroozi compilation"
ff =open("moustakas_z%s.smf"%(znamelist[jj]))
phivals = N.loadtxt(ff)
ff.close()
# logm phi errplus errmin
ctypelist="all"
#log phi errors are both positive
logm = phivals[:,0]
phi = phivals[:,1]
phip = phivals[:,2]
phim = phivals[:,3]
return(logm,phi,phip,phim,zmin,zmax)
def getphibwc(zcen):
    """
    Stellar mass function for all galaxies, read off fig 3 of
    Behroozi, Peter S., Wechsler, Risa H., Conroy, Charlie,
    The Average Star Formation Histories of Galaxies in Dark Matter Halos from z = 0-8,
    2013, ApJ, 770, 57, arXiv:1207.6105
    bc03, Blanton-Roweis dust, chabrier imf

    @return: (log10 M*, phi, zmid); zmid=-1 flags that zcen falls in
        none of the tabulated ranges, with dummy arrays returned.
    """
    table = N.loadtxt("fig3_bwc12.dat")
    # the file stacks three redshift slices; pick the rows for zcen
    if (zcen<0.2):
        rows, zmid = N.arange(66), 0.
    elif ((zcen>0.3)&(zcen<0.75)):
        rows, zmid = N.arange(66,88), 0.5
    elif ((zcen>0.75)&(zcen<1.25)):
        rows, zmid = N.arange(88,103), 1.
    else:
        # zcen outside every tabulated range
        return(N.array([1,1]),N.array([1,1]),-1.)
    table = table[rows,:]
    # column 0 is linear M*, column 1 is phi
    return(N.log10(table[:,0]),table[:,1],zmid)
def getphisg(zcen=0.1,addcolor=0):
    """
    SDSS-GALEX stellar mass function at z ~ 0.1 (only valid for zcen <= 0.2).

    Moustakas, John, et al,
    PRIMUS: Constraints on Star Formation Quenching and Galaxy Merging, and the Evolution of the Stellar Mass Function from z = 0-1
    http://arxiv.org/abs/1301.1688
    table 3, h70 units
    fsps (Conroy/White/Gunn, Conroy/Gunn/White, Conroy/Gunn 2009, 2010)
    Charlot Fall (2000) dust
    chabrier imf

    @param zcen: central redshift; returns dummies for zcen > 0.2
    @param addcolor: 0=all, 1=quiescent, 2=starforming
    @return: (log10 M*, phi, err+, err-, zmin, zmax) with phi in linear
        units (the table values are log10 phi and are converted here)
    """
    if (zcen>0.2):
        return(N.array([1,1]), N.array([1,1]),0,0,0,0)
    ctypelist=("all","quiescent","starforming")
    # table 3 is sampled at 0.1 dex in log M* from 9.0 to 12.0
    logm = 9.0+ N.arange(31)*1./10.
    # the three branches below are the table 3 values transcribed by hand
    # (log phi, plus/minus log errors) for all/quiescent/starforming
    if (addcolor==0): #only have fsps
        logphi = N.array([-1.899,-1.923,-1.970,-2.031,-2.055,-2.106,-2.144,-2.179,-2.188,-2.216,-2.234,-2.235,-2.262,-2.252,-2.285,-2.317,-2.365,-2.419,-2.504,-2.607,-2.728,-2.888,-3.104,-3.332,-3.606,-3.953,-4.363,-4.778,-5.255,-5.87,-6.49])
        logphi_plus = N.array([0.017,0.017,0.015,0.015,0.014,0.012,0.012,0.012,0.010,0.0086,0.0080,0.0069,0.0063,0.0056,0.0051,0.0047,0.0044,0.0041,0.0040,0.0039,0.0040,0.0043,0.0049,0.0059,0.0080,0.012,0.020,0.033,0.060,0.010,0.030])
        logphi_minus = N.array([-0.017,-0.016,-0.015,-0.014,-0.013,-0.012,-0.011,-0.012,-0.010,-0.0084,-0.0078,-0.0068,-0.0062,-0.0056,-0.0051,-0.0046,-0.0044,-0.0041,-0.0040,-0.0039,-0.0040,-0.0043,-0.0048,-0.0059,-0.0079,-0.012,-0.019,-0.031,-0.053,-0.010,-0.020])
    if (addcolor==1): #only have fsps
        logphi = N.array([-2.495,-2.486,-2.485,-2.523,-2.576,-2.603,-2.634,-2.642,-2.652,-2.655,-2.649,-2.614,-2.607,-2.5640,-2.5640,-2.5800,-2.6050,-2.6450,-2.7050,-2.7860,-2.8840,-3.0190,-3.2090,-3.4130,-3.6670,-4.002,-4.401,-4.806,-5.296,-5.93,-6.16])
        logphi_plus = N.array([0.048,0.044,0.038,0.037,0.033,0.030,0.026,0.028,0.021,0.018,0.015,0.013,0.011,0.0089,0.0077,0.0069,0.0062,0.0057,0.0053,0.0050,0.0049,0.0050,0.0055,0.0065,0.0085,0.013,0.021,0.034,0.063,0.10,0.40])
        logphi_minus = N.array([-0.043,-0.041,-0.035,-0.034,-0.031,-0.028,-0.025,-0.026,-0.020,-0.017,-0.015,-0.012,-0.011,-0.0087,-0.0076,-0.0068,-0.0061,-0.0056,-0.0052,-0.0050,-0.0049,-0.0050,-0.0054,-0.0064,-0.0084,-0.012,-0.020,-0.032,-0.056,-0.10,-0.20])
    if (addcolor==2): #only have fsps
        logphi = N.array([-2.026,-2.062,-2.129,-2.201,-2.211,-2.272,-2.313,-2.362,-2.371,-2.4120,-2.4450,-2.4700,-2.5240,-2.5410,-2.6090,-2.6600,-2.7370,-2.8110,-2.9340,-3.0770,-3.2500,-3.4720,-3.769,-4.102,-4.487,-4.930,-5.437,-5.98,-6.30,-6.77,-7.09])
        logphi_plus = N.array([0.018,0.017,0.015,0.014,0.014,0.012,0.012,0.011,0.011,0.0092,0.0090,0.0079,0.0074,0.0071,0.0066,0.0063,0.0062,0.0059,0.0061,0.0064,0.0071,0.0085,0.011,0.016,0.024,0.042,0.079,0.20,0.30,0.60,1.00])
        logphi_minus = N.array([-0.017,-0.016,-0.015,-0.014,-0.013,-0.012,-0.012,-0.011,-0.011,-0.0090,-0.0088,-0.0078,-0.0072,-0.0070,-0.0065,-0.0062,-0.0061,-0.0059,-0.0060,-0.0063,-0.0070,-0.0084,-0.010,-0.015,-0.023,-0.038,-0.067,-0.10,-0.20,-0.30,-0.40])
    # convert log phi and its asymmetric log errors to linear units
    phi = 10**logphi
    phip = phi*(10**logphi_plus -1)
    phim =phi*(1-10**logphi_minus)
    return(logm,phi,phip,phim,0.01,0.2)
def getphim(zcen=0.25,addcolor=0,ismf=0):
    """
    PRIMUS stellar mass function, valid for 0.2 < zcen <= 1.0 only.

    Moustakas, John, et al,
    PRIMUS: Constraints on Star Formation Quenching and Galaxy Merging, and the Evolution of the Stellar Mass Function from z = 0-1
    http://arxiv.org/abs/1301.1688
    table 4
    fsps (Conroy/White/Gunn, Conroy/Gunn/White, Conroy/Gunn 2009, 2010)
    Charlot Fall (2000) dust
    chabrier imf

    @return: (log10 M*, phi, err+, err-, zmin, zmax); dummies for zcen
        outside (0.2, 1.0]
    """
    if ((zcen<0.2)|(zcen>1.0)):
        return(N.array([1,1]),N.array([1,1]),0,0,0,0)
    kinds=("all","quiescent","starforming")
    fh = open("smf_%s_supergrid01.txt"%(kinds[addcolor]))
    # file columns: zlow 0 zhi 1 ngal 2 logm* 3 limit 4 logphi 5
    # logphierrm 6 logphierrp 7 logphierrcv 8; both log errors positive
    table = N.loadtxt(fh,usecols=(0,1,3,5,6,7))
    fh.close()
    # keep only the redshift slice that brackets zcen
    keep = N.nonzero((table[:,0]<zcen)&(table[:,1]>=zcen))[0]
    table = table[keep]
    zmin = table[0,0]
    zmax = table[0,1]
    logm = table[:,2]
    phi = N.power(10,table[:,3])
    phim = phi*(1-N.power(10,-table[:,4]))
    phip = phi*(N.power(10,table[:,5])-1)
    return(logm,phi,phip,phim,zmin,zmax)
def getphit(zcen=0.45,addcolor=0):
"""
Tomczak et al, 2014,
Galaxy Stellar Mass Functions from ZFOURGE/CANDELS: An Excess of Low-mass Galaxies since z = 2 and the Rapid Buildup of Quiescent Galaxies
arXiv:1309.5972
2014,ApJ,783,85
Another reference with with Tomczak et al is the paper detailing
the way that the data/catalogs were put
together for the survey, to appear in Straatman et al submitted.
read data of tomczak et al 2014 table 1
surrounding region of zcen chosen
units are: log M, log Phi
M* is in units of M_o/h70^2
-if your M* is in units of M_o, multiply your M* by h70^2
Phi is in units of [h70/Mpc]^3,
-if your Phi is in units of [Mpc^-3], divide by h70^3
stellar masses using FAST (Kriek et al 2009)
Bruzual & Charlot (2003) following an exponentially declining starformation history assuming a Chabrier (2003) initial mass function. They assume solar metallicity and allow Av to vary between [0, 4].
"""
# now need to find right redshift and type
# first redshift
zrange = N.array([0.2,0.5,0.75,1.0,1.25, 1.5,2.0,2.5,3.0])
jjz = N.nonzero(zcen>=zrange)[0]
if ((jjz.size==0)|(zcen>3)):
return(N.array([1,1]),N.array([1,1]),0.,0.,0.,0.)
if (jjz.size>1):
jjz = jjz.max()
zmin = zrange[jjz]
zmax = zrange[jjz+1]
print "using ZFOURGE range %3.2f < z < %3.2f "%(zrange[jjz],zrange[jjz+1])
print "Bruzual Charlot used to calculate M*, solar Z, Av in [0,4] "
colornamelist =("all","quiescent","starforming")
ff = open("smf_zfourge_%s_supergrid.txt"%(colornamelist[addcolor]))
phitom = N.loadtxt(ff,usecols=(0,1,3,5,6,7))
ff.close()
#zlo0 zhi 1 logm2 logphi3 logphim4 logphip5
jj = N.nonzero((zcen> phitom[:,0])&(zcen<=phitom[:,1]))[0]
phitom = phitom[jj,:] # now have right redshift and right color sel
logm = phitom[:,2]
logphi = phitom[:,3]
logphim = phitom[:,4]
logphip = phitom[:,5]
phi = 10**logphi
phip = phi*(10**logphip-1)
phim = phi*(1-10**(-logphim))
return(logm,phi,phip,phim,zmin,zmax)
def getphiuv(zcen=0.25,addcolor=0):
"""
The Evolution of the Stellar Mass Functions of Star-Forming and Quiescent Galaxies to z = 4 from the COSMOS/UltraVISTA Survey
Adam Muzzin, Danilo Marchesini, Mauro Stefanon, Marijn Franx, Henry J. McCracken, Bo Milvang-Jensen, James S. Dunlop, J. P. U. Fynbo, Gabriel Brammer, Ivo Labbe, Pieter van Dokkum, 2013,
http://arxiv.org/abs/1303.4409
downloads at cosmos2.phy.tufts.edu/~danilo/Downloads.html
bc03, calzetti dust, kroupa imf
"""
if (zcen<0.2):
return(N.array([1,1]),N.array([1,1]),0,0,0,0)
if (zcen>4.):
return(N.array([1,1]),N.array([1,1]),0,0,0,0)
ctypelist=("all","quiescent","starforming")
zlist = N.array([0.2,0.5,1.0,1.5,2.0,2.5,3.0,4.0])
jj = N.nonzero(zcen>zlist)[0]
if (jj.size>1):
jj = jj.max() #largest value of z less than zcen
zmin = zlist[jj]
zmax = zlist[jj+1]
print "using COSMOS/Ultravista range %3.2f < z < %3.2f "%(zmin,zmax)
ff = open("Vmax_%2.1fz%2.1f.dat"%(zmin,zmax))
#logMs EMstar(1) logphi(2), eu(phi3) el(phi4) logphiq(5) ueq(6) leq(7) phisf(8) uesf(9) lesf(10)
#log phi errors are both positive
gals = N.loadtxt(ff,usecols=(0,2+3*addcolor,3+3*addcolor,4+3*addcolor))
ff.close()
jj = N.nonzero(gals[:,1]>-99)[0] #only where have measurements.
gals = gals[jj,:]
logm = gals[:,0]
# shift from kroupa to chabrier using 0.61/0.66, Madau and Dickinson, page 14
logm += N.log10(0.61/0.66)
phi = 10**gals[:,1]
phim = phi*(1-10**(-gals[:,3]))
phip = phi*(10**gals[:,2]-1)
return(logm,phi,phip,phim,zmin,zmax)
def getphiuv_sch(zcen=0.25,addcolor=0):
    """
    COSMOS/UltraVISTA Schechter-function fit, valid 0.2 < zcen <= 4.

    THE EVOLUTION OF THE STELLAR MASS FUNCTIONS OF STAR-FORMING AND QUIESCENT GALAXIES TO z = 4 FROM THE COSMOS/UltraVISTA SURVEY,
    Adam Muzzin, Danilo Marchesini, Mauro Stefanon, Marijn Franx, Henry J. McCracken, Bo Milvang-Jensen, James S. Dunlop, J. P. U. Fynbo, Gabriel Brammer, Ivo Labbe, PG van Dokkum, 2013, ApJ, 777, 1
    http://arxiv.org/abs/1303.4409
    schechter function fit
    downloads at cosmos2.phy.tufts.edu/~danilo/Downloads.html
    bc03, calzetti dust, kroupa imf (masses shifted to Chabrier below).

    @return: (log10 M*, phi evaluated on a grid above the mass limit,
        zmin, zmax); dummies outside the valid range
    """
    if (zcen<0.2):
        return(N.array([1,1]),N.array([1,1]),0,0)
    if (zcen>4.):
        return(N.array([1,1]),N.array([1,1]),0,0)
    galkind = ("ALL","QUIESCENT","STARFORMING")
    ff = open("MaxLik_Schechter_%s.dat"%(galkind[addcolor]))
    sparams = N.loadtxt(ff)
    ff.close()
    # file columns:
    #zlow,zhigh,nobj(2),mlim(3),m*(4),m*1su(5),m*1sl(6),m*1sutot(7),m*1sltot(8),phi*(9),phi*_1su(10)
    #phi*_1sl(11),phi*1_sutot(12),phi*1_sltot(13),alpha(14),alpha_1su(15),alpha_1sl(16),alpha1sutot(17),
    #alpha1sltot(18),
    #phi2*(19),phi2*_su(20)
    #phi2*_1sl(21),phi2*1_sutot(22),phi2*1_sltot(23),alpha2(24),alpha2_1su(25),alpha2_1sl(26),alpha2sutot(27),
    #alpha2sltot(28)
    doublefit=0
    # select the row(s) bracketing zcen with a floating alpha fit
    jj = N.nonzero((zcen>sparams[:,0])&(zcen<=sparams[:,1])&(sparams[:,15]!=0))[0]#redshift and floating alpha
    sparams = sparams[jj,:] #now at right redshift and floating alpha
    if (sparams[:,0].size>1): #double schechter fit
        doublefit=1
        jj = N.nonzero(sparams[:,20]>-99)[0] #get double schechter fit
        sparams=sparams[jj,:]
    #now just one row
    zmin = sparams[0,0]
    zmax = sparams[0,1]
    # NOTE(review): sparams[:,3] is a length-1 array here, so on newer
    # numpy this linspace start is an array and logm comes out with an
    # extra axis; sparams[0,3] looks intended — confirm before changing.
    logm = N.linspace(sparams[:,3],12,100) #log M, units M_o
    mstar = sparams[0,4]
    phistar = sparams[0,9]*1.e-4
    alpha = sparams[0,14]
    # single Schechter term, per dex (hence the ln 10 factor)
    phi = N.log(10)*phistar*N.power(10.,(logm-mstar)*(1+alpha))*N.exp(-N.power(10,logm-mstar))
    if (doublefit==1): #double schechter fit
        phistar2 = sparams[0,19]*1.e-4
        alpha2 = sparams[0,24]
        phi += N.log(10)*phistar2*N.power(10.,(logm-mstar)*(1+alpha2))*N.exp(-N.power(10,logm-mstar))
    logm += N.log10(0.61/0.66) #convert kroupa to chabrier
    return(logm,phi,zmin,zmax)
def getphihen(zcen=0.25,addcolor=0):
    """
    Semi-analytic model stellar mass function of Henriques et al 2015,
    for the z snapshot closest to zcen (within 0.2).

    Henriques, Bruno M. B.; White, Simon D. M.; Thomas, Peter A.; Angulo, Raul; Guo, Qi; Lemson, Gerard; Springel, Volker; Overzier, Roderik
    Galaxy formation in the Planck cosmology - I. Matching the observed evolution of star formation rates, colours and stellar masses
    2015, MNRAS, 451, 2663
    1410.0365: compilation of several measurements, details in appendix A2.
    sigma8 = 0.829, H0 = 67.3 km/s/mpc, OmegaL = 0.685, Omegam = 0.315, Omegab = 0.0487 (fb = 0.155) and n = 0.96.
    downloadable tables:
    http://galformod.mpa-garching.mpg.de/public/LGalaxies/figures_and_data.php
    points from figures 2 (all), figure 7 (quiescent and starforming)
    shift from maraston to BC03 by adding 0.14 to logM*

    @return: (log10 M*, phi, err+, err-, zmid); dummies when no snapshot
        is close enough or no curve exists for this color at this z
    """
    #they have hval = 0.673 but have taken it out, units (mpc/h)^-3 and M*/h^2
    ctypelist=("all","quiescent","starforming")
    ff = open("henriques_%s.dat"%(ctypelist[addcolor]))
    gals = N.loadtxt(ff)
    ff.close()
    #vol is Mpc/h^3 so need to multiply by h^{-3}
    #M* is
    # startpoints[k, c] is the first row of redshift slice k for color c
    # in the concatenated data file; slice k runs to startpoints[k+1, c].
    startpoints = N.zeros(6*3,dtype='int')
    startpoints.shape=(6,3)
    startpoints[:,0]=N.array([0,17,17,30,41,49]) #all
    startpoints[:,1]=N.array([0,13,26,38,47,49]) #red
    startpoints[:,2]=N.array([0,13,26,38,48,55]) #blue
    zlist = N.array([0.1,0.4,1.0,2.0,3.0])
    jj = N.nonzero(abs(zcen-zlist)<0.2)[0]
    if (jj.size==0):
        return(N.array([1,1]),N.array([1,1]),0,0,0)
    if (jj.size>1):
        # several snapshots within 0.2: take the closest (first on ties)
        distval = abs(zcen-zlist)
        jj = N.nonzero(distval <=distval.min())[0]
        if (jj.size>1):
            jj = jj[0]
    zmidh=zlist[jj]
    jstart = startpoints[jj,addcolor]
    jend = startpoints[jj+1,addcolor]
    if (jend<=jstart):
        # empty slice, e.g. there is no "all" curve at z=3
        return(N.array([1,1]),N.array([1,1]),0,0,0)
    # columns 0,1 are the log10 bin edges; use the bin-center mass
    mass = (10**gals[N.arange(jstart,jend),0]+10**gals[N.arange(jstart,jend),1])/2.
    phi = gals[N.arange(jstart,jend),2]
    # symmetric errors: phip and phim alias the same array (read-only use)
    phip = phim = gals[N.arange(jstart,jend),3]
    #log will be later
    logm = N.log10(mass)
    print "using Henriques z = %3.2f "%(zlist[jj])
    print "Maraston used to calculate M*, convert +0.14 dex"
    # shift from maraston to BC using 0.14, Henriques++1410.0365
    logm += 0.14
    return(logm,phi,phip,phim,zmidh)
def getphiv(zcen=0.45,addcolor=0):
    """
    VIPERS Schechter-function stellar mass function, valid 0.2 < z <= 1.5.

    Moutard et al, 2016
    The VIPERS Multi-Lambda Survey. II
    Diving with massive galaxies in 22 square degrees since z = 1.5
    arxiv: 1602.05917 v3
    Use Schechter functions from table II (double)
    also have single schechter functions in table I
    stellar masses using LePhare, metallicities 0.008 and 0.02,
    Bruzual & Charlot (2003),
    assuming a Chabrier (2003) initial mass function.
    exponentially declining star formation history, 9 possible decay rates between 0.1 and 30 Gyr. See paper sec. 4.1 for dust
    prescription - three considered including Chabrier (2000).
    use equation 6
    phi(M*) dM* = exp(-M*/Mref) (phi1* (M*/Mref)^alpha1 + phi2*(M*/Mref)^alpha2) dM*/Mref

    @return: (log10 M*, phi per dex evaluated above the mass limit,
        zmin, zmax); dummies outside the valid redshift range
    """
    # now need to find right redshift and type
    # first redshift
    zrange = N.array([0.2,0.5,0.8,1.1,1.5])
    if ((zcen<zrange.min())|(zcen>zrange.max())):
        return(N.array([1,1]),N.array([1,1]),0.,0.)
    galkind =("all","quiescent","starforming")
    ff = open("viper_sche_%s.dat"%(galkind[addcolor]))
    sparams = N.loadtxt(ff)
    ff.close()
    # file columns:
    #zlow,zhigh,nobj(2),mlim(3),m*(4),m*1su(5),m*1sl(6),phi*(7),phi*_1su(8)
    #phi*_1sl(9),alpha(10),alpha_1su(11),alpha_1sl(12),
    #phi2*(13),phi2*_su(14)
    #phi2*_1sl(15),
    doublefit=0
    jj = N.nonzero((zcen>sparams[:,0])&(zcen<=sparams[:,1]))[0]#redshift and floating alpha
    sparams = sparams[jj,:].flatten() #now at right redshift
    if (sparams[13] != -99): #double schechter fit
        doublefit=1
    #now just one row
    zmin = sparams[0]
    zmax = sparams[1]
    print "using VIPERS range %3.2f < z < %3.2f "%(zmin,zmax)
    print "Bruzual Charlot used to calculate M*, Chabrier IMF "
    logm = N.linspace(sparams[3],12,100) #log M, units M_o
    mstar = sparams[4]
    phistar = N.power(10,sparams[7])
    alpha = sparams[10]
    # first Schechter term (per unit ln M; ln 10 factor applied at the end)
    phi = phistar*N.power(10.,(logm-mstar)*(alpha+1))*N.exp(-N.power(10,logm-mstar))
    print "doublefit=",doublefit
    if (doublefit==1):
        phistar2 = N.power(10,sparams[13])
        # NOTE(review): alpha2 is read from column 16, one past the last
        # column documented in the header comment above — verify against
        # the viper_sche_*.dat layout.
        alpha2 = sparams[16]
        phi += phistar2*N.power(10.,(logm-mstar)*(alpha2+1))*N.exp(-N.power(10,logm-mstar))
    # convert to per dex
    phi = phi*N.log(10)
    return(logm,phi,zmin,zmax)
def plot4tog(zcen=0.45,fname="galshort.dat",hval=0.7,omm=0.31,slopeval=0.,shiftval=0.,boxside=-1,runname="runname",delz=0.02,ramin=16.98,ramax=20.17,decmin=13.23,decmax=16.33):
    """
    three colors, four models, all together, with or without obs scatter

    2x2 panel figure: one panel per galaxy type (all/quiescent/starforming),
    lower-right panel reserved for the legend.  Each panel overlays the
    simulated stellar mass function (with and without observational scatter)
    and the published SMFs whose redshift windows include zcen, rescaled to
    the simulation hubble constant hval.
    Output: smf4sims_<z*100>_<runname>.pdf
    NOTE: Python 2 only -- panel index uses integer division i/2.
    """
    f,ax = plt.subplots(2,2,sharex=True, sharey=True)
    collist = ('k','r','b')
    galtype=("all","quiescent","starforming")
    smftype=("primus_bc03","bwc_comp","sdss_gal","primus_fsps","zfourge","cos/uv","hen15","vipers16")
    zminlist = N.zeros(len(smftype),dtype='float')
    zmaxlist = N.zeros(len(smftype),dtype='float')
    smfflag=N.zeros(len(smftype),dtype='int') #flag for what appears
    # markers/colors per SMF; index 7 (vipers) is drawn as a line, no marker
    smarkerlist=('s','^','x','*','o','+','v')
    scollist=('c','y','g','m','darkgreen','thistle','pink','sandybrown')
    for i in range(3): #color
        #first get data
        bin_centers,ngalact,ngal=getsimstellar(zcen,i,fname,hval,omm,slopeval,shiftval,boxside,delz,ramin,ramax,decmin,decmax,0)
        #now with scatter Behroozi/Wechsler/Conroy 2013
        ax[i%2,i/2].step(bin_centers, ngalact,collist[i],linestyle=':',label='simulation')
        bin_centers,ngalact,ngal=getsimstellar(zcen,i,fname,hval,omm,slopeval,shiftval,boxside,delz,ramin,ramax,decmin,decmax,1)
        ax[i%2,i/2].step(bin_centers, ngalact,collist[i],label="scattered sim")
        #run through smf's
        hrat = hval/0.7 #h70, most people use h=0.7 in their analysis
        # each get* helper returns sentinel arrays with logm[0]<=1 when the
        # requested redshift is not covered, hence the logm[0]>1 guards
        if (i==0):
            ismf = 0
            logm,phi,phip,phim,bpmin,bpmax =getphim_bc03(zcen,i)
            if (logm[0]>1):
                phi *=(hrat*hrat*hrat) #they use (h70^-1 mpc)^3 for vol, h70^-2 Mo
                phip *=(hrat*hrat*hrat)
                phim *=(hrat*hrat*hrat)
                logm -= 2.*N.log10(hval/0.70)
                ax[i%2,i/2].errorbar(logm,phi,yerr=[phim,phip],fmt=' ',color=scollist[ismf],marker=smarkerlist[ismf],label="%s %3.2f<z<%3.2f"%(smftype[ismf],bpmin,bpmax))
                zminlist[ismf] = bpmin
                zmaxlist[ismf] = bpmax
                smfflag[ismf]=1
            ismf=1
            logm,phi,zmid = getphibwc(zcen)
            if (logm[0]>1):
                phi *=(hrat*hrat*hrat) #they use (h70^-1 mpc)^3 for vol, h70^-2 Mo
                logm -= 2.*N.log10(hval/0.70)
                ax[i%2,i/2].plot(logm,phi,color=scollist[ismf],marker=smarkerlist[ismf],label="%s z=%3.2f"%(smftype[ismf],zmid))
                smfflag[ismf]=1
                zminlist[ismf] = zmid
        ismf=2
        logm,phi,phip,phim,szmin,szmax =getphisg(zcen,i)
        if (logm[0]>1):
            phi *=(hrat*hrat*hrat) #they use (h70^-1 mpc)^3 for vol, h70^-2 Mo
            phip *=(hrat*hrat*hrat)
            phim *=(hrat*hrat*hrat)
            logm -= 2.*N.log10(hval/0.70)
            ax[i%2,i/2].errorbar(logm,phi,yerr=[phim,phip],xerr=0.0,fmt=' ',marker=smarkerlist[ismf],color=scollist[ismf],label="SDSS-FSPS %3.2f<z<%3.2f"%(szmin,szmax))
            smfflag[ismf]=1
            zminlist[ismf] = szmin
            zmaxlist[ismf] = szmax
        ismf=3
        logm,phi,phip,phim,pzmin,pzmax=getphim(zcen,i)
        if (logm[0]>1):
            phi *=(hrat*hrat*hrat) #they use (h70^-1 mpc)^3 for vol, h70^-2 Mo
            phip *=(hrat*hrat*hrat)
            phim *=(hrat*hrat*hrat)
            logm -= 2.*N.log10(hval/0.70)
            ax[i%2,i/2].errorbar(logm,phi,yerr=[phim,phip],xerr=0.0,fmt=' ',marker=smarkerlist[ismf],color=scollist[ismf],label="%s %3.2f<z<%3.2f"%(smftype[ismf],pzmin,pzmax))
            smfflag[ismf]=1
            zminlist[ismf] = pzmin
            zmaxlist[ismf] = pzmax
        ismf = 4
        #add zfourge
        logm,phi,phip,phim,zfmin,zfmax =getphit(zcen,i)
        if (logm[0]>1):
            phi *=(hrat*hrat*hrat) #they use (h70^-1 mpc)^3 for vol, h70^-2 Mo
            phip *=(hrat*hrat*hrat)
            phim *=(hrat*hrat*hrat)
            logm -= 2.*N.log10(hval/0.70)
            ax[i%2,i/2].errorbar(logm,phi,yerr=[phim,phip],xerr=0.0,fmt=' ',marker=smarkerlist[ismf],color=scollist[ismf],label="%s %3.2f<z<%3.2f"%(smftype[ismf],zfmin,zfmax))
            smfflag[ismf]=1
            zminlist[ismf] = zfmin
            zmaxlist[ismf] = zfmax
        ismf = 5 #ultravista
        logm,phi,phip,phim,uzmin,uzmax =getphiuv(zcen,i)
        if (logm[0]>1):
            phi *=(hrat*hrat*hrat) #they use (h70^-1 mpc)^3 for vol, h70^-2 Mo
            phip *=(hrat*hrat*hrat)
            phim *=(hrat*hrat*hrat)
            logm -= 2.*N.log10(hval/0.70)
            ax[i%2,i/2].errorbar(logm,phi,yerr=[phim,phip],xerr=0.0,fmt=' ',marker=smarkerlist[ismf],color=scollist[ismf],label="%s %3.2f<z<%3.2f "%(smftype[ismf],uzmin,uzmax))
            smfflag[ismf]=1
            zminlist[ismf] = uzmin
            zmaxlist[ismf] = uzmax
        #schechter function to ultravista
        logm,phi,uzmin,uzmax = getphiuv_sch(zcen,i)
        if (logm[0]>1):
            phi *=(hrat*hrat*hrat) #they use (h70^-1 mpc)^3 for vol, h70^-2 Mo
            # NOTE(review): phip/phim here are left over from the getphiuv()
            # call above (getphiuv_sch returns no errors), so rescaling them
            # is a no-op for this curve; and the label below reuses
            # zfmin/zfmax (zfourge) instead of uzmin/uzmax.  Looks like a
            # copy-paste slip -- compare the same section of plot4sep().
            # TODO confirm and fix.
            phip *=(hrat*hrat*hrat)
            phim *=(hrat*hrat*hrat)
            logm -= 2.*N.log10(hval/0.70)
            ax[i%2,i/2].plot(logm,phi,color=scollist[ismf],label="%s %3.2f<z<%3.2f "%(smftype[ismf],zfmin,zfmax))
            smfflag[ismf]=1
        ismf = 6 #henriques #center of mass bin taken
        logm,phi,phip,phim,zhen = getphihen(zcen,i)
        if (logm[0]>1):
            phi *= hval*hval*hval #units [h/Mpc]^3
            phip *= hval*hval*hval
            phim *= hval*hval*hval
            logm -= 2*N.log10(hval) #units [M*/h^2]
            ax[i%2,i/2].errorbar(logm,phi,yerr=[phim,phip],xerr=0.0,fmt=' ',marker=smarkerlist[ismf],color=scollist[ismf],label="%s z=%3.2f "%(smftype[ismf],zhen))
            smfflag[ismf]=1
            zminlist[ismf] = zhen
        ismf = 7 #vipers
        logm,phi,vzmin,vzmax = getphiv(zcen,i)
        if (logm[0]>1): #they assume h=0.70
            phi *=(hrat*hrat*hrat) #they use (h70^-1 mpc)^3 for vol, h70^-2 Mo
            logm -= 2.*N.log10(hval/0.70)
            ax[i%2,i/2].plot(logm,phi,color=scollist[ismf],label="%s %3.2f<z<%3.2f "%(smftype[ismf],vzmin,vzmax))
            smfflag[ismf]=1
            zminlist[ismf]=vzmin
            zmaxlist[ismf] = vzmax
        ax[i%2,i/2].set_xlim(8.2,12.)
        ax[i%2,i/2].set_ylim(1.e-5,0.02)
        ax[i%2,i/2].set_yscale("log")
        ax[i%2,i/2].text(8.5,1.e-4,'%s'%(galtype[i]))
        ax[i%2,i/2].text(8.5,5.e-5,r'$\bar{z}_{\rm sim}$=%3.2f'%(zcen))
    #trick it into putting legend in empty box
    # (plot invisible dummy points far outside the axis limits so each
    # style/label pair shows up in the lower-right legend panel)
    logm = N.array([6.0,6.01])
    phi = N.array([1.e-7,1.2e-7])
    phim = N.array([1.e-8,1.2e-8])
    phip = N.array([1.e-8,1.2e-8])
    bin_centers = N.array([7,7.2])
    ngalact = N.array([1.e-8,1.e-8])
    ax[1,1].set_ylim(1.e-5,0.04)
    ax[1,1].set_yscale("log")
    ax[1,1].step(bin_centers, ngalact,'k',label="simulation")
    ax[1,1].step(bin_centers, ngalact,'k',linestyle=':',label="sim w/out obs scatter")
    ax[1,1].get_xaxis().set_visible(False)
    ax[1,1].get_yaxis().set_visible(False)
    logm=N.array([5.,5.1])
    phi = 1.e-7*N.ones(2,dtype="float")
    phim = phip = 1.e-8
    i = 0
    for ismf in range(len(smftype)):
        if (smfflag[ismf]>0):
            if ((ismf==1)|(ismf==6)):
                # single-redshift data sets: label with z= instead of a range
                ax[1,1].plot(logm,phi,marker=smarkerlist[ismf],color=scollist[ismf],linestyle='None',label="%s z=%3.2f"%(smftype[ismf],zminlist[ismf]))
            else:
                if (ismf !=7):
                    ax[1,1].errorbar(logm,phi,yerr=[phim,phip],xerr=0.0,fmt=' ',marker=smarkerlist[ismf],color=scollist[ismf],label="%s %3.2f<z<%3.2f"%(smftype[ismf],zminlist[ismf],zmaxlist[ismf]))
                if ((ismf==5)|(ismf==7)):
                    # Schechter-fit curves are drawn as plain lines
                    ax[1,1].plot(logm,phi,color=scollist[ismf],label="%s %3.2f<z<%3.2f "%(smftype[ismf],zminlist[ismf],zmaxlist[ismf]))
    ax[0,0].set_ylabel(" $\Phi$ [Mpc${}^{-3}$/dex]")
    ax[1,1].legend(loc=3,fontsize='10',frameon=False)
    ax[1,1].axis('off')
    ax[1,0].set_xlabel(r'M* [log $M_\odot$]')
    f.subplots_adjust(hspace=0.001)
    f.subplots_adjust(wspace=0.001)
    plt.tight_layout()
    plt.savefig("smf4sims_%d_%s.pdf"%(zcen*100.5,runname))
    plt.close("all")
def plot4sep(zcen=0.45,fname="galshort.dat",hval=0.7,omm=0.31,slopeval=0.,shiftval=0.,boxside=-1,runname="runname",delz=0.02,ramin=16.98,ramax=20.17,decmin=13.23,decmax=16.33):
    """
    one color four models, all together, with or without obs scatter

    Same content as plot4tog() but one figure per galaxy type, saved as
    smf4sims_<type>_<z*100>_<runname>.pdf.  Published SMFs are rescaled to
    the simulation hubble constant hval before plotting.
    """
    hrat = hval/0.7
    collist = ('k','r','b')
    galtype=("all","quiescent","starforming")
    smftype=("primus_bc03","bwc_comp","sdss_gal","primus_fsps","zfourge","cos/uv","hen15","vipers16")
    smarkerlist=('s','^','x','*','o','+','v')
    scollist=('c','y','g','m','darkgreen','thistle','pink','sandybrown')
    for i in range(3): #color
        #set flags for each color separately
        zminlist = N.zeros(len(smftype),dtype='float')
        zmaxlist = N.zeros(len(smftype),dtype='float')
        smfflag=N.zeros(len(smftype),dtype='int') #flag for what appears
        f,ax = plt.subplots(1,1)
        bin_centers,ngalact,ngal=getsimstellar(zcen,i,fname,hval,omm,slopeval,shiftval,boxside,delz,ramin,ramax,decmin,decmax,1)
        #with scatter
        ax.step(bin_centers, ngalact,collist[i],label="simulation")
        bin_centers,ngalact,ngal=getsimstellar(zcen,i,fname,hval,omm,slopeval,shiftval,boxside,delz,ramin,ramax,decmin,decmax,0)
        #no scatter
        ax.step(bin_centers, ngalact,collist[i],linestyle=':',label="sim, no obs scatter")
        #run through smf's
        # each get* helper returns sentinel arrays with logm[0]<=1 when the
        # requested redshift is not covered, hence the logm[0]>1 guards
        if (i==0):
            ismf = 0
            logm,phi,phip,phim,bpmin,bpmax =getphim_bc03(zcen,i)
            if (logm[0]>1):
                phi *=(hrat*hrat*hrat) #they use (h70^-1 mpc)^3 for vol, h70^-2 Mo
                phip *=(hrat*hrat*hrat)
                phim *=(hrat*hrat*hrat)
                logm -= 2.*N.log10(hval/0.70)
                ax.errorbar(logm,phi,yerr=[phim,phip],fmt=' ',color=scollist[ismf],marker=smarkerlist[ismf],label="%s %3.2f<z<%3.2f"%(smftype[ismf],bpmin,bpmax))
                zminlist[ismf] = bpmin
                zmaxlist[ismf] = bpmax
                smfflag[ismf]=1
            ismf=1
            logm,phi,zmid = getphibwc(zcen)
            if (logm[0]>1):
                phi *=(hrat*hrat*hrat) #they use (h70^-1 mpc)^3 for vol, h70^-2 Mo
                logm -= 2.*N.log10(hval/0.70)
                ax.plot(logm,phi,color=scollist[ismf],marker=smarkerlist[ismf],linestyle='None',label="%s z=%3.2f"%(smftype[ismf],zmid))
                smfflag[ismf]=1
                zminlist[ismf] = zmid
        ismf=2
        logm,phi,phip,phim,szmin,szmax =getphisg(zcen,i)
        if (logm[0]>1):
            phi *=(hrat*hrat*hrat) #they use (h70^-1 mpc)^3 for vol, h70^-2 Mo
            phip *=(hrat*hrat*hrat)
            phim *=(hrat*hrat*hrat)
            logm -= 2.*N.log10(hval/0.70)
            ax.errorbar(logm,phi,yerr=[phim,phip],xerr=0.0,fmt=' ',marker=smarkerlist[ismf],color=scollist[ismf],label="SDSS-FSPS %3.2f<z<%3.2f"%(szmin,szmax))
            smfflag[ismf]=1
            zminlist[ismf] = szmin
            zmaxlist[ismf] = szmax
        ismf=3
        logm,phi,phip,phim,pzmin,pzmax=getphim(zcen,i)
        if (logm[0]>1):
            phi *=(hrat*hrat*hrat) #they use (h70^-1 mpc)^3 for vol, h70^-2 Mo
            phip *=(hrat*hrat*hrat)
            phim *=(hrat*hrat*hrat)
            logm -= 2.*N.log10(hval/0.70)
            ax.errorbar(logm,phi,yerr=[phim,phip],xerr=0.0,fmt=' ',marker=smarkerlist[ismf],color=scollist[ismf],label="%s %3.2f<z<%3.2f"%(smftype[ismf],pzmin,pzmax))
            smfflag[ismf]=1
            zminlist[ismf] = pzmin
            zmaxlist[ismf] = pzmax
        ismf = 4
        #add zfourge
        logm,phi,phip,phim,zfmin,zfmax =getphit(zcen,i)
        if (logm[0]>1):
            phi *=(hrat*hrat*hrat) #they use (h70^-1 mpc)^3 for vol, h70^-2 Mo
            phip *=(hrat*hrat*hrat)
            phim *=(hrat*hrat*hrat)
            logm -= 2.*N.log10(hval/0.70)
            ax.errorbar(logm,phi,yerr=[phim,phip],xerr=0.0,fmt=' ',marker=smarkerlist[ismf],color=scollist[ismf],label="%s %3.2f<z<%3.2f"%(smftype[ismf],zfmin,zfmax))
            smfflag[ismf]=1
            zminlist[ismf] = zfmin
            zmaxlist[ismf] = zfmax
        ismf = 5 #ultravista
        logm,phi,phip,phim,uzmin,uzmax =getphiuv(zcen,i)
        if (logm[0]>1):
            phi *=(hrat*hrat*hrat) #they use (h70^-1 mpc)^3 for vol, h70^-2 Mo
            phip *=(hrat*hrat*hrat)
            phim *=(hrat*hrat*hrat)
            logm -= 2.*N.log10(hval/0.70)
            ax.errorbar(logm,phi,yerr=[phim,phip],xerr=0.0,fmt=' ',marker=smarkerlist[ismf],color=scollist[ismf],label="%s %3.2f<z<%3.2f "%(smftype[ismf],uzmin,uzmax))
            smfflag[ismf]=1
            zminlist[ismf] = uzmin
            zmaxlist[ismf] = uzmax
        #schechter function to ultravista
        logm,phi,uzmin,uzmax = getphiuv_sch(zcen,i)
        if (logm[0]>1):
            phi *=(hrat*hrat*hrat) #they use (h70^-1 mpc)^3 for vol, h70^-2 Mo
            logm -= 2.*N.log10(hval/0.70)
            ax.plot(logm,phi,color=scollist[ismf],label="%s Schechter %3.2f<z<%3.2f "%(smftype[ismf],uzmin,uzmax))
            smfflag[ismf]=1
        ismf = 6 #henriques
        logm,phi,phip,phim,zhen = getphihen(zcen,i)
        if (logm[0]>1):
            phi *= hval*hval*hval #units [h/Mpc]^3
            phip *= hval*hval*hval
            phim *= hval*hval*hval
            logm -= 2*N.log10(hval) #units [M*/h^2]
            ax.errorbar(logm,phi,yerr=[phim,phip],xerr=0.0,fmt=' ',marker=smarkerlist[ismf],color=scollist[ismf],label="%s z=%3.2f "%(smftype[ismf],zhen))
            smfflag[ismf]=1
            zminlist[ismf] = zhen
        ismf = 7 #vipers
        logm,phi,vzmin,vzmax = getphiv(zcen,i)
        if (logm[0]>1): #they assume h=0.70
            phi *=(hrat*hrat*hrat) #they use (h70^-1 mpc)^3 for vol, h70^-2 Mo
            logm -= 2.*N.log10(hval/0.70)
            ax.plot(logm,phi,color=scollist[ismf],label="%s Schechter %3.2f<z<%3.2f "%(smftype[ismf],vzmin,vzmax))
            smfflag[ismf]=1
            zminlist[ismf]=vzmin
            zmaxlist[ismf] = vzmax
        # NOTE(review): the next three lines are immediately overridden by
        # the following three (ylim 0.02 then 0.04) -- redundant, second
        # setting wins.
        ax.set_xlim(8.2,12.)
        ax.set_ylim(1.e-5,0.02)
        ax.set_yscale("log")
        ax.set_xlim(8.2,12.)
        ax.set_ylim(1.e-5,0.04)
        ax.set_yscale("log")
        ax.text(10.6,0.02,r'$\bar{z}_{\rm sim}$ = %3.2f'%(zcen))
        ax.text(10.6,0.013,'%s'%(galtype[i]),color=collist[i])
        ax.text(10.6,0.008,'%s'%(runname),color=collist[i])
        ax.set_ylabel(" $\Phi$ [Mpc${}^{-3}$/dex]")
        ax.legend(loc=3,fontsize='10',frameon=False)
        ax.set_xlabel(r'M* [log $M_\odot$]')
        plt.tight_layout()
        plt.savefig("smf4sims_%s_%d_%s.pdf"%(galtype[i],zcen*100.5,runname))
        plt.close("all")
def getmsmh(fname="inputfile.dat",ratflag=1):
    """
    M*(Mh) for centrals.

    ratflag=1 : M*/Mh as fn of Mh
    ratflag=0 : M* as fn of Mh

    Reads columns (0,5,6) = (logM*, ifsat, logMh) from fname and keeps
    centrals (ifsat == 0), then bins them in 39 equal bins of logMh.

    Returns (mhbin, mstarave, mstarlist, ngaltot):
      mhbin     - 39 bin centers in logMh
      mstarave  - mean M* (linear) per bin; one entry per bin
                  (fixed: previously length 40 with a trailing unused zero,
                  mismatched with mhbin)
      mstarlist - per-bin arrays of M* or M*/Mh (per ratflag), linear units
      ngaltot   - total galaxies binned (a galaxy exactly at logmh.min()
                  falls below the first bin's open lower edge and is not
                  counted -- preserved from the original behavior)
    """
    with open(fname) as ff:
        gals = N.loadtxt(ff,usecols=(0,5,6))
    jj = N.nonzero(gals[:,1]==0)[0] #get centrals (ifsat == 0)
    logmstar = gals[jj,0]
    logmh = gals[jj,2]
    # 40 edges -> 39 bins spanning the central halo-mass range
    mhbin = N.linspace(logmh.min(),logmh.max(),40)
    mstarave = N.zeros(39,dtype="float")
    mstarlist =[]
    ngaltot = 0
    for i in range(39):
        jj = N.nonzero((logmh> mhbin[i])&(logmh<=mhbin[i+1]))[0]
        # +1e-10 avoids 0/0 for empty bins
        mstarave[i] = (10**logmstar[jj]).sum()/(jj.size +1.e-10)
        mstarlist.append(10**(logmstar[jj]-ratflag*logmh[jj]))
        ngaltot += jj.size
    # shift edges to bin centers and drop the extra trailing edge
    mhbin +=(mhbin[1]-mhbin[0])/2.
    mhbin = N.delete(mhbin,39)
    return(mhbin,mstarave,mstarlist,ngaltot)
def rhox(xval):
    """
    NFW density profile with rho_0 = 1, evaluated at x = r/r_s.
    (from M. White)
    """
    onepx = 1. + xval
    return 1. / (xval * onepx * onepx)
def rhobar(xval):
    """
    Mean density interior to x = r/r_s for an NFW profile with rho_0 = 1.
    (from M. White)
    """
    xsq = xval * xval
    inner = N.log(1 + xval) / (xsq * xval) - 1. / ((1 + xval) * xsq)
    return 3 * inner
def mmean(rho,rho0):
    """
    from M. White
    Returns the mass enclosed within the radius at which the mean interior
    density falls to rho (times the critical density), for an NFW halo with
    characteristic density rho0.  Used for definitions like delta = 200c.
    """
    # Bisect the root of g(x) = rho0*rhobar(x) - rho on [x_lo, x_hi],
    # stopping when the bracket is narrow relative to its midpoint.
    x_lo = 1.e-10
    g_lo = rho0*rhobar(x_lo) - rho
    x_hi = 100.
    g_hi = rho0*rhobar(x_hi) - rho
    x_mid = (x_lo + x_hi)/2.0
    g_mid = rho0*rhobar(x_mid) - rho
    if (g_mid*g_hi < 0):
        x_lo, g_lo = x_mid, g_mid
    else:
        x_hi, g_hi = x_mid, g_mid
    while (x_hi - x_lo > 1.e-3*x_mid):
        x_mid = (x_lo + x_hi)/2.0
        g_mid = rho0*rhobar(x_mid) - rho
        if (g_mid*g_hi < 0):
            x_lo, g_lo = x_mid, g_mid
        else:
            x_hi, g_hi = x_mid, g_mid
    # NFW enclosed mass: M(x) = 4 pi rho0 (ln(1+x) - x/(1+x))
    return 4*N.pi*rho0*(N.log(1 + x_mid) - x_mid/(1 + x_mid))
def mvirtom200(logmval,zcen=0.45):
    """
    from M. White
    Given log10 Mvir, return the factor m200/mvir (so m200 = factor*mvir),
    to feed Mvir-based masses to the Moster M200 mass function.
    Uses an NFW profile to convert, as in the 2001 paper
    http://arxiv.org/abs/astro-ph/0011495
    Assumes Mvir and M200 are close enough that the concentration of one
    can be used for the other.
    """
    omm = 0.31
    # concentration from Mvir; the Mvir/M200 difference is negligible here
    mvir_lin = 10**logmval
    conc = 10*N.power(mvir_lin/3.42e12, -0.2/(1 + zcen))
    # Omega_m(z) and the Bryan & Norman virial overdensity
    omz = omm/(omm + (1 - omm)/(1 + zcen)**3)
    DelC = 18*N.pi*N.pi + 82*(omz - 1.) - 39*(omz - 1)*(omz - 1)
    # NFW characteristic density for a 200c halo of this concentration
    rho0 = (200./3.)*N.power(conc, 3.0)/(N.log(1 + conc) - conc/(1 + conc))
    mvir_nfw = 4*N.pi*(200./3.)*conc*conc*conc
    ratio_vir_over_200 = mmean(DelC*1., rho0)/mvir_nfw
    return 1/ratio_vir_over_200
def getmoster(logmval,zcen=0.25,convflag=1):
    """
    M*/Mh from abundance matching; units are M* for everything,
    including Mh (no h).

    Moster, Naab & White, 2013
    Galactic star formation and accretion histories from matching galaxies to dark matter haloes, MNRAS, 428, 3121
    http://arxiv.org/abs/1205.5807
    Mh = M200c
    Bruzual-Charlot
    tuned to perez-gonzalez 08
    http://arxiv.org/abs/0709.1354
    664 arcmin^2
    and santini(2011)
    http://arxiv.org/abs/1111.5728
    33 arcmin^2
    table 1

    logmval : log10 halo mass (Mvir)
    zcen    : redshift
    convflag: 1 (default) converts Mvir to M200c via mvirtom200();
              0 skips the conversion.
    Returns the ratio M*/Mh (intrinsic scatter sigma_m(logm)=0.15 is
    not applied here).
    """
    # only pay for the Mvir -> M200 conversion when it is requested
    if (convflag==0):
        mvirconvert = 1.
    else:
        mvirconvert = mvirtom200(logmval,zcen) #m200 = mvirconvert*mvir
    # best-fit parameters, MNW13 table 1
    m10 = 11.590
    m11 = 1.195
    n10 = 0.0351
    n11 = -0.0247
    beta10 = 1.376
    beta11 = -0.826
    gamma10 = 0.608
    gamma11 = 0.329
    # 1-sigma errors (unused, kept for reference):
    #   m10 0.236, m11 0.353, n10 0.0058, n11 0.0069,
    #   beta10 0.153, beta11 0.225, gamma10 0.059, gamma11 0.173
    zrat = zcen/(zcen+1)
    norm = n10+n11*zrat
    M1 = m10 +m11*zrat #this is log10 M1
    beta = beta10+beta11*zrat
    gamma = gamma10+gamma11*zrat
    # MNW13 eq. 2; sign of beta is correct, logmval and M1 switched
    rat = 2*norm/(10**(beta*(M1-logmval)) + 10**(gamma*(logmval-M1)))
    rat *= mvirconvert
    return(rat)
def fpb(xval,zcen=0.25):
    """
    Fitting function f(x), x = log10(Mh/M1), of Behroozi, Wechsler & Conroy:
    The Average Star Formation Histories of Galaxies in Dark Matter Halos
    from z=0-8, http://arxiv.org/abs/1207.6105 (eqs. 3-4, section-5 params).
    From Peter B: alpha should be -alpha in Eq. 3, and the exponent of
    gamma is applied after the logarithm is taken, not before.
    """
    aexp = 1/(1 + zcen)
    nu = N.exp(-4*aexp*aexp)
    # section-5 parameter evolution (an older draft used
    # alpha=-1.474..., delta=3.529..., gamma=0.395...)
    alpha = -1.412 + 0.731*(aexp - 1)*nu
    delta = 3.508 + (2.608*(aexp - 1) - 0.043*zcen)*nu
    gamma = 0.316 + (1.319*(aexp - 1) + 0.279*zcen)*nu
    powlaw = N.log10(10**(alpha*xval) + 1)
    bump = delta*(N.log10(1 + N.exp(xval)))**gamma/(1 + N.exp(10**(-xval)))
    return bump - powlaw
def getms_pb(mhbin,zcen=0.25):
    """
    M*/Mh ratio from the Behroozi, Wechsler, Conroy fitting function
    (section 5 of http://arxiv.org/abs/1207.6105); Mh is Mvir (M_delta).
    From Peter B: alpha should be -alpha in Eq. 3, and the gamma exponent
    applies after the logarithm is taken.
    mhbin is log10 Mh; returns the linear ratio M*/Mh.
    """
    aexp = 1/(1 + zcen)
    nu = N.exp(-4*aexp*aexp)
    # section-5 parameters (older draft values: logm1 = 11.539 + ...,
    # logepsilon = -1.785 + ...)
    logm1 = 11.514 + (-1.793*(aexp - 1) - 0.251*zcen)*nu
    logepsilon = -1.777 + (-0.006*(aexp - 1) + -0.0*zcen)*nu - 0.119*(aexp - 1)
    # eq. 3: log M* = log(eps*M1) + f(log Mh/M1) - f(0)
    logmstar = logm1 + logepsilon + fpb(mhbin - logm1, zcen) - fpb(0, zcen)
    return 10**(logmstar - mhbin)
def plotboxwhisker_mult(outid="thisrun",fname="inputfile.dat",zcen=0.45,runname="runname"):
    # Box-whisker plot of M* (NOT the ratio M*/Mh) vs halo mass for
    # centrals, overplotted with the MNW13 and BWC13 abundance-matching
    # relations.  Saves stellar_halo_noratio_<outid>_<runname>.pdf.
    # instead use M* vs Mh not M*/Mh
    #box is lower and upper quartiles
    # line is median
    # whisker is by choice 10,90 percentiles
    # outliers not plotted ("showfliers='false')
    from matplotlib.ticker import MultipleLocator,FormatStrFormatter
    fig,ax = plt.subplots(1,1)
    ax.set_xlim(10.7,15.)
    ax.set_ylim(1.e7,5.e13)
    majorLocator= MultipleLocator(1)
    minorLocator=MultipleLocator(0.2)
    majorFormatter=FormatStrFormatter('%d')
    mhbin,mstarave,mstarlist,ngaltot = getmsmh(fname,0) #just M* not M*/Mh
    ax.boxplot(mstarlist,whis=[10,90],showfliers=False,positions=mhbin,widths=0.1)
    #mhbin is log M halo
    msrat_moster = N.zeros(mhbin.size,dtype="float")
    for im in range(mhbin.size):
        msrat_moster[im] = getmoster(mhbin[im],zcen)
    # getmoster/getms_pb return M*/Mh, so multiply by Mh to plot M*
    ax.plot(mhbin,msrat_moster*(10**mhbin),'m-.',label='MNW 2013')
    for im in range(mhbin.size):
        msrat_moster[im] = getmoster(mhbin[im],zcen,0)
    ax.plot(mhbin,msrat_moster*(10**mhbin),color='cornflowerblue',linestyle=':',label=r'MNW13, no $M_{200} \rightarrow M_{vir}$')
    msrat_pb = getms_pb(mhbin,zcen)
    ax.plot(mhbin,msrat_pb*(10**mhbin),'m',label='BWC 2013')
    #plot 15 in Behroozi, Wechsler,Conroy 2013, 0-8 paper
    ax.set_yscale("log")
    ax.set_xlabel(r'log$M_{\rm vir}$ $[M_\odot]$ ')
    ax.set_ylabel(r' $M_* $')
    ax.text(mhbin[25],1.e8,r'$\bar{z}$= %3.2f'%(zcen))
    ax.text(mhbin[25],5.e8,r'$%d$ galaxies'%(ngaltot))
    ax.xaxis.set_major_locator(majorLocator)
    ax.xaxis.set_major_formatter(majorFormatter)
    ax.xaxis.set_minor_locator(minorLocator)
    ax.legend(fontsize=12,frameon=False)
    plt.tight_layout()
    plt.savefig("stellar_halo_noratio_%s_%s.pdf"%(outid,runname))
    plt.close("all")
def plotboxwhisker(outid="thisrun",fname="inputfile.dat",zcen=0.45,runname="runname"):
    # Box-whisker plot of the ratio M*/Mvir vs halo mass for centrals,
    # overplotted with the MNW13 and BWC13 abundance-matching relations.
    # Saves stellar_halo_ratio_<outid>_<runname>.pdf.
    #box is lower and upper quartiles
    # line is median
    # whisker is by choice 10,90 percentiles
    # outliers not plotted ("showfliers='false')
    from matplotlib.ticker import MultipleLocator,FormatStrFormatter
    fig,ax = plt.subplots(1,1)
    ax.set_xlim(10.7,15.)
    ax.set_ylim(0.001,0.05)
    majorLocator= MultipleLocator(1)
    minorLocator=MultipleLocator(0.2)
    majorFormatter=FormatStrFormatter('%d')
    # default ratflag=1 -> per-bin M*/Mh values
    mhbin,mstarave,mstarlist,ngaltot = getmsmh(fname)
    ax.boxplot(mstarlist,whis=[10,90],showfliers=False,positions=mhbin,widths=0.1)
    #mhbin is log M halo
    msrat_moster = N.zeros(mhbin.size,dtype="float")
    for im in range(mhbin.size):
        msrat_moster[im] = getmoster(mhbin[im],zcen)
    ax.plot(mhbin,msrat_moster,'m-.',label='MNW 2013')
    for im in range(mhbin.size):
        msrat_moster[im] = getmoster(mhbin[im],zcen,0)
    ax.plot(mhbin,msrat_moster,color='cornflowerblue',linestyle=':',label=r'MNW13, no $M_{200} \rightarrow M_{vir}$')
    msrat_pb = getms_pb(mhbin,zcen)
    ax.plot(mhbin,msrat_pb,'m',label='BWC 2013')
    #plot 15 in Behroozi, Wechsler,Conroy 2013, 0-8 paper
    ax.set_yscale("log")
    ax.set_xlabel(r'log$M_{\rm vir}$ $[M_\odot]$ ')
    ax.set_ylabel(r' $M_*/M_{\rm vir} $')
    ax.text(14,0.03,r'$\bar{z}$= %3.2f'%(zcen))
    ax.text(14,0.04,r'$%d$ galaxies'%(ngaltot))
    ax.xaxis.set_major_locator(majorLocator)
    ax.xaxis.set_major_formatter(majorFormatter)
    ax.xaxis.set_minor_locator(minorLocator)
    ax.legend(loc=1,fontsize=12,frameon=False)
    plt.tight_layout()
    plt.savefig("stellar_halo_ratio_%s_%s.pdf"%(outid,runname))
    plt.close("all")
def plotcon(binx,biny,nhist2,ax,delchoice=0.5):
    """
    Draw contours of a 2d histogram on the given axes; return the
    matplotlib ContourSet (so the caller can attach a colorbar).

    binx, biny : bin centers along x and y
    nhist2     : 2d histogram as returned by N.histogram2d (x along axis 0)
    delchoice  : unused; kept for backward compatibility with callers
                 (the old local `delta = delchoice` was dead code)

    adapted from
    http://matplotlib.org/examples/pylab_examples/contour_demo.html
    """
    matplotlib.rcParams['xtick.direction'] = 'out'
    matplotlib.rcParams['ytick.direction'] = 'out'
    # rot90 followed by flipud is a transpose: puts x on the horizontal
    # axis with the origin at lower left, see
    # http://oceanpython.org/2013/02/25/2d-histogram/
    nhist2 = N.rot90(nhist2) #setorigin=lower axes bottom to top
    nhist2 = N.flipud(nhist2) #nhist2.T() transpose for physics
    cs = ax.contour(binx,biny,nhist2)
    return(cs)
def mstar_sfr(fname="inputfile.dat",hval=0.67,slopeval=0.,shiftval=0.,runname="runname"):
    """
    from M. White

    Contour plot of log SFR vs log M* for the galaxies in fname, with the
    default and shifted/tilted color-cut lines overplotted.
    Saves mstarsfr_<z*100>_<runname>.pdf.
    """
    ff = open(fname)
    gals = N.loadtxt(ff, usecols=(0,1,4))
    ff.close()
    # NOTE(review): only three columns are read (logm, sfr, zred); the
    # comment below lists four -- presumably stale, confirm file layout.
    #logm, sfr, zval, logMh
    zmin = gals[:,2].min()
    zmax = gals[:,2].max()
    zcen = gals[:,2].sum()/gals[:,2].size
    #convert to h70 stellar mass
    logm = gals[:,0]+2*N.log10(hval/0.70)
    # +1e-8 keeps log10 finite for sfr == 0; the same h70 shift is applied
    # to log sfr as to log M* -- presumably intentional, TODO confirm
    logsfr = N.log10(gals[:,1]+1.e-8) + 2*N.log10(hval/0.70)
    fig,ax=plt.subplots(1,1)
    maxms = 12.
    minms = 8.5
    maxsf = 3.
    minsf = -5.
    nbin = 40
    # seems reversed
    nhist2,binx,biny = N.histogram2d(logm,logsfr,nbin,range=([minms,maxms],[minsf,maxsf]))
    # arcsinh compresses the dynamic range (log-like but fine at 0)
    nhist2 = N.arcsinh(nhist2)
    #nhist2 = nhist2*1./nhist2.sum()
    # shift bin edges to bin centers and drop the extra trailing edge
    binx +=(binx[1]-binx[0])/2.
    biny +=(biny[1]-biny[0])/2.
    binx = N.delete(binx,nbin)
    biny = N.delete(biny,nbin)
    cs=plotcon(binx,biny,nhist2,ax)
    fig.colorbar(cs)
    logms = N.arange(8.5,12.5,0.1)
    # default quiescent/starforming dividing line (Moustakas et al. style)
    logsf = -0.49+ 0.65*(logms - 10)+1.07*(zcen-0.1)
    ax.plot(logms,logsf,'--',lw=2,label=r'log(sfr) =-0.49+ 0.65(log $M^*$ - 10)+1.07($\bar{z}$-0.1)')
    logsf = -0.49+ (0.65+slopeval)*(logms - 10)+1.07*(zcen-0.1)+shiftval
    ax.plot(logms,logsf,lw=2,label=r'log(sfr) =-0.49+ %3.2f(log $M^*$ - 10)+1.07($\bar{z}$-0.1)+%3.2f'%(slopeval+0.65,shiftval))
    ax.legend(loc=4)
    ax.set_xlabel('log M* $[M_\odot]$')
    ax.set_ylabel('log SFR ')
    ax.set_title(r'$\bar{z}_{\rm sim}$ = %3.2f'%(zcen))
    plt.tight_layout()
    plt.savefig("mstarsfr_%d_%s.pdf"%(zcen*100.5,runname))
    plt.close("all")
def ssfr_slice(fname="inputfile.dat",hval=0.67,runname="runname"):
    """
    slice of fixed mstar (log 10 bin width 0.5)

    2x2 grid of SSFR histograms for four stellar-mass slices starting at
    logM* = 9.5, 10.0, 10.5, 11.0.  Saves ssfr_z<z*100>_<runname>.pdf.
    NOTE: Python 2 only -- panel index uses integer division i/2.
    """
    ff = open(fname)
    gals = N.loadtxt(ff, usecols=(0,1,4,6))
    ff.close()
    #logm, sfr, zval, logMh
    zmin = gals[:,2].min()
    zmax = gals[:,2].max()
    zcen = gals[:,2].sum()/gals[:,2].size
    #convert to h70 stellar mass
    logm = gals[:,0]+2*N.log10(hval/0.70)
    # +1e-8 keeps log10 finite for sfr == 0
    logsfr = N.log10( gals[:,1]+1.e-8) + 2*N.log10(hval/0.70)
    fig,ax=plt.subplots(2,2)
    minssfr = -13
    maxssfr = -8
    for i in range(4):
        mstarlow = 9.5+i*0.5
        jj = N.nonzero((logm>mstarlow)&(logm<=mstarlow +0.5))[0]
        ssfr = logsfr[jj]-logm[jj]
        nbin = 50
        #maxssfr = ssfr.max()
        #minssfr = ssfr.min()
        nhist,bins = N.histogram(ssfr,nbin,range=(minssfr,maxssfr))
        nhist = nhist*1./jj.size #fraction of all galaxies in M*
        #range, including those outside for minssfr,maxssfr range
        # shift edges to bin centers, drop trailing edge
        bins +=(bins[1]-bins[0])/2.
        bins = N.delete(bins,nbin)
        ax[i/2,i%2].set_xlim(minssfr,maxssfr)
        ax[i/2,i%2].step(bins, nhist,'k',where='mid')
        if (i!=1):
            ax[i/2,i%2].set_title("%3.1f<logM*<%3.1f "%(mstarlow,mstarlow+0.5),fontsize="10")
        if (i==1):
            # the z annotation only fits in the upper-right panel title
            ax[i/2,i%2].set_title(r'%3.1f<logM*<%3.1f, $\bar{z}=$%3.2f '%(mstarlow,mstarlow+0.5,zcen),fontsize="10")
        ax[i/2,i%2].set_ylabel('frac of %d gals'%(ssfr.size))
        ax[i/2,i%2].set_xlabel('$log_{10}$ SSFR [yr${}^{-1}$]')
    plt.tight_layout()
    plt.savefig("ssfr_z%d_%s.pdf"%(zcen*100.5,runname))
    plt.close("all")
def runsuite(zcen=0.45, fname="inputfile.dat",hval=0.7,omm=0.31,slopeval=0.,shiftval=0, boxside=256,runname="runname",delz=0.1,ramin=-2,ramax=2,decmin=-2,decmax=2):
    """
    Driver: run the full validation plot suite on one input catalog.

    inputfile:
    logM*(0) [M_o], sfr(1) [M_o/yr],ra(2),dec(3),zred(4),ifsat(5),logmh(6)[M_o]

    Produces the SMF comparison plots, the M*-Mh box-whisker plots, the
    M*-SFR contour plot and the SSFR slices, then prints provenance notes
    for each published data set.  (Python 2 print statements.)
    """
    #4 smf's
    plot4sep(zcen,fname,hval,omm,slopeval,shiftval, boxside,runname,delz,ramin,ramax,decmin,decmax)
    plot4tog(zcen,fname,hval,omm, slopeval,shiftval,boxside,runname,delz,ramin,ramax,decmin,decmax)
    plotboxwhisker("z%d"%(zcen*100.5),fname,zcen,runname) #M*/Mh of Mh
    plotboxwhisker_mult("z%d"%(zcen*100.5),fname,zcen,runname) #M* of Mh
    mstar_sfr(fname,hval,slopeval,shiftval,runname)
    ssfr_slice(fname,hval,runname)
    #plotting M*/Mh all different redshifts has to be tailored by hand
    print "bwc_comp from Behroozi/Wechlsler/Conroy compilation Fig.3"
    print " arxiv http://arxiv.org/abs/1207.6105"
    print " "
    print "primus_bc03 from www.behroozi.com/data.html"
    print " behroozi-2013-data-compilation calculated in "
    print " http://arxiv.org/abs/1207.6105 , http://arxiv.org/abs/1209.3013"
    print " "
    print "sdss_gal from Moustakas++ 2013, table 3,http://arxiv.org/abs/1301.1688"
    print " "
    print "primus_fsps from Moustakas++ 2013, table 4,http://arxiv.org/abs/1301.1688"
    print " "
    print "zfourge from Tomczak++2014, table 1, http://arxiv.org/abs/1309.5972"
    print "survey details, Straatman et al, submitted"
    print " "
    print "cosmos/ultravista from Muzzin++2013, paper http://arxiv.org/abs/1303.4409"
    print " data http://cosmos2.phy.tufts.edu/~danilo/Downloads.html"
    print " "
    print " hen15 Henriques compilation http://arxiv.org/abs/1410.0365"
    print " data http://galformod.mpa-garching.mpg.de/public/LGalaxies/figures_and_data.php "
    print " "
    print "vipers16 from Moutard et al, 2016, table 2, http://arxiv.org/abs/1602.05917"
    print " "
    print "default color cut from Moustakas et al 2013, modified to"
    print "log(sfr) =-0.49+ (0.65+%3.2f)*(log M* - 10)+1.07(zbar-0.1)+%3.2f"%(slopeval,shiftval)
| jdcphysics/validation | codes/vsuite/valid_suite.py | Python | mit | 59,806 | [
"Galaxy"
] | 87046224f3311bd68b6ec4795f786a3a094e9f0e1252ea2f9c49ba2dcfcbc60c |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from mainapp.views import LoadUserLikes, HomeView, TestView, LoadQuestions, ResultView
from django.views.decorators.cache import cache_page
from action.api import UserDataDetail, UserDataList
# URL configuration: app views, admin, user management and auth, plus
# media serving in development.
urlpatterns = [
    url(
        r'^$',
        view=HomeView.as_view(),
        name='home'
    ),
    url(
        r'^load/',
        view=LoadUserLikes.as_view(),
        name='load'
    ),
    url(
        r'^test/',
        view=TestView.as_view(),
        name='test'
    ),
    url(
        r'^result/(?P<testid>[0-9]+)/$',
        view=ResultView.as_view(),
        name='result'
    ),
    url(
        r'^loadquestions/',
        view=LoadQuestions.as_view(),
        name='loadquestions'
    ),
    # url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, admin.site.urls),
    # User management
    url(r'^users/', include('fbstats.users.urls', namespace='users')),
    # allauth routes (previously included twice; duplicate removed)
    url(r'^accounts/', include('allauth.urls')),
    url(
        r'^api-auth/', include(
            'rest_framework.urls', namespace='rest_framework')),
    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
    if 'debug_toolbar' in settings.INSTALLED_APPS:
        import debug_toolbar
        urlpatterns += [
            url(r'^__debug__/', include(debug_toolbar.urls)),
        ]
| bhanduroshan/fbstats-docker | config/urls.py | Python | mit | 2,359 | [
"VisIt"
] | 5c88d1a80c65213a6ce8a0f40fe5a30cf93e9fb27bb17c38647e9db8c1c404e7 |
# Copyright (c) 2010 ActiveState Software Inc. All rights reserved.
"""
pypm.client.fs
~~~~~~~~~~~~~~
File system related functionality, which includes:
- Downloading packages from remote location to local cache
- Extracting packages into the appropriate directory structure
(user site layout)
"""
import os
import logging
import six.moves
from applib import sh
from applib.misc import xjoin
from pypm.common import net, licensing
from pypm.common.util import wrapped, concise_path
from pypm.common.package import PackageFile
from pypm.common.repository import RepoPackage
from pypm.common.supported import PLATNAME
from pypm.client.base import application
from pypm.client import error
# Module-level logger for download/extract diagnostics.
LOG = logging.getLogger(__name__)
# On-disk location for downloaded package files, under the per-user cache.
# TODO: we are not actually utilizing the download "cache" yet.
DOWNLOAD_CACHE = xjoin(application.locations.user_cache_dir, 'download-cache')
class Downloader:
    """Download repository packages into the local download cache,
    enforcing Business Edition licensing for BE-only packages."""

    def __init__(self, pypmenv):
        self.pypmenv = pypmenv

    def download_packages(self, packages):
        """Download the given list of packages

        We first download the BE packages first in order to catch license
        related errors early. This does not, however, prevent late errors
        occuring due to missing/expired license.

        Return a dictionary of location to downloaded packages.
        """
        # reorder packages to keep BE at top
        paid, free = [], []
        for pkg in packages:
            (paid if pkg.requires_be_license else free).append(pkg)
        packages = paid + free

        locations = {}
        for pkg in packages:
            locations[pkg] = self.download_package(pkg)
        return locations

    def download_package(self, package):
        """Download one RepoPackage; return the local file path.

        Raises error.PackageAccessError when a required BE license is
        missing, or when the server rejects the download.
        """
        assert type(package) is RepoPackage
        sh.mkdirs(DOWNLOAD_CACHE)

        auth = licensing.get_be_license_auth()
        send_license = package.requires_be_license
        license_installed = auth is not None

        # A license is required for this package, but no license is installed
        if not license_installed and send_license:
            msg = '\n'.join([
                wrapped('If you have purchased ActivePython Business Edition, '
                        'please login to your account at:'),
                '  https://account.activestate.com/',
                wrapped('and download and run the license installer for your '
                        'platform.'),
                '',
                wrapped('Please visit <%s> to learn more about the '
                        'ActivePython Business Edition offering.' % \
                        licensing.BE_HOME_PAGE)])
            raise error.PackageAccessError(
                package, 'requires Business Edition subscription', msg)

        try:
            # At this point, the user is already known to have a BE license
            file_location, _ = net.download_file(
                package.download_url,
                DOWNLOAD_CACHE,
                dict(
                    auth=auth,
                    use_cache=True,  # XXX: this introduces network delay
                                     # (If-None-Match) despite having the file
                                     # in download cache
                    # TODO: abstract client.store...autosync
                    save_properties=True,
                    start_info='{{status}}: [{0}] {1} {2}'.format(
                        # BUGFIX: six.moves.urlparse is not a callable (the
                        # module lives at six.moves.urllib.parse)
                        six.moves.urllib.parse.urlparse(
                            package.download_url).netloc,
                        package.name,
                        package.printable_version)),
                interactive=self.pypmenv.options['interactive'])
        # BUGFIX: HTTPError lives at six.moves.urllib.error, not six.moves
        except six.moves.urllib.error.HTTPError as e:
            reason = str(e)
            LOG.debug("HTTPError while accessing URL: %s -- reason: %s",
                      package.download_url, reason)
            if send_license and e.code in (401, 402, 403):
                msg = wrapped(
                    'Your ActivePython Business Edition subscription seems to '
                    'have expired. Please visit your account at '
                    'https://account.activestate.com/ to renew your subscription.'
                )
            else:
                msg = ''
            raise error.PackageAccessError(package, reason, msg)

        return file_location
class Extractor:
    """Extracts the binary package to Python directory

    This is not as simple as it may sound. While we build all packages in a
    simple Python directory structure (including virtualenv) and store that very
    same directory structure in the created binary packages, the *user* may be
    using a different directory structure.

    PEP 370, for example, uses $APPDATA/Python/Python26 as LIB directory on
    Windows; ~/Library/Python/lib/python/ on OSX/2.7. But as far as the binary
    package file is concerned, when extracted - as it is - over $APPDATA/Python,
    it implicitly expects the LIB directory to be $APPDATA/Python/Lib.

    Therefore we 'rewrite' the paths in tarball (.pypm/data.tar.gz) to the
    corresponding install scheme path[1] in local ``pyenv``.

    -
    [1] See ``pypm.common.python...get_install_scheme_path`` function
    """

    def __init__(self, pypmenv):
        self.pypmenv = pypmenv

    def extract_package(self, pkg_filename, name):
        # Unpack the binary package file into the local Python environment;
        # returns the list of extracted tar member names.
        bpkgfile = PackageFile(pkg_filename)
        pyenv = self.pypmenv.pyenv  # NOTE(review): unused local -- candidate for removal
        return self._extract_to_install_scheme(bpkgfile, name)

    def _pyenv_scheme_path(self, path):
        # Return the install-scheme directory named by `path` (e.g. 'purelib')
        # relative to pyenv.base_dir, with '/' separators so it can be
        # compared against tar member names.
        pyenv = self.pypmenv.pyenv
        fullpath = pyenv.get_install_scheme_path(path)
        assert fullpath.startswith(pyenv.base_dir), \
            "'%s' is not based on '%s' (%s)" % (
                fullpath, pyenv.base_dir, pyenv.root_dir)
        p = os.path.relpath(fullpath, pyenv.base_dir)
        if PLATNAME.startswith('win'):
            # tar members always use forward slashes
            p = p.replace('\\', '/')
        return p

    def _extract_to_install_scheme(self, bpkgfile, name):
        # Extract `bpkgfile` over pyenv.base_dir, rewriting each member's
        # path from the build machine's install scheme to the local pyenv's
        # scheme, and refusing to overwrite existing files unless forced.
        pyenv = self.pypmenv.pyenv
        # Install scheme used by the build environment (i.e., pyenv used by
        # pypm-builder on our build machines).
        as_build_scheme = {
            'win': {
                'purelib': 'lib/site-packages',
                'stdlib': 'lib',
                'scripts': 'scripts',
            },
            'unix': {
                'purelib': 'lib/python{0}/site-packages'.format(pyenv.pyver),
                'stdlib': 'lib/python{0}'.format(pyenv.pyver),
                'scripts': 'bin',
            },
        }
        plat = PLATNAME.startswith('win') and 'win' or 'unix'
        # Scheme used by pyenv
        pyenv_scheme = {
            'purelib': self._pyenv_scheme_path('purelib'),
            'stdlib': self._pyenv_scheme_path('stdlib'),
            'scripts': self._pyenv_scheme_path('scripts'),
        }
        files_to_overwrite = []
        force_overwrite = self.pypmenv.options['force']
        # Hack #1: Don't check for distribute and pip, as virtualenvs usually
        # already have a copy of them installed.
        if name in ('distribute', 'setuptools', 'pip'):
            force_overwrite = True
        with bpkgfile.extract_over2(pyenv.base_dir) as tf:
            for tinfo in tf.getmembers():
                # Replace AS build virtualenv scheme with the user's scheme
                # Eg: lib/site-packages/XYZ -> %APPDATA%/Python/Python26/XYZ
                # NOTE(review): this loop variable shadows the `name`
                # parameter (the package name); harmless today because the
                # parameter is not read after this point, but fragile.
                for name, prefix in as_build_scheme[plat].items():
                    if tinfo.name.lower().startswith(prefix):
                        old = tinfo.name
                        new = pyenv_scheme[name] + old[len(prefix):]
                        if new != old:
                            LOG.debug('fs:extract: transforming "%s" to "%s"',
                                      old, new)
                            tinfo.name = new
                # Check for overwrites
                if os.path.lexists(tinfo.name) and not os.path.isdir(tinfo.name):
                    # Hack #2: allow overwriting of *.pth files (setuptools
                    # hackishness) eg: [...]/site-packages/setuptools.pth
                    if not tinfo.name.endswith('.pth'):
                        files_to_overwrite.append(tinfo.name)
            if files_to_overwrite:
                LOG.debug(
                    'install requires overwriting of %d files:\n%s',
                    len(files_to_overwrite),
                    '\n'.join([os.path.join(pyenv.base_dir, f)
                               for f in files_to_overwrite]))
                if force_overwrite:
                    LOG.warn('overwriting %d files' % len(files_to_overwrite))
                else:
                    # Build a single wrapped error message telling the user
                    # how to force the overwrite / inspect the file list.
                    errmsg = ['cannot overwrite "%s"' % concise_path(os.path.join(
                        pyenv.base_dir, files_to_overwrite[0]))]
                    if len(files_to_overwrite) > 1:
                        errmsg.append(' (and %d other files)' % (len(files_to_overwrite)-1,))
                    errmsg.append('; run pypm as "pypm --force ..." to overwrite anyway')
                    if len(files_to_overwrite) > 1:
                        errmsg.append('; run "pypm log" to see the full list of files to be overwritten')
                    raise IOError(wrapped(''.join(errmsg)))
            return tf.getnames()

    def undo_extract(self, files_list):
        """Undo whatever self.extract_package did"""
        # sort in descending order so that children of a directory
        # get removed before the directory itself
        files_list.sort()
        files_list.reverse()
        for path in files_list:
            path = self.pypmenv.pyenv.get_abspath(path)
            if not os.path.lexists(path):
                LOG.warn('no longer exists: %s', path)
            else:
                if os.path.isfile(path) or os.path.islink(path):
                    sh.rm(path)
                    # remove the corresponding .pyc and .pyo files
                    if path.endswith('.py'):
                        sh.rm(path+'c')
                        sh.rm(path+'o')
                elif os.path.isdir(path):
                    if len(os.listdir(path)) > 0:
                        # cannot delete directory with files added
                        # after the installation
                        LOG.debug(
                            'non-empty directory: %s - hence skipping', path)
                    else:
                        # directory `path` is empty
                        sh.rm(path)
                else:
                    raise TypeError(
                        "don't know what to do with this type of file: " + path)
| igemsoftware/SYSU-Software2013 | project/Python27_32/Lib/site-packages/pypm/client/fs.py | Python | mit | 10,900 | [
"VisIt"
] | 1d04553d575dbea9430d81768295894668e53daea7e881e1f98317a5339e4d21 |
# Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
#
"""@file des_shapelet.py
Part of the DES module. This file implements one way that DES measures the PSF.
The DES_Shapelet class handles interpolated shapelet decompositions, which are generally
stored in *_fitpsf.fits files.
"""
import galsim
class DES_Shapelet(object):
    """Class that handles DES files describing interpolated polar shapelet decompositions.

    These are usually stored as *_fitpsf.fits files, although there is also an ASCII
    version stored as *_fitpsf.dat.

    Typical usage:

        des_shapelet = galsim.des.DES_Shapelet(fitpsf_file_name)
        ...
        pos = galsim.Position(image_x, image_y)  # position in pixels on the image
                                                 # NOT in arcsec on the sky!
        psf = des_shapelet.getPSF(pos)

    Note that the DES_Shapelet profile is measured with respect to sky coordinates, not
    pixel coordinates. So if you want the drawn image to look like the original, it should be
    drawn with the same WCS as found in the original image. However, GalSim doesn't yet have
    the ability to handle such WCS functions. This is Issue #364. Until then, an approximate
    workaround is to use pixel_scale=0.262, and apply a rotation of -90 degrees before drawing.

    This class will only interpolate within the defining bounds. It won't extrapolate
    beyond the bounding box of where the stars defined the interpolation.
    If you try to use it with an invalid position, it will throw an IndexError.
    You can check whether a position is valid with

        if des_shapelet.bounds.includes(pos):
            psf = des_shapelet.getPSF(pos)
        else:
            [...skip this object...]

    @param file_name   The file name to be read in.
    @param dir         Optionally a directory name can be provided if the file_name does not
                       already include it.
    @param file_type   Either 'ASCII' or 'FITS' or None. If None, infer from the file name
                       ending (default = None).
    """
    _req_params = { 'file_name' : str }
    _opt_params = { 'file_type' : str , 'dir' : str }
    _single_params = []
    _takes_rng = False

    def __init__(self, file_name, dir=None, file_type=None):
        if dir:
            import os
            file_name = os.path.join(dir,file_name)
        self.file_name = file_name

        if not file_type:
            # Infer the file type from the file name extension.
            if self.file_name.lower().endswith('.fits'):
                file_type = 'FITS'
            else:
                file_type = 'ASCII'
        file_type = file_type.upper()
        if file_type not in ['FITS', 'ASCII']:
            raise ValueError("file_type must be either FITS or ASCII if specified.")

        if file_type == 'FITS':
            self.read_fits()
        else:
            self.read_ascii()

    def read_ascii(self):
        """Read in a DES_Shapelet stored using the ASCII-file version."""
        import numpy
        # Bugfix: use a context manager so the file handle is always closed
        # (the original `open` was never closed).
        with open(self.file_name, 'r') as fin:
            lines = fin.readlines()
        temp = lines[0].split()
        self.psf_order = int(temp[0])
        # Bugfix: floor division (//) keeps the sizes ints on Python 3;
        # plain / would make them floats and break the slicing below.
        self.psf_size = (self.psf_order+1) * (self.psf_order+2) // 2
        self.sigma = float(temp[1])
        self.fit_order = int(temp[2])
        self.fit_size = (self.fit_order+1) * (self.fit_order+2) // 2
        self.npca = int(temp[3])

        temp = lines[1].split()
        self.bounds = galsim.BoundsD(
            float(temp[0]), float(temp[1]),
            float(temp[2]), float(temp[3]))

        temp = lines[2].split()
        assert int(temp[0]) == self.psf_size
        self.ave_psf = numpy.array(temp[2:self.psf_size+2]).astype(float)
        assert self.ave_psf.shape == (self.psf_size,)

        temp = lines[3].split()
        assert int(temp[0]) == self.npca
        assert int(temp[1]) == self.psf_size
        self.rot_matrix = numpy.array(
            [ lines[4+k].split()[1:self.psf_size+1] for k in range(self.npca) ]
            ).astype(float)
        assert self.rot_matrix.shape == (self.npca, self.psf_size)

        temp = lines[5+self.npca].split()
        assert int(temp[0]) == self.fit_size
        assert int(temp[1]) == self.npca
        self.interp_matrix = numpy.array(
            [ lines[6+self.npca+k].split()[1:self.npca+1] for k in range(self.fit_size) ]
            ).astype(float)
        assert self.interp_matrix.shape == (self.fit_size, self.npca)

    def read_fits(self):
        """Read in a DES_Shapelet stored using the FITS-file version."""
        import pyfits
        cat = pyfits.getdata(self.file_name,1)
        # These fields each only contain one element, hence the [0]'s.
        self.psf_order = cat.field('psf_order')[0]
        # Bugfix: floor division (//) so the sizes stay ints under Python 3.
        self.psf_size = (self.psf_order+1) * (self.psf_order+2) // 2
        self.sigma = cat.field('sigma')[0]
        self.fit_order = cat.field('fit_order')[0]
        self.fit_size = (self.fit_order+1) * (self.fit_order+2) // 2
        self.npca = cat.field('npca')[0]

        self.bounds = galsim.BoundsD(
            float(cat.field('xmin')[0]), float(cat.field('xmax')[0]),
            float(cat.field('ymin')[0]), float(cat.field('ymax')[0]))

        self.ave_psf = cat.field('ave_psf')[0]
        assert self.ave_psf.shape == (self.psf_size,)

        # Note: older pyfits versions don't get the shape right.
        # For newer pyfits versions the reshape command should be a no op.
        self.rot_matrix = cat.field('rot_matrix')[0].reshape((self.psf_size,self.npca)).T
        assert self.rot_matrix.shape == (self.npca, self.psf_size)

        self.interp_matrix = cat.field('interp_matrix')[0].reshape((self.npca,self.fit_size)).T
        assert self.interp_matrix.shape == (self.fit_size, self.npca)

    def getPSF(self, pos, gsparams=None):
        """Returns the PSF at position pos

        @param pos       The position in pixel units for which to build the PSF.
        @param gsparams  (Optional) A GSParams instance to pass to the constructed GSObject.

        @returns a galsim.Shapelet instance.

        Raises IndexError if pos is outside the interpolation bounds.
        """
        if not self.bounds.includes(pos):
            raise IndexError("position in DES_Shapelet.getPSF is out of bounds")

        import numpy
        Px = self._definePxy(pos.x,self.bounds.xmin,self.bounds.xmax)
        Py = self._definePxy(pos.y,self.bounds.ymin,self.bounds.ymax)
        order = self.fit_order
        # Products of the 1-d polynomial values in the standard triangular order.
        P = numpy.array([ Px[n-q] * Py[q] for n in range(order+1) for q in range(n+1) ])
        assert len(P) == self.fit_size
        # Note: This is equivalent to:
        #
        #     P = numpy.empty(self.fit_size)
        #     k = 0
        #     for n in range(self.fit_order+1):
        #         for q in range(n+1):
        #             P[k] = Px[n-q] * Py[q]
        #             k = k+1

        # Interpolate to the PCA coefficients, rotate back to shapelet
        # coefficients, and add the mean PSF.
        b1 = numpy.dot(P,self.interp_matrix)
        b = numpy.dot(b1,self.rot_matrix)
        assert len(b) == self.psf_size
        b += self.ave_psf

        ret = galsim.Shapelet(self.sigma, self.psf_order, b, gsparams=gsparams)
        return ret

    def _definePxy(self, x, min, max):
        """Return polynomial values P_0..P_fit_order at x, rescaled from [min,max] to [-1,1].

        Uses the Legendre-style three-term recurrence
        i*P_i = (2i-1)*x1*P_{i-1} - (i-1)*P_{i-2}.
        """
        import numpy
        x1 = (2.*x-min-max)/(max-min)
        temp = numpy.empty(self.fit_order+1)
        temp[0] = 1
        if self.fit_order > 0:
            temp[1] = x1
            for i in range(2,self.fit_order+1):
                temp[i] = ((2.*i-1.)*x1*temp[i-1] - (i-1.)*temp[i-2]) / float(i)
        return temp
# Now add this class to the config framework.
import galsim.config

# First we need to add the class itself as a valid input_type.
# (Tuple fields presumably: dotted class name, extra build args, and a
# flag -- TODO confirm against galsim.config.process.)
galsim.config.process.valid_input_types['des_shapelet'] = ('galsim.des.DES_Shapelet', [], False)
# Also make a builder to create the PSF object for a given position.
# The builders require 4 args.
# config is a dictionary that includes 'type' plus other items you might want to allow or require.
# key is the key name one level up in the config structure. Probably 'psf' in this case.
# base is the top level config dictionary where some global variables are stored.
# ignore is a list of key words that might be in the config dictionary that you should ignore.
def BuildDES_Shapelet(config, key, base, ignore, gsparams):
    """@brief Build a DES_Shapelet-interpolated PSF GSObject from user input.
    """
    opt = { 'flux' : float }
    kwargs, safe = galsim.config.GetAllParams(config, key, base, opt=opt, ignore=ignore)

    if 'des_shapelet' not in base:
        raise ValueError("No DES_Shapelet instance available for building type = DES_Shapelet")
    des_shapelet = base['des_shapelet']

    if 'image_pos' not in base:
        raise ValueError("DES_Shapelet requested, but no image_pos defined in base.")
    image_pos = base['image_pos']

    # Convert gsparams from a dict to an actual GSParams object
    gsparams = galsim.GSParams(**gsparams) if gsparams else None

    # Positions outside the interpolation bounds are skipped, not errors.
    if not des_shapelet.bounds.includes(image_pos):
        raise galsim.config.gsobject.SkipThisObject(
            'Position ' + str(image_pos) + ' not in interpolation bounds: '
            + str(des_shapelet.bounds))
    psf = des_shapelet.getPSF(image_pos, gsparams)

    if 'flux' in kwargs:
        psf.setFlux(kwargs['flux'])

    # The second item is "safe": never True here, because the PSF depends on
    # the position, so the interpolated profile must not be reused.
    return psf, False
# Register this builder with the config framework:
# maps the config 'type' string to the dotted path of the builder function.
galsim.config.gsobject.valid_gsobject_types['DES_Shapelet'] = 'galsim.des.BuildDES_Shapelet'
| mardom/GalSim | galsim/des/des_shapelet.py | Python | gpl-3.0 | 10,378 | [
"Galaxy"
] | ff5f999b8b8e1bb2290142fd366f1ad7ab1594acfc3e98db2d39a6b73f128fd8 |
# Copyright 2000-2009 by Iddo Friedberg. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Iddo Friedberg idoerg@cc.huji.ac.il
"""Substitution matrices, log odds matrices, and operations on them.
General:
--------
This module provides a class and a few routines for generating
substitution matrices, similar to BLOSUM or PAM matrices, but based on
user-provided data.
The class used for these matrices is SeqMat
Matrices are implemented as a dictionary. Each index contains a 2-tuple,
which are the two residue/nucleotide types replaced. The value differs
according to the matrix's purpose: e.g in a log-odds frequency matrix, the
value would be log(Pij/(Pi*Pj)) where:
Pij: frequency of substitution of letter (residue/nucleotide) i by j
Pi, Pj: expected frequencies of i and j, respectively.
Usage:
------
The following section is laid out in the order by which most people wish
to generate a log-odds matrix. Of course, interim matrices can be
generated and investigated. Most people just want a log-odds matrix,
that's all.
Generating an Accepted Replacement Matrix:
------------------------------------------
Initially, you should generate an accepted replacement matrix (ARM)
from your data. The values in ARM are the _counted_ number of
replacements according to your data. The data could be a set of pairs
or multiple alignments. So for instance if Alanine was replaced by
Cysteine 10 times, and Cysteine by Alanine 12 times, the corresponding
ARM entries would be:
['A','C']: 10,
['C','A'] 12
As order doesn't matter, user can already provide only one entry:
['A','C']: 22
A SeqMat instance may be initialized with either a full (first
method of counting: 10, 12) or half (the latter method, 22) matrix. A
Full protein alphabet matrix would be of the size 20x20 = 400. A Half
matrix of that alphabet would be 20x20/2 + 20/2 = 210. That is because
same-letter entries don't change. (The matrix diagonal). Given an
alphabet size of N:
Full matrix size:N*N
Half matrix size: N(N+1)/2
If you provide a full matrix, the constructor will create a half-matrix
automatically.
If you provide a half-matrix, make sure of a (low, high) sorted order in
the keys: there should only be
a ('A','C') not a ('C','A').
Internal functions:
Generating the observed frequency matrix (OFM):
-----------------------------------------------
Use: OFM = _build_obs_freq_mat(ARM)
The OFM is generated from the ARM, only instead of replacement counts, it
contains replacement frequencies.
Generating an expected frequency matrix (EFM):
----------------------------------------------
Use: EFM = _build_exp_freq_mat(OFM,exp_freq_table)
exp_freq_table: should be a freqTableC instantiation. See freqTable.py for
detailed information. Briefly, the expected frequency table has the
frequencies of appearance for each member of the alphabet
Generating a substitution frequency matrix (SFM):
-------------------------------------------------
Use: SFM = _build_subs_mat(OFM,EFM)
Accepts an OFM, EFM. Provides the division product of the corresponding
values.
Generating a log-odds matrix (LOM):
-----------------------------------
Use: LOM=_build_log_odds_mat(SFM[,logbase=10,factor=10.0,roundit=1])
Accepts an SFM. logbase: base of the logarithm used to generate the
log-odds values. factor: factor used to multiply the log-odds values.
roundit: default - true. Whether to round the values.
Each entry is generated by log(LOM[key])*factor
And rounded if required.
External:
---------
In most cases, users will want to generate a log-odds matrix only, without
explicitly calling the OFM --> EFM --> SFM stages. The function
build_log_odds_matrix does that. User provides an ARM and an expected
frequency table. The function returns the log-odds matrix.
Methods for subtraction, addition and multiplication of matrices:
-----------------------------------------------------------------
* Generation of an expected frequency table from an observed frequency
matrix.
* Calculation of linear correlation coefficient between two matrices.
* Calculation of relative entropy is now done using the
_make_relative_entropy method and is stored in the member
self.relative_entropy
* Calculation of entropy is now done using the _make_entropy method and
is stored in the member self.entropy.
* Jensen-Shannon distance between the distributions from which the
matrices are derived. This is a distance function based on the
distribution's entropies.
"""
from __future__ import print_function
import re
import sys
import copy
import math
import warnings
# BioPython imports
import Bio
from Bio import Alphabet
from Bio.SubsMat import FreqTable
log = math.log

# Matrix types (tags for what a SeqMat instance holds; see module docstring):
NOTYPE = 0    # unspecified
ACCREP = 1    # accepted replacement counts
OBSFREQ = 2   # observed frequencies
SUBS = 3      # substitution (observed/expected) ratios
EXPFREQ = 4   # expected frequencies
LO = 5        # log-odds values

# Values at or below this are treated as zero (avoids taking log(0)).
EPSILON = 0.00000000000001
class SeqMat(dict):
    """A Generic sequence matrix class

    The key is a 2-tuple containing the letter indices of the matrix. Those
    should be sorted in the tuple (low, high). Because each matrix is dealt
    with as a half-matrix.
    """

    def _alphabet_from_matrix(self):
        # Rebuild self.alphabet.letters from the letters used in the keys.
        ab_dict = {}
        s = ''
        for i in self:
            ab_dict[i[0]] = 1
            ab_dict[i[1]] = 1
        for i in sorted(ab_dict):
            s += i
        self.alphabet.letters = s

    def __init__(self, data=None, alphabet=None, mat_name='', build_later=0):
        # User may supply:
        # data: matrix itself
        # mat_name: its name. See below.
        # alphabet: an instance of Bio.Alphabet, or a subclass. If not
        # supplied, constructor builds its own from that matrix.
        # build_later: skip the matrix size assertion. User will build the
        # matrix after creating the instance. Constructor builds a half matrix
        # filled with zeroes.
        assert isinstance(mat_name, str)
        # "data" may be:
        # 1) None --> then self.data is an empty dictionary
        # 2) type({}) --> then self takes the items in data
        # 3) An instance of SeqMat
        # This whole creation-during-execution is done to avoid changing
        # default values, the way Python does because default values are
        # created when the function is defined, not when it is created.
        if data:
            try:
                self.update(data)
            except ValueError:
                raise ValueError("Failed to store data in a dictionary")
        if alphabet is None:
            alphabet = Alphabet.Alphabet()
        assert Alphabet.generic_alphabet.contains(alphabet)
        self.alphabet = alphabet

        # If passed alphabet is empty, use the letters in the matrix itself
        if not self.alphabet.letters:
            self._alphabet_from_matrix()
        # Assert matrix size: half or full
        if not build_later:
            N = len(self.alphabet.letters)
            assert len(self) == N ** 2 or len(self) == N * (N + 1) / 2
        self.ab_list = list(self.alphabet.letters)
        self.ab_list.sort()
        # Names: a string like "BLOSUM62" or "PAM250"
        self.mat_name = mat_name
        if build_later:
            self._init_zero()
        else:
            # Convert full to half
            self._full_to_half()
            self._correct_matrix()
        self.sum_letters = {}
        self.relative_entropy = 0

    def _correct_matrix(self):
        # Normalize every key to (low, high) order.
        # Bugfix: iterate over a snapshot of the keys. The loop inserts and
        # deletes entries, and mutating a dict while iterating it raises
        # "RuntimeError: dictionary changed size during iteration" on
        # Python 3.
        for key in list(self):
            if key[0] > key[1]:
                self[(key[1], key[0])] = self[key]
                del self[key]

    def _full_to_half(self):
        """Convert a full-matrix to a half-matrix."""
        # For instance: two entries ('A','C'):13 and ('C','A'):20 will be summed
        # into ('A','C'): 33 and the index ('C','A') will be deleted
        # alphabet.letters:('A','A') and ('C','C') will remain the same.
        N = len(self.alphabet.letters)
        # Do nothing if this is already a half-matrix
        if len(self) == N * (N + 1) / 2:
            return
        for i in self.ab_list:
            for j in self.ab_list[:self.ab_list.index(i) + 1]:
                if i != j:
                    self[j, i] = self[j, i] + self[i, j]
                    del self[i, j]

    def _init_zero(self):
        # Fill a half-matrix with zeroes over the current alphabet.
        for i in self.ab_list:
            for j in self.ab_list[:self.ab_list.index(i) + 1]:
                self[j, i] = 0.

    def make_entropy(self):
        # Shannon entropy (in bits) of the stored values; result is kept in
        # self.entropy rather than returned.
        self.entropy = 0
        for i in self:
            if self[i] > EPSILON:
                self.entropy += self[i] * log(self[i]) / log(2)
        self.entropy = -self.entropy

    def sum(self):
        """Return a dict of per-letter sums; off-diagonal entries contribute
        half their value to each of the two letters."""
        result = {}
        for letter in self.alphabet.letters:
            result[letter] = 0.0
        for pair, value in self.items():
            i1, i2 = pair
            if i1 == i2:
                result[i1] += value
            else:
                result[i1] += value / 2
                result[i2] += value / 2
        return result

    def print_full_mat(self, f=None, format="%4d", topformat="%4s",
                       alphabet=None, factor=1, non_sym=None):
        """Print the full (square) matrix to file handle f (default stdout)."""
        f = f or sys.stdout
        # create a temporary dictionary, which holds the full matrix for
        # printing
        assert non_sym is None or isinstance(non_sym, float) or \
            isinstance(non_sym, int)
        full_mat = copy.copy(self)
        for i in self:
            if i[0] != i[1]:
                full_mat[(i[1], i[0])] = full_mat[i]
        if not alphabet:
            alphabet = self.ab_list
        topline = ''
        for i in alphabet:
            topline = topline + topformat % i
        topline = topline + '\n'
        f.write(topline)
        for i in alphabet:
            outline = i
            for j in alphabet:
                if alphabet.index(j) > alphabet.index(i) and non_sym is not None:
                    val = non_sym
                else:
                    val = full_mat[i, j]
                val *= factor
                if val <= -999:
                    cur_str = '  ND'
                else:
                    cur_str = format % val
                outline = outline + cur_str
            outline = outline + '\n'
            f.write(outline)

    def print_mat(self, f=None, format="%4d", bottomformat="%4s",
                  alphabet=None, factor=1):
        """Print a nice half-matrix. f=sys.stdout to see on the screen

        User may pass own alphabet, which should contain all letters in the
        alphabet of the matrix, but may be in a different order. This
        order will be the order of the letters on the axes"""
        f = f or sys.stdout
        if not alphabet:
            alphabet = self.ab_list
        bottomline = ''
        for i in alphabet:
            bottomline = bottomline + bottomformat % i
        bottomline = bottomline + '\n'
        for i in alphabet:
            outline = i
            for j in alphabet[:alphabet.index(i) + 1]:
                try:
                    val = self[j, i]
                except KeyError:
                    val = self[i, j]
                val *= factor
                if val == -999:
                    cur_str = '  ND'
                else:
                    cur_str = format % val
                outline = outline + cur_str
            outline = outline + '\n'
            f.write(outline)
        f.write(bottomline)

    def __str__(self):
        """Print a nice half-matrix."""
        output = ""
        alphabet = self.ab_list
        n = len(alphabet)
        for i in range(n):
            c1 = alphabet[i]
            output += c1
            for j in range(i + 1):
                c2 = alphabet[j]
                try:
                    val = self[c2, c1]
                except KeyError:
                    val = self[c1, c2]
                if val == -999:
                    output += '  ND'
                else:
                    output += "%4d" % val
            output += '\n'
        output += '%4s' * n % tuple(alphabet) + "\n"
        return output

    def __sub__(self, other):
        """Return a number which is the subtraction product of the two matrices."""
        mat_diff = 0
        for i in self:
            mat_diff += (self[i] - other[i])
        return mat_diff

    def __mul__(self, other):
        """Return a matrix whose entries are the entry-wise products of the
        two matrices passed."""
        new_mat = copy.copy(self)
        for i in self:
            new_mat[i] *= other[i]
        return new_mat

    def __add__(self, other):
        # Entry-wise sum of the two matrices.
        new_mat = copy.copy(self)
        for i in self:
            new_mat[i] += other[i]
        return new_mat
class AcceptedReplacementsMatrix(SeqMat):
    """Accepted replacements matrix: raw counts of observed substitutions."""
class ObservedFrequencyMatrix(SeqMat):
    """Observed frequency matrix: replacement counts normalized to frequencies."""
class ExpectedFrequencyMatrix(SeqMat):
    """Expected frequency matrix: pair frequencies expected from letter abundances alone."""
class SubstitutionMatrix(SeqMat):
    """Substitution matrix: ratio of observed to expected pair frequencies."""

    def calculate_relative_entropy(self, obs_freq_mat):
        """Calculate and return the relative entropy with respect to an
        observed frequency matrix"""
        total = 0.
        for pair, ratio in self.items():
            # Entries at (effectively) zero carry no information.
            if ratio > EPSILON:
                total += obs_freq_mat[pair] * log(ratio)
        return total / log(2)
class LogOddsMatrix(SeqMat):
    """Log odds matrix: scaled logarithms of the substitution ratios."""

    def calculate_relative_entropy(self, obs_freq_mat):
        """Calculate and return the relative entropy with respect to an
        observed frequency matrix"""
        total = 0.
        for pair, lod in self.items():
            total += obs_freq_mat[pair] * lod / log(2)
        return total
def _build_obs_freq_mat(acc_rep_mat):
    """Build the observed frequency matrix from an accepted replacements matrix.

    Each replacement count in ``acc_rep_mat`` (which should already be a
    half matrix) is divided by the total number of counted replacements.
    """
    grand_total = float(sum(acc_rep_mat.values()))
    result = ObservedFrequencyMatrix(alphabet=acc_rep_mat.alphabet,
                                     build_later=1)
    for pair in acc_rep_mat:
        result[pair] = acc_rep_mat[pair] / grand_total
    return result
def _exp_freq_table_from_obs_freq(obs_freq_mat):
    """Derive single-letter expected frequencies from an observed frequency matrix.

    A diagonal entry contributes fully to its letter; an off-diagonal entry
    contributes half to each of its two letters.
    """
    freqs = dict.fromkeys(obs_freq_mat.alphabet.letters, 0.)
    for (a, b), value in obs_freq_mat.items():
        if a == b:
            freqs[a] += value
        else:
            freqs[a] += value / 2.
            freqs[b] += value / 2.
    return FreqTable.FreqTable(freqs, FreqTable.FREQ)
def _build_exp_freq_mat(exp_freq_table):
    """Build an expected frequency matrix

    exp_freq_table: should be a FreqTable instance
    """
    result = ExpectedFrequencyMatrix(alphabet=exp_freq_table.alphabet,
                                     build_later=1)
    for (a, b) in result:
        # Diagonal entries: p**2; off-diagonal entries: 2*p*q (either order).
        if a == b:
            result[(a, b)] = exp_freq_table[a] ** 2
        else:
            result[(a, b)] = 2.0 * exp_freq_table[a] * exp_freq_table[b]
    return result
#
# Build the substitution matrix
#
def _build_subs_mat(obs_freq_mat, exp_freq_mat):
    """Build the substitution matrix: observed / expected for every pair."""
    if obs_freq_mat.ab_list != exp_freq_mat.ab_list:
        raise ValueError("Alphabet mismatch in passed matrices")
    result = SubstitutionMatrix(obs_freq_mat)
    for pair in obs_freq_mat:
        result[pair] = obs_freq_mat[pair] / exp_freq_mat[pair]
    return result
#
# Build a log-odds matrix
#
def _build_log_odds_mat(subs_mat, logbase=2, factor=10.0, round_digit=0, keep_nd=0):
    """Build a log-odds matrix from a substitution matrix.

    logbase: base of the logarithm used for the log-odds values (default 2).
    factor: multiplier applied to every log-odds value.
    round_digit: decimal place to round to.
    keep_nd: if true, keep the -999 marker for non-determined entries (pairs
    with no observed substitutions); if false, replace -999 with the minimum
    log-odds value of the matrix.
    """
    lo_mat = LogOddsMatrix(subs_mat)
    for pair, ratio in subs_mat.items():
        if ratio < EPSILON:
            lo_mat[pair] = -999  # non-determined marker
        else:
            lo_mat[pair] = round(factor * log(ratio) / log(logbase), round_digit)
    mat_min = min(lo_mat.values())
    if not keep_nd:
        for pair in lo_mat:
            if lo_mat[pair] <= -999:
                lo_mat[pair] = mat_min
    return lo_mat
#
# External function. User provides an accepted replacement matrix, and,
# optionally the following: expected frequency table, log base, mult. factor,
# and rounding factor. Generates a log-odds matrix, calling internal SubsMat
# functions.
#
def make_log_odds_matrix(acc_rep_mat, exp_freq_table=None, logbase=2,
                         factor=1., round_digit=9, keep_nd=0):
    """Generate a log-odds matrix from an accepted replacements matrix.

    If no expected frequency table is supplied, one is derived from the
    observed frequencies themselves.
    """
    obs_mat = _build_obs_freq_mat(acc_rep_mat)
    if not exp_freq_table:
        exp_freq_table = _exp_freq_table_from_obs_freq(obs_mat)
    exp_mat = _build_exp_freq_mat(exp_freq_table)
    subs_mat = _build_subs_mat(obs_mat, exp_mat)
    return _build_log_odds_mat(subs_mat, logbase, factor, round_digit, keep_nd)
def observed_frequency_to_substitution_matrix(obs_freq_mat):
    """Convert an observed frequency matrix directly into a substitution matrix."""
    exp_table = _exp_freq_table_from_obs_freq(obs_freq_mat)
    return _build_subs_mat(obs_freq_mat, _build_exp_freq_mat(exp_table))
def read_text_matrix(data_file):
    """Read a substitution matrix from an open text file and return a SeqMat.

    The expected layout is: optional '#' comment lines, a header row with
    the alphabet, then one row of values per letter (the row may or may not
    repeat its letter label in the first column). Entries involving '*' are
    dropped from the result.
    """
    matrix = {}
    tmp = data_file.read().split("\n")
    table = []
    for i in tmp:
        table.append(i.split())
    # remove records beginning with ``#''
    for rec in table[:]:
        if (rec.count('#') > 0):
            table.remove(rec)
    # remove null lists
    while (table.count([]) > 0):
        table.remove([])
    # build a dictionary
    alphabet = table[0]
    j = 0
    for rec in table[1:]:
        row = alphabet[j]
        # Skip a leading row-label column if present.
        # Bugfix: the original class '[A-z\*]' also matched the punctuation
        # between 'Z' and 'a' ('[', '\', ']', '^', '_', '`'); spell out the
        # intended letters explicitly.
        if re.compile(r'[A-Za-z\*]').match(rec[0]):
            first_col = 1
        else:
            first_col = 0
        i = 0
        for field in rec[first_col:]:
            col = alphabet[i]
            matrix[(row, col)] = float(field)
            i += 1
        j += 1
    # delete entries with an asterisk
    # Bugfix: iterate over a snapshot of the keys; deleting from a dict
    # while iterating it raises RuntimeError on Python 3.
    for i in list(matrix):
        if '*' in i:
            del matrix[i]
    ret_mat = SeqMat(matrix)
    return ret_mat
# Selector flags for two_mat_relative_entropy:
diagNO = 1    # use off-diagonal entries only
diagONLY = 2  # use diagonal entries only
diagALL = 3   # use all entries
def two_mat_relative_entropy(mat_1, mat_2, logbase=2, diag=diagALL):
    """Return the relative entropy of mat_1 with respect to mat_2.

    Only keys present in both matrices take part; each matrix is first
    renormalized over those shared (and diag-selected) entries. ``diag``
    chooses diagonal entries, off-diagonal entries, or both.
    """
    keys_1 = sorted(mat_1)
    keys_2 = sorted(mat_2)
    shared_keys = [k for k in keys_1 if k in keys_2]
    if len(keys_1) != len(keys_2):
        sys.stderr.write("Warning: first matrix has more entries than the second\n")
    if keys_1 != keys_2:
        sys.stderr.write("Warning: indices not the same between matrices\n")

    def _excluded(key):
        # Apply the diag selector.
        if diag == diagNO and key[0] == key[1]:
            return True
        if diag == diagONLY and key[0] != key[1]:
            return True
        return False

    # First pass: normalization sums over the usable entries.
    sum_ent_1 = 0.
    sum_ent_2 = 0.
    for key in shared_keys:
        if _excluded(key):
            continue
        if mat_1[key] > EPSILON and mat_2[key] > EPSILON:
            sum_ent_1 += mat_1[key]
            sum_ent_2 += mat_2[key]

    # Second pass: accumulate p * log(p/q) over the renormalized values.
    rel_ent = 0.
    for key in shared_keys:
        if _excluded(key):
            continue
        if mat_1[key] > EPSILON and mat_2[key] > EPSILON:
            p = mat_1[key] / sum_ent_1
            q = mat_2[key] / sum_ent_2
            rel_ent += p * log(p / q) / log(logbase)
    return rel_ent
# Gives the linear correlation coefficient between two matrices
def two_mat_correlation(mat_1, mat_2):
    """Return the Pearson correlation coefficient between two matrices.

    Both matrices must share the same alphabet (``ab_list``), and every key
    of ``mat_1`` must also exist in ``mat_2``.
    """
    try:
        import numpy
    except ImportError:
        raise ImportError("Please install Numerical Python (numpy) if you want to use this function")
    assert mat_1.ab_list == mat_2.ab_list
    paired_values = []
    for key in mat_1:
        try:
            paired_values.append((mat_1[key], mat_2[key]))
        except KeyError:
            raise ValueError("%s is not a common key" % key)
    # corrcoef with rowvar=0 treats each (v1, v2) pair as an observation.
    return numpy.corrcoef(paired_values, rowvar=0)[0, 1]
# Jensen-Shannon Distance
# Need to input observed frequency matrices
def two_mat_DJS(mat_1, mat_2, pi_1=0.5, pi_2=0.5):
    """Return the Jensen-Shannon distance between two observed-frequency matrices.

    pi_1 and pi_2 are the mixture weights; each must lie in (0, 1) and they
    must sum to 1 (within EPSILON).
    """
    assert mat_1.ab_list == mat_2.ab_list
    assert pi_1 > 0 and pi_2 > 0 and pi_1 < 1 and pi_2 < 1
    assert not (pi_1 + pi_2 - 1.0 > EPSILON)
    # Entropy of the weighted mixture distribution...
    blend = SeqMat(build_later=1)
    blend.ab_list = mat_1.ab_list
    for key in mat_1:
        blend[key] = pi_1 * mat_1[key] + pi_2 * mat_2[key]
    blend.make_entropy()
    # ...minus the weighted entropies of the two inputs.
    mat_1.make_entropy()
    mat_2.make_entropy()
    return blend.entropy - pi_1 * mat_1.entropy - pi_2 * mat_2.entropy
"""
This isn't working yet. Boo hoo!
def two_mat_print(mat_1, mat_2, f=None, alphabet=None, factor_1=1, factor_2=1,
format="%4d", bottomformat="%4s", topformat="%4s",
topindent=7*" ", bottomindent=1*" "):
f = f or sys.stdout
if not alphabet:
assert mat_1.ab_list == mat_2.ab_list
alphabet = mat_1.ab_list
len_alphabet = len(alphabet)
print_mat = {}
topline = topindent
bottomline = bottomindent
for i in alphabet:
bottomline += bottomformat % i
topline += topformat % alphabet[len_alphabet-alphabet.index(i)-1]
topline += '\n'
bottomline += '\n'
f.write(topline)
for i in alphabet:
for j in alphabet:
print_mat[i, j] = -999
diag_1 = {}
diag_2 = {}
for i in alphabet:
for j in alphabet[:alphabet.index(i)+1]:
if i == j:
diag_1[i] = mat_1[(i, i)]
diag_2[i] = mat_2[(alphabet[len_alphabet-alphabet.index(i)-1],
alphabet[len_alphabet-alphabet.index(i)-1])]
else:
if i > j:
key = (j, i)
else:
key = (i, j)
mat_2_key = [alphabet[len_alphabet-alphabet.index(key[0])-1],
alphabet[len_alphabet-alphabet.index(key[1])-1]]
# print(mat_2_key)
mat_2_key.sort()
mat_2_key = tuple(mat_2_key)
# print("%s||%s" % (key, mat_2_key)
print_mat[key] = mat_2[mat_2_key]
print_mat[(key[1], key[0])] = mat_1[key]
for i in alphabet:
outline = i
for j in alphabet:
if i == j:
if diag_1[i] == -999:
val_1 = ' ND'
else:
val_1 = format % (diag_1[i]*factor_1)
if diag_2[i] == -999:
val_2 = ' ND'
else:
val_2 = format % (diag_2[i]*factor_2)
cur_str = val_1 + " " + val_2
else:
if print_mat[(i, j)] == -999:
val = ' ND'
elif alphabet.index(i) > alphabet.index(j):
val = format % (print_mat[(i, j)]*factor_1)
else:
val = format % (print_mat[(i, j)]*factor_2)
cur_str = val
outline += cur_str
outline += bottomformat % (alphabet[len_alphabet-alphabet.index(i)-1] +
'\n')
f.write(outline)
f.write(bottomline)
"""
| zjuchenyuan/BioWeb | Lib/Bio/SubsMat/__init__.py | Python | mit | 23,884 | [
"Biopython"
] | fcdb75a72c57c1a30f36c2c84818582895a52129b6534674fbf4a51eddeb019c |
"""Known matrices related to physics"""
from __future__ import print_function, division
from sympy import Matrix, I, pi, sqrt
from sympy.functions import exp
from sympy.core.compatibility import range
def msigma(i):
    r"""Returns a Pauli matrix `\sigma_i` with `i=1,2,3`

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Pauli_matrices

    Examples
    ========

    >>> from sympy.physics.matrices import msigma
    >>> msigma(1)
    Matrix([
    [0, 1],
    [1, 0]])
    """
    # Entries for each of the three Pauli matrices, keyed by index.
    pauli = {
        1: ((0, 1),
            (1, 0)),
        2: ((0, -I),
            (I, 0)),
        3: ((1, 0),
            (0, -1)),
    }
    if i not in pauli:
        raise IndexError("Invalid Pauli index")
    return Matrix(pauli[i])
def pat_matrix(m, dx, dy, dz):
    """Returns the Parallel Axis Theorem matrix to translate the inertia
    matrix a distance of `(dx, dy, dz)` for a body of mass m.

    Examples
    ========

    To translate a body having a mass of 2 units a distance of 1 unit along
    the `x`-axis we get:

    >>> from sympy.physics.matrices import pat_matrix
    >>> pat_matrix(2, 1, 0, 0)
    Matrix([
    [0, 0, 0],
    [0, 2, 0],
    [0, 0, 2]])
    """
    # Squared displacements (diagonal terms) and negated cross terms
    # (off-diagonal terms) of the translation matrix.
    xx, yy, zz = dx**2, dy**2, dz**2
    xy, yz, zx = -dx*dy, -dy*dz, -dz*dx
    translation = Matrix([
        [yy + zz, xy, zx],
        [xy, xx + zz, yz],
        [zx, yz, yy + xx]])
    return m * translation
def mgamma(mu, lower=False):
    r"""Returns a Dirac gamma matrix `\gamma^\mu` in the standard
    (Dirac) representation.

    If you want `\gamma_\mu`, use ``gamma(mu, True)``.

    We use a convention:

    `\gamma^5 = i \cdot \gamma^0 \cdot \gamma^1 \cdot \gamma^2 \cdot \gamma^3`

    `\gamma_5 = i \cdot \gamma_0 \cdot \gamma_1 \cdot \gamma_2 \cdot \gamma_3 = - \gamma^5`

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Gamma_matrices

    Examples
    ========

    >>> from sympy.physics.matrices import mgamma
    >>> mgamma(1)
    Matrix([
    [ 0, 0, 0, 1],
    [ 0, 0, 1, 0],
    [ 0, -1, 0, 0],
    [-1, 0, 0, 0]])
    """
    # Use the idiomatic ``mu not in`` test (was ``not mu in``); validating
    # first also guarantees the dictionary lookup below cannot fail.
    if mu not in (0, 1, 2, 3, 5):
        raise IndexError("Invalid Dirac index")
    # Matrix entries in the standard (Dirac) representation.
    gamma = {
        0: ((1, 0, 0, 0),
            (0, 1, 0, 0),
            (0, 0, -1, 0),
            (0, 0, 0, -1)),
        1: ((0, 0, 0, 1),
            (0, 0, 1, 0),
            (0, -1, 0, 0),
            (-1, 0, 0, 0)),
        2: ((0, 0, 0, -I),
            (0, 0, I, 0),
            (0, I, 0, 0),
            (-I, 0, 0, 0)),
        3: ((0, 0, 1, 0),
            (0, 0, 0, -1),
            (-1, 0, 0, 0),
            (0, 1, 0, 0)),
        5: ((0, 0, 1, 0),
            (0, 0, 0, 1),
            (1, 0, 0, 0),
            (0, 1, 0, 0)),
    }
    m = Matrix(gamma[mu])
    # Lowering the index flips the sign of the spatial matrices and gamma^5;
    # gamma_0 equals gamma^0 under the (+,-,-,-) metric.
    if lower and mu in (1, 2, 3, 5):
        m = -m
    return m
#Minkowski tensor using the convention (+,-,-,-) used in the Quantum Field
#Theory
# eta^{mu nu} = diag(1, -1, -1, -1); the same signature is assumed by
# mgamma() above when lowering an index.
minkowski_tensor = Matrix( (
    (1, 0, 0, 0),
    (0, -1, 0, 0),
    (0, 0, -1, 0),
    (0, 0, 0, -1)
))
def mdft(n):
    r"""
    Returns an expression of a discrete Fourier transform as a matrix multiplication.
    It is an n X n matrix.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/DFT_matrix

    Examples
    ========

    >>> from sympy.physics.matrices import mdft
    >>> mdft(3)
    Matrix([
    [sqrt(3)/3, sqrt(3)/3, sqrt(3)/3],
    [sqrt(3)/3, sqrt(3)*exp(-2*I*pi/3)/3, sqrt(3)*exp(-4*I*pi/3)/3],
    [sqrt(3)/3, sqrt(3)*exp(-4*I*pi/3)/3, sqrt(3)*exp(-8*I*pi/3)/3]])
    """
    base = exp(-2*pi*I/n)
    # Entry (i, j) is base**(i*j).  SymPy simplifies base**0 to 1, so this
    # single comprehension also yields the all-ones first row and column that
    # the previous implementation filled in separately (and redundantly).
    mat = [[base**(i*j) for j in range(n)] for i in range(n)]
    return (1/sqrt(n))*Matrix(mat)
| wxgeo/geophar | wxgeometrie/sympy/physics/matrices.py | Python | gpl-2.0 | 4,140 | [
"DIRAC"
] | 787708ad86c4e2a36a80bcd749b959d2310121c222c6cf40a9ab5b695dda3421 |
""" Bokeh is a Python interactive visualization library that targets modern
web browsers for presentation.
Its goal is to provide elegant, concise construction of novel graphics in the
style of d3.js, but also deliver this capability with high-performance
interactivity over very large or streaming datasets. Bokeh can help anyone
who would like to quickly and easily create interactive plots, dashboards,
and data applications.
For full documentation, please visit: http://bokeh.pydata.org
"""
from __future__ import absolute_import, print_function
# configure Bokeh version
from .util.version import __version__; __version__
from .util.version import __base_version__; __base_version__
# configure Bokeh logger
from .util import logconfig
del logconfig
# imports below are names we want to make available in the bokeh
# module as transitive imports
from . import sampledata; sampledata
def test(args=None):
    """Run Bokeh's own test suite, forwarding *args* to the test runner.

    Args:
        args: optional arguments passed through to ``runtests``

    Returns:
        whatever ``bokeh.util.testing.runtests`` returns
    """
    from .util.testing import runtests
    return runtests(args)
def license():
    ''' Print the Bokeh license to the console.

    Returns:
        None

    '''
    from os.path import join
    # LICENSE.txt ships inside the installed bokeh package directory
    # (__path__[0] is the package location).
    with open(join(__path__[0], 'LICENSE.txt')) as lic:
        print(lic.read())
| srinathv/bokeh | bokeh/__init__.py | Python | bsd-3-clause | 1,194 | [
"VisIt"
] | 26b1567021d3fc6162722c213d9423ca69770768de77ae7a493534fc76bf5ddf |
"""
This module contains classes for storing atomic data.
The Frame class may contain multiple Residues which may each contain multiple Atoms.
Both Frame and Residue are iterable. Residue is indexable with either atom numbers or names.
"""
import logging
import numpy as np
from .util import backup_file
from .parsers.cfg import CFG
# Module-level logger named after this module.
logger = logging.getLogger(__name__)

# Raise exceptions on all numpy floating point errors instead of warning.
np.seterr(all="raise")

# Create FileNotFoundError if using older version of Python
try:
    try:
        raise FileNotFoundError
    except FileNotFoundError:
        pass
except NameError:
    # Python 2 has no FileNotFoundError builtin: raising the undefined name
    # triggers NameError, in which case we define a compatible OSError subclass.
    class FileNotFoundError(OSError):
        pass
class Atom:
    """
    Hold data for a single atom
    """
    __slots__ = ["name", "num", "type", "mass", "charge", "coords"]

    def __init__(self, name, num, type=None, mass=None, charge=None, coords=None):
        """
        Create an atom.

        :param str name: The name of the atom
        :param int num: The atom number
        :param str type: The atom type
        :param float mass: The mass of the atom
        :param float charge: The charge of the atom
        :param coords: The coordinates of the atom
        """
        # Assign each argument to its slot of the same name, in slot order.
        for slot, value in zip(self.__slots__,
                               (name, num, type, mass, charge, coords)):
            setattr(self, slot, value)

    def __repr__(self):
        template = "Atom #{0} {1} type: {2} mass: {3} charge: {4}"
        return template.format(self.num, self.name, self.type,
                               self.mass, self.charge)

    def add_missing_data(self, other):
        """Copy any unset (None) attributes from an equivalent Atom."""
        assert self.name == other.name
        assert self.num == other.num
        for attr in ("type", "mass", "charge", "coords"):
            if getattr(self, attr) is None:
                setattr(self, attr, getattr(other, attr))
class Residue:
    """
    Hold data for a residue - list of atoms
    """
    __slots__ = ["name", "num", "atoms", "name_to_num"]

    def __init__(self, name=None, num=None):
        self.name = name
        self.num = num
        self.atoms = []
        self.name_to_num = {}

    def __iter__(self):
        return iter(self.atoms)

    def __len__(self):
        return len(self.atoms)

    def __getitem__(self, item):
        # Names registered via add_atom take priority; anything else is
        # treated as a positional index into the atom list.
        try:
            position = self.name_to_num[item]
        except KeyError:
            position = item
        try:
            return self.atoms[position]
        except TypeError as e:
            e.args = ("Atom {0} does not exist in residue {1}".format(item, self.name),)
            raise

    def add_atom(self, atom):
        """
        Add an Atom to this Residue and store location in index

        :param atom: Atom to add to Residue
        :return: None
        """
        self.name_to_num[atom.name] = len(self.atoms)
        self.atoms.append(atom)
class Frame:
    """
    Hold Atom data separated into Residues
    """
    def __init__(self, gro=None, xtc=None, itp=None, frame_start=0, xtc_reader=None):
        """
        Return Frame instance having read Residues and Atoms from GRO if provided

        :param gro: GROMACS GRO file to read initial frame and extract residues
        :param xtc: GROMACS XTC file to read subsequent frames
        :param itp: GROMACS ITP file to read masses and charges
        :param frame_start: Frame number to start reading from
        :param xtc_reader: Name of the FrameReader implementation to use
        :return: Frame instance
        """
        self.name = ""
        self.residues = []
        # First next_frame() call increments this back up to frame_start.
        self.number = frame_start - 1
        self.time = 0
        self.numframes = 0
        self.natoms = 0
        self.box = np.zeros(3, dtype=np.float32)
        # Lazily-created MDTraj XTC output handle; see write_xtc().
        self._xtc_buffer = None

        if gro is not None:
            # Imported here rather than at module level to avoid a circular
            # import with framereader.
            from .framereader import get_frame_reader
            self._trajreader = get_frame_reader(gro, traj=xtc, frame_start=frame_start, name=xtc_reader)

            self._trajreader.initialise_frame(self)

            if self._trajreader.num_atoms != self.natoms:
                raise AssertionError("Number of atoms does not match between gro and xtc files.")
            self.numframes += self._trajreader.num_frames

            if itp is not None:
                self._parse_itp(itp)

    @classmethod
    def instance_from_reader(cls, reader):
        """
        Return Frame instance initialised from existing FrameReader object

        :param FrameReader reader: FrameReader object
        :return: Frame instance
        """
        obj = cls()
        obj._trajreader = reader
        obj._trajreader.initialise_frame(obj)
        return obj

    def __len__(self):
        return len(self.residues)

    def __iter__(self):
        return iter(self.residues)

    def __getitem__(self, item):
        return self.residues[item]

    def __repr__(self):
        rep = self.name + "\n"
        atoms = []
        for res in self.residues:
            for atom in res:
                atoms.append(repr(atom))
        rep += "\n".join(atoms)
        return rep

    def yield_resname_in(self, container):
        # Generator over residues whose name appears in `container`.
        for res in self:
            if res.name in container:
                yield res

    def next_frame(self):
        """
        Read next frame from input XTC.

        :return: True if successful else False
        """
        result = self._trajreader.read_next(self)
        if result:
            self.number += 1
        return result

    def write_xtc(self, filename):
        """
        Write frame to output XTC file.

        :param filename: XTC filename to write to
        """
        if self._xtc_buffer is None:
            # MDTraj is only needed if XTC output is actually requested, so
            # import it lazily and open the output file on first use.
            try:
                import mdtraj
            except ImportError as e:
                if "scipy" in repr(e):
                    e.msg = "XTC output with MDTraj also requires Scipy"
                else:
                    e.msg = "XTC output requires the module MDTraj (and probably Scipy)"
                raise
            backup_file(filename)
            self._xtc_buffer = mdtraj.formats.XTCTrajectoryFile(filename, mode="w")

        # Collect every atom's coordinates into a single (1, natoms, 3) array.
        xyz = np.ndarray((1, self.natoms, 3), dtype=np.float32)
        i = 0
        for residue in self.residues:
            for atom in residue.atoms:
                xyz[0][i] = atom.coords
                i += 1

        time = np.array([self.time], dtype=np.float32)
        step = np.array([self.number], dtype=np.int32)
        # MDTraj expects a full 3x3 box matrix; ours is rectangular, so only
        # the diagonal is filled.
        box = np.zeros((1, 3, 3), dtype=np.float32)
        for i in range(3):
            box[0][i][i] = self.box[i]

        self._xtc_buffer.write(xyz, time=time, step=step, box=box)

    def _parse_itp(self, filename):
        """
        Parse a GROMACS ITP file to extract atom charges/masses.

        Optional but requires that ITP contains only a single residue.

        :param filename: Filename of GROMACS ITP to read
        """
        with CFG(filename) as itp:
            itpres = Residue(itp["moleculetype"][0][0])
            for line in itp["atoms"]:
                atom = Atom(num=int(line[0]) - 1, type=line[1], name=line[4], charge=float(line[6]), mass=float(line[7]))
                itpres.add_atom(atom)

            # Copy type/mass/charge onto every matching residue already read.
            for res in self.residues:
                if res.name == itpres.name:
                    for atom, itpatom in zip(res, itpres):
                        atom.add_missing_data(itpatom)

    def output(self, filename, format="gro"):
        """
        Write coordinates from Frame to file.

        :param filename: Name of file to write to
        :param format: Format to write e.g. 'gro', 'lammps'
        """
        outputs = {"gro": self._output_gro,
                   "lammps": self._output_lammps_data}
        try:
            outputs[format](filename)
        except KeyError:
            # NOTE(review): a KeyError raised inside the writer itself would
            # also be caught here and reported as an invalid format.
            print("ERROR: Invalid output format {0}, coordinates will not be output.".format(format))

    def _output_lammps_data(self, filename):
        """
        Output Frame coordinates in LAMMPS data format.

        :param filename: Name of DATA file to create
        """
        raise NotImplementedError("LAMMPS Data output has not yet been implemented.")

    def _output_gro(self, filename):
        """
        Create a GROMACS GRO file from the data in this Frame

        :param filename: Name of GRO file to create
        """
        backup_file(filename)
        with open(filename, "w") as gro:
            # GRO header: title line followed by the atom count.
            print(self.name, file=gro)
            print("{0:5d}".format(self.natoms), file=gro)
            i = 1
            # Fixed-width GRO atom line: resnum, resname, atomname, atomnum, x, y, z.
            format_string = "{0:5d}{1:5s}{2:>5s}{3:5d}{4:8.3f}{5:8.3f}{6:8.3f}"
            for res in self.residues:
                for atom in res:
                    print(format_string.format(res.num, res.name,
                                               atom.name, i,
                                               *atom.coords), file=gro)
                    i += 1
            # Final line holds the rectangular box vectors.
            print("{0:10.5f}{1:10.5f}{2:10.5f}".format(*self.box), file=gro)

    def add_residue(self, residue):
        """
        Add a Residue to this Frame

        :param residue: Residue to add
        """
        self.residues.append(residue)
| jag1g13/pycgtool | pycgtool/frame.py | Python | gpl-3.0 | 8,888 | [
"Gromacs",
"LAMMPS",
"MDTraj"
] | 7abf02d5f12e4024869fe968b7d4e2ca176ce16f6db7a65789fc1614f630089a |
#!/usr/bin/env python
"""
Runs RAxML on a sequence file.
For use with RAxML version 7.3.0
usage:
<!-- raxmlHPC-PTHREADS-SSE3 -T 2 -f c -m GTRGAMMA -F -s "/Users/om/Downloads/rana.phy" -n rana_red -w "/Users/om/Downloads/" 0
## raxmlHPC-PTHREADS-SSE3 -T 2 -m GTRGAMMA -n test -p 323483 -s reduced.phy
command>raxmlHPC-HYBRID-SSE3 -T 4 -f ${search_algorithm} -m ${smodel} -N ${repeats} -o "${html_outfile.files_path}" -s "$input1"
"""
import os, shutil, subprocess, sys, optparse, fnmatch, glob
def stop_err(msg):
    """Report *msg* on standard error and abort the wrapper."""
    message = "%s\n" % msg
    sys.stderr.write(message)
    sys.exit()
def getint(name):
    """Sort key for RAxML per-run output files.

    Extracts the integer after 'RUN.' in *name*, e.g.
    'RAxML_log.galaxy.RUN.7' -> 7.

    Names without a 'RUN.' suffix (such as the PARTITION files that are also
    sorted with this key) return -1 so they sort first.  The previous
    implementation implicitly returned None for these, which sorted first on
    Python 2 but raises TypeError under ``list.sort(key=...)`` on Python 3.
    """
    suffix = name.partition('RUN.')[2]
    if suffix != '':
        return int(suffix)
    return -1
def __main__():
    """Parse the wrapper's options, assemble and run the RAxML command line,
    then concatenate the per-run output files that Galaxy expects.

    Note: this is Python 2 code (print statements, ``except Exception, err``).
    """
    usage = "usage: %prog -T <threads> -s <input> -n <output> -m <model> [optional arguments]"

    # Parse the primary wrapper's command line options
    parser = optparse.OptionParser(usage = usage)
    # raxml binary name, hardcoded in the xml file
    parser.add_option("--binary", action="store", type="string", dest="binary", help="Command to run")
    # (-a)
    parser.add_option("--weightfile", action="store", type="string", dest="weightfile", help="Column weight file")
    # (-A)
    parser.add_option("--secondary_structure_model", action="store", type="string", dest="secondary_structure_model", help="Secondary structure model")
    # (-b)
    parser.add_option("--bootseed", action="store", type="int", dest="bootseed", help="Bootstrap random number seed")
    # (-c)
    parser.add_option("--numofcats", action="store", type="int", dest="numofcats", help="Number of distinct rate categories")
    # (-d)
    parser.add_option("--search_complete_random_tree", action="store_true", dest="search_complete_random_tree", help="Search with a complete random starting tree")
    # (-D)
    parser.add_option("--ml_search_convergence", action="store_true", dest="ml_search_convergence", help="ML search onvergence criterion")
    # (-e)
    parser.add_option("--model_opt_precision", action="store", type="float", dest="model_opt_precision", help="Model Optimization Precision (-e)")
    # (-E)
    parser.add_option("--excludefile", action="store", type="string", dest="excludefile", help="Exclude File Name")
    # (-f)
    parser.add_option("--search_algorithm", action="store", type="string", dest="search_algorithm", help="Search Algorithm")
    # (-F)
    parser.add_option("--save_memory_cat_model", action="store_true", dest="save_memory_cat_model", help="Save memory under CAT and GTRGAMMA models")
    # (-g)
    parser.add_option("--groupingfile", action="store", type="string", dest="groupingfile", help="Grouping File Name")
    # (-G)
    parser.add_option("--enable_evol_heuristics", action="store_true", dest="enable_evol_heuristics", help="Enable evol algo heuristics")
    # (-i)
    parser.add_option("--initial_rearrangement_setting", action="store", type="int", dest="initial_rearrangement_setting", help="Initial Rearrangement Setting")
    # (-I)
    parser.add_option("--posterior_bootstopping_analysis", action="store", type="string", dest="posterior_bootstopping_analysis", help="Posterior bootstopping analysis")
    # (-J)
    parser.add_option("--majority_rule_consensus", action="store", type="string", dest="majority_rule_consensus", help="Majority rule consensus")
    # (-k)
    parser.add_option("--print_branch_lengths", action="store_true", dest="print_branch_lengths", help="Print branch lengths")
    # (-K)
    parser.add_option("--multistate_sub_model", action="store", type="string", dest="multistate_sub_model", help="Multistate substitution model")
    # (-m)
    parser.add_option("--model_type", action="store", type="string", dest="model_type", help="Model Type")
    parser.add_option("--base_model", action="store", type="string", dest="base_model", help="Base Model")
    parser.add_option("--aa_empirical_freq", action="store_true", dest="aa_empirical_freq", help="Use AA Empirical base frequences")
    parser.add_option("--aa_search_matrix", action="store", type="string", dest="aa_search_matrix", help="AA Search Matrix")
    # (-n)
    parser.add_option("--name", action="store", type="string", dest="name", help="Run Name")
    # (-N/#)
    parser.add_option("--number_of_runs", action="store", type="int", dest="number_of_runs", help="Number of alternative runs")
    parser.add_option("--number_of_runs_bootstop", action="store", type="string", dest="number_of_runs_bootstop", help="Number of alternative runs based on the bootstop criteria")
    # (-M)
    parser.add_option("--estimate_individual_branch_lengths", action="store_true", dest="estimate_individual_branch_lengths", help="Estimate individual branch lengths")
    # (-o)
    parser.add_option("--outgroup_name", action="store", type="string", dest="outgroup_name", help="Outgroup Name")
    # (-O)
    parser.add_option("--disable_undetermined_seq_check", action="store_true", dest="disable_undetermined_seq_check", help="Disable undetermined sequence check")
    # (-p)
    parser.add_option("--random_seed", action="store", type="int", dest="random_seed", help="Random Number Seed")
    # (-P)
    parser.add_option("--external_protein_model", action="store", type="string", dest="external_protein_model", help="External Protein Model")
    # (-q)
    parser.add_option("--multiple_model", action="store", type="string", dest="multiple_model", help="Multiple Model File")
    # (-r)
    parser.add_option("--constraint_file", action="store", type="string", dest="constraint_file", help="Constraint File")
    # (-R)
    parser.add_option("--bin_model_parameter_file", action="store", type="string", dest="bin_model_parameter_file", help="Constraint File")
    # (-s)
    parser.add_option("--source", action="store", type="string", dest="source", help="Input file")
    # (-S)
    parser.add_option("--secondary_structure_file", action="store", type="string", dest="secondary_structure_file", help="Secondary structure file")
    # (-t)
    parser.add_option("--starting_tree", action="store", type="string", dest="starting_tree", help="Starting Tree")
    # (-T)
    parser.add_option("-T", action="store", type="int", dest="threads", help="Number of threads to use")
    # (-u)
    parser.add_option("--use_median_approximation", action="store_true", dest="use_median_approximation", help="Use median approximation")
    # (-U)
    parser.add_option("--save_memory_gappy_alignments", action="store_true", dest="save_memory_gappy_alignments", help="Save memory in large gapped alignments")
    # (-V)
    parser.add_option("--disable_rate_heterogeneity", action="store_true", dest="disable_rate_heterogeneity", help="Disable rate heterogeneity")
    # (-W)
    parser.add_option("--sliding_window_size", action="store", type="string", dest="sliding_window_size", help="Sliding window size")
    # (-x)
    parser.add_option("--rapid_bootstrap_random_seed", action="store", type="int", dest="rapid_bootstrap_random_seed", help="Rapid Boostrap Random Seed")
    # (-y)
    parser.add_option("--parsimony_starting_tree_only", action="store_true", dest="parsimony_starting_tree_only", help="Generate a parsimony starting tree only")
    # (-z)
    parser.add_option("--file_multiple_trees", action="store", type="string", dest="file_multiple_trees", help="Multiple Trees File")

    (options, args) = parser.parse_args()
    cmd = []

    # Required parameters
    binary = options.binary
    cmd.append(binary)
    # Threads
    threads = "-T %d" % options.threads
    cmd.append(threads)
    # Source
    source = "-s %s" % options.source
    cmd.append(source)
    #Hardcode to "galaxy" first to simplify the output part of the wrapper
    #name = "-n %s" % options.name
    name = "-n galaxy"
    cmd.append(name)
    ## Model
    model_type = options.model_type
    base_model = options.base_model
    aa_search_matrix = options.aa_search_matrix
    aa_empirical_freq = options.aa_empirical_freq
    if model_type == 'aminoacid':
        model = "-m %s%s" % (base_model, aa_search_matrix)
        if aa_empirical_freq:
            model = "-m %s%s%s" % (base_model, aa_search_matrix, 'F')
        # (-P)
        if options.external_protein_model:
            external_protein_model = "-P %s" % options.external_protein_model
            cmd.append(external_protein_model)
    else:
        model = "-m %s" % base_model
    cmd.append(model)
    if model == "GTRCAT":
        # NOTE(review): 'model' includes the '-m ' prefix, so this comparison
        # can never be true -- presumably base_model == "GTRCAT" was intended.
        # (-c)
        if options.numofcats:
            numofcats = "-c %d" % options.numofcats
            cmd.append(numofcats)
    # Optional parameters
    if options.number_of_runs_bootstop:
        number_of_runs_bootstop = "-N %s" % options.number_of_runs_bootstop
        cmd.append(number_of_runs_bootstop)
    else:
        number_of_runs_bootstop = ''
    if options.number_of_runs:
        number_of_runs_opt = "-N %d" % options.number_of_runs
        cmd.append(number_of_runs_opt)
    else:
        number_of_runs_opt = 0
    # (-a)
    if options.weightfile:
        weightfile = "-a %s" % options.weightfile
        cmd.append(weightfile)
    # (-A)
    if options.secondary_structure_model:
        secondary_structure_model = "-A %s" % options.secondary_structure_model
        cmd.append(secondary_structure_model )
    # (-b)
    if options.bootseed:
        bootseed = "-b %d" % options.bootseed
        cmd.append(bootseed)
    else:
        bootseed = 0
    # -C - doesn't work in pthreads version, skipped
    if options.search_complete_random_tree:
        cmd.append("-d")
    if options.ml_search_convergence:
        cmd.append("-D" )
    if options.model_opt_precision:
        model_opt_precision = "-e %f" % options.model_opt_precision
        cmd.append(model_opt_precision)
    if options.excludefile:
        excludefile = "-E %s" % options.excludefile
        cmd.append(excludefile)
    if options.search_algorithm:
        search_algorithm = "-f %s" % options.search_algorithm
        cmd.append(search_algorithm)
    if options.save_memory_cat_model:
        cmd.append("-F")
    if options.groupingfile:
        groupingfile = "-g %s" % options.groupingfile
        cmd.append(groupingfile)
    if options.enable_evol_heuristics:
        enable_evol_heuristics = "-G %f" % options.enable_evol_heuristics
        cmd.append(enable_evol_heuristics )
    if options.initial_rearrangement_setting:
        initial_rearrangement_setting = "-i %s" % options.initial_rearrangement_setting
        cmd.append(initial_rearrangement_setting)
    if options.posterior_bootstopping_analysis:
        posterior_bootstopping_analysis = "-I %s" % options.posterior_bootstopping_analysis
        cmd.append(posterior_bootstopping_analysis)
    if options.majority_rule_consensus:
        majority_rule_consensus = "-J %s" % options.majority_rule_consensus
        cmd.append(majority_rule_consensus)
    if options.print_branch_lengths:
        cmd.append("-k")
    if options.multistate_sub_model:
        multistate_sub_model = "-K %s" % options.multistate_sub_model
        cmd.append(multistate_sub_model)
    if options.estimate_individual_branch_lengths:
        cmd.append("-M")
    if options.outgroup_name:
        outgroup_name = "-o %s" % options.outgroup_name
        cmd.append(outgroup_name)
    if options.disable_undetermined_seq_check:
        cmd.append("-O")
    if options.random_seed:
        random_seed = "-p %d" % options.random_seed
        cmd.append(random_seed)
    multiple_model = None
    if options.multiple_model:
        multiple_model = "-q %s" % options.multiple_model
        cmd.append(multiple_model)
    if options.constraint_file:
        constraint_file = "-r %s" % options.constraint_file
        cmd.append(constraint_file)
    if options.bin_model_parameter_file:
        bin_model_parameter_file_name = "RAxML_binaryModelParameters.galaxy"
        os.symlink(options.bin_model_parameter_file, bin_model_parameter_file_name )
        bin_model_parameter_file = "-R %s" % options.bin_model_parameter_file
        #Needs testing. Is the hardcoded name or the real path needed?
        cmd.append(bin_model_parameter_file)
    if options.secondary_structure_file:
        secondary_structure_file = "-S %s" % options.secondary_structure_file
        cmd.append(secondary_structure_file)
    if options.starting_tree:
        starting_tree = "-t %s" % options.starting_tree
        cmd.append(starting_tree)
    if options.use_median_approximation:
        cmd.append("-u")
    if options.save_memory_gappy_alignments:
        cmd.append("-U")
    if options.disable_rate_heterogeneity:
        cmd.append("-V")
    if options.sliding_window_size:
        sliding_window_size = "-W %d" % options.sliding_window_size
        cmd.append(sliding_window_size)
    if options.rapid_bootstrap_random_seed:
        rapid_bootstrap_random_seed = "-x %d" % options.rapid_bootstrap_random_seed
        cmd.append(rapid_bootstrap_random_seed)
    else:
        rapid_bootstrap_random_seed = 0
    if options.parsimony_starting_tree_only:
        cmd.append("-y")
    if options.file_multiple_trees:
        file_multiple_trees = "-z %s" % options.file_multiple_trees
        cmd.append(file_multiple_trees)

    print "cmd list: ", cmd, "\n"
    full_cmd = " ".join(cmd)
    print "Command string: %s" % full_cmd

    # Run RAxML through the shell, capturing stdout/stderr for relay to Galaxy.
    try:
        proc = subprocess.Popen(args=full_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except Exception, err:
        sys.stderr.write("Error invoking command: \n%s\n\n%s\n" % (cmd, err))
        sys.exit(1)
    stdout, stderr = proc.communicate()
    return_code = proc.returncode
    if return_code:
        sys.stdout.write(stdout)
        sys.stderr.write(stderr)
        sys.stderr.write("Return error code %i from command:\n" % return_code)
        sys.stderr.write("%s\n" % cmd)
    else:
        sys.stdout.write(stdout)
        sys.stdout.write(stderr)

    #Multiple runs - concatenate
    if number_of_runs_opt > 0:
        if (bootseed == 0) and (rapid_bootstrap_random_seed == 0 ):
            # Gather the per-run output files and merge each kind into the
            # single file name Galaxy expects, sorted by run number.
            runfiles = glob.glob('RAxML*RUN*')
            runfiles.sort(key=getint)
            # Logs
            outfile = open('RAxML_log.galaxy','w')
            for filename in runfiles:
                if fnmatch.fnmatch(filename, 'RAxML_log.galaxy.RUN.*'):
                    infile = open(filename, 'r')
                    filename_line = "%s\n" % filename
                    outfile.write(filename_line)
                    for line in infile:
                        outfile.write(line)
                    infile.close()
            outfile.close()
            # Parsimony Trees
            outfile = open('RAxML_parsimonyTree.galaxy','w')
            for filename in runfiles:
                if fnmatch.fnmatch(filename, 'RAxML_parsimonyTree.galaxy.RUN.*'):
                    infile = open(filename, 'r')
                    filename_line = "%s\n" % filename
                    outfile.write(filename_line)
                    for line in infile:
                        outfile.write(line)
                    infile.close()
            outfile.close()
            # Results
            outfile = open('RAxML_result.galaxy','w')
            for filename in runfiles:
                if fnmatch.fnmatch(filename, 'RAxML_result.galaxy.RUN.*'):
                    infile = open(filename, 'r')
                    filename_line = "%s\n" % filename
                    outfile.write(filename_line)
                    for line in infile:
                        outfile.write(line)
                    infile.close()
            outfile.close()
    # Multiple Model Partition Files
    if multiple_model:
        files = glob.glob('RAxML_bestTree.galaxy.PARTITION.*')
        if len(files) > 0:
            files.sort(key=getint)
            outfile = open('RAxML_bestTreePartitions.galaxy','w')
            # Best Tree Partitions
            for filename in files:
                if fnmatch.fnmatch(filename, 'RAxML_bestTree.galaxy.PARTITION.*'):
                    infile = open(filename, 'r')
                    filename_line = "%s\n" % filename
                    outfile.write(filename_line)
                    for line in infile:
                        outfile.write(line)
                    infile.close()
            outfile.close()
        else:
            outfile = open('RAxML_bestTreePartitions.galaxy','w')
            outfile.write("No partition files were produced.\n")
            outfile.close()

        # Result Partitions
        files = glob.glob('RAxML_result.galaxy.PARTITION.*')
        if len(files) > 0:
            files.sort(key=getint)
            outfile = open('RAxML_resultPartitions.galaxy','w')
            for filename in files:
                if fnmatch.fnmatch(filename, 'RAxML_result.galaxy.PARTITION.*'):
                    infile = open(filename, 'r')
                    filename_line = "%s\n" % filename
                    outfile.write(filename_line)
                    for line in infile:
                        outfile.write(line)
                    infile.close()
            outfile.close()
        else:
            outfile = open('RAxML_resultPartitions.galaxy','w')
            outfile.write("No partition files were produced.\n")
            outfile.close()

    # DEBUG options
    # NOTE(review): this debug handle is never closed explicitly.
    infof = open('RAxML_info.galaxy','a')
    infof.write('\nOM: CLI options DEBUG START:\n')
    infof.write(options.__repr__())
    infof.write('\nOM: CLI options DEBUG END\n')
# Script entry point.
if __name__=="__main__": __main__()
| Rothamsted/AppliedBioinformatics | galaxyMetaomics/raxml.py | Python | mit | 17,489 | [
"Galaxy"
] | 3d1acfb5698962b5e323aa933e4b9280578f04c1bb3630a39dbeaa541a00beb2 |
#!/usr/bin/env python
import argparse
import copy
import logging
import re
import sys
from cpt_gffParser import gffParse, gffWrite, gffSeqFeature
from Bio import SearchIO
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(name='blastxml2gff3')
__doc__ = """
BlastXML files, when transformed to GFF3, do not normally show gaps in the
blast hits. This tool aims to fill that "gap".
"""
def blastxml2gff3(blastxml, min_gap=3, trim=False, trim_end=False, include_seq=False):
    """Yield one GFF3-ready SeqRecord per query in a BLAST XML result.

    :param blastxml: handle to a BLAST XML file (parsed with NCBIXML)
    :param min_gap: passed to generate_parts as ignore_under; runs of
        mismatch shorter than this do not split a match_part
    :param trim: clamp the parent match start to >= 0
    :param trim_end: clamp the parent match end to the query end
    :param include_seq: attach the aligned query/subject/match strings as
        blast_qseq/blast_sseq/blast_mseq qualifiers
    """
    from Bio.Blast import NCBIXML
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord
    from Bio.SeqFeature import SeqFeature, FeatureLocation

    blast_records = NCBIXML.parse(blastxml)
    for idx_record, record in enumerate(blast_records):
        # http://www.sequenceontology.org/browser/release_2.4/term/SO:0000343
        match_type = {  # Currently we can only handle BLASTN, BLASTP
            'BLASTN': 'nucleotide_match',
            'BLASTP': 'protein_match',
        }.get(record.application, 'match')

        # Use only the first word of the query line as the record id.
        recid = record.query
        if ' ' in recid:
            recid = recid[0:recid.index(' ')]

        # Placeholder sequence; only the features matter for GFF3 output.
        rec = SeqRecord(Seq("ACTG"), id=recid)
        for idx_hit, hit in enumerate(record.alignments):
            for idx_hsp, hsp in enumerate(hit.hsps):
                qualifiers = {
                    "ID": 'b2g.%s.%s.%s' % (idx_record, idx_hit, idx_hsp),
                    "source": "blast",
                    "score": hsp.expect,
                    "accession": hit.accession,
                    "hit_id": hit.hit_id,
                    "length": hit.length,
                    "hit_titles": hit.title.split(' >'),
                }
                if include_seq:
                    qualifiers.update({
                        'blast_qseq': hsp.query,
                        'blast_sseq': hsp.sbjct,
                        'blast_mseq': hsp.match,
                    })

                # Copy the raw HSP statistics as blast_* qualifiers.
                for prop in ('score', 'bits', 'identities', 'positives',
                             'gaps', 'align_length', 'strand', 'frame',
                             'query_start', 'query_end', 'sbjct_start',
                             'sbjct_end'):
                    qualifiers['blast_' + prop] = getattr(hsp, prop, None)

                desc = hit.title.split(' >')[0]
                qualifiers['description'] = desc[desc.index(' '):]

                # This required a fair bit of sketching out/match to figure out
                # the first time.
                #
                # the match_start location must account for queries and
                # subjecst that start at locations other than 1
                parent_match_start = hsp.query_start - hsp.sbjct_start
                # The end is the start + hit.length because the match itself
                # may be longer than the parent feature, so we use the supplied
                # subject/hit length to calculate the real ending of the target
                # protein.
                parent_match_end = hsp.query_start + hit.length + hsp.query.count('-')

                # If we trim the left end, we need to trim without losing information.
                used_parent_match_start = parent_match_start
                if trim:
                    if parent_match_start < 1:
                        used_parent_match_start = 0

                if trim or trim_end:
                    if parent_match_end > hsp.query_end:
                        parent_match_end = hsp.query_end + 1

                # The ``match`` feature will hold one or more ``match_part``s
                top_feature = gffSeqFeature(
                    FeatureLocation(used_parent_match_start, parent_match_end),
                    type=match_type, strand=0,
                    qualifiers=qualifiers
                )

                # Unlike the parent feature, ``match_part``s have sources.
                part_qualifiers = {
                    "source": "blast",
                }
                top_feature.sub_features = []
                for idx_part, (start, end, cigar) in \
                        enumerate(generate_parts(hsp.query, hsp.match,
                                                 hsp.sbjct,
                                                 ignore_under=min_gap)):
                    part_qualifiers['Gap'] = cigar
                    part_qualifiers['ID'] = qualifiers['ID'] + ('.%s' % idx_part)

                    # Otherwise, we have to account for the subject start's location
                    match_part_start = parent_match_start + hsp.sbjct_start + start - 1

                    # We used to use hsp.align_length here, but that includes
                    # gaps in the parent sequence
                    #
                    # Furthermore align_length will give calculation errors in weird places
                    # So we just use (end-start) for simplicity
                    match_part_end = match_part_start + (end - start)

                    top_feature.sub_features.append(
                        gffSeqFeature(
                            FeatureLocation(match_part_start, match_part_end),
                            type="match_part", strand=0,
                            qualifiers=copy.deepcopy(part_qualifiers))
                    )

                rec.features.append(top_feature)
        rec.annotations = {}
        yield rec
def __remove_query_gaps(query, match, subject):
    """remove positions in all three based on gaps in query

    In order to simplify math and calculations...we remove all of the gaps
    based on gap locations in the query sequence::

        Q:ACTG-ACTGACTG
        S:ACTGAAC---CTG

    will become::

        Q:ACTGACTGACTG
        S:ACTGAC---CTG

    which greatly simplifies the process of identifying the correct location
    for a match_part
    """
    prev = 0
    fq = ''
    fm = ''
    fs = ''
    # Walk the gap characters in the query; copy each inter-gap slice from
    # all three strings so their columns stay aligned.
    for position in re.finditer('-', query):
        fq += query[prev:position.start()]
        fm += match[prev:position.start()]
        fs += subject[prev:position.start()]
        prev = position.start() + 1
    # Copy the tail after the final gap (or the whole string if no gaps).
    fq += query[prev:]
    fm += match[prev:]
    fs += subject[prev:]
    return (fq, fm, fs)
def generate_parts(query, match, subject, ignore_under=3):
    """Split one HSP alignment into contiguous match regions.

    Yields ``(start, end, cigar)`` tuples in half-open query coordinates
    (after query gaps have been removed).  A run of ``ignore_under`` or more
    consecutive mismatch columns closes the current region; shorter mismatch
    runs stay inside the surrounding region.

    Bug fix: the original implementation unconditionally yielded one final
    region, which produced a spurious ``(-1, 0, '')``-style part whenever the
    alignment ended immediately after a flushed region (or was empty).  The
    trailing region is now only yielded when one is actually open.
    """
    region_q = []
    region_m = []
    region_s = []
    (query, match, subject) = __remove_query_gaps(query, match, subject)
    region_start = -1
    region_end = -1
    mismatch_count = 0
    for i, (q, m, s) in enumerate(zip(query, match, subject)):
        # Any non-space match-line character (identity letter, '|' or '+')
        # counts as a match column.  (The original ``or m == '+'`` clause was
        # redundant: '+' already satisfies ``m != ' '``.)
        if m != ' ':
            if region_start == -1:
                region_start = i
                # It's a new region: discard mismatch columns accumulated
                # before it opened (the lists are pre-seeded with them).
                region_q = []
                region_m = []
                region_s = []
            region_end = i
            mismatch_count = 0
        else:
            mismatch_count += 1
        region_q.append(q)
        region_m.append(m)
        region_s.append(s)
        if mismatch_count >= ignore_under and region_start != -1 and region_end != -1:
            # Too many consecutive mismatches: close the region, trimming the
            # trailing mismatch run that triggered the split.
            region_q = region_q[0:-ignore_under]
            region_m = region_m[0:-ignore_under]
            region_s = region_s[0:-ignore_under]
            yield region_start, region_end + 1, \
                cigar_from_string(region_q, region_m, region_s, strict_m=True)
            region_q = []
            region_m = []
            region_s = []
            region_start = -1
            region_end = -1
            mismatch_count = 0
    if region_start != -1:
        # Emit the still-open final region (guard added; see docstring).
        yield region_start, region_end + 1, \
            cigar_from_string(region_q, region_m, region_s, strict_m=True)
def _qms_to_matches(query, match, subject, strict_m=True):
    """Translate aligned query/match/subject columns into per-column CIGAR opcodes.

    Returns a list with one opcode per column: '=' (or 'M' when ``strict_m``)
    for matches, 'D' for a gap in the query, 'I' for a gap in the subject,
    and 'X' (or 'M') for an aligned mismatch.

    The original version carried a redundant ``or m == '+'`` clause and an
    unreachable ``else`` branch (the two conditions ``m != ' '`` and
    ``m == ' '`` are exhaustive) that called ``log.warn`` and would have
    appended an empty opcode; both have been removed with no behavior change.
    """
    matchline = []
    for (q, m, s) in zip(query, match, subject):
        if m != ' ':
            # Identity or positive-scoring substitution ('+').
            ret = '='
        elif q == '-':
            # Gap in the query: deletion relative to the query.
            ret = 'D'
        elif s == '-':
            # Gap in the subject: insertion relative to the query.
            ret = 'I'
        else:
            # Aligned but mismatched column.
            ret = 'X'
        if strict_m and ret in ('=', 'X'):
            # Many downstream consumers only understand the generic 'M' opcode.
            ret = 'M'
        matchline.append(ret)
    return matchline
def _matchline_to_cigar(matchline):
    """Run-length encode a list of opcodes into a string like ``'M28 D2 M100'``.

    Assumes ``matchline`` is non-empty (the caller guards the empty case).
    """
    cigar_line = []
    last_char = matchline[0]
    count = 0
    for char in matchline:
        if char == last_char:
            count += 1
        else:
            cigar_line.append("%s%s" % (last_char, count))
            count = 1
            last_char = char
    # Flush the final run.
    cigar_line.append("%s%s" % (last_char, count))
    return ' '.join(cigar_line)
def cigar_from_string(query, match, subject, strict_m=True):
    """Build a GFF3 ``Gap``-attribute style CIGAR string for one aligned region.

    Returns the empty string for an empty region.
    """
    matchline = _qms_to_matches(query, match, subject, strict_m=strict_m)
    if len(matchline) > 0:
        return _matchline_to_cigar(matchline)
    else:
        return ""
if __name__ == '__main__':
    # Command-line entry point: parse arguments and stream the converted
    # records to stdout as GFF3.
    arg_parser = argparse.ArgumentParser(description='Convert Blast XML to gapped GFF3', epilog='')
    arg_parser.add_argument('blastxml', type=argparse.FileType("r"), help='Blast XML Output')
    arg_parser.add_argument('--min_gap', type=int, default=3,
                            help='Maximum gap size before generating a new match_part')
    arg_parser.add_argument('--trim', action='store_true',
                            help='Trim blast hits to be only as long as the parent feature')
    arg_parser.add_argument('--trim_end', action='store_true',
                            help='Cut blast results off at end of gene')
    arg_parser.add_argument('--include_seq', action='store_true', help='Include sequence')
    options = arg_parser.parse_args()
    for record in blastxml2gff3(**vars(options)):
        # Only emit records that actually gained features.
        if len(record.features):
            gffWrite([record], sys.stdout)
| TAMU-CPT/galaxy-tools | tools/blast/blastxml2_to_gapped_gff3.py | Python | gpl-3.0 | 9,689 | [
"BLAST"
] | ad3ee15d9c26ec804fad133dc10df29b01f2fcb1554f9ac6d035cb717f4467ea |
# -*- coding: utf-8 -*-
"""
Acceptance tests for studio related to the outline page.
"""
import json
from datetime import datetime, timedelta
import itertools
from pytz import UTC
from bok_choy.promise import EmptyPromise
from nose.plugins.attrib import attr
from common.test.acceptance.pages.studio.settings_advanced import AdvancedSettingsPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage, ContainerPage, ExpandCollapseLinkState
from common.test.acceptance.pages.studio.utils import add_discussion, drag, verify_ordering
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.course_nav import CourseNavPage
from common.test.acceptance.pages.lms.staff_view import StaffPage
from common.test.acceptance.fixtures.config import ConfigModelFixture
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from base_studio_test import StudioCourseTest
from common.test.acceptance.tests.helpers import load_data_str, disable_animations
from common.test.acceptance.pages.lms.progress import ProgressPage
# Default names used by the shared course fixture built in CourseOutlineTest.
SECTION_NAME = 'Test Section'
SUBSECTION_NAME = 'Test Subsection'
UNIT_NAME = 'Test Unit'
class CourseOutlineTest(StudioCourseTest):
    """
    Base class for all course outline tests
    """
    def setUp(self):
        """
        Install a course with no content using a fixture.
        """
        super(CourseOutlineTest, self).setUp()
        # Page objects for the Studio outline and advanced-settings pages,
        # both pointed at the fixture course created by setUp.
        self.course_outline_page = CourseOutlinePage(
            self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
        )
        self.advanced_settings = AdvancedSettingsPage(
            self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
        )
    def populate_course_fixture(self, course_fixture):
        """ Install a course with sections/problems, tabs, updates, and handouts """
        # Single section -> subsection -> unit, holding a multiple-choice
        # problem, an HTML component, and a discussion component.
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
                XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
                    XBlockFixtureDesc('vertical', UNIT_NAME).add_children(
                        XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
                        XBlockFixtureDesc('html', 'Test HTML Component'),
                        XBlockFixtureDesc('discussion', 'Test Discussion Component')
                    )
                )
            )
        )
    def do_action_and_verify(self, outline_page, action, expected_ordering):
        """
        Perform the supplied action and then verify the resulting ordering.

        Arguments:
            outline_page: an already-visited CourseOutlinePage, or None to
                visit a fresh one.
            action: callable taking the outline page and mutating the outline.
            expected_ordering: ordering structure passed to verify_ordering.
        """
        if outline_page is None:
            outline_page = self.course_outline_page.visit()
        action(outline_page)
        verify_ordering(self, outline_page, expected_ordering)
        # Reload the page and expand all subsections to see that the change was persisted.
        outline_page = self.course_outline_page.visit()
        outline_page.q(css='.outline-item.outline-subsection.is-collapsed .ui-toggle-expansion').click()
        verify_ordering(self, outline_page, expected_ordering)
@attr(shard=3)
class CourseOutlineDragAndDropTest(CourseOutlineTest):
    """
    Tests of drag and drop within the outline page.
    """
    __test__ = True
    def populate_course_fixture(self, course_fixture):
        """
        Create a course with one section, two subsections, and four units
        """
        # Drag-handle indices depend on how much of the outline is expanded.
        # with collapsed outline
        self.chap_1_handle = 0
        self.chap_1_seq_1_handle = 1
        # with first sequential expanded
        self.seq_1_vert_1_handle = 2
        self.seq_1_vert_2_handle = 3
        self.chap_1_seq_2_handle = 4
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', "1").add_children(
                XBlockFixtureDesc('sequential', '1.1').add_children(
                    XBlockFixtureDesc('vertical', '1.1.1'),
                    XBlockFixtureDesc('vertical', '1.1.2')
                ),
                XBlockFixtureDesc('sequential', '1.2').add_children(
                    XBlockFixtureDesc('vertical', '1.2.1'),
                    XBlockFixtureDesc('vertical', '1.2.2')
                )
            )
        )
    def drag_and_verify(self, source, target, expected_ordering, outline_page=None):
        """
        Drag the outline element at handle index ``source`` onto the element at
        ``target`` and verify the resulting ordering (also after a reload).
        """
        # BUGFIX: the original used ``lambda (outline): ...`` — Python 2-only
        # tuple-parameter syntax that is a SyntaxError on Python 3.  A plain
        # single parameter is equivalent on both versions.
        self.do_action_and_verify(
            outline_page,
            lambda outline: drag(outline, source, target),
            expected_ordering
        )
    def test_drop_unit_in_collapsed_subsection(self):
        """
        Drag vertical "1.1.2" from subsection "1.1" into collapsed subsection "1.2" which already
        have its own verticals.
        """
        course_outline_page = self.course_outline_page.visit()
        # expand first subsection
        course_outline_page.q(css='.outline-item.outline-subsection.is-collapsed .ui-toggle-expansion').first.click()
        expected_ordering = [{"1": ["1.1", "1.2"]},
                             {"1.1": ["1.1.1"]},
                             {"1.2": ["1.1.2", "1.2.1", "1.2.2"]}]
        self.drag_and_verify(self.seq_1_vert_2_handle, self.chap_1_seq_2_handle, expected_ordering, course_outline_page)
@attr(shard=3)
class WarningMessagesTest(CourseOutlineTest):
    """
    Feature: Warning messages on sections, subsections, and units
    """
    __test__ = True
    # Warning strings expected to be rendered by the outline page for the
    # corresponding unit states.
    STAFF_ONLY_WARNING = 'Contains staff only content'
    LIVE_UNPUBLISHED_WARNING = 'Unpublished changes to live content'
    FUTURE_UNPUBLISHED_WARNING = 'Unpublished changes to content that will release in the future'
    NEVER_PUBLISHED_WARNING = 'Unpublished units will not be released'
    class PublishState(object):
        """
        Default values for representing the published state of a unit
        """
        NEVER_PUBLISHED = 1
        UNPUBLISHED_CHANGES = 2
        PUBLISHED = 3
        # Every possible publish state; used to enumerate fixture combinations.
        VALUES = [NEVER_PUBLISHED, UNPUBLISHED_CHANGES, PUBLISHED]
    class UnitState(object):
        """ Represents the state of a unit """
        def __init__(self, is_released, publish_state, is_locked):
            """ Creates a new UnitState with the given properties """
            self.is_released = is_released
            self.publish_state = publish_state
            self.is_locked = is_locked
        @property
        def name(self):
            """ Returns an appropriate name based on the properties of the unit """
            # The name doubles as the section/subsection/unit title in the
            # fixture, so it must be unique per state combination.
            result = "Released " if self.is_released else "Unreleased "
            if self.publish_state == WarningMessagesTest.PublishState.NEVER_PUBLISHED:
                result += "Never Published "
            elif self.publish_state == WarningMessagesTest.PublishState.UNPUBLISHED_CHANGES:
                result += "Unpublished Changes "
            else:
                result += "Published "
            result += "Locked" if self.is_locked else "Unlocked"
            return result
    def populate_course_fixture(self, course_fixture):
        """ Install a course with various configurations that could produce warning messages """
        # Define the dimensions that map to the UnitState constructor
        features = [
            [True, False],           # Possible values for is_released
            self.PublishState.VALUES,  # Possible values for publish_state
            [True, False]            # Possible values for is_locked
        ]
        # Add a fixture for every state in the product of features
        course_fixture.add_children(*[
            self._build_fixture(self.UnitState(*state)) for state in itertools.product(*features)
        ])
    def _build_fixture(self, unit_state):
        """ Returns an XBlockFixtureDesc with a section, subsection, and possibly unit that has the given state. """
        name = unit_state.name
        # Released states get a start date in the past; unreleased states get
        # one a day in the future.
        start = (datetime(1984, 3, 4) if unit_state.is_released else datetime.now(UTC) + timedelta(1)).isoformat()
        subsection = XBlockFixtureDesc('sequential', name, metadata={'start': start})
        # Children of never published subsections will be added on demand via _ensure_unit_present
        return XBlockFixtureDesc('chapter', name).add_children(
            subsection if unit_state.publish_state == self.PublishState.NEVER_PUBLISHED
            else subsection.add_children(
                XBlockFixtureDesc('vertical', name, metadata={
                    'visible_to_staff_only': True if unit_state.is_locked else None
                })
            )
        )
    def test_released_never_published_locked(self):
        """ Tests that released never published locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True),
            self.STAFF_ONLY_WARNING
        )
    def test_released_never_published_unlocked(self):
        """ Tests that released never published unlocked units display 'Unpublished units will not be released' """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False),
            self.NEVER_PUBLISHED_WARNING
        )
    def test_released_unpublished_changes_locked(self):
        """ Tests that released unpublished changes locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True),
            self.STAFF_ONLY_WARNING
        )
    def test_released_unpublished_changes_unlocked(self):
        """ Tests that released unpublished changes unlocked units display 'Unpublished changes to live content' """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False),
            self.LIVE_UNPUBLISHED_WARNING
        )
    def test_released_published_locked(self):
        """ Tests that released published locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=True),
            self.STAFF_ONLY_WARNING
        )
    def test_released_published_unlocked(self):
        """ Tests that released published unlocked units display no warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=False),
            None
        )
    def test_unreleased_never_published_locked(self):
        """ Tests that unreleased never published locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True),
            self.STAFF_ONLY_WARNING
        )
    def test_unreleased_never_published_unlocked(self):
        """ Tests that unreleased never published unlocked units display 'Unpublished units will not be released' """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False),
            self.NEVER_PUBLISHED_WARNING
        )
    def test_unreleased_unpublished_changes_locked(self):
        """ Tests that unreleased unpublished changes locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True),
            self.STAFF_ONLY_WARNING
        )
    def test_unreleased_unpublished_changes_unlocked(self):
        """
        Tests that unreleased unpublished changes unlocked units display 'Unpublished changes to content that will
        release in the future'
        """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False),
            self.FUTURE_UNPUBLISHED_WARNING
        )
    def test_unreleased_published_locked(self):
        """ Tests that unreleased published locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=True),
            self.STAFF_ONLY_WARNING
        )
    def test_unreleased_published_unlocked(self):
        """ Tests that unreleased published unlocked units display no warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=False),
            None
        )
    def _verify_unit_warning(self, unit_state, expected_status_message):
        """
        Verifies that the given unit's messages match the expected messages.
        If expected_status_message is None, then the unit status message is expected to not be present.
        """
        self._ensure_unit_present(unit_state)
        self.course_outline_page.visit()
        section = self.course_outline_page.section(unit_state.name)
        subsection = section.subsection_at(0)
        subsection.expand_subsection()
        unit = subsection.unit_at(0)
        if expected_status_message == self.STAFF_ONLY_WARNING:
            # Staff-only warnings propagate up to the subsection and section.
            self.assertEqual(section.status_message, self.STAFF_ONLY_WARNING)
            self.assertEqual(subsection.status_message, self.STAFF_ONLY_WARNING)
            self.assertEqual(unit.status_message, self.STAFF_ONLY_WARNING)
        else:
            # All other warnings appear only on the unit itself.
            self.assertFalse(section.has_status_message)
            self.assertFalse(subsection.has_status_message)
            if expected_status_message:
                self.assertEqual(unit.status_message, expected_status_message)
            else:
                self.assertFalse(unit.has_status_message)
    def _ensure_unit_present(self, unit_state):
        """ Ensures that a unit with the given state is present on the course outline """
        if unit_state.publish_state == self.PublishState.PUBLISHED:
            return
        name = unit_state.name
        self.course_outline_page.visit()
        subsection = self.course_outline_page.section(name).subsection(name)
        subsection.expand_subsection()
        if unit_state.publish_state == self.PublishState.UNPUBLISHED_CHANGES:
            # Adding a component to a published unit creates a draft.
            unit = subsection.unit(name).go_to()
            add_discussion(unit)
        elif unit_state.publish_state == self.PublishState.NEVER_PUBLISHED:
            subsection.add_unit()
            unit = ContainerPage(self.browser, None)
            unit.wait_for_page()
        if unit.is_staff_locked != unit_state.is_locked:
            unit.toggle_staff_lock()
@attr(shard=3)
class EditingSectionsTest(CourseOutlineTest):
    """
    Feature: Editing Release date, Due date and grading type.
    """
    __test__ = True
    def test_can_edit_subsection(self):
        """
        Scenario: I can edit settings of subsection.
            Given that I have created a subsection
            Then I see release date, due date and grading policy of subsection in course outline
            When I click on the configuration icon
            Then edit modal window is shown
            And release date, due date and grading policy fields present
            And they have correct initial values
            Then I set new values for these fields
            And I click save button on the modal
            Then I see release date, due date and grading policy of subsection in course outline
        """
        self.course_outline_page.visit()
        subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME)
        # Verify that Release date visible by default
        self.assertTrue(subsection.release_date)
        # Verify that Due date and Policy hidden by default
        self.assertFalse(subsection.due_date)
        self.assertFalse(subsection.policy)
        modal = subsection.edit()
        # Verify fields
        self.assertTrue(modal.has_release_date())
        self.assertTrue(modal.has_release_time())
        self.assertTrue(modal.has_due_date())
        self.assertTrue(modal.has_due_time())
        self.assertTrue(modal.has_policy())
        # Verify initial values
        self.assertEqual(modal.release_date, u'1/1/1970')
        self.assertEqual(modal.release_time, u'00:00')
        self.assertEqual(modal.due_date, u'')
        self.assertEqual(modal.due_time, u'')
        self.assertEqual(modal.policy, u'Not Graded')
        # Set new values
        modal.release_date = '3/12/1972'
        modal.release_time = '04:01'
        modal.due_date = '7/21/2014'
        modal.due_time = '23:39'
        modal.policy = 'Lab'
        modal.save()
        # The outline re-renders the saved values in its display format.
        self.assertIn(u'Released: Mar 12, 1972', subsection.release_date)
        self.assertIn(u'04:01', subsection.release_date)
        self.assertIn(u'Due: Jul 21, 2014', subsection.due_date)
        self.assertIn(u'23:39', subsection.due_date)
        self.assertIn(u'Lab', subsection.policy)
    def test_can_edit_section(self):
        """
        Scenario: I can edit settings of section.
            Given that I have created a section
            Then I see release date of section in course outline
            When I click on the configuration icon
            Then edit modal window is shown
            And release date field present
            And it has correct initial value
            Then I set new value for this field
            And I click save button on the modal
            Then I see release date of section in course outline
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section(SECTION_NAME)
        # Verify that Release date visible by default
        self.assertTrue(section.release_date)
        # Verify that Due date and Policy are not present
        self.assertFalse(section.due_date)
        self.assertFalse(section.policy)
        modal = section.edit()
        # Verify fields
        # Sections only expose a release date; due date and policy belong to
        # subsections.
        self.assertTrue(modal.has_release_date())
        self.assertFalse(modal.has_due_date())
        self.assertFalse(modal.has_policy())
        # Verify initial value
        self.assertEqual(modal.release_date, u'1/1/1970')
        # Set new value
        modal.release_date = '5/14/1969'
        modal.save()
        self.assertIn(u'Released: May 14, 1969', section.release_date)
        # Verify that Due date and Policy are not present
        self.assertFalse(section.due_date)
        self.assertFalse(section.policy)
    def test_subsection_is_graded_in_lms(self):
        """
        Scenario: I can grade subsection from course outline page.
            Given I visit progress page
            And I see that problem in subsection has grading type "Practice"
            Then I visit course outline page
            And I click on the configuration icon of subsection
            And I set grading policy to "Lab"
            And I click save button on the modal
            Then I visit progress page
            And I see that problem in subsection has grading type "Problem"
        """
        progress_page = ProgressPage(self.browser, self.course_id)
        progress_page.visit()
        progress_page.wait_for_page()
        self.assertEqual(u'Practice', progress_page.grading_formats[0])
        self.course_outline_page.visit()
        subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME)
        modal = subsection.edit()
        # Set new values
        modal.policy = 'Lab'
        modal.save()
        # The LMS progress page should reflect the new grading policy.
        progress_page.visit()
        self.assertEqual(u'Problem', progress_page.grading_formats[0])
    def test_unchanged_release_date_is_not_saved(self):
        """
        Scenario: Saving a subsection without changing the release date will not override the release date
            Given that I have created a section with a subsection
            When I open the settings modal for the subsection
            And I pressed save
            And I open the settings modal for the section
            And I change the release date to 07/20/1969
            And I press save
            Then the subsection and the section have the release date 07/20/1969
        """
        self.course_outline_page.visit()
        # Save the subsection modal without touching anything: its (inherited)
        # release date must not become an explicit override.
        modal = self.course_outline_page.section_at(0).subsection_at(0).edit()
        modal.save()
        modal = self.course_outline_page.section_at(0).edit()
        modal.release_date = '7/20/1969'
        modal.save()
        release_text = 'Released: Jul 20, 1969'
        self.assertIn(release_text, self.course_outline_page.section_at(0).release_date)
        self.assertIn(release_text, self.course_outline_page.section_at(0).subsection_at(0).release_date)
@attr(shard=3)
class StaffLockTest(CourseOutlineTest):
"""
Feature: Sections, subsections, and units can be locked and unlocked from the course outline.
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
""" Create a course with one section, two subsections, and four units """
course_fixture.add_children(
XBlockFixtureDesc('chapter', '1').add_children(
XBlockFixtureDesc('sequential', '1.1').add_children(
XBlockFixtureDesc('vertical', '1.1.1'),
XBlockFixtureDesc('vertical', '1.1.2')
),
XBlockFixtureDesc('sequential', '1.2').add_children(
XBlockFixtureDesc('vertical', '1.2.1'),
XBlockFixtureDesc('vertical', '1.2.2')
)
)
)
def _verify_descendants_are_staff_only(self, item):
"""Verifies that all the descendants of item are staff only"""
self.assertTrue(item.is_staff_only)
if hasattr(item, 'children'):
for child in item.children():
self._verify_descendants_are_staff_only(child)
def _remove_staff_lock_and_verify_warning(self, outline_item, expect_warning):
"""Removes staff lock from a course outline item and checks whether or not a warning appears."""
modal = outline_item.edit()
modal.is_explicitly_locked = False
if expect_warning:
self.assertTrue(modal.shows_staff_lock_warning())
else:
self.assertFalse(modal.shows_staff_lock_warning())
modal.save()
    def _toggle_lock_on_unlocked_item(self, outline_item):
        """Toggles outline_item's staff lock on and then off, verifying the staff lock warning"""
        # Precondition: the item starts unlocked.
        self.assertFalse(outline_item.has_staff_lock_warning)
        outline_item.set_staff_lock(True)
        # Locking shows the warning and cascades to every descendant.
        self.assertTrue(outline_item.has_staff_lock_warning)
        self._verify_descendants_are_staff_only(outline_item)
        outline_item.set_staff_lock(False)
        # Unlocking removes the warning again.
        self.assertFalse(outline_item.has_staff_lock_warning)
    def _verify_explicit_staff_lock_remains_after_unlocking_parent(self, child_item, parent_item):
        """Verifies that child_item's explicit staff lock remains after removing parent_item's staff lock"""
        # Explicitly lock the child, then the parent on top of it.
        child_item.set_staff_lock(True)
        parent_item.set_staff_lock(True)
        self.assertTrue(parent_item.has_staff_lock_warning)
        self.assertTrue(child_item.has_staff_lock_warning)
        # Removing the parent's lock must not clear the child's explicit lock.
        parent_item.set_staff_lock(False)
        self.assertFalse(parent_item.has_staff_lock_warning)
        self.assertTrue(child_item.has_staff_lock_warning)
    def test_units_can_be_locked(self):
        """
        Scenario: Units can be locked and unlocked from the course outline page
            Given I have a course with a unit
            When I click on the configuration icon
            And I enable explicit staff locking
            And I click save
            Then the unit shows a staff lock warning
            And when I click on the configuration icon
            And I disable explicit staff locking
            And I click save
            Then the unit does not show a staff lock warning
        """
        self.course_outline_page.visit()
        # Units are only visible once their subsections are expanded.
        self.course_outline_page.expand_all_subsections()
        unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0)
        self._toggle_lock_on_unlocked_item(unit)
    def test_subsections_can_be_locked(self):
        """
        Scenario: Subsections can be locked and unlocked from the course outline page
            Given I have a course with a subsection
            When I click on the subsection's configuration icon
            And I enable explicit staff locking
            And I click save
            Then the subsection shows a staff lock warning
            And all its descendants are staff locked
            And when I click on the subsection's configuration icon
            And I disable explicit staff locking
            And I click save
            Then the subsection does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        self._toggle_lock_on_unlocked_item(subsection)
    def test_sections_can_be_locked(self):
        """
        Scenario: Sections can be locked and unlocked from the course outline page
            Given I have a course with a section
            When I click on the section's configuration icon
            And I enable explicit staff locking
            And I click save
            Then the section shows a staff lock warning
            And all its descendants are staff locked
            And when I click on the section's configuration icon
            And I disable explicit staff locking
            And I click save
            Then the section does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        self._toggle_lock_on_unlocked_item(section)
    def test_explicit_staff_lock_remains_after_unlocking_section(self):
        """
        Scenario: An explicitly locked unit is still locked after removing an inherited lock from a section
            Given I have a course with sections, subsections, and units
            And I have enabled explicit staff lock on a section and one of its units
            When I click on the section's configuration icon
            And I disable explicit staff locking
            And I click save
            Then the unit still shows a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        unit = section.subsection_at(0).unit_at(0)
        # Section is the parent two levels above the unit.
        self._verify_explicit_staff_lock_remains_after_unlocking_parent(unit, section)
    def test_explicit_staff_lock_remains_after_unlocking_subsection(self):
        """
        Scenario: An explicitly locked unit is still locked after removing an inherited lock from a subsection
            Given I have a course with sections, subsections, and units
            And I have enabled explicit staff lock on a subsection and one of its units
            When I click on the subsection's configuration icon
            And I disable explicit staff locking
            And I click save
            Then the unit still shows a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        unit = subsection.unit_at(0)
        # Subsection is the unit's direct parent.
        self._verify_explicit_staff_lock_remains_after_unlocking_parent(unit, subsection)
    def test_section_displays_lock_when_all_subsections_locked(self):
        """
        Scenario: All subsections in section are explicitly locked, section should display staff only warning
            Given I have a course one section and two subsections
            When I enable explicit staff lock on all the subsections
            Then the section shows a staff lock warning
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section_at(0)
        section.subsection_at(0).set_staff_lock(True)
        section.subsection_at(1).set_staff_lock(True)
        # With every child locked, the lock state is implied on the parent.
        self.assertTrue(section.has_staff_lock_warning)
    def test_section_displays_lock_when_all_units_locked(self):
        """
        Scenario: All units in a section are explicitly locked, section should display staff only warning
            Given I have a course with one section, two subsections, and four units
            When I enable explicit staff lock on all the units
            Then the section shows a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        section.subsection_at(0).unit_at(0).set_staff_lock(True)
        section.subsection_at(0).unit_at(1).set_staff_lock(True)
        section.subsection_at(1).unit_at(0).set_staff_lock(True)
        section.subsection_at(1).unit_at(1).set_staff_lock(True)
        self.assertTrue(section.has_staff_lock_warning)
    def test_subsection_displays_lock_when_all_units_locked(self):
        """
        Scenario: All units in subsection are explicitly locked, subsection should display staff only warning
            Given I have a course with one subsection and two units
            When I enable explicit staff lock on all the units
            Then the subsection shows a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        subsection.unit_at(0).set_staff_lock(True)
        subsection.unit_at(1).set_staff_lock(True)
        self.assertTrue(subsection.has_staff_lock_warning)
    def test_section_does_not_display_lock_when_some_subsections_locked(self):
        """
        Scenario: Only some subsections in section are explicitly locked, section should NOT display staff only warning
            Given I have a course with one section and two subsections
            When I enable explicit staff lock on one subsection
            Then the section does not show a staff lock warning
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section_at(0)
        # Only one of the two subsections is locked.
        section.subsection_at(0).set_staff_lock(True)
        self.assertFalse(section.has_staff_lock_warning)
    def test_section_does_not_display_lock_when_some_units_locked(self):
        """
        Scenario: Only some units in section are explicitly locked, section should NOT display staff only warning
            Given I have a course with one section, two subsections, and four units
            When I enable explicit staff lock on three units
            Then the section does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        # Three of the four units are locked; one remains visible.
        section.subsection_at(0).unit_at(0).set_staff_lock(True)
        section.subsection_at(0).unit_at(1).set_staff_lock(True)
        section.subsection_at(1).unit_at(1).set_staff_lock(True)
        self.assertFalse(section.has_staff_lock_warning)
    def test_subsection_does_not_display_lock_when_some_units_locked(self):
        """
        Scenario: Only some units in subsection are explicitly locked, subsection should NOT display staff only warning
            Given I have a course with one subsection and two units
            When I enable explicit staff lock on one unit
            Then the subsection does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        subsection.unit_at(0).set_staff_lock(True)
        self.assertFalse(subsection.has_staff_lock_warning)
    def test_locked_sections_do_not_appear_in_lms(self):
        """
        Scenario: A locked section is not visible to students in the LMS
            Given I have a course with two sections
            When I enable explicit staff lock on one section
            And I click the View Live button to switch to staff view
            Then I see two sections in the sidebar
            And when I switch the view mode to student view
            Then I see one section in the sidebar
        """
        self.course_outline_page.visit()
        # Create a second section, then lock it.
        self.course_outline_page.add_section_from_top_button()
        self.course_outline_page.section_at(1).set_staff_lock(True)
        self.course_outline_page.view_live()
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        # Staff see both sections; students only see the unlocked one.
        self.assertEqual(courseware.num_sections, 2)
        StaffPage(self.browser, self.course_id).set_staff_view_mode('Student')
        self.assertEqual(courseware.num_sections, 1)
    def test_locked_subsections_do_not_appear_in_lms(self):
        """
        Scenario: A locked subsection is not visible to students in the LMS
            Given I have a course with two subsections
            When I enable explicit staff lock on one subsection
            And I click the View Live button to switch to staff view
            Then I see two subsections in the sidebar
            And when I switch the view mode to student view
            Then I see one section in the sidebar
        """
        self.course_outline_page.visit()
        self.course_outline_page.section_at(0).subsection_at(1).set_staff_lock(True)
        self.course_outline_page.view_live()
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        # Staff see both subsections; students only see the unlocked one.
        self.assertEqual(courseware.num_subsections, 2)
        StaffPage(self.browser, self.course_id).set_staff_view_mode('Student')
        self.assertEqual(courseware.num_subsections, 1)
def test_toggling_staff_lock_on_section_does_not_publish_draft_units(self):
    """
    Scenario: Locking and unlocking a section will not publish its draft units
        Given I have a course with a section and unit
        And the unit has a draft and published version
        When I enable explicit staff lock on the section
        And I disable explicit staff lock on the section
        And I click the View Live button to switch to staff view
        Then I see the published version of the unit
    """
    outline = self.course_outline_page
    outline.visit()
    outline.expand_all_subsections()
    # Adding a component creates a draft revision on top of the published unit.
    unit_page = outline.section_at(0).subsection_at(0).unit_at(0).go_to()
    add_discussion(unit_page)
    # Return to the outline and toggle the section lock on and off.
    outline.visit()
    outline.expand_all_subsections()
    section = outline.section_at(0)
    section.set_staff_lock(True)
    section.set_staff_lock(False)
    unit_page = section.subsection_at(0).unit_at(0).go_to()
    unit_page.view_published_version()
    courseware_page = CoursewarePage(self.browser, self.course_id)
    courseware_page.wait_for_page()
    # The published version is still empty, so the draft was not published.
    self.assertEqual(courseware_page.num_xblock_components, 0)
def test_toggling_staff_lock_on_subsection_does_not_publish_draft_units(self):
    """
    Scenario: Locking and unlocking a subsection will not publish its draft units
        Given I have a course with a subsection and unit
        And the unit has a draft and published version
        When I enable explicit staff lock on the subsection
        And I disable explicit staff lock on the subsection
        And I click the View Live button to switch to staff view
        Then I see the published version of the unit
    """
    outline = self.course_outline_page
    outline.visit()
    outline.expand_all_subsections()
    # Adding a component creates a draft revision on top of the published unit.
    unit_page = outline.section_at(0).subsection_at(0).unit_at(0).go_to()
    add_discussion(unit_page)
    # Return to the outline and toggle the subsection lock on and off.
    outline.visit()
    outline.expand_all_subsections()
    subsection = outline.section_at(0).subsection_at(0)
    subsection.set_staff_lock(True)
    subsection.set_staff_lock(False)
    unit_page = subsection.unit_at(0).go_to()
    unit_page.view_published_version()
    courseware_page = CoursewarePage(self.browser, self.course_id)
    courseware_page.wait_for_page()
    # The published version is still empty, so the draft was not published.
    self.assertEqual(courseware_page.num_xblock_components, 0)
def test_removing_staff_lock_from_unit_without_inherited_lock_shows_warning(self):
    """
    Scenario: Removing explicit staff lock from a unit with no inherited lock shows a warning.
        Given I have a course with a subsection and unit
        When I enable explicit staff lock on the unit
        And I disable explicit staff lock on the unit
        Then I see a modal warning.
    """
    outline = self.course_outline_page
    outline.visit()
    outline.expand_all_subsections()
    first_unit = outline.section_at(0).subsection_at(0).unit_at(0)
    first_unit.set_staff_lock(True)
    # No ancestor is locked, so unlocking should raise the warning modal.
    self._remove_staff_lock_and_verify_warning(first_unit, True)
def test_removing_staff_lock_from_subsection_without_inherited_lock_shows_warning(self):
    """
    Scenario: Removing explicit staff lock from a subsection with no inherited lock shows a warning.
        Given I have a course with a section and subsection
        When I enable explicit staff lock on the subsection
        And I disable explicit staff lock on the subsection
        Then I see a modal warning.
    """
    outline = self.course_outline_page
    outline.visit()
    outline.expand_all_subsections()
    first_subsection = outline.section_at(0).subsection_at(0)
    first_subsection.set_staff_lock(True)
    # No ancestor is locked, so unlocking should raise the warning modal.
    self._remove_staff_lock_and_verify_warning(first_subsection, True)
def test_removing_staff_lock_from_unit_with_inherited_lock_shows_no_warning(self):
    """
    Scenario: Removing explicit staff lock from a unit that also inherits staff lock shows no warning.
        Given I have a course with a subsection and unit
        When I enable explicit staff lock on the subsection
        And I enable explicit staff lock on the unit
        When I disable explicit staff lock on the unit
        Then I do not see a modal warning.
    """
    outline = self.course_outline_page
    outline.visit()
    outline.expand_all_subsections()
    parent_subsection = outline.section_at(0).subsection_at(0)
    child_unit = parent_subsection.unit_at(0)
    # Lock both the parent and the child, then unlock only the child.
    parent_subsection.set_staff_lock(True)
    child_unit.set_staff_lock(True)
    # The unit still inherits the subsection's lock, so no warning appears.
    self._remove_staff_lock_and_verify_warning(child_unit, False)
def test_removing_staff_lock_from_subsection_with_inherited_lock_shows_no_warning(self):
    """
    Scenario: Removing explicit staff lock from a subsection that also inherits staff lock shows no warning.
        Given I have a course with a section and subsection
        When I enable explicit staff lock on the section
        And I enable explicit staff lock on the subsection
        When I disable explicit staff lock on the subsection
        Then I do not see a modal warning.
    """
    outline = self.course_outline_page
    outline.visit()
    outline.expand_all_subsections()
    parent_section = outline.section_at(0)
    child_subsection = parent_section.subsection_at(0)
    # Lock both the parent and the child, then unlock only the child.
    parent_section.set_staff_lock(True)
    child_subsection.set_staff_lock(True)
    # The subsection still inherits the section's lock, so no warning appears.
    self._remove_staff_lock_and_verify_warning(child_subsection, False)
@attr(shard=3)
class EditNamesTest(CourseOutlineTest):
    """
    Feature: Click-to-edit section/subsection names
    """

    __test__ = True

    def set_name_and_verify(self, item, old_name, new_name, expected_name):
        """
        Rename ``item`` from ``old_name`` to ``new_name`` via click-to-edit,
        then assert the resulting display name equals ``expected_name``.
        """
        self.assertEqual(item.name, old_name)
        item.change_name(new_name)
        # After editing completes the field must no longer be editable.
        self.assertFalse(item.in_editable_form())
        self.assertEqual(item.name, expected_name)

    def test_edit_section_name(self):
        """
        Scenario: Click-to-edit section name
            Given that I have created a section
            When I click on the name of section
            Then the section name becomes editable
            And given that I have edited the section name
            When I click outside of the edited section name
            Then the section name saves and becomes non-editable
        """
        outline = self.course_outline_page
        outline.visit()
        self.set_name_and_verify(outline.section_at(0), 'Test Section', 'Changed', 'Changed')

    def test_edit_subsection_name(self):
        """
        Scenario: Click-to-edit subsection name
            Given that I have created a subsection
            When I click on the name of subsection
            Then the subsection name becomes editable
            And given that I have edited the subsection name
            When I click outside of the edited subsection name
            Then the subsection name saves and becomes non-editable
        """
        outline = self.course_outline_page
        outline.visit()
        self.set_name_and_verify(
            outline.section_at(0).subsection_at(0), 'Test Subsection', 'Changed', 'Changed'
        )

    def test_edit_empty_section_name(self):
        """
        Scenario: Click-to-edit section name, enter empty name
            Given that I have created a section
            And I have clicked to edit the name of the section
            And I have entered an empty section name
            When I click outside of the edited section name
            Then the section name does not change and becomes non-editable
        """
        outline = self.course_outline_page
        outline.visit()
        # An empty name is rejected; the original name is kept.
        self.set_name_and_verify(outline.section_at(0), 'Test Section', '', 'Test Section')

    def test_edit_empty_subsection_name(self):
        """
        Scenario: Click-to-edit subsection name, enter empty name
            Given that I have created a subsection
            And I have clicked to edit the name of the subsection
            And I have entered an empty subsection name
            When I click outside of the edited subsection name
            Then the subsection name does not change and becomes non-editable
        """
        outline = self.course_outline_page
        outline.visit()
        # An empty name is rejected; the original name is kept.
        self.set_name_and_verify(
            outline.section_at(0).subsection_at(0), 'Test Subsection', '', 'Test Subsection'
        )

    def test_editing_names_does_not_expand_collapse(self):
        """
        Scenario: A section stays in the same expand/collapse state while its name is edited
            Given that I have created a section
            And the section is collapsed
            When I click on the name of the section
            Then the section is collapsed
            And given that I have entered a new name
            Then the section is collapsed
            And given that I press ENTER to finalize the name
            Then the section is collapsed
        """
        outline = self.course_outline_page
        outline.visit()
        section = outline.section_at(0)
        # Collapse the section first, then check the state after each editing step.
        section.expand_subsection()
        self.assertFalse(section.in_editable_form())
        self.assertTrue(section.is_collapsed)
        section.edit_name()
        self.assertTrue(section.in_editable_form())
        self.assertTrue(section.is_collapsed)
        section.enter_name('Changed')
        self.assertTrue(section.is_collapsed)
        section.finalize_name()
        self.assertTrue(section.is_collapsed)
@attr(shard=3)
class CreateSectionsTest(CourseOutlineTest):
    """
    Feature: Create new sections/subsections/units
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with a completely empty course to easily test adding things to it """
        pass

    def test_create_new_section_from_top_button(self):
        """
        Scenario: Create new section from button at top of page
            Given that I am on the course outline
            When I click the "+ Add section" button at the top of the page
            Then I see a new section added to the bottom of the page
            And the display name is in its editable form.
        """
        outline = self.course_outline_page
        outline.visit()
        outline.add_section_from_top_button()
        self.assertEqual(len(outline.sections()), 1)
        # A freshly created section opens with its name ready for editing.
        self.assertTrue(outline.section_at(0).in_editable_form())

    def test_create_new_section_from_bottom_button(self):
        """
        Scenario: Create new section from button at bottom of page
            Given that I am on the course outline
            When I click the "+ Add section" button at the bottom of the page
            Then I see a new section added to the bottom of the page
            And the display name is in its editable form.
        """
        outline = self.course_outline_page
        outline.visit()
        outline.add_section_from_bottom_button()
        self.assertEqual(len(outline.sections()), 1)
        self.assertTrue(outline.section_at(0).in_editable_form())

    def test_create_new_section_from_bottom_button_plus_icon(self):
        """
        Scenario: Create new section from button plus icon at bottom of page
            Given that I am on the course outline
            When I click the plus icon in "+ Add section" button at the bottom of the page
            Then I see a new section added to the bottom of the page
            And the display name is in its editable form.
        """
        outline = self.course_outline_page
        outline.visit()
        # Click the child icon element instead of the button itself.
        outline.add_section_from_bottom_button(click_child_icon=True)
        self.assertEqual(len(outline.sections()), 1)
        self.assertTrue(outline.section_at(0).in_editable_form())

    def test_create_new_subsection(self):
        """
        Scenario: Create new subsection
            Given that I have created a section
            When I click the "+ Add subsection" button in that section
            Then I see a new subsection added to the bottom of the section
            And the display name is in its editable form.
        """
        outline = self.course_outline_page
        outline.visit()
        outline.add_section_from_top_button()
        self.assertEqual(len(outline.sections()), 1)
        outline.section_at(0).add_subsection()
        new_subsections = outline.section_at(0).subsections()
        self.assertEqual(len(new_subsections), 1)
        self.assertTrue(new_subsections[0].in_editable_form())

    def test_create_new_unit(self):
        """
        Scenario: Create new unit
            Given that I have created a section
            And that I have created a subsection within that section
            When I click the "+ Add unit" button in that subsection
            Then I am redirected to a New Unit page
            And the display name is in its editable form.
        """
        outline = self.course_outline_page
        outline.visit()
        outline.add_section_from_top_button()
        self.assertEqual(len(outline.sections()), 1)
        outline.section_at(0).add_subsection()
        self.assertEqual(len(outline.section_at(0).subsections()), 1)
        outline.section_at(0).subsection_at(0).add_unit()
        # Adding a unit navigates to the unit (container) page.
        unit_page = ContainerPage(self.browser, None)
        unit_page.wait_for_page()
        self.assertTrue(unit_page.is_inline_editing_display_name())
@attr(shard=3)
class DeleteContentTest(CourseOutlineTest):
    """
    Feature: Deleting sections/subsections/units
    """

    __test__ = True

    def test_delete_section(self):
        """
        Scenario: Delete section
            Given that I am on the course outline
            When I click the delete button for a section on the course outline
            Then I should receive a confirmation message, asking me if I really want to delete the section
            When I click "Yes, I want to delete this component"
            Then the confirmation message should close
            And the section should immediately be deleted from the course outline
        """
        outline = self.course_outline_page
        outline.visit()
        self.assertEqual(len(outline.sections()), 1)
        outline.section_at(0).delete()
        self.assertEqual(len(outline.sections()), 0)

    def test_cancel_delete_section(self):
        """
        Scenario: Cancel delete of section
            Given that I clicked the delete button for a section on the course outline
            And I received a confirmation message, asking me if I really want to delete the component
            When I click "Cancel"
            Then the confirmation message should close
            And the section should remain in the course outline
        """
        outline = self.course_outline_page
        outline.visit()
        self.assertEqual(len(outline.sections()), 1)
        # Dismiss the confirmation dialog instead of confirming.
        outline.section_at(0).delete(cancel=True)
        self.assertEqual(len(outline.sections()), 1)

    def test_delete_subsection(self):
        """
        Scenario: Delete subsection
            Given that I am on the course outline
            When I click the delete button for a subsection on the course outline
            Then I should receive a confirmation message, asking me if I really want to delete the subsection
            When I click "Yes, I want to delete this component"
            Then the confirmation message should close
            And the subsection should immediately be deleted from the course outline
        """
        outline = self.course_outline_page
        outline.visit()
        self.assertEqual(len(outline.section_at(0).subsections()), 1)
        outline.section_at(0).subsection_at(0).delete()
        self.assertEqual(len(outline.section_at(0).subsections()), 0)

    def test_cancel_delete_subsection(self):
        """
        Scenario: Cancel delete of subsection
            Given that I clicked the delete button for a subsection on the course outline
            And I received a confirmation message, asking me if I really want to delete the subsection
            When I click "cancel"
            Then the confirmation message should close
            And the subsection should remain in the course outline
        """
        outline = self.course_outline_page
        outline.visit()
        self.assertEqual(len(outline.section_at(0).subsections()), 1)
        outline.section_at(0).subsection_at(0).delete(cancel=True)
        self.assertEqual(len(outline.section_at(0).subsections()), 1)

    def test_delete_unit(self):
        """
        Scenario: Delete unit
            Given that I am on the course outline
            When I click the delete button for a unit on the course outline
            Then I should receive a confirmation message, asking me if I really want to delete the unit
            When I click "Yes, I want to delete this unit"
            Then the confirmation message should close
            And the unit should immediately be deleted from the course outline
        """
        outline = self.course_outline_page
        outline.visit()
        # Units are only visible once the subsection is expanded.
        outline.section_at(0).subsection_at(0).expand_subsection()
        self.assertEqual(len(outline.section_at(0).subsection_at(0).units()), 1)
        outline.section_at(0).subsection_at(0).unit_at(0).delete()
        self.assertEqual(len(outline.section_at(0).subsection_at(0).units()), 0)

    def test_cancel_delete_unit(self):
        """
        Scenario: Cancel delete of unit
            Given that I clicked the delete button for a unit on the course outline
            And I received a confirmation message, asking me if I really want to delete the unit
            When I click "Cancel"
            Then the confirmation message should close
            And the unit should remain in the course outline
        """
        outline = self.course_outline_page
        outline.visit()
        outline.section_at(0).subsection_at(0).expand_subsection()
        self.assertEqual(len(outline.section_at(0).subsection_at(0).units()), 1)
        outline.section_at(0).subsection_at(0).unit_at(0).delete(cancel=True)
        self.assertEqual(len(outline.section_at(0).subsection_at(0).units()), 1)

    def test_delete_all_no_content_message(self):
        """
        Scenario: Delete all sections/subsections/units in a course, "no content" message should appear
            Given that I delete all sections, subsections, and units in a course
            When I visit the course outline
            Then I will see a message that says, "You haven't added any content to this course yet"
            And see a + Add Section button
        """
        outline = self.course_outline_page
        outline.visit()
        self.assertFalse(outline.has_no_content_message)
        outline.section_at(0).delete()
        self.assertEqual(len(outline.sections()), 0)
        # With the last section gone, the empty-course message is shown.
        self.assertTrue(outline.has_no_content_message)
@attr(shard=3)
class ExpandCollapseMultipleSectionsTest(CourseOutlineTest):
    """
    Feature: Courses with multiple sections can expand and collapse all sections.
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with a course with two sections """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit')
                )
            ),
            XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit 2')
                )
            )
        )

    def verify_all_sections(self, collapsed):
        """
        Verifies that all sections are collapsed if collapsed is True, otherwise all expanded.
        """
        for section in self.course_outline_page.sections():
            self.assertEqual(collapsed, section.is_collapsed)

    def toggle_all_sections(self):
        """
        Toggles the expand collapse state of all sections.
        """
        # NOTE(review): expand_subsection appears to toggle the section's
        # expand/collapse state (it is also used to collapse elsewhere in
        # this file) — confirm against the page object.
        for section in self.course_outline_page.sections():
            section.expand_subsection()

    def test_expanded_by_default(self):
        """
        Scenario: The default layout for the outline page is to show sections in expanded view
            Given I have a course with sections
            When I navigate to the course outline page
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        # assertEqual (not the deprecated assertEquals alias) for consistency
        # with the rest of this file.
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.verify_all_sections(collapsed=False)

    def test_no_expand_link_for_empty_course(self):
        """
        Scenario: Collapse link is removed after last section of a course is deleted
            Given I have a course with multiple sections
            And I navigate to the course outline page
            When I will confirm all alerts
            And I press the "section" delete icon
            Then I do not see the "Collapse All Sections" link
            And I will see a message that says "You haven't added any content to this course yet"
        """
        self.course_outline_page.visit()
        for section in self.course_outline_page.sections():
            section.delete()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
        self.assertTrue(self.course_outline_page.has_no_content_message)

    def test_collapse_all_when_all_expanded(self):
        """
        Scenario: Collapse all sections when all sections are expanded
            Given I navigate to the outline page of a course with sections
            And all sections are expanded
            When I click the "Collapse All Sections" link
            Then I see the "Expand All Sections" link
            And all sections are collapsed
        """
        self.course_outline_page.visit()
        self.verify_all_sections(collapsed=False)
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.verify_all_sections(collapsed=True)

    def test_collapse_all_when_some_expanded(self):
        """
        Scenario: Collapsing all sections when 1 or more sections are already collapsed
            Given I navigate to the outline page of a course with sections
            And all sections are expanded
            When I collapse the first section
            And I click the "Collapse All Sections" link
            Then I see the "Expand All Sections" link
            And all sections are collapsed
        """
        self.course_outline_page.visit()
        self.verify_all_sections(collapsed=False)
        # Collapse just the first section before using the global link.
        self.course_outline_page.section_at(0).expand_subsection()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.verify_all_sections(collapsed=True)

    def test_expand_all_when_all_collapsed(self):
        """
        Scenario: Expanding all sections when all sections are collapsed
            Given I navigate to the outline page of a course with multiple sections
            And I click the "Collapse All Sections" link
            When I click the "Expand All Sections" link
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.verify_all_sections(collapsed=False)

    def test_expand_all_when_some_collapsed(self):
        """
        Scenario: Expanding all sections when 1 or more sections are already expanded
            Given I navigate to the outline page of a course with multiple sections
            And I click the "Collapse All Sections" link
            When I expand the first section
            And I click the "Expand All Sections" link
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        # We have seen unexplainable sporadic failures in this test. Try disabling animations to see
        # if that helps.
        disable_animations(self.course_outline_page)
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.verify_all_sections(collapsed=True)
        self.course_outline_page.section_at(0).expand_subsection()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.verify_all_sections(collapsed=False)
@attr(shard=3)
class ExpandCollapseSingleSectionTest(CourseOutlineTest):
    """
    Feature: Courses with a single section can expand and collapse all sections.
    """

    __test__ = True

    def test_no_expand_link_for_empty_course(self):
        """
        Scenario: Collapse link is removed after last section of a course is deleted
            Given I have a course with one section
            And I navigate to the course outline page
            When I will confirm all alerts
            And I press the "section" delete icon
            Then I do not see the "Collapse All Sections" link
            And I will see a message that says "You haven't added any content to this course yet"
        """
        self.course_outline_page.visit()
        self.course_outline_page.section_at(0).delete()
        # assertEqual (not the deprecated assertEquals alias) for consistency
        # with the rest of this file.
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
        self.assertTrue(self.course_outline_page.has_no_content_message)

    def test_old_subsection_stays_collapsed_after_creation(self):
        """
        Scenario: Collapsed subsection stays collapsed after creating a new subsection
            Given I have a course with one section and subsection
            And I navigate to the course outline page
            Then the subsection is collapsed
            And when I create a new subsection
            Then the first subsection is collapsed
            And the second subsection is expanded
        """
        self.course_outline_page.visit()
        self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed)
        self.course_outline_page.section_at(0).add_subsection()
        # The pre-existing subsection keeps its collapsed state; only the
        # newly created one opens expanded.
        self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed)
        self.assertFalse(self.course_outline_page.section_at(0).subsection_at(1).is_collapsed)
@attr(shard=3)
class ExpandCollapseEmptyTest(CourseOutlineTest):
    """
    Feature: Courses with no sections initially can expand and collapse all sections after addition.
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with an empty course """
        pass

    def test_no_expand_link_for_empty_course(self):
        """
        Scenario: Expand/collapse for a course with no sections
            Given I have a course with no sections
            When I navigate to the course outline page
            Then I do not see the "Collapse All Sections" link
        """
        self.course_outline_page.visit()
        # assertEqual (not the deprecated assertEquals alias) for consistency
        # with the rest of this file.
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)

    def test_link_appears_after_section_creation(self):
        """
        Scenario: Collapse link appears after creating first section of a course
            Given I have a course with no sections
            When I navigate to the course outline page
            And I add a section
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
        self.course_outline_page.add_section_from_top_button()
        # Once a section exists, the global collapse link appears and the new
        # section starts expanded.
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.assertFalse(self.course_outline_page.section_at(0).is_collapsed)
@attr(shard=3)
class DefaultStatesEmptyTest(CourseOutlineTest):
    """
    Feature: Misc course outline default states/actions when starting with an empty course
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with an empty course """
        pass

    def test_empty_course_message(self):
        """
        Scenario: Empty course state
            Given that I am in a course with no sections, subsections, nor units
            When I visit the course outline
            Then I will see a message that says "You haven't added any content to this course yet"
            And see a + Add Section button
        """
        outline = self.course_outline_page
        outline.visit()
        # An empty course shows the no-content message plus an add-section button.
        self.assertTrue(outline.has_no_content_message)
        self.assertTrue(outline.bottom_add_section_button.is_present())
@attr(shard=3)
class DefaultStatesContentTest(CourseOutlineTest):
    """
    Feature: Misc course outline default states/actions when starting with a course with content
    """

    __test__ = True

    def test_view_live(self):
        """
        Scenario: View Live version from course outline
            Given that I am on the course outline
            When I click the "View Live" button
            Then a new tab will open to the course on the LMS
        """
        outline = self.course_outline_page
        outline.visit()
        outline.view_live()
        courseware_page = CoursewarePage(self.browser, self.course_id)
        courseware_page.wait_for_page()
        # The fixture course contains exactly these three components, in order.
        self.assertEqual(courseware_page.num_xblock_components, 3)
        self.assertEqual(courseware_page.xblock_component_type(0), 'problem')
        self.assertEqual(courseware_page.xblock_component_type(1), 'html')
        self.assertEqual(courseware_page.xblock_component_type(2), 'discussion')
@attr(shard=3)
class UnitNavigationTest(CourseOutlineTest):
    """
    Feature: Navigate to units
    """

    __test__ = True

    def test_navigate_to_unit(self):
        """
        Scenario: Click unit name to navigate to unit page
            Given that I have expanded a section/subsection so I can see unit names
            When I click on a unit name
            Then I will be taken to the appropriate unit page
        """
        outline = self.course_outline_page
        outline.visit()
        outline.section_at(0).subsection_at(0).expand_subsection()
        # go_to() returns the unit (container) page object for the clicked unit.
        unit_page = outline.section_at(0).subsection_at(0).unit_at(0).go_to()
        unit_page.wait_for_page()
@attr(shard=3)
class PublishSectionTest(CourseOutlineTest):
    """
    Feature: Publish sections.
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """
        Sets up a course structure with 2 subsections inside a single section.
        The first subsection has 2 units, and the second subsection has one unit.
        """
        self.courseware = CoursewarePage(self.browser, self.course_id)
        self.course_nav = CourseNavPage(self.browser)
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
                XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
                    XBlockFixtureDesc('vertical', UNIT_NAME),
                    XBlockFixtureDesc('vertical', 'Test Unit 2'),
                ),
                XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit 3'),
                ),
            ),
        )

    def test_unit_publishing(self):
        """
        Scenario: Can publish a unit and see published content in LMS
            Given I have a section with 2 subsections and 3 unpublished units
            When I go to the course outline
            Then I see publish button for the first unit, subsection, section
            When I publish the first unit
            Then I see that publish button for the first unit disappears
            And I see publish buttons for subsection, section
            And I see the changed content in LMS
        """
        self._add_unpublished_content()
        self.course_outline_page.visit()
        section, subsection, unit = self._get_items()
        self.assertTrue(unit.publish_action)
        self.assertTrue(subsection.publish_action)
        self.assertTrue(section.publish_action)
        unit.publish()
        # Only the published unit loses its publish button; ancestors still
        # have other unpublished children.
        self.assertFalse(unit.publish_action)
        self.assertTrue(subsection.publish_action)
        self.assertTrue(section.publish_action)
        self.courseware.visit()
        self.assertEqual(1, self.courseware.num_xblock_components)

    def test_subsection_publishing(self):
        """
        Scenario: Can publish a subsection and see published content in LMS
            Given I have a section with 2 subsections and 3 unpublished units
            When I go to the course outline
            Then I see publish button for the unit, subsection, section
            When I publish the first subsection
            Then I see that publish button for the first subsection disappears
            And I see that publish buttons disappear for the child units of the subsection
            And I see publish button for section
            And I see the changed content in LMS
        """
        self._add_unpublished_content()
        self.course_outline_page.visit()
        section, subsection, unit = self._get_items()
        self.assertTrue(unit.publish_action)
        self.assertTrue(subsection.publish_action)
        self.assertTrue(section.publish_action)
        self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME).publish()
        # Publishing the subsection also publishes its child units; the
        # section still has an unpublished second subsection.
        self.assertFalse(unit.publish_action)
        self.assertFalse(subsection.publish_action)
        self.assertTrue(section.publish_action)
        self.courseware.visit()
        self.assertEqual(1, self.courseware.num_xblock_components)
        self.courseware.go_to_sequential_position(2)
        self.assertEqual(1, self.courseware.num_xblock_components)

    def test_section_publishing(self):
        """
        Scenario: Can publish a section and see published content in LMS
            Given I have a section with 2 subsections and 3 unpublished units
            When I go to the course outline
            Then I see publish button for the unit, subsection, section
            When I publish the section
            Then I see that publish buttons disappears
            And I see the changed content in LMS
        """
        self._add_unpublished_content()
        self.course_outline_page.visit()
        section, subsection, unit = self._get_items()
        self.assertTrue(subsection.publish_action)
        self.assertTrue(section.publish_action)
        self.assertTrue(unit.publish_action)
        self.course_outline_page.section(SECTION_NAME).publish()
        # Publishing the section publishes everything beneath it.
        self.assertFalse(subsection.publish_action)
        self.assertFalse(section.publish_action)
        self.assertFalse(unit.publish_action)
        self.courseware.visit()
        self.assertEqual(1, self.courseware.num_xblock_components)
        self.courseware.go_to_sequential_position(2)
        self.assertEqual(1, self.courseware.num_xblock_components)
        self.course_nav.go_to_section(SECTION_NAME, 'Test Subsection 2')
        self.assertEqual(1, self.courseware.num_xblock_components)

    def _add_unpublished_content(self):
        """
        Adds unpublished HTML content to first three units in the course.
        """
        # range (not the Python-2-only xrange) behaves identically for this
        # three-element loop and keeps the code forward-compatible.
        for index in range(3):
            self.course_fixture.create_xblock(
                self.course_fixture.get_nested_xblocks(category="vertical")[index].locator,
                XBlockFixtureDesc('html', 'Unpublished HTML Component ' + str(index)),
            )

    def _get_items(self):
        """
        Returns first section, subsection, and unit on the page.
        """
        section = self.course_outline_page.section(SECTION_NAME)
        subsection = section.subsection(SUBSECTION_NAME)
        unit = subsection.expand_subsection().unit(UNIT_NAME)
        return (section, subsection, unit)
@attr(shard=3)
class DeprecationWarningMessageTest(CourseOutlineTest):
    """
    Feature: Verify deprecation warning message.
    """
    # Expected static text fragments of the deprecation banner on the outline page.
    HEADING_TEXT = 'This course uses features that are no longer supported.'
    COMPONENT_LIST_HEADING = 'You must delete or replace the following components.'
    # NOTE(review): the string below contains an accented 'édX' — presumably it
    # mirrors the rendered page copy exactly; confirm against the warning text.
    ADVANCE_MODULES_REMOVE_TEXT = (
        u'To avoid errors, édX strongly recommends that you remove unsupported features '
        u'from the course advanced settings. To do this, go to the Advanced Settings '
        u'page, locate the "Advanced Module List" setting, and then delete the following '
        u'modules from the list.'
    )
    # Display name expected for a deprecated component created with an empty name.
    DEFAULT_DISPLAYNAME = "Deprecated Component"

    def _add_deprecated_advance_modules(self, block_types):
        """
        Add `block_types` into `Advanced Module List`

        Arguments:
            block_types (list): list of block types
        """
        self.advanced_settings.visit()
        self.advanced_settings.set_values({"Advanced Module List": json.dumps(block_types)})

    def _create_deprecated_components(self):
        """
        Create deprecated components (a poll and a survey), both attached to
        the first vertical of the course fixture.
        """
        parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
        self.course_fixture.create_xblock(
            parent_vertical.locator,
            XBlockFixtureDesc('poll', "Poll", data=load_data_str('poll_markdown.xml'))
        )
        self.course_fixture.create_xblock(parent_vertical.locator, XBlockFixtureDesc('survey', 'Survey'))

    def _verify_deprecation_warning_info(
            self,
            deprecated_blocks_present,
            components_present,
            components_display_name_list=None,
            deprecated_modules_list=None
    ):
        """
        Verify deprecation warning

        Arguments:
            deprecated_blocks_present (bool): deprecated blocks remove text and
                module list are visible if True else False
            components_present (bool): components list shown if True else False
            components_display_name_list (list): list of components display name
            deprecated_modules_list (list): list of deprecated advance modules
        """
        self.assertTrue(self.course_outline_page.deprecated_warning_visible)
        self.assertEqual(self.course_outline_page.warning_heading_text, self.HEADING_TEXT)
        self.assertEqual(self.course_outline_page.modules_remove_text_shown, deprecated_blocks_present)
        if deprecated_blocks_present:
            self.assertEqual(self.course_outline_page.modules_remove_text, self.ADVANCE_MODULES_REMOVE_TEXT)
            self.assertEqual(self.course_outline_page.deprecated_advance_modules, deprecated_modules_list)
        self.assertEqual(self.course_outline_page.components_visible, components_present)
        if components_present:
            self.assertEqual(self.course_outline_page.components_list_heading, self.COMPONENT_LIST_HEADING)
            # Order-insensitive comparison of the listed component names.
            self.assertItemsEqual(self.course_outline_page.components_display_names, components_display_name_list)

    def test_no_deprecation_warning_message_present(self):
        """
        Scenario: Verify that deprecation warning message is not shown if no deprecated
            advance modules are not present and also no deprecated component exist in
            course outline.
        When I goto course outline
        Then I don't see any deprecation warning
        """
        self.course_outline_page.visit()
        self.assertFalse(self.course_outline_page.deprecated_warning_visible)

    def test_deprecation_warning_message_present(self):
        """
        Scenario: Verify deprecation warning message if deprecated modules
            and components are present.
        Given I have "poll" advance modules present in `Advanced Module List`
        And I have created 2 poll components
        When I go to course outline
        Then I see poll deprecated warning
        And I see correct poll deprecated warning heading text
        And I see correct poll deprecated warning advance modules remove text
        And I see list of poll components with correct display names
        """
        self._add_deprecated_advance_modules(block_types=['poll', 'survey'])
        self._create_deprecated_components()
        self.course_outline_page.visit()
        self._verify_deprecation_warning_info(
            deprecated_blocks_present=True,
            components_present=True,
            components_display_name_list=['Poll', 'Survey'],
            deprecated_modules_list=['poll', 'survey']
        )

    def test_deprecation_warning_with_no_displayname(self):
        """
        Scenario: Verify deprecation warning message if poll components are present.
        Given I have created 1 poll deprecated component
        When I go to course outline
        Then I see poll deprecated warning
        And I see correct poll deprecated warning heading text
        And I see list of poll components with correct message
        """
        parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
        # Create a deprecated component with display_name to be empty and make sure
        # the deprecation warning is displayed with the default display name.
        self.course_fixture.create_xblock(
            parent_vertical.locator,
            XBlockFixtureDesc(category='poll', display_name="", data=load_data_str('poll_markdown.xml'))
        )
        self.course_outline_page.visit()
        self._verify_deprecation_warning_info(
            deprecated_blocks_present=False,
            components_present=True,
            components_display_name_list=[self.DEFAULT_DISPLAYNAME],
        )

    def test_warning_with_poll_advance_modules_only(self):
        """
        Scenario: Verify that deprecation warning message is shown if only
            poll advance modules are present and no poll component exist.
        Given I have poll advance modules present in `Advanced Module List`
        When I go to course outline
        Then I see poll deprecated warning
        And I see correct poll deprecated warning heading text
        And I see correct poll deprecated warning advance modules remove text
        And I don't see list of poll components
        """
        self._add_deprecated_advance_modules(block_types=['poll', 'survey'])
        self.course_outline_page.visit()
        self._verify_deprecation_warning_info(
            deprecated_blocks_present=True,
            components_present=False,
            deprecated_modules_list=['poll', 'survey']
        )

    def test_warning_with_poll_components_only(self):
        """
        Scenario: Verify that deprecation warning message is shown if only
            poll component exist and no poll advance modules are present.
        Given I have created two poll components
        When I go to course outline
        Then I see poll deprecated warning
        And I see correct poll deprecated warning heading text
        And I don't see poll deprecated warning advance modules remove text
        And I see list of poll components with correct display names
        """
        self._create_deprecated_components()
        self.course_outline_page.visit()
        self._verify_deprecation_warning_info(
            deprecated_blocks_present=False,
            components_present=True,
            components_display_name_list=['Poll', 'Survey']
        )
@attr(shard=4)
class SelfPacedOutlineTest(CourseOutlineTest):
    """Test the course outline for a self-paced course."""

    def populate_course_fixture(self, course_fixture):
        # Build a minimal section/subsection/unit skeleton for the outline tests.
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
                XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
                    XBlockFixtureDesc('vertical', UNIT_NAME)
                )
            ),
        )
        # Mark the course self-paced with a start date in the future.
        # NOTE(review): mixes the `course_fixture` argument with
        # `self.course_fixture` — presumably the same object; confirm.
        self.course_fixture.add_course_details({
            'self_paced': True,
            'start_date': datetime.now() + timedelta(days=1)
        })
        # Enable the self-paced feature flag server-side.
        ConfigModelFixture('/config/self_paced', {'enabled': True}).install()

    def test_release_dates_not_shown(self):
        """
        Scenario: Ensure that block release dates are not shown on the
            course outline page of a self-paced course.
        Given I am the author of a self-paced course
        When I go to the course outline
        Then I should not see release dates for course content
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section(SECTION_NAME)
        self.assertEqual(section.release_date, '')
        subsection = section.subsection(SUBSECTION_NAME)
        self.assertEqual(subsection.release_date, '')

    def test_edit_section_and_subsection(self):
        """
        Scenario: Ensure that block release/due dates are not shown
            in their settings modals.
        Given I am the author of a self-paced course
        When I go to the course outline
        And I click on settings for a section or subsection
        Then I should not see release or due date settings
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section(SECTION_NAME)
        modal = section.edit()
        self.assertFalse(modal.has_release_date())
        self.assertFalse(modal.has_due_date())
        modal.cancel()
        subsection = section.subsection(SUBSECTION_NAME)
        modal = subsection.edit()
        self.assertFalse(modal.has_release_date())
        self.assertFalse(modal.has_due_date())
| synergeticsedx/deployment-wipro | common/test/acceptance/tests/studio/test_studio_outline.py | Python | agpl-3.0 | 81,985 | [
"VisIt"
] | 3c94d71695a09fbd5c604167e0504a48618cd67dc0505e42c99f60f3279c9617 |
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
import time
from ..helpers import UniqueCourseTest
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.problem import ProblemPage
from ...pages.common.logout import LogoutPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
class CoursewareTest(UniqueCourseTest):
    """
    Test courseware.
    """
    USERNAME = "STUDent_TESTER".upper()  # NOTE(review): kept semantics — see below
    EMAIL = "student101@example.com"

    def setUp(self):
        super(CoursewareTest, self).setUp()

        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.course_outline = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        # Install a course with sections/problems, tabs, updates, and handouts
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 1')
                )
            ),
            XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 2')
                )
            )
        ).install()

        # Auto-auth register for the course.
        self._auto_auth(self.USERNAME, self.EMAIL, False)

    def _goto_problem_page(self):
        """
        Open problem page with assertion.
        """
        self.courseware_page.visit()
        self.problem_page = ProblemPage(self.browser)
        self.assertEqual(self.problem_page.problem_name, 'TEST PROBLEM 1')

    def _change_problem_release_date_in_studio(self):
        """
        Set the first subsection's release date to 01/01/2015 via the Studio
        outline's settings modal and save.
        """
        self.course_outline.q(css=".subsection-header-actions .configure-button").first.click()
        self.course_outline.q(css="#start_date").fill("01/01/2015")
        self.course_outline.q(css=".action-save").first.click()

    def _auto_auth(self, username, email, staff):
        """
        Logout and login with given credentials.
        """
        AutoAuthPage(self.browser, username=username, email=email,
                     course_id=self.course_id, staff=staff).visit()

    def test_courseware(self):
        """
        Test courseware if recent visited subsection become unpublished.
        """
        # Visit problem page as a student.
        self._goto_problem_page()

        # Logout and login as a staff user.
        LogoutPage(self.browser).visit()
        self._auto_auth("STAFF_TESTER", "staff101@example.com", True)

        # Visit course outline page in studio.
        self.course_outline.visit()

        # Set release date for subsection in future.
        self._change_problem_release_date_in_studio()

        # Wait for 2 seconds to save new date.
        time.sleep(2)

        # Logout and login as a student.
        LogoutPage(self.browser).visit()
        self._auto_auth(self.USERNAME, self.EMAIL, False)

        # Visit courseware as a student.
        self.courseware_page.visit()
        # Problem name should be "TEST PROBLEM 2".
        # (self.problem_page was bound earlier in _goto_problem_page.)
        self.assertEqual(self.problem_page.problem_name, 'TEST PROBLEM 2')
| peterm-itr/edx-platform | common/test/acceptance/tests/lms/test_lms_courseware.py | Python | agpl-3.0 | 3,652 | [
"VisIt"
] | 57683293a3c729363d5ea310d52020c8ccc422af615c78d7fa89a083fe187b6a |
'''
The settings for OSMC are handled by the OSMC Settings Addon (OSA).
In order to more easily accommodate future changes and enhancements, each OSMC settings bundle (module) is a separate addon.
The module can take the form of an xbmc service, an xbmc script, or an xbmc module, but it must be installed into the users'
/usr/share/kodi/addons folder.
The OSA collects the modules it can find, loads their icons, and launches them individually when the user clicks on an icon.
The modules can either have their own GUI, or they can leverage the settings interface provided by XBMC. If the OSG uses the XBMC
settings interface, then all of their settings must be stored in the addons settings.xml. This is true even if the source of record
is a separate config file.
An example of this type is the Pi settings module; the actual settings are read from the config.txt, then written to the
settings.xml for display in kodi, then finally all changes are written back to the config.txt. The Pi module detects user
changes to the settings by identifying the differences between a newly read settings.xml and the values from a previously
read settings.xml.
The values of the settings displayed by this module are only ever populated by the items in the settings.xml. [Note: meaning that
if the settings data is retrieved from a different source, it will need to be populated in the module before it is displayed
to the user.]
Each module must have in its folder, a sub-folder called 'resources/osmc'. Within that folder must reside this script (OSMCSetting.py),
and the icons to be used in the OSG to represent the module (FX_Icon.png and FO_Icon.png for unfocused and focused images
respectively).
When the OSA creates the OSMC Settings GUI (OSG), these modules are identified and the OSMCSetting.py script in each of them
is imported. This script provides the mechanism for the OSG to apply the changes required from a change in a setting.
The OSMCSetting.py file must have a class called OSMCSettingClass as shown below.
The key variables in this class are:
addonid : The id for the addon. This must be the id declared in the addons addon.xml.
description : The description for the module, shown in the OSA
reboot_required : A boolean to declare if the OS needs to be rebooted. If a change in a specific setting
requires an OS reboot to take effect, this is the flag that will let the OSG know.
setting_data_method : This dictionary contains:
- the name of all settings in the module
- the current value of those settings
- [optional] apply - a method to call for each setting when the value changes
- [optional] translate - a method to call to translate the data before adding it to the
setting_data_method dict. The translate method must have a 'reverse' argument which
when set to True, reverses the transformation.
The key methods of this class are:
open_settings_window : This is called by the OSG when the icon is clicked. This will open the settings window.
Usually this would be __addon__.OpenSettings(), but it could be any other script.
This allows the creation of action buttons in the GUI, as well as allowing developers
to script and skin their own user interfaces.
[optional] first_method : called before any individual settings changes are applied.
[optional] final_method : called after all the individual settings changes are done.
[optional] boot_method : called when the OSA is first started.
apply_settings : This is called by the OSG to apply the changes to any settings that have changed.
It calls the first setting method, if it exists.
Then it calls the method listed in setting_data_method for each setting. Then it
calls the final method, again, if it exists.
populate_setting_data_method : This method is used to populate the setting_data_method with the current settings data.
Usually this will be from the addons setting data stored in settings.xml and retrieved
using the settings_retriever_xml method.
Sometimes the user is able to edit external setting files (such as the Pi's config.txt).
If the developer wants to use this source in place of the data stored in the
settings.xml, then they should edit this method to include a mechanism to retrieve and
parse that external data. As the window shown in the OSG populates only with data from
the settings.xml, the developer should ensure that the external data is loaded into that
xml before the settings window is opened.
settings_retriever_xml : This method is used to retrieve all the data for the settings listed in the
setting_data_method from the addons settings.xml.
The developer is free to create any methods they see fit, but the ones listed above are specifically used by the OSA.
Specifically, the apply_settings method is called when the OSA closes.
Settings changes are applied when the OSG is called to close. But this behaviour can be changed to occur when the addon
settings window closes by editing the open_settings_window. The method apply_settings will still be called by OSA, so
keep that in mind.
'''
# XBMC Modules
import xbmcaddon
import xbmc
import xbmcgui
import sys
import os
import threading
addonid = "script.module.osmcsetting.networking"
__addon__ = xbmcaddon.Addon(addonid)
# Custom modules
sys.path.append(xbmc.translatePath(os.path.join(xbmcaddon.Addon(addonid).getAddonInfo('path'), 'resources','lib')))
# OSMC SETTING Modules
from networking_gui import networking_gui
import osmc_network
from osmc_advset_editor import AdvancedSettingsEditor
DIALOG = xbmcgui.Dialog()
def log(message):
    """Write a debug-level line to the Kodi log, prefixed for this module."""
    try:
        text = str(message)
    except UnicodeEncodeError:
        # Fall back to a lossy UTF-8 encoding for non-ASCII unicode input.
        text = message.encode('utf-8', 'ignore')
    xbmc.log('OSMC NETWORKING ' + str(text), level=xbmc.LOGDEBUG)
def lang(id):
    """Return the addon's localized string for *id*, encoded as UTF-8 bytes."""
    return __addon__.getLocalizedString(id).encode('utf-8', 'ignore')
class OSMCSettingClass(threading.Thread):

    '''
    An OSMCSettingClass is a way to instantiate the settings of an OSMC settings module, and make them available to the
    OSMC Settings Addon (OSA).
    '''

    def __init__(self):
        '''
        The setting_data_method contains all the settings in the settings group, as well as the methods to call when a
        setting_value has changed and the existing setting_value.
        '''

        super(OSMCSettingClass, self).__init__()

        self.addonid = "script.module.osmcsetting.networking"
        self.me = xbmcaddon.Addon(self.addonid)

        # this is what is displayed in the main settings gui
        self.shortname = 'Network'

        self.description = """
This is network settings, it contains settings for the network.

MORE TEXT SHOULD GO HERE
"""

        # maps setting name -> {'setting_value': ..., optional 'apply'/'translate' callables}
        self.setting_data_method = { }

        # populate the settings data in the setting_data_method
        self.populate_setting_data_method()

        # create the advanced settings reader to determine if Wait_for_Network should be activated
        self.ASE = AdvancedSettingsEditor(log)

        # read advancedsettings.xml and convert it into a dictionary
        advset_dict = self.ASE.parse_advanced_settings()

        # check whether the advanced settings dict contains valid MySQL information
        valid_advset_dict, _ = self.ASE.validate_advset_dict(advset_dict, reject_empty=True, exclude_name=True)

        # when a valid MySQL advanced settings file is found, toggle the Wait_for_Network setting to ON
        if valid_advset_dict:

            # only proceed if the (either) server is not on the localhost
            if self.ASE.server_not_localhost(advset_dict):

                # confirm that wait_for_network is not already enabled
                if not osmc_network.is_connman_wait_for_network_enabled():
                    # ask the user before changing the boot behaviour; auto-close after 10s
                    undo_change = DIALOG.yesno('MyOSMC', lang(32078), nolabel=lang(32080), yeslabel=lang(32079), autoclose=10000)

                    if not undo_change:
                        osmc_network.toggle_wait_for_network(True)

        # a flag to determine whether a setting change requires a reboot to take effect
        self.reboot_required = False

        log('START')
        for x, k in self.setting_data_method.iteritems():
            log("%s = %s" % (x, k.get('setting_value', 'no setting value')))

    def populate_setting_data_method(self):
        '''
        Populates the setting_value in the setting_data_method.
        '''

        # this is the method to use if you are populating the dict from the settings.xml
        latest_settings = self.settings_retriever_xml()

        # cycle through the setting_data_method dict, and populate with the settings values
        for key in self.setting_data_method.keys():

            # grab the translate method (if there is one)
            translate_method = self.setting_data_method.get(key, {}).get('translate', {})

            # get the setting value, translate it if needed
            if translate_method:
                setting_value = translate_method(latest_settings[key])
            else:
                setting_value = latest_settings[key]

            # add it to the dictionary
            self.setting_data_method[key]['setting_value'] = setting_value

    def run(self, usePreseed=False):
        '''
        The method that determines what happens when the item is clicked in the settings GUI.
        Opens the networking GUI (skinned for 720p when the skin reports that height).
        '''
        log(xbmcaddon.Addon("script.module.osmcsetting.networking").getAddonInfo('id'))
        me = xbmcaddon.Addon(self.addonid)
        scriptPath = me.getAddonInfo('path')
        xml = "network_gui_720.xml" if xbmcgui.Window(10000).getProperty("SkinHeight") == '720' else "network_gui.xml"
        self.GUI = networking_gui(xml, scriptPath, 'Default')
        self.GUI.setUsePreseed(usePreseed)
        self.GUI.doModal()
        del self.GUI
        log('END')

    def apply_settings(self):
        '''
        This method will apply all of the settings. It calls the first_method, if it exists.
        Then it calls the method listed in setting_data_method for each changed setting. Then it
        calls the final_method, again, if it exists.
        '''

        # retrieve the current settings from the settings.xml (this is where the user has made changes)
        new_settings = self.settings_retriever_xml()

        # call the first method, if there is one
        try:
            self.first_method()
        except Exception:
            pass

        # apply the individual settings changes
        for k, v in self.setting_data_method.iteritems():

            # get the application method and stored setting value from the dictionary
            method = v.get('apply', False)
            stored_value = v.get('setting_value', '')

            # if the new setting is different to the stored setting then change the dict and run the 'apply' method
            if new_settings[k] != stored_value:

                # change stored setting_value to the new value
                self.setting_data_method[k]['setting_value'] = new_settings[k]

                # Call the per-setting apply hook with the NEW value.
                # BUGFIX: this previously called method(setting_value) — an undefined
                # name — so the hook never ran and the NameError was silently
                # swallowed by a bare except.
                if callable(method):
                    try:
                        method(new_settings[k])
                    except Exception:
                        log('apply method failed for setting "%s"' % k)

        # call the final method if there is one
        try:
            self.final_method()
        except Exception:
            pass

    def settings_retriever_xml(self):
        '''
        Reads the stored settings (in settings.xml) and returns a dictionary with the setting_name: setting_value. This
        method cannot be overwritten.
        '''
        latest_settings = {}
        addon = xbmcaddon.Addon(self.addonid)
        for key in self.setting_data_method.keys():
            latest_settings[key] = addon.getSetting(key)
        return latest_settings

    def check_network(self, online):
        '''Return whether the box has a network connection (optionally checking internet access).'''
        return osmc_network.has_network_connection(online)

    def is_ftr_running(self):
        '''Return whether the first-time-run wizard is currently active.'''
        return osmc_network.is_ftr_running()

    ##############################################################################################################################
    #                                                                                                                            #

    def first_method(self):
        '''
        The method to call before all the other setting methods are called.

        For example, this could be a call to stop a service. The final method could then restart the service again.
        This can be used to apply the setting changes.
        '''
        pass

    def final_method(self):
        '''
        The method to call after all the other setting methods have been called.

        For example, in the case of the Raspberry Pi's settings module, the final writing to the config.txt can be delayed
        until all the settings have been updated in the setting_data_method.
        '''
        pass

    def boot_method(self):
        '''
        The method to call when the OSA is first activated (on reboot)
        '''
        pass

    #                                                                                                                            #
    ##############################################################################################################################

    ##############################################################################################################################
    #                                                                                                                            #

    '''
    Methods beyond this point are for specific settings.
    '''

    # SETTING METHOD
    def method_to_apply_changes_X(self, data):
        '''
        Example method for implementing changes to setting X.
        '''
        log('hells yeah!')

    def translate_on_populate_X(self, data, reverse=False):
        '''
        Example method to translate the data before adding to the setting_data_method dict.

        This is useful if you are populating the data from an external source like the Pi's config.txt.
        This method could end with a call to another method to populate the settings.xml from that same source.
        '''
        # this is how you would negate the translating of the data when the settings window closes.
        if reverse:
            return data

    #                                                                                                                            #
    ##############################################################################################################################
# Module is imported and driven by the OSA; nothing to do when run directly.
if __name__ == "__main__":
    pass
| srmo/osmc | package/mediacenter-addon-osmc/src/script.module.osmcsetting.networking/resources/osmc/OSMCSetting.py | Python | gpl-2.0 | 13,802 | [
"ASE"
] | 036dbbd64452a209cabda9750d5b889c575380f80dc85776b7e70d9191928144 |
# -*- encoding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 6
_modified_time = 1417442141.780621
_template_filename=u'templates/grid_base.mako'
_template_uri=u'/history/../grid_base.mako'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='ascii'
_exports = ['body', 'load', 'get_grid_config', 'title', 'center_panel', 'init']
# SOURCE LINE 1
from galaxy.web.framework.helpers.grids import TextColumn
def inherit(context):
    # Choose the parent template at render time: embedded grids inherit
    # nothing, panel views inherit the webapp's panel layout, everything
    # else inherits the plain base template.
    kwargs = context.get( 'kwargs', {} )
    if kwargs.get( 'embedded', False ):
        # No inheritance - using only embeddable content (self.body)
        return None
    if context.get('use_panels'):
        if context.get('webapp'):
            webapp = context.get('webapp')
        else:
            webapp = 'galaxy'
        return '/webapps/%s/base_panels.mako' % webapp
    else:
        return '/base.mako'
def _mako_get_namespace(context, name):
    # Mako-generated: lazily create template namespaces on first lookup.
    try:
        return context.namespaces[(__name__, name)]
    except KeyError:
        _mako_generate_namespaces(context)
    return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
    # Mako-generated: registers the anonymous namespace importing
    # get_class_plural from /display_common.mako.
    # SOURCE LINE 20
    ns = runtime.TemplateNamespace('__anon_0x7f5a1844eb10', context._clean_inheritance_tokens(), templateuri=u'/display_common.mako', callables=None, calling_uri=_template_uri)
    context.namespaces[(__name__, '__anon_0x7f5a1844eb10')] = ns
def _mako_inherit(template, context):
    # Mako-generated: wire up namespaces, then apply the dynamic inherit().
    _mako_generate_namespaces(context)
    return runtime._inherit_from(context, (inherit(context)), _template_uri)
def render_body(context,**pageargs):
    # Mako-generated. NOTE: this def is shadowed by the second
    # ``render_body`` defined later in this module; only the later
    # binding survives at import time.
    context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        _import_ns = {}
        _mako_get_namespace(context, '__anon_0x7f5a1844eb10')._populate(_import_ns, [u'get_class_plural'])
        __M_writer = context.writer()
        # SOURCE LINE 18
        __M_writer(u'\n')
        # SOURCE LINE 19
        __M_writer(u'\n')
        # SOURCE LINE 20
        __M_writer(u'\n\n')
        # SOURCE LINE 25
        __M_writer(u'\n')
        # SOURCE LINE 34
        __M_writer(u'\n\n')
        # SOURCE LINE 37
        __M_writer(u'\n\n')
        # SOURCE LINE 42
        __M_writer(u'\n\n')
        # SOURCE LINE 47
        __M_writer(u'\n\n')
        # SOURCE LINE 80
        __M_writer(u'\n\n')
        # SOURCE LINE 256
        __M_writer(u'\n\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_body(context):
    # Mako-generated: the page body simply delegates to load().
    context.caller_stack._push_frame()
    try:
        _import_ns = {}
        _mako_get_namespace(context, '__anon_0x7f5a1844eb10')._populate(_import_ns, [u'get_class_plural'])
        self = _import_ns.get('self', context.get('self', UNDEFINED))
        __M_writer = context.writer()
        # SOURCE LINE 45
        __M_writer(u'\n ')
        # SOURCE LINE 46
        __M_writer(unicode(self.load()))
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_load(context,embedded=False,insert=None):
    # Mako-generated: emits the grid container div plus the JS bootstrap
    # that instantiates GridView with the JSON from get_grid_config().
    context.caller_stack._push_frame()
    try:
        _import_ns = {}
        _mako_get_namespace(context, '__anon_0x7f5a1844eb10')._populate(_import_ns, [u'get_class_plural'])
        h = _import_ns.get('h', context.get('h', UNDEFINED))
        self = _import_ns.get('self', context.get('self', UNDEFINED))
        __M_writer = context.writer()
        # SOURCE LINE 50
        __M_writer(u'\n <!-- grid_base.mako -->\n')
        # SOURCE LINE 53
        __M_writer(u' ')
        __M_writer(unicode(h.css( "autocomplete_tagging", "jquery.rating" )))
        __M_writer(u'\n ')
        # SOURCE LINE 54
        __M_writer(unicode(h.js("libs/jquery/jquery.autocomplete", "galaxy.autocom_tagging", "libs/jquery/jquery.rating" )))
        __M_writer(u'\n\n')
        # SOURCE LINE 57
        __M_writer(u' <div id="grid-container"></div>\n\n')
        # SOURCE LINE 60
        __M_writer(u' <script type="text/javascript">\n var gridView = null;\n function add_tag_to_grid_filter( tag_name, tag_value ){\n // Put tag name and value together.\n var tag = tag_name + ( tag_value !== undefined && tag_value !== "" ? ":" + tag_value : "" );\n var advanced_search = $( \'#advanced-search\').is(":visible" );\n if( !advanced_search ){\n $(\'#standard-search\').slideToggle(\'fast\');\n $(\'#advanced-search\').slideToggle(\'fast\');\n }\n gridView.add_filter_condition( "tags", tag );\n };\n\n // load grid viewer\n require([\'mvc/grid/grid-view\'], function(GridView) {\n $(function() {\n gridView = new GridView( ')
        # SOURCE LINE 76
        __M_writer(unicode( h.dumps( self.get_grid_config( embedded=embedded, insert=insert ) ) ))
        __M_writer(u' );\n });\n });\n </script>\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_get_grid_config(context,embedded=False,insert=None):
    # Mako-generated: builds self.grid_config, the dict serialized to JSON
    # for the client-side GridView. The bare ``endfor``/``endif`` statements
    # below are harmless name expressions emitted from the template's
    # <% %> python block (the names are pulled from context as UNDEFINED).
    context.caller_stack._push_frame()
    try:
        _import_ns = {}
        _mako_get_namespace(context, '__anon_0x7f5a1844eb10')._populate(_import_ns, [u'get_class_plural'])
        cur_page_num = _import_ns.get('cur_page_num', context.get('cur_page_num', UNDEFINED))
        unicode = _import_ns.get('unicode', context.get('unicode', UNDEFINED))
        enumerate = _import_ns.get('enumerate', context.get('enumerate', UNDEFINED))
        query = _import_ns.get('query', context.get('query', UNDEFINED))
        message = _import_ns.get('message', context.get('message', UNDEFINED))
        isinstance = _import_ns.get('isinstance', context.get('isinstance', UNDEFINED))
        self = _import_ns.get('self', context.get('self', UNDEFINED))
        sort_key = _import_ns.get('sort_key', context.get('sort_key', UNDEFINED))
        dict = _import_ns.get('dict', context.get('dict', UNDEFINED))
        num_page_links = _import_ns.get('num_page_links', context.get('num_page_links', UNDEFINED))
        status = _import_ns.get('status', context.get('status', UNDEFINED))
        advanced_search = _import_ns.get('advanced_search', context.get('advanced_search', UNDEFINED))
        endfor = _import_ns.get('endfor', context.get('endfor', UNDEFINED))
        default_filter_dict = _import_ns.get('default_filter_dict', context.get('default_filter_dict', UNDEFINED))
        get_class_plural = _import_ns.get('get_class_plural', context.get('get_class_plural', UNDEFINED))
        util = _import_ns.get('util', context.get('util', UNDEFINED))
        refresh_frames = _import_ns.get('refresh_frames', context.get('refresh_frames', UNDEFINED))
        num_pages = _import_ns.get('num_pages', context.get('num_pages', UNDEFINED))
        cur_filter_dict = _import_ns.get('cur_filter_dict', context.get('cur_filter_dict', UNDEFINED))
        url = _import_ns.get('url', context.get('url', UNDEFINED))
        current_item = _import_ns.get('current_item', context.get('current_item', UNDEFINED))
        str = _import_ns.get('str', context.get('str', UNDEFINED))
        endif = _import_ns.get('endif', context.get('endif', UNDEFINED))
        grid = _import_ns.get('grid', context.get('grid', UNDEFINED))
        trans = _import_ns.get('trans', context.get('trans', UNDEFINED))
        __M_writer = context.writer()
        # SOURCE LINE 82
        __M_writer(u'\n')
        # SOURCE LINE 84
        self.grid_config = {
            'title' : grid.title,
            'url_base' : trans.request.path_url,
            'async' : grid.use_async,
            'async_ops' : [],
            'categorical_filters' : {},
            'filters' : cur_filter_dict,
            'sort_key' : sort_key,
            'show_item_checkboxes' : context.get('show_item_checkboxes', False),
            'cur_page_num' : cur_page_num,
            'num_pages' : num_pages,
            'num_page_links' : num_page_links,
            'history_tag_autocomplete_url' : url( controller='tag', action='tag_autocomplete_data', item_class='History' ),
            'history_name_autocomplete_url' : url( controller='history', action='name_autocomplete_data' ),
            'status' : status,
            'message' : util.restore_text(message),
            'global_actions' : [],
            'operations' : [],
            'items' : [],
            'columns' : [],
            'get_class_plural' : get_class_plural( grid.model_class ).lower(),
            'use_paging' : grid.use_paging,
            'legend' : grid.legend,
            'current_item_id' : False,
            'use_panels' : context.get('use_panels'),
            'use_hide_message' : grid.use_hide_message,
            'insert' : insert,
            'default_filter_dict' : default_filter_dict,
            'advanced_search' : advanced_search,
            'refresh_frames' : [],
            'embedded' : embedded,
            'info_text' : grid.info_text,
            'url' : url(dict())
        }

        ## add refresh frames
        if refresh_frames:
            self.grid_config['refresh_frames'] = refresh_frames

        ## add current item if exists
        if current_item:
            self.grid_config['current_item_id'] = current_item.id
        endif

        ## column
        for column in grid.columns:

            ## add column sort links
            href = None
            extra = ''
            if column.sortable:
                if sort_key.endswith(column.key):
                    if not sort_key.startswith("-"):
                        href = url( sort=( "-" + column.key ) )
                        extra = "↓"
                    else:
                        href = url( sort=( column.key ) )
                        extra = "↑"
                else:
                    href = url( sort=column.key )

            ## add to configuration
            self.grid_config['columns'].append({
                'key' : column.key,
                'visible' : column.visible,
                'nowrap' : column.nowrap,
                'attach_popup' : column.attach_popup,
                'label_id_prefix' : column.label_id_prefix,
                'sortable' : column.sortable,
                'label' : column.label,
                'filterable' : column.filterable,
                'is_text' : isinstance(column, TextColumn),
                'href' : href,
                'extra' : extra
            })
        endfor

        ## operations
        for operation in grid.operations:
            self.grid_config['operations'].append({
                'allow_multiple' : operation.allow_multiple,
                'allow_popup' : operation.allow_popup,
                'target' : operation.target,
                'label' : operation.label,
                'confirm' : operation.confirm,
                'inbound' : operation.inbound,
                'global_operation' : False
            })
            if operation.allow_multiple:
                self.grid_config['show_item_checkboxes'] = True

            if operation.global_operation:
                self.grid_config['global_operation'] = url( ** (operation.global_operation()) )
        endfor

        ## global actions
        for action in grid.global_actions:
            self.grid_config['global_actions'].append({
                'url_args' : url(**action.url_args),
                'label' : action.label,
                'inbound' : action.inbound
            })
        endfor

        ## Operations that are async (AJAX) compatible.
        for operation in [op for op in grid.operations if op.async_compatible]:
            self.grid_config['async_ops'].append(operation.label.lower());
        endfor

        ## Filter values for categorical filters.
        for column in grid.columns:
            if column.filterable is not None and not isinstance( column, TextColumn ):
                self.grid_config['categorical_filters'][column.key] = dict([ (filter.label, filter.args) for filter in column.get_accepted_filters() ])
            endif
        endfor

        # items
        for i, item in enumerate( query ):
            item_dict = {
                'id' : item.id,
                'encode_id' : trans.security.encode_id(item.id),
                'link' : [],
                'operation_config' : {},
                'column_config' : {}
            }

            ## data columns
            for column in grid.columns:
                if column.visible:
                    ## get link
                    link = column.get_link(trans, grid, item)
                    if link:
                        link = url(**link)
                    else:
                        link = None
                    endif

                    ## inbound
                    inbound = column.inbound

                    ## get value
                    value = column.get_value( trans, grid, item )

                    # Handle non-ascii chars.
                    if isinstance(value, str):
                        value = unicode(value, 'utf-8')
                        value = value.replace('/', '//')
                    endif

                    ## Item dictionary
                    item_dict['column_config'][column.label] = {
                        'link' : link,
                        'value' : value,
                        'inbound' : inbound
                    }
                endif
            endfor

            ## add operation details to item
            for operation in grid.operations:
                item_dict['operation_config'][operation.label] = {
                    'allowed' : operation.allowed(item),
                    'url_args' : url( **operation.get_url_args( item ) )
                }
            endfor

            ## add item to list
            self.grid_config['items'].append(item_dict)
        endfor

        # NOTE: the template block returns the config dict directly, so the
        # trailing writer call and ``return ''`` below are unreachable.
        return self.grid_config

        # SOURCE LINE 255
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_title(context):
    # Mako-compiled <%def name="title()"> body: writes the grid's title.
    # NOTE(review): indentation was stripped from this generated code in the
    # source dump; reconstructed here to the standard mako-compiled layout
    # (push frame / try / write / return '' / finally pop frame).
    context.caller_stack._push_frame()
    try:
        _import_ns = {}
        _mako_get_namespace(context, '__anon_0x7f5a1844eb10')._populate(_import_ns, [u'get_class_plural'])
        grid = _import_ns.get('grid', context.get('grid', UNDEFINED))
        __M_writer = context.writer()
        # SOURCE LINE 37
        __M_writer(unicode(grid.title))
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_center_panel(context):
    # Mako-compiled <%def name="center_panel()"> body: emits whitespace and
    # the result of self.load() (the grid-config builder defined above).
    # NOTE(review): indentation reconstructed; the dump stripped it.
    context.caller_stack._push_frame()
    try:
        _import_ns = {}
        _mako_get_namespace(context, '__anon_0x7f5a1844eb10')._populate(_import_ns, [u'get_class_plural'])
        self = _import_ns.get('self', context.get('self', UNDEFINED))
        __M_writer = context.writer()
        # SOURCE LINE 40
        __M_writer(u'\n    ')
        # SOURCE LINE 41
        __M_writer(unicode(self.load()))
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_init(context,embedded=False,insert=None):
    # Mako-compiled <%def name="init()"> body: configures page-layout flags
    # on the template `self` namespace (no panels, no message box/overlay).
    # NOTE(review): indentation reconstructed; the dump stripped it.
    context.caller_stack._push_frame()
    try:
        _import_ns = {}
        _mako_get_namespace(context, '__anon_0x7f5a1844eb10')._populate(_import_ns, [u'get_class_plural'])
        self = _import_ns.get('self', context.get('self', UNDEFINED))
        __M_writer = context.writer()
        # SOURCE LINE 26
        __M_writer(u'\n')
        # SOURCE LINE 27
        self.has_left_panel = False
        self.has_right_panel = False
        self.message_box_visible = False
        self.overlay_visible = False
        self.active_view = 'user'
        # SOURCE LINE 33
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/database/compiled_templates/grid_base.mako.py | Python | gpl-3.0 | 16,882 | [
"Galaxy"
] | f3d554e8300dda957ac4b87e1784310408befd745852ffbd6d17cf733968dcd7 |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['text.usetex'] = True
from matplotlib.ticker import MultipleLocator
from astropy.io import fits
from astropy.time import Time
from PyAstronomy import pyasl
from scipy import ndimage
import pandas as pd
import gaussfitter as gf
import BF_functions as bff
'''
Program to extract radial velocities from a double-lined binary star spectrum.
Uses the Broadening Function technique.
Meredith Rawls
2014-2015
Based loosely on Rucinski's BFall_IDL.pro, and uses the PyAstronomy tools.
http://www.astro.utoronto.ca/~rucinski/BFdescription.html
http://www.hs.uni-hamburg.de/DE/Ins/Per/Czesla/PyA/PyA/pyaslDoc/aslDoc/svd.html
In practice, you will run this twice: once to do the initial BF, and then again
to properly fit the peaks of each BF with a Gaussian.
INPUT
infiles: single-column file with one FITS or TXT filename (w/ full path) per line
1st entry must be for the template star (e.g., arcturus or phoenix model)
(the same template is used to find RVs for both stars)
NO comments are allowed in this file
FUN FACT: unless APOGEE, these should be continuum-normalized to 1 !!!
bjdinfile: columns 0,1,2 must be filename, BJD, BCV (e.g., from IRAF bcvcorr)
top row must be for the template star (e.g., arcturus)
(the 0th column is never used, but typically looks like infiles_BF.txt)
one line per observation
comments are allowed in this file using #
gausspars: your best initial guesses for fitting gaussians to the BF peaks
the parameters are [amp1, offset1, width1, amp2, offset2, width2]
the top line is ignored (template), but must have six values
one line per observation
comments are allowed in this file using #
OUTPUT
outfile: a file that will be created with 8 columns: BJD midpoint, orbital phase,
Kepler BJD, RV1, RV1 error, RV2, RV2 error
bfoutfile: a file that contains all the BF function data (raw RV, BF, gaussian model)
IMMEDIATELY BELOW, IN THE CODE
You need to specify whether you have APOGEE (near-IR) or "regular" (e.g., ARCES)
spectra with the 'isAPOGEE' flag. You also need to set the binary's PERIOD and BJD0,
both in days, and the constant RV and BCV of whatever template you are using.
'''
##########
# YOU NEED TO HAVE THESE INPUT FILES !!!
# THE OUTPUT FILE WILL BE CREATED FOR YOU
# EXAMPLE INFILES AND OUTFILES
#infiles = 'infiles.txt'; bjdinfile = 'bjdinfile.txt'
#gausspars = 'gausspars.txt'
#outfile = 'rvoutfile.txt'; bfoutfile = 'bfoutfile.txt'
#4851217
#infiles = 'data/4851217/4851217infiles.txt'; bjdinfile = 'data/4851217/4851217bjdinfile.txt'
#gausspars = 'data/4851217/4851217gausspars.txt'
#outfile = 'data/4851217/4851217Outfile.txt'; bfoutfile = 'data/4851217/4851217BFOut.txt'
#5285607
#infiles = 'data/5285607/5285607infiles.txt'; bjdinfile = 'data/5285607/5285607bjdinfile.txt'
#gausspars = 'data/5285607/5285607gausspars.txt'
#outfile = 'data/5285607/5285607OutfileJC.txt'; bfoutfile = 'data/5285607/5285607BFOut1.txt'
#gaussoutfile = 'data/5285607/5285607gaussout.txt'; areaout = 'data/5285607/5285607BFArea.txt'
#5285607 APSTAR ORDER
#infiles = 'data/5285607/5285607infilesApstar.txt'; bjdinfile = 'data/5285607/5285607bjdinfileApstar.txt'
#gausspars = 'data/5285607/5285607gaussparsApstar.txt'
#outfile = 'data/5285607/5285607OutfileApstar.txt'; bfoutfile = 'data/5285607/5285607BFOutApstar.txt'
#4075064
#infiles = 'data/4075064/4075064infiles.txt'; bjdinfile = 'data/4075064/4075064bjdinfile.txt'
#gausspars = 'data/4075064/4075064gausspars.txt'
#outfile = 'data/4075064/4075064outfile.txt'; bfoutfile = 'data/4075064/4075064BFdata.txt'
#3848919
#infiles = 'data/3848919/3848919infiles.txt'; bjdinfile = 'data/3848919/3848919bjdinfile.txt'
#gausspars = 'data/3848919/3848919gausspars.txt'
#outfile = 'data/3848919/3848919outfile.txt'; bfoutfile = 'data/3848919/3848919BFdata.txt'
#6610219
#infiles = 'data/6610219/6610219infiles.txt'; bjdinfile = 'data/6610219/6610219bjdinfile.txt'
#gausspars = 'data/6610219/6610219gausspars1.txt'
#outfile = 'data/6610219/6610219outfile.txt'; bfoutfile = 'data/6610219/6610219BFOut.txt'
#4285087
#infiles = 'data/4285087/4285087infiles.txt'; bjdinfile = 'data/4285087/4285087bjdinfile.txt'
#gausspars = 'data/4285087/4285087gausspars.txt'
#outfile = 'data/4285087/4285087outfile.txt'; bfoutfile = 'data/4285087/4285087BFOut.txt'
#gaussoutfile = 'data/4285087/4285087gaussout.txt'; areaout = 'data/4285087/4285087BFArea.txt'
#6131659
#infiles = 'data/6131659/6131659infiles.txt'; bjdinfile = 'data/6131659/6131659bjdinfile.txt'
#gausspars = 'data/6131659/6131659gausspars.txt'
#outfile = 'data/6131659/6131659outfile.txt'; bfoutfile = 'data/6131659/6131659BFOut.txt'
#6449358
#infiles = 'data/6449358/6449358infilesALL.txt'; bjdinfile = 'data/6449358/6449358bjdinfileALL.txt'
#gausspars = 'data/6449358/6449358gaussparsALL.txt'
#outfile = 'data/6449358/6449358OutfileALL.txt'; bfoutfile = 'data/6449358/6449358BFOutALL.txt'
#gaussoutfile = 'data/6449358/6449358gaussoutALL.txt'
#5284133
#infiles = 'data/5284133/5284133infiles.txt'; bjdinfile = 'data/5284133/5284133bjdinfile.txt'
#gausspars = 'data/5284133/5284133gausspars.txt'
#outfile = 'data/5284133/5284133Outfile.txt'; bfoutfile = 'data/5284133/5284133BFOut.txt'
#6778289
#infiles = 'data/6778289/6778289infiles.txt'; bjdinfile = 'data/6778289/6778289bjdinfiles.txt'
#gausspars = 'data/6778289/6778289gausspars.txt'
#outfile = 'data/6778289/6778289OutfileNEW.txt'; bfoutfile = 'data/6778289/6778289BFOutNEW.txt'
#gaussoutfile = 'data/6778289/6778289gaussout.txt'; areaout = 'data/6778289/6778289BFAreaNEW.txt'
#6778289 Visible
#infiles = 'data/6778289/V6778289infiles.txt'; bjdinfile = 'data/6778289/V6778289bjdinfile.txt'
#gausspars = 'data/6778289/V6778289gausspars.txt'
#outfile = 'data/6778289/V6778289Outfile.txt'; bfoutfile = 'data/6778289/V6778289BFOut.txt'
#gaussoutfile = 'data/6778289/V6778289gaussout.txt'; areaout = 'data/6778289/V6778289BFArea.txt'
#6781535 (Suspected Triple System)
#infiles = 'data/6781535/6781535infiles.txt'; bjdinfile = 'data/6781535/6781535bjdinfile.txt'
#gausspars = 'data/6781535/6781535gausspars.txt'
#outfile = 'data/6781535/6781535Outfile.txt'; bfoutfile = 'data/6781535/6781535BFOut.txt'
#gaussoutfile = 'data/6781535/6781535gaussout.txt'; areaout = 'data/6781535/6781535BFArea.txt'
#6864859
# ACTIVE TARGET: KIC 6864859 (all other targets above are kept as commented-out presets)
infiles = 'data/6864859/6864859infiles.txt'; bjdinfile = 'data/6864859/6864859bjdinfile.txt'
gausspars = 'data/6864859/6864859gausspars.txt'
outfile = 'data/6864859/6864859Outfile.txt'; bfoutfile = 'data/6864859/6864859BFOut.txt'
gaussoutfile = 'data/6864859/6864859gaussout.txt'; areaout = 'data/6864859/6864859BFArea.txt'
#3247294
#infiles = 'data/3247294/3247294infiles.txt'; bjdinfile = 'data/3247294/3247294bjdinfile.txt'
#gausspars = 'data/3247294/3247294gausspars.txt'
#outfile = 'data/3247294/3247294Outfile.txt'; bfoutfile = 'data/3247294/3247294BFOut.txt'
# ORBITAL PERIOD AND ZEROPOINT !!!
#period = 2.47028; BJD0 = 2455813.69734 # 4851217
#period = 3.8994011; BJD0 = 2454959.576010 # 5285607
#period = 5.7767904; BJD0 = 2454955.073410 # 6449358
#####period = 8.7845759; BJD0 = 245800.46231 #5284133
#period = 30.13015; BJD0 = 2454971.834534 #6778289 FIXED BJD0 01/23/2019
#period = 9.1220856; BJD0 = 2454971.834534 #6781535
# period/BJD0 in days; must correspond to the same target as infiles above
period = 40.8778427; BJD0 = 2454955.556300 #6864859
#period = 61.4228063; BJD0 = 2455813.69734 #4075064
#period = 1.0472603; BJD0 = 2455811.61005 #3848919
#period = 11.3009948; BJD0 = 2456557.73097 #6610219
#period = 4.4860312; BJD0 = 2454966.450124 #4285087
#period = 17.5278303; BJD0 = 2454960.041397 #6131659
#period = 67.4188276; BJD0 = 2454966.433454 #3247294
# STUFF YOU NEED TO DEFINE CORRECTLY !!!
# if you are fitting three gaussians, you had better give 3 sets of amplimits and widlimits
isAPOGEE = True # toggle to use near-IR stuff, or not
SpecPlot = False # toggle to plot spectra before BFs, or not
bjdoffset = 2454833. # difference between real BJDs and 'bjdfunny' (truncated BJDs)
amplimits = [0,1.2, 0,1.2, 0,1.2] # limits for gaussian normalized amplitude [min1,max1,min2,max2]
threshold = 10 # margin for gaussian position (raw RV in km/s)
#widlimits = [0,25, 0,22] # limits for gaussian width (km/s) [min1,max1,min2,max2]
# ^^^ widlimits IS NOW SPECIFIED ON A PER-STAR BASIS BELOW
# RADIAL VELOCITY AND BCV INFO FOR TEMPLATE (km/s; set both to 0 if using a model !!!)
rvstd = 0; bcvstd = 0 # model template
# PARAMETERS FOR THE BROADENING FUNCTION (IMPORTANT PAY ATTENTION !!!)
smoothstd = 1.5 # stdev of Gaussian to smooth BFs by (~slit width in pixels)
#w00 = 5400 # starting wavelength for new grid
#n = 38750 # number of wavelength points for new grid
#stepV = 1.7 # roughly 3e5 / (max_wavelength / wavelength_step) km/s, rounded down
m = 401 # length of the BF (must be longer if RVs are far from 0)
## good values for APOGEE:
#w00 = 15170; n = 32000; stepV = 1.0 # Visible?
#w00 = 15170; n = 32000; stepV = 1.0 # all of APOGEE, (too) high res
#w00 = 15170; n = 10000; stepV = 1.5 # all of APOGEE, still pretty high res
w00 = 15170; n = 10000; stepV = 2.0 # all of APOGEE, still pretty high res
#w00 = 15170; n = 6000; stepV = 4.0 # a little piece of APOGEE (lower res, apStar)
# CUSTOMIZED BF WIDTH (for gausspars) AND PLOT LIMITS
#widlimits = [0,15, 0,15]; rvneg = -100; rvpos = 300; ymin = -0.15; ymax = 1.19 # good starting default
#widlimits = [0,9, 0,7, 0,9]; rvneg = 0; rvpos = 149; ymin = -0.15; ymax = 1.19 # 3247294 #weird triple only one panel
#widlimits = [0,12, 0,11, 0,11]; rvneg = -75; rvpos = 199; ymin = -0.15; ymax = 1.18 # 6781535
#widlimits = [0,9, 0,9, 0,11]; rvneg = 0; rvpos = 199; ymin = -0.15; ymax = 1.18 # 6131659
#widlimits = [0,9, 0,7]; rvneg = -300; rvpos = 300; ymin = -0.15; ymax = 1.19 # 6131659 Xtra large
#widlimits = [0,13, 0,13]; rvneg = -50; rvpos = 249; ymin = -0.15; ymax = 1.19 # 4285087
#widlimits = [0,18, 0,19]; rvneg = -70; rvpos = 270; ymin = -0.15; ymax = 1.19 # 5285607
#widlimits = [0,16, 0,11]; rvneg = -300; rvpos = 500; ymin = -0.15; ymax = 1.2 #6449358 extra wide
#widlimits = [0,16, 0,11]; rvneg = -50; rvpos = 199; ymin = -0.15; ymax = 1.10 #6449358
#widlimits = [0,12, 0,8]; rvneg = -45; rvpos = 199; ymin = -0.15; ymax = 1.4 #6778289
# widlimits + BF plot window (rvneg..rvpos in km/s, ymin..ymax in BF amplitude) for the active target
widlimits = [0,11, 0,10]; rvneg = 30; rvpos = 170; ymin = -0.15; ymax = 1.19 # 6864859
#widlimits = [0,9, 0,9]; rvneg = -150; rvpos = 50; ymin = -0.15; ymax = 1.19 # 6610259a
#widlimits = [0,15, 0,15]; rvneg = -50; rvpos = 10; ymin = -0.15; ymax = 1.19 # 6610219b
# plotting color palette from BF_functions
colors = bff.user_rc()
# NOTE(review): indentation was stripped from this script in the dump;
# the loop/if nesting below is reconstructed from the control-flow keywords.
print('Welcome to the Broadening Function party!')
print('')
print('MAKE SURE THIS IS WHAT YOU WANT:')
print('You set Porb = {0} days, BJD0 = {1} days'.format(period, BJD0))

# CREATE NEW SPECTRUM IN LOG SPACE
# This uses w00, n, and stepV, defined above. The new wavelength grid is w1.
# The BF will be evenly spaced in velocity with length m.
# The velocity steps are r (km/s/pix).
w1, m, r = bff.logify_spec(isAPOGEE, w00, n, stepV, m)

# READ IN ALL THE THINGS
specdata = bff.read_specfiles(infiles, bjdinfile, isAPOGEE)
nspec = specdata[0]; filenamelist = specdata[1]
datetimelist = specdata[2]; wavelist = specdata[3]; speclist = specdata[4]

# INTERPOLATE THE TEMPLATE AND OBJECT SPECTRA ONTO THE NEW LOG-WAVELENGTH GRID
# OPTION TO PLOT THIS
newspeclist = []
yoffset = 0
if SpecPlot:
    plt.axis([w1[0], w1[-1], 0, nspec+3])
    plt.xlabel(r'Wavelength ({\AA})')
for i in range(0, nspec):
    newspec = np.interp(w1, wavelist[i], speclist[i])
    newspeclist.append(newspec)
    if SpecPlot:
        if i == 0:  # plot template in red
            plt.plot(w1, newspec+yoffset, label=datetimelist[i].iso[0:10], color=colors[6], marker='.')
        else:  # plot the rest in blue
            plt.plot(w1, newspec+yoffset, label=datetimelist[i].iso[0:10], color=colors[0], marker='.')
    yoffset = yoffset + 1
if SpecPlot:
    ##plt.legend()
    plt.show()
# BROADENING FUNCTION TIME
# NOTE(review): indentation reconstructed (stripped in the dump).
svd = pyasl.SVD()
# Single Value Decomposition of the template spectrum (row 0 of newspeclist)
svd.decompose(newspeclist[0], m)
singularvals = svd.getSingularValues()
bflist = []
bfsmoothlist = []
for i in range(0, nspec):
    # Obtain the broadening function
    bf = svd.getBroadeningFunction(newspeclist[i])  # this is a full matrix
    bfarray = svd.getBroadeningFunction(newspeclist[i], asarray=True)
    # Smooth the array-like broadening function with a Gaussian rolling window
    # (old pandas API: pd.rolling_window(bfarray, window=5, win_type='gaussian', std=smoothstd, center=True))
    bfsmooth = pd.Series(bfarray).rolling(window=5, win_type='gaussian', center=True).mean(std=smoothstd)
    # The rolling window makes NaNs at the edges; zero them out.
    # (Replaces a per-element loop whose else-branch was a no-op.)
    bfsmooth = bfsmooth.fillna(0)
    bflist.append(bf)
    bfsmoothlist.append(bfsmooth)

# Normalize each smoothed BF to the [0, 1] range
bfnormlist = []
for a in bfsmoothlist:
    bfnormlist.append((a-np.min(a))/(np.max(a)-np.min(a)))

# Obtain the indices in RV space that correspond to the BF
bf_ind = svd.getRVAxis(r, 1) + rvstd - bcvstd

# OPTION TO PLOT THE SINGULAR VALUES TO SEE WHERE THEY AREN'T A MESS
# this probably isn't important, because instead of choosing which values to throw out,
# we use "Route #2" as described by Rucinski and just use the final row of the BF array
# and smooth it with a Gaussian to get rid of noise problems.
# for more info, seriously, read http://www.astro.utoronto.ca/~rucinski/SVDcookbook.html
##plt.figure(2)
#plt.semilogy(singularvals, 'b-')
#plt.xlabel('BF Index')
#plt.ylabel('Singular Values')
#plt.show()

# OPTION TO PLOT THE SMOOTHED BFs (one per observation, vertically offset)
plt.axis([rvneg, rvpos, -0.2, float(nspec)+1])
plt.xlabel('Radial Velocity (km s$^{-1}$)')
plt.ylabel('Broadening Function (arbitrary amplitude)')
yoffset = 0.0
for i in range(1, nspec):
    plt.plot(bf_ind, bfnormlist[i]+yoffset, color=colors[0], marker='.')
    plt.axhline(y=yoffset, color=colors[15], ls=':')
    yoffset = yoffset + 1.0
plt.show()
# FIT THE SMOOTHED BF PEAKS WITH TWO GAUSSIANS
# you have to have pretty decent guesses in the gausspars file for this to work.
# NOTE(review): indentation reconstructed (stripped in the dump).
bffitlist = bff.gaussparty(gausspars, nspec, filenamelist, bfnormlist, bf_ind, amplimits, threshold, widlimits)
rvraw1 = []; rvraw2 = []; rvraw1_err = []; rvraw2_err = []; rvraw3 = []; rvraw3_err = []
# index 0 is the template; pad each list with a placeholder for it
rvraw1.append(0); rvraw2.append(0); rvraw3.append(0)
# BUG FIX: the original line used a comma ("...append(0), rvraw3_err.append(0)"),
# which built and discarded a tuple; semicolons are intended.
rvraw1_err.append(0); rvraw2_err.append(0); rvraw3_err.append(0)
for i in range(1, len(bffitlist)):
    rvraw1.append(bffitlist[i][0][1])  # indices are [visit][parameter, BF, error array][amp,rv,width x N]
    rvraw2.append(bffitlist[i][0][4])  # [0,1,2] is amp,rv,width for star1; [3,4,5] is same for star2, etc.
    if len(bffitlist[i][0]) == 9:  # a third Gaussian was fit (triple system)
        rvraw3.append(bffitlist[i][0][7])
    else:
        rvraw3.append(None)
    rvraw1_err.append(bffitlist[i][2][1])
    rvraw2_err.append(bffitlist[i][2][4])
    if len(bffitlist[i][2]) == 9:
        rvraw3_err.append(bffitlist[i][2][7])
    else:
        rvraw3_err.append(None)
rvrawlist = [rvraw1, rvraw1_err, rvraw2, rvraw2_err, rvraw3, rvraw3_err]

# CALCULATE ORBITAL PHASES AND FINAL RV CURVE
rvdata = bff.rvphasecalc(bjdinfile, bjdoffset, nspec, period, BJD0, rvrawlist, rvstd, bcvstd)
phase = rvdata[0]; bjdfunny = rvdata[1]
rvfinals = rvdata[2]
# WRITE THE RV CURVE AND (OPTIONALLY) THE RAW BF DATA TO DISK
# NOTE(review): indentation reconstructed (stripped in the dump).
g2 = open(outfile, 'w')
print('# RVs calculated with BF_python.py', file=g2)
print('#', file=g2)
print('# Porb = {0} days, BJD0 = {1} days'.format(period, BJD0), file=g2)
print('# Wavelength axis = [{0} - {1}] Angstroms'.format(w1[0], w1[-1]), file=g2)
print('#', file=g2)
print('# Template spectrum (line 0 of infiles): {0}'.format(filenamelist[0]), file=g2)
print('# RV of template, BCV of template (km/s): {0}, {1}'.format(rvstd, bcvstd), file=g2)
print('#', file=g2)
print('# List of all input spectra (infiles): {0}'.format(infiles), file=g2)
print('# Target BJD and BCV info (bjdinfile): {0}'.format(bjdinfile), file=g2)
print('# Gaussian fit guesses (gausspars): {0}'.format(gausspars), file=g2)
print('#', file=g2)
print('# BF parameters: w00 = {0}; n = {1}; stepV = {2}'.format(w00, n, stepV), file=g2)
print('# BF parameters: smoothstd = {0}; m = {1}'.format(smoothstd, m), file=g2)
print('# gaussfit: amplimits = {0}; threshold = {1}, widlimits = {2}'.format(amplimits, threshold, widlimits), file=g2)
print('#', file=g2)
print('# time, phase, adjusted_time, RV1 [km/s], error1 [km/s], RV2 [km/s], error2 [km/s]', file=g2)
print('#', file=g2)
for i in range(1, nspec):
    if rvfinals[4][i] and rvfinals[5][i]:
        print('%.9f %.9f %.9f %.5f %.5f %.5f %.5f %.5f %.5f' % (bjdfunny[i] + bjdoffset, phase[i], bjdfunny[i],
              rvfinals[0][i], rvfinals[1][i], rvfinals[2][i], rvfinals[3][i], rvfinals[4][i], rvfinals[5][i]), file=g2)
    else:
        print('%.9f %.9f %.9f %.5f %.5f %.5f %.5f %s %s' % (bjdfunny[i] + bjdoffset, phase[i], bjdfunny[i],
              rvfinals[0][i], rvfinals[1][i], rvfinals[2][i], rvfinals[3][i], 'nan', 'nan'), file=g2)
g2.close()
print('BJD, phase, and RVs written to %s.' % outfile)
print('Use rvplotmaker.py to plot the RV curve.')

# Optionally dump the full BF + Gaussian-model data, one section per visit.
# bfoutfile may be commented out for some targets (NameError), or unwritable
# (OSError); the bare "except:" was narrowed to those two cases.
try:
    bfout = open(bfoutfile, 'w')
    for idx in range(1, nspec):
        print('###', file=bfout)
        print('# timestamp: {0}'.format(datetimelist[idx]), file=bfout)
        print('# Gaussian 1 [amp, RV +/- err, wid]: {0:.2f} {1:.2f} {2:.2f} {3:.2f}'.format(bffitlist[idx][0][0], rvraw1[idx], rvraw1_err[idx], bffitlist[idx][0][2]), file=bfout)
        print('# Gaussian 2 [amp, RV +/- err, wid]: {0:.2f} {1:.2f} {2:.2f} {3:.2f}'.format(bffitlist[idx][0][3], rvraw2[idx], rvraw2_err[idx], bffitlist[idx][0][5]), file=bfout)
        print('# Uncorrected_RV, BF_amp, Gaussian_fit', file=bfout)
        print('###', file=bfout)
        for vel, amp, modamp in zip(bf_ind, bfsmoothlist[idx], bffitlist[idx][1]):
            print(vel, amp, modamp, file=bfout)
    bfout.close()
except (NameError, OSError):
    print('No BF outfile specified, not saving BF data to file')

###Functionality to print out Gaussian peak information for primary and secondary separately###
try:
    gout = open(gaussoutfile, 'w')
    for idx in range(1, nspec):
        print('Primary Amplitude: {0} +/- {1} width {2} xmax {3}'.format(bffitlist[idx][0][0], bffitlist[idx][2][0], bffitlist[idx][0][2], bffitlist[idx][0][1]), file=gout)
        print('Secondary Amplitude: {0} +/- {1} width {2} xmax {3}'.format(bffitlist[idx][0][3], bffitlist[idx][2][3], bffitlist[idx][0][5], bffitlist[idx][0][4]), file=gout)
    gout.close()
except (NameError, OSError):
    print('No gaussoutfile specified, not saving gauss data to file')
# handy little gaussian function maker
def gaussian(x, amp, mu, sig):  # i.e., (xarray, amp, rv, width)
    """Return an unnormalized Gaussian: amp * exp(-(x - mu)^2 / (2 sig^2)).

    x may be a scalar or a numpy array (evaluated elementwise); amp is the
    peak height, mu the center, sig the standard deviation.
    """
    return amp * np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
# PLOT THE FINAL SMOOTHED BFS + GAUSSIAN FITS IN INDIVIDUAL PANELS
# manually adjust this multi-panel plot based on how many spectra you have
# NOTE(review): indentation reconstructed (stripped in the dump).
windowcols = 3  # 4 # how many columns the plot should have
#windowrows = 3
#windowrows = 8 #6864859 manually set number of plot rows here, or automatically below
#windowrows = 8 #6778289
#windowrows = 10
# np.float was removed in NumPy 1.20; plain float() is equivalent here
nplots = nspec - 1  # the template (index 0) is not plotted
windowrows = int(np.rint(nplots / windowcols) if (float(nplots) / windowcols) % windowcols == 0
                 else np.rint(nplots / windowcols) + 1)
xmin = rvneg
xmax = rvpos
#fig = plt.figure(1, figsize=(12,16))
fig = plt.figure(1, figsize=(16,12))
#fig = plt.figure(1, figsize=(15,7))
#fig = plt.figure(1, figsize=(15,5)) #5285607 (6 Visits)
fig.text(0.5, 0.04, 'Uncorrected Radial Velocity (km s$^{-1}$)', ha='center', va='center', size='large')
fig.text(0.07, 0.5, 'Broadening Function', ha='center', va='center', size='large', rotation='vertical')
for i in range(1, nspec):
    ax = fig.add_subplot(windowrows, windowcols, i)  # out of range if windowcols x windowrows < nspec
    ax.yaxis.set_major_locator(MultipleLocator(0.4))  # increments of y axis tic marks
    # hide y tick labels except on the leftmost column of each grid layout
    if windowcols == 4 and (i != 1 and i != 5 and i != 9 and i != 13 and i != 17 and i != 21 and i != 25):
        ax.set_yticklabels(())
    if windowcols == 3 and (i != 1 and i != 4 and i != 7 and i != 10 and i != 13 and i != 16 and i != 19 and i != 22 and i != 25):
        ax.set_yticklabels(())
    # hide x tick labels except on the bottom row
    if i < nspec - windowcols:
        ax.set_xticklabels(())
    plt.subplots_adjust(wspace=0, hspace=0.0)
    plt.axis([xmin, xmax, ymin, ymax])
    plt.tick_params(axis='both', which='major')
    plt.text(xmax - 0.19*(np.abs(xmax-xmin)), 0.60*ymax, '%.3f $\phi$' % (phase[i]), size='small')
    plt.text(xmax - 0.26*(np.abs(xmax-xmin)), 0.35*ymax, '%s' % (datetimelist[i].iso[0:10]), size='small')
    #plt.plot(bf_ind, bfsmoothlist[i], color=colors[14], lw=1.5, ls='-', label='Smoothed BF')
    plt.plot(bf_ind, bfnormlist[i], color=colors[14], lw=2, ls='-', label='Normalized Smoothed BF')
    plt.plot(bf_ind, bffitlist[i][1], color=colors[0], lw=2, ls='-', label='Two Gaussian fit')
    # tick marks at the fitted RV positions for each star
    plt.plot(rvraw1[i], 0.1, color=colors[6], marker='|', ms=15)#, label='RV 1')
    plt.plot(rvraw2[i], 0.1, color=colors[2], marker='|', ms=15)#, label='RV 2')
    if rvraw3[i] is not None:  # third star, if a triple was fit
        plt.plot(rvraw3[i], 0.1, color=colors[8], marker='|', ms=15)#, label='RV 3')
    # OPTION TO PLOT VERTICAL LINE AT ZERO
    #plt.axvline(x=0, color=colors[15])
    # MAKE A LEGEND
    ax.legend(bbox_to_anchor=(2.5, 0.7), loc=1, borderaxespad=0.,
              frameon=False, handlelength=3, prop={'size': 18})
    if nspec - 1 == windowcols * (windowrows - 1):  # square plot, you must adjust the rows for room
        # in this situation, the legend is printed below the final subplot
        if i == nspec - 1:
            ax.legend(bbox_to_anchor=(0.5, -1.2), loc=4, borderaxespad=0.,
                      frameon=False, handlelength=3, prop={'size': 16})
    else:
        # in this situation, the legend is printed to the right of the final subplot
        if i == nspec - 1:
            ax.legend(bbox_to_anchor=(2.1, 0.7), loc=1, borderaxespad=0.,
                      frameon=False, handlelength=3, prop={'size': 18})
plt.show()
fig.savefig('6864859bfrv_new.eps')
#fig.savefig('3247294bfrv.eps')
"Gaussian",
"VisIt"
] | 5c1440e9e388bae89867134027c3de2694b553fa855c675724634fa03488242b |
#
# GMSK modulation and demodulation.
#
#
# Copyright 2005-2007,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
# See gnuradio-examples/python/digital for examples
from math import pi
from math import log as ln
from pprint import pprint
import inspect
import numpy
from gnuradio import gr, blocks, analog, filter
from . import modulation_utils
from . import digital_python as digital
# default values (used in __init__ and add_options)
_def_samples_per_symbol = 2
_def_bt = 0.35  # Gaussian filter bandwidth * symbol time
_def_verbose = False
_def_log = False
_def_do_unpack = True
_def_gain_mu = None  # None => gmsk_demod.__init__ substitutes 0.175
_def_mu = 0.5  # kept for backward compatibility; unused by the sync loop
_def_freq_error = 0.0
_def_omega_relative_limit = 0.005
# FIXME: Figure out how to make GMSK work with pfb_arb_resampler_fff for both
# transmit and receive so we don't require integer samples per symbol.
# /////////////////////////////////////////////////////////////////////////////
# GMSK modulator
# /////////////////////////////////////////////////////////////////////////////
class gmsk_mod(gr.hier_block2):
    """
    Hierarchical block for Gaussian Minimum Shift Key (GMSK)
    modulation.

    The input is a byte stream (unsigned char with packed bits)
    and the output is the complex modulated signal at baseband.

    Args:
        samples_per_symbol: samples per baud >= 2 (integer)
        bt: Gaussian filter bandwidth * symbol time (float)
        verbose: Print information about modulator? (boolean)
        log: Print modulation data to files? (boolean)
    """
    # NOTE(review): indentation was stripped from this class in the dump;
    # reconstructed here without changing any statement.

    def __init__(self,
                 samples_per_symbol=_def_samples_per_symbol,
                 bt=_def_bt,
                 verbose=_def_verbose,
                 log=_def_log,
                 do_unpack=_def_do_unpack):
        gr.hier_block2.__init__(self, "gmsk_mod",
                                # Input signature
                                gr.io_signature(1, 1, gr.sizeof_char),
                                gr.io_signature(1, 1, gr.sizeof_gr_complex))  # Output signature

        samples_per_symbol = int(samples_per_symbol)
        self._samples_per_symbol = samples_per_symbol
        self._bt = bt
        self._differential = False

        if not isinstance(samples_per_symbol, int) or samples_per_symbol < 2:
            raise TypeError(
                "samples_per_symbol must be an integer >= 2, is %r" % (samples_per_symbol,))

        # up to 3 bits in filter at once
        ntaps = 4 * samples_per_symbol

        # phase change per bit = pi / 2
        sensitivity = (pi / 2) / samples_per_symbol

        # Turn it into NRZ data.
        #self.nrz = digital.bytes_to_syms()
        self.nrz = digital.chunks_to_symbols_bf([-1, 1], 1)

        # Form Gaussian filter
        # Generate Gaussian response (Needs to be convolved with window below).
        self.gaussian_taps = filter.firdes.gaussian(
            1,                   # gain
            samples_per_symbol,  # symbol_rate
            bt,                  # bandwidth * symbol time
            ntaps                # number of taps
        )

        self.sqwave = (1,) * samples_per_symbol  # rectangular window
        self.taps = numpy.convolve(numpy.array(
            self.gaussian_taps), numpy.array(self.sqwave))
        self.gaussian_filter = filter.interp_fir_filter_fff(
            samples_per_symbol, self.taps)

        # FM modulation
        self.fmmod = analog.frequency_modulator_fc(sensitivity)

        if verbose:
            self._print_verbage()

        if log:
            self._setup_logging()

        # Connect & Initialize base class
        if do_unpack:
            self.unpack = blocks.packed_to_unpacked_bb(1, gr.GR_MSB_FIRST)
            self.connect(self, self.unpack, self.nrz,
                         self.gaussian_filter, self.fmmod, self)
        else:
            self.connect(self, self.nrz, self.gaussian_filter,
                         self.fmmod, self)

    def samples_per_symbol(self):
        """Return the configured samples-per-symbol."""
        return self._samples_per_symbol

    @staticmethod
    # staticmethod that's also callable on an instance
    def bits_per_symbol(self=None):
        return 1

    def _print_verbage(self):
        print("bits per symbol = %d" % self.bits_per_symbol())
        print("Gaussian filter bt = %.2f" % self._bt)

    def _setup_logging(self):
        print("Modulation logging turned on.")
        self.connect(self.nrz,
                     blocks.file_sink(gr.sizeof_float, "nrz.dat"))
        self.connect(self.gaussian_filter,
                     blocks.file_sink(gr.sizeof_float, "gaussian_filter.dat"))
        self.connect(self.fmmod,
                     blocks.file_sink(gr.sizeof_gr_complex, "fmmod.dat"))

    @staticmethod
    def add_options(parser):
        """
        Adds GMSK modulation-specific options to the standard parser
        """
        parser.add_option("", "--bt", type="float", default=_def_bt,
                          help="set bandwidth-time product [default=%default] (GMSK)")

    @staticmethod
    def extract_kwargs_from_options(options):
        """
        Given command line options, create dictionary suitable for passing to __init__
        """
        return modulation_utils.extract_kwargs_from_options(gmsk_mod.__init__,
                                                            ('self',), options)
# /////////////////////////////////////////////////////////////////////////////
# GMSK demodulator
# /////////////////////////////////////////////////////////////////////////////
class gmsk_demod(gr.hier_block2):
"""
Hierarchical block for Gaussian Minimum Shift Key (GMSK)
demodulation.
The input is the complex modulated signal at baseband.
The output is a stream of bits packed 1 bit per byte (the LSB)
Args:
samples_per_symbol: samples per baud (integer)
gain_mu: controls rate of mu adjustment (float)
mu: unused but unremoved for backward compatibility (unused)
omega_relative_limit: sets max variation in omega (float)
freq_error: bit rate error as a fraction (float)
verbose: Print information about modulator? (boolean)
log: Print modualtion data to files? (boolean)
"""
def __init__(self,
samples_per_symbol=_def_samples_per_symbol,
gain_mu=_def_gain_mu,
mu=_def_mu,
omega_relative_limit=_def_omega_relative_limit,
freq_error=_def_freq_error,
verbose=_def_verbose,
log=_def_log):
gr.hier_block2.__init__(self, "gmsk_demod",
# Input signature
gr.io_signature(1, 1, gr.sizeof_gr_complex),
gr.io_signature(1, 1, gr.sizeof_char)) # Output signature
self._samples_per_symbol = samples_per_symbol
self._gain_mu = gain_mu
self._omega_relative_limit = omega_relative_limit
self._freq_error = freq_error
self._differential = False
if samples_per_symbol < 2:
raise TypeError("samples_per_symbol >= 2, is %f" %
samples_per_symbol)
self._omega = samples_per_symbol * (1 + self._freq_error)
if not self._gain_mu:
self._gain_mu = 0.175
self._gain_omega = .25 * self._gain_mu * \
self._gain_mu # critically damped
self._damping = 1.0
# critically damped
self._loop_bw = -ln((self._gain_mu + self._gain_omega) / (-2.0) + 1)
self._max_dev = self._omega_relative_limit * self._samples_per_symbol
# Demodulate FM
sensitivity = (pi / 2) / samples_per_symbol
self.fmdemod = analog.quadrature_demod_cf(1.0 / sensitivity)
# the clock recovery block tracks the symbol clock and resamples as needed.
# the output of the block is a stream of soft symbols (float)
self.clock_recovery = self.digital_symbol_sync_xx_0 = digital.symbol_sync_ff(digital.TED_MUELLER_AND_MULLER,
self._omega,
self._loop_bw,
self._damping,
1.0, # Expected TED gain
self._max_dev,
1, # Output sps
digital.constellation_bpsk().base(),
digital.IR_MMSE_8TAP,
128,
[])
# slice the floats at 0, outputting 1 bit (the LSB of the output byte) per sample
self.slicer = digital.binary_slicer_fb()
if verbose:
self._print_verbage()
if log:
self._setup_logging()
# Connect & Initialize base class
self.connect(self, self.fmdemod,
self.clock_recovery, self.slicer, self)
def samples_per_symbol(self):
return self._samples_per_symbol
@staticmethod
def bits_per_symbol(self=None): # staticmethod that's also callable on an instance
return 1
def _print_verbage(self):
print("bits per symbol = %d" % self.bits_per_symbol())
print("Symbol Sync M&M omega = %f" % self._omega)
print("Symbol Sync M&M gain mu = %f" % self._gain_mu)
print("M&M clock recovery mu (Unused) = %f" % self._mu)
print("Symbol Sync M&M omega rel. limit = %f" %
self._omega_relative_limit)
print("frequency error = %f" % self._freq_error)
def _setup_logging(self):
print("Demodulation logging turned on.")
self.connect(self.fmdemod,
blocks.file_sink(gr.sizeof_float, "fmdemod.dat"))
self.connect(self.clock_recovery,
blocks.file_sink(gr.sizeof_float, "clock_recovery.dat"))
self.connect(self.slicer,
blocks.file_sink(gr.sizeof_char, "slicer.dat"))
@staticmethod
def add_options(parser):
"""
Adds GMSK demodulation-specific options to the standard parser
"""
parser.add_option("", "--gain-mu", type="float", default=_def_gain_mu,
help="Symbol Sync M&M gain mu [default=%default] (GMSK/PSK)")
parser.add_option("", "--mu", type="float", default=_def_mu,
help="M&M clock recovery mu [default=%default] (Unused)")
parser.add_option("", "--omega-relative-limit", type="float", default=_def_omega_relative_limit,
help="Symbol Sync M&M omega relative limit [default=%default] (GMSK/PSK)")
parser.add_option("", "--freq-error", type="float", default=_def_freq_error,
help="Symbol Sync M&M frequency error [default=%default] (GMSK)")
    @staticmethod
    def extract_kwargs_from_options(options):
        """
        Given command line options, create dictionary suitable for passing to __init__
        """
        # Delegates to modulation_utils, which keeps only the option names
        # that match gmsk_demod.__init__'s keyword arguments ('self' excluded).
        return modulation_utils.extract_kwargs_from_options(gmsk_demod.__init__,
            ('self',), options)
#
# Add these to the mod/demod registry so they can be looked up by the
# 'gmsk' name (e.g. by command-line tools that select a modulation).
#
modulation_utils.add_type_1_mod('gmsk', gmsk_mod)
modulation_utils.add_type_1_demod('gmsk', gmsk_demod)
| dl1ksv/gnuradio | gr-digital/python/digital/gmsk.py | Python | gpl-3.0 | 11,903 | [
"Gaussian"
] | 4e3fe94aa81b7faae3bb94a593296cf30b19df9891c77efa683881de274c00dd |
# Natural Language Toolkit: Maximum Entropy Classifiers
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Dmitry Chichkov <dchichkov@gmail.com> (TypedMaxentFeatureEncoding)
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
A classifier model based on maximum entropy modeling framework. This
framework considers all of the probability distributions that are
empirically consistent with the training data; and chooses the
distribution with the highest entropy. A probability distribution is
"empirically consistent" with a set of training data if its estimated
frequency with which a class and a feature vector value co-occur is
equal to the actual frequency in the data.
Terminology: 'feature'
======================
The term *feature* is usually used to refer to some property of an
unlabeled token. For example, when performing word sense
disambiguation, we might define a ``'prevword'`` feature whose value is
the word preceding the target word. However, in the context of
maxent modeling, the term *feature* is typically used to refer to a
property of a "labeled" token. In order to prevent confusion, we
will introduce two distinct terms to disambiguate these two different
concepts:
- An "input-feature" is a property of an unlabeled token.
- A "joint-feature" is a property of a labeled token.
In the rest of the ``nltk.classify`` module, the term "features" is
used to refer to what we will call "input-features" in this module.
In literature that describes and discusses maximum entropy models,
input-features are typically called "contexts", and joint-features
are simply referred to as "features".
Converting Input-Features to Joint-Features
-------------------------------------------
In maximum entropy models, joint-features are required to have numeric
values. Typically, each input-feature ``input_feat`` is mapped to a
set of joint-features of the form:
| joint_feat(token, label) = { 1 if input_feat(token) == feat_val
| { and label == some_label
| {
| { 0 otherwise
For all values of ``feat_val`` and ``some_label``. This mapping is
performed by classes that implement the ``MaxentFeatureEncodingI``
interface.
"""
from __future__ import print_function
__docformat__ = 'epytext en'
import numpy
import time
import tempfile
import os
import gzip
from collections import defaultdict
from nltk.util import OrderedDict
from nltk.probability import DictionaryProbDist
from nltk.classify.api import ClassifierI
from nltk.classify.util import attested_labels, CutoffChecker, accuracy, log_likelihood
from nltk.classify.megam import call_megam, write_megam_file, parse_megam_weights
from nltk.classify.tadm import call_tadm, write_tadm_file, parse_tadm_weights
######################################################################
#{ Classifier Model
######################################################################
class MaxentClassifier(ClassifierI):
    """
    A maximum entropy classifier (also known as a "conditional
    exponential classifier").  This classifier is parameterized by a
    set of "weights", which are used to combine the joint-features
    that are generated from a featureset by an "encoding".  In
    particular, the encoding maps each ``(featureset, label)`` pair to
    a vector.  The probability of each label is then computed using
    the following equation::

                                dotprod(weights, encode(fs,label))
      prob(label|fs) = ---------------------------------------------------
                       sum(dotprod(weights, encode(fs,l)) for l in labels)

    Where ``dotprod`` is the dot product::

      dotprod(a,b) = sum(x*y for (x,y) in zip(a,b))
    """
    def __init__(self, encoding, weights, logarithmic=True):
        """
        Construct a new maxent classifier model.  Typically, new
        classifier models are created using the ``train()`` method.

        :type encoding: MaxentFeatureEncodingI
        :param encoding: An encoding that is used to convert the
            featuresets that are given to the ``classify`` method into
            joint-feature vectors, which are used by the maxent
            classifier model.

        :type weights: list of float
        :param weights: The feature weight vector for this classifier.

        :type logarithmic: bool
        :param logarithmic: If false, then use non-logarithmic weights.
        """
        self._encoding = encoding
        self._weights = weights
        self._logarithmic = logarithmic
        # Every joint-feature produced by the encoding must have a weight.
        assert encoding.length() == len(weights)

    def labels(self):
        return self._encoding.labels()

    def set_weights(self, new_weights):
        """
        Set the feature weight vector for this classifier.
        :param new_weights: The new feature weight vector.
        :type new_weights: list of float
        """
        self._weights = new_weights
        assert (self._encoding.length() == len(new_weights))

    def weights(self):
        """
        :return: The feature weight vector for this classifier.
        :rtype: list of float
        """
        return self._weights

    def classify(self, featureset):
        return self.prob_classify(featureset).max()

    def prob_classify(self, featureset):
        # Score each label either additively (log space) or
        # multiplicatively (probability space), then normalize.
        prob_dict = {}
        for label in self._encoding.labels():
            feature_vector = self._encoding.encode(featureset, label)
            if self._logarithmic:
                total = 0.0
                for (f_id, f_val) in feature_vector:
                    total += self._weights[f_id] * f_val
                prob_dict[label] = total
            else:
                prod = 1.0
                for (f_id, f_val) in feature_vector:
                    prod *= self._weights[f_id] ** f_val
                prob_dict[label] = prod
        # Normalize the dictionary to give a probability distribution
        return DictionaryProbDist(prob_dict, log=self._logarithmic,
                                  normalize=True)

    def explain(self, featureset, columns=4):
        """
        Print a table showing the effect of each of the features in
        the given feature set, and how they combine to determine the
        probabilities of each label for that featureset.
        """
        descr_width = 50
        TEMPLATE = ' %-'+str(descr_width-2)+'s%s%8.3f'
        pdist = self.prob_classify(featureset)
        labels = sorted(pdist.samples(), key=pdist.prob, reverse=True)
        labels = labels[:columns]
        print(' Feature'.ljust(descr_width)+''.join(
            '%8s' % str(l)[:7] for l in labels))
        print(' '+'-'*(descr_width-2+8*len(labels)))
        sums = defaultdict(int)
        for i, label in enumerate(labels):
            feature_vector = self._encoding.encode(featureset, label)
            # Sort by weight magnitude.  Index into the (fid, fval) tuple
            # instead of using tuple-parameter unpacking, which is a
            # syntax error on Python 3.
            feature_vector.sort(key=lambda fv: abs(self._weights[fv[0]]),
                                reverse=True)
            for (f_id, f_val) in feature_vector:
                if self._logarithmic: score = self._weights[f_id] * f_val
                # bugfix: previously indexed with undefined name `fid`.
                else: score = self._weights[f_id] ** f_val
                descr = self._encoding.describe(f_id)
                descr = descr.split(' and label is ')[0] # hack
                descr += ' (%s)' % f_val # hack
                if len(descr) > 47: descr = descr[:44]+'...'
                print(TEMPLATE % (descr, i*8*' ', score))
                sums[label] += score
        print(' '+'-'*(descr_width-1+8*len(labels)))
        print(' TOTAL:'.ljust(descr_width)+''.join(
            '%8.3f' % sums[l] for l in labels))
        print(' PROBS:'.ljust(descr_width)+''.join(
            '%8.3f' % pdist.prob(l) for l in labels))

    def show_most_informative_features(self, n=10, show='all'):
        """
        :param show: all, neg, or pos (for negative-only or positive-only)
        """
        # Rank feature ids by |weight|, optionally filtered by sign.
        fids = sorted(range(len(self._weights)),
                      key=lambda fid: abs(self._weights[fid]),
                      reverse=True)
        if show == 'pos':
            fids = [fid for fid in fids if self._weights[fid]>0]
        elif show == 'neg':
            fids = [fid for fid in fids if self._weights[fid]<0]
        for fid in fids[:n]:
            print('%8.3f %s' % (self._weights[fid],
                                self._encoding.describe(fid)))

    def __repr__(self):
        return ('<ConditionalExponentialClassifier: %d labels, %d features>' %
                (len(self._encoding.labels()), self._encoding.length()))

    #: A list of the algorithm names that are accepted for the
    #: ``train()`` method's ``algorithm`` parameter.
    ALGORITHMS = ['GIS', 'IIS', 'CG', 'BFGS', 'Powell', 'LBFGSB',
                  'Nelder-Mead', 'MEGAM', 'TADM']

    @classmethod
    def train(cls, train_toks, algorithm=None, trace=3, encoding=None,
              labels=None, sparse=True, gaussian_prior_sigma=0, **cutoffs):
        """
        Train a new maxent classifier based on the given corpus of
        training samples.  This classifier will have its weights
        chosen to maximize entropy while remaining empirically
        consistent with the training corpus.

        :rtype: MaxentClassifier
        :return: The new maxent classifier

        :type train_toks: list
        :param train_toks: Training data, represented as a list of
            pairs, the first member of which is a featureset,
            and the second of which is a classification label.

        :type algorithm: str
        :param algorithm: A case-insensitive string, specifying which
            algorithm should be used to train the classifier.  The
            following algorithms are currently available.

            - Iterative Scaling Methods: Generalized Iterative Scaling (``'GIS'``),
              Improved Iterative Scaling (``'IIS'``)
            - Optimization Methods (requiring scipy): Conjugate gradient (``'CG'``)
              Broyden-Fletcher-Goldfarb-Shanno algorithm (``'BFGS'``),
              Powell algorithm (``'Powell'``),
              A limited-memory variant of the BFGS algorithm (``'LBFGSB'``),
              The Nelder-Mead algorithm (``'Nelder-Mead'``).
            - External Libraries (requiring megam):
              LM-BFGS algorithm, with training performed by Megam (``'megam'``)

            The default algorithm is ``'CG'`` if scipy is
            installed; and ``'IIS'`` otherwise.

        :type trace: int
        :param trace: The level of diagnostic tracing output to produce.
            Higher values produce more verbose output.

        :type encoding: MaxentFeatureEncodingI
        :param encoding: A feature encoding, used to convert featuresets
            into feature vectors.  If none is specified, then a
            ``BinaryMaxentFeatureEncoding`` will be built based on the
            features that are attested in the training corpus.

        :type labels: list(str)
        :param labels: The set of possible labels.  If none is given, then
            the set of all labels attested in the training data will be
            used instead.

        :param sparse: If True, then use sparse matrices instead of
            dense matrices.  Currently, this is only supported by
            the scipy (optimization method) algorithms.  For other
            algorithms, its value is ignored.

        :param gaussian_prior_sigma: The sigma value for a gaussian
            prior on model weights.  Currently, this is supported by
            the scipy (optimization method) algorithms and ``megam``.
            For other algorithms, its value is ignored.

        :param cutoffs: Arguments specifying various conditions under
            which the training should be halted.  (Some of the cutoff
            conditions are not supported by some algorithms.)

            - ``max_iter=v``: Terminate after ``v`` iterations.
            - ``min_ll=v``: Terminate after the negative average
              log-likelihood drops under ``v``.
            - ``min_lldelta=v``: Terminate if a single iteration improves
              log likelihood by less than ``v``.
            - ``tolerance=v``: Terminate a scipy optimization method when
              improvement drops below a tolerance level ``v``.  The
              exact meaning of this tolerance depends on the scipy
              algorithm used.  See ``scipy`` documentation for more
              info.  Default values: 1e-3 for CG, 1e-5 for LBFGSB,
              and 1e-4 for other algorithms.  (``scipy`` only)
        """
        # Pick the best available default algorithm.
        if algorithm is None:
            try:
                import scipy
                algorithm = 'cg'
            except ImportError:
                algorithm = 'iis'
        # Reject unknown cutoff keywords early, before any training work.
        for key in cutoffs:
            if key not in ('max_iter', 'min_ll', 'min_lldelta', 'tolerance',
                           'max_acc', 'min_accdelta', 'count_cutoff',
                           'norm', 'explicit', 'bernoulli'):
                raise TypeError('Unexpected keyword arg %r' % key)
        algorithm = algorithm.lower()
        # Dispatch to the trainer implementing the requested algorithm.
        if algorithm == 'iis':
            return train_maxent_classifier_with_iis(
                train_toks, trace, encoding, labels, **cutoffs)
        elif algorithm == 'gis':
            return train_maxent_classifier_with_gis(
                train_toks, trace, encoding, labels, **cutoffs)
        elif algorithm in cls._SCIPY_ALGS:
            return train_maxent_classifier_with_scipy(
                train_toks, trace, encoding, labels,
                cls._SCIPY_ALGS[algorithm], sparse,
                gaussian_prior_sigma, **cutoffs)
        elif algorithm == 'megam':
            return train_maxent_classifier_with_megam(
                train_toks, trace, encoding, labels,
                gaussian_prior_sigma, **cutoffs)
        elif algorithm == 'tadm':
            kwargs = cutoffs
            kwargs['trace'] = trace
            kwargs['encoding'] = encoding
            kwargs['labels'] = labels
            kwargs['gaussian_prior_sigma'] = gaussian_prior_sigma
            return TadmMaxentClassifier.train(train_toks, **kwargs)
        else:
            raise ValueError('Unknown algorithm %s' % algorithm)

    #: Maps lowercase algorithm names to the scipy method identifiers.
    _SCIPY_ALGS = {'cg':'CG', 'bfgs':'BFGS', 'powell':'Powell',
                   'lbfgsb':'LBFGSB', 'nelder-mead':'Nelder-Mead'}
#: Alias for MaxentClassifier, kept for backward compatibility with code
#: that imports the classifier under its older name.
ConditionalExponentialClassifier = MaxentClassifier
######################################################################
#{ Feature Encodings
######################################################################
class MaxentFeatureEncodingI(object):
    """
    A mapping that converts a set of input-feature values to a vector
    of joint-feature values, given a label.  This conversion is
    necessary to translate featuresets into a format that can be used
    by maximum entropy models.

    The set of joint-features used by a given encoding is fixed, and
    each index in the generated joint-feature vectors corresponds to a
    single joint-feature.  The length of the generated joint-feature
    vectors is therefore constant (for a given encoding).

    Because the joint-feature vectors generated by
    ``MaxentFeatureEncodingI`` are typically very sparse, they are
    represented as a list of ``(index, value)`` tuples, specifying the
    value of each non-zero joint-feature.

    Feature encodings are generally created using the ``train()``
    method, which generates an appropriate encoding based on the
    input-feature values and labels that are present in a given
    corpus.
    """
    def encode(self, featureset, label):
        """
        Given a (featureset, label) pair, return the corresponding
        vector of joint-feature values.  This vector is represented as
        a list of ``(index, value)`` tuples, specifying the value of
        each non-zero joint-feature.

        :type featureset: dict
        :rtype: list(tuple(int, int))
        """
        raise NotImplementedError()

    def length(self):
        """
        :return: The size of the fixed-length joint-feature vectors
            that are generated by this encoding.
        :rtype: int
        """
        raise NotImplementedError()

    def labels(self):
        """
        :return: A list of the \"known labels\" -- i.e., all labels
            ``l`` such that ``self.encode(fs,l)`` can be a nonzero
            joint-feature vector for some value of ``fs``.
        :rtype: list
        """
        raise NotImplementedError()

    def describe(self, fid):
        """
        :return: A string describing the value of the joint-feature
            whose index in the generated feature vectors is ``fid``.
        :rtype: str
        """
        raise NotImplementedError()

    # Fix: declared as a classmethod, matching the ``cls`` first
    # parameter and how every concrete subclass implements train().
    @classmethod
    def train(cls, train_toks):
        """
        Construct and return new feature encoding, based on a given
        training corpus ``train_toks``.

        :type train_toks: list(tuple(dict, str))
        :param train_toks: Training data, represented as a list of
            pairs, the first member of which is a feature dictionary,
            and the second of which is a classification label.
        """
        raise NotImplementedError()
class FunctionBackedMaxentFeatureEncoding(MaxentFeatureEncodingI):
    """
    A feature encoding that delegates to a user-supplied function to
    map a given featureset/label pair to a sparse joint-feature vector.
    """
    def __init__(self, func, length, labels):
        """
        Construct a new feature encoding based on the given function.

        :type func: (callable)
        :param func: A function that takes a featureset and a label,
            and returns the sparse joint-feature vector encoding them:

            >>> func(featureset, label) -> feature_vector

            This sparse joint feature vector (``feature_vector``) is a
            list of ``(index,value)`` tuples.

        :type length: int
        :param length: The size of the fixed-length joint-feature
            vectors that are generated by this encoding.

        :type labels: list
        :param labels: A list of the \"known labels\" for this
            encoding -- i.e., all labels ``l`` such that
            ``self.encode(fs,l)`` can be a nonzero joint-feature vector
            for some value of ``fs``.
        """
        self._func = func
        self._length = length
        self._labels = labels

    def encode(self, featureset, label):
        # Delegate entirely to the wrapped function.
        return self._func(featureset, label)

    def length(self):
        return self._length

    def labels(self):
        return self._labels

    def describe(self, fid):
        # Function-backed features carry no per-feature metadata.
        return 'no description available'
class BinaryMaxentFeatureEncoding(MaxentFeatureEncodingI):
    """
    A feature encoding that generates vectors containing a binary
    joint-features of the form:
    |  joint_feat(fs, l) = { 1 if (fs[fname] == fval) and (l == label)
    |                      {
    |                      { 0 otherwise
    Where ``fname`` is the name of an input-feature, ``fval`` is a value
    for that input-feature, and ``label`` is a label.
    Typically, these features are constructed based on a training
    corpus, using the ``train()`` method.  This method will create one
    feature for each combination of ``fname``, ``fval``, and ``label``
    that occurs at least once in the training corpus.
    The ``unseen_features`` parameter can be used to add "unseen-value
    features", which are used whenever an input feature has a value
    that was not encountered in the training corpus.  These features
    have the form:
    |  joint_feat(fs, l) = { 1 if is_unseen(fname, fs[fname])
    |                      {      and l == label
    |                      {
    |                      { 0 otherwise
    Where ``is_unseen(fname, fval)`` is true if the encoding does not
    contain any joint features that are true when ``fs[fname]==fval``.
    The ``alwayson_features`` parameter can be used to add "always-on
    features", which have the form::
    |  joint_feat(fs, l) = { 1 if (l == label)
    |                      {
    |                      { 0 otherwise
    These always-on features allow the maxent model to directly model
    the prior probabilities of each label.
    """
    def __init__(self, labels, mapping, unseen_features=False,
                 alwayson_features=False):
        """
        :param labels: A list of the \"known labels\" for this encoding.
        :param mapping: A dictionary mapping from ``(fname,fval,label)``
            tuples to corresponding joint-feature indexes.  These
            indexes must be the set of integers from 0...len(mapping).
            If ``mapping[fname,fval,label]=id``, then
            ``self.encode(..., fname:fval, ..., label)[id]`` is 1;
            otherwise, it is 0.
        :param unseen_features: If true, then include unseen value
            features in the generated joint-feature vectors.
        :param alwayson_features: If true, then include always-on
            features in the generated joint-feature vectors.
        """
        if set(mapping.values()) != set(range(len(mapping))):
            raise ValueError('Mapping values must be exactly the '
                             'set of integers from 0...len(mapping)')
        self._labels = list(labels)
        """A list of attested labels."""
        self._mapping = mapping
        """dict mapping from (fname,fval,label) -> fid"""
        self._length = len(mapping)
        """The length of generated joint feature vectors."""
        self._alwayson = None
        """dict mapping from label -> fid"""
        self._unseen = None
        """dict mapping from fname -> fid"""
        # Always-on feature ids are appended after the basic features.
        if alwayson_features:
            self._alwayson = dict([(label,i+self._length)
                                   for (i,label) in enumerate(labels)])
            self._length += len(self._alwayson)
        # Unseen-value feature ids come after the always-on features.
        if unseen_features:
            fnames = set(fname for (fname, fval, label) in mapping)
            self._unseen = dict([(fname, i+self._length)
                                 for (i, fname) in enumerate(fnames)])
            self._length += len(fnames)
    def encode(self, featureset, label):
        # Inherit docs.
        encoding = []
        # Convert input-features to joint-features:
        for fname, fval in featureset.items():
            # Known feature name & value:
            if (fname, fval, label) in self._mapping:
                encoding.append((self._mapping[fname, fval, label], 1))
            # Otherwise, we might want to fire an "unseen-value feature".
            elif self._unseen:
                # Have we seen this fname/fval combination with any label?
                for label2 in self._labels:
                    if (fname, fval, label2) in self._mapping:
                        break # we've seen this fname/fval combo
                # We haven't -- fire the unseen-value feature
                else:
                    if fname in self._unseen:
                        encoding.append((self._unseen[fname], 1))
        # Add always-on features:
        if self._alwayson and label in self._alwayson:
            encoding.append((self._alwayson[label], 1))
        return encoding
    def describe(self, f_id):
        # Inherit docs.
        # NOTE(review): ``long`` exists only on Python 2; this check
        # raises NameError on Python 3 -- confirm the target version.
        if not isinstance(f_id, (int, long)):
            raise TypeError('describe() expected an int')
        # Build the inverse (fid -> (fname,fval,label)) table lazily on
        # first use, then cache it on the instance.
        try:
            self._inv_mapping
        except AttributeError:
            self._inv_mapping = [-1]*len(self._mapping)
            for (info, i) in self._mapping.items():
                self._inv_mapping[i] = info
        if f_id < len(self._mapping):
            (fname, fval, label) = self._inv_mapping[f_id]
            return '%s==%r and label is %r' % (fname, fval, label)
        elif self._alwayson and f_id in self._alwayson.values():
            for (label, f_id2) in self._alwayson.items():
                if f_id==f_id2: return 'label is %r' % label
        elif self._unseen and f_id in self._unseen.values():
            for (fname, f_id2) in self._unseen.items():
                if f_id==f_id2: return '%s is unseen' % fname
        else:
            raise ValueError('Bad feature id')
    def labels(self):
        # Inherit docs.
        return self._labels
    def length(self):
        # Inherit docs.
        return self._length
    @classmethod
    def train(cls, train_toks, count_cutoff=0, labels=None, **options):
        """
        Construct and return new feature encoding, based on a given
        training corpus ``train_toks``.  See the class description
        ``BinaryMaxentFeatureEncoding`` for a description of the
        joint-features that will be included in this encoding.

        :type train_toks: list(tuple(dict, str))
        :param train_toks: Training data, represented as a list of
            pairs, the first member of which is a feature dictionary,
            and the second of which is a classification label.

        :type count_cutoff: int
        :param count_cutoff: A cutoff value that is used to discard
            rare joint-features.  If a joint-feature's value is 1
            fewer than ``count_cutoff`` times in the training corpus,
            then that joint-feature is not included in the generated
            encoding.

        :type labels: list
        :param labels: A list of labels that should be used by the
            classifier.  If not specified, then the set of labels
            attested in ``train_toks`` will be used.

        :param options: Extra parameters for the constructor, such as
            ``unseen_features`` and ``alwayson_features``.
        """
        mapping = {}              # maps (fname, fval, label) -> fid
        seen_labels = set()       # The set of labels we've encountered
        count = defaultdict(int)  # maps (fname, fval) -> count
        for (tok, label) in train_toks:
            if labels and label not in labels:
                raise ValueError('Unexpected label %s' % label)
            seen_labels.add(label)
            # Record each of the features.
            for (fname, fval) in tok.items():
                # If a count cutoff is given, then only add a joint
                # feature once the corresponding (fname, fval, label)
                # tuple exceeds that cutoff.
                # (Counts are tallied per (fname, fval) pair, shared
                # across all labels.)
                count[fname,fval] += 1
                if count[fname,fval] >= count_cutoff:
                    if (fname, fval, label) not in mapping:
                        mapping[fname, fval, label] = len(mapping)
        if labels is None: labels = seen_labels
        return cls(labels, mapping, **options)
class GISEncoding(BinaryMaxentFeatureEncoding):
    """
    A binary feature encoding which adds one new joint-feature to the
    joint-features defined by ``BinaryMaxentFeatureEncoding``: a
    correction feature, whose value is chosen to ensure that the
    sparse vector always sums to a constant non-negative number.  This
    new feature is used to ensure two preconditions for the GIS
    training algorithm:
      - At least one feature vector index must be nonzero for every
        token.
      - The feature vector must sum to a constant non-negative number
        for every token.
    """
    def __init__(self, labels, mapping, unseen_features=False,
                 alwayson_features=False, C=None):
        """
        :param C: The correction constant.  The value of the correction
            feature is based on this value.  In particular, its value is
            ``C - sum([v for (f,v) in encoding])``.
        :seealso: ``BinaryMaxentFeatureEncoding.__init__``
        """
        BinaryMaxentFeatureEncoding.__init__(
            self, labels, mapping, unseen_features, alwayson_features)
        # Default: one more than the number of distinct feature names,
        # which keeps the correction value strictly positive.
        if C is None:
            C = len(set(fname for (fname, fval, label) in mapping)) + 1
        self._C = C

    @property
    def C(self):
        """The non-negative constant that all encoded feature vectors
        will sum to."""
        return self._C

    def encode(self, featureset, label):
        # Start from the plain binary encoding, then append the
        # correction feature so the whole vector sums to exactly C.
        vector = BinaryMaxentFeatureEncoding.encode(self, featureset, label)
        base_length = BinaryMaxentFeatureEncoding.length(self)
        feature_sum = sum(v for (f, v) in vector)
        if feature_sum >= self._C:
            raise ValueError('Correction feature is not high enough!')
        vector.append((base_length, self._C - feature_sum))
        return vector

    def length(self):
        # One extra slot holds the correction feature.
        return 1 + BinaryMaxentFeatureEncoding.length(self)

    def describe(self, f_id):
        if f_id != BinaryMaxentFeatureEncoding.length(self):
            return BinaryMaxentFeatureEncoding.describe(self, f_id)
        return 'Correction feature (%s)' % self._C
class TadmEventMaxentFeatureEncoding(BinaryMaxentFeatureEncoding):
    # Feature encoding in the format used by the external TADM trainer.
    # Unlike the base class, the mapping is keyed on (feature, label)
    # pairs, and encode() extends the mapping lazily as new pairs appear.
    def __init__(self, labels, mapping, unseen_features=False,
                 alwayson_features=False):
        # Ordered mappings so feature ids stay aligned with the order in
        # which TADM sees them.
        self._mapping = OrderedDict(mapping)
        self._label_mapping = OrderedDict()
        BinaryMaxentFeatureEncoding.__init__(self, labels, self._mapping,
                                             unseen_features,
                                             alwayson_features)
    def encode(self, featureset, label):
        # NOTE: mutates self._mapping / self._label_mapping in place, so
        # encoding new data changes this encoding's state and length().
        encoding = []
        for feature, value in featureset.items():
            if (feature, label) not in self._mapping:
                self._mapping[(feature, label)] = len(self._mapping)
            if value not in self._label_mapping:
                # Non-integer values get dense surrogate ids; integer
                # values are used as-is.
                if not isinstance(value, int):
                    self._label_mapping[value] = len(self._label_mapping)
                else:
                    self._label_mapping[value] = value
            encoding.append((self._mapping[(feature, label)],
                             self._label_mapping[value]))
        return encoding
    def labels(self):
        return self._labels
    def describe(self, fid):
        # Linear scan of the mapping; returns the (feature, label) pair
        # for this feature id, or None if the id is unknown.
        for (feature, label) in self._mapping:
            if self._mapping[(feature, label)] == fid:
                return (feature, label)
    def length(self):
        return len(self._mapping)
    @classmethod
    def train(cls, train_toks, count_cutoff=0, labels=None, **options):
        # Build a (feature, label) mapping covering every feature with
        # every attested label (not just the observed pairs).
        mapping = OrderedDict()
        if not labels:
            labels = []
        # This gets read twice, so compute the values in case it's lazy.
        train_toks = list(train_toks)
        for (featureset, label) in train_toks:
            if label not in labels:
                labels.append(label)
        for (featureset, label) in train_toks:
            for label in labels:
                for feature in featureset:
                    if (feature, label) not in mapping:
                        mapping[(feature, label)] = len(mapping)
        return cls(labels, mapping, **options)
class TypedMaxentFeatureEncoding(MaxentFeatureEncodingI):
    """
    A feature encoding that generates vectors containing integer,
    float and binary joint-features of the form:
    Binary (for string and boolean features):
    |  joint_feat(fs, l) = { 1 if (fs[fname] == fval) and (l == label)
    |                      {
    |                      { 0 otherwise
    Value (for integer and float features):
    |  joint_feat(fs, l) = { fval if     (fs[fname] == type(fval))
    |                      {         and (l == label)
    |                      {
    |                      { not encoded otherwise
    Where ``fname`` is the name of an input-feature, ``fval`` is a value
    for that input-feature, and ``label`` is a label.
    Typically, these features are constructed based on a training
    corpus, using the ``train()`` method.
    For string and boolean features [type(fval) not in (int, float)]
    this method will create one feature for each combination of
    ``fname``, ``fval``, and ``label`` that occurs at least once in the
    training corpus.
    For integer and float features [type(fval) in (int, float)] this
    method will create one feature for each combination of ``fname``
    and ``label`` that occurs at least once in the training corpus.
    For binary features the ``unseen_features`` parameter can be used
    to add "unseen-value features", which are used whenever an input
    feature has a value that was not encountered in the training
    corpus.  These features have the form:
    |  joint_feat(fs, l) = { 1 if is_unseen(fname, fs[fname])
    |                      {      and l == label
    |                      {
    |                      { 0 otherwise
    Where ``is_unseen(fname, fval)`` is true if the encoding does not
    contain any joint features that are true when ``fs[fname]==fval``.
    The ``alwayson_features`` parameter can be used to add "always-on
    features", which have the form:
    |  joint_feat(fs, l) = { 1 if (l == label)
    |                      {
    |                      { 0 otherwise
    These always-on features allow the maxent model to directly model
    the prior probabilities of each label.
    """
    def __init__(self, labels, mapping, unseen_features=False,
                 alwayson_features=False):
        """
        :param labels: A list of the \"known labels\" for this encoding.
        :param mapping: A dictionary mapping from ``(fname,fval,label)``
            tuples to corresponding joint-feature indexes.  These
            indexes must be the set of integers from 0...len(mapping).
            If ``mapping[fname,fval,label]=id``, then
            ``self.encode(..., fname:fval, ..., label)[id]`` is 1;
            otherwise, it is 0.
        :param unseen_features: If true, then include unseen value
            features in the generated joint-feature vectors.
        :param alwayson_features: If true, then include always-on
            features in the generated joint-feature vectors.
        """
        if set(mapping.values()) != set(range(len(mapping))):
            raise ValueError('Mapping values must be exactly the '
                             'set of integers from 0...len(mapping)')
        self._labels = list(labels)
        """A list of attested labels."""
        self._mapping = mapping
        """dict mapping from (fname,fval,label) -> fid"""
        self._length = len(mapping)
        """The length of generated joint feature vectors."""
        self._alwayson = None
        """dict mapping from label -> fid"""
        self._unseen = None
        """dict mapping from fname -> fid"""
        # Always-on feature ids are appended after the basic features.
        if alwayson_features:
            self._alwayson = dict([(label,i+self._length)
                                   for (i,label) in enumerate(labels)])
            self._length += len(self._alwayson)
        # Unseen-value feature ids come after the always-on features.
        if unseen_features:
            fnames = set(fname for (fname, fval, label) in mapping)
            self._unseen = dict([(fname, i+self._length)
                                 for (i, fname) in enumerate(fnames)])
            self._length += len(fnames)
    def encode(self, featureset, label):
        # Inherit docs.
        encoding = []
        # Convert input-features to joint-features:
        for fname, fval in featureset.items():
            # Numeric features are keyed by their *type* and contribute
            # their value; all other features are binary.
            if(type(fval) in (int, float)):
                # Known feature name & value:
                if (fname, type(fval), label) in self._mapping:
                    encoding.append((self._mapping[fname, type(fval), label], fval))
            else:
                # Known feature name & value:
                if (fname, fval, label) in self._mapping:
                    encoding.append((self._mapping[fname, fval, label], 1))
                # Otherwise, we might want to fire an "unseen-value feature".
                elif self._unseen:
                    # Have we seen this fname/fval combination with any label?
                    for label2 in self._labels:
                        if (fname, fval, label2) in self._mapping:
                            break # we've seen this fname/fval combo
                    # We haven't -- fire the unseen-value feature
                    else:
                        if fname in self._unseen:
                            encoding.append((self._unseen[fname], 1))
        # Add always-on features:
        if self._alwayson and label in self._alwayson:
            encoding.append((self._alwayson[label], 1))
        return encoding
    def describe(self, f_id):
        # Inherit docs.
        # NOTE(review): ``long`` exists only on Python 2; this check
        # raises NameError on Python 3 -- confirm the target version.
        if not isinstance(f_id, (int, long)):
            raise TypeError('describe() expected an int')
        # Build the inverse (fid -> (fname,fval,label)) table lazily on
        # first use, then cache it on the instance.
        try:
            self._inv_mapping
        except AttributeError:
            self._inv_mapping = [-1]*len(self._mapping)
            for (info, i) in self._mapping.items():
                self._inv_mapping[i] = info
        if f_id < len(self._mapping):
            (fname, fval, label) = self._inv_mapping[f_id]
            return '%s==%r and label is %r' % (fname, fval, label)
        elif self._alwayson and f_id in self._alwayson.values():
            for (label, f_id2) in self._alwayson.items():
                if f_id==f_id2: return 'label is %r' % label
        elif self._unseen and f_id in self._unseen.values():
            for (fname, f_id2) in self._unseen.items():
                if f_id==f_id2: return '%s is unseen' % fname
        else:
            raise ValueError('Bad feature id')
    def labels(self):
        # Inherit docs.
        return self._labels
    def length(self):
        # Inherit docs.
        return self._length
    @classmethod
    def train(cls, train_toks, count_cutoff=0, labels=None, **options):
        """
        Construct and return new feature encoding, based on a given
        training corpus ``train_toks``.  See the class description
        ``TypedMaxentFeatureEncoding`` for a description of the
        joint-features that will be included in this encoding.

        Note: recognized feature value types are (int, float); other
        types are interpreted as regular binary features.

        :type train_toks: list(tuple(dict, str))
        :param train_toks: Training data, represented as a list of
            pairs, the first member of which is a feature dictionary,
            and the second of which is a classification label.

        :type count_cutoff: int
        :param count_cutoff: A cutoff value that is used to discard
            rare joint-features.  If a joint-feature's value is 1
            fewer than ``count_cutoff`` times in the training corpus,
            then that joint-feature is not included in the generated
            encoding.

        :type labels: list
        :param labels: A list of labels that should be used by the
            classifier.  If not specified, then the set of labels
            attested in ``train_toks`` will be used.

        :param options: Extra parameters for the constructor, such as
            ``unseen_features`` and ``alwayson_features``.
        """
        mapping = {}              # maps (fname, fval, label) -> fid
        seen_labels = set()       # The set of labels we've encountered
        count = defaultdict(int)  # maps (fname, fval) -> count
        for (tok, label) in train_toks:
            if labels and label not in labels:
                raise ValueError('Unexpected label %s' % label)
            seen_labels.add(label)
            # Record each of the features.
            for (fname, fval) in tok.items():
                # Numeric features are collapsed onto their type.
                if(type(fval) in (int, float)): fval = type(fval)
                # If a count cutoff is given, then only add a joint
                # feature once the corresponding (fname, fval, label)
                # tuple exceeds that cutoff.
                count[fname,fval] += 1
                if count[fname,fval] >= count_cutoff:
                    if (fname, fval, label) not in mapping:
                        mapping[fname, fval, label] = len(mapping)
        if labels is None: labels = seen_labels
        return cls(labels, mapping, **options)
######################################################################
#{ Classifier Trainer: Generalized Iterative Scaling
######################################################################
def train_maxent_classifier_with_gis(train_toks, trace=3, encoding=None,
                                     labels=None, **cutoffs):
    """
    Train a new ``ConditionalExponentialClassifier``, using the given
    training samples, using the Generalized Iterative Scaling
    algorithm.  This ``ConditionalExponentialClassifier`` will encode
    the model that maximizes entropy from all the models that are
    empirically consistent with ``train_toks``.

    :param train_toks: Training data, as a list of (featureset, label)
        pairs.
    :param trace: Verbosity level (0 = silent; >2 prints a
        per-iteration log-likelihood/accuracy report).
    :param encoding: A feature encoding that defines ``C`` (e.g., a
        ``GISEncoding``).  If None, one is trained from ``train_toks``.
    :param labels: Labels forwarded to the encoding trainer when no
        encoding is supplied.
    :param cutoffs: Convergence criteria understood by
        ``CutoffChecker`` (``max_iter`` defaults to 100).
    :see: ``train_maxent_classifier()`` for parameter descriptions.
    """
    cutoffs.setdefault('max_iter', 100)
    cutoffchecker = CutoffChecker(cutoffs)

    # Construct an encoding from the training data.
    if encoding is None:
        encoding = GISEncoding.train(train_toks, labels=labels)

    if not hasattr(encoding, 'C'):
        raise TypeError('The GIS algorithm requires an encoding that '
                        'defines C (e.g., GISEncoding).')

    # Cinv is the inverse of the sum of each joint feature vector.
    # This controls the learning rate: higher Cinv (or lower C) gives
    # faster learning.
    Cinv = 1.0 / encoding.C

    # Count how many times each feature occurs in the training data.
    empirical_fcount = calculate_empirical_fcount(train_toks, encoding)

    # Check for any features that are not attested in train_toks.
    unattested = set(numpy.nonzero(empirical_fcount == 0)[0])

    # Build the classifier.  Start with weight=0 for each attested
    # feature, and weight=-infinity for each unattested feature.
    weights = numpy.zeros(len(empirical_fcount), 'd')
    for fid in unattested:
        weights[fid] = numpy.NINF
    classifier = ConditionalExponentialClassifier(encoding, weights)

    # Take the log of the empirical fcount once, outside the loop.
    log_empirical_fcount = numpy.log2(empirical_fcount)
    del empirical_fcount

    if trace > 0:
        print(' ==> Training (%d iterations)' % cutoffs['max_iter'])
    if trace > 2:
        print()
        print('      Iteration    Log Likelihood    Accuracy')
        print('      ---------------------------------------')

    # Train the classifier.  (The previous revision kept unused
    # ``ll_old``/``acc_old`` variables and a redundant bare
    # ``except: raise`` clause; both have been removed.)
    try:
        while True:
            if trace > 2:
                ll = cutoffchecker.ll or log_likelihood(classifier, train_toks)
                acc = cutoffchecker.acc or accuracy(classifier, train_toks)
                iternum = cutoffchecker.iter
                print('     %9d    %14.5f    %9.3f' % (iternum, ll, acc))

            # Use the model to estimate the number of times each
            # feature should occur in the training data.
            estimated_fcount = calculate_estimated_fcount(
                classifier, train_toks, encoding)

            # Take the log of estimated fcount (avoid taking log(0).)
            for fid in unattested:
                estimated_fcount[fid] += 1
            log_estimated_fcount = numpy.log2(estimated_fcount)
            del estimated_fcount

            # Update the classifier weights
            weights = classifier.weights()
            weights += (log_empirical_fcount - log_estimated_fcount) * Cinv
            classifier.set_weights(weights)

            # Check the log-likelihood & accuracy cutoffs.
            if cutoffchecker.check(classifier, train_toks):
                break

    except KeyboardInterrupt:
        print('      Training stopped: keyboard interrupt')

    if trace > 2:
        ll = log_likelihood(classifier, train_toks)
        acc = accuracy(classifier, train_toks)
        print('         Final    %14.5f    %9.3f' % (ll, acc))

    # Return the classifier.
    return classifier
def calculate_empirical_fcount(train_toks, encoding):
    """Sum each joint-feature's value over the labeled training data.

    :param train_toks: list of (featureset, label) pairs.
    :param encoding: a feature encoding providing ``length()`` and
        ``encode(featureset, label)``.
    :return: a float array of length ``encoding.length()`` whose *i*-th
        entry is the total value of joint-feature *i* over the corpus.
    """
    totals = numpy.zeros(encoding.length(), 'd')
    for featureset, label in train_toks:
        for feature_id, feature_val in encoding.encode(featureset, label):
            totals[feature_id] += feature_val
    return totals
def calculate_estimated_fcount(classifier, train_toks, encoding):
    """Estimate the expected count of each joint-feature under the model.

    For every training featureset, each label's feature values are
    accumulated weighted by the classifier's predicted probability for
    that label.

    :param classifier: the current classifier (used via
        ``prob_classify``).
    :param train_toks: list of (featureset, label) pairs; the gold
        labels are not used here.
    :param encoding: a feature encoding providing ``length()`` and
        ``encode(featureset, label)``.
    :return: a float array of length ``encoding.length()``.
    """
    fcount = numpy.zeros(encoding.length(), 'd')
    # Fix: the gold label unpacked here used to be named ``label`` and
    # was immediately shadowed by the inner loop variable; it is unused,
    # so it is now discarded explicitly.
    for tok, _ in train_toks:
        pdist = classifier.prob_classify(tok)
        for label in pdist.samples():
            prob = pdist.prob(label)
            for fid, fval in encoding.encode(tok, label):
                fcount[fid] += prob * fval
    return fcount
######################################################################
#{ Classifier Trainer: Improved Iterative Scaling
######################################################################
def train_maxent_classifier_with_iis(train_toks, trace=3, encoding=None,
                                     labels=None, **cutoffs):
    """
    Train a new ``ConditionalExponentialClassifier``, using the given
    training samples, using the Improved Iterative Scaling algorithm.
    This ``ConditionalExponentialClassifier`` will encode the model
    that maximizes entropy from all the models that are empirically
    consistent with ``train_toks``.

    :param train_toks: Training data, as a list of (featureset, label)
        pairs.
    :param trace: Verbosity level (0 = silent; >2 prints a
        per-iteration log-likelihood/accuracy report).
    :param encoding: A feature encoding; if None, a
        ``BinaryMaxentFeatureEncoding`` is trained from ``train_toks``.
    :param labels: Labels forwarded to the encoding trainer when no
        encoding is supplied.
    :param cutoffs: Convergence criteria understood by
        ``CutoffChecker`` (``max_iter`` defaults to 100).
    :see: ``train_maxent_classifier()`` for parameter descriptions.
    """
    cutoffs.setdefault('max_iter', 100)
    cutoffchecker = CutoffChecker(cutoffs)

    # Construct an encoding from the training data.
    if encoding is None:
        encoding = BinaryMaxentFeatureEncoding.train(train_toks, labels=labels)

    # Count how many times each feature occurs in the training data.
    empirical_ffreq = (calculate_empirical_fcount(train_toks, encoding) /
                       len(train_toks))

    # Find the nf map, and related variables nfarray and nfident.
    # nf is the sum of the features for a given labeled text.
    # nfmap compresses this sparse set of values to a dense list.
    # nfarray performs the reverse operation.  nfident is
    # nfarray multiplied by an identity matrix.
    nfmap = calculate_nfmap(train_toks, encoding)
    nfarray = numpy.array(sorted(nfmap, key=nfmap.__getitem__), 'd')
    nftranspose = numpy.reshape(nfarray, (len(nfarray), 1))

    # Check for any features that are not attested in train_toks.
    unattested = set(numpy.nonzero(empirical_ffreq == 0)[0])

    # Build the classifier.  Start with weight=0 for each attested
    # feature, and weight=-infinity for each unattested feature.
    weights = numpy.zeros(len(empirical_ffreq), 'd')
    for fid in unattested:
        weights[fid] = numpy.NINF
    classifier = ConditionalExponentialClassifier(encoding, weights)

    if trace > 0:
        print(' ==> Training (%d iterations)' % cutoffs['max_iter'])
    if trace > 2:
        print()
        print('      Iteration    Log Likelihood    Accuracy')
        print('      ---------------------------------------')

    # Train the classifier.  (The previous revision kept unused
    # ``ll_old``/``acc_old`` variables and a redundant bare
    # ``except: raise`` clause; both have been removed.)
    try:
        while True:
            if trace > 2:
                ll = cutoffchecker.ll or log_likelihood(classifier, train_toks)
                acc = cutoffchecker.acc or accuracy(classifier, train_toks)
                iternum = cutoffchecker.iter
                print('     %9d    %14.5f    %9.3f' % (iternum, ll, acc))

            # Calculate the deltas for this iteration, using Newton's method.
            deltas = calculate_deltas(
                train_toks, classifier, unattested, empirical_ffreq,
                nfmap, nfarray, nftranspose, encoding)

            # Use the deltas to update our weights.
            weights = classifier.weights()
            weights += deltas
            classifier.set_weights(weights)

            # Check the log-likelihood & accuracy cutoffs.
            if cutoffchecker.check(classifier, train_toks):
                break

    except KeyboardInterrupt:
        print('      Training stopped: keyboard interrupt')

    if trace > 2:
        ll = log_likelihood(classifier, train_toks)
        acc = accuracy(classifier, train_toks)
        print('         Final    %14.5f    %9.3f' % (ll, acc))

    # Return the classifier.
    return classifier
def calculate_nfmap(train_toks, encoding):
    """
    Construct a map that can be used to compress ``nf`` (which is
    typically sparse).

    *nf(feature_vector)* is the sum of the feature values for
    *feature_vector*, i.e. the number of active features for a given
    labeled text.  This function collects every value of *nf(t)* that
    is attested for at least one (token, label) combination in
    ``train_toks`` and maps those attested values onto a dense,
    contiguous range *0...N*.  For example, if the only attested
    values of *nf()* were 3, 5, and 7, the result might be
    ``{3:0, 5:1, 7:2}``.

    :return: A map that can be used to compress ``nf`` to a dense
        vector.
    :rtype: dict(int -> int)
    """
    # Every attested nf value, over all tokens and all candidate labels.
    attested = {
        sum(val for (fid, val) in encoding.encode(tok, label))
        for (tok, _) in train_toks
        for label in encoding.labels()
    }
    # Assign each attested value a dense index.
    return {nf: index for (index, nf) in enumerate(attested)}
def calculate_deltas(train_toks, classifier, unattested, ffreq_empirical,
                     nfmap, nfarray, nftranspose, encoding):
    """
    Calculate the update values for the classifier weights for
    this iteration of IIS.  These update weights are the value of
    ``delta`` that solves the equation::

      ffreq_empirical[i]
             =
      SUM[fs,l] (classifier.prob_classify(fs).prob(l) *
                 feature_vector(fs,l)[i] *
                 exp(delta[i] * nf(feature_vector(fs,l))))

    Where:
        - *(fs,l)* is a (featureset, label) tuple from ``train_toks``
        - *feature_vector(fs,l)* = ``encoding.encode(fs,l)``
        - *nf(vector)* = ``sum([val for (id,val) in vector])``

    This method uses Newton's method to solve this equation for
    *delta[i]*.  In particular, it starts with a guess of
    ``delta[i]`` = 1; and iteratively updates ``delta`` with:

    | delta[i] -= (ffreq_empirical[i] - sum1[i])/(-sum2[i])

    until convergence, where *sum1* and *sum2* are defined as:

    |    sum1[i](delta) = SUM[fs,l] f[i](fs,l,delta)
    |    sum2[i](delta) = SUM[fs,l] (f[i](fs,l,delta).nf(feature_vector(fs,l)))
    |    f[i](fs,l,delta) = (classifier.prob_classify(fs).prob(l) .
    |                        feature_vector(fs,l)[i] .
    |                        exp(delta[i] . nf(feature_vector(fs,l))))

    Note that *sum1* and *sum2* depend on ``delta``; so they need
    to be re-computed each iteration.

    The variables ``nfmap``, ``nfarray``, and ``nftranspose`` are
    used to generate a dense encoding for *nf(ltext)*.  This
    allows ``_deltas`` to calculate *sum1* and *sum2* using
    matrices, which yields a significant performance improvement.

    :param train_toks: The set of training tokens.
    :type train_toks: list(tuple(dict, str))
    :param classifier: The current classifier.
    :type classifier: ClassifierI
    :param ffreq_empirical: An array containing the empirical
        frequency for each feature.  The *i*\ th element of this
        array is the empirical frequency for feature *i*.
    :type ffreq_empirical: sequence of float
    :param unattested: The ids of the features that are not attested
        in the training data, i.e. the indices *i* with
        ``ffreq_empirical[i]==0``.  It is iterated directly below, so
        a collection (set) of feature ids is expected rather than a
        0/1 indicator array.
    :type unattested: collection of int
    :param nfmap: A map that can be used to compress ``nf`` to a dense
        vector.
    :type nfmap: dict(int -> int)
    :param nfarray: An array that can be used to uncompress ``nf``
        from a dense vector.
    :type nfarray: array(float)
    :param nftranspose: The transpose of ``nfarray``
    :type nftranspose: array(float)
    """
    # These parameters control when we decide that we've
    # converged.  It probably should be possible to set these
    # manually, via keyword arguments to train.
    NEWTON_CONVERGE = 1e-12
    MAX_NEWTON = 300

    deltas = numpy.ones(encoding.length(), 'd')

    # Precompute the A matrix:
    # A[nf][id] = sum ( p(fs) * p(label|fs) * f(fs,label) )
    # over all label,fs s.t. num_features[label,fs]=nf
    A = numpy.zeros((len(nfmap), encoding.length()), 'd')

    # NOTE(review): the gold label unpacked from train_toks is
    # immediately shadowed by the inner loop variable; only the
    # featureset (``tok``) of each training pair is actually used here.
    for tok, label in train_toks:
        dist = classifier.prob_classify(tok)

        for label in encoding.labels():
            # Generate the feature vector
            feature_vector = encoding.encode(tok, label)
            # Find the number of active features
            nf = sum([val for (id, val) in feature_vector])
            # Update the A matrix
            for (id, val) in feature_vector:
                A[nfmap[nf], id] += dist.prob(label) * val
    A /= len(train_toks)

    # Iteratively solve for delta.  Use the following variables:
    #   - nf_delta[x][y] = nfarray[x] * delta[y]
    #   - exp_nf_delta[x][y] = exp(nf[x] * delta[y])
    #   - nf_exp_nf_delta[x][y] = nf[x] * exp(nf[x] * delta[y])
    #   - sum1[i][nf] = sum p(fs)p(label|fs)f[i](label,fs)
    #                       exp(delta[i]nf)
    #   - sum2[i][nf] = sum p(fs)p(label|fs)f[i](label,fs)
    #                       nf exp(delta[i]nf)
    for rangenum in range(MAX_NEWTON):
        nf_delta = numpy.outer(nfarray, deltas)
        # Weights in this module are base-2, so 2**x plays the role of
        # exp() in the equations above.
        exp_nf_delta = 2 ** nf_delta
        nf_exp_nf_delta = nftranspose * exp_nf_delta
        sum1 = numpy.sum(exp_nf_delta * A, axis=0)
        sum2 = numpy.sum(nf_exp_nf_delta * A, axis=0)

        # Avoid division by zero.
        for fid in unattested:
            sum2[fid] += 1

        # Update the deltas.
        deltas -= (ffreq_empirical - sum1) / -sum2

        # We can stop once we converge.
        n_error = (numpy.sum(abs((ffreq_empirical-sum1)))/
                   numpy.sum(abs(deltas)))
        if n_error < NEWTON_CONVERGE:
            return deltas
    # Newton's method did not converge within MAX_NEWTON iterations;
    # return the best deltas found so far.
    return deltas
######################################################################
#{ Classifier Trainer: scipy algorithms (GC, LBFGSB, etc.)
######################################################################
# [xx] n.b.: it's possible to supply custom trace functions, which
# could be used to make trace output consistent with iis/gis.
def train_maxent_classifier_with_scipy(train_toks, trace=3, encoding=None,
                                       labels=None, algorithm='CG',
                                       sparse=True, gaussian_prior_sigma=0,
                                       **cutoffs):
    """
    Train a new ``ConditionalExponentialClassifier``, using the given
    training samples, using the specified ``scipy`` optimization
    algorithm.  This ``ConditionalExponentialClassifier`` will encode
    the model that maximizes entropy from all the models that are
    empirically consistent with ``train_toks``.

    :see: ``train_maxent_classifier()`` for parameter descriptions.
    :require: The ``scipy`` package must be installed.

    NOTE(review): this trainer imports ``scipy.maxentropy``, which was
    removed from scipy in version 0.11 -- confirm that a compatible
    (old) scipy version is available before relying on this code path.
    """
    try:
        import scipy
    except ImportError as e:
        raise ValueError('The maxent training algorithm %r requires '
                         'that the scipy package be installed.  See '
                         'http://www.scipy.org/' % algorithm)
    try:
        # E.g., if libgfortran.2.dylib is not found.
        import scipy.sparse, scipy.maxentropy
    except ImportError as e:
        raise ValueError('Import of scipy package failed: %s' % e)

    # Construct an encoding from the training data.
    if encoding is None:
        encoding = BinaryMaxentFeatureEncoding.train(train_toks, labels=labels)
    elif labels is not None:
        raise ValueError('Specify encoding or labels, not both')

    labels = encoding.labels()
    # Map each label to its column offset within a token's label group.
    labelnum = dict([(label, i) for (i, label) in enumerate(labels)])
    num_features = encoding.length()
    num_toks = len(train_toks)
    num_labels = len(labels)

    # Decide whether to use a sparse matrix or a dense one.  Very
    # limited testing has shown that the lil matrix format
    # (list-of-lists) performs better than csr and csc formats.
    # Limited testing also suggests that the sparse matrix format
    # doesn't save much memory over the dense format in practice
    # (in terms of max memory usage).
    if sparse: zeros = scipy.sparse.lil_matrix
    else: zeros = numpy.zeros

    # Construct the 'F' matrix, which lists the feature values for
    # each training instance.  F[i, j*len(labels)+k] is equal to the
    # value of the i'th feature for the feature vector corresponding
    # to (tok[j], label[k]).
    F = zeros((num_features, num_toks*num_labels))

    # Construct the 'N' matrix, which specifies the correct label for
    # each training instance.  N[0, j*len(labels)+k] is equal to one
    # iff label[k] is the correct label for tok[j].
    N = zeros((1, num_toks*num_labels))

    # Fill in the 'F' and 'N' matrices (just make one pass through the
    # training tokens.)
    for toknum, (featureset, label) in enumerate(train_toks):
        N[0, toknum*len(labels) + labelnum[label]] += 1
        for label2 in labels:
            for (fid, fval) in encoding.encode(featureset, label2):
                F[fid, toknum*len(labels) + labelnum[label2]] = fval

    # Set up the scipy model, based on the matrices F and N.
    model = scipy.maxentropy.conditionalmodel(F, N, num_toks)
    # note -- model.setsmooth() is buggy.
    if gaussian_prior_sigma:
        model.sigma2 = gaussian_prior_sigma**2
    if algorithm == 'LBFGSB':
        model.log = None
    if trace >= 3:
        model.verbose = True
    if 'max_iter' in cutoffs:
        model.maxiter = cutoffs['max_iter']
    # Map the generic 'tolerance' cutoff onto the attribute each scipy
    # optimizer actually reads.
    if 'tolerance' in cutoffs:
        if algorithm == 'CG': model.avegtol = cutoffs['tolerance']
        elif algorithm == 'LBFGSB': model.maxgtol = cutoffs['tolerance']
        else: model.tol = cutoffs['tolerance']

    # Train the model.
    model.fit(algorithm=algorithm)

    # Convert the model's weights from base-e to base-2 weights.
    weights = model.params * numpy.log2(numpy.e)

    # Build the classifier
    return MaxentClassifier(encoding, weights)
######################################################################
#{ Classifier Trainer: megam
######################################################################
# [xx] possible extension: add support for using implicit file format;
# this would need to put requirements on what encoding is used. But
# we may need this for other maxent classifier trainers that require
# implicit formats anyway.
def train_maxent_classifier_with_megam(train_toks, trace=3, encoding=None,
                                       labels=None, gaussian_prior_sigma=0,
                                       **kwargs):
    """
    Train a new ``ConditionalExponentialClassifier``, using the given
    training samples, using the external ``megam`` library.  This
    ``ConditionalExponentialClassifier`` will encode the model that
    maximizes entropy from all the models that are empirically
    consistent with ``train_toks``.

    :see: ``train_maxent_classifier()`` for parameter descriptions.
    :see: ``nltk.classify.megam``
    """
    explicit = kwargs.get('explicit', True)
    bernoulli = kwargs.get('bernoulli', True)

    # Construct an encoding from the training data.
    if encoding is None:
        # Count cutoff can also be controlled by megam with the -minfc
        # option. Not sure where the best place for it is.
        count_cutoff = kwargs.get('count_cutoff', 0)
        encoding = BinaryMaxentFeatureEncoding.train(train_toks, count_cutoff,
                                                     labels=labels,
                                                     alwayson_features=True)
    elif labels is not None:
        raise ValueError('Specify encoding or labels, not both')

    # Write a training file for megam.
    try:
        fd, trainfile_name = tempfile.mkstemp(prefix='nltk-', suffix='.gz')
        # The raw descriptor from mkstemp is not used (gzip reopens the
        # file by name); close it so it does not leak.
        os.close(fd)
        trainfile = gzip.open(trainfile_name, 'wb')
        try:
            write_megam_file(train_toks, encoding, trainfile,
                             explicit=explicit, bernoulli=bernoulli)
        finally:
            trainfile.close()
    except (OSError, IOError, ValueError) as e:
        raise ValueError('Error while creating megam training file: %s' % e)

    # Run megam on the training file.
    options = []
    options += ['-nobias', '-repeat', '10']
    if explicit:
        options += ['-explicit']
    if not bernoulli:
        options += ['-fvals']
    if gaussian_prior_sigma:
        # Lambda is just the precision of the Gaussian prior, i.e. it's the
        # inverse variance, so the parameter conversion is 1.0/sigma**2.
        # See http://www.cs.utah.edu/~hal/docs/daume04cg-bfgs.pdf.
        inv_variance = 1.0 / gaussian_prior_sigma ** 2
    else:
        inv_variance = 0
    options += ['-lambda', '%.2f' % inv_variance, '-tune']
    if trace < 3:
        options += ['-quiet']
    if 'max_iter' in kwargs:
        options += ['-maxi', '%s' % kwargs['max_iter']]
    if 'll_delta' in kwargs:
        # [xx] this is actually a perplexity delta, not a log
        # likelihood delta
        options += ['-dpp', '%s' % abs(kwargs['ll_delta'])]
    if hasattr(encoding, 'cost'):
        options += ['-multilabel']  # each possible la
    options += ['multiclass', trainfile_name]
    stdout = call_megam(options)
    # print './megam_i686.opt ', ' '.join(options)

    # Delete the training file
    try:
        os.remove(trainfile_name)
    except (OSError, IOError) as e:
        print('Warning: unable to delete %s: %s' % (trainfile_name, e))

    # Parse the generated weight vector.
    weights = parse_megam_weights(stdout, encoding.length(), explicit)

    # Convert from base-e to base-2 weights.
    weights *= numpy.log2(numpy.e)

    # Build the classifier
    return MaxentClassifier(encoding, weights)
######################################################################
#{ Classifier Trainer: tadm
######################################################################
class TadmMaxentClassifier(MaxentClassifier):
    """A ``MaxentClassifier`` trained with the external ``tadm``
    parameter estimator.
    """

    @classmethod
    def train(cls, train_toks, **kwargs):
        """Train a classifier with ``tadm``.

        Writes the training events to a temporary gzip file, runs
        ``tadm`` via ``call_tadm``, parses the resulting weight file,
        and removes both temporary files.

        :param train_toks: list of (featureset, label) pairs.
        :param kwargs: ``algorithm``, ``trace``, ``encoding``,
            ``labels``, ``gaussian_prior_sigma``, ``count_cutoff``,
            ``max_iter``, ``min_lldelta``.
        :return: an instance of ``cls`` wrapping the learned weights
            (converted from base-e to base-2).
        """
        algorithm = kwargs.get('algorithm', 'tao_lmvm')
        trace = kwargs.get('trace', 3)
        encoding = kwargs.get('encoding', None)
        labels = kwargs.get('labels', None)
        sigma = kwargs.get('gaussian_prior_sigma', 0)
        count_cutoff = kwargs.get('count_cutoff', 0)
        max_iter = kwargs.get('max_iter')
        ll_delta = kwargs.get('min_lldelta')

        # Construct an encoding from the training data.
        if not encoding:
            encoding = TadmEventMaxentFeatureEncoding.train(train_toks,
                                                            count_cutoff,
                                                            labels=labels)

        trainfile_fd, trainfile_name = \
            tempfile.mkstemp(prefix='nltk-tadm-events-', suffix='.gz')
        weightfile_fd, weightfile_name = \
            tempfile.mkstemp(prefix='nltk-tadm-weights-')
        # The raw descriptors from mkstemp are never used (the files are
        # reopened by name below); close them so they do not leak.
        os.close(trainfile_fd)
        os.close(weightfile_fd)

        trainfile = gzip.open(trainfile_name, 'wb')
        try:
            write_tadm_file(train_toks, encoding, trainfile)
        finally:
            trainfile.close()

        options = []
        options.extend(['-monitor'])
        options.extend(['-method', algorithm])
        if sigma:
            # tadm's -l2 option takes the variance, not the std-dev.
            options.extend(['-l2', '%.6f' % sigma ** 2])
        if max_iter:
            options.extend(['-max_it', '%d' % max_iter])
        if ll_delta:
            options.extend(['-fatol', '%.6f' % abs(ll_delta)])
        options.extend(['-events_in', trainfile_name])
        options.extend(['-params_out', weightfile_name])
        if trace < 3:
            options.extend(['2>&1'])
        else:
            options.extend(['-summary'])

        call_tadm(options)

        # `with` guarantees the weight file is closed even if parsing
        # fails (the original left it open on error).
        with open(weightfile_name, 'rb') as weightfile:
            weights = parse_tadm_weights(weightfile)

        os.remove(trainfile_name)
        os.remove(weightfile_name)

        # Convert from base-e to base-2 weights.
        weights *= numpy.log2(numpy.e)

        # Build the classifier
        return cls(encoding, weights)
######################################################################
#{ Demo
######################################################################
def demo():
    # Quick smoke test: train the default MaxentClassifier on the
    # shared names-corpus demo from nltk.classify.util.
    from nltk.classify.util import names_demo
    classifier = names_demo(MaxentClassifier.train)

if __name__ == '__main__':
    demo()
| abad623/verbalucce | verbalucce/nltk/classify/maxent.py | Python | apache-2.0 | 65,039 | [
"Gaussian"
] | f33f19c4283ad83a79290879e084c06d469f173247a7e031b65da57ce66d6017 |
# Copyright (c) 2015-2016, the authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import GPy
from GPy.util.pca import PCA
from GPy.core.parameterization.variational import VariationalPosterior, NormalPosterior
import sys
def initialize_latent(init, datanum, input_dim, Y):
    """Initialize a latent matrix of shape (datanum, input_dim) for data Y.

    :param init: initialization strategy: 'PCA', 'bgplvm',
        'randomProjection'; any other value falls back to a pure
        standard-normal random init.
    :param datanum: number of data points (rows of the latent matrix).
    :param input_dim: latent dimensionality (columns).
    :param Y: observed data array used to guide the initialization.
    :returns: tuple ``(Xr, var)`` where ``Xr`` is the initial latent
        mean and ``var`` is a per-dimension variance vector rescaled so
        its maximum is 1.
    """
    Xr = np.random.randn(datanum, input_dim)
    if init == 'PCA':
        # Project Y onto its principal components; fill as many latent
        # columns as PCA can provide and leave the rest random.
        p = PCA(Y)
        PC = p.project(Y, min(input_dim, Y.shape[1]))
        Xr[:PC.shape[0], :PC.shape[1]] = PC
        # Scaled fraction of variance explained per retained component.
        var = .1*p.fracs[:input_dim]
    elif init == 'bgplvm':
        # Fit a small Bayesian GPLVM and use its posterior latent means.
        X_var = 0.5*np.ones((datanum, input_dim)) + 0.05*np.random.randn(datanum, input_dim)
        likelihood = GPy.likelihoods.Gaussian(variance = Y.var()*0.01)
        m = GPy.models.BayesianGPLVM(Y, input_dim, likelihood=likelihood, init='PCA', num_inducing=np.min((Y.shape[0], 25)), X_variance = X_var)
        # Optimize first with the noise fixed, then release it for a
        # short refinement run.
        m['Gaussian_noise.variance'].fix()
        m.optimize(max_iters=300,messages=False)
        m['Gaussian_noise.variance'].constrain_positive()
        m.optimize(max_iters=50,messages=False)
        Xr = m.X.mean
        var = X_var
        print('Init SNR:' + str(Y.var() / m['Gaussian_noise.variance']))
    elif init == 'randomProjection':
        # Random projection of the standardized data.
        Ycent = (Y-Y.mean())/Y.std()
        rr = np.random.rand(Ycent.shape[1], input_dim)
        Xr = np.dot(Ycent,rr)
        var = Xr.var(0)
    else:
        var = Xr.var(0)
    # PCA / pure-random initializations are standardized per dimension;
    # bgplvm and randomProjection results are returned as-is.
    if init not in ['bgplvm','randomProjection']:
        Xr -= Xr.mean(0)
        Xr /= Xr.std(0)
    return Xr, var/var.max()
def check_snr(m, messages=True):
    """Compute the signal-to-noise ratio of each layer (and view) of a
    deep GP model.

    SNR is the variance of a layer's output data divided by that
    layer's Gaussian noise variance; for variational outputs the
    variance of the posterior mean is used instead.

    :param m: a model exposing ``m.layers``; multi-view layers expose a
        ``views`` attribute.
    :param messages: if True, print one line per layer/view.
    :returns: a list with one entry per layer; a multi-view layer
        contributes a nested list with one SNR per view.
    """
    snr = []
    for i in range(len(m.layers)):
        if hasattr(m.layers[i],'views'):
            # Multi-view layer: report one SNR per view.
            snr.append(list())
            for j in range(len(m.layers[i].views)):
                if isinstance(m.layers[i].views[j].Y, NormalPosterior) or isinstance(m.layers[i].views[j].Y, VariationalPosterior):
                    cur_var = m.layers[i].views[j].Y.mean.var()
                else:
                    cur_var = m.layers[i].views[j].Y.var()
                cur_snr = cur_var / m.layers[i].views[j].Gaussian_noise.variance.values
                if messages:
                    print('SNR layer ' + str(i) + ' view ' + str(j) + ':' + str(cur_snr))
                snr[-1].append(cur_snr)
        else:
            # Single-view layer.
            if isinstance(m.layers[i].Y, NormalPosterior) or isinstance(m.layers[i].Y, VariationalPosterior):
                cur_var = m.layers[i].Y.mean.var()
            else:
                cur_var = m.layers[i].Y.var()
            cur_snr = cur_var / m.layers[i].Gaussian_noise.variance.values
            if messages:
                print('SNR layer ' + str(i) + ':' + str(cur_snr))
            snr.append(cur_snr)
    # Make sure any progress messages are visible immediately.
    sys.stdout.flush()
    return snr
def linsp(startP, endP):
    """Return the inclusive unit-step grid [startP, startP+1, ..., endP].

    :param startP: first value of the range.
    :param endP: last value of the range (inclusive).
    :returns: a numpy array of ``endP - startP + 1`` evenly spaced values.
    """
    # np.linspace requires an integer sample count; cast explicitly so
    # float endpoints (e.g. 1.0, 5.0) do not raise a TypeError on
    # modern numpy.
    return np.linspace(startP, endP, int(endP - startP + 1))
def load_mocap_data(subjectsNum, motionsNum, standardise=True):
    """Load CMU mocap sequences for the given subjects/motions via GPy.

    :param subjectsNum: list of subject numbers (ints).
    :param motionsNum: one list of motion numbers (ints) per subject.
    :param standardise: if True, the returned ``Y`` is zero-mean and
        unit-variance per dimension.
    :returns: tuple ``(Y, meanY, stdY)`` -- the (optionally
        standardised) data plus the per-dimension mean and std used.
    """
    # Download data (if they are not there already)
    #data_dir = '../../../GPy/GPy/util/datasets/mocap/cmu'
    #GPy.util.mocap.fetch_data(skel_store_dir=data_dir, motion_store_dir=data_dir,subj_motions=(subjectsNum, motionsNum), store_motions=True, return_motions=False)

    # Convert numbers to strings, zero-padded to two digits as the CMU
    # dataset identifiers expect.
    subjects = []
    motions = [list() for _ in range(len(subjectsNum))]
    for i in range(len(subjectsNum)):
        curSubj = str(int(subjectsNum[i]))
        if subjectsNum[i] < 10:
            curSubj = '0' + curSubj
        subjects.append(curSubj)
        for j in range(len(motionsNum[i])):
            curMot = str(int(motionsNum[i][j]))
            if motionsNum[i][j] < 10:
                curMot = '0' + curMot
            motions[i].append(curMot)
    # Concatenate all requested sequences (62 raw channels each).
    Y = np.zeros((0,62))
    for i in range(len(subjects)):
        data = GPy.util.datasets.cmu_mocap(subjects[i], motions[i])
        Y = np.concatenate((Y,data['Y']))
    # Make figure move in place.
    # Y[:, 0:3] = 0.0
    # Drop the first three (root translation) channels.
    Y = Y[:, 3:]
    meanY = Y.mean(axis=0)
    Ycentered = Y - meanY
    stdY = Ycentered.std(axis=0)
    # Guard constant dimensions against division by zero.
    stdY[np.where(stdY == 0)] = 1
    # Standardise
    if standardise:
        Y = Ycentered
        Y /= stdY
    return (Y, meanY, stdY)
def transform_labels(l):
    """Convert labels between column-vector and one-hot representations.

    If ``l`` has a single column it is treated as a vector of integer
    class labels and a ``(N, max_label + 1)`` one-hot matrix is
    returned.  Otherwise ``l`` is treated as a one-hot (or score)
    matrix and the argmax label column vector is returned, so the
    function is its own inverse.

    :param l: a 2-D numpy array, either (N, 1) integer labels or
        (N, K) one-hot rows.
    :returns: the converted representation, as described above.
    """
    # numpy is imported at module level; the previous function-local
    # ``import numpy as np`` was redundant and has been removed.
    if l.shape[1] == 1:
        l_unique = np.unique(l)
        ret = np.zeros((l.shape[0], np.max(l_unique) + 1))
        for i in l_unique:
            ret[np.where(l == i)[0], i] = 1
    else:
        ret = np.argmax(l, axis=1)[:, None]
    return ret
def visualize_DGP(model, labels, layer=0, dims=(0, 1)):
    """
    A small utility to visualize the latent space of a DGP.

    :param model: a deep GP model exposing ``model.layers[layer].X.mean``.
    :param labels: one integer class label per data point; labels index
        the colour/marker tables below, so at most 4 distinct classes
        are supported.
    :param layer: index of the layer whose latent space is plotted.
    :param dims: pair of latent dimensions to put on the x/y axes.
    """
    import matplotlib.pyplot as plt
    colors = ['r', 'g', 'b', 'm']
    markers = ['x', 'o', '+', 'v']
    latent_mean = model.layers[layer].X.mean
    for i in range(latent_mean.shape[0]):
        # Bug fix: honour the requested ``dims``; previously the first
        # two latent dimensions were always plotted regardless of the
        # ``dims`` argument.  (The default was also changed from a
        # mutable list to a tuple.)
        plt.scatter(latent_mean[i, dims[0]], latent_mean[i, dims[1]],
                    color=colors[labels[i]], s=16, marker=markers[labels[i]])
| zhenwendai/DeepGP | deepgp/util/util.py | Python | bsd-3-clause | 5,254 | [
"Gaussian"
] | a43310d615961073758f73e8d2a933fb8ec50cfcfb1ebe6442cd8c1cb056e554 |
# pysam versioning information
#
# These constants record pysam's own release number plus the versions
# of the bundled htslib/samtools/bcftools sources.
__version__ = "0.15.0"

# TODO: upgrade number
__samtools_version__ = "1.9"

# TODO: upgrade code and number
__bcftools_version__ = "1.9"

__htslib_version__ = "1.9"
| kyleabeauchamp/pysam | pysam/version.py | Python | mit | 197 | [
"pysam"
] | 9f89788b4944d33aab59f554e747582a2552cc0add03a7ab2866ec482b009bcd |
"""\
pwq: a extensions to CCTools' WorkQueue bindings
This library provides a wrapper for running WorkQueue tasks that
allows tasks to be serialized to multiple formats and uses ZeroMQ to
support running multiple WorkQueue instances.
"""
# The first DOCLINES entry becomes the short description and the full
# list the long description in the setup() call below.
DOCLINES = __doc__.split('\n')

try:
    from setuptools import setup
except ImportError:
    # Fall back to plain distutils when setuptools is unavailable.
    from distutils.core import setup

import os
import subprocess
import sys

###################################################################### prepare kw arguments to `setup`

setup_kws = dict()

###################################################################### python dependencies

dependencies = ['pyyaml', 'pyzmq']
# setuptools understands `install_requires`; plain distutils only
# accepts the (informational) `requires` keyword.
if 'setuptools' in sys.modules:
    setup_kws['install_requires'] = dependencies
else:
    setup_kws['requires'] = dependencies

###################################################################### Version information

VERSION = '0.2.3'
ISRELEASED = False
__version__ = VERSION
######################################################################
# Writing version control information to the module
# adapted from MDTraj setup.py
def git_version():
    """Return the current git HEAD revision as a string, or
    ``'Unknown'`` when git cannot be run.

    Copied from numpy's setup.py.
    """
    def _run_git(cmd):
        # Run *cmd* under a minimal, language-neutral environment so
        # the output is stable across machines.
        minimal_env = {}
        for var in ['SYSTEMROOT', 'PATH']:
            value = os.environ.get(var)
            if value is not None:
                minimal_env[var] = value
        # LANGUAGE is used on win32
        minimal_env['LANGUAGE'] = 'C'
        minimal_env['LANG'] = 'C'
        minimal_env['LC_ALL'] = 'C'
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=minimal_env)
        return proc.communicate()[0]

    try:
        raw = _run_git(['git', 'rev-parse', 'HEAD'])
    except OSError:
        return 'Unknown'
    return raw.strip().decode('ascii')
def write_version_py(filename):
    """Write version-control information into *filename* as a small
    Python module defining ``short_version``, ``version``,
    ``full_version``, ``git_revision`` and ``release``.

    Non-release builds get a ``.dev-<sha>`` suffix on the full version
    so installed copies can be traced back to a commit.
    """
    # NOTE: the body of the generated `if not release:` block must be
    # indented, otherwise the written module is a SyntaxError.
    cnt = """\
# THIS FILE IS GENERATED FROM MDPREP SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s

if not release:
    version = full_version
"""
    # Adding the git rev number needs to be done inside write_version_py(),
    # otherwise the import of numpy.version messes up the build under Python 3.
    FULLVERSION = VERSION
    if os.path.exists('.git'):
        GIT_REVISION = git_version()
    else:
        GIT_REVISION = 'Unknown'

    if not ISRELEASED:
        FULLVERSION += '.dev-' + GIT_REVISION[:7]

    # `with` guarantees the handle is closed even if the write fails
    # (replaces the manual try/finally of the original).
    with open(filename, 'w') as version_file:
        version_file.write(cnt % {'version': VERSION,
                                  'full_version': FULLVERSION,
                                  'git_revision': GIT_REVISION,
                                  'isrelease': str(ISRELEASED)})
###################################################################### Find my python modules
def find_packages():
    """Find all python packages.

    Walks the current directory and collects, in dotted-module form,
    every directory that contains an ``__init__.py``.

    Adapted from IPython's setupbase.py.  Copyright IPython
    contributors, licensed under the BSD license.
    """
    found = []
    for directory, _subdirs, filenames in os.walk('.'):
        if '__init__.py' not in filenames:
            # not a package
            continue
        found.append(directory.replace(os.path.sep, '.'))
    return found
###################################################################### run Setup
# Record the (possibly dev-suffixed) version where the installed
# package can read it at runtime.
write_version_py('pwq/version.py')

setup(name = 'pwq',
      author = "Badi' Abdul-Wahid",
      author_email = 'abdulwahidc@gmail.com',
      description = DOCLINES[0],
      long_description = '\n'.join(DOCLINES),
      version = __version__,
      license = 'LGPLv2',
      url = 'http://github.com/badi/pwq',
      platforms = ['Linux', 'Mac OS-X', 'Unix', 'Windows'],
      packages = find_packages(),
      # setup_kws carries the dependency keywords appropriate for the
      # active build system (setuptools vs distutils), see above.
      **setup_kws)
| badi/pwq | setup.py | Python | gpl-2.0 | 3,798 | [
"MDTraj"
] | f77af6890760d6420a5641dadf80726b4912b675b98f61670e1182e0be2b3194 |
# -*- coding: utf-8 -*-
"""
trueskill.mathematics
~~~~~~~~~~~~~~~~~~~~~
This module contains basic mathematics functions and objects for TrueSkill
algorithm. If you have not scipy, this module provides the fallback.
:copyright: (c) 2012-2013 by Heungsub Lee.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import copy
import math
try:
from numbers import Number
except ImportError:
Number = (int, long, float, complex)
__all__ = ['Gaussian', 'Matrix', 'inf']
inf = float('inf')
class Gaussian(object):
    """A model for the normal distribution, stored in natural form
    (precision and precision-adjusted mean).
    """

    #: Precision, the inverse of the variance.
    pi = 0
    #: Precision adjusted mean, the precision multiplied by the mean.
    tau = 0

    def __init__(self, mu=None, sigma=None, pi=0, tau=0):
        """Build either from (mu, sigma) or directly from (pi, tau).

        :raise TypeError: if ``mu`` is given without ``sigma``.
        :raise ValueError: if ``sigma`` is zero.
        """
        if mu is not None:
            if sigma is None:
                raise TypeError('sigma argument is needed')
            elif sigma == 0:
                raise ValueError('sigma**2 should be greater than 0')
            pi = sigma ** -2
            tau = pi * mu
        self.pi = pi
        self.tau = tau

    @property
    def mu(self):
        """The mean (0 for the non-informative prior with pi == 0)."""
        if not self.pi:
            return self.pi
        return self.tau / self.pi

    @property
    def sigma(self):
        """The standard deviation (infinite when pi == 0)."""
        if not self.pi:
            return inf
        return math.sqrt(1 / self.pi)

    def __mul__(self, other):
        # Multiplying densities adds precisions and adjusted means.
        return Gaussian(pi=self.pi + other.pi, tau=self.tau + other.tau)

    def __truediv__(self, other):
        # Dividing densities subtracts precisions and adjusted means.
        return Gaussian(pi=self.pi - other.pi, tau=self.tau - other.tau)

    __div__ = __truediv__  # for Python 2

    def __eq__(self, other):
        return self.pi == other.pi and self.tau == other.tau

    def __lt__(self, other):
        return self.mu < other.mu

    def __le__(self, other):
        return self.mu <= other.mu

    def __gt__(self, other):
        return self.mu > other.mu

    def __ge__(self, other):
        return self.mu >= other.mu

    def __repr__(self):
        return 'N(mu=%.3f, sigma=%.3f)' % (self.mu, self.sigma)

    def _repr_latex_(self):
        body = r'\mathcal{ N }( %.3f, %.3f^2 )' % (self.mu, self.sigma)
        return '$%s$' % body
class Matrix(list):
    """A 2-D matrix stored as a list of row lists.

    ``src`` may be:

    * a rectangular list of number lists,
    * a dict mapping ``(row, col)`` coordinates to values (missing cells
      default to 0),
    * a callable producing ``((row, col), value)`` pairs; when ``height`` or
      ``width`` is omitted the callable receives a setter function it must
      call to fix that dimension.
    """

    def __init__(self, src, height=None, width=None):
        if callable(src):
            # Materialize the callable into a coordinate dict, handing it
            # setters for any dimension the caller left unspecified.
            f, src = src, {}
            size = [height, width]
            if not height:
                def set_height(height):
                    size[0] = height
                size[0] = set_height
            if not width:
                def set_width(width):
                    size[1] = width
                size[1] = set_width
            try:
                for (r, c), val in f(*size):
                    src[r, c] = val
            except TypeError:
                raise TypeError('A callable src must return an iterable '
                                'which generates a tuple containing '
                                'coordinate and value')
            height, width = tuple(size)
            if height is None or width is None:
                raise TypeError('A callable src must call set_height and '
                                'set_width if the size is non-deterministic')
        if isinstance(src, list):
            # All rows must have the same length and hold only numbers.
            unique_col_sizes = set(map(len, src))
            # BUG FIX: the original truth-tested the *filter object* returned
            # by filter() -- always truthy on Python 3 -- so non-numeric
            # content silently passed validation.
            all_numbers = all(isinstance(x, Number) for x in sum(src, []))
            if len(unique_col_sizes) != 1 or not all_numbers:
                raise ValueError('src must be a rectangular array of numbers')
            two_dimensional_array = src
        elif isinstance(src, dict):
            if not height or not width:
                # Infer missing dimensions from the largest coordinates seen.
                w = h = 0
                # BUG FIX: dict.iterkeys() is Python 2 only; iterating the
                # dict directly is equivalent on both versions.
                for r, c in src:
                    if not height:
                        h = max(h, r + 1)
                    if not width:
                        w = max(w, c + 1)
                if not height:
                    height = h
                if not width:
                    width = w
            two_dimensional_array = []
            for r in range(height):
                row = []
                two_dimensional_array.append(row)
                for c in range(width):
                    row.append(src.get((r, c), 0))
        else:
            raise TypeError('src must be a list or dict or callable')
        super(Matrix, self).__init__(two_dimensional_array)

    @property
    def height(self):
        """Number of rows."""
        return len(self)

    @property
    def width(self):
        """Number of columns."""
        return len(self[0])

    def transpose(self):
        """Return a new matrix with rows and columns swapped."""
        height, width = self.height, self.width
        src = {}
        for c in range(width):
            for r in range(height):
                src[c, r] = self[r][c]
        return type(self)(src, height=width, width=height)

    def minor(self, row_n, col_n):
        """Return the submatrix with row ``row_n`` and column ``col_n``
        removed.
        """
        height, width = self.height, self.width
        if not (0 <= row_n < height):
            raise ValueError('row_n should be between 0 and %d' % height)
        elif not (0 <= col_n < width):
            raise ValueError('col_n should be between 0 and %d' % width)
        two_dimensional_array = []
        for r in range(height):
            if r == row_n:
                continue
            row = []
            two_dimensional_array.append(row)
            for c in range(width):
                if c == col_n:
                    continue
                row.append(self[r][c])
        return type(self)(two_dimensional_array)

    def determinant(self):
        """Compute the determinant by Gaussian elimination with partial
        pivoting, working on a copy so ``self`` is untouched.
        """
        height, width = self.height, self.width
        if height != width:
            raise ValueError('Only square matrix can calculate a determinant')
        tmp, rv = copy.deepcopy(self), 1.
        # Eliminate columns from right to left, accumulating pivots in rv.
        for c in range(width - 1, 0, -1):
            pivot, r = max((abs(tmp[r][c]), r) for r in range(c + 1))
            pivot = tmp[r][c]
            if not pivot:
                return 0.
            tmp[r], tmp[c] = tmp[c], tmp[r]
            if r != c:
                # A row swap flips the sign of the determinant.
                rv = -rv
            rv *= pivot
            fact = -1. / pivot
            for r in range(c):
                f = fact * tmp[r][c]
                for x in range(c):
                    tmp[r][x] += f * tmp[c][x]
        return rv * tmp[0][0]

    def adjugate(self):
        """Return the adjugate (transpose of the cofactor matrix)."""
        height, width = self.height, self.width
        if height != width:
            raise ValueError('Only square matrix can be adjugated')
        if height == 2:
            # Closed form for the common 2x2 case.
            a, b = self[0][0], self[0][1]
            c, d = self[1][0], self[1][1]
            return type(self)([[d, -b], [-c, a]])
        src = {}
        for r in range(height):
            for c in range(width):
                sign = -1 if (r + c) % 2 else 1
                src[r, c] = self.minor(r, c).determinant() * sign
        return type(self)(src, height, width)

    def inverse(self):
        """Return the matrix inverse via the adjugate / determinant."""
        if self.height == self.width == 1:
            return type(self)([[1. / self[0][0]]])
        return (1. / self.determinant()) * self.adjugate()

    def __add__(self, other):
        """Element-wise matrix addition."""
        height, width = self.height, self.width
        if (height, width) != (other.height, other.width):
            raise ValueError('Must be same size')
        src = {}
        for r in range(height):
            for c in range(width):
                src[r, c] = self[r][c] + other[r][c]
        return type(self)(src, height, width)

    def __mul__(self, other):
        """Matrix multiplication (self @ other)."""
        if self.width != other.height:
            raise ValueError('Bad size')
        height, width = self.height, other.width
        src = {}
        for r in range(height):
            for c in range(width):
                src[r, c] = sum(self[r][x] * other[x][c]
                                for x in range(self.width))
        return type(self)(src, height, width)

    def __rmul__(self, other):
        """Scalar multiplication (number * matrix)."""
        if not isinstance(other, Number):
            raise TypeError('The operand should be a number')
        height, width = self.height, self.width
        src = {}
        for r in range(height):
            for c in range(width):
                src[r, c] = other * self[r][c]
        return type(self)(src, height, width)

    def __repr__(self):
        return '%s(%s)' % (type(self).__name__, super(Matrix, self).__repr__())

    def _repr_latex_(self):
        rows = [' && '.join(['%.3f' % cell for cell in row]) for row in self]
        latex = r'\begin{matrix} %s \end{matrix}' % r'\\'.join(rows)
        return '$%s$' % latex
| amit-bansil/netsci | robocompviz/trueskill/trueskill/mathematics.py | Python | mit | 8,571 | [
"Gaussian"
] | 20aaeb11668d21e47d588140f36cc31b9f2aef17396078144e6046b4a2dc945d |
'''
Created on Apr 4, 2013
@author: Jeff
TrainNetworkQt.py
A Qt version of the network trainer that uses the python version of the deep net code
'''
from PyQt4.QtGui import QMainWindow, QKeySequence, QAction, QFileDialog, QMessageBox, QWidget, \
QPushButton, QLineEdit, QHBoxLayout, QVBoxLayout, QLabel, QApplication, QIntValidator, QDoubleValidator, \
QCheckBox, QDialog, QTextBrowser, QDialogButtonBox
from PyQt4.QtCore import SIGNAL, pyqtSignal, QThread, QObject
import sys
import backprop
import deepnet
import autoencoder
import loadData
import numpy as np
import scipy.io
class MainWindow(QMainWindow):
    """Main application window.

    Hosts the central MainWidget, builds the menu bar, redirects stdout into
    the GUI text browser, and owns the background training thread.
    """

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.mainWidget = MainWidget(self)
        self.setCentralWidget(self.mainWidget)
        self.setWindowTitle(self.tr("Train Network"))
        self.createActions()
        self.createMenus()
        # Route print output from the training code into the text browser.
        self.stream = EmittingStream(textWritten=self.write)
        sys.stdout = self.stream
        #sys.stderr = self.stream
        self.dataDir = ''
        #set the default training parameters in case user doesn't use the parameter dialog
        limit = False                # do not subsample training images by default
        limit_num = 100              # image count used when limiting is enabled
        layer_sizes = [-1,-1,-1,-1]  # -1 means "use the input data size"
        layer_types = ['sigmoid', 'sigmoid', 'sigmoid', 'sigmoid']
        pretrain_iter = [225,75,75]  # per-layer pretraining iterations
        pretrain_lr = 0.0025
        backprop_iter = 30
        self.trainingParameters = [layer_sizes, layer_types, pretrain_iter, pretrain_lr, backprop_iter, limit, limit_num]
        self.thread = TrainThread(self.stream)
        self.thread.setArgs(self.trainingParameters)
        self.connect(self.thread, SIGNAL('trainingFinished()'), self.trainingFinished)

    def __del__(self):
        # Restore the interpreter's real stdout when the window goes away.
        sys.stdout = sys.__stdout__
        #sys.stderr = sys.__stderr__

    def createActions(self):
        """Create the File and Options menu actions and wire their signals."""
        self.openAction = QAction(self.tr("Set &data directory"), self)
        self.openAction.setShortcut(QKeySequence.Open)
        self.openAction.setStatusTip(self.tr("Set the data directory"))
        self.connect(self.openAction, SIGNAL('triggered()'), self.openDataDir)
        self.exitAction = QAction(self.tr("E&xit"), self)
        self.exitAction.setShortcut(self.tr("Ctrl+Q"))
        self.exitAction.setStatusTip(self.tr("Exit the application"))
        self.connect(self.exitAction, SIGNAL('triggered()'), self.onClose)
        self.parametersAction = QAction(self.tr("Set &parameters"), self)
        self.parametersAction.setStatusTip(self.tr("Set the training parameters"))
        self.connect(self.parametersAction, SIGNAL('triggered()'), self.setParameters)

    def createMenus(self):
        """Attach the actions to the File and Options menus."""
        fileMenu = self.menuBar().addMenu(self.tr("&File"))
        fileMenu.addAction(self.openAction)
        fileMenu.addSeparator()
        fileMenu.addAction(self.exitAction)
        optionsMenu = self.menuBar().addMenu(self.tr("&Options"))
        optionsMenu.addAction(self.parametersAction)

    def setParameters(self):
        """Show the parameters dialog and push accepted values to the thread."""
        dialog = ParametersDialog()
        if dialog.exec_():
            self.trainingParameters = dialog.getValues()
            self.thread.setArgs(self.trainingParameters)

    def onClose(self):
        self.close()

    def trainingFinished(self):
        """Slot for the thread's trainingFinished() signal; re-enable the UI."""
        self.mainWidget.textBrowser.append("Training finished")
        self.mainWidget.trainButton.setEnabled(True)

    def browseClicked(self):
        self.openDataDir()

    def openDataDir(self):
        """Let the user pick the data directory and pass it to the thread."""
        fileDialog = QFileDialog()
        fileDialog.setFileMode(fileDialog.DirectoryOnly)
        self.dataDir = fileDialog.getExistingDirectory(caption="Choose the data directory")
        self.mainWidget.dataDirLineEdit.setText(self.dataDir)
        self.thread.setDataDir(self.dataDir)

    def trainClicked(self):
        """Start training, or warn if no data directory was chosen yet."""
        if self.dataDir == '':
            QMessageBox.warning(self, 'No data specified', 'Please specify a data directory',
                                QMessageBox.Ok)
        else:
            self.mainWidget.textBrowser.append(self.tr("Training started"))
            self.mainWidget.trainButton.setEnabled(False)
            self.trainNetwork()

    def trainNetwork(self):
        # Training runs in a background QThread so the GUI stays responsive.
        self.thread.start()

    def write(self, text):
        # Sink for the EmittingStream's textWritten signal.
        self.mainWidget.textBrowser.append(text)
class MainWidget(QWidget):
    """Central widget: data-directory picker, log browser, and action buttons.

    All button/browse signals are wired back to slots on the parent
    MainWindow.
    """

    def __init__(self, parent):
        super(MainWidget, self).__init__(parent)
        self.dataDirLineEdit = QLineEdit()
        self.dataLabel = QLabel(self.tr("Data Directory:"))
        self.dataLabel.setBuddy(self.dataDirLineEdit)
        self.dataBrowseButton = QPushButton(self.tr("&Browse..."))
        self.connect(self.dataBrowseButton, SIGNAL('clicked()'), parent.browseClicked)
        self.closeButton = QPushButton(self.tr("Close"))
        self.connect(self.closeButton, SIGNAL('clicked()'), parent.onClose)
        self.trainButton = QPushButton(self.tr("&Train"))
        self.connect(self.trainButton, SIGNAL('clicked()'), parent.trainClicked)
        # Read-only log area that displays redirected stdout.
        self.textBrowser = QTextBrowser()
        self.dataLayout = QHBoxLayout()
        self.dataLayout.addWidget(self.dataLabel)
        self.dataLayout.addWidget(self.dataDirLineEdit)
        self.dataLayout.addWidget(self.dataBrowseButton)
        self.buttonLayout = QHBoxLayout()
        self.buttonLayout.addWidget(self.closeButton)
        self.buttonLayout.addWidget(self.trainButton)
        self.mainLayout = QVBoxLayout()
        self.mainLayout.addLayout(self.dataLayout)
        self.mainLayout.addWidget(self.textBrowser)
        self.mainLayout.addLayout(self.buttonLayout)
        self.setLayout(self.mainLayout)
class ParametersDialog(QDialog):
    """Modal dialog for editing the training hyper-parameters.

    The detail fields stay disabled while "Use default parameters" is
    checked; getValues() returns the parameter list consumed by
    TrainThread.setArgs().
    """

    def __init__(self, parent=None):
        super(ParametersDialog, self).__init__(parent)
        # Optional cap on the number of training images.
        self.limitImagesCheckBox = QCheckBox(self.tr("&Limit number of images"))
        self.limitLineEdit = QLineEdit()
        self.connect(self.limitImagesCheckBox, SIGNAL('clicked()'), self.limitClicked)
        self.limitLineEdit.setText("100")
        self.limitLineEdit.setValidator(QIntValidator())
        self.limitLineEdit.setEnabled(False)
        self.useDefaultCheckBox = QCheckBox(self.tr("Use &default parameters"))
        self.useDefaultCheckBox.setChecked(True)
        self.connect(self.useDefaultCheckBox, SIGNAL('clicked()'), self.defaultClicked)
        # Comma-separated network architecture fields.
        self.layerSizesLabel = QLabel(self.tr("Layer &sizes (-1 for input data size):"))
        self.layerSizesLineEdit = QLineEdit()
        self.layerSizesLineEdit.setText("-1, -1, -1, -1")
        self.layerSizesLineEdit.setEnabled(False)
        self.layerSizesLabel.setBuddy(self.layerSizesLineEdit)
        self.layerTypesLabel = QLabel(self.tr("Layer &types (sigmoid or gaussian):"))
        self.layerTypesLineEdit = QLineEdit()
        self.layerTypesLineEdit.setText("sigmoid, sigmoid, sigmoid, sigmoid")
        self.layerTypesLineEdit.setEnabled(False)
        self.layerTypesLabel.setBuddy(self.layerTypesLineEdit)
        self.pretrainIterLabel = QLabel(self.tr("Pretraining &iterations for each layer:"))
        self.pretrainIterLineEdit = QLineEdit()
        self.pretrainIterLineEdit.setText("225, 75, 75")
        self.pretrainIterLineEdit.setEnabled(False)
        self.pretrainIterLabel.setBuddy(self.pretrainIterLineEdit)
        self.pretrainLRLabel = QLabel(self.tr("Pretraining &learning rate:"))
        self.pretrainLRLineEdit = QLineEdit()
        self.pretrainLRLineEdit.setText("0.0025")
        self.pretrainLRLineEdit.setValidator(QDoubleValidator())
        self.pretrainLRLineEdit.setEnabled(False)
        self.pretrainLRLabel.setBuddy(self.pretrainLRLineEdit)
        self.backpropIterLabel = QLabel(self.tr("&Backprop iterations:"))
        self.backpropIterLineEdit = QLineEdit()
        self.backpropIterLineEdit.setText("30")
        self.backpropIterLineEdit.setValidator(QIntValidator())
        self.backpropIterLineEdit.setEnabled(False)
        self.backpropIterLabel.setBuddy(self.backpropIterLineEdit)
        self.buttonBox = QDialogButtonBox(QDialogButtonBox.Cancel | QDialogButtonBox.Ok)
        self.connect(self.buttonBox, SIGNAL('accepted()'), self.accept)
        self.connect(self.buttonBox, SIGNAL('rejected()'), self.reject)
        # One horizontal row per parameter, stacked vertically.
        self.limitLayout = QHBoxLayout()
        self.limitLayout.addWidget(self.limitImagesCheckBox)
        self.limitLayout.addWidget(self.limitLineEdit)
        self.layerSizesLayout = QHBoxLayout()
        self.layerSizesLayout.addWidget(self.layerSizesLabel)
        self.layerSizesLayout.addWidget(self.layerSizesLineEdit)
        self.layerTypesLayout = QHBoxLayout()
        self.layerTypesLayout.addWidget(self.layerTypesLabel)
        self.layerTypesLayout.addWidget(self.layerTypesLineEdit)
        self.pretrainIterLayout = QHBoxLayout()
        self.pretrainIterLayout.addWidget(self.pretrainIterLabel)
        self.pretrainIterLayout.addWidget(self.pretrainIterLineEdit)
        self.pretrainLRLayout = QHBoxLayout()
        self.pretrainLRLayout.addWidget(self.pretrainLRLabel)
        self.pretrainLRLayout.addWidget(self.pretrainLRLineEdit)
        self.backpropIterLayout = QHBoxLayout()
        self.backpropIterLayout.addWidget(self.backpropIterLabel)
        self.backpropIterLayout.addWidget(self.backpropIterLineEdit)
        self.mainLayout = QVBoxLayout()
        self.mainLayout.addLayout(self.limitLayout)
        self.mainLayout.addWidget(self.useDefaultCheckBox)
        self.mainLayout.addLayout(self.layerSizesLayout)
        self.mainLayout.addLayout(self.layerTypesLayout)
        self.mainLayout.addLayout(self.pretrainIterLayout)
        self.mainLayout.addLayout(self.pretrainLRLayout)
        self.mainLayout.addLayout(self.backpropIterLayout)
        self.mainLayout.addWidget(self.buttonBox)
        self.setLayout(self.mainLayout)
        self.setWindowTitle(self.tr("Set training parameters"))
        self.setFixedHeight(self.sizeHint().height())

    def getValues(self):
        """Return [layer_sizes, layer_types, pretrain_iter, pretrain_lr,
        backprop_iter, limit, limit_num] parsed from the dialog fields.

        NOTE(review): fields are parsed without error handling; the int/float
        validators on some line edits presumably keep the text parseable, but
        the comma-separated fields have no validator -- a malformed entry
        would raise ValueError here.
        """
        limit = self.limitImagesCheckBox.isChecked()
        limit_num = int(self.limitLineEdit.text())
        if self.useDefaultCheckBox.isChecked():
            # Mirrors the defaults set up in MainWindow.__init__.
            layer_sizes = [-1,-1,-1,-1]
            layer_types = ['sigmoid', 'sigmoid', 'sigmoid', 'sigmoid']
            pretrain_iter = [225,75,75]
            pretrain_lr = 0.0025
            backprop_iter = 30
        else:
            layer_sizes = [int(x) for x in (self.layerSizesLineEdit.text()).split(',')]
            layer_types = [str(x) for x in (self.layerTypesLineEdit.text()).split(',')]
            pretrain_iter = [int(x) for x in (self.pretrainIterLineEdit.text()).split(',')]
            pretrain_lr = float(self.pretrainLRLineEdit.text())
            backprop_iter = int(self.backpropIterLineEdit.text())
        return [layer_sizes, layer_types, pretrain_iter, pretrain_lr, backprop_iter, limit, limit_num]

    def limitClicked(self):
        # The limit count is only editable while limiting is enabled.
        self.limitLineEdit.setEnabled(self.limitImagesCheckBox.isChecked())

    def defaultClicked(self):
        # Toggle all detail fields opposite to the "use defaults" checkbox.
        self.layerSizesLineEdit.setEnabled(not self.useDefaultCheckBox.isChecked())
        self.layerTypesLineEdit.setEnabled(not self.useDefaultCheckBox.isChecked())
        self.pretrainIterLineEdit.setEnabled(not self.useDefaultCheckBox.isChecked())
        self.pretrainLRLineEdit.setEnabled(not self.useDefaultCheckBox.isChecked())
        self.backpropIterLineEdit.setEnabled(not self.useDefaultCheckBox.isChecked())
class EmittingStream(QObject):
    """File-like object that forwards write() calls as a Qt signal.

    Installed as sys.stdout so that print output from worker code can be
    displayed in the GUI thread via the textWritten signal.
    """
    textWritten = pyqtSignal(str)

    def write(self, text):
        # str() coerces non-string payloads (e.g. tuples) before emitting.
        self.textWritten.emit(str(text))
class TrainThread(QThread):
    """Background worker that pretrains a deep net and fine-tunes it.

    Emits trainingFinished() when done; all progress text goes through the
    supplied output stream so it can be shown in the GUI.
    """

    def __init__(self, outputStream, parent=None):
        super(TrainThread, self).__init__(parent)
        self.stream = outputStream

    def setArgs(self, args):
        # args layout: [layer_sizes, layer_types, pretrain_iter, pretrain_lr,
        #               backprop_iter, limit, limit_num]
        self.layer_sizes = args[0]
        self.layer_types = args[1]
        self.pretrain_iter = args[2]
        self.pretrain_lr = args[3]
        self.backprop_iter = args[4]
        self.limit = args[5]
        self.limit_num = args[6]

    def setDataDir(self, directory):
        self.dataDir = directory

    def run(self):
        """QThread entry point: log the configuration, train, then signal."""
        self.stream.write("Input parameters")
        self.stream.write("\tLayer sizes: {}".format(self.layer_sizes))
        self.stream.write("\tLayer types: {}".format(self.layer_types))
        self.stream.write("\tPre-train LR: {}".format(self.pretrain_lr))
        self.stream.write("\tPre-train iterations: {}".format(self.pretrain_iter))
        self.stream.write("\tBackprop iterations: {}".format(self.backprop_iter))
        if self.limit:
            self.stream.write("Limiting input to %d images" % self.limit_num)
        self.train()
        self.emit(SIGNAL('trainingFinished()'))

    def save(self, network, name):
        """Save layer weights/biases/types of ``network`` to a .mat file.

        NOTE(review): the as_numpy_array() path suggests layers may hold
        GPU-backed arrays (e.g. cudamat/gnumpy); the AttributeError fallback
        handles plain numpy arrays -- confirm against deepnet's layer type.
        """
        mdic = {}
        for i in range(len(network)):
            try:
                mdic['W%d'%(i+1)] = network[i].W.as_numpy_array()
                mdic['b%d'%(i+1)] = network[i].hbias.as_numpy_array()
            except AttributeError:
                mdic['W%d'%(i+1)] = network[i].W
                mdic['b%d'%(i+1)] = network[i].hbias
            mdic['hidtype%d'%(i+1)] = network[i].hidtype
        scipy.io.savemat(name, mdic)

    def train(self):
        """Load data, pretrain layer-wise, unroll, then fine-tune with
        backprop. Intermediate and final networks are saved as .mat files.
        """
        # this will be replaced by calls to loadData.py
        #data = np.load('scaled_images.npy')
        #data = np.asarray(data, dtype='float32')
        #data /= 255.0
        l = loadData.Loader(str(self.dataDir),stream=self.stream)
        if self.layer_types[0] != 'sigmoid':
            layer1_sigmoid = False
        else:
            layer1_sigmoid = True
        l.loadData(layer1_sigmoid)
        data = l.XC
        if self.limit:
            # Randomly subsample limit_num rows from the training data.
            inds = np.arange(data.shape[0])
            np.random.shuffle(inds)
            data = data[inds[:self.limit_num],:]
        self.stream.write(data.shape)
        # parse the layer sizes
        sizes = []
        for i in self.layer_sizes:
            if i == -1:
                # -1 is a placeholder for "same size as the input features".
                sizes.append(data.shape[1])
            else:
                sizes.append(i)
        #set up and train the initial deepnet
        dnn = deepnet.DeepNet(sizes, self.layer_types, stream=self.stream)
        dnn.train(data, self.pretrain_iter, self.pretrain_lr)
        #save the trained deepnet
        #pickle.dump(dnn, file('pretrained.pkl','wb')) # Looks like pickle won't work with Qt :(
        self.save(dnn.network, 'pretrained.mat')
        #unroll the deepnet into an autoencoder
        autoenc = autoencoder.unroll_network(dnn.network)
        #fine-tune with backprop
        mlp = backprop.NeuralNet(network=autoenc, stream=self.stream)
        trained = mlp.train(mlp.network, data, data, max_iter=self.backprop_iter,
                            validErrFunc='reconstruction', targetCost='linSquaredErr')
        #save
        #pickle.dump(trained, file('network.pkl','wb'))
        self.save(trained, 'network.mat')
if __name__ == "__main__":
    # Launch the Qt event loop with a single main window.
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
| JRMeyer/Autotrace | under-development/TrainNetwork_Qt.py | Python | mit | 15,367 | [
"Gaussian"
] | ddcf3936ae6253337dee0a8f5bfa137f3b75324af8fbe760647ad044a1efa01a |
"""
Provides rolling statistical moments and related descriptive
statistics implemented in Cython
"""
from __future__ import division
import warnings
import numpy as np
from pandas.core.dtypes.common import is_scalar
from pandas.core.api import DataFrame, Series
from pandas.util._decorators import Substitution, Appender
# Public API: the deprecated top-level pd.rolling_* / ewm* / expanding_*
# module functions, all of which now dispatch to the .rolling()/.ewm()/
# .expanding() method interfaces.
__all__ = ['rolling_count', 'rolling_max', 'rolling_min',
           'rolling_sum', 'rolling_mean', 'rolling_std', 'rolling_cov',
           'rolling_corr', 'rolling_var', 'rolling_skew', 'rolling_kurt',
           'rolling_quantile', 'rolling_median', 'rolling_apply',
           'rolling_window',
           'ewma', 'ewmvar', 'ewmstd', 'ewmvol', 'ewmcorr', 'ewmcov',
           'expanding_count', 'expanding_max', 'expanding_min',
           'expanding_sum', 'expanding_mean', 'expanding_std',
           'expanding_cov', 'expanding_corr', 'expanding_var',
           'expanding_skew', 'expanding_kurt', 'expanding_quantile',
           'expanding_median', 'expanding_apply']
# -----------------------------------------------------------------------------
# Docs
# The order of arguments for the _doc_template is:
# (header, args, kwargs, returns, notes)
_doc_template = """
%s
Parameters
----------
%s%s
Returns
-------
%s
%s
"""
_roll_kw = """window : int
Size of the moving window. This is the number of observations used for
calculating the statistic.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
how : string, default '%s'
Method for down- or re-sampling
"""
_roll_notes = r"""
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
_ewm_kw = r"""com : float, optional
Specify decay in terms of center of mass,
:math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`
span : float, optional
Specify decay in terms of span,
:math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`
halflife : float, optional
Specify decay in terms of half-life,
:math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } halflife > 0`
alpha : float, optional
Specify smoothing factor :math:`\alpha` directly,
:math:`0 < \alpha \leq 1`
.. versionadded:: 0.18.0
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
adjust : boolean, default True
Divide by decaying adjustment factor in beginning periods to account for
imbalance in relative weightings (viewing EWMA as a moving average)
how : string, default 'mean'
Method for down- or re-sampling
ignore_na : boolean, default False
Ignore missing values when calculating weights;
specify True to reproduce pre-0.15.0 behavior
"""
_ewm_notes = r"""
Notes
-----
Exactly one of center of mass, span, half-life, and alpha must be provided.
Allowed values and relationship between the parameters are specified in the
parameter descriptions above; see the link at the end of this section for
a detailed explanation.
When adjust is True (default), weighted averages are calculated using weights
(1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
When adjust is False, weighted averages are calculated recursively as:
weighted_average[0] = arg[0];
weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
When ignore_na is False (default), weights are based on absolute positions.
For example, the weights of x and y used in calculating the final weighted
average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
(1-alpha)**2 and alpha (if adjust is False).
When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based on
relative positions. For example, the weights of x and y used in calculating
the final weighted average of [x, None, y] are 1-alpha and 1 (if adjust is
True), and 1-alpha and alpha (if adjust is False).
More details can be found at
http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows
"""
_expanding_kw = """min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
"""
_type_of_input_retval = "y : type of input argument"
_flex_retval = """y : type depends on inputs
DataFrame / DataFrame -> DataFrame (matches on columns) or Panel (pairwise)
DataFrame / Series -> Computes result for each column
Series / Series -> Series"""
_pairwise_retval = "y : Panel whose items are df1.index values"
_unary_arg = "arg : Series, DataFrame\n"
_binary_arg_flex = """arg1 : Series, DataFrame, or ndarray
arg2 : Series, DataFrame, or ndarray, optional
if not supplied then will default to arg1 and produce pairwise output
"""
_binary_arg = """arg1 : Series, DataFrame, or ndarray
arg2 : Series, DataFrame, or ndarray
"""
_pairwise_arg = """df1 : DataFrame
df2 : DataFrame
"""
_pairwise_kw = """pairwise : bool, default False
If False then only matching columns between arg1 and arg2 will be used and
the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the output
will be a Panel in the case of DataFrame inputs. In the case of missing
elements, only complete pairwise observations will be used.
"""
_ddof_kw = """ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
"""
_bias_kw = r"""bias : boolean, default False
Use a standard estimation bias correction
"""
def ensure_compat(dispatch, name, arg, func_kw=None, *args, **kwargs):
    """Dispatch a deprecated module-level moment function to the method API.

    wrapper function to dispatch to the appropriate window functions
    wraps/unwraps ndarrays for compat

    can be removed when ndarray support is removed

    Parameters
    ----------
    dispatch : str
        Accessor name on the object ('rolling', 'ewm', 'expanding').
    name : str
        Statistic method to call on the accessor ('sum', 'corr', ...).
    arg : Series, DataFrame or ndarray
        ndarrays are wrapped into a Series/DataFrame and unwrapped on return.
    func_kw : list of str, optional
        Keyword names that belong to the final statistic call rather than to
        the accessor constructor.
    *args, **kwargs
        Forwarded to the statistic call and accessor respectively.
    """
    is_ndarray = isinstance(arg, np.ndarray)
    if is_ndarray:
        if arg.ndim == 1:
            arg = Series(arg)
        elif arg.ndim == 2:
            arg = DataFrame(arg)
        else:
            raise AssertionError("cannot support ndim > 2 for ndarray compat")

        warnings.warn("pd.{dispatch}_{name} is deprecated for ndarrays and "
                      "will be removed "
                      "in a future version"
                      .format(dispatch=dispatch, name=name),
                      FutureWarning, stacklevel=3)

    # Split the keywords destined for the final statistic call (func_kw)
    # from those that configure the accessor itself.
    if func_kw is None:
        func_kw = []
    kwds = {}
    for k in func_kw:
        value = kwargs.pop(k, None)
        if value is not None:
            kwds[k] = value

    # how is a keyword that if not-None should be in kwds
    how = kwargs.pop('how', None)
    if how is not None:
        kwds['how'] = how

    r = getattr(arg, dispatch)(**kwargs)

    if not is_ndarray:
        # give a helpful deprecation message
        # with copy-pastable arguments
        pargs = ','.join("{a}={b}".format(a=a, b=b)
                         for a, b in kwargs.items() if b is not None)
        aargs = ','.join(args)
        if len(aargs):
            aargs += ','

        def f(a, b):
            # Render scalars literally; show only the type of complex values.
            if is_scalar(b):
                return "{a}={b}".format(a=a, b=b)
            return "{a}=<{b}>".format(a=a, b=type(b).__name__)
        # BUG FIX: this must *append* to aargs -- the original assignment
        # discarded the positional arguments rendered above, so they never
        # appeared in the suggested replacement call.
        aargs += ','.join(f(a, b) for a, b in kwds.items() if b is not None)
        warnings.warn("pd.{dispatch}_{name} is deprecated for {klass} "
                      "and will be removed in a future version, replace with "
                      "\n\t{klass}.{dispatch}({pargs}).{name}({aargs})"
                      .format(klass=type(arg).__name__, pargs=pargs,
                              aargs=aargs, dispatch=dispatch, name=name),
                      FutureWarning, stacklevel=3)

    result = getattr(r, name)(*args, **kwds)

    if is_ndarray:
        result = result.values
    return result
def rolling_count(arg, window, **kwargs):
    """
    Rolling count of number of non-NaN observations inside provided window.

    Parameters
    ----------
    arg : DataFrame or numpy ndarray-like
    window : int
        Size of the moving window. This is the number of observations used for
        calculating the statistic.
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the
        statistic. Specified as a frequency string or DateOffset object.
    center : boolean, default False
        Whether the label should correspond with center of window
    how : string, default 'mean'
        Method for down- or re-sampling

    Returns
    -------
    rolling_count : type of caller

    Notes
    -----
    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
    """
    # Fold the window size into the remaining keywords and dispatch to
    # .rolling(...).count() via the compat shim.
    opts = dict(kwargs, window=window)
    return ensure_compat('rolling', 'count', arg, **opts)
@Substitution("Unbiased moving covariance.", _binary_arg_flex,
              _roll_kw % 'None' + _pairwise_kw + _ddof_kw, _flex_retval,
              _roll_notes)
@Appender(_doc_template)
def rolling_cov(arg1, arg2=None, window=None, pairwise=None, **kwargs):
    # Legacy call style rolling_cov(arg, window): a numeric second positional
    # argument is really the window size, and covariance is computed pairwise
    # on arg1 with itself.
    if window is None and isinstance(arg2, (int, float)):
        arg2, window = arg1, arg2
        if pairwise is None:  # only default unset
            pairwise = True
    elif arg2 is None:
        arg2 = arg1
        if pairwise is None:  # only default unset
            pairwise = True
    return ensure_compat('rolling', 'cov', arg1, other=arg2, window=window,
                         pairwise=pairwise,
                         func_kw=['other', 'pairwise', 'ddof'], **kwargs)
@Substitution("Moving sample correlation.", _binary_arg_flex,
              _roll_kw % 'None' + _pairwise_kw, _flex_retval, _roll_notes)
@Appender(_doc_template)
def rolling_corr(arg1, arg2=None, window=None, pairwise=None, **kwargs):
    # Legacy call style rolling_corr(arg, window): a numeric second positional
    # argument is really the window size, and correlation is computed pairwise
    # on arg1 with itself.
    if window is None and isinstance(arg2, (int, float)):
        arg2, window = arg1, arg2
        if pairwise is None:  # only default unset
            pairwise = True
    elif arg2 is None:
        arg2 = arg1
        if pairwise is None:  # only default unset
            pairwise = True
    return ensure_compat('rolling', 'corr', arg1, other=arg2, window=window,
                         pairwise=pairwise, func_kw=['other', 'pairwise'],
                         **kwargs)
# -----------------------------------------------------------------------------
# Exponential moving moments
@Substitution("Exponentially-weighted moving average", _unary_arg, _ewm_kw,
              _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewma(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0,
         freq=None, adjust=True, how=None, ignore_na=False):
    # Gather every decay/window setting and dispatch to .ewm(...).mean().
    settings = dict(com=com, span=span, halflife=halflife, alpha=alpha,
                    min_periods=min_periods, freq=freq, adjust=adjust,
                    how=how, ignore_na=ignore_na)
    return ensure_compat('ewm', 'mean', arg, **settings)
@Substitution("Exponentially-weighted moving variance", _unary_arg,
              _ewm_kw + _bias_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmvar(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0,
           bias=False, freq=None, how=None, ignore_na=False, adjust=True):
    # Same plumbing as ewma, but ``bias`` belongs to the final .var() call
    # rather than to the .ewm() accessor.
    settings = dict(com=com, span=span, halflife=halflife, alpha=alpha,
                    min_periods=min_periods, freq=freq, adjust=adjust,
                    how=how, ignore_na=ignore_na)
    return ensure_compat('ewm', 'var', arg, bias=bias, func_kw=['bias'],
                         **settings)
@Substitution("Exponentially-weighted moving std", _unary_arg,
              _ewm_kw + _bias_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmstd(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0,
           bias=False, freq=None, how=None, ignore_na=False, adjust=True):
    # Same plumbing as ewmvar, dispatching to .ewm(...).std(bias=...).
    settings = dict(com=com, span=span, halflife=halflife, alpha=alpha,
                    min_periods=min_periods, freq=freq, adjust=adjust,
                    how=how, ignore_na=ignore_na)
    return ensure_compat('ewm', 'std', arg, bias=bias, func_kw=['bias'],
                         **settings)


# Volatility is simply the exponentially-weighted standard deviation.
ewmvol = ewmstd
@Substitution("Exponentially-weighted moving covariance", _binary_arg_flex,
              _ewm_kw + _pairwise_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, alpha=None,
           min_periods=0, bias=False, freq=None, pairwise=None, how=None,
           ignore_na=False, adjust=True):
    # Legacy positional form ewmcov(arg, com): a numeric second argument is
    # the center-of-mass, not a second data set; covariance is then computed
    # pairwise on arg1 with itself.
    if arg2 is None:
        arg2 = arg1
        pairwise = True if pairwise is None else pairwise
    elif isinstance(arg2, (int, float)) and com is None:
        com, arg2 = arg2, arg1
        pairwise = True if pairwise is None else pairwise
    return ensure_compat('ewm', 'cov', arg1, other=arg2, com=com, span=span,
                         halflife=halflife, alpha=alpha,
                         min_periods=min_periods, bias=bias, freq=freq,
                         how=how, ignore_na=ignore_na, adjust=adjust,
                         pairwise=pairwise,
                         func_kw=['other', 'pairwise', 'bias'])
@Substitution("Exponentially-weighted moving correlation", _binary_arg_flex,
              _ewm_kw + _pairwise_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmcorr(arg1, arg2=None, com=None, span=None, halflife=None, alpha=None,
            min_periods=0, freq=None, pairwise=None, how=None, ignore_na=False,
            adjust=True):
    # Legacy positional form ewmcorr(arg, com): a numeric second argument is
    # the center-of-mass; correlation is then computed pairwise on arg1.
    if arg2 is None:
        arg2 = arg1
        pairwise = True if pairwise is None else pairwise
    elif isinstance(arg2, (int, float)) and com is None:
        com, arg2 = arg2, arg1
        pairwise = True if pairwise is None else pairwise
    return ensure_compat('ewm', 'corr', arg1, other=arg2, com=com, span=span,
                         halflife=halflife, alpha=alpha,
                         min_periods=min_periods, freq=freq, how=how,
                         ignore_na=ignore_na, adjust=adjust,
                         pairwise=pairwise, func_kw=['other', 'pairwise'])
# ---------------------------------------------------------------------
# Python interface to Cython functions
def _rolling_func(name, desc, how=None, func_kw=None, additional_kw=''):
    """Create a deprecated module-level ``pd.rolling_<name>`` wrapper.

    Parameters
    ----------
    name : str
        Rolling method to dispatch to (e.g. ``'sum'``).
    desc : str
        One-line description used in the generated docstring.
    how : str, optional
        Down/re-sampling default advertised in the docstring.
    func_kw : list of str, optional
        Keywords forwarded to the final statistic call (not to ``.rolling``).
    additional_kw : str
        Extra docstring text appended to the standard rolling kwargs.

    Returns
    -------
    function
    """
    # Render the default shown for the ``how`` kwarg in the generated docs.
    # BUG FIX: the original built "'{how}" with an unbalanced quote, so the
    # docstring read e.g. "default 'max" instead of "default 'max'".
    if how is None:
        how_arg_str = 'None'
    else:
        how_arg_str = "'{how}'".format(how=how)

    @Substitution(desc, _unary_arg, _roll_kw % how_arg_str + additional_kw,
                  _type_of_input_retval, _roll_notes)
    @Appender(_doc_template)
    def f(arg, window, min_periods=None, freq=None, center=False,
          **kwargs):
        return ensure_compat('rolling',
                             name,
                             arg,
                             window=window,
                             min_periods=min_periods,
                             freq=freq,
                             center=center,
                             func_kw=func_kw,
                             **kwargs)
    return f
# Deprecated pd.rolling_* functions generated from the factory above; each
# dispatches to the corresponding .rolling(...) method.
rolling_max = _rolling_func('max', 'Moving maximum.', how='max')
rolling_min = _rolling_func('min', 'Moving minimum.', how='min')
rolling_sum = _rolling_func('sum', 'Moving sum.')
rolling_mean = _rolling_func('mean', 'Moving mean.')
rolling_median = _rolling_func('median', 'Moving median.', how='median')
# std/var additionally document and forward the ``ddof`` keyword.
rolling_std = _rolling_func('std', 'Moving standard deviation.',
                            func_kw=['ddof'],
                            additional_kw=_ddof_kw)
rolling_var = _rolling_func('var', 'Moving variance.',
                            func_kw=['ddof'],
                            additional_kw=_ddof_kw)
rolling_skew = _rolling_func('skew', 'Unbiased moving skewness.')
rolling_kurt = _rolling_func('kurt', 'Unbiased moving kurtosis.')
def rolling_quantile(arg, window, quantile, min_periods=None, freq=None,
                     center=False):
    """Moving quantile.

    Parameters
    ----------
    arg : Series, DataFrame
    window : int
        Size of the moving window. This is the number of observations used for
        calculating the statistic.
    quantile : float
        0 <= quantile <= 1
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the
        statistic. Specified as a frequency string or DateOffset object.
    center : boolean, default False
        Whether the label should correspond with center of window

    Returns
    -------
    y : type of input argument

    Notes
    -----
    By default, the result is set to the right edge of the window. This can be
    changed to the center of the window by setting ``center=True``.

    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
    """
    # Delegate to the modern .rolling(...).quantile(...) implementation.
    call_kwargs = dict(window=window,
                       freq=freq,
                       center=center,
                       min_periods=min_periods,
                       func_kw=['quantile'],
                       quantile=quantile)
    return ensure_compat('rolling', 'quantile', arg, **call_kwargs)
def rolling_apply(arg, window, func, min_periods=None, freq=None,
                  center=False, args=(), kwargs=None):
    """Generic moving function application.

    Parameters
    ----------
    arg : Series, DataFrame
    window : int
        Size of the moving window. This is the number of observations used for
        calculating the statistic.
    func : function
        Must produce a single value from an ndarray input
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the
        statistic. Specified as a frequency string or DateOffset object.
    center : boolean, default False
        Whether the label should correspond with center of window
    args : tuple
        Passed on to func
    kwargs : dict, optional
        Passed on to func

    Returns
    -------
    y : type of input argument

    Notes
    -----
    By default, the result is set to the right edge of the window. This can be
    changed to the center of the window by setting ``center=True``.

    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
    """
    # BUG FIX: ``kwargs={}`` is a mutable default created once and shared by
    # every call; use a None sentinel so each call gets a fresh dict.
    if kwargs is None:
        kwargs = {}
    return ensure_compat('rolling',
                         'apply',
                         arg,
                         window=window,
                         freq=freq,
                         center=center,
                         min_periods=min_periods,
                         func_kw=['func', 'args', 'kwargs'],
                         func=func,
                         args=args,
                         kwargs=kwargs)
def rolling_window(arg, window=None, win_type=None, min_periods=None,
                   freq=None, center=False, mean=True,
                   axis=0, how=None, **kwargs):
    """
    Applies a moving window of type ``window_type`` and size ``window``
    on the data.

    Parameters
    ----------
    arg : Series, DataFrame
    window : int or ndarray
        Weighting window specification. If the window is an integer, then it is
        treated as the window length and win_type is required
    win_type : str, default None
        Window type (see Notes)
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the
        statistic. Specified as a frequency string or DateOffset object.
    center : boolean, default False
        Whether the label should correspond with center of window
    mean : boolean, default True
        If True computes weighted mean, else weighted sum
    axis : {0, 1}, default 0
    how : string, default 'mean'
        Method for down- or re-sampling

    Returns
    -------
    y : type of input argument

    Notes
    -----
    The recognized window types are:

    * ``boxcar``
    * ``triang``
    * ``blackman``
    * ``hamming``
    * ``bartlett``
    * ``parzen``
    * ``bohman``
    * ``blackmanharris``
    * ``nuttall``
    * ``barthann``
    * ``kaiser`` (needs beta)
    * ``gaussian`` (needs std)
    * ``general_gaussian`` (needs power, width)
    * ``slepian`` (needs width).

    By default, the result is set to the right edge of the window. This can be
    changed to the center of the window by setting ``center=True``.

    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
    """
    # NOTE(review): ``how`` is accepted but never forwarded -- confirm intended.
    func = 'mean' if mean else 'sum'
    # BUG FIX: snapshot the keyword names into a list. Passing the live
    # ``dict_keys`` view would raise RuntimeError if the callee mutates
    # ``kwargs`` while iterating ``func_kw``.
    return ensure_compat('rolling',
                         func,
                         arg,
                         window=window,
                         win_type=win_type,
                         freq=freq,
                         center=center,
                         min_periods=min_periods,
                         axis=axis,
                         func_kw=list(kwargs),
                         **kwargs)
def _expanding_func(name, desc, func_kw=None, additional_kw=''):
    # Factory for the deprecated module-level ``expanding_*`` wrappers,
    # each delegating to the modern .expanding(...) accessor.
    @Substitution(desc, _unary_arg, _expanding_kw + additional_kw,
                  _type_of_input_retval, "")
    @Appender(_doc_template)
    def f(arg, min_periods=1, freq=None, **kwargs):
        return ensure_compat('expanding', name, arg,
                             min_periods=min_periods,
                             freq=freq,
                             func_kw=func_kw,
                             **kwargs)
    return f
# Public expanding-window aliases built from the shared factory above.
expanding_max = _expanding_func('max', 'Expanding maximum.')
expanding_min = _expanding_func('min', 'Expanding minimum.')
expanding_sum = _expanding_func('sum', 'Expanding sum.')
expanding_mean = _expanding_func('mean', 'Expanding mean.')
expanding_median = _expanding_func('median', 'Expanding median.')
# std/var additionally expose the ``ddof`` degrees-of-freedom keyword.
expanding_std = _expanding_func('std', 'Expanding standard deviation.',
                                func_kw=['ddof'],
                                additional_kw=_ddof_kw)
expanding_var = _expanding_func('var', 'Expanding variance.',
                                func_kw=['ddof'],
                                additional_kw=_ddof_kw)
expanding_skew = _expanding_func('skew', 'Unbiased expanding skewness.')
expanding_kurt = _expanding_func('kurt', 'Unbiased expanding kurtosis.')
def expanding_count(arg, freq=None):
    """
    Expanding count of number of non-NaN observations.

    Parameters
    ----------
    arg : DataFrame or numpy ndarray-like
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the
        statistic. Specified as a frequency string or DateOffset object.

    Returns
    -------
    expanding_count : type of caller

    Notes
    -----
    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
    """
    # Delegates to the modern .expanding().count() implementation.
    return ensure_compat('expanding', 'count', arg, freq=freq)
def expanding_quantile(arg, quantile, min_periods=1, freq=None):
    """Expanding quantile.

    Parameters
    ----------
    arg : Series, DataFrame
    quantile : float
        0 <= quantile <= 1
    min_periods : int, default 1
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the
        statistic. Specified as a frequency string or DateOffset object.

    Returns
    -------
    y : type of input argument

    Notes
    -----
    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
    """
    # DOC FIX: the docstring previously claimed ``min_periods`` defaults to
    # None; the actual default is 1.
    return ensure_compat('expanding',
                         'quantile',
                         arg,
                         freq=freq,
                         min_periods=min_periods,
                         func_kw=['quantile'],
                         quantile=quantile)
@Substitution("Unbiased expanding covariance.", _binary_arg_flex,
              _expanding_kw + _pairwise_kw + _ddof_kw, _flex_retval, "")
@Appender(_doc_template)
def expanding_cov(arg1, arg2=None, min_periods=1, freq=None,
                  pairwise=None, ddof=1):
    # Single-argument form: covariance of the input with itself, pairwise.
    if arg2 is None:
        arg2 = arg1
        pairwise = True if pairwise is None else pairwise
    # NOTE(review): this legacy-positional branch looks dead --
    # ``min_periods`` defaults to 1 and so is never None here. Confirm
    # whether it should test ``min_periods == 1`` or the default should
    # be None.
    elif isinstance(arg2, (int, float)) and min_periods is None:
        min_periods = arg2
        arg2 = arg1
        pairwise = True if pairwise is None else pairwise
    return ensure_compat('expanding',
                         'cov',
                         arg1,
                         other=arg2,
                         min_periods=min_periods,
                         pairwise=pairwise,
                         freq=freq,
                         ddof=ddof,
                         func_kw=['other', 'pairwise', 'ddof'])
@Substitution("Expanding sample correlation.", _binary_arg_flex,
              _expanding_kw + _pairwise_kw, _flex_retval, "")
@Appender(_doc_template)
def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, pairwise=None):
    # Single-argument form: correlation of the input with itself, pairwise.
    if arg2 is None:
        arg2 = arg1
        pairwise = True if pairwise is None else pairwise
    # NOTE(review): this legacy-positional branch looks dead --
    # ``min_periods`` defaults to 1 and so is never None here.
    elif isinstance(arg2, (int, float)) and min_periods is None:
        min_periods = arg2
        arg2 = arg1
        pairwise = True if pairwise is None else pairwise
    return ensure_compat('expanding',
                         'corr',
                         arg1,
                         other=arg2,
                         min_periods=min_periods,
                         pairwise=pairwise,
                         freq=freq,
                         # NOTE(review): 'ddof' is listed but no ddof kwarg is
                         # ever supplied (corr takes none) -- presumably copied
                         # from expanding_cov; confirm it is ignored downstream.
                         func_kw=['other', 'pairwise', 'ddof'])
def expanding_apply(arg, func, min_periods=1, freq=None,
args=(), kwargs={}):
"""Generic expanding function application.
Parameters
----------
arg : Series, DataFrame
func : function
Must produce a single value from an ndarray input
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the
statistic. Specified as a frequency string or DateOffset object.
args : tuple
Passed on to func
kwargs : dict
Passed on to func
Returns
-------
y : type of input argument
Notes
-----
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
"""
return ensure_compat('expanding',
'apply',
arg,
freq=freq,
min_periods=min_periods,
func_kw=['func', 'args', 'kwargs'],
func=func,
args=args,
kwargs=kwargs)
| winklerand/pandas | pandas/stats/moments.py | Python | bsd-3-clause | 31,628 | [
"Gaussian"
] | 3fa183d34dc16302744699d3cabc9ad376012d8522a396d82026272134504ad5 |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from urllib import quote
from json import dumps
from Plugins.Extensions.OpenWebif.local import tstrings
import datetime
##################################################
## MODULE CONSTANTS
# Short aliases for Cheetah's name-mapper lookup helpers used throughout
# the generated respond() method.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Metadata recorded by the Cheetah compiler at generation time.
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.30715
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/mobile/timerlist.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'

# Refuse to run against a Cheetah runtime older than the compiler that
# produced this module.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
         __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class timerlist(Template):
    """Cheetah-generated template rendering the OpenWebif mobile timer-list
    page. Do not edit by hand: regenerate from timerlist.tmpl instead."""

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        # Forward construction to the Cheetah Template base class, then run
        # the per-instance initialisation exactly once with only the keyword
        # arguments the Cheetah runtime understands.
        super(timerlist, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def respond(self, trans=None):
        """Render the page into *trans* (or a buffering dummy transaction)
        and return the rendered string when buffering."""

        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter

        ########################################
        ## START - generated method body

        # Static page head plus the translated-strings bootstrap.
        write(u'''<html>\r
<head>\r
\t<title>OpenWebif</title>\r
\t<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\r
\t<meta name="viewport" content="user-scalable=no, width=device-width"/>\r
\t<meta name="apple-mobile-web-app-capable" content="yes" />\r
\t<link rel="stylesheet" type="text/css" href="/css/jquery.mobile-1.0.min.css" media="screen"/>\r
\t<link rel="stylesheet" type="text/css" href="/css/iphone.css" media="screen"/>\r
\t<script src="/js/jquery-1.6.2.min.js"></script>\r
\t<script src="/js/jquery.mobile-1.0.min.js"></script>\r
\t<script type="text/javascript" src="/js/openwebif.js"></script>\r
\t<script type="text/javascript">initJsTranslation(''')
        _v = VFFSL(SL,"dumps",False)(VFFSL(SL,"tstrings",True)) # u'$dumps($tstrings)' on line 15, col 51
        if _v is not None: write(_filter(_v, rawExpr=u'$dumps($tstrings)')) # from line 15, col 51.
        write(u''')</script>\r
</head>\r
<body> \r
\t<div data-role="page">\r
\r
\t\t<div id="header">\r
\t\t\t<div class="button" onClick="history.back()">''')
        _v = VFFSL(SL,"tstrings",True)['back'] # u"$tstrings['back']" on line 22, col 49
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['back']")) # from line 22, col 49.
        write(u'''</div>\r
\t\t\t<!-- <div class="button-bold">+</div> -->\r
\t\t\t<h1><a style="color:#FFF;text-decoration:none;" href=\'/mobile\'>OpenWebif</a></h1>
\t\t</div>\r
\r
\t\t<div id="contentContainer">\r
\t\t\t<ul data-role="listview" data-inset="true" data-theme="d">\r
\t\t\t\t<li data-role="list-divider" role="heading" data-theme="b">''')
        _v = VFFSL(SL,"tstrings",True)['timer_list'] # u"$tstrings['timer_list']" on line 29, col 64
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['timer_list']")) # from line 29, col 64.
        write(u'''</li>\r
''')
        # One <li> per timer: the anchor's onClick deletes the timer; the
        # service reference and name are URL-quoted for the JS call.
        for timer in VFFSL(SL,"timers",True): # generated from line 30, col 5
            duration = VFFSL(SL,"timer.duration",True)/60
            starttime = datetime.datetime.fromtimestamp(VFFSL(SL,"timer.begin",True)).strftime("%d.%m.%Y")
            endtime = datetime.datetime.fromtimestamp(VFFSL(SL,"timer.end",True)).strftime("%d.%m.%Y")
            write(u'''\t\t\t\t<li>\r
''')
            sref = quote(VFFSL(SL,"timer.serviceref",True), safe=' ~@#$&()*!+=:;,.?/\'')
            name = quote(VFFSL(SL,"timer.name",True), safe=' ~@#$&()*!+=:;,.?/\'').replace("'","\\'")
            write(u'''\t\t\t\t\t<a href="javascript:history.go(0)" onClick="deleteTimer(\'''')
            _v = VFFSL(SL,"sref",True) # u'$sref' on line 37, col 63
            if _v is not None: write(_filter(_v, rawExpr=u'$sref')) # from line 37, col 63.
            write(u"""', '""")
            _v = VFFSL(SL,"timer.begin",True) # u'$timer.begin' on line 37, col 72
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.begin')) # from line 37, col 72.
            write(u"""', '""")
            _v = VFFSL(SL,"timer.end",True) # u'$timer.end' on line 37, col 88
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.end')) # from line 37, col 88.
            write(u"""', '""")
            _v = VFFSL(SL,"name",True) # u'$name' on line 37, col 102
            if _v is not None: write(_filter(_v, rawExpr=u'$name')) # from line 37, col 102.
            write(u'''\');">\r
\t\t\t\t\t\t<span class="ui-li-heading" style="margin-top: 3px; margin-bottom: 3px;">''')
            _v = VFFSL(SL,"timer.name",True) # u'$timer.name' on line 38, col 80
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.name')) # from line 38, col 80.
            write(u''' (''')
            _v = VFFSL(SL,"timer.servicename",True) # u'$timer.servicename' on line 38, col 93
            if _v is not None: write(_filter(_v, rawExpr=u'$timer.servicename')) # from line 38, col 93.
            write(u''')</span>\r
\t\t\t\t\t\t<span class="ui-li-desc" style="margin-top: 3px; margin-bottom: 3px;">''')
            _v = VFFSL(SL,"starttime",True) # u'$starttime' on line 39, col 77
            if _v is not None: write(_filter(_v, rawExpr=u'$starttime')) # from line 39, col 77.
            write(u''' - ''')
            _v = VFFSL(SL,"endtime",True) # u'$endtime' on line 39, col 90
            if _v is not None: write(_filter(_v, rawExpr=u'$endtime')) # from line 39, col 90.
            write(u''' (''')
            _v = VFFSL(SL,"duration",True) # u'$duration' on line 39, col 100
            if _v is not None: write(_filter(_v, rawExpr=u'$duration')) # from line 39, col 100.
            write(u''' min)</span>\r
\t\t\t\t\t</a>\r
\t\t\t\t</li>\r
''')
        # Static page footer.
        write(u'''\t\t\t</ul>\r
\t\t\t<button onClick="document.location.reload(true)">''')
        _v = VFFSL(SL,"tstrings",True)['refresh'] # u"$tstrings['refresh']" on line 44, col 53
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['refresh']")) # from line 44, col 53.
        write(u'''</button>\r
\t\t</div>\r
\r
\t\t<div id="footer">\r
\t\t\t<p>OpenWebif Mobile</p>\r
\t\t\t<a onclick="document.location.href=\'/index?mode=fullpage\';return false;" href="#">''')
        _v = VFFSL(SL,"tstrings",True)['show_full_openwebif'] # u"$tstrings['show_full_openwebif']" on line 49, col 86
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['show_full_openwebif']")) # from line 49, col 86.
        write(u'''</a>\r
\t\t</div>\r
\t\t\r
\t</div>\r
</body>\r
</html>\r
''')

        ########################################
        ## END - generated method body

        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    _CHEETAH__instanceInitialized = False

    _CHEETAH_version = __CHEETAH_version__

    _CHEETAH_versionTuple = __CHEETAH_versionTuple__

    _CHEETAH_genTime = __CHEETAH_genTime__

    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__

    _CHEETAH_src = __CHEETAH_src__

    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__

    _mainCheetahMethod_for_timerlist= 'respond'

## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing methods to the class exactly once.
if not hasattr(timerlist, '_initCheetahAttributes'):
    templateAPIClass = getattr(timerlist, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(timerlist)


# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:
if __name__ == '__main__':
    # Allow rendering the template directly from the shell for debugging.
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=timerlist()).run()
| pli3/e2-openwbif | plugin/controllers/views/mobile/timerlist.py | Python | gpl-2.0 | 9,716 | [
"VisIt"
] | 503205bcb53b684d8b8fc4d1bd13fd2437ac7d867c5ebba1a57447e2c8bf07cd |
import os
import yaml
from bcbio import utils
from bcbio.install import _get_data_dir
from bcbio.distributed import clargs
from bcbio.provenance import system
import bcbio.distributed.resources as res
from bcbio.distributed.ipython import create
# from bcbio import log
import log
from cluster_helper import cluster as ipc
config_default = {'name': 'std', 'mem': 8, 'cores': 1}
def get_cluster_view(args):
    """Create an ipython-cluster-helper view from the parsed arguments dict.

    Ensures the working ``ipython`` and ``checkpoint`` directories exist
    before starting the cluster.
    """
    if not os.path.exists("ipython"):
        utils.safe_makedir("ipython")
    utils.safe_makedir("checkpoint")
    extra = {"resources": args['resources'],
             "mem": args['mem'],
             "tag": "ichwrapper",
             "run_local": args['run_local']}
    return ipc.cluster_view(args['scheduler'], args['queue'],
                            args['num_jobs'], args['cores_per_job'],
                            start_wait=args['timeout'],
                            profile="ipython",
                            extra_params=extra)
def wait_until_complete(jobs):
    """Block on every async job and return each result wrapped in a list."""
    results = []
    for job in jobs:
        results.append([job.get()])
    return results
def is_done(step):
    """Return True if a checkpoint flag file exists for *step*.

    The flag lives at ``checkpoint/<step>`` relative to the working dir.
    """
    # Idiom fix: return the boolean directly instead of the redundant
    # if/return True/return False ladder.
    return os.path.exists(os.path.join("checkpoint", step))
def flag_done(step):
    """Write the checkpoint flag file that marks *step* as finished."""
    flag_path = os.path.join("checkpoint", step)
    with open(flag_path, "w") as handle:
        handle.write("done")
def _calculate_resources(data, args, resources):
    """Size the parallel run for *data* with the requested *resources*.

    Injects ``{name: {memory, cores}}`` into the bcbio config carried by the
    first sample, sets up logging and system info, and returns the parallel
    dict produced by bcbio's resource calculator.
    """
    parallel = clargs.to_parallel(args)
    config = data[0][0]['config']
    # Register the requested memory/cores under the step name so the bcbio
    # calculator picks them up as the program's resource spec.
    config['resources'].update({resources['name']: {'memory': "%sg" % resources['mem'], 'cores': resources['cores']}})
    parallel.update({'progs': [resources['name']]})
    # parallel = log.create_base_logger(config, parallel)
    # log.setup_local_logging(config, parallel)
    log.setup_log(config, parallel)
    dirs = {'work': os.path.abspath(os.getcwd())}
    system.write_info(dirs, parallel, config)
    sysinfo = system.machine_info()[0]
    log.logger.info("Number of items %s" % len(data))
    parallel = res.calculate(parallel, data, sysinfo, config)
    log.logger.info(parallel)
    # print parallel
    # raise
    return parallel
def _check_items(data):
"""
First check items are as expected
"""
msg = ("\nYou can use ichwrapper.cluster.update_samples to add the config structure."
"\nExample of list of samples to parallelize:"
"\n[sample1, sample2, sample3]"
"\nsample1=[{..., 'config':{'algorithm', ...}}]")
assert isinstance(data, list), "data needs to be a list"
assert isinstance(data[0], list), "each item inside data needs to be like this [{}]"
assert data[0][0]['config'], "each item inside data needs to have a config key with the info from galaxy/bcbio_system.yaml." + msg
assert data[0][0]['config']['algorithm'], "config key inside item dict needs to have algorithm key." + msg
def send_job(fn, data, args, resources=None):
    """decide if send jobs with ipython or run locally

    Runs *fn* once per sample, either fanned out over an ipython cluster
    view (when the calculated parallel type is "ipython" and the step's
    checkpoint flag is absent) or serially in-process. Returns a list of
    one-element result lists, one per sample.
    """
    utils.safe_makedir("checkpoint")
    _check_items(data)
    # NOTE(review): this local ``res`` shadows the module-level alias
    # ``bcbio.distributed.resources as res`` for the rest of the function.
    res = []
    dirs = {'work': os.path.abspath(os.getcwd())}
    config = data[0][0]['config']
    if not resources:
        resources = config_default
    # The resource name doubles as the checkpoint flag for this stage.
    step = resources['name']
    if 'mem' not in resources or 'cores' not in resources:
        raise ValueError("resources without mem or cores keys: %s" % resources)
    par = _calculate_resources(data, args, resources)
    # args.memory_per_job = resources['mem']
    # args.cores_per_job = resources['cores']
    # log.setup_log(args)
    log.logger.debug("doing %s" % step)
    # Cluster path: skip entirely if a previous run already flagged the step.
    if par['type'] == "ipython" and not is_done(step):
        with create(par, dirs, config) as view:
            for sample in data:
                res.append(view.apply_async(fn, sample[0], args))
        res = wait_until_complete(res)
        flag_done(step)
        return res
    # Local fallback: run each sample serially in the current process.
    for sample in data:
        res.append([fn(sample[0], args)])
    return res
def update_samples(data, resources, args):
    """
    Update algorithm dict with new cores set

    Loads the bcbio system config (from ``args.galaxy`` if given, else the
    installed data dir), points each sample's ``config['algorithm']`` at
    *resources*, and returns the samples wrapped as ``[[sample], ...]``.
    """
    if args.galaxy:
        system_config = args.galaxy
    else:
        system_config = os.path.join(_get_data_dir(), "galaxy", "bcbio_system.yaml")
    # BUG FIX: close the handle deterministically and use safe_load --
    # yaml.load without a Loader can construct arbitrary Python objects.
    with open(system_config) as in_handle:
        config = yaml.safe_load(in_handle)
    config['algorithm'] = {}
    new_data = []
    # NOTE(review): every sample shares the *same* config dict (and the same
    # resources dict as its algorithm); mutating one sample's config mutates
    # all of them. Preserved as-is -- confirm this is intended.
    for sample in data:
        sample['config'] = config
        sample['config']['algorithm'] = resources
        new_data.append([sample])
    return new_data
| lpantano/ich-wrapper | ichwrapper/cluster.py | Python | mit | 4,416 | [
"Galaxy"
] | 2d00c0db361b9a52bfbcc7d8cf9c80bbd29f3daf960626da19f732417c5c7dd1 |
#!/usr/bin/env python
"""
Rough concolic execution implementation
Limitations
- tested only on the simpleassert example program in examples/
- only works for 3 ints of stdin
Bugs
- Will probably break if a newly discovered branch gets more input/does another read(2)
- possibly unnecessary deepcopies
"""
import queue
import struct
import itertools
from manticore import set_verbosity
from manticore.native import Manticore
from manticore.core.plugin import ExtendedTracer, Follower, Plugin
from manticore.core.smtlib.constraints import ConstraintSet
from manticore.core.smtlib.solver import Z3Solver
from manticore.core.smtlib.visitors import GetDeclarations
from manticore.utils import config
import copy
from manticore.core.smtlib.expression import *
from pathlib import Path
# Path to the target binary (examples/linux/simpleassert, relative to this
# script) and the Manticore log verbosity used for every run below.
prog = str(Path(__file__).parent.resolve().parent.joinpath("linux").joinpath("simpleassert"))

VERBOSITY = 0
def _partition(pred, iterable):
t1, t2 = itertools.tee(iterable)
return (list(itertools.filterfalse(pred, t1)), list(filter(pred, t2)))
def log(s):
    """Print a status message prefixed with ``[+]``."""
    print(f"[+] {s}")
class TraceReceiver(Plugin):
    """Manticore plugin that captures the ExtendedTracer's recorded trace
    when a state terminates and logs a short summary."""

    def __init__(self, tracer):
        # Trace is filled in by the termination callback below.
        self._trace = None
        self._tracer = tracer
        super().__init__()

    @property
    def trace(self):
        """The last captured trace, or None if no state has terminated."""
        return self._trace

    def will_terminate_state_callback(self, state, reason):
        # Pull the trace the tracer stashed in the state context; split it
        # into instruction records ("regs") and memory-write records.
        self._trace = state.context.get(self._tracer.context_key, [])
        instructions, writes = _partition(lambda x: x["type"] == "regs", self._trace)
        total = len(self._trace)
        log(
            f"Recorded concrete trace: {len(instructions)}/{total} instructions, {len(writes)}/{total} writes"
        )
def flip(constraint):
    """
    flips a constraint (Equal)

    (Equal (BitVecITE Cond IfC ElseC) IfC)
    ->
    (Equal (BitVecITE Cond IfC ElseC) ElseC)
    """
    # Shallow copy so the caller's constraint object is not mutated.
    equal = copy.copy(constraint)

    assert len(equal.operands) == 2
    # assume they are the equal -> ite form that we produce on standard branches
    ite, forcepc = equal.operands
    # Constraints not matching the branch shape are returned untouched.
    if not (isinstance(ite, BitVecITE) and isinstance(forcepc, BitVecConstant)):
        return constraint
    assert isinstance(ite, BitVecITE) and isinstance(forcepc, BitVecConstant)
    assert len(ite.operands) == 3
    cond, iifpc, eelsepc = ite.operands
    assert isinstance(iifpc, BitVecConstant) and isinstance(eelsepc, BitVecConstant)

    # Pin the *other* branch target: if the original forced the if-branch
    # PC, force the else-branch PC instead (and vice versa).
    equal._operands = (equal.operands[0], eelsepc if forcepc.value == iifpc.value else iifpc)

    return equal
def eq(a, b):
    """Structural equality for two flipped branch constraints.

    Compares only the forced PC and the two possible branch-target PCs of
    the inner ITE; the branch conditions themselves are ignored.
    """
    # this ignores checking the conditions, only checks the 2 possible pcs
    # the one that it is forced to
    ite1, force1 = a.operands
    ite2, force2 = b.operands
    if force1.value != force2.value:
        return False
    _, first1, second1 = ite1.operands
    # BUG FIX: previously unpacked ``ite1.operands`` again, so ``b``'s
    # branch targets were never examined and any two constraints with the
    # same forced PC compared equal.
    _, first2, second2 = ite2.operands
    if first1.value != first2.value:
        return False
    if second1.value != second2.value:
        return False
    return True
def perm(lst, func):
    """Produce permutations of `lst`, where permutations are mutated by `func`. Used for flipping constraints. highly
    possible that returned constraints can be unsat this does it blindly, without any attention to the constraints
    themselves

    Treats ``lst`` as a bitmask of width ``len(lst)`` and counts from 1 to
    2**len(lst)-1 (skipping 0, the unmodified list); for each mask, ``func``
    is applied to the elements whose bit is set. E.g. for [C1, C2, C3]:

        [ func(C1), C2      , C3       ],
        [ C1      , func(C2), C3       ],
        [ func(C1), func(C2), C3       ],
        [ C1      , C2      , func(C3) ],
        .. etc
    """
    width = len(lst)
    for mask in range(1, 2 ** width):
        yield [func(lst[j]) if mask & (1 << j) else lst[j] for j in range(width)]
def constraints_to_constraintset(constupl):
    """Rebuild a fresh, self-contained ConstraintSet from *constupl*,
    re-declaring every variable the constraints reference."""
    # originally those constraints belonged to a different ConstraintSet
    # This is a hack
    x = ConstraintSet()
    declarations = GetDeclarations()
    for a in constupl:
        # Collect the declarations referenced by each constraint while
        # adding the constraint itself to the new set.
        declarations.visit(a)
        x.add(a)
    for d in declarations.result:
        x._declare(d)
    return x
def input_from_cons(constupl, datas):
    "solve bytes in |datas| based on"

    def make_chr(c):
        # The solved value may already be a plain character; chr() fails on
        # non-ints, in which case the value is returned unchanged.
        # NOTE(review): the broad ``except Exception`` also swallows
        # chr() range errors -- confirm that is intended.
        try:
            return chr(c)
        except Exception:
            return c

    # Concretize each stdin byte under the (possibly flipped) constraints.
    newset = constraints_to_constraintset(constupl)

    ret = ""
    for data in datas:
        for c in data:
            ret += make_chr(Z3Solver.instance().get_value(newset, c))
    return ret
# Run a concrete run with |inp| as stdin
def concrete_run_get_trace(inp):
    """Execute the target binary concretely with *inp* as stdin and return
    the instruction/write trace recorded by ExtendedTracer."""
    # The in-memory workspace has no concurrency support, so force
    # single-process execution.
    consts = config.get_group("core")
    consts.mprocessing = consts.mprocessing.single

    m1 = Manticore.linux(prog, concrete_start=inp, workspace_url="mem:")
    t = ExtendedTracer()
    # r = TraceReceiver(t)
    set_verbosity(VERBOSITY)
    m1.register_plugin(t)
    # m1.register_plugin(r)
    m1.run()
    # A concrete run yields a single terminal state; return its trace.
    for st in m1.all_states:
        return t.get_trace(st)
    # return r.trace
def symbolic_run_get_cons(trace):
    """
    Execute a symbolic run that follows a concrete run; return constraints generated
    and the stdin data produced
    """
    # mem: has no concurrency support. Manticore should be 'Single' process
    m2 = Manticore.linux(prog, workspace_url="mem:")
    f = Follower(trace)
    set_verbosity(VERBOSITY)
    m2.register_plugin(f)

    def on_term_testcase(mm, state, err):
        # At termination, stash the stdin reads (fd 0) and the state's path
        # constraints into the shared Manticore context.
        with m2.locked_context() as ctx:
            readdata = []
            for name, fd, data in state.platform.syscall_trace:
                if name in ("_receive", "_read") and fd == 0:
                    readdata.append(data)
            ctx["readdata"] = readdata
            ctx["constraints"] = list(state.constraints.constraints)

    m2.subscribe("will_terminate_state", on_term_testcase)
    m2.run()

    constraints = m2.context["constraints"]
    datas = m2.context["readdata"]
    return constraints, datas
def contains(new, olds):
    "__contains__ operator using the `eq` function"
    return any(eq(new, old) for old in olds)


def getnew(oldcons, newcons):
    "return all constraints in newcons that aren't in oldcons"
    return [new for new in newcons if not contains(new, oldcons)]


def constraints_are_sat(cons):
    "Whether constraints are sat"
    # Rebuild a self-contained set first; the constraints came from a
    # different ConstraintSet.
    return Z3Solver.instance().check(constraints_to_constraintset(cons))
def get_new_constrs_for_queue(oldcons, newcons):
    """Return every satisfiable constraint set obtained by flipping some
    non-empty subset of the constraints newly discovered on this path."""
    ret = []
    # i'm pretty sure its correct to assume newcons is a superset of oldcons
    new_constraints = getnew(oldcons, newcons)
    if not new_constraints:
        return ret

    perms = perm(new_constraints, flip)

    for p in perms:
        candidate = oldcons + p
        # candidate new constraint sets might not be sat because we blindly
        # permute the new constraints that the path uncovered and append them
        # back onto the original set. we do this without regard for how the
        # permutation of the new constraints combines with the old constraints
        # to affect the satisfiability of the whole
        if constraints_are_sat(candidate):
            ret.append(candidate)

    return ret
def inp2ints(inp):
    """Render 12 bytes of stdin as three little-endian ints for display."""
    fields = struct.unpack("<iii", inp)
    return "a={} b={} c={}".format(*fields)


def ints2inp(*ints):
    """Pack the given ints as consecutive little-endian 4-byte values."""
    fmt = "<" + "i" * len(ints)
    return struct.pack(fmt, *ints)
# Instruction-pointer traces already explored, for path deduplication.
traces = set()


def concrete_input_to_constraints(ci, prev=None):
    """Concretely run *ci*, follow it symbolically, and return the pair
    (new flipped constraint sets to enqueue, stdin data observed)."""
    global traces
    if prev is None:
        prev = []
    trc = concrete_run_get_trace(ci)

    # Only heed new traces
    trace_rips = tuple(
        x["values"]["RIP"] for x in trc if x["type"] == "regs" and "RIP" in x["values"]
    )
    if trace_rips in traces:
        return [], []
    traces.add(trace_rips)

    log("getting constraints from symbolic run")
    cons, datas = symbolic_run_get_cons(trc)
    # hmmm: ideally, do some smart stuff so we don't have to check if the
    # constraints are unsat. something like the compare the constraints set
    # which you used to generate the input, and the constraint set you got
    # from the symex. sounds pretty hard
    #
    # but maybe a dumb way where we blindly permute the constraints
    # and just check if they're sat before queueing will work
    new_constraints = get_new_constrs_for_queue(prev, cons)
    log(f"permuting constraints and adding {len(new_constraints)} constraints sets to queue")
    return new_constraints, datas
def main():
    """Drive the concolic loop: seed one concrete run, then keep solving
    flipped constraint sets into fresh inputs until the queue drains."""
    q = queue.Queue()

    # todo randomly generated concrete start
    stdin = ints2inp(0, 5, 0)

    log(f"seed input generated ({inp2ints(stdin)}), running initial concrete run.")

    to_queue, datas = concrete_input_to_constraints(stdin)
    for each in to_queue:
        q.put(each)

    # hmmm: probably issues with the datas stuff here? probably need to store
    # the datas in the queue or something. what if there was a new read(2) deep in the program, changing the datas?
    while not q.empty():
        log(f"get constraint set from queue, queue size: {q.qsize()}")
        cons = q.get()
        # Solve the constraint set back into a concrete stdin and explore it.
        inp = input_from_cons(cons, datas)
        to_queue, new_datas = concrete_input_to_constraints(inp, cons)
        if len(new_datas) > 0:
            datas = new_datas

        for each in to_queue:
            q.put(each)

    log(f"paths found: {len(traces)}")
log(f"paths found: {len(traces)}")
if __name__ == "__main__":
main()
| trailofbits/manticore | examples/script/concolic.py | Python | agpl-3.0 | 9,601 | [
"VisIt"
] | 50438efe13ae75bee85f655ae81d115e43ab80dd48359608130a4b6571685a66 |
# Conversion Module
import numpy as np
import data as data
import math as math
def conversion(mask_id, n_horizontal, m_vertical, datastruct):
    """Convert the Moire 3D array into the Crystal 2D array and store it in datastruct.

    The Moire 3D array is loaded from the data structure (datastruct) using the
    keyword (mask_id = string). The horizontal and vertical components of the
    Moire 3D array are separated and each component is converted using the
    integers n_horizontal and m_vertical and the pixel size (a strictly
    positive real number loaded from datastruct).

    Raises:
        ValueError: if the pixel size stored in datastruct is not strictly
            positive.
    """
    # Load the pixel size (p) from the data structure and check that it is
    # strictly positive.
    p = data.SMGData.load(datastruct, 'p')
    if p <= 0:
        # ValueError is more precise than the bare Exception previously
        # raised here and stays backward compatible (callers catching
        # Exception still match, since ValueError is a subclass).
        raise ValueError('Pixel size negative or zero, conversion cannot be performed')
    # Load the unstrained reference Moire 3D array (gMuns: a 2D vector on
    # each pixel of a 2D image, components stored separately).
    g_m_uns = data.SMGData.load_g(datastruct, mask_id, 'gMuns')
    # Generate the correction 3D array applied componentwise to the
    # unstrained reference Moire 3D array.
    correction = np.ones(g_m_uns.shape)
    # Warning g[0] component along x (vertical axis pointing down)
    correction[0, :, :] = - m_vertical * correction[0, :, :]
    # Warning g[1] component along y (horizontal axis pointing right)
    correction[1, :, :] = n_horizontal * correction[1, :, :]
    # Apply the correction to obtain the unstrained reference crystalline 3D
    # array and store it in the data structure.
    g_c_uns = g_m_uns + correction
    data.SMGData.store_g(datastruct, mask_id, 'gCuns', g_c_uns)
    # Inform the user of completion and report the norm of the crystalline
    # wave vector, computed from the first pixel (units: nm^-1).
    norm = 1 / p * math.sqrt(g_c_uns[0, 0, 0] ** 2 + g_c_uns[1, 0, 0] ** 2)
    print('Conversion done !!')
    print('g norm = ', norm, ' nm-1')
| slimpotatoes/STEM_Moire_GPA | src/conversion.py | Python | bsd-3-clause | 1,900 | [
"CRYSTAL"
] | ade6806cf19c61315a143237cdb678d6d3c6f3cae81d65c0f29815d2f078ee3c |
#!/usr/bin/env python
"""
This is a script for quick Mayavi-based visualizations of finite element
computations results.
Examples
--------
The examples assume that run_tests.py has been run successfully and the
resulting data files are present.
- view data in output-tests/test_navier_stokes.vtk
$ python postproc.py output-tests/test_navier_stokes.vtk
$ python postproc.py output-tests/test_navier_stokes.vtk --3d
- save a snapshot image and exit
$ python postproc.py output-tests/test_poisson.vtk -o image.png -n
- save a snapshot image without off-screen rendering and exit
$ python postproc.py output-tests/test_poisson.vtk -o image.png -n --no-offscreen
- create animation (forces offscreen rendering) from
output-tests/test_time_poisson.*.vtk
$ python postproc.py output-tests/test_time_poisson.*.vtk -a mov
- create animation (forces offscreen rendering) from
output-tests/test_hyperelastic.*.vtk
The range specification for the displacements 'u' is required, as
output-tests/test_hyperelastic.00.vtk contains only zero
displacements which leads to invisible glyph size.
$ python postproc.py output-tests/test_hyperelastic.*.vtk --ranges=u,0,0.02 -a mov
- same as above, but slower frame rate
$ python postproc.py output-tests/test_hyperelastic_TL.*.vtk --ranges=u,0,0.02 -a mov --ffmpeg-options="-framerate 2"
"""
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser, Action, RawDescriptionHelpFormatter
import os
import glob
import sfepy
from sfepy.base.base import assert_, get_default, output, nm
from sfepy.postprocess.viewer import (Viewer, get_data_ranges,
create_file_source)
from sfepy.postprocess.domain_specific import DomainSpecificPlot
import six
# Help strings for every command-line option defined in main() below; keys
# match the argparse option/dest names.
helps = {
    'debug':
    'automatically start debugger when an exception is raised',
    'filename' :
    'view image file name [default: "view.png"]',
    'output_dir' :
    'output directory for saving view images; ignored when -o option is' \
    ' given, as the directory part of the filename is taken instead' \
    ' [default: "."]',
    'no_show' :
    'do not call mlab.show()',
    'no_offscreen' :
    'force no offscreen rendering for --no-show',
    'anim_format' :
    'if set to a ffmpeg-supported format (e.g. mov, avi, mpg), ffmpeg is' \
    ' installed and results of multiple time steps are given, an animation is' \
    ' created in the same directory as the view images',
    'ffmpeg_options' :
    'ffmpeg animation encoding options (enclose in "")' \
    '[default: "%(default)s"]',
    'step' :
    'set the time step. Negative indices are allowed, -1 means the last step.'
    ' The closest higher step is used if the desired one is not available.'
    ' Has precedence over --time. [default: the first step]',
    'time' :
    'set the time. The closest higher time is used if the desired one is not'
    ' available. [default: None]',
    'watch' :
    'watch the results file for changes (single file mode only)',
    'all' :
    'draw all data (normally, node_groups and mat_id are omitted)',
    'only_names' :
    'draw only named data',
    'list_ranges' :
    'do not plot, only list names and ranges of all data',
    'ranges' :
    'force data ranges [default: automatic from data]',
    'resolution' :
    'image resolution in NxN format [default: shorter axis: 600;'\
    ' depends on layout: for rowcol it is 800x600]',
    'layout' :
    'layout for multi-field plots, one of: rowcol, colrow, row, col, row#n,' \
    'col#n, where #n is the number of plots in the specified direction ' \
    '[default: %(default)s]',
    'is_3d' :
    '3d plot mode',
    'view' :
    'camera azimuth, elevation angles, and optionally also '
    'distance and focal point coordinates (without []) as in `mlab.view()` '
    '[default: if --3d is True: "45,45", else: "0,0"]',
    'roll' :
    'camera roll angle [default: %(default)s]',
    'parallel_projection' :
    'use parallel projection',
    'fgcolor' :
    'foreground color, that is the color of all text annotation labels'
    ' (axes, orientation axes, scalar bar labels) [default: %(default)s]',
    'bgcolor' :
    'background color [default: %(default)s]',
    'colormap' :
    'mayavi2 colormap name [default: %(default)s]',
    'anti_aliasing' :
    'value of anti-aliasing [default: mayavi2 default]',
    'is_scalar_bar' :
    'show scalar bar for each data',
    'is_wireframe' :
    'show wireframe of mesh surface for each data',
    'group_names' :
    'superimpose plots of data in each group',
    'subdomains' :
    'superimpose surfaces of subdomains over each data;' \
    ' example value: mat_id,0,None,True',
    'domain_specific' :
    'domain specific drawing functions and configurations',
    'scalar_mode' :
    'mode for plotting scalars with --3d, one of: cut_plane, iso_surface,'\
    ' both [default: %(default)s]',
    'vector_mode' :
    'mode for plotting vectors, one of: arrows, norm, arrows_norm, warp_norm'\
    ' [default: %(default)s]',
    'rel_scaling' :
    'relative scaling of glyphs (vector field visualization)' \
    ' [default: %(default)s]',
    'clamping' :
    'glyph clamping mode',
    'opacity' :
    'opacity in [0.0, 1.0]. Can be given either globally'
    ' as a single float, or per module, e.g.'
    ' "wireframe=0.1,scalar_cut_plane=0.5". Possible keywords are: wireframe,'
    ' scalar_cut_plane, vector_cut_plane, surface, iso_surface,'
    ' arrows_surface, glyphs. [default: 1.0]',
    'rel_text_width' :
    'relative text annotation width [default: %(default)s]',
}
class ParseView(Action):
    """Parse --view given as "azimuth,elevation[,distance[,x,y,z]]" into the
    tuple form expected by ``mlab.view()``; the optional last three numbers
    become a focal-point coordinate list."""

    def __call__(self, parser, namespace, value, option_string=None):
        parts = [float(item) for item in value.split(',')]
        assert_(len(parts) in [2, 3, 6])
        if len(parts) == 6:
            # Split off the focal point coordinates as a list.
            parsed = tuple(parts[:3]) + (parts[3:],)
        else:
            parsed = tuple(parts)
        setattr(namespace, self.dest, parsed)
class ParseResolution(Action):
    """Parse the --resolution option given as "WIDTHxHEIGHT" into a tuple
    of ints, e.g. "800x600" -> (800, 600).

    Fix: removed a leftover debug ``print(value)`` that polluted stdout on
    every invocation.
    """

    def __call__(self, parser, namespace, value, option_string=None):
        if value is not None:
            setattr(namespace, self.dest,
                    tuple(int(r) for r in value.split('x')))
class ParseRanges(Action):
    """Parse --ranges "name1,min1,max1:name2,min2,max2:..." into a dict
    mapping each data name to a (min, max) float pair.

    Fix: removed a leftover debug ``print(value)`` that polluted stdout on
    every invocation.
    """

    def __call__(self, parser, namespace, value, option_string=None):
        if value is not None:
            ranges = {}
            for rng in value.split(':'):
                aux = rng.split(',')
                # Extra comma-separated fields beyond the first three are
                # ignored, matching the original tolerant behavior.
                ranges[aux[0]] = (float(aux[1]), float(aux[2]))
            setattr(namespace, self.dest, ranges)
class ParseOpacity(Action):
    """Parse --opacity either as a single global float in [0, 1] or as
    per-module "key=value,..." overrides into a dict.

    Fix: the fallback clause used a bare ``except:``, which would also
    swallow SystemExit and KeyboardInterrupt; narrowed to ``Exception``.
    """

    def __call__(self, parser, namespace, value, option_string=None):
        try:
            opacity = float(value)
            assert_(0.0 <= opacity <= 1.0)
        except Exception:
            # Not a plain in-range float: parse "key=value" pairs instead.
            opacity = {}
            for vals in value.split(','):
                key, val = vals.split('=')
                val = float(val)
                assert_(0.0 <= val <= 1.0)
                opacity[key] = val
        setattr(namespace, self.dest, opacity)
class ParseGroupNames(Action):
    """Parse --group-names "name1,...,nameN:..." into a list of tuples, one
    tuple of data names per group.

    Fix: removed a leftover debug ``print(value)`` that polluted stdout on
    every invocation.
    """

    def __call__(self, parser, namespace, value, option_string=None):
        if value is not None:
            group_names = [tuple(group.split(','))
                           for group in value.split(':')]
            setattr(namespace, self.dest, group_names)
class ParseSubdomains(Action):
    """Parse --subdomains "mat_id_name,tmin,tmax,single_color" into the
    keyword dict expected by the Viewer; threshold limits that are not
    integers (e.g. "None") become None.

    Fix: removed a leftover debug ``print(value)`` that polluted stdout on
    every invocation.
    """

    def __call__(self, parser, namespace, value, option_string=None):
        if value is not None:
            aux = value.split(',')
            try:
                tmin = int(aux[1])
            except ValueError:
                tmin = None
            try:
                tmax = int(aux[2])
            except ValueError:
                tmax = None
            subdomains_args = {'mat_id_name' : aux[0],
                               'threshold_limits' : (tmin, tmax),
                               'single_color' : aux[3] == 'True'}
            setattr(namespace, self.dest, subdomains_args)
class ParseDomainSpecific(Action):
    """Parse --domain-specific "var,function,par0=val0,...:var2,..." into a
    dict mapping variable names to DomainSpecificPlot configurations.

    Fix: removed a leftover debug ``print(value)`` that polluted stdout on
    every invocation.
    """

    def __call__(self, parser, namespace, value, option_string=None):
        if value is not None:
            out = {}
            for conf in value.split(':'):
                aux = conf.split(',')
                var_name, fun_name = aux[:2]
                # Remaining comma-separated items are "par=val" arguments.
                args = aux[2:]
                out[var_name] = DomainSpecificPlot(fun_name, args)
            setattr(namespace, self.dest, out)
def view_file(filename, filter_names, options, view=None):
    """Visualize one results file (or file list) with Mayavi.

    If *view* is None, a fresh Viewer is created and invoked with all the
    command-line options; otherwise the existing viewer is re-pointed at
    *filename* and a snapshot image is saved.  Returns the Viewer instance so
    callers can reuse it (e.g. across time steps for animations).
    """
    if view is None:
        # Offscreen rendering is forced off when the user wants an
        # interactive window; otherwise default to True unless overridden.
        if options.show:
            offscreen = False
        else:
            offscreen = get_default(options.offscreen, True)
        view = Viewer(filename, watch=options.watch,
                      ffmpeg_options=options.ffmpeg_options,
                      output_dir=options.output_dir,
                      offscreen=offscreen)
        if options.only_names is not None:
            # NOTE(review): mutates options in place; harmless here because
            # this branch runs only once per Viewer creation.
            options.only_names = options.only_names.split(',')
        view(show=options.show, is_3d=options.is_3d, view=options.view,
             roll=options.roll,
             parallel_projection=options.parallel_projection,
             fgcolor=options.fgcolor, bgcolor=options.bgcolor,
             colormap=options.colormap,
             layout=options.layout,
             scalar_mode=options.scalar_mode,
             vector_mode=options.vector_mode,
             rel_scaling=options.rel_scaling,
             clamping=options.clamping, ranges=options.ranges,
             is_scalar_bar=options.is_scalar_bar,
             is_wireframe=options.is_wireframe,
             opacity=options.opacity,
             subdomains_args=options.subdomains_args,
             rel_text_width=options.rel_text_width,
             fig_filename=options.filename, resolution=options.resolution,
             filter_names=filter_names, only_names=options.only_names,
             group_names=options.group_names,
             step=options.step, time=options.time,
             anti_aliasing=options.anti_aliasing,
             domain_specific=options.domain_specific)
    else:
        # Reuse an existing viewer: just switch the data source and snapshot.
        view.set_source_filename(filename)
        view.save_image(options.filename)
    return view
def main():
    """Parse command-line options for the Mayavi-based postprocessor, then
    either list data names/ranges (--list-ranges) or view/save/animate the
    given result file(s)."""
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + sfepy.__version__)
    parser.add_argument('--debug',
                        action='store_true', dest='debug',
                        default=False, help=helps['debug'])
    group = parser.add_argument_group('Output Options')
    group.add_argument('-o', '--output', metavar='filename',
                       action='store', dest='filename',
                       default=None, help=helps['filename'])
    group.add_argument('--output-dir', metavar='directory',
                       action='store', dest='output_dir',
                       default=None, help=helps['output_dir'])
    group.add_argument('-n', '--no-show',
                       action='store_false', dest='show',
                       default=True, help=helps['no_show'])
    group.add_argument('--no-offscreen',
                       action='store_false', dest='offscreen',
                       default=None, help=helps['no_offscreen'])
    group.add_argument('-a', '--animation',
                       metavar='<ffmpeg-supported format>', action='store',
                       dest='anim_format', default=None,
                       help=helps['anim_format'])
    group.add_argument('--ffmpeg-options', metavar='<ffmpeg options>',
                       action='store', dest='ffmpeg_options',
                       default='-framerate 10',
                       help=helps['ffmpeg_options'])
    group = parser.add_argument_group('Data Options')
    group.add_argument('--step', type=int, metavar='step',
                       action='store', dest='step',
                       default=None, help=helps['step'])
    group.add_argument('--time', type=float, metavar='time',
                       action='store', dest='time',
                       default=None, help=helps['time'])
    group.add_argument('-w', '--watch',
                       action='store_true', dest='watch',
                       default=False, help=helps['watch'])
    group.add_argument('--all',
                       action='store_true', dest='all',
                       default=False, help=helps['all'])
    group.add_argument('--only-names', metavar='list of names',
                       action='store', dest='only_names',
                       default=None, help=helps['only_names'])
    group.add_argument('-l', '--list-ranges',
                       action='store_true', dest='list_ranges',
                       default=False, help=helps['list_ranges'])
    group.add_argument('--ranges', type=str,
                       metavar='name1,min1,max1:name2,min2,max2:...',
                       action=ParseRanges, dest='ranges',
                       help=helps['ranges'])
    group = parser.add_argument_group('View Options')
    group.add_argument('-r', '--resolution', type=str, metavar='resolution',
                       action=ParseResolution, dest='resolution',
                       help=helps['resolution'])
    group.add_argument('--layout', metavar='layout',
                       action='store', dest='layout',
                       default='rowcol', help=helps['layout'])
    group.add_argument('--3d',
                       action='store_true', dest='is_3d',
                       default=False, help=helps['is_3d'])
    group.add_argument('--view', type=str,
                       metavar='angle,angle[,distance[,focal_point]]',
                       action=ParseView, dest='view',
                       help=helps['view'])
    group.add_argument('--roll', type=float, metavar='angle',
                       action='store', dest='roll',
                       default=0.0, help=helps['roll'])
    group.add_argument('--parallel-projection',
                       action='store_true', dest='parallel_projection',
                       default=False, help=helps['parallel_projection'])
    group.add_argument('--fgcolor', metavar='R,G,B',
                       action='store', dest='fgcolor',
                       default='0.0,0.0,0.0', help=helps['fgcolor'])
    group.add_argument('--bgcolor', metavar='R,G,B',
                       action='store', dest='bgcolor',
                       default='1.0,1.0,1.0', help=helps['bgcolor'])
    group.add_argument('--colormap', metavar='colormap',
                       action='store', dest='colormap',
                       default='blue-red', help=helps['colormap'])
    group.add_argument('--anti-aliasing', type=int, metavar='value',
                       action='store', dest='anti_aliasing',
                       default=None, help=helps['anti_aliasing'])
    group = parser.add_argument_group('Custom Plots Options')
    group.add_argument('-b', '--scalar-bar',
                       action='store_true', dest='is_scalar_bar',
                       default=False, help=helps['is_scalar_bar'])
    group.add_argument('--wireframe',
                       action='store_true', dest='is_wireframe',
                       default=False, help=helps['is_wireframe'])
    group.add_argument('--group-names', type=str,
                       metavar='name1,...,nameN:...', action=ParseGroupNames,
                       dest='group_names', help=helps['group_names'])
    group.add_argument('--subdomains', type=str,
                       metavar='mat_id_name,threshold_limits,single_color',
                       action=ParseSubdomains, dest='subdomains_args',
                       default=None,
                       help=helps['subdomains'])
    group.add_argument('-d', '--domain-specific', type=str,
                       metavar='"var_name0,function_name0,' \
                       'par0=val0,par1=val1,...:var_name1,..."',
                       action=ParseDomainSpecific, dest='domain_specific',
                       default=None,
                       help=helps['domain_specific'])
    group = parser.add_argument_group('Mayavi Options')
    group.add_argument('--scalar-mode', metavar='mode',
                       action='store', dest='scalar_mode',
                       default='iso_surface', help=helps['scalar_mode'])
    group.add_argument('--vector-mode', metavar='mode',
                       action='store', dest='vector_mode',
                       default='arrows_norm', help=helps['vector_mode'])
    group.add_argument('-s', '--scale-glyphs', type=float, metavar='scale',
                       action='store', dest='rel_scaling',
                       default=0.05, help=helps['rel_scaling'])
    group.add_argument('--clamping',
                       action='store_true', dest='clamping',
                       default=False, help=helps['clamping'])
    group.add_argument('--opacity', type=str, metavar='opacity',
                       action=ParseOpacity, dest='opacity',
                       help=helps['opacity'])
    group.add_argument('--rel-text-width', type=float, metavar='width',
                       action='store', dest='rel_text_width',
                       default=0.02, help=helps['rel_text_width'])
    parser.add_argument('filenames', nargs='+')
    options = parser.parse_args()
    if options.debug:
        from sfepy.base.base import debug_on_error; debug_on_error()
    filenames = options.filenames
    # Convert the "R,G,B" color strings to float triples.
    options.fgcolor = tuple([float(ii) for ii in
                             options.fgcolor.split(',')])
    assert_(len(options.fgcolor) == 3)
    options.bgcolor = tuple([float(ii) for ii in
                             options.bgcolor.split(',')])
    assert_(len(options.bgcolor) == 3)
    can_save = not options.show
    # Output dir / file names.
    if options.filename is None:
        can_save = False
        options.filename = 'view.png'
        if options.output_dir is None:
            options.output_dir = '.'
    else:
        # When -o is given, its directory part overrides --output-dir.
        options.output_dir, options.filename = os.path.split(options.filename)
    # Data filtering,
    if not options.all:
        filter_names = ['node_groups', 'mat_id']
    else:
        filter_names = []
    if options.anim_format is not None:
        # Do not call show when saving an animation.
        options.show = False
    if options.list_ranges:
        # --list-ranges mode: print names and ranges of all data, no plotting.
        all_ranges = {}
        for ii, filename in enumerate(filenames):
            output('%d: %s' % (ii, filename))
            file_source = create_file_source(filename)
            if (options.step is None) and (options.time is None):
                steps, _ = file_source.get_ts_info()
            else:
                if options.step is not None:
                    step, _ = file_source.get_step_time(step=options.step)
                else:
                    step, _ = file_source.get_step_time(time=options.time)
                steps = [step]
            if not len(steps):
                steps = [0]
            for iis, step in enumerate(steps):
                output('%d: step %d' %(iis, step))
                file_source.get_step_time(step=step)
                source = file_source.create_source()
                ranges = get_data_ranges(source, return_only=True)
                for key, val in six.iteritems(ranges):
                    all_ranges.setdefault(key, []).append(val[3:])
        if (len(filenames) > 1) or (len(steps) > 1):
            output('union of ranges:')
        else:
            output('ranges:')
        for key, ranges in six.iteritems(all_ranges):
            aux = nm.array(ranges)
            mins = aux[:, [0, 2]].min(axis=0)
            maxs = aux[:, [1, 3]].max(axis=0)
            output('  items: %s,%e,%e' % (key, mins[0], maxs[0]))
            output('  norms: %s,%e,%e' % (key, mins[1], maxs[1]))
    else:
        # Normal mode: view the file(s), optionally saving images/animation.
        if len(filenames) == 1:
            filenames = filenames[0]
        view = view_file(filenames, filter_names, options)
        if can_save:
            view.save_image(options.filename)
        if options.anim_format is not None:
            view.save_animation(options.filename)
            view.encode_animation(options.filename, options.anim_format,
                                  options.ffmpeg_options)
if __name__ == '__main__':
    main()
| sfepy/sfepy | postproc.py | Python | bsd-3-clause | 20,509 | [
"Mayavi",
"VTK"
] | ceb48782076ac5f5b4e78339453dd7663ab03c8983ef2aa11dac899f34a33ad0 |
# coding: utf-8
import cv2
import numpy as np
import pprint
class ScaleSpace(object):
    def __init__(self, K = 3, O = 8, sigma_0 = 0.8, delta_0 = 0.5):
        """
        This class is a model of a Gaussian scale space.
        :param K: number of scales per octave
        :param O: number of octaves
        :param sigma_0: initial value of sigma
        :param delta_0: initial ratio of subsampling of the image
        """
        self.K = K
        self.O = O
        self.sigma_0 = sigma_0
        self.delta_0 = delta_0
        # image_0: the (resized) seed image; images: dict octave -> dict scale -> image.
        self.image_0 = None
        self.images = {}
    def generate(self, image_in):
        """
        Generate the Gaussian scale space: for each octave o, produce K+3
        progressively blurred images, seeding each octave from the K-th image
        of the previous one.  Intermediate results are written to ./data/.
        :param image_in:
        :return:
        """
        # initialize this object
        self._init(image_in)
        # generate uin
        self.image_0 = self._gen_image_0(image_in, self.delta_0)
        cv2.imwrite("./data/lena_std_org.tif", self.image_0)
        # generate each octave
        for o in range(self.O):
            # set 1st image in o th octave
            if o == 0:
                self.images[o][0] = self._do_gaussian(self.image_0, self.sigma_0)
            else:
                # NOTE(review): the second argument acts as a subsampling
                # divisor here (o+1), not as a delta ratio like delta_0 in
                # the seed call above — confirm this octave spacing is
                # intended (a factor of 2 per octave is more common).
                self.images[o][0] = self._gen_image_0(self.images[o-1][self.K], o+1)
            cv2.imwrite("./data/g_scale_space/lena_std_" + str(o) + "_" + str(0) + ".tif", self.images[o][0])
            for k in range(1, self.K + 3):
                # Per-scale sigma grows geometrically: 2^(k/K) * sigma_0.
                sigma = np.float_power(2.0, float(k)/float(self.K)) * self.sigma_0
                self.images[o][k] = self._do_gaussian(self.images[o][k-1], sigma)
                cv2.imwrite("./data/g_scale_space/lena_std_" + str(o) + "_" + str(k) + ".tif", self.images[o][k])
    def _gen_image_0(self, image, delta_0):
        """
        Resize *image* by dividing both dimensions by *delta_0*.
        :param image:
        :param delta_0:
        :return:
        """
        h0, w0 = image.shape[:2]
        h, w = int(h0/delta_0), int(w0/delta_0)
        # NOTE(review): cv2.resize takes dsize as (width, height); passing
        # (h, w) is only correct for square images — verify for non-square
        # inputs.
        image_0 = cv2.resize(image, (h, w), interpolation=cv2.INTER_LINEAR)
        return image_0
    def _do_gaussian(self, image, sigma):
        """
        Convolve *image* with an explicitly constructed 2D Gaussian kernel of
        standard deviation *sigma* (kernel radius = 4*sigma).
        :param image:
        :param sigma:
        :return:
        """
        #return cv2.GaussianBlur(image, (length, length), sigma)
        kernel = []
        y_min = -4.0*sigma
        x_min = -4.0*sigma
        length = 2*int(4*sigma)+1
        # 2D Gaussian: a * exp(b * (x^2 + y^2)) with analytic normalization a.
        a = 1.0/(2.0*np.pi*sigma*sigma)
        b = -1.0/(2.0*sigma*sigma)
        for row in range(length):
            rows = []
            y = float(row) + y_min
            y2 = y*y
            for col in range(length):
                x = float(col) + x_min
                x2 = x*x
                gauss = a*np.exp(b*(x2 + y2))
                rows.append(gauss)
            kernel.append(rows)
        return cv2.filter2D(image, -1, np.array(kernel))
    def _init(self, image):
        """
        Initialize the per-octave image dictionaries, shrinking the number of
        octaves so that no octave would have a zero-sized image.
        :return:
        """
        o_max = self.O
        h0, w0 = image.shape[:2]
        h, w = int(h0 / self.delta_0), int(w0 / self.delta_0)
        for o in range(self.O):
            o_dash = o + 1
            h_dash, w_dash = int(h/o_dash), int(w/o_dash)
            pprint.pprint([h_dash, w_dash])
            if h_dash == 0 or w_dash == 0:
                o_max = o - 1
        # NOTE(review): the extra "- 1" here shrinks O even when the loop
        # never found a zero-sized octave — confirm this is intended.
        self.O = o_max - 1
        # init O
        for o in range(self.O):
            dic = {}
            # NOTE(review): keys 0..K+1 are pre-created here, but generate()
            # writes scales up to K+2; dict assignment makes this harmless.
            for k in range(self.K+2):
                dic[k] = None
            self.images[o] = dic
"Gaussian"
] | f8f15d6397c10b7e11a2c0731c14918b2f2a51fba72352992cc52a5388de70d9 |
import sympy
from sympy.core import Symbol, Wild, S
from sympy.functions import DiracDelta, Heaviside
from sympy.solvers import solve
#from sympy.integrals import Integral
def change_mul(node,x):
    """change_mul(node,x)

    Rearranges the operands of a product, bringing to front any simple
    DiracDelta expression.
    If no simple DiracDelta expression was found, then all the DiracDelta
    expressions are simplified (using DiracDelta.simplify).

    Return: (dirac,nnode)
    Where:
        dirac is a simple DiracDelta expression. None if no simple expression
        has been found.
        nnode is a new node where all the DiracDelta expressions were
        simplified, and finally the node was expanded. If nnode is None, it
        means that no DiracDelta expression could be simplified.

    Note: when *node* is not a Mul, the node itself is returned unchanged
    (not a tuple) — callers rely on this for the recursive case.

    Examples
    --------
    >>change_mul(x*y*DiracDelta(x)*cos(x),x)
    (DiracDelta(x),x*y*cos(x))
    >>change_mul(x*y*DiracDelta(x**2-1)*cos(x),x)
    (None,x*y*cos(x),x*y*DiracDelta(1 + x)*cos(x)/2 + x*y*DiracDelta(-1 + x)*cos(x)/2)
    >>change_mul(x*y*DiracDelta(cos(x))*cos(x),x)
    (None,None)
    """
    if not node.is_Mul:
        return node
    new_args = []
    dirac = None
    # First pass: look for a "simple" delta (linear argument, no derivative).
    for arg in node.args:
        if isinstance(arg, DiracDelta) and arg.is_simple(x) and (len(arg.args) <= 1 or arg.args[1]==0):
            dirac = arg
        else:
            new_args.append(change_mul(arg,x))
    if not dirac:#we didn't find any simple dirac
        # Second pass: simplify every delta instead and expand the result.
        new_args = []
        for arg in node.args:
            if isinstance(arg, DiracDelta):
                new_args.append(arg.simplify(x))
            else:
                new_args.append(change_mul(arg,x))
        if tuple(new_args) != node.args:
            nnode = node.__class__(*new_args).expand()
        else:#if the node didn't change there is nothing to do
            nnode = None
        return (None, nnode)
    return (dirac,node.__class__(*new_args))
def deltaintegrate(f, x):
    '''Integrate expressions containing DiracDelta with respect to x.

    The idea for integration is the following:
    - If we are dealing with a DiracDelta expression, i.e. DiracDelta(g(x)),
      we try to simplify it.
      If we could simplify it, then we integrate the resulting expression.
      We already know we can integrate a simplified expression, because only
      simple DiracDelta expressions are involved.
      If we couldn't simplify it, there are two cases:
      1) The expression is a simple expression, then we return the integral,
         taking care if we are dealing with a Derivative or with a proper
         DiracDelta.
      2) The expression is not simple (i.e. DiracDelta(cos(x))), we can do
         nothing at all.
    - If the node is a multiplication node having a DiracDelta term:
      First we expand it.
      If the expansion did work, then we try to integrate the expansion.
      If not, we try to extract a simple DiracDelta term, then we have two
      cases:
      1) We have a simple DiracDelta term, so we return the integral.
      2) We didn't have a simple term, but we do have an expression with
         simplified DiracDelta terms, so we integrate this expression.

    Returns None when *f* contains no DiracDelta or cannot be handled.
    '''
    if not f.has(DiracDelta):
        return None
    # g(x) = DiracDelta(h(x))
    if isinstance(f,DiracDelta):
        h = f.simplify(x)
        if h == f:#can't simplify the expression
            #FIXME: the second term tells whether it is DiracDelta or Derivative
            #For integrating derivatives of DiracDelta we need the chain rule
            if f.is_simple(x):
                if (len(f.args) <= 1 or f.args[1]==0):
                    # Integral of a plain delta is the Heaviside step.
                    return Heaviside(f.args[0])
                else:
                    # NOTE(review): `.as_poly().coeffs[0]` relies on the
                    # leading coefficient of the (linear) argument — confirm
                    # against the current sympy Poly API.
                    return (DiracDelta(f.args[0],f.args[1]-1)/ f.args[0].as_poly().coeffs[0])
        else:#let's try to integrate the simplified expression
            fh = sympy.integrals.integrate(h,x)
            return fh
    elif f.is_Mul: #g(x)=a*b*c*f(DiracDelta(h(x)))*d*e
        g = f.expand()
        if f != g:#the expansion worked
            fh = sympy.integrals.integrate(g,x)
            if fh and not isinstance(fh,sympy.integrals.Integral):
                return fh
        else:#no expansion performed, try to extract a simple DiracDelta term
            dg, rest_mult = change_mul(f,x)
            if not dg:
                if rest_mult:
                    fh = sympy.integrals.integrate(rest_mult,x)
                    return fh
            else:
                # Sifting property: integrate by evaluating the rest of the
                # product at the delta's root, times a Heaviside factor.
                point = solve(dg.args[0],x)[0]
                return (rest_mult.subs(x,point)*Heaviside(dg.args[0]))
    return None
| hazelnusse/sympy-old | sympy/integrals/deltafunctions.py | Python | bsd-3-clause | 4,487 | [
"DIRAC"
] | 747493ff27970cf905e2ff32dd607df65a7fd6b94ca759b44c1181b7b7df53be |
"""
main.py - Part of millennium-compact-groups package
Use a clustering algorithm to find compact groups in the Millennium
simulation.
Copyright(C) 2016 by
Trey Wenger; tvwenger@gmail.com
Chris Wiens; cdw9bf@virginia.edu
Kelsey Johnson; kej7a@virginia.edu
GNU General Public License v3 (GNU GPLv3)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
14 Mar 2016 - TVW Finalized version 1.0
"""
_PACK_NAME = 'millennium-compact-groups'
_PROG_NAME = 'main.py'
_VERSION = 'v1.0'
# System utilities
import os
import argparse
import time
import traceback
# Numerical utilities
import numpy as np
# Other utilities
import multiprocessing as mp
import ipyparallel as ipp
import itertools
# Classes for this project
import cg_logger
import worker
def main(snapnums=np.arange(64),size=100.,
         cluster=False,
         use_dbscan=False,neighborhood=0.05,bandwidth=0.1,
         min_members=3,dwarf_limit=0.05,crit_velocity=1000.,
         annular_radius=1.,max_annular_mass_ratio=1.e-4,min_secondtwo_mass_ratio=0.1,
         num_cpus=1,profile=None,
         datadir='data',outdir='results',overwrite=False,
         verbose=False,nolog=False,test=False):
    """
    Set up workers to perform clustering and calculate group and
    member statistics.

    Fixes: the parameter log previously printed the value of `outdir` on the
    "overwrite:" line instead of `overwrite`; the runtime computation reused
    the name `mins` (already bound to the chunk-boundary array), now renamed
    to `minutes` for clarity.
    """
    start_time = time.time()
    if not os.path.isdir(outdir):
        os.mkdir(outdir)
    #
    # Handle test case
    #
    if test:
        snapnums = np.array([50])
        size = 100.
    #
    # Open main log file and record every run parameter for reproducibility
    #
    logfile = os.path.join(outdir,'log_{0}.txt'.format(time.strftime('%Y%m%d%H%M%S')))
    logger = cg_logger.Logger(logfile,nolog=nolog,verbose=verbose)
    logger.log("Using the following parameters:")
    logger.log("snapnums: {0}".format(snapnums))
    logger.log("size: {0}".format(size))
    logger.log("cluster: {0}".format(cluster))
    logger.log("use_dbscan: {0}".format(use_dbscan))
    logger.log("neighborhood: {0}".format(neighborhood))
    logger.log("bandwidth: {0}".format(bandwidth))
    logger.log("min_members: {0}".format(min_members))
    logger.log("dwarf_limit: {0}".format(dwarf_limit))
    logger.log("crit_velocity: {0}".format(crit_velocity))
    logger.log("annular_radius: {0}".format(annular_radius))
    logger.log("max_annular_mass_ratio: {0}".format(max_annular_mass_ratio))
    logger.log("min_secondtwo_mass_ratio: {0}".format(min_secondtwo_mass_ratio))
    logger.log("num_cpus: {0}".format(num_cpus))
    logger.log("profile: {0}".format(profile))
    logger.log("datadir: {0}".format(datadir))
    logger.log("outdir: {0}".format(outdir))
    # BUG FIX: this line previously logged `outdir` instead of `overwrite`.
    logger.log("overwrite: {0}".format(overwrite))
    logger.log("verbose: {0}".format(verbose))
    logger.log("test: {0}".format(test))
    #
    # Set up output directories
    #
    for snapnum in snapnums:
        directory = os.path.join(outdir,"snapnum_{0:02g}".\
                                 format(snapnum))
        if not os.path.isdir(directory):
            os.mkdir(directory)
            logger.log('Created {0}'.format(directory))
        cluster_directory = os.path.join(directory,'cluster')
        if not os.path.isdir(cluster_directory):
            os.mkdir(cluster_directory)
            logger.log('Created {0}'.format(cluster_directory))
        members_directory = os.path.join(directory,'members')
        if not os.path.isdir(members_directory):
            os.mkdir(members_directory)
            logger.log('Created {0}'.format(members_directory))
        groups_directory = os.path.join(directory,'groups')
        if not os.path.isdir(groups_directory):
            os.mkdir(groups_directory)
            logger.log('Created {0}'.format(groups_directory))
    #
    # Set up simulation chunk boundaries
    #
    if test:
        mins = np.array([0])
    else:
        mins = np.arange(0,500,size)
    maxs = mins + size
    #
    # adjust mins and maxs to overlap by annular_radius, but do not
    # go beyond simulation boundaries
    #
    mins = mins - annular_radius
    mins[mins < 0.] = 0.
    maxs = maxs + annular_radius
    maxs[maxs > 500.] = 500.
    boundaries = list(zip(mins,maxs))
    #
    # Set up worker pool: one Worker per (snapshot, x-chunk, y-chunk, z-chunk)
    #
    jobs = []
    for snapnum,xbounds,ybounds,zbounds in \
        itertools.product(snapnums,boundaries,boundaries,boundaries):
        # Set-up a new Worker
        job = worker.Worker(snapnum,xbounds,ybounds,zbounds,
                            cluster=cluster,
                            use_dbscan=use_dbscan,neighborhood=neighborhood,bandwidth=bandwidth,
                            min_members=min_members,dwarf_limit=dwarf_limit,
                            crit_velocity=crit_velocity,annular_radius=annular_radius,
                            max_annular_mass_ratio=max_annular_mass_ratio,
                            min_secondtwo_mass_ratio=min_secondtwo_mass_ratio,
                            datadir=datadir,outdir=outdir,overwrite=overwrite,
                            verbose=verbose,nolog=nolog)
        # Append to list of worker arguments
        jobs.append(job)
        logger.log('Created worker for snapnum: {0:02g}, xmin: {1:03g}, ymin: {2:03g}, zmin: {3:03g}'.\
                   format(snapnum,xbounds[0],ybounds[0],zbounds[0]))
    logger.log("Found {0} jobs".format(len(jobs)))
    #
    # Dispatch jobs: IPython.parallel if a profile is given, otherwise
    # multiprocessing if num_cpus > 1, otherwise serially
    #
    if profile is not None:
        logger.log("Using IPython.parallel")
        engines = ipp.Client(profile=profile,block=False)
        logger.log("Found {0} IPython.parallel engines".\
                   format(len(engines)))
        balancer = engines.load_balanced_view()
        balancer.block = False
        results = balancer.map(worker.run_worker,jobs)
        try:
            results.get()
        except Exception:
            logger.log("Caught exception")
            logger.log(traceback.format_exc())
    elif num_cpus > 1:
        logger.log("Using multiprocessing with {0} cpus".format(num_cpus))
        pool = mp.Pool(num_cpus)
        results = pool.map_async(worker.run_worker,jobs)
        pool.close()
        pool.join()
    else:
        logger.log("Not using parallel processing.")
        for job in jobs:
            worker.run_worker(job)
    logger.log("All jobs done.")
    #
    # Clean up
    #
    # calculate run-time
    time_diff = time.time() - start_time
    hours = int(time_diff/3600.)
    minutes = int((time_diff - hours*3600.)/60.)
    secs = time_diff - hours*3600. - minutes*60.
    logger.log("Runtime: {0}h {1}m {2:.2f}s".format(hours,minutes,secs))
#=====================================================================
# Command Line Arguments
#=====================================================================
# Script entry point: build the argparse interface mirroring main()'s keyword
# arguments and forward the parsed values to main().
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Find Compact Groups in Full Millenium Simulation",
        prog=_PROG_NAME,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    #
    # Simulation parameters
    #
    parser.add_argument('--snapnums',nargs="+",type=int,
                        default=np.arange(64),
                        help="snapnums to process. Default: All (0 to 63)")
    parser.add_argument('--size',type=int,
                        default=100,
                        help="Simulation chunk cube side length in Mpc/h. Default: 100")
    #
    # Clustering parameters
    #
    parser.add_argument('--cluster',action='store_true',
                        help='Re-do clustering even if clustering output already exists.')
    parser.add_argument('--use_dbscan',action='store_true',
                        help='If set, use DBSCAN for clustering. Default: MeanShift')
    parser.add_argument('--neighborhood',type=float,default=0.05,
                        help='Neighborhood parameter for DBSCAN. Default 0.05')
    parser.add_argument('--bandwidth',type=float,default=0.1,
                        help='Bandwidth parameter for MeanShift. Default 0.1')
    #
    # Filter parameters
    #
    parser.add_argument('--min_members',type=int,default=3,
                        help='Minimum members to be considered a group. Default: 3')
    parser.add_argument('--dwarf_limit',type=float,default=0.05,
                        help=('Stellar mass limit for dwarf galaxies in '
                              '10^10 Msun/h. Default: 0.05'))
    parser.add_argument('--crit_velocity',type=float,default=1000.0,
                        help=('Velocity difference (km/s) between a '
                              'galaxy and median group velocity to '
                              'exclude (i.e. high-velocity fly-bys). '
                              'Default: 1000.0'))
    parser.add_argument('--annular_radius',type=float,default=1.0,
                        help=('Size (in Mpc/h) of outer annular radius '
                              'for annular mass ratio calculation. Default: 1.0'))
    parser.add_argument('--max_annular_mass_ratio',type=float,default=1.e-4,
                        help=('Maximum allowed value for the ratio of mass '
                              'in annulus to total mass. Default: 1.e-4'))
    parser.add_argument('--min_secondtwo_mass_ratio',type=float,default=0.1,
                        help=('Minimum allowed value for the ratio of mass '
                              'of the second two most massive galaxies to '
                              ' the most massive galaxy. Default: 0.1'))
    #
    # Multiprocessing parameters
    #
    parser.add_argument('--num_cpus',type=int,default=1,
                        help=("Number of cores to use with "
                              "multiprocessing (not "
                              "IPython.parallel). Default: 1"))
    parser.add_argument('--profile',type=str,default=None,
                        help=("IPython profile if running on computing "
                              "cluster using IPython.parallel. "
                              "Default: None (use multiprocessing "
                              "on single machine)"))
    #
    # Data parameters
    #
    parser.add_argument('--outdir',type=str,default='results',
                        help="directory to save results. Default: results/")
    parser.add_argument('--datadir',type=str,default='data',
                        help="directory where data lives. Default: data/")
    parser.add_argument('--overwrite',action='store_true',
                        help='Re-do analysis if member file and group file exists.')
    #
    # Other
    #
    parser.add_argument('--verbose',action='store_true',
                        help='Output messages along the way.')
    parser.add_argument('--nolog',action='store_true',
                        help="Do not save log files")
    parser.add_argument('--test',action='store_true',
                        help="Run a test on one chunk. (snapnum=50-60, box=0,0,0, size=100)")
    #
    # Parse the arguments and send to main function
    #
    args = parser.parse_args()
    main(snapnums=args.snapnums,size=args.size,
         cluster=args.cluster,
         use_dbscan=args.use_dbscan,neighborhood=args.neighborhood,bandwidth=args.bandwidth,
         min_members=args.min_members,dwarf_limit=args.dwarf_limit,
         crit_velocity=args.crit_velocity,annular_radius=args.annular_radius,
         max_annular_mass_ratio=args.max_annular_mass_ratio,min_secondtwo_mass_ratio=args.min_secondtwo_mass_ratio,
         num_cpus=args.num_cpus,profile=args.profile,
         datadir=args.datadir,outdir=args.outdir,overwrite=args.overwrite,
         verbose=args.verbose,nolog=args.nolog,test=args.test)
| tvwenger/millennium-compact-groups | main.py | Python | gpl-3.0 | 12,161 | [
"Galaxy"
] | f8e2ddf23037e0fb5be266406313b5a8c33096450a81aacd3c23feddf34ef70d |
"""Definitions for the `Gaussian` class."""
import numpy as np
from scipy.special import erfinv
from mosfit.modules.parameters.parameter import Parameter
# Important: Only define one ``Module`` class per file.
class Gaussian(Parameter):
    """Parameter with a Gaussian prior.

    If the parameter must be positive, set the `pos` keyword to True.
    """

    def __init__(self, **kwargs):
        """Initialize module.

        Raises `ValueError` if either `mu` or `sigma` was not supplied.
        """
        super(Gaussian, self).__init__(**kwargs)
        self._mu = kwargs.get(self.key('mu'), None)
        self._sigma = kwargs.get(self.key('sigma'), None)

        # Validate *before* any transformation. The previous code checked
        # truthiness after taking logs, which (a) crashed with a TypeError
        # from np.log(None) when `mu` was missing and `log` was set, and
        # (b) wrongly rejected legitimate values such as mu == 0, or
        # mu == 1 in log space (log(1) == 0 is falsy).
        if self._mu is None:
            raise ValueError('Need to set a value for mu!')
        if self._sigma is None:
            raise ValueError('Need to set a value for sigma!')

        # `self._log` comes from the Parameter base class; when set, the
        # prior is Gaussian in log space (sigma presumably given in dex —
        # hence the log(10**sigma) conversion).
        if self._log:
            self._mu = np.log(self._mu)
            self._sigma = np.log(10.0 ** self._sigma)

    def lnprior_pdf(self, x):
        """Evaluate natural log of probability density function.

        The constant normalization term is omitted; only differences of
        log-probability matter for sampling.
        """
        value = self.value(x)
        if self._log:
            value = np.log(value)
        return -(value - self._mu) ** 2 / (2. * self._sigma ** 2)

    def prior_icdf(self, u):
        """Evaluate inverse cumulative density function.

        Maps a uniform draw `u` in [0, 1] to the Gaussian quantile via
        erfinv, then rescales into the parameter's unit interval using
        `self._min_value`/`self._max_value` (set by the base class —
        assumed already populated here), clipping at the bounds.
        """
        value = (erfinv(2.0 * u - 1.0) * np.sqrt(2.)) * self._sigma + self._mu
        value = (value - self._min_value) / (self._max_value - self._min_value)
        return np.clip(value, 0.0, 1.0)
| mnicholl/MOSFiT | mosfit/modules/parameters/gaussian.py | Python | mit | 1,421 | [
"Gaussian"
] | 6ae1e6370f69d8d58b70ff6bd52cc2066601c6f5f4171b9d3973cc6bff7b86a8 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGeoquery(RPackage):
    """The NCBI Gene Expression Omnibus (GEO) is a public repository of
    microarray data. Given the rich and varied nature of this resource,
    it is only natural to want to apply BioConductor tools to these data.
    GEOquery is the bridge between GEO and BioConductor."""
    homepage = "https://bioconductor.org/packages/GEOquery/"
    url = "https://git.bioconductor.org/packages/GEOquery"
    list_url = homepage
    # Bioconductor packages are fetched from the git mirror pinned to the
    # commit of the release (no downloadable tarball/checksum).
    version('2.42.0', git='https://git.bioconductor.org/packages/GEOquery', commit='c26adef8d3ddbd6932a3170f2f84f6e4327641fb')
    # R package dependencies needed both at build and at run time
    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-xml', type=('build', 'run'))
    depends_on('r-rcurl', type=('build', 'run'))
    depends_on('r-httr', type=('build', 'run'))
    # GEOquery 2.42.0 only supports the R 3.4 series
    depends_on('r@3.4.0:3.4.9', when='@2.42.0')
| EmreAtes/spack | var/spack/repos/builtin/packages/r-geoquery/package.py | Python | lgpl-2.1 | 2,093 | [
"Bioconductor"
] | 988be652fe531ec0f9582df72e57912f742e040b4bbfb6d5b433b51dffcf9664 |
""" This module will run some job descriptions defined with an older version of DIRAC
"""
#pylint: disable=protected-access, wrong-import-position, invalid-name, missing-docstring
import unittest
import os
import shutil
#!/usr/bin/env python
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC import gLogger
from DIRAC.tests.Utilities.utils import find_all
from DIRAC.tests.Utilities.IntegrationTest import IntegrationTest
from DIRAC.Interfaces.API.Job import Job
from DIRAC.Interfaces.API.Dirac import Dirac
class RegressionTestCase( IntegrationTest ):
    """ Base fixture for the regression tests: stages the helper scripts in
        the working directory and builds Job objects from the stored XML
        job descriptions.
    """

    def setUp( self ):
        super( RegressionTestCase, self ).setUp()

        gLogger.setLevel( 'DEBUG' )
        self.dirac = Dirac()

        regressionDir = '/DIRAC/tests/Workflow/Regression'

        # copy the helper scripts next to the test so the jobs can find them
        for scriptName in ( 'exe-script.py', 'helloWorld.py' ):
            shutil.copyfile( find_all( scriptName, '..', regressionDir )[0], './' + scriptName )

        # build one Job per stored XML description (same description twice,
        # mirroring the original "plus a few more" variant)
        self.j_u_hello = Job( find_all( 'helloWorld.xml', '..', regressionDir )[0] )
        self.j_u_helloPlus = Job( find_all( 'helloWorld.xml', '..', regressionDir )[0] )

    def tearDown( self ):
        # remove the staged helper scripts
        for scriptName in ( 'exe-script.py', 'helloWorld.py' ):
            os.remove( scriptName )
class HelloWorldSuccess( RegressionTestCase ):
    """ Run the plain helloWorld.xml job description locally """

    def test_Regression_User( self ):
        self.assertTrue( self.j_u_hello.runLocal( self.dirac )['OK'] )
class HelloWorldPlusSuccess( RegressionTestCase ):
    """ Run the extended helloWorld.xml job description locally """

    def test_Regression_User( self ):
        self.assertTrue( self.j_u_helloPlus.runLocal( self.dirac )['OK'] )
#############################################################################
# Test Suite run
#############################################################################
if __name__ == '__main__':
  # Assemble the suite: the base case plus both concrete regression cases,
  # then run it with a verbose text runner.
  suite = unittest.defaultTestLoader.loadTestsFromTestCase( RegressionTestCase )
  for caseClass in ( HelloWorldSuccess, HelloWorldPlusSuccess ):
    suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( caseClass ) )
  testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
| Andrew-McNab-UK/DIRAC | tests/Workflow/Regression/Test_RegressionUserJobs.py | Python | gpl-3.0 | 2,354 | [
"DIRAC"
] | 36c43073b54c71026dd704728f1b394aef6daed8a091d8b1d701939b1afae21a |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import base64
import calendar
import copy
import hashlib
import hmac
import json
import pytz
import six
import telegram
import time
import urllib2
import uuid
from datetime import timedelta, date
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core import mail
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.utils import timezone
from django.template import loader, Context
from django_redis import get_redis_connection
from mock import patch
from smartmin.tests import SmartminTest
from temba.api.models import WebHookEvent, SMS_RECEIVED
from temba.contacts.models import Contact, ContactGroup, ContactURN, URN, TEL_SCHEME, TWITTER_SCHEME, EXTERNAL_SCHEME, LINE_SCHEME
from temba.msgs.models import Broadcast, Msg, IVR, WIRED, FAILED, SENT, DELIVERED, ERRORED, INCOMING, PENDING
from temba.contacts.models import TELEGRAM_SCHEME, FACEBOOK_SCHEME, VIBER_SCHEME
from temba.ivr.models import IVRCall
from temba.msgs.models import MSG_SENT_KEY, SystemLabel
from temba.orgs.models import Org, ALL_EVENTS, ACCOUNT_SID, ACCOUNT_TOKEN, APPLICATION_SID, NEXMO_KEY, NEXMO_SECRET, FREE_PLAN, NEXMO_UUID, \
NEXMO_APP_ID, NEXMO_APP_PRIVATE_KEY
from temba.tests import TembaTest, MockResponse, MockTwilioClient, MockRequestValidator, AnonymousOrg
from temba.triggers.models import Trigger
from temba.utils import dict_to_struct
from telegram import User as TelegramUser
from twilio import TwilioRestException
from twilio.util import RequestValidator
from twython import TwythonError
from urllib import urlencode
from xml.etree import ElementTree as ET
from .models import Channel, ChannelCount, ChannelEvent, SyncEvent, Alert, ChannelLog, TEMBA_HEADERS, HUB9_ENDPOINT
from .models import DART_MEDIA_ENDPOINT
from .tasks import check_channels_task, squash_channelcounts
from .views import TWILIO_SUPPORTED_COUNTRIES
class ChannelTest(TembaTest):
    def setUp(self):
        """Replace the default test channel with three known fixtures: an
        Android tel channel, a Twitter channel, and an org-less (released)
        Nexmo channel."""
        super(ChannelTest, self).setUp()
        # drop the channel TembaTest created so the fixtures below are the only ones
        self.channel.delete()
        self.tel_channel = Channel.create(self.org, self.user, 'RW', 'A', name="Test Channel", address="+250785551212",
                                          role="SR", secret="12345", gcm_id="123")
        self.twitter_channel = Channel.create(self.org, self.user, None, 'TT', name="Twitter Channel",
                                              address="billy_bob", role="SR", scheme='twitter')
        # no org: simulates a channel that has been released
        self.released_channel = Channel.create(None, self.user, None, 'NX', name="Released Channel", address=None,
                                               secret=None, gcm_id="000")
def send_message(self, numbers, message, org=None, user=None):
if not org:
org = self.org
if not user:
user = self.user
group = ContactGroup.get_or_create(org, user, 'Numbers: %s' % ','.join(numbers))
contacts = list()
for number in numbers:
contacts.append(Contact.get_or_create(org, user, name=None, urns=[URN.from_tel(number)]))
group.contacts.add(*contacts)
broadcast = Broadcast.create(org, user, message, [group])
broadcast.send()
msg = Msg.objects.filter(broadcast=broadcast).order_by('text', 'pk')
if len(numbers) == 1:
return msg.first()
else:
return list(msg)
def assertHasCommand(self, cmd_name, response):
self.assertEquals(200, response.status_code)
data = response.json()
for cmd in data['cmds']:
if cmd['cmd'] == cmd_name:
return
raise Exception("Did not find '%s' cmd in response: '%s'" % (cmd_name, response.content))
def test_message_context(self):
context = self.tel_channel.build_message_context()
self.assertEqual(context['__default__'], '+250 785 551 212')
self.assertEqual(context['name'], 'Test Channel')
self.assertEqual(context['address'], '+250 785 551 212')
self.assertEqual(context['tel'], '+250 785 551 212')
self.assertEqual(context['tel_e164'], '+250785551212')
context = self.twitter_channel.build_message_context()
self.assertEqual(context['__default__'], '@billy_bob')
self.assertEqual(context['name'], 'Twitter Channel')
self.assertEqual(context['address'], '@billy_bob')
self.assertEqual(context['tel'], '')
self.assertEqual(context['tel_e164'], '')
context = self.released_channel.build_message_context()
self.assertEqual(context['__default__'], 'Released Channel')
self.assertEqual(context['name'], 'Released Channel')
self.assertEqual(context['address'], '')
self.assertEqual(context['tel'], '')
self.assertEqual(context['tel_e164'], '')
def test_deactivate(self):
self.login(self.admin)
self.tel_channel.is_active = False
self.tel_channel.save()
response = self.client.get(reverse('channels.channel_read', args=[self.tel_channel.uuid]))
self.assertEquals(404, response.status_code)
def test_delegate_channels(self):
self.login(self.admin)
# we don't support IVR yet
self.assertFalse(self.org.supports_ivr())
# pretend we are connected to twiliko
self.org.config = json.dumps(dict(ACCOUNT_SID='AccountSid', ACCOUNT_TOKEN='AccountToken', APPLICATION_SID='AppSid'))
self.org.save()
# add a delegate caller
post_data = dict(channel=self.tel_channel.pk, connection='T')
response = self.client.post(reverse('channels.channel_create_caller'), post_data)
# now we should be IVR capable
self.assertTrue(self.org.supports_ivr())
# should now have the option to disable
self.login(self.admin)
response = self.client.get(reverse('channels.channel_read', args=[self.tel_channel.uuid]))
self.assertContains(response, 'Disable Voice Calls')
# try adding a caller for an invalid channel
response = self.client.post('%s?channel=20000' % reverse('channels.channel_create_caller'))
self.assertEquals(200, response.status_code)
self.assertEquals('Sorry, a caller cannot be added for that number', response.context['form'].errors['channel'][0])
# disable our twilio connection
self.org.remove_twilio_account(self.admin)
self.assertFalse(self.org.supports_ivr())
# we should lose our caller
response = self.client.get(reverse('channels.channel_read', args=[self.tel_channel.uuid]))
self.assertNotContains(response, 'Disable Voice Calls')
# now try and add it back without a twilio connection
response = self.client.post(reverse('channels.channel_create_caller'), post_data)
# shouldn't have added, so no ivr yet
self.assertFalse(self.assertFalse(self.org.supports_ivr()))
self.assertEquals('A connection to a Twilio account is required', response.context['form'].errors['connection'][0])
def test_get_channel_type_name(self):
self.assertEquals(self.tel_channel.get_channel_type_name(), "Android Phone")
self.assertEquals(self.twitter_channel.get_channel_type_name(), "Twitter Channel")
self.assertEquals(self.released_channel.get_channel_type_name(), "Nexmo Channel")
    def test_channel_selection(self):
        """Outgoing messages pick a send channel by number-prefix match,
        URN affinity (last channel that talked to the contact), and channel
        recency; scheme-based lookup covers non-tel channels."""
        # make our default tel channel MTN
        mtn = self.tel_channel
        mtn.name = "MTN"
        mtn.save()
        # create a channel for Tigo too
        tigo = Channel.create(self.org, self.user, 'RW', 'A', "Tigo", "+250725551212", secret="11111", gcm_id="456")
        # new contact on MTN should send with the MTN channel
        msg = self.send_message(['+250788382382'], "Sent to an MTN number")
        self.assertEquals(mtn, self.org.get_send_channel(contact_urn=msg.contact_urn))
        self.assertEquals(mtn, msg.channel)
        # new contact on Tigo should send with the Tigo channel
        msg = self.send_message(['+250728382382'], "Sent to a Tigo number")
        self.assertEquals(tigo, self.org.get_send_channel(contact_urn=msg.contact_urn))
        self.assertEquals(tigo, msg.channel)
        # now our MTN contact texts, the tigo number which should change their affinity
        msg = Msg.create_incoming(tigo, "tel:+250788382382", "Send an inbound message to Tigo")
        self.assertEquals(tigo, msg.channel)
        self.assertEquals(tigo, self.org.get_send_channel(contact_urn=msg.contact_urn))
        self.assertEquals(tigo, ContactURN.objects.get(path='+250788382382').channel)
        # new contact on Airtel (some overlap) should send with the Tigo channel since it is newest
        msg = self.send_message(['+250738382382'], "Sent to a Airtel number")
        self.assertEquals(tigo, self.org.get_send_channel(contact_urn=msg.contact_urn))
        self.assertEquals(tigo, msg.channel)
        # add a voice caller
        caller = Channel.add_call_channel(self.org, self.user, self.tel_channel)
        # set our affinity to the caller (ie, they were on an ivr call)
        ContactURN.objects.filter(path='+250788382382').update(channel=caller)
        # caller channels don't count for sending: falls back to MTN
        self.assertEquals(mtn, self.org.get_send_channel(contact_urn=ContactURN.objects.get(path='+250788382382')))
        # change channel numbers to be shortcodes, i.e. no overlap with contact numbers
        mtn.address = '1234'
        mtn.save()
        tigo.address = '1235'
        tigo.save()
        # should return the newest channel which is TIGO
        msg = self.send_message(['+250788382382'], "Sent to an MTN number, but with shortcode channels")
        self.assertEquals(tigo, msg.channel)
        self.assertEquals(tigo, self.org.get_send_channel(contact_urn=msg.contact_urn))
        # check for twitter
        self.assertEquals(self.twitter_channel, self.org.get_send_channel(scheme=TWITTER_SCHEME))
        contact = self.create_contact("Billy", number="+250722222222", twitter="billy_bob")
        twitter_urn = contact.get_urn(schemes=[TWITTER_SCHEME])
        self.assertEquals(self.twitter_channel, self.org.get_send_channel(contact_urn=twitter_urn))
        # calling without scheme or urn should raise exception
        self.assertRaises(ValueError, self.org.get_send_channel)
    def test_message_splitting(self):
        """Outgoing messages are split according to the channel type's max
        message length (EX: 160 chars, NX: 1600 chars)."""
        # external API requires messages to be <= 160 chars
        self.tel_channel.channel_type = 'EX'
        self.tel_channel.save()
        msg = Msg.create_outgoing(self.org, self.user, 'tel:+250738382382', 'x' * 400) # 400 chars long
        Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
        # 400 chars on a 160-char channel => 3 parts
        self.assertEqual(3, Msg.objects.get(pk=msg.id).msg_count)
        # Nexmo limit is 1600
        self.tel_channel.channel_type = 'NX'
        self.tel_channel.save()
        cache.clear() # clear the channel from cache
        msg = Msg.create_outgoing(self.org, self.user, 'tel:+250738382382', 'y' * 400)
        Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
        self.assertEqual(self.tel_channel, Msg.objects.get(pk=msg.id).channel)
        # 400 chars fits in a single 1600-char Nexmo message
        self.assertEqual(1, Msg.objects.get(pk=msg.id).msg_count)
def test_ensure_normalization(self):
self.tel_channel.country = 'RW'
self.tel_channel.save()
contact1 = self.create_contact("contact1", "0788111222")
contact2 = self.create_contact("contact2", "+250788333444")
contact3 = self.create_contact("contact3", "+18006927753")
self.org.normalize_contact_tels()
norm_c1 = Contact.objects.get(pk=contact1.pk)
norm_c2 = Contact.objects.get(pk=contact2.pk)
norm_c3 = Contact.objects.get(pk=contact3.pk)
self.assertEquals(norm_c1.get_urn(TEL_SCHEME).path, "+250788111222")
self.assertEquals(norm_c2.get_urn(TEL_SCHEME).path, "+250788333444")
self.assertEquals(norm_c3.get_urn(TEL_SCHEME).path, "+18006927753")
def test_channel_create(self):
# can't use an invalid scheme for a fixed-scheme channel type
with self.assertRaises(ValueError):
Channel.create(self.org, self.user, 'KE', 'AT', None, '+250788123123',
config=dict(username='at-user', api_key='africa-key'),
uuid='00000000-0000-0000-0000-000000001234',
scheme='fb')
# a scheme is required
with self.assertRaises(ValueError):
Channel.create(self.org, self.user, 'US', 'EX', None, '+12065551212',
uuid='00000000-0000-0000-0000-000000001234',
scheme=None)
# country channels can't have scheme
with self.assertRaises(ValueError):
Channel.create(self.org, self.user, 'US', 'EX', None, '+12065551212',
uuid='00000000-0000-0000-0000-000000001234',
scheme='fb')
    def test_delete(self):
        """Deleting (releasing) a channel clears its secrets, fails its queued
        messages, removes its triggers, and tells the device to release on
        the next sync — while keeping messages/calls attached to the org."""
        self.org.administrators.add(self.user)
        self.user.set_org(self.org)
        self.login(self.user)
        # a message, a call, and a broadcast
        msg = self.send_message(['250788382382'], "How is it going?")
        call = ChannelEvent.create(self.tel_channel, "tel:+250788383385", ChannelEvent.TYPE_CALL_IN, timezone.now(), 5)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.tel_channel, msg.channel)
        self.assertEquals(1, Msg.get_messages(self.org).count())
        self.assertEquals(1, ChannelEvent.get_all(self.org).count())
        self.assertEquals(1, Broadcast.get_broadcasts(self.org).count())
        # start off in the pending state
        self.assertEquals('P', msg.status)
        response = self.fetch_protected(reverse('channels.channel_delete', args=[self.tel_channel.pk]), self.user)
        self.assertContains(response, 'Test Channel')
        response = self.fetch_protected(reverse('channels.channel_delete', args=[self.tel_channel.pk]),
                                        post_data=dict(remove=True), user=self.user)
        self.assertRedirect(response, reverse("orgs.org_home"))
        msg = Msg.objects.get(pk=msg.pk)
        # channel object survives but its device credentials are wiped
        self.assertIsNotNone(msg.channel)
        self.assertIsNone(msg.channel.gcm_id)
        self.assertIsNone(msg.channel.secret)
        self.assertEquals(self.org, msg.org)
        # queued messages for the channel should get marked as failed
        self.assertEquals('F', msg.status)
        call = ChannelEvent.objects.get(pk=call.pk)
        self.assertIsNotNone(call.channel)
        self.assertIsNone(call.channel.gcm_id)
        self.assertIsNone(call.channel.secret)
        self.assertEquals(self.org, call.org)
        broadcast = Broadcast.objects.get(pk=msg.broadcast.pk)
        self.assertEquals(self.org, broadcast.org)
        # should still be considered that user's message, call and broadcast
        self.assertEquals(1, Msg.get_messages(self.org).count())
        self.assertEquals(1, ChannelEvent.get_all(self.org).count())
        self.assertEquals(1, Broadcast.get_broadcasts(self.org).count())
        # syncing this channel should result in a release
        post_data = dict(cmds=[dict(cmd="status", p_sts="CHA", p_src="BAT", p_lvl="60", net="UMTS", pending=[], retry=[])])
        # now send the channel's updates
        response = self.sync(self.tel_channel, post_data)
        # our response should contain a release
        self.assertHasCommand('rel', response)
        # create a channel
        channel = Channel.create(self.org, self.user, 'RW', 'A', "Test Channel", "0785551212",
                                 secret="12345", gcm_id="123")
        # superusers can delete channels of any org
        response = self.fetch_protected(reverse('channels.channel_delete', args=[channel.pk]), self.superuser)
        self.assertContains(response, 'Test Channel')
        response = self.fetch_protected(reverse('channels.channel_delete', args=[channel.pk]),
                                        post_data=dict(remove=True), user=self.superuser)
        self.assertRedirect(response, reverse("orgs.org_home"))
        # create a channel
        channel = Channel.create(self.org, self.user, 'RW', 'A', "Test Channel", "0785551212",
                                 secret="12345", gcm_id="123")
        # add channel trigger
        from temba.triggers.models import Trigger
        Trigger.objects.create(org=self.org, flow=self.create_flow(), channel=channel,
                               modified_by=self.admin, created_by=self.admin)
        self.assertTrue(Trigger.objects.filter(channel=channel, is_active=True))
        response = self.fetch_protected(reverse('channels.channel_delete', args=[channel.pk]),
                                        post_data=dict(remove=True), user=self.superuser)
        self.assertRedirect(response, reverse("orgs.org_home"))
        # channel trigger should have be removed
        self.assertFalse(Trigger.objects.filter(channel=channel, is_active=True))
    def test_list(self):
        """Channel list view: redirects to the claim page with no channels,
        to the read page with exactly one, and renders a real list with two
        or more (superusers always get the list)."""
        # de-activate existing channels
        Channel.objects.all().update(is_active=False)
        # list page redirects to claim page
        self.login(self.user)
        response = self.client.get(reverse('channels.channel_list'))
        self.assertRedirect(response, reverse('channels.channel_claim'))
        # unless you're a superuser
        self.login(self.superuser)
        response = self.client.get(reverse('channels.channel_list'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(list(response.context['object_list']), [])
        # re-activate one of the channels so org has a single channel
        self.tel_channel.is_active = True
        self.tel_channel.save()
        # list page now redirects to channel read page
        self.login(self.user)
        response = self.client.get(reverse('channels.channel_list'))
        self.assertRedirect(response, reverse('channels.channel_read', args=[self.tel_channel.uuid]))
        # unless you're a superuser
        self.login(self.superuser)
        response = self.client.get(reverse('channels.channel_list'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(list(response.context['object_list']), [self.tel_channel])
        # re-activate other channel so org now has two channels
        self.twitter_channel.is_active = True
        self.twitter_channel.save()
        # no-more redirection for anyone
        self.login(self.user)
        response = self.client.get(reverse('channels.channel_list'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(set(response.context['object_list']), {self.tel_channel, self.twitter_channel})
        # clear out the phone and name for the Android channel
        self.tel_channel.name = None
        self.tel_channel.address = None
        self.tel_channel.save()
        # nameless/addressless channels render a placeholder plus the type name
        response = self.client.get(reverse('channels.channel_list'))
        self.assertContains(response, "Unknown")
        self.assertContains(response, "Android Phone")
def test_channel_status(self):
# visit page as a viewer
self.login(self.user)
response = self.client.get('/', follow=True)
self.assertNotIn('unsent_msgs', response.context, msg="Found unsent_msgs in context")
self.assertNotIn('delayed_syncevents', response.context, msg="Found delayed_syncevents in context")
# visit page as superuser
self.login(self.superuser)
response = self.client.get('/', follow=True)
# superusers doesn't have orgs thus cannot have both values
self.assertNotIn('unsent_msgs', response.context, msg="Found unsent_msgs in context")
self.assertNotIn('delayed_syncevents', response.context, msg="Found delayed_syncevents in context")
# visit page as administrator
self.login(self.admin)
response = self.client.get('/', follow=True)
# there is not unsent nor delayed syncevents
self.assertNotIn('unsent_msgs', response.context, msg="Found unsent_msgs in context")
self.assertNotIn('delayed_syncevents', response.context, msg="Found delayed_syncevents in context")
# replace existing channels with a single Android device
Channel.objects.update(is_active=False)
channel = Channel.create(self.org, self.user, None, Channel.TYPE_ANDROID, None, "+250781112222", gcm_id="asdf", secret="asdf")
channel.created_on = timezone.now() - timedelta(hours=2)
channel.save()
response = self.client.get('/', Follow=True)
self.assertNotIn('delayed_syncevents', response.context)
self.assertNotIn('unsent_msgs', response.context, msg="Found unsent_msgs in context")
# simulate a sync in back in two hours
post_data = dict(cmds=[
# device details status
dict(cmd="status", p_sts="CHA", p_src="BAT", p_lvl="60",
net="UMTS", pending=[], retry=[])])
self.sync(channel, post_data)
sync_event = SyncEvent.objects.all()[0]
sync_event.created_on = timezone.now() - timedelta(hours=2)
sync_event.save()
response = self.client.get('/', Follow=True)
self.assertIn('delayed_syncevents', response.context)
self.assertNotIn('unsent_msgs', response.context, msg="Found unsent_msgs in context")
# add a message, just sent so shouldn't have delayed
msg = Msg.create_outgoing(self.org, self.user, 'tel:250788123123', "test")
response = self.client.get('/', Follow=True)
self.assertIn('delayed_syncevents', response.context)
self.assertNotIn('unsent_msgs', response.context, msg="Found unsent_msgs in context")
# but put it in the past
msg.delete()
msg = Msg.create_outgoing(self.org, self.user, 'tel:250788123123', "test",
created_on=timezone.now() - timedelta(hours=3))
response = self.client.get('/', Follow=True)
self.assertIn('delayed_syncevents', response.context)
self.assertIn('unsent_msgs', response.context, msg="Found unsent_msgs in context")
# if there is a successfully sent message after sms was created we do not consider it as delayed
success_msg = Msg.create_outgoing(self.org, self.user, 'tel:+250788123123', "success-send",
created_on=timezone.now() - timedelta(hours=2))
success_msg.sent_on = timezone.now() - timedelta(hours=2)
success_msg.status = 'S'
success_msg.save()
response = self.client.get('/', Follow=True)
self.assertIn('delayed_syncevents', response.context)
self.assertNotIn('unsent_msgs', response.context, msg="Found unsent_msgs in context")
# test that editors have the channel of the the org the are using
other_user = self.create_user("Other")
self.create_secondary_org()
self.org2.administrators.add(other_user)
self.org.editors.add(other_user)
self.assertFalse(self.org2.channels.all())
self.login(other_user)
other_user.set_org(self.org2)
self.assertEquals(self.org2, other_user.get_org())
response = self.client.get('/', follow=True)
self.assertNotIn('channel_type', response.context, msg="Found channel_type in context")
other_user.set_org(self.org)
self.assertEquals(1, self.org.channels.filter(is_active=True).count())
self.assertEquals(self.org, other_user.get_org())
response = self.client.get('/', follow=True)
# self.assertIn('channel_type', response.context)
def sync(self, channel, post_data=None, signature=None):
if not post_data:
post_data = "{}"
else:
post_data = json.dumps(post_data)
ts = int(time.time())
if not signature:
# sign the request
key = str(channel.secret) + str(ts)
signature = hmac.new(key=key, msg=bytes(post_data), digestmod=hashlib.sha256).digest()
# base64 and url sanitize
signature = urllib2.quote(base64.urlsafe_b64encode(signature))
return self.client.post("%s?signature=%s&ts=%d" % (reverse('sync', args=[channel.pk]), signature, ts),
content_type='application/json', data=post_data)
    def test_update(self):
        """Channel update page: permissions per role, type-dependent editable
        fields (e.g. no address edit for Twilio), and address display
        formatting for alphanumeric and Twitter addresses."""
        update_url = reverse('channels.channel_update', args=[self.tel_channel.id])
        # only user of the org can view the update page of a channel
        self.client.logout()
        self.login(self.user)
        response = self.client.get(update_url)
        # not yet an org admin: redirected away
        self.assertEquals(302, response.status_code)
        self.login(self.user)
        # visit the channel's update page as a manager within the channel's organization
        self.org.administrators.add(self.user)
        response = self.fetch_protected(update_url, self.user)
        self.assertEquals(200, response.status_code)
        self.assertEquals(response.request['PATH_INFO'], update_url)
        channel = Channel.objects.get(pk=self.tel_channel.id)
        self.assertEquals(channel.name, "Test Channel")
        self.assertEquals(channel.address, "+250785551212")
        postdata = dict()
        postdata['name'] = "Test Channel Update1"
        postdata['address'] = "+250785551313"
        self.login(self.user)
        response = self.client.post(update_url, postdata, follow=True)
        channel = Channel.objects.get(pk=self.tel_channel.id)
        self.assertEquals(channel.name, "Test Channel Update1")
        self.assertEquals(channel.address, "+250785551313")
        # if we change the channel to a twilio type, shouldn't be able to edit our address
        channel.channel_type = Channel.TYPE_TWILIO
        channel.save()
        response = self.client.get(update_url)
        self.assertFalse('address' in response.context['form'].fields)
        # bring it back to android
        channel.channel_type = Channel.TYPE_ANDROID
        channel.save()
        # visit the channel's update page as administrator
        self.org.administrators.add(self.user)
        self.user.set_org(self.org)
        response = self.fetch_protected(update_url, self.user)
        self.assertEquals(200, response.status_code)
        self.assertEquals(response.request['PATH_INFO'], update_url)
        channel = Channel.objects.get(pk=self.tel_channel.id)
        self.assertEquals(channel.name, "Test Channel Update1")
        self.assertEquals(channel.address, "+250785551313")
        postdata = dict()
        postdata['name'] = "Test Channel Update2"
        postdata['address'] = "+250785551414"
        response = self.fetch_protected(update_url, self.user, postdata)
        channel = Channel.objects.get(pk=self.tel_channel.id)
        self.assertEquals(channel.name, "Test Channel Update2")
        self.assertEquals(channel.address, "+250785551414")
        # visit the channel's update page as superuser
        self.superuser.set_org(self.org)
        response = self.fetch_protected(update_url, self.superuser)
        self.assertEquals(200, response.status_code)
        self.assertEquals(response.request['PATH_INFO'], update_url)
        channel = Channel.objects.get(pk=self.tel_channel.id)
        self.assertEquals(channel.name, "Test Channel Update2")
        self.assertEquals(channel.address, "+250785551414")
        postdata = dict()
        postdata['name'] = "Test Channel Update3"
        postdata['address'] = "+250785551515"
        response = self.fetch_protected(update_url, self.superuser, postdata)
        channel = Channel.objects.get(pk=self.tel_channel.id)
        self.assertEquals(channel.name, "Test Channel Update3")
        self.assertEquals(channel.address, "+250785551515")
        # make sure channel works with alphanumeric numbers
        channel.address = "EATRIGHT"
        self.assertEquals("EATRIGHT", channel.get_address_display())
        self.assertEquals("EATRIGHT", channel.get_address_display(e164=True))
        # change channel type to Twitter
        channel.channel_type = Channel.TYPE_TWITTER
        channel.address = 'billy_bob'
        channel.scheme = 'twitter'
        channel.config = json.dumps({'handle_id': 12345, 'oauth_token': 'abcdef', 'oauth_token_secret': '23456'})
        channel.save()
        # twitter addresses are displayed with an @ prefix
        self.assertEquals('@billy_bob', channel.get_address_display())
        self.assertEquals('@billy_bob', channel.get_address_display(e164=True))
        response = self.fetch_protected(update_url, self.user)
        self.assertEquals(200, response.status_code)
        self.assertIn('name', response.context['fields'])
        self.assertIn('alert_email', response.context['fields'])
        self.assertIn('address', response.context['fields'])
        self.assertNotIn('country', response.context['fields'])
        postdata = dict()
        postdata['name'] = "Twitter2"
        postdata['alert_email'] = "bob@example.com"
        postdata['address'] = "billy_bob"
        # updating a Twitter channel refreshes the Mage twitter stream (mocked)
        with patch('temba.utils.mage.MageClient.refresh_twitter_stream') as refresh_twitter_stream:
            refresh_twitter_stream.return_value = dict()
            self.fetch_protected(update_url, self.user, postdata)
            channel = Channel.objects.get(pk=self.tel_channel.id)
            self.assertEquals(channel.name, "Twitter2")
            self.assertEquals(channel.alert_email, "bob@example.com")
            self.assertEquals(channel.address, "billy_bob")
def test_read(self):
    """
    Exercise the channel read page: sync-event stats, access control, and the
    message charts/tables for regular, test-contact and IVR traffic.

    Fixes: deprecated ``assertEquals`` aliases replaced with ``assertEqual``,
    and the duplicated ``source_stats[0][1]`` assertion now checks the BAT
    row (``[1][1]``) as the surrounding assertions clearly intended.
    """
    # sync once on battery/UMTS and once on AC/WIFI so the read page has two
    # sync events with distinct power sources and network types
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="CHA", p_src="BAT", p_lvl="60",
             net="UMTS", pending=[], retry=[])])

    # now send the channel's updates
    self.sync(self.tel_channel, post_data)
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="FUL", p_src="AC", p_lvl="100",
             net="WIFI", pending=[], retry=[])])

    # now send the channel's updates
    self.sync(self.tel_channel, post_data)
    self.assertEqual(2, SyncEvent.objects.all().count())

    # non-org users can't view our channels
    self.login(self.non_org_user)
    response = self.client.get(reverse('channels.channel_read', args=[self.tel_channel.uuid]))
    self.assertLoginRedirect(response)

    # org users can
    response = self.fetch_protected(reverse('channels.channel_read', args=[self.tel_channel.uuid]), self.user)

    # one stats row per distinct power source / network type
    self.assertEqual(len(response.context['source_stats']),
                     len(SyncEvent.objects.values_list('power_source', flat=True).distinct()))
    self.assertEqual('AC', response.context['source_stats'][0][0])
    self.assertEqual(1, response.context['source_stats'][0][1])
    self.assertEqual('BAT', response.context['source_stats'][1][0])
    self.assertEqual(1, response.context['source_stats'][1][1])  # was re-checking [0][1] (copy-paste bug)

    self.assertEqual(len(response.context['network_stats']),
                     len(SyncEvent.objects.values_list('network_type', flat=True).distinct()))
    self.assertEqual('UMTS', response.context['network_stats'][0][0])
    self.assertEqual(1, response.context['network_stats'][0][1])
    self.assertEqual('WIFI', response.context['network_stats'][1][0])
    self.assertEqual(1, response.context['network_stats'][1][1])

    self.assertTrue(len(response.context['latest_sync_events']) <= 5)

    # without Twilio credentials the org home page offers no voice option
    response = self.fetch_protected(reverse('orgs.org_home'), self.admin)
    self.assertNotContains(response, 'Enable Voice')

    # Add twilio credentials to make sure we can add calling for our android channel
    twilio_config = {ACCOUNT_SID: 'SID', ACCOUNT_TOKEN: 'TOKEN', APPLICATION_SID: 'APP SID'}
    config = self.org.config_json()
    config.update(twilio_config)
    self.org.config = json.dumps(config)
    self.org.save(update_fields=['config'])

    response = self.fetch_protected(reverse('orgs.org_home'), self.admin)
    self.assertTrue(self.org.is_connected_to_twilio())
    self.assertContains(response, 'Enable Voice')

    two_hours_ago = timezone.now() - timedelta(hours=2)

    # make sure our channel is old enough to trigger alerts
    self.tel_channel.created_on = two_hours_ago
    self.tel_channel.save()

    # delayed sync status
    for sync in SyncEvent.objects.all():
        sync.created_on = two_hours_ago
        sync.save()

    # add a message, just sent so shouldn't be delayed
    Msg.create_outgoing(self.org, self.user, 'tel:250785551212', 'delayed message', created_on=two_hours_ago)

    response = self.fetch_protected(reverse('channels.channel_read', args=[self.tel_channel.uuid]), self.admin)
    self.assertIn('delayed_sync_event', response.context_data.keys())
    self.assertIn('unsent_msgs_count', response.context_data.keys())

    # with superuser
    response = self.fetch_protected(reverse('channels.channel_read', args=[self.tel_channel.uuid]), self.superuser)
    self.assertEqual(200, response.status_code)

    # now that we can access the channel, which messages do we display in the chart?
    joe = self.create_contact('Joe', '+2501234567890')
    test_contact = Contact.get_test_contact(self.admin)

    # should have two series, one for incoming one for outgoing
    self.assertEqual(2, len(response.context['message_stats']))

    # but only an outgoing message so far
    self.assertEqual(0, len(response.context['message_stats'][0]['data']))
    self.assertEqual(1, response.context['message_stats'][1]['data'][-1]['count'])

    # we have one row for the message stats table
    self.assertEqual(1, len(response.context['message_stats_table']))

    # only one outgoing message
    self.assertEqual(0, response.context['message_stats_table'][0]['incoming_messages_count'])
    self.assertEqual(1, response.context['message_stats_table'][0]['outgoing_messages_count'])
    self.assertEqual(0, response.context['message_stats_table'][0]['incoming_ivr_count'])
    self.assertEqual(0, response.context['message_stats_table'][0]['outgoing_ivr_count'])

    # send messages with a test contact
    Msg.create_incoming(self.tel_channel, test_contact.get_urn().urn, 'This incoming message will not be counted')
    Msg.create_outgoing(self.org, self.user, test_contact, 'This outgoing message will not be counted')

    response = self.fetch_protected(reverse('channels.channel_read', args=[self.tel_channel.uuid]), self.superuser)
    self.assertEqual(200, response.status_code)

    # nothing should change since it's a test contact
    self.assertEqual(0, len(response.context['message_stats'][0]['data']))
    self.assertEqual(1, response.context['message_stats'][1]['data'][-1]['count'])

    # no change on the table stats either
    self.assertEqual(1, len(response.context['message_stats_table']))
    self.assertEqual(0, response.context['message_stats_table'][0]['incoming_messages_count'])
    self.assertEqual(1, response.context['message_stats_table'][0]['outgoing_messages_count'])
    self.assertEqual(0, response.context['message_stats_table'][0]['incoming_ivr_count'])
    self.assertEqual(0, response.context['message_stats_table'][0]['outgoing_ivr_count'])

    # send messages with a normal contact
    Msg.create_incoming(self.tel_channel, joe.get_urn(TEL_SCHEME).urn, 'This incoming message will be counted')
    Msg.create_outgoing(self.org, self.user, joe, 'This outgoing message will be counted')

    # now we have an inbound message and two outbounds
    response = self.fetch_protected(reverse('channels.channel_read', args=[self.tel_channel.uuid]), self.superuser)
    self.assertEqual(200, response.status_code)
    self.assertEqual(1, response.context['message_stats'][0]['data'][-1]['count'])

    # this assertion is problematic causing time-sensitive failures, to reconsider
    # self.assertEqual(2, response.context['message_stats'][1]['data'][-1]['count'])

    # message stats table have an inbound and two outbounds in the last month
    self.assertEqual(1, len(response.context['message_stats_table']))
    self.assertEqual(1, response.context['message_stats_table'][0]['incoming_messages_count'])
    self.assertEqual(2, response.context['message_stats_table'][0]['outgoing_messages_count'])
    self.assertEqual(0, response.context['message_stats_table'][0]['incoming_ivr_count'])
    self.assertEqual(0, response.context['message_stats_table'][0]['outgoing_ivr_count'])

    # test cases for IVR messaging, make our relayer accept calls
    self.tel_channel.role = 'SCAR'
    self.tel_channel.save()

    from temba.msgs.models import IVR
    Msg.create_incoming(self.tel_channel, test_contact.get_urn().urn, 'incoming ivr as a test contact', msg_type=IVR)
    Msg.create_outgoing(self.org, self.user, test_contact, 'outgoing ivr as a test contact', msg_type=IVR)
    response = self.fetch_protected(reverse('channels.channel_read', args=[self.tel_channel.uuid]), self.superuser)

    # nothing should have changed (test contacts don't count)
    self.assertEqual(2, len(response.context['message_stats']))
    self.assertEqual(1, len(response.context['message_stats_table']))
    self.assertEqual(1, response.context['message_stats_table'][0]['incoming_messages_count'])
    self.assertEqual(2, response.context['message_stats_table'][0]['outgoing_messages_count'])
    self.assertEqual(0, response.context['message_stats_table'][0]['incoming_ivr_count'])
    self.assertEqual(0, response.context['message_stats_table'][0]['outgoing_ivr_count'])

    # now let's create an ivr interaction from a real contact
    Msg.create_incoming(self.tel_channel, joe.get_urn().urn, 'incoming ivr', msg_type=IVR)
    Msg.create_outgoing(self.org, self.user, joe, 'outgoing ivr', msg_type=IVR)
    response = self.fetch_protected(reverse('channels.channel_read', args=[self.tel_channel.uuid]), self.superuser)

    # two extra chart series for IVR traffic
    self.assertEqual(4, len(response.context['message_stats']))
    self.assertEqual(1, response.context['message_stats'][2]['data'][0]['count'])
    self.assertEqual(1, response.context['message_stats'][3]['data'][0]['count'])

    self.assertEqual(1, len(response.context['message_stats_table']))
    self.assertEqual(1, response.context['message_stats_table'][0]['incoming_messages_count'])
    self.assertEqual(2, response.context['message_stats_table'][0]['outgoing_messages_count'])
    self.assertEqual(1, response.context['message_stats_table'][0]['incoming_ivr_count'])
    self.assertEqual(1, response.context['message_stats_table'][0]['outgoing_ivr_count'])
def test_invalid(self):
    """
    Error handling on the sync endpoint: wrong HTTP method, unknown channel
    id, and a timestamp older than the allowed 15-minute window.

    Fixes: deprecated ``assertEquals`` aliases replaced with ``assertEqual``.
    """
    # Must be POST
    response = self.client.get("%s?signature=sig&ts=123" % (reverse('sync', args=[100])), content_type='application/json')
    self.assertEqual(500, response.status_code)

    # Unknown channel: still a 200, but the device is told to release itself
    response = self.client.post("%s?signature=sig&ts=123" % (reverse('sync', args=[999])), content_type='application/json')
    self.assertEqual(200, response.status_code)
    self.assertEqual('rel', response.json()['cmds'][0]['cmd'])

    # too old: 16 minutes in the past is past the accepted window
    ts = int(time.time()) - 60 * 16
    response = self.client.post("%s?signature=sig&ts=%d" % (reverse('sync', args=[self.tel_channel.pk]), ts), content_type='application/json')
    self.assertEqual(401, response.status_code)
    self.assertEqual(3, response.json()['error_id'])
def test_is_ussd_channel(self):
    """A claimed Vumi USSD channel reports is_ussd(); an Android relayer does not."""
    Channel.objects.all().delete()
    self.login(self.admin)

    # register a plain Android relayer, i.e. a channel that is not USSD
    android_commands = dict(cmds=[dict(cmd="gcm", gcm_id="GCM111", uuid='uuid'),
                                  dict(cmd='status', cc='RW', dev='Nexus')])
    register_response = self.client.post(reverse('register'), json.dumps(android_commands),
                                         content_type='application/json')
    self.assertEqual(200, register_response.status_code)

    # then claim a Vumi USSD channel
    ussd_form = {
        "country": "ZA",
        "number": "+273454325324",
        "account_key": "account1",
        "conversation_key": "conversation1"
    }
    claim_response = self.client.post(reverse('channels.channel_claim_vumi_ussd'), ussd_form)
    self.assertEqual(302, claim_response.status_code)

    # the Vumi channel is flagged as USSD, the Android relayer is not
    self.assertEqual(Channel.TYPE_VUMI_USSD, Channel.objects.first().channel_type)
    self.assertTrue(Channel.objects.first().is_ussd())
    self.assertFalse(Channel.objects.last().is_ussd())
def test_claim(self):
    """Access control for the claim page plus the rendered Twilio country list."""
    # plain org viewers are bounced to login
    self.login(self.user)
    claim_page = self.client.get(reverse('channels.channel_claim'))
    self.assertLoginRedirect(claim_page)

    # editors and admins can both view the claim page
    for account in (self.editor, self.admin):
        self.login(account)
        claim_page = self.client.get(reverse('channels.channel_claim'))
        self.assertEqual(200, claim_page.status_code)

    # the admin's page includes the list of Twilio-supported countries
    self.assertEqual(claim_page.context['twilio_countries'],
                     "Belgium, Canada, Finland, Norway, Poland, Spain, "
                     "Sweden, United Kingdom or United States")
def test_register_and_claim_android(self):
"""
End-to-end lifecycle of an Android relayer channel: anonymous registration,
claiming (auth + validation errors), re-registration with new GCM ids,
release/re-claim, Nexmo bulk-sender delegation, and deriving the org
country from its channels. Order-sensitive: each section builds on the
database state left by the previous one.
"""
# remove our explicit country so it needs to be derived from channels
self.org.country = None
self.org.save()
Channel.objects.all().delete()
reg_data = dict(cmds=[dict(cmd="gcm", gcm_id="GCM111", uuid='uuid'),
dict(cmd='status', cc='RW', dev='Nexus')])
# must be a post
response = self.client.get(reverse('register'), content_type='application/json')
self.assertEqual(500, response.status_code)
# try a legit register
response = self.client.post(reverse('register'), json.dumps(reg_data), content_type='application/json')
self.assertEqual(200, response.status_code)
android1 = Channel.objects.get()
# unclaimed channel: no org/address/alert email yet, but it has a
# secret and a claim code, and is owned by the anonymous user
self.assertIsNone(android1.org)
self.assertIsNone(android1.address)
self.assertIsNone(android1.alert_email)
self.assertEqual(android1.country, 'RW')
self.assertEqual(android1.device, 'Nexus')
self.assertEqual(android1.gcm_id, 'GCM111')
self.assertEqual(android1.uuid, 'uuid')
self.assertTrue(android1.secret)
self.assertTrue(android1.claim_code)
self.assertEqual(android1.created_by.username, settings.ANONYMOUS_USER_NAME)
# check channel JSON in response
response_json = response.json()
self.assertEqual(response_json, dict(cmds=[dict(cmd='reg',
relayer_claim_code=android1.claim_code,
relayer_secret=android1.secret,
relayer_id=android1.id)]))
# try registering again with same details
response = self.client.post(reverse('register'), json.dumps(reg_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
android1 = Channel.objects.get()
response_json = response.json()
self.assertEqual(response_json, dict(cmds=[dict(cmd='reg',
relayer_claim_code=android1.claim_code,
relayer_secret=android1.secret,
relayer_id=android1.id)]))
# try to claim as non-admin
self.login(self.user)
response = self.client.post(reverse('channels.channel_claim_android'),
dict(claim_code=android1.claim_code, phone_number="0788123123"))
self.assertLoginRedirect(response)
# try to claim with an invalid phone number
self.login(self.admin)
response = self.client.post(reverse('channels.channel_claim_android'),
dict(claim_code=android1.claim_code, phone_number="078123"))
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'phone_number', "Invalid phone number, try again.")
# claim our channel
response = self.client.post(reverse('channels.channel_claim_android'),
dict(claim_code=android1.claim_code, phone_number="0788123123"))
# redirect to welcome page
self.assertTrue('success' in response.get('Location', None))
self.assertRedirect(response, reverse('public.public_welcome'))
# channel is updated with org details and claim code is now blank
android1.refresh_from_db()
secret = android1.secret
self.assertEqual(android1.org, self.org)
self.assertEqual(android1.address, '+250788123123')  # normalized
self.assertEqual(android1.alert_email, self.admin.email)  # the logged-in user
self.assertEqual(android1.gcm_id, 'GCM111')
self.assertEqual(android1.uuid, 'uuid')
self.assertFalse(android1.claim_code)
# try having a device register again
response = self.client.post(reverse('register'), json.dumps(reg_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
# should return same channel but with a new claim code and secret
android1.refresh_from_db()
self.assertEqual(android1.org, self.org)
self.assertEqual(android1.address, '+250788123123')
self.assertEqual(android1.alert_email, self.admin.email)
self.assertEqual(android1.gcm_id, 'GCM111')
self.assertEqual(android1.uuid, 'uuid')
self.assertEqual(android1.is_active, True)
self.assertTrue(android1.claim_code)
self.assertNotEqual(android1.secret, secret)
# should be able to claim again
response = self.client.post(reverse('channels.channel_claim_android'),
dict(claim_code=android1.claim_code, phone_number="0788123123"))
self.assertRedirect(response, reverse('public.public_welcome'))
# try having a device register yet again with new GCM ID
reg_data['cmds'][0]['gcm_id'] = "GCM222"
response = self.client.post(reverse('register'), json.dumps(reg_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
# should return same channel but with GCM updated
android1.refresh_from_db()
self.assertEqual(android1.org, self.org)
self.assertEqual(android1.address, '+250788123123')
self.assertEqual(android1.alert_email, self.admin.email)
self.assertEqual(android1.gcm_id, 'GCM222')
self.assertEqual(android1.uuid, 'uuid')
self.assertEqual(android1.is_active, True)
# we can claim again with new phone number
response = self.client.post(reverse('channels.channel_claim_android'),
dict(claim_code=android1.claim_code, phone_number="+250788123124"))
self.assertRedirect(response, reverse('public.public_welcome'))
android1.refresh_from_db()
self.assertEqual(android1.org, self.org)
self.assertEqual(android1.address, '+250788123124')
self.assertEqual(android1.alert_email, self.admin.email)
self.assertEqual(android1.gcm_id, 'GCM222')
self.assertEqual(android1.uuid, 'uuid')
self.assertEqual(android1.is_active, True)
# release and then register with same details and claim again
old_uuid = android1.uuid
android1.release()
response = self.client.post(reverse('register'), json.dumps(reg_data), content_type='application/json')
claim_code = response.json()['cmds'][0]['relayer_claim_code']
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse('channels.channel_claim_android'),
dict(claim_code=claim_code, phone_number="+250788123124"))
self.assertRedirect(response, reverse('public.public_welcome'))
android1.refresh_from_db()
self.assertNotEqual(android1.uuid, old_uuid)  # inactive channel now has new UUID
# and we have a new Android channel with our UUID
android2 = Channel.objects.get(is_active=True)
self.assertNotEqual(android2, android1)
self.assertEqual(android2.uuid, 'uuid')
# try to claim a bogus channel
response = self.client.post(reverse('channels.channel_claim_android'), dict(claim_code="Your Mom"))
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'claim_code', "Invalid claim code, please check and try again.")
# check our primary tel channel is the same as our outgoing
default_sender = self.org.get_send_channel(TEL_SCHEME)
self.assertEqual(default_sender, android2)
self.assertEqual(default_sender, self.org.get_receive_channel(TEL_SCHEME))
self.assertFalse(default_sender.is_delegate_sender())
# try to claim a bulk Nexmo sender (without adding Nexmo account to org)
claim_nexmo_url = reverse('channels.channel_create_bulk_sender') + "?connection=NX&channel=%d" % android2.pk
response = self.client.post(claim_nexmo_url, dict(connection='NX', channel=android2.pk))
self.assertFormError(response, 'form', 'connection', "A connection to a Nexmo account is required")
# send channel is still our Android device
self.assertEqual(self.org.get_send_channel(TEL_SCHEME), android2)
self.assertFalse(self.org.is_connected_to_nexmo())
# now connect to nexmo
with patch('temba.utils.nexmo.NexmoClient.update_account') as connect:
connect.return_value = True
with patch('nexmo.Client.create_application') as create_app:
create_app.return_value = dict(id='app-id', keys=dict(private_key='private-key'))
self.org.connect_nexmo('123', '456', self.admin)
self.org.save()
self.assertTrue(self.org.is_connected_to_nexmo())
# now adding Nexmo bulk sender should work
response = self.client.post(claim_nexmo_url, dict(connection='NX', channel=android2.pk))
self.assertRedirect(response, reverse('orgs.org_home'))
# new Nexmo channel created for delegated sending
nexmo = self.org.get_send_channel(TEL_SCHEME)
self.assertEqual(nexmo.channel_type, 'NX')
self.assertEqual(nexmo.parent, android2)
self.assertTrue(nexmo.is_delegate_sender())
# reading our nexmo channel should now offer a disconnect option
nexmo = self.org.channels.filter(channel_type='NX').first()
response = self.client.get(reverse('channels.channel_read', args=[nexmo.uuid]))
self.assertContains(response, 'Disable Bulk Sending')
# receiving still job of our Android device
self.assertEqual(self.org.get_receive_channel(TEL_SCHEME), android2)
# re-register device with country as US
reg_data = dict(cmds=[dict(cmd="gcm", gcm_id="GCM222", uuid='uuid'),
dict(cmd='status', cc='US', dev="Nexus 5X")])
response = self.client.post(reverse('register'), json.dumps(reg_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
# channel country and device updated
android2.refresh_from_db()
self.assertEqual(android2.country, 'US')
self.assertEqual(android2.device, "Nexus 5X")
self.assertEqual(android2.org, self.org)
self.assertEqual(android2.gcm_id, "GCM222")
self.assertEqual(android2.uuid, "uuid")
self.assertTrue(android2.is_active)
# set back to RW...
android2.country = 'RW'
android2.save()
# our country is RW
self.assertEqual(self.org.get_country_code(), 'RW')
# remove nexmo
nexmo.release()
self.assertEqual(self.org.get_country_code(), 'RW')
# register another device with country as US
reg_data = dict(cmds=[dict(cmd="gcm", gcm_id="GCM444", uuid='uuid4'),
dict(cmd='status', cc='US', dev="Nexus 6P")])
response = self.client.post(reverse('register'), json.dumps(reg_data), content_type='application/json')
claim_code = response.json()['cmds'][0]['relayer_claim_code']
# try to claim it...
self.client.post(reverse('channels.channel_claim_android'), dict(claim_code=claim_code, phone_number="12065551212"))
# should work, can have two channels in different countries
channel = Channel.objects.get(country='US')
self.assertEqual(channel.address, '+12065551212')
self.assertEqual(Channel.objects.filter(org=self.org, is_active=True).count(), 2)
# normalize a URN with a fully qualified number
number, valid = URN.normalize_number('+12061112222', None)
self.assertTrue(valid)
# not international format
number, valid = URN.normalize_number('0788383383', None)
self.assertFalse(valid)
# get our send channel without a URN, should just default to last
default_channel = self.org.get_send_channel(TEL_SCHEME)
self.assertEqual(default_channel, channel)
# get our send channel for a Rwandan URN
rwanda_channel = self.org.get_send_channel(TEL_SCHEME, ContactURN.create(self.org, None, 'tel:+250788383383'))
self.assertEqual(rwanda_channel, android2)
# and a US one
us_channel = self.org.get_send_channel(TEL_SCHEME, ContactURN.create(self.org, None, 'tel:+12065555353'))
self.assertEqual(us_channel, channel)
# a different country altogether should just give us the default
us_channel = self.org.get_send_channel(TEL_SCHEME, ContactURN.create(self.org, None, 'tel:+593997290044'))
self.assertEqual(us_channel, channel)
# reload the org: with channels in two countries there is no single derivable country
self.org = Org.objects.get(id=self.org.id)
self.assertIsNone(self.org.get_country_code())
# yet another registration in rwanda
reg_data = dict(cmds=[dict(cmd="gcm", gcm_id="GCM555", uuid='uuid5'),
dict(cmd='status', cc='RW', dev="Nexus 5")])
response = self.client.post(reverse('register'), json.dumps(reg_data), content_type='application/json')
claim_code = response.json()['cmds'][0]['relayer_claim_code']
# try to claim it with number taken by other Android channel
response = self.client.post(reverse('channels.channel_claim_android'),
dict(claim_code=claim_code, phone_number="+250788123124"))
self.assertFormError(response, 'form', 'phone_number', "Another channel has this number. Please remove that channel first.")
# create channel in another org
self.create_secondary_org()
Channel.create(self.org2, self.admin2, 'RW', 'A', "", "+250788382382")
# can claim it with this number, and because it's a fully qualified RW number, doesn't matter that channel is US
response = self.client.post(reverse('channels.channel_claim_android'),
dict(claim_code=claim_code, phone_number="+250788382382"))
self.assertRedirect(response, reverse('public.public_welcome'))
# should be added with RW as the country
self.assertTrue(Channel.objects.get(address='+250788382382', country='RW', org=self.org))
@patch('temba.orgs.models.TwilioRestClient', MockTwilioClient)
@patch('temba.ivr.clients.TwilioClient', MockTwilioClient)
@patch('twilio.util.RequestValidator', MockRequestValidator)
def test_claim_twilio(self):
    """
    Claiming Twilio channels: credential checks, number search, claiming
    SMS+voice, voice-only and short-code numbers, and channel deletion
    behavior on Twilio API errors.

    Fixes: deprecated ``assertEquals`` aliases replaced with ``assertEqual``
    and ``'x' in context`` checks with ``assertIn``.
    """
    self.login(self.admin)

    # remove any existing channels
    self.org.channels.update(is_active=False, org=None)

    # make sure twilio is on the claim page
    response = self.client.get(reverse('channels.channel_claim'))
    self.assertContains(response, "Twilio")
    self.assertContains(response, reverse('orgs.org_twilio_connect'))

    # wire up twilio credentials on the org
    twilio_config = dict()
    twilio_config[ACCOUNT_SID] = 'account-sid'
    twilio_config[ACCOUNT_TOKEN] = 'account-token'
    twilio_config[APPLICATION_SID] = 'TwilioTestSid'

    self.org.config = json.dumps(twilio_config)
    self.org.save()

    # hit the claim page, should now have a claim twilio link
    claim_twilio = reverse('channels.channel_claim_twilio')
    response = self.client.get(reverse('channels.channel_claim'))
    self.assertContains(response, claim_twilio)

    response = self.client.get(claim_twilio)
    self.assertIn('account_trial', response.context)
    self.assertFalse(response.context['account_trial'])

    with patch('temba.orgs.models.Org.get_twilio_client') as mock_get_twilio_client:
        # no client at all bounces back to the claim page
        mock_get_twilio_client.return_value = None

        response = self.client.get(claim_twilio)
        self.assertRedirects(response, reverse('channels.channel_claim'))

        # so does an auth failure from Twilio
        mock_get_twilio_client.side_effect = TwilioRestException(401, 'http://twilio', msg='Authentication Failure', code=20003)

        response = self.client.get(claim_twilio)
        self.assertRedirects(response, reverse('channels.channel_claim'))

    with patch('temba.tests.MockTwilioClient.MockAccounts.get') as mock_get:
        mock_get.return_value = MockTwilioClient.MockAccount('Trial')

        response = self.client.get(claim_twilio)
        self.assertIn('account_trial', response.context)
        self.assertTrue(response.context['account_trial'])

    with patch('temba.tests.MockTwilioClient.MockPhoneNumbers.search') as mock_search:
        search_url = reverse('channels.channel_search_numbers')

        # try making empty request
        response = self.client.post(search_url, {})
        self.assertEqual(response.json(), [])

        # try searching for US number
        mock_search.return_value = [MockTwilioClient.MockPhoneNumber('+12062345678')]
        response = self.client.post(search_url, {'country': 'US', 'area_code': '206'})
        self.assertEqual(response.json(), ['+1 206-234-5678', '+1 206-234-5678'])

        # try searching without area code
        response = self.client.post(search_url, {'country': 'US', 'area_code': ''})
        self.assertEqual(response.json(), ['+1 206-234-5678', '+1 206-234-5678'])

        mock_search.return_value = []
        response = self.client.post(search_url, {'country': 'US', 'area_code': ''})
        self.assertEqual(json.loads(response.content)['error'],
                         "Sorry, no numbers found, please enter another area code and try again.")

        # try searching for non-US number
        mock_search.return_value = [MockTwilioClient.MockPhoneNumber('+442812345678')]
        response = self.client.post(search_url, {'country': 'GB', 'area_code': '028'})
        self.assertEqual(response.json(), ['+44 28 1234 5678', '+44 28 1234 5678'])

        mock_search.return_value = []
        response = self.client.post(search_url, {'country': 'GB', 'area_code': ''})
        self.assertEqual(json.loads(response.content)['error'],
                         "Sorry, no numbers found, please enter another pattern and try again.")

    with patch('temba.tests.MockTwilioClient.MockPhoneNumbers.list') as mock_numbers:
        mock_numbers.return_value = [MockTwilioClient.MockPhoneNumber('+12062345678')]

        with patch('temba.tests.MockTwilioClient.MockShortCodes.list') as mock_short_codes:
            mock_short_codes.return_value = []

            response = self.client.get(claim_twilio)
            self.assertContains(response, '206-234-5678')

            # claim it
            response = self.client.post(claim_twilio, dict(country='US', phone_number='12062345678'))
            self.assertRedirects(response, reverse('public.public_welcome') + "?success")

            # make sure it is actually connected with all four roles
            channel = Channel.objects.get(channel_type='T', org=self.org)
            self.assertEqual(channel.role, Channel.ROLE_CALL + Channel.ROLE_ANSWER + Channel.ROLE_SEND + Channel.ROLE_RECEIVE)

    # voice only number
    with patch('temba.tests.MockTwilioClient.MockPhoneNumbers.list') as mock_numbers:
        mock_numbers.return_value = [MockTwilioClient.MockPhoneNumber('+554139087835')]

        with patch('temba.tests.MockTwilioClient.MockShortCodes.list') as mock_short_codes:
            mock_short_codes.return_value = []
            Channel.objects.all().delete()

            response = self.client.get(claim_twilio)
            self.assertContains(response, '+55 41 3908-7835')

            # claim it
            response = self.client.post(claim_twilio, dict(country='BR', phone_number='554139087835'))
            self.assertRedirects(response, reverse('public.public_welcome') + "?success")

            # make sure it is actually connected, voice roles only
            channel = Channel.objects.get(channel_type='T', org=self.org)
            self.assertEqual(channel.role, Channel.ROLE_CALL + Channel.ROLE_ANSWER)

    with patch('temba.tests.MockTwilioClient.MockPhoneNumbers.list') as mock_numbers:
        mock_numbers.return_value = [MockTwilioClient.MockPhoneNumber('+4545335500')]

        with patch('temba.tests.MockTwilioClient.MockShortCodes.list') as mock_short_codes:
            mock_short_codes.return_value = []
            Channel.objects.all().delete()

            response = self.client.get(claim_twilio)
            self.assertContains(response, '45 33 55 00')

            # claim it
            response = self.client.post(claim_twilio, dict(country='DK', phone_number='4545335500'))
            self.assertRedirects(response, reverse('public.public_welcome') + "?success")

            # make sure it is actually connected
            Channel.objects.get(channel_type='T', org=self.org)

    with patch('temba.tests.MockTwilioClient.MockPhoneNumbers.list') as mock_numbers:
        mock_numbers.return_value = []

        with patch('temba.tests.MockTwilioClient.MockShortCodes.list') as mock_short_codes:
            mock_short_codes.return_value = [MockTwilioClient.MockShortCode('8080')]
            Channel.objects.all().delete()

            self.org.timezone = 'America/New_York'
            self.org.save()

            response = self.client.get(claim_twilio)
            self.assertContains(response, '8080')
            self.assertContains(response, 'class="country">US')  # we look up the country from the timezone

            # claim it
            response = self.client.post(claim_twilio, dict(country='US', phone_number='8080'))
            self.assertRedirects(response, reverse('public.public_welcome') + "?success")

            # make sure it is actually connected
            Channel.objects.get(channel_type='T', org=self.org)

    twilio_channel = self.org.channels.all().first()
    self.assertEqual('T', twilio_channel.channel_type)

    with patch('temba.tests.MockTwilioClient.MockPhoneNumbers.update') as mock_numbers:
        # our twilio channel removal should fail on bad auth
        mock_numbers.side_effect = TwilioRestException(401, 'http://twilio', msg='Authentication Failure', code=20003)
        self.client.post(reverse('channels.channel_delete', args=[twilio_channel.pk]))
        self.assertIsNotNone(self.org.channels.all().first())

        # or other arbitrary twilio errors
        mock_numbers.side_effect = TwilioRestException(400, 'http://twilio', msg='Twilio Error', code=123)
        self.client.post(reverse('channels.channel_delete', args=[twilio_channel.pk]))
        self.assertIsNotNone(self.org.channels.all().first())

        # now lets be successful
        mock_numbers.side_effect = None
        self.client.post(reverse('channels.channel_delete', args=[twilio_channel.pk]))
        self.assertIsNone(self.org.channels.all().first())
@patch('temba.orgs.models.TwilioRestClient', MockTwilioClient)
@patch('temba.ivr.clients.TwilioClient', MockTwilioClient)
@patch('twilio.util.RequestValidator', MockRequestValidator)
def test_claim_twilio_messaging_service(self):
    """Claiming a Twilio Messaging Service channel, including credential failures."""
    self.login(self.admin)

    # start from a clean slate with no channels on the org
    self.org.channels.all().delete()

    # the generic claim page should advertise Twilio and the connect link
    claim_page = self.client.get(reverse('channels.channel_claim'))
    self.assertContains(claim_page, "Twilio")
    self.assertContains(claim_page, reverse('orgs.org_twilio_connect'))

    # wire up twilio credentials on the org
    self.org.config = json.dumps({
        ACCOUNT_SID: 'account-sid',
        ACCOUNT_TOKEN: 'account-token',
        APPLICATION_SID: 'TwilioTestSid',
    })
    self.org.save()

    # the messaging-service claim link now shows on the claim page
    claim_url = reverse('channels.channel_claim_twilio_messaging_service')
    claim_page = self.client.get(reverse('channels.channel_claim'))
    self.assertContains(claim_page, claim_url)

    response = self.client.get(claim_url)
    self.assertIn('account_trial', response.context)
    self.assertFalse(response.context['account_trial'])

    with patch('temba.orgs.models.Org.get_twilio_client') as mock_client:
        # missing client bounces back to the claim page
        mock_client.return_value = None
        self.assertRedirects(self.client.get(claim_url), reverse('channels.channel_claim'))

        # as does an auth failure from Twilio
        mock_client.side_effect = TwilioRestException(401, 'http://twilio', msg='Authentication Failure', code=20003)
        self.assertRedirects(self.client.get(claim_url), reverse('channels.channel_claim'))

    with patch('temba.tests.MockTwilioClient.MockAccounts.get') as mock_account_get:
        mock_account_get.return_value = MockTwilioClient.MockAccount('Trial')

        response = self.client.get(claim_url)
        self.assertIn('account_trial', response.context)
        self.assertTrue(response.context['account_trial'])

    # claim form offers the full list of Twilio-supported countries
    response = self.client.get(claim_url)
    self.assertEqual(list(TWILIO_SUPPORTED_COUNTRIES), response.context['form'].fields['country'].choices)
    self.assertContains(response, "icon-channel-twilio")

    # submitting an empty form surfaces validation errors
    self.assertTrue(self.client.post(claim_url, dict()).context['form'].errors)

    # a valid submission creates the channel and redirects to its config page
    response = self.client.post(claim_url, dict(country='US', messaging_service_sid='MSG-SERVICE-SID'))
    channel = self.org.channels.get()
    self.assertRedirects(response, reverse('channels.channel_configuration', args=[channel.pk]))
    self.assertEqual("TMS", channel.channel_type)
    self.assertEqual(dict(messaging_service_sid="MSG-SERVICE-SID"), channel.config_json())
@patch('temba.orgs.models.TwilioRestClient', MockTwilioClient)
@patch('temba.ivr.clients.TwilioClient', MockTwilioClient)
@patch('twilio.util.RequestValidator', MockRequestValidator)
def test_claim_twiml_api(self):
    """Claiming a TwiML API channel: claim page, form validation, creation, re-claim with new credentials and shortcodes."""
    self.login(self.admin)

    # remove any existing channels
    self.org.channels.update(is_active=False, org=None)

    claim_url = reverse('channels.channel_claim_twiml_api')
    response = self.client.get(reverse('channels.channel_claim'))
    self.assertContains(response, "TwiML")
    self.assertContains(response, claim_url)

    # can fetch the claim page
    response = self.client.get(claim_url)
    self.assertEqual(200, response.status_code)
    self.assertContains(response, 'TwiML')

    # an invalid country should be rejected by the form
    response = self.client.post(claim_url, dict(number='5512345678', country='AA'))
    self.assertTrue(response.context['form'].errors)

    # a valid claim creates a 'TW' channel whose config holds the credentials and send URL
    response = self.client.post(claim_url, dict(country='US', number='12345678', url='https://twilio.com', role='SR', account_sid='abcd1234', account_token='abcd1234'))
    channel = self.org.channels.all().first()
    self.assertRedirects(response, reverse('channels.channel_configuration', args=[channel.pk]))
    self.assertEqual(channel.channel_type, "TW")
    self.assertEqual(channel.config_json(), dict(ACCOUNT_TOKEN='abcd1234', send_url='https://twilio.com', ACCOUNT_SID='abcd1234'))

    # claim the same number again with different credentials; the resulting channel carries the new ones
    response = self.client.post(claim_url, dict(country='US', number='12345678', url='https://twilio.com', role='SR', account_sid='abcd4321', account_token='abcd4321'))
    channel = self.org.channels.all().first()
    self.assertRedirects(response, reverse('channels.channel_configuration', args=[channel.pk]))
    self.assertEqual(channel.channel_type, "TW")
    self.assertEqual(channel.config_json(), dict(ACCOUNT_TOKEN='abcd4321', send_url='https://twilio.com', ACCOUNT_SID='abcd4321'))

    self.org.channels.update(is_active=False, org=None)

    # shortcodes (no leading +) can also be claimed
    response = self.client.post(claim_url, dict(country='US', number='8080', url='https://twilio.com', role='SR', account_sid='abcd1234', account_token='abcd1234'))
    channel = self.org.channels.all().first()
    self.assertRedirects(response, reverse('channels.channel_configuration', args=[channel.pk]))
    self.assertEqual(channel.channel_type, "TW")
    self.assertEqual(channel.config_json(), dict(ACCOUNT_TOKEN='abcd1234', send_url='https://twilio.com', ACCOUNT_SID='abcd1234'))
def test_claim_facebook(self):
    """Claiming a Facebook channel: token validation failure, successful claim, webhook verification and release."""
    self.login(self.admin)

    # remove any existing channels
    Channel.objects.all().delete()

    claim_facebook_url = reverse('channels.channel_claim_facebook')
    token = 'x' * 200

    with patch('requests.get') as mock:
        mock.return_value = MockResponse(400, json.dumps(dict(error=dict(message="Failed validation"))))

        # try to claim facebook, should fail because our verification of the token fails
        response = self.client.post(claim_facebook_url, dict(page_access_token=token))

        # assert we got a normal 200 and it says our token is wrong
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Failed validation")

    # ok this time claim with a success
    with patch('requests.get') as mock_get:
        mock_get.return_value = MockResponse(200, json.dumps(dict(name='Temba', id=10)))
        response = self.client.post(claim_facebook_url, dict(page_access_token=token), follow=True)

        # assert our channel got created, with the page token, page name and page id
        channel = Channel.objects.get()
        self.assertEqual(channel.config_json()[Channel.CONFIG_AUTH_TOKEN], token)
        self.assertEqual(channel.config_json()[Channel.CONFIG_PAGE_NAME], 'Temba')
        self.assertEqual(channel.address, '10')

        # should be on our configuration page displaying our secret
        self.assertContains(response, channel.secret)

        # validating against an unknown channel uuid is rejected
        handler_url = reverse('handlers.facebook_handler', args=['invalid'])
        response = self.client.get(handler_url)
        self.assertEqual(response.status_code, 400)

        # a wrong hub.verify_token is rejected too
        handler_url = reverse('handlers.facebook_handler', args=[channel.uuid])
        payload = {'hub.mode': 'subscribe', 'hub.verify_token': 'invalid', 'hub.challenge': 'challenge'}
        response = self.client.get(handler_url, payload)
        self.assertEqual(response.status_code, 400)

        # test actual token
        payload['hub.verify_token'] = channel.secret

        # try with unsuccessful callback to subscribe (this fails silently)
        with patch('requests.post') as mock_post:
            mock_post.return_value = MockResponse(400, json.dumps(dict(success=False)))
            response = self.client.get(handler_url, payload)
            self.assertEqual(response.status_code, 200)
            self.assertContains(response, 'challenge')

            # assert we attempted to subscribe to events
            self.assertEqual(mock_post.call_count, 1)

        # validate again; the handler subscribes again, this time successfully
        with patch('requests.post') as mock_post:
            mock_post.return_value = MockResponse(200, json.dumps(dict(success=True)))
            response = self.client.get(handler_url, payload)
            self.assertEqual(response.status_code, 200)
            self.assertContains(response, 'challenge')

            # assert we subscribed to events
            self.assertEqual(mock_post.call_count, 1)

        # releasing the channel unsubscribes the app from the page
        with patch('requests.delete') as mock_delete:
            mock_delete.return_value = MockResponse(200, json.dumps(dict(success=True)))
            channel.release()

            mock_delete.assert_called_once_with('https://graph.facebook.com/v2.5/me/subscribed_apps',
                                                params=dict(access_token=channel.config_json()[Channel.CONFIG_AUTH_TOKEN]))
def test_claim_viber_public(self):
    """Claiming a Viber public channel: token validation failures, successful claim, and release."""
    self.login(self.admin)

    # remove any existing channels
    Channel.objects.all().delete()

    url = reverse('channels.channel_claim_viber_public')
    token = "auth"

    # first validation request fails at the HTTP level
    with patch('requests.post') as mock:
        mock.side_effect = [MockResponse(400, json.dumps(dict(status=3, status_message="Invalid token")))]
        response = self.client.post(url, dict(auth_token=token))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Error validating authentication token")

    # HTTP 200 but a non-zero Viber status is also treated as a validation error
    with patch('requests.post') as mock:
        mock.side_effect = [MockResponse(200, json.dumps(dict(status=3, status_message="Invalid token")))]
        response = self.client.post(url, dict(auth_token=token))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Error validating authentication token")

    # first call succeeds, second call fails -> invalid token error
    with patch('requests.post') as mock:
        mock.side_effect = [MockResponse(200, json.dumps(dict(status=0, status_message="ok"))),
                            MockResponse(400, json.dumps(dict(status=3, status_message="Invalid token")))]
        response = self.client.post(url, dict(auth_token=token))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Invalid authentication token")

    # ok this time claim with a success (all three API calls succeed)
    with patch('requests.post') as mock:
        mock.side_effect = [MockResponse(200, json.dumps(dict(status=0, status_message="ok"))),
                            MockResponse(200, json.dumps(dict(status=0, status_message="ok", id="viberId", uri="viberName"))),
                            MockResponse(200, json.dumps(dict(status=0, status_message="ok")))]
        response = self.client.post(url, dict(auth_token=token), follow=True)

        # assert our channel got created with the Viber id as address and uri as name
        channel = Channel.objects.get()
        self.assertEqual(channel.config_json()[Channel.CONFIG_AUTH_TOKEN], token)
        self.assertEqual(channel.address, 'viberId')
        self.assertEqual(channel.name, 'viberName')

        # should have been called with our webhook URL
        self.assertEqual(mock.call_args[0][0], 'https://chatapi.viber.com/pa/set_webhook')

    # removing the channel also hits the set_webhook endpoint (to clear it)
    with patch('requests.post') as mock:
        mock.side_effect = [MockResponse(200, json.dumps(dict(status=0, status_message="ok")))]
        channel.release()
        self.assertEqual(mock.call_args[0][0], 'https://chatapi.viber.com/pa/set_webhook')
def test_search_nexmo(self):
    """Searching Nexmo numbers: the view queries per area code and returns formatted numbers as JSON.

    Fix: replaced deprecated ``assertEquals`` alias with ``assertEqual``
    (the alias was deprecated and is removed in Python 3.12).
    """
    self.login(self.admin)
    self.org.channels.update(is_active=False, org=None)
    self.channel = Channel.create(self.org, self.user, 'RW', 'NX', None, '+250788123123',
                                  uuid='00000000-0000-0000-0000-000000001234')

    # wire Nexmo credentials into the org config so the search view is usable
    self.nexmo_uuid = str(uuid.uuid4())
    nexmo_config = {NEXMO_KEY: '1234', NEXMO_SECRET: '1234', NEXMO_UUID: self.nexmo_uuid,
                    NEXMO_APP_ID: 'nexmo-app-id', NEXMO_APP_PRIVATE_KEY: 'nexmo-private-key'}

    org = self.channel.org

    config = org.config_json()
    config.update(nexmo_config)
    org.config = json.dumps(config)
    org.save()

    search_nexmo_url = reverse('channels.channel_search_nexmo')

    # the search form exposes area code and country fields
    response = self.client.get(search_nexmo_url)
    self.assertTrue('area_code' in response.context['form'].fields)
    self.assertTrue('country' in response.context['form'].fields)

    with patch('requests.get') as nexmo_get:
        # two paged search responses, one number each
        nexmo_get.side_effect = [MockResponse(200,
                                              '{"count":1,"numbers":[{"features": ["SMS", "VOICE"], '
                                              '"type":"mobile-lvn","country":"US","msisdn":"13607884540"}] }'),
                                 MockResponse(200,
                                              '{"count":1,"numbers":[{"features": ["SMS", "VOICE"], '
                                              '"type":"mobile-lvn","country":"US","msisdn":"13607884550"}] }'),
                                 ]

        post_data = dict(country='US', area_code='360')
        response = self.client.post(search_nexmo_url, post_data, follow=True)

        # both numbers come back formatted for display
        self.assertEqual(response.json(), ['+1 360-788-4540', '+1 360-788-4550'])
def test_claim_nexmo(self):
    """Claiming Nexmo numbers: shortcodes, purchased numbers, purchase failure, account numbers and updates.

    Fix: replaced deprecated ``assertEquals`` alias with ``assertEqual``
    (the alias was deprecated and is removed in Python 3.12).
    """
    self.login(self.admin)

    # remove any existing channels
    self.org.channels.update(is_active=False, org=None)

    # make sure nexmo is on the claim page
    response = self.client.get(reverse('channels.channel_claim'))
    self.assertContains(response, "Nexmo")
    self.assertContains(response, reverse('orgs.org_nexmo_connect'))

    nexmo_config = dict(NEXMO_KEY='nexmo-key', NEXMO_SECRET='nexmo-secret', NEXMO_UUID='nexmo-uuid',
                        NEXMO_APP_ID='nexmo-app-id', NEXMO_APP_PRIVATE_KEY='nexmo-app-private-key')
    self.org.config = json.dumps(nexmo_config)
    self.org.save()

    # hit the claim page, should now have a claim nexmo link
    claim_nexmo = reverse('channels.channel_claim_nexmo')
    response = self.client.get(reverse('channels.channel_claim'))
    self.assertContains(response, claim_nexmo)

    # try adding a shortcode
    with patch('requests.get') as nexmo_get:
        with patch('requests.post') as nexmo_post:
            nexmo_get.side_effect = [
                MockResponse(200, '{"count":0,"numbers":[] }'),
                MockResponse(200,
                             '{"count":1,"numbers":[{"features": ["SMS"], "type":"mobile-lvn",'
                             '"country":"US","msisdn":"8080"}] }'),
                MockResponse(200,
                             '{"count":1,"numbers":[{"features": ["SMS"], "type":"mobile-lvn",'
                             '"country":"US","msisdn":"8080"}] }'),
            ]
            response = self.client.post(claim_nexmo, dict(country='US', phone_number='8080'))
            self.assertRedirects(response, reverse('public.public_welcome') + "?success")

            # an SMS-only shortcode gets send/receive but no voice roles
            channel = Channel.objects.filter(address='8080').first()
            self.assertTrue(Channel.ROLE_SEND in channel.role)
            self.assertTrue(Channel.ROLE_RECEIVE in channel.role)
            self.assertFalse(Channel.ROLE_ANSWER in channel.role)
            self.assertFalse(Channel.ROLE_CALL in channel.role)
            Channel.objects.all().delete()

    # try buying a number not on the account
    with patch('requests.get') as nexmo_get:
        with patch('requests.post') as nexmo_post:
            nexmo_get.side_effect = [
                MockResponse(200, '{"count":0,"numbers":[] }'),
                MockResponse(200, '{"count":0,"numbers":[] }'),
                MockResponse(200,
                             '{"count":1,"numbers":[{"features": ["sms", "voice"], "type":"mobile",'
                             '"country":"US","msisdn":"+12065551212"}] }'),
            ]
            nexmo_post.return_value = MockResponse(200, '{"error-code": "200"}')
            response = self.client.post(claim_nexmo, dict(country='US', phone_number='+12065551212'))
            self.assertRedirects(response, reverse('public.public_welcome') + "?success")

            # a number with both sms and voice features gets all four roles
            channel = Channel.objects.filter(address='+12065551212').first()
            self.assertTrue(Channel.ROLE_SEND in channel.role)
            self.assertTrue(Channel.ROLE_RECEIVE in channel.role)
            self.assertTrue(Channel.ROLE_ANSWER in channel.role)
            self.assertTrue(Channel.ROLE_CALL in channel.role)
            Channel.objects.all().delete()

    # try failing to buy a number not on the account
    with patch('requests.get') as nexmo_get:
        with patch('requests.post') as nexmo_post:
            nexmo_get.side_effect = [
                MockResponse(200, '{"count":0,"numbers":[] }'),
                MockResponse(200, '{"count":0,"numbers":[] }'),
            ]
            nexmo_post.side_effect = Exception('Error')
            response = self.client.post(claim_nexmo, dict(country='US', phone_number='+12065551212'))

            # a purchase failure surfaces as a form error rather than a crash
            self.assertTrue(response.context['form'].errors)
            self.assertContains(response, "There was a problem claiming that number, "
                                          "please check the balance on your account. "
                                          "Note that you can only claim numbers after "
                                          "adding credit to your Nexmo account.")
            Channel.objects.all().delete()

    # let's add a number already connected to the account
    with patch('requests.get') as nexmo_get:
        with patch('requests.post') as nexmo_post:
            nexmo_get.return_value = MockResponse(200,
                                                  '{"count":1,"numbers":[{"features": ["SMS", "VOICE"], '
                                                  '"type":"mobile-lvn","country":"US","msisdn":"13607884540"}] }')
            nexmo_post.return_value = MockResponse(200, '{"error-code": "200"}')

            # make sure our number appears on the claim page
            response = self.client.get(claim_nexmo)
            self.assertFalse('account_trial' in response.context)
            self.assertContains(response, '360-788-4540')

            # claim it
            response = self.client.post(claim_nexmo, dict(country='US', phone_number='13607884540'))
            self.assertRedirects(response, reverse('public.public_welcome') + "?success")

            # make sure it is actually connected with all roles
            channel = Channel.objects.get(channel_type='NX', org=self.org)
            self.assertTrue(Channel.ROLE_SEND in channel.role)
            self.assertTrue(Channel.ROLE_RECEIVE in channel.role)
            self.assertTrue(Channel.ROLE_ANSWER in channel.role)
            self.assertTrue(Channel.ROLE_CALL in channel.role)

            # test the update page for nexmo
            update_url = reverse('channels.channel_update', args=[channel.pk])
            response = self.client.get(update_url)

            # try changing our address
            updated = response.context['form'].initial
            updated['address'] = 'MTN'
            updated['alert_email'] = 'foo@bar.com'

            response = self.client.post(update_url, updated)
            channel = Channel.objects.get(pk=channel.id)

            self.assertEqual('MTN', channel.address)

            # add a canada number
            nexmo_get.return_value = MockResponse(200, '{"count":1,"numbers":[{"features": ["SMS", "VOICE"], "type":"mobile-lvn","country":"CA","msisdn":"15797884540"}] }')
            nexmo_post.return_value = MockResponse(200, '{"error-code": "200"}')

            # make sure our number appears on the claim page
            response = self.client.get(claim_nexmo)
            self.assertFalse('account_trial' in response.context)
            self.assertContains(response, '579-788-4540')

            # claim it
            response = self.client.post(claim_nexmo, dict(country='CA', phone_number='15797884540'))
            self.assertRedirects(response, reverse('public.public_welcome') + "?success")

            # make sure it is actually connected
            self.assertTrue(Channel.objects.filter(channel_type='NX', org=self.org, address='+15797884540').first())

            # as is our old one
            self.assertTrue(Channel.objects.filter(channel_type='NX', org=self.org, address='MTN').first())

            # the configuration page should list all the nexmo handler URLs
            config_url = reverse('channels.channel_configuration', args=[channel.pk])
            response = self.client.get(config_url)
            self.assertEqual(200, response.status_code)

            self.assertContains(response, reverse('handlers.nexmo_handler', args=['receive', channel.org.nexmo_uuid()]))
            self.assertContains(response, reverse('handlers.nexmo_handler', args=['status', channel.org.nexmo_uuid()]))
            self.assertContains(response, reverse('handlers.nexmo_call_handler', args=['answer', channel.uuid]))

            # an event callback with no body is acknowledged with an empty 200
            call_handler_event_url = reverse('handlers.nexmo_call_handler', args=['event', channel.uuid])
            response = self.client.get(call_handler_event_url)
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.content, "")
def test_claim_plivo(self):
    """Claiming Plivo channels: session credentials required, claiming an account number, and buying a new one.

    Fix: replaced deprecated ``assertEquals`` alias with ``assertEqual``
    (the alias was deprecated and is removed in Python 3.12).
    """
    self.login(self.admin)

    # remove any existing channels
    self.org.channels.update(is_active=False, org=None)

    connect_plivo_url = reverse('orgs.org_plivo_connect')
    claim_plivo_url = reverse('channels.channel_claim_plivo')

    # make sure plivo is on the claim page
    response = self.client.get(reverse('channels.channel_claim'))
    self.assertContains(response, "Connect plivo")
    self.assertContains(response, reverse('orgs.org_plivo_connect'))

    with patch('requests.get') as plivo_get:
        plivo_get.return_value = MockResponse(400, json.dumps(dict()))

        # try hit the claim page, should be redirected; no credentials in session
        response = self.client.get(claim_plivo_url, follow=True)
        self.assertFalse('account_trial' in response.context)
        self.assertContains(response, connect_plivo_url)

    # let's add a number already connected to the account
    with patch('requests.get') as plivo_get:
        with patch('requests.post') as plivo_post:
            plivo_get.return_value = MockResponse(200,
                                                  json.dumps(dict(objects=[dict(number='16062681435',
                                                                                region="California, UNITED STATES"),
                                                                           dict(number='8080',
                                                                                region='GUADALAJARA, MEXICO')])))
            plivo_post.return_value = MockResponse(202, json.dumps(dict(status='changed', app_id='app-id')))

            # make sure our numbers appear on the claim page
            response = self.client.get(claim_plivo_url)
            self.assertContains(response, "+1 606-268-1435")
            self.assertContains(response, "8080")
            self.assertContains(response, 'US')
            self.assertContains(response, 'MX')

            # claim the US number; credentials come from the session
            session = self.client.session
            session[Channel.CONFIG_PLIVO_AUTH_ID] = 'auth-id'
            session[Channel.CONFIG_PLIVO_AUTH_TOKEN] = 'auth-token'
            session.save()

            self.assertTrue(Channel.CONFIG_PLIVO_AUTH_ID in self.client.session)
            self.assertTrue(Channel.CONFIG_PLIVO_AUTH_TOKEN in self.client.session)

            response = self.client.post(claim_plivo_url, dict(phone_number='+1 606-268-1435', country='US'))
            self.assertRedirects(response, reverse('public.public_welcome') + "?success")

            # make sure it is actually connected
            channel = Channel.objects.get(channel_type='PL', org=self.org)
            self.assertEqual(channel.role, Channel.ROLE_SEND + Channel.ROLE_RECEIVE)
            self.assertEqual(channel.config_json(), {Channel.CONFIG_PLIVO_AUTH_ID: 'auth-id',
                                                     Channel.CONFIG_PLIVO_AUTH_TOKEN: 'auth-token',
                                                     Channel.CONFIG_PLIVO_APP_ID: 'app-id'})
            self.assertEqual(channel.address, "+16062681435")

            # no more credential in the session
            self.assertFalse(Channel.CONFIG_PLIVO_AUTH_ID in self.client.session)
            self.assertFalse(Channel.CONFIG_PLIVO_AUTH_TOKEN in self.client.session)

    # delete existing channels
    Channel.objects.all().delete()

    with patch('temba.channels.views.plivo.RestAPI.get_account') as mock_plivo_get_account:
        with patch('temba.channels.views.plivo.RestAPI.create_application') as mock_plivo_create_application:
            with patch('temba.channels.models.plivo.RestAPI.get_number') as mock_plivo_get_number:
                with patch('temba.channels.models.plivo.RestAPI.buy_phone_number') as mock_plivo_buy_phone_number:
                    mock_plivo_get_account.return_value = (200, MockResponse(200, json.dumps(dict())))
                    mock_plivo_create_application.return_value = (200, dict(app_id='app-id'))

                    # get_number fails, so the number must be bought
                    mock_plivo_get_number.return_value = (400, MockResponse(400, json.dumps(dict())))

                    response_body = json.dumps({
                        'status': 'fulfilled',
                        'message': 'created',
                        'numbers': [{'status': 'Success', 'number': '27816855210'}],
                        'api_id': '4334c747-9e83-11e5-9147-22000acb8094'
                    })
                    mock_plivo_buy_phone_number.return_value = (201, MockResponse(201, response_body))

                    # claim the US number; credentials come from the session
                    session = self.client.session
                    session[Channel.CONFIG_PLIVO_AUTH_ID] = 'auth-id'
                    session[Channel.CONFIG_PLIVO_AUTH_TOKEN] = 'auth-token'
                    session.save()

                    self.assertTrue(Channel.CONFIG_PLIVO_AUTH_ID in self.client.session)
                    self.assertTrue(Channel.CONFIG_PLIVO_AUTH_TOKEN in self.client.session)

                    response = self.client.post(claim_plivo_url, dict(phone_number='+1 606-268-1440', country='US'))
                    self.assertRedirects(response, reverse('public.public_welcome') + "?success")

                    # make sure it is actually connected
                    channel = Channel.objects.get(channel_type='PL', org=self.org)
                    self.assertEqual(channel.config_json(), {
                        Channel.CONFIG_PLIVO_AUTH_ID: 'auth-id',
                        Channel.CONFIG_PLIVO_AUTH_TOKEN: 'auth-token',
                        Channel.CONFIG_PLIVO_APP_ID: 'app-id'
                    })
                    self.assertEqual(channel.address, "+16062681440")

                    # no more credential in the session
                    self.assertFalse(Channel.CONFIG_PLIVO_AUTH_ID in self.client.session)
                    self.assertFalse(Channel.CONFIG_PLIVO_AUTH_TOKEN in self.client.session)
def test_claim_globe(self):
    """Claiming a Globe Labs channel stores the number, PH country code and the app credentials."""
    # start with no channels attached to the org
    self.org.channels.all().update(org=None, is_active=False)
    self.login(self.admin)

    globe_claim_url = reverse('channels.channel_claim_globe')

    # the claim form should be reachable
    response = self.client.get(globe_claim_url)
    self.assertEqual(response.status_code, 200)

    # submit the claim form and follow the redirect
    form_data = dict(number=21586380, app_id="AppId", app_secret="AppSecret", passphrase="Passphrase")
    response = self.client.post(globe_claim_url, form_data, follow=True)
    self.assertEqual(response.status_code, 200)

    # a Globe channel should now exist with our number, country and credentials
    created = Channel.objects.get(channel_type=Channel.TYPE_GLOBE)
    self.assertEqual(created.address, '21586380')
    self.assertEqual(created.country, 'PH')

    channel_config = created.config_json()
    self.assertEqual(channel_config['app_secret'], 'AppSecret')
    self.assertEqual(channel_config['app_id'], 'AppId')
    self.assertEqual(channel_config['passphrase'], 'Passphrase')
def test_claim_telegram(self):
    """Claiming a Telegram bot: invalid token error, successful claim, duplicate rejection, send-channel use."""
    # disassociate all of our channels
    self.org.channels.all().update(org=None, is_active=False)
    self.login(self.admin)
    claim_url = reverse('channels.channel_claim_telegram')

    # can fetch the claim page
    response = self.client.get(claim_url)
    self.assertEqual(200, response.status_code)
    self.assertContains(response, 'Telegram Bot')

    # claim with an invalid token (getMe raises)
    with patch('telegram.Bot.getMe') as get_me:
        get_me.side_effect = telegram.TelegramError('Boom')
        response = self.client.post(claim_url, dict(auth_token='invalid'))
        self.assertEqual(200, response.status_code)
        self.assertEqual('Your authentication token is invalid, please check and try again', response.context['form'].errors['auth_token'][0])

    # now claim with a valid token; getMe returns our bot user
    with patch('telegram.Bot.getMe') as get_me:
        user = TelegramUser(123, 'Rapid')
        user.last_name = 'Bot'
        user.username = 'rapidbot'
        get_me.return_value = user

        with patch('telegram.Bot.setWebhook') as set_webhook:
            set_webhook.return_value = ''

            response = self.client.post(claim_url, dict(auth_token='184875172:BAEKbsOKAL23CXufXG4ksNV7Dq7e_1qi3j8'))
            channel = Channel.objects.all().order_by('-pk').first()
            self.assertIsNotNone(channel)
            self.assertEqual(channel.channel_type, Channel.TYPE_TELEGRAM)
            self.assertRedirect(response, reverse('channels.channel_read', args=[channel.uuid]))
            self.assertEqual(302, response.status_code)

            # claiming the same bot token again should be rejected
            response = self.client.post(claim_url, dict(auth_token='184875172:BAEKbsOKAL23CXufXG4ksNV7Dq7e_1qi3j8'))
            self.assertEqual('A telegram channel for this bot already exists on your account.', response.context['form'].errors['auth_token'][0])

            contact = self.create_contact('Telegram User', urn=URN.from_telegram('1234'))

            # make sure we our telegram channel satisfies as a send channel
            self.login(self.admin)
            response = self.client.get(reverse('contacts.contact_read', args=[contact.uuid]))
            send_channel = response.context['send_channel']
            self.assertIsNotNone(send_channel)
            self.assertEqual(Channel.TYPE_TELEGRAM, send_channel.channel_type)
def test_claim_twitter(self):
    """Claiming a Twitter channel via the OAuth dance, then re-claiming the same handle with fresh credentials."""
    self.login(self.admin)

    self.twitter_channel.delete()  # remove existing twitter channel

    claim_url = reverse('channels.channel_claim_twitter')

    with patch('twython.Twython.get_authentication_tokens') as get_authentication_tokens:
        get_authentication_tokens.return_value = dict(oauth_token='abcde',
                                                      oauth_token_secret='12345',
                                                      auth_url='http://example.com/auth')
        # fetching the claim page stashes the OAuth request tokens in the session
        response = self.client.get(claim_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['twitter_auth_url'], 'http://example.com/auth')
        self.assertEqual(self.client.session['twitter_oauth_token'], 'abcde')
        self.assertEqual(self.client.session['twitter_oauth_token_secret'], '12345')

    with patch('temba.utils.mage.MageClient.activate_twitter_stream') as activate_twitter_stream:
        activate_twitter_stream.return_value = dict()

        with patch('twython.Twython.get_authorized_tokens') as get_authorized_tokens:
            get_authorized_tokens.return_value = dict(screen_name='billy_bob',
                                                      user_id=123,
                                                      oauth_token='bcdef',
                                                      oauth_token_secret='23456')

            # returning with a verifier completes the claim and clears the session tokens
            response = self.client.get(claim_url, {'oauth_verifier': 'vwxyz'}, follow=True)
            self.assertNotIn('twitter_oauth_token', self.client.session)
            self.assertNotIn('twitter_oauth_token_secret', self.client.session)
            self.assertEqual(response.status_code, 200)

            # channel address is the handle; config holds the handle id and access tokens
            channel = response.context['object']
            self.assertEqual(channel.address, 'billy_bob')
            self.assertEqual(channel.name, '@billy_bob')
            config = json.loads(channel.config)
            self.assertEqual(config['handle_id'], 123)
            self.assertEqual(config['oauth_token'], 'bcdef')
            self.assertEqual(config['oauth_token_secret'], '23456')

        # re-add same account but with different auth credentials
        s = self.client.session
        s['twitter_oauth_token'] = 'cdefg'
        s['twitter_oauth_token_secret'] = '34567'
        s.save()

        with patch('twython.Twython.get_authorized_tokens') as get_authorized_tokens:
            get_authorized_tokens.return_value = dict(screen_name='billy_bob',
                                                      user_id=123,
                                                      oauth_token='defgh',
                                                      oauth_token_secret='45678')

            response = self.client.get(claim_url, {'oauth_verifier': 'uvwxy'}, follow=True)
            self.assertEqual(response.status_code, 200)

            # the handle's channel now carries the updated credentials
            channel = response.context['object']
            self.assertEqual(channel.address, 'billy_bob')
            config = json.loads(channel.config)
            self.assertEqual(config['handle_id'], 123)
            self.assertEqual(config['oauth_token'], 'defgh')
            self.assertEqual(config['oauth_token_secret'], '45678')
def test_claim_line(self):
    """Claiming a LINE channel: successful claim, duplicate rejection, and invalid-token error surfacing."""
    # disassociate all of our channels
    self.org.channels.all().update(org=None, is_active=False)
    self.login(self.admin)

    claim_url = reverse('channels.channel_claim')
    response = self.client.get(claim_url)
    self.assertContains(response, 'LINE')

    claim_line_url = reverse('channels.channel_claim_line')

    with patch('requests.get') as mock:
        mock.return_value = MockResponse(200, json.dumps(dict(channelId=123456789, mid='u1234567890')))

        payload = dict(channel_access_token='abcdef123456', channel_secret='123456')

        response = self.client.post(claim_line_url, payload, follow=True)
        channel = Channel.objects.get(channel_type=Channel.TYPE_LINE)
        self.assertRedirects(response, reverse('channels.channel_configuration', args=[channel.pk]))
        self.assertEqual(channel.channel_type, "LN")
        self.assertEqual(channel.config_json()[Channel.CONFIG_AUTH_TOKEN], 'abcdef123456')
        self.assertEqual(channel.config_json()[Channel.CONFIG_CHANNEL_SECRET], '123456')
        self.assertEqual(channel.address, 'u1234567890')

        # claiming the same configuration again is rejected
        response = self.client.post(claim_line_url, payload, follow=True)
        self.assertContains(response, "A channel with this configuration already exists.")

    self.org.channels.update(is_active=False, org=None)

    with patch('requests.get') as mock:
        # NOTE(review): 'error_desciption' looks like a typo for 'error_description' — the test only
        # checks that the error body is surfaced, so it still passes; confirm against the view's parsing.
        mock.return_value = MockResponse(401, json.dumps(dict(error_desciption="invalid token")))

        # NOTE(review): this payload uses 'channel_auth_token' while the one above uses
        # 'channel_access_token' — confirm whether the key difference is intentional.
        payload = dict(channel_auth_token='abcdef123456', channel_secret='123456')

        response = self.client.post(claim_line_url, payload, follow=True)
        self.assertContains(response, "invalid token")
def test_release(self):
    """Releasing an Android channel clears its identifiers and also releases its Nexmo bulk-sender delegate."""
    Channel.objects.all().delete()
    self.login(self.admin)

    # register and claim an Android channel
    reg_data = dict(cmds=[dict(cmd="gcm", gcm_id="GCM111", uuid='uuid'),
                          dict(cmd='status', cc='RW', dev='Nexus')])
    self.client.post(reverse('register'), json.dumps(reg_data), content_type='application/json')
    android = Channel.objects.get()
    self.client.post(reverse('channels.channel_claim_android'),
                     dict(claim_code=android.claim_code, phone_number="0788123123"))
    android.refresh_from_db()

    # connect org to Nexmo and add bulk sender
    with patch('temba.utils.nexmo.NexmoClient.update_account') as connect:
        connect.return_value = True
        with patch('nexmo.Client.create_application') as create_app:
            create_app.return_value = dict(id='app-id', keys=dict(private_key='private-key'))
            self.org.connect_nexmo('123', '456', self.admin)
            self.org.save()

    # attach a Nexmo bulk-sender delegate to the Android channel
    claim_nexmo_url = reverse('channels.channel_create_bulk_sender') + "?connection=NX&channel=%d" % android.pk
    self.client.post(claim_nexmo_url, dict(connection='NX', channel=android.pk))
    nexmo = Channel.objects.get(channel_type='NX')

    android.release()

    # check that some details are cleared and channel is now inactive
    self.assertIsNone(android.org)
    self.assertIsNone(android.gcm_id)
    self.assertIsNone(android.secret)
    self.assertFalse(android.is_active)

    # Nexmo delegate should have been released as well
    nexmo.refresh_from_db()
    self.assertIsNone(nexmo.org)
    self.assertFalse(nexmo.is_active)
def test_unclaimed(self):
    """Sync behavior for released channels: 'reg' until claimed, 'claim' once an org is set, 'reset' releases.

    Fix: replaced deprecated ``assertEquals`` alias with ``assertEqual``
    (the alias was deprecated and is removed in Python 3.12).
    """
    response = self.sync(self.released_channel)
    self.assertEqual(200, response.status_code)
    response = response.json()

    # should be a registration command containing a new claim code
    self.assertEqual(response['cmds'][0]['cmd'], 'reg')

    post_data = dict(cmds=[dict(cmd="status",
                                org_id=self.released_channel.pk,
                                p_lvl=84,
                                net="WIFI",
                                p_sts="CHA",
                                p_src="USB",
                                pending=[],
                                retry=[])])

    # try syncing against the released channel that has a secret
    self.released_channel.secret = "999"
    self.released_channel.save()

    response = self.sync(self.released_channel, post_data=post_data)
    response = response.json()

    # still get a registration command back
    self.assertEqual(response['cmds'][0]['cmd'], 'reg')

    # claim the channel on the site
    self.released_channel.org = self.org
    self.released_channel.save()

    post_data = dict(cmds=[dict(cmd="status",
                                org_id="-1",
                                p_lvl=84,
                                net="WIFI",
                                p_sts="STATUS_CHARGING",
                                p_src="USB",
                                pending=[],
                                retry=[])])

    response = self.sync(self.released_channel, post_data=post_data)
    response = response.json()

    # should now be a claim command in return
    self.assertEqual(response['cmds'][0]['cmd'], 'claim')

    # now try releasing the channel from the client
    post_data = dict(cmds=[dict(cmd="reset", p_id=1)])

    response = self.sync(self.released_channel, post_data=post_data)
    response = response.json()

    # channel should be released now
    channel = Channel.objects.get(pk=self.released_channel.pk)
    self.assertFalse(channel.org)
    self.assertFalse(channel.is_active)
def test_quota_exceeded(self):
    """Sync only returns as many outgoing messages as the org has remaining credits for.

    Fix: replaced deprecated ``assertEquals`` alias with ``assertEqual``
    (the alias was deprecated and is removed in Python 3.12).
    """
    # set our org to be on the trial plan with just 10 credits
    self.org.plan = FREE_PLAN
    self.org.save()
    self.org.topups.all().update(credits=10)

    self.assertEqual(10, self.org.get_credits_remaining())
    self.assertEqual(0, self.org.get_credits_used())

    # if we sync should get one message back
    self.send_message(['250788382382'], "How is it going?")

    response = self.sync(self.tel_channel)
    self.assertEqual(200, response.status_code)
    response = response.json()
    self.assertEqual(1, len(response['cmds']))

    # one credit consumed by the message above
    self.assertEqual(9, self.org.get_credits_remaining())
    self.assertEqual(1, self.org.get_credits_used())

    # let's create 10 other messages, this will put our last message above our quota
    for i in range(10):
        self.send_message(['250788382%03d' % i], "This is message # %d" % i)

    # should get the 10 messages we are allotted back, not the 11 that exist
    response = self.sync(self.tel_channel)
    self.assertEqual(200, response.status_code)
    response = response.json()
    self.assertEqual(10, len(response['cmds']))
def test_sync(self):
    """Exercise a full Android relayer sync cycle.

    Covers: queued outgoing broadcast delivery, device status reports,
    delivery/error receipts, call events (including an invalid URN),
    incoming messages (including empty-sender and empty-text), channel
    gcm/uuid updates, and the power-status alert lifecycle
    (DIS -> CHA, UNK -> CHA, NOT -> CHA).
    """
    date = timezone.now()
    # client timestamps are millisecond epochs
    date = int(time.mktime(date.timetuple())) * 1000

    # create a payload from the client
    bcast = self.send_message(['250788382382', '250788383383'], "How is it going?")
    msg1 = bcast[0]
    msg2 = bcast[1]
    msg3 = self.send_message(['250788382382'], "What is your name?")
    msg4 = self.send_message(['250788382382'], "Do you have any children?")
    msg5 = self.send_message(['250788382382'], "What's my dog's name?")

    # an incoming message that should not be included even if it is still pending
    incoming_message = Msg.create_incoming(self.tel_channel, "tel:+250788382382", 'hey')
    incoming_message.status = PENDING
    incoming_message.save()

    self.org.administrators.add(self.user)
    self.user.set_org(self.org)

    # Check our sync point has all three messages queued for delivery
    response = self.sync(self.tel_channel)
    self.assertEqual(200, response.status_code)

    response = response.json()
    cmds = response['cmds']
    self.assertEqual(4, len(cmds))

    # assert that our first command is the two message broadcast
    cmd = cmds[0]
    self.assertEqual("How is it going?", cmd['msg'])
    self.assertTrue('+250788382382' in [m['phone'] for m in cmd['to']])
    self.assertTrue('+250788383383' in [m['phone'] for m in cmd['to']])
    self.assertTrue(msg1.pk in [m['id'] for m in cmd['to']])
    self.assertTrue(msg2.pk in [m['id'] for m in cmd['to']])

    # add another message we'll pretend is in retry to see that we exclude them from sync
    msg6 = self.send_message(['250788382382'], "Pretend this message is in retry on the client, don't send it on sync")

    # a pending outgoing message should be included
    Msg.create_outgoing(self.org, self.admin, msg6.contact, "Hello, we heard from you.")

    post_data = dict(cmds=[
        # device gcm data
        dict(cmd='gcm', gcm_id='12345', uuid='abcde'),

        # device details status
        dict(cmd="status", p_sts="DIS", p_src="BAT", p_lvl="60",
             net="UMTS", org_id=8, retry=[msg6.pk], pending=[]),

        # pending incoming message that should be acknowledged but not updated
        dict(cmd="mt_sent", msg_id=incoming_message.pk, ts=date),

        # results for the outgoing messages
        dict(cmd="mt_sent", msg_id=msg1.pk, ts=date),
        dict(cmd="mt_sent", msg_id=msg2.pk, ts=date),
        dict(cmd="mt_dlvd", msg_id=msg3.pk, ts=date),
        dict(cmd="mt_error", msg_id=msg4.pk, ts=date),
        dict(cmd="mt_fail", msg_id=msg5.pk, ts=date),

        # a missed call
        dict(cmd="call", phone="2505551212", type='miss', ts=date),

        # incoming
        dict(cmd="call", phone="2505551212", type='mt', dur=10, ts=date),

        # incoming, invalid URN
        dict(cmd="call", phone="*", type='mt', dur=10, ts=date),

        # outgoing
        dict(cmd="call", phone="+250788383383", type='mo', dur=5, ts=date),

        # a new incoming message
        dict(cmd="mo_sms", phone="+250788383383", msg="This is giving me trouble", p_id="1", ts=date),

        # an incoming message from an empty contact
        dict(cmd="mo_sms", phone="", msg="This is spam", p_id="2", ts=date)])

    # now send the channel's updates
    response = self.sync(self.tel_channel, post_data)

    # new batch, our ack and our claim command for new org
    self.assertEqual(4, len(response.json()['cmds']))
    self.assertContains(response, "Hello, we heard from you.")
    self.assertContains(response, "mt_bcast")

    # check that our messages were updated accordingly
    self.assertEqual(2, Msg.objects.filter(channel=self.tel_channel, status='S', direction='O').count())
    self.assertEqual(1, Msg.objects.filter(channel=self.tel_channel, status='D', direction='O').count())
    self.assertEqual(1, Msg.objects.filter(channel=self.tel_channel, status='E', direction='O').count())
    self.assertEqual(1, Msg.objects.filter(channel=self.tel_channel, status='F', direction='O').count())

    # we should now have two incoming messages
    self.assertEqual(3, Msg.objects.filter(direction='I').count())

    # one of them should have an empty 'tel'
    self.assertTrue(Msg.objects.filter(direction='I', contact_urn__path='empty'))

    # We should now have one sync
    self.assertEqual(1, SyncEvent.objects.filter(channel=self.tel_channel).count())

    # check our channel gcm and uuid were updated
    self.tel_channel = Channel.objects.get(pk=self.tel_channel.pk)
    self.assertEqual('12345', self.tel_channel.gcm_id)
    self.assertEqual('abcde', self.tel_channel.uuid)

    # should ignore incoming messages without text
    post_data = dict(cmds=[
        # incoming msg without text
        dict(cmd="mo_sms", phone="+250788383383", p_id="1", ts=date),
    ])

    msgs_count = Msg.objects.all().count()
    response = self.sync(self.tel_channel, post_data)

    # no new message
    self.assertEqual(Msg.objects.all().count(), msgs_count)

    # set an email on our channel
    self.tel_channel.alert_email = 'fred@worldrelif.org'
    self.tel_channel.save()

    # We should not have an alert this time
    self.assertEqual(0, Alert.objects.all().count())

    # the case the status must be be reported
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="DIS", p_src="BAT", p_lvl="20", net="UMTS", retry=[], pending=[])
    ])

    # now send the channel's updates
    response = self.sync(self.tel_channel, post_data)

    # we should now have an Alert
    self.assertEqual(1, Alert.objects.all().count())

    # and at this time it must be not ended
    self.assertEqual(1, Alert.objects.filter(sync_event__channel=self.tel_channel, ended_on=None, alert_type='P').count())

    # the case the status must be be reported but already notification sent
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="DIS", p_src="BAT", p_lvl="15", net="UMTS", pending=[], retry=[])
    ])

    # now send the channel's updates
    response = self.sync(self.tel_channel, post_data)

    # we should not create a new alert
    self.assertEqual(1, Alert.objects.all().count())

    # still not ended
    self.assertEqual(1, Alert.objects.filter(sync_event__channel=self.tel_channel, ended_on=None, alert_type='P').count())

    # Let plug the channel to charger
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="CHA", p_src="BAT", p_lvl="15", net="UMTS", pending=[], retry=[])
    ])

    # now send the channel's updates
    response = self.sync(self.tel_channel, post_data)

    # only one alert
    self.assertEqual(1, Alert.objects.all().count())

    # and we end all alert related to this issue
    self.assertEqual(0, Alert.objects.filter(sync_event__channel=self.tel_channel, ended_on=None, alert_type='P').count())

    # clear the alerts
    Alert.objects.all().delete()

    # the case the status is in unknown state
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="UNK", p_src="BAT", p_lvl="15", net="UMTS", pending=[], retry=[])
    ])

    # now send the channel's updates
    response = self.sync(self.tel_channel, post_data)

    # we should now create a new alert
    self.assertEqual(1, Alert.objects.all().count())

    # one alert not ended
    self.assertEqual(1, Alert.objects.filter(sync_event__channel=self.tel_channel, ended_on=None, alert_type='P').count())

    # Let plug the channel to charger to end this unknown power status
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="CHA", p_src="BAT", p_lvl="15", net="UMTS", pending=[], retry=[])
    ])

    # now send the channel's updates
    response = self.sync(self.tel_channel, post_data)

    # still only one alert
    self.assertEqual(1, Alert.objects.all().count())

    # and we end all alert related to this issue
    self.assertEqual(0, Alert.objects.filter(sync_event__channel=self.tel_channel, ended_on=None, alert_type='P').count())

    # clear all the alerts
    Alert.objects.all().delete()

    # the case the status is in not charging state
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="NOT", p_src="BAT", p_lvl="15", net="UMTS", pending=[], retry=[])
    ])

    # now send the channel's updates
    response = self.sync(self.tel_channel, post_data)

    # we should now create a new alert
    self.assertEqual(1, Alert.objects.all().count())

    # one alert not ended
    self.assertEqual(1, Alert.objects.filter(sync_event__channel=self.tel_channel, ended_on=None, alert_type='P').count())

    # Let plug the channel to charger to end this unknown power status
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="CHA", p_src="BAT", p_lvl="15", net="UMTS", pending=[], retry=[])
    ])

    # now send the channel's updates
    response = self.sync(self.tel_channel, post_data)

    # first we have a new alert created
    self.assertEqual(1, Alert.objects.all().count())

    # and we end all alert related to this issue
    self.assertEqual(0, Alert.objects.filter(sync_event__channel=self.tel_channel, ended_on=None, alert_type='P').count())
def test_signing(self):
    """Sync requests with an invalid signature should be rejected with 401."""
    # good signature
    self.assertEqual(200, self.sync(self.tel_channel).status_code)

    # bad signature, should result in 401 Unauthorized
    self.assertEqual(401, self.sync(self.tel_channel, signature="badsig").status_code)
def test_inbox_duplication(self):
    """Re-synced incoming messages with the same phone/text/timestamp must not be duplicated."""
    # if the connection gets interrupted but some messages succeed, we want to make sure subsequent
    # syncs do not result in duplication of messages from the inbox
    date = timezone.now()
    date = int(time.mktime(date.timetuple())) * 1000

    post_data = dict(cmds=[
        dict(cmd="mo_sms", phone="2505551212", msg="First message", p_id="1", ts=date),
        dict(cmd="mo_sms", phone="2505551212", msg="First message", p_id="2", ts=date),
        dict(cmd="mo_sms", phone="2505551212", msg="A second message", p_id="3", ts=date)
    ])

    response = self.sync(self.tel_channel, post_data)
    self.assertEqual(200, response.status_code)

    responses = response.json()
    cmds = responses['cmds']

    # check the server gave us responses for our messages
    r0 = self.get_response(cmds, '1')
    r1 = self.get_response(cmds, '2')
    r2 = self.get_response(cmds, '3')

    self.assertIsNotNone(r0)
    self.assertIsNotNone(r1)
    self.assertIsNotNone(r2)

    # first two should have the same server id
    self.assertEqual(r0['extra'], r1['extra'])

    # One was a duplicate, should only have 2
    self.assertEqual(2, Msg.objects.filter(direction='I').count())
def get_response(self, responses, p_id):
    """Return the first response command whose 'p_id' matches, or None if absent."""
    for response in responses:
        if 'p_id' in response and response['p_id'] == p_id:
            return response
    # make the "not found" outcome explicit rather than falling off the end
    return None
class ChannelBatchTest(TembaTest):

    def test_time_utils(self):
        """datetime_to_ms and ms_to_datetime should round-trip a millisecond-precision datetime."""
        from temba.utils import datetime_to_ms, ms_to_datetime
        now = timezone.now()
        # truncate to whole milliseconds since the epoch conversion can't carry
        # sub-millisecond precision; use floor division so the result stays an
        # int on Python 3 (true division would raise on .replace())
        now = now.replace(microsecond=now.microsecond // 1000 * 1000)

        epoch = datetime_to_ms(now)
        self.assertEqual(ms_to_datetime(epoch), now)
class ChannelEventTest(TembaTest):

    def test_create(self):
        """Creating a channel event resolves the URN to a contact and stores all event fields."""
        occurred_on = timezone.now()
        urn = "tel:+250783535665"

        event = ChannelEvent.create(self.channel, urn, ChannelEvent.TYPE_CALL_OUT, occurred_on, 300)

        # a contact should have been created for the URN
        contact = Contact.objects.get()
        self.assertEqual(urn, contact.get_urn().urn)

        # the event should carry the org, channel, contact and the call details
        self.assertEqual(self.org, event.org)
        self.assertEqual(self.channel, event.channel)
        self.assertEqual(contact, event.contact)
        self.assertEqual(ChannelEvent.TYPE_CALL_OUT, event.event_type)
        self.assertEqual(occurred_on, event.time)
        self.assertEqual(300, event.duration)
class ChannelEventCRUDLTest(TembaTest):

    def test_calls(self):
        """The calls list view should show only call events, not unknown event types."""
        now = timezone.now()

        ChannelEvent.create(self.channel, "tel:12345", ChannelEvent.TYPE_CALL_IN, now, 600)
        ChannelEvent.create(self.channel, "tel:890", ChannelEvent.TYPE_CALL_IN_MISSED, now, 0)
        ChannelEvent.create(self.channel, "tel:456767", ChannelEvent.TYPE_UNKNOWN, now, 0)

        list_url = reverse('channels.channelevent_calls')

        response = self.fetch_protected(list_url, self.user)

        # the unknown event is excluded from the call list
        self.assertEqual(response.context['object_list'].count(), 2)
        self.assertContains(response, "Missed Incoming Call")
        self.assertContains(response, "Incoming Call (600 seconds)")
class SyncEventTest(SmartminTest):

    def setUp(self):
        # minimal org/user/channel fixture for sync event creation
        self.superuser = User.objects.create_superuser(username="super", email="super@user.com", password="super")
        self.user = self.create_user("tito")
        self.org = Org.objects.create(name="Temba", timezone="Africa/Kigali", created_by=self.user, modified_by=self.user)
        self.tel_channel = Channel.create(self.org, self.user, 'RW', 'A', "Test Channel", "0785551212",
                                          secret="12345", gcm_id="123")

    def test_sync_event_model(self):
        """SyncEvent.create records pending/retry ids and never changes a claimed channel's country."""
        self.sync_event = SyncEvent.create(self.tel_channel, dict(p_src="AC", p_sts="DIS", p_lvl=80, net="WIFI",
                                                                  pending=[1, 2], retry=[3, 4], cc='RW'), [1, 2])
        self.assertEqual(SyncEvent.objects.all().count(), 1)
        self.assertEqual(self.sync_event.get_pending_messages(), [1, 2])
        self.assertEqual(self.sync_event.get_retry_messages(), [3, 4])
        self.assertEqual(self.sync_event.incoming_command_count, 0)

        # second sync reports a different country code ('US')
        self.sync_event = SyncEvent.create(self.tel_channel, dict(p_src="AC", p_sts="DIS", p_lvl=80, net="WIFI",
                                                                  pending=[1, 2], retry=[3, 4], cc='US'), [1])
        self.assertEqual(self.sync_event.incoming_command_count, 0)

        self.tel_channel = Channel.objects.get(pk=self.tel_channel.pk)

        # we shouldn't update country once the relayer is claimed
        self.assertEqual('RW', self.tel_channel.country)
class ChannelAlertTest(TembaTest):

    def test_no_alert_email(self):
        """No alert emails should be sent when the channel lacks an alert address or an org."""
        # set our last seen to a while ago
        self.channel.last_seen = timezone.now() - timedelta(minutes=40)
        self.channel.save()

        check_channels_task()

        # no alert email configured, so nothing goes out
        self.assertEqual(0, len(mail.outbox))

        # add alert email, remove org and set last seen to now to force an resolve email to try to send
        self.channel.alert_email = 'fred@unicef.org'
        self.channel.org = None
        self.channel.last_seen = timezone.now()
        self.channel.save()

        check_channels_task()

        # channel has no org, so still no email
        self.assertEqual(0, len(mail.outbox))
class ChannelClaimTest(TembaTest):
def test_external(self):
    """Claiming an external channel stores its config and substitutes send URL variables."""
    Channel.objects.all().delete()

    self.login(self.admin)

    # should see the general channel claim page
    response = self.client.get(reverse('channels.channel_claim'))
    self.assertContains(response, reverse('channels.channel_claim_external'))

    # try to claim a channel
    response = self.client.get(reverse('channels.channel_claim_external'))
    post_data = response.context['form'].initial

    url = 'http://test.com/send.php?from={{from}}&text={{text}}&to={{to}}'

    post_data['number'] = '12345'
    post_data['country'] = 'RW'
    post_data['url'] = url
    post_data['method'] = 'GET'
    post_data['scheme'] = 'tel'

    response = self.client.post(reverse('channels.channel_claim_external'), post_data)

    channel = Channel.objects.get()

    self.assertEqual('RW', channel.country)
    self.assertTrue(channel.uuid)
    self.assertEqual(post_data['number'], channel.address)
    self.assertEqual(post_data['url'], channel.config_json()['send_url'])
    self.assertEqual(post_data['method'], channel.config_json()['method'])
    self.assertEqual(Channel.TYPE_EXTERNAL, channel.channel_type)

    config_url = reverse('channels.channel_configuration', args=[channel.pk])
    self.assertRedirect(response, config_url)

    response = self.client.get(config_url)
    self.assertEqual(200, response.status_code)

    self.assertContains(response, reverse('handlers.external_handler', args=['sent', channel.uuid]))
    self.assertContains(response, reverse('handlers.external_handler', args=['delivered', channel.uuid]))
    self.assertContains(response, reverse('handlers.external_handler', args=['failed', channel.uuid]))
    self.assertContains(response, reverse('handlers.external_handler', args=['received', channel.uuid]))

    # test substitution in our url
    self.assertEqual('http://test.com/send.php?from=5080&text=test&to=%2B250788383383',
                     channel.build_send_url(url, {'from': "5080", 'text': "test", 'to': "+250788383383"}))

    # test substitution with unicode
    self.assertEqual('http://test.com/send.php?from=5080&text=Reply+%E2%80%9C1%E2%80%9D+for+good&to=%2B250788383383',
                     channel.build_send_url(url, {
                         'from': "5080",
                         'text': "Reply “1” for good",
                         'to': "+250788383383"
                     }))
def test_clickatell(self):
    """Claiming a Clickatell channel normalizes the number and stores api credentials."""
    Channel.objects.all().delete()

    self.login(self.admin)

    # should see the general channel claim page
    response = self.client.get(reverse('channels.channel_claim'))
    self.assertContains(response, reverse('channels.channel_claim_clickatell'))

    # try to claim a channel
    response = self.client.get(reverse('channels.channel_claim_clickatell'))
    post_data = response.context['form'].initial

    post_data['api_id'] = '12345'
    post_data['username'] = 'uname'
    post_data['password'] = 'pword'
    post_data['country'] = 'US'
    post_data['number'] = '(206) 555-1212'

    response = self.client.post(reverse('channels.channel_claim_clickatell'), post_data)

    channel = Channel.objects.get()

    self.assertEqual('US', channel.country)
    self.assertTrue(channel.uuid)
    # number is normalized to E.164
    self.assertEqual('+12065551212', channel.address)
    self.assertEqual(post_data['api_id'], channel.config_json()['api_id'])
    self.assertEqual(post_data['username'], channel.config_json()['username'])
    self.assertEqual(post_data['password'], channel.config_json()['password'])
    self.assertEqual(Channel.TYPE_CLICKATELL, channel.channel_type)

    config_url = reverse('channels.channel_configuration', args=[channel.pk])
    self.assertRedirect(response, config_url)

    response = self.client.get(config_url)
    self.assertEqual(200, response.status_code)

    self.assertContains(response, reverse('handlers.clickatell_handler', args=['status', channel.uuid]))
    self.assertContains(response, reverse('handlers.clickatell_handler', args=['receive', channel.uuid]))
def test_high_connection(self):
    """Claiming a High Connection channel stores credentials and shows its receive handler."""
    Channel.objects.all().delete()

    self.login(self.admin)

    # try to claim a channel
    response = self.client.get(reverse('channels.channel_claim_high_connection'))
    post_data = response.context['form'].initial

    post_data['username'] = 'uname'
    post_data['password'] = 'pword'
    post_data['number'] = '5151'
    post_data['country'] = 'FR'

    response = self.client.post(reverse('channels.channel_claim_high_connection'), post_data)

    channel = Channel.objects.get()

    self.assertEqual('FR', channel.country)
    self.assertTrue(channel.uuid)
    self.assertEqual(post_data['number'], channel.address)
    self.assertEqual(post_data['username'], channel.config_json()['username'])
    self.assertEqual(post_data['password'], channel.config_json()['password'])
    self.assertEqual(Channel.TYPE_HIGH_CONNECTION, channel.channel_type)

    config_url = reverse('channels.channel_configuration', args=[channel.pk])
    self.assertRedirect(response, config_url)

    response = self.client.get(config_url)
    self.assertEqual(200, response.status_code)

    self.assertContains(response, reverse('handlers.hcnx_handler', args=['receive', channel.uuid]))
@override_settings(IP_ADDRESSES=('10.10.10.10', '172.16.20.30'))
def test_claim_dart_media(self):
    """Claiming a Dart Media channel stores credentials and shows the IPs to whitelist."""
    Channel.objects.all().delete()

    self.login(self.admin)

    # try to claim a channel
    response = self.client.get(reverse('channels.channel_claim_dart_media'))
    self.assertEqual(response.context['view'].get_country({}), 'Indonesia')

    post_data = response.context['form'].initial

    post_data['username'] = 'uname'
    post_data['password'] = 'pword'
    post_data['number'] = '5151'
    post_data['country'] = 'ID'

    response = self.client.post(reverse('channels.channel_claim_dart_media'), post_data)

    channel = Channel.objects.get()

    self.assertEqual('ID', channel.country)
    self.assertTrue(channel.uuid)
    self.assertEqual(post_data['number'], channel.address)
    self.assertEqual(post_data['username'], channel.config_json()['username'])
    self.assertEqual(post_data['password'], channel.config_json()['password'])
    self.assertEqual(Channel.TYPE_DARTMEDIA, channel.channel_type)

    config_url = reverse('channels.channel_configuration', args=[channel.pk])
    self.assertRedirect(response, config_url)

    response = self.client.get(config_url)
    self.assertEqual(200, response.status_code)

    self.assertContains(response, reverse('handlers.dartmedia_handler', args=['received', channel.uuid]))

    # check we show the IP to whitelist
    self.assertContains(response, "10.10.10.10")
    self.assertContains(response, "172.16.20.30")
def test_shaqodoon(self):
    """Claiming a Shaqodoon channel defaults to Somalia and stores URL, credentials and key."""
    Channel.objects.all().delete()

    self.login(self.admin)

    # try to claim a channel
    response = self.client.get(reverse('channels.channel_claim_shaqodoon'))
    post_data = response.context['form'].initial

    post_data['username'] = 'uname'
    post_data['password'] = 'pword'
    post_data['url'] = 'http://test.com/send.php'
    post_data['key'] = 'secret_key'
    post_data['number'] = '301'

    response = self.client.post(reverse('channels.channel_claim_shaqodoon'), post_data)

    channel = Channel.objects.get()

    self.assertEqual('SO', channel.country)
    self.assertTrue(channel.uuid)
    self.assertEqual(post_data['number'], channel.address)
    self.assertEqual(post_data['url'], channel.config_json()['send_url'])
    self.assertEqual(post_data['username'], channel.config_json()['username'])
    self.assertEqual(post_data['password'], channel.config_json()['password'])
    self.assertEqual(post_data['key'], channel.config_json()['key'])
    self.assertEqual(Channel.TYPE_SHAQODOON, channel.channel_type)

    config_url = reverse('channels.channel_configuration', args=[channel.pk])
    self.assertRedirect(response, config_url)

    response = self.client.get(config_url)
    self.assertEqual(200, response.status_code)

    self.assertContains(response, reverse('handlers.shaqodoon_handler', args=['received', channel.uuid]))
def test_kannel(self):
    """Claiming a Kannel channel stores its config and generates send credentials."""
    Channel.objects.all().delete()

    self.login(self.admin)

    # should see the general channel claim page
    response = self.client.get(reverse('channels.channel_claim'))
    self.assertContains(response, reverse('channels.channel_claim_kannel'))

    # try to claim a channel
    response = self.client.get(reverse('channels.channel_claim_kannel'))
    post_data = response.context['form'].initial

    post_data['number'] = '3071'
    post_data['country'] = 'RW'
    post_data['url'] = 'http://kannel.temba.com/cgi-bin/sendsms'
    post_data['verify_ssl'] = False
    post_data['encoding'] = Channel.ENCODING_SMART

    response = self.client.post(reverse('channels.channel_claim_kannel'), post_data)

    channel = Channel.objects.get()

    self.assertEqual('RW', channel.country)
    self.assertTrue(channel.uuid)
    self.assertEqual(post_data['number'], channel.address)
    self.assertEqual(post_data['url'], channel.config_json()['send_url'])
    self.assertEqual(False, channel.config_json()['verify_ssl'])
    self.assertEqual(Channel.ENCODING_SMART, channel.config_json()[Channel.CONFIG_ENCODING])

    # make sure we generated a username and password
    self.assertTrue(channel.config_json()['username'])
    self.assertTrue(channel.config_json()['password'])
    self.assertEqual(Channel.TYPE_KANNEL, channel.channel_type)

    config_url = reverse('channels.channel_configuration', args=[channel.pk])
    self.assertRedirect(response, config_url)

    response = self.client.get(config_url)
    self.assertEqual(200, response.status_code)

    # our configuration page should list our receive URL
    self.assertContains(response, reverse('handlers.kannel_handler', args=['receive', channel.uuid]))
def test_zenvia(self):
    """Zenvia claiming is only offered to Brazilian-timezone orgs and stores account config."""
    Channel.objects.all().delete()

    self.login(self.admin)

    # shouldn't be able to see the claim zenvia page if we aren't part of that group
    response = self.client.get(reverse('channels.channel_claim'))
    self.assertNotContains(response, "Zenvia")

    # but if we are in the proper time zone
    self.org.timezone = pytz.timezone('America/Sao_Paulo')
    self.org.save()

    response = self.client.get(reverse('channels.channel_claim'))
    self.assertContains(response, "Zenvia")

    # try to claim a channel
    response = self.client.get(reverse('channels.channel_claim_zenvia'))
    post_data = response.context['form'].initial

    post_data['account'] = 'rapidpro.gw'
    post_data['code'] = 'h7GpAIEp85'
    post_data['shortcode'] = '28595'

    response = self.client.post(reverse('channels.channel_claim_zenvia'), post_data)

    channel = Channel.objects.get()

    self.assertEqual('BR', channel.country)
    self.assertEqual(post_data['account'], channel.config_json()['account'])
    self.assertEqual(post_data['code'], channel.config_json()['code'])
    self.assertEqual(post_data['shortcode'], channel.address)
    self.assertEqual('ZV', channel.channel_type)

    config_url = reverse('channels.channel_configuration', args=[channel.pk])
    self.assertRedirect(response, config_url)

    response = self.client.get(config_url)
    self.assertEqual(200, response.status_code)

    self.assertContains(response, reverse('handlers.zenvia_handler', args=['status', channel.uuid]))
    self.assertContains(response, reverse('handlers.zenvia_handler', args=['receive', channel.uuid]))
def test_claim_africa(self):
    """Claiming an Africa's Talking channel stores credentials and shows its handlers."""
    Channel.objects.all().delete()
    self.login(self.admin)

    # visit the africa's talking page
    response = self.client.get(reverse('channels.channel_claim_africas_talking'))
    self.assertEqual(200, response.status_code)
    post_data = response.context['form'].initial

    post_data['shortcode'] = '5259'
    post_data['username'] = 'temba'
    post_data['api_key'] = 'asdf-asdf-asdf-asdf-asdf'
    post_data['country'] = 'KE'

    response = self.client.post(reverse('channels.channel_claim_africas_talking'), post_data)

    channel = Channel.objects.get()

    self.assertEqual('temba', channel.config_json()['username'])
    self.assertEqual('asdf-asdf-asdf-asdf-asdf', channel.config_json()['api_key'])
    self.assertEqual('5259', channel.address)
    self.assertEqual('KE', channel.country)
    self.assertEqual('AT', channel.channel_type)

    config_url = reverse('channels.channel_configuration', args=[channel.pk])
    self.assertRedirect(response, config_url)

    response = self.client.get(config_url)
    self.assertEqual(200, response.status_code)

    self.assertContains(response, reverse('handlers.africas_talking_handler', args=['callback', channel.uuid]))
    self.assertContains(response, reverse('handlers.africas_talking_handler', args=['delivery', channel.uuid]))
def test_claim_viber(self):
    """Viber channels are created without an address, then claimed with a service id."""
    Channel.objects.all().delete()
    self.login(self.admin)
    response = self.client.get(reverse('channels.channel_create_viber'))
    self.assertEqual(200, response.status_code)
    response = self.client.post(reverse('channels.channel_create_viber'), dict(name="Macklemore"))

    # should create a new viber channel, but without an address
    channel = Channel.objects.get()

    self.assertEqual(channel.address, Channel.VIBER_NO_SERVICE_ID)
    self.assertIsNone(channel.country.code)
    self.assertEqual(channel.name, "Macklemore")
    self.assertEqual(Channel.TYPE_VIBER, channel.channel_type)

    # we should be redirecting to the claim page to enter in our service id
    claim_url = reverse('channels.channel_claim_viber', args=[channel.id])
    self.assertRedirect(response, claim_url)

    response = self.client.get(claim_url)
    self.assertContains(response, reverse('handlers.viber_handler', args=['status', channel.uuid]))
    self.assertContains(response, reverse('handlers.viber_handler', args=['receive', channel.uuid]))

    # going to our account home should link to our claim page
    response = self.client.get(reverse('orgs.org_home'))
    self.assertContains(response, claim_url)

    # ok, enter our service id
    response = self.client.post(claim_url, dict(service_id=1001))

    # refetch our channel
    channel.refresh_from_db()

    # should now have an address
    self.assertEqual(channel.address, '1001')

    config_url = reverse('channels.channel_configuration', args=[channel.pk])
    self.assertRedirect(response, config_url)

    response = self.client.get(config_url)
    self.assertContains(response, reverse('handlers.viber_handler', args=['status', channel.uuid]))
    self.assertContains(response, reverse('handlers.viber_handler', args=['receive', channel.uuid]))

    # once claimed, account page should go to read page
    response = self.client.get(reverse('orgs.org_home'))
    self.assertContains(response, reverse('channels.channel_read', args=[channel.uuid]))
def test_claim_chikka(self):
    """Claiming a Chikka channel defaults to the Philippines and stores credentials."""
    Channel.objects.all().delete()
    self.login(self.admin)
    response = self.client.get(reverse('channels.channel_claim_chikka'))
    self.assertEqual(200, response.status_code)
    self.assertEqual(response.context['view'].get_country({}), 'Philippines')

    post_data = response.context['form'].initial

    post_data['number'] = '5259'
    post_data['username'] = 'chikka'
    post_data['password'] = 'password'

    response = self.client.post(reverse('channels.channel_claim_chikka'), post_data)
    channel = Channel.objects.get()

    self.assertEqual('chikka', channel.config_json()[Channel.CONFIG_USERNAME])
    self.assertEqual('password', channel.config_json()[Channel.CONFIG_PASSWORD])
    self.assertEqual('5259', channel.address)
    self.assertEqual('PH', channel.country)
    self.assertEqual(Channel.TYPE_CHIKKA, channel.channel_type)

    config_url = reverse('channels.channel_configuration', args=[channel.pk])
    self.assertRedirect(response, config_url)

    response = self.client.get(config_url)
    self.assertEqual(200, response.status_code)

    self.assertContains(response, reverse('handlers.chikka_handler', args=[channel.uuid]))
def test_claim_vumi_ussd(self):
    """Claiming a Vumi USSD channel generates an access token and defaults the API URL."""
    Channel.objects.all().delete()
    self.login(self.admin)
    response = self.client.get(reverse('channels.channel_claim_vumi_ussd'))
    self.assertEqual(200, response.status_code)

    post_data = {
        "country": "ZA",
        "number": "+273454325324",
        "account_key": "account1",
        "conversation_key": "conversation1",
    }

    response = self.client.post(reverse('channels.channel_claim_vumi_ussd'), post_data)

    channel = Channel.objects.get()

    # a random version-4 UUID access token is generated on claim
    self.assertTrue(uuid.UUID(channel.config_json()['access_token'], version=4))
    self.assertEqual(channel.country, post_data['country'])
    self.assertEqual(channel.address, post_data['number'])
    self.assertEqual(channel.config_json()['account_key'], post_data['account_key'])
    self.assertEqual(channel.config_json()['conversation_key'], post_data['conversation_key'])
    self.assertEqual(channel.config_json()['api_url'], Channel.VUMI_GO_API_URL)
    self.assertEqual(channel.channel_type, Channel.TYPE_VUMI_USSD)

    config_url = reverse('channels.channel_configuration', args=[channel.pk])
    self.assertRedirect(response, config_url)

    response = self.client.get(config_url)
    self.assertEqual(200, response.status_code)

    self.assertContains(response, reverse('handlers.vumi_handler', args=['receive', channel.uuid]))
    self.assertContains(response, reverse('handlers.vumi_handler', args=['event', channel.uuid]))
def test_claim_vumi_ussd_custom_api(self):
    """A custom api_url supplied at claim time overrides the default Vumi Go API URL."""
    Channel.objects.all().delete()
    self.login(self.admin)
    response = self.client.get(reverse('channels.channel_claim_vumi_ussd'))
    self.assertEqual(200, response.status_code)

    post_data = {
        "country": "ZA",
        "number": "+273454325324",
        "account_key": "account1",
        "conversation_key": "conversation1",
        "api_url": "http://custom.api.url"
    }

    response = self.client.post(reverse('channels.channel_claim_vumi_ussd'), post_data)

    channel = Channel.objects.get()

    # a random version-4 UUID access token is generated on claim
    self.assertTrue(uuid.UUID(channel.config_json()['access_token'], version=4))
    self.assertEqual(channel.country, post_data['country'])
    self.assertEqual(channel.address, post_data['number'])
    self.assertEqual(channel.config_json()['account_key'], post_data['account_key'])
    self.assertEqual(channel.config_json()['conversation_key'], post_data['conversation_key'])
    self.assertEqual(channel.config_json()['api_url'], "http://custom.api.url")
    self.assertEqual(channel.channel_type, Channel.TYPE_VUMI_USSD)
@override_settings(SEND_EMAILS=True)
def test_disconnected_alert(self):
    """A stale channel raises one disconnected alert/email; reappearing ends it with a resolve email."""
    # set our last seen to a while ago
    self.channel.alert_email = 'fred@unicef.org'
    self.channel.last_seen = timezone.now() - timedelta(minutes=40)
    self.channel.save()

    check_channels_task()

    # should have created one alert
    alert = Alert.objects.get()
    self.assertEqual(self.channel, alert.channel)
    self.assertEqual(Alert.TYPE_DISCONNECTED, alert.alert_type)
    self.assertFalse(alert.ended_on)
    self.assertEqual(1, len(mail.outbox))

    # the email body should match the rendered disconnected template
    template = 'channels/email/disconnected_alert.txt'
    context = dict(org=self.channel.org, channel=self.channel, now=timezone.now(),
                   branding=self.channel.org.get_branding(),
                   last_seen=self.channel.last_seen, sync=alert.sync_event)

    text_template = loader.get_template(template)
    text = text_template.render(Context(context))

    self.assertEqual(mail.outbox[0].body, text)

    # call it again
    check_channels_task()

    # still only one alert
    self.assertEqual(1, Alert.objects.all().count())
    self.assertEqual(1, len(mail.outbox))

    # ok, let's have the channel show up again
    self.channel.last_seen = timezone.now() + timedelta(minutes=5)
    self.channel.save()

    check_channels_task()

    # still only one alert, but it is now ended
    alert = Alert.objects.get()
    self.assertTrue(alert.ended_on)
    self.assertEqual(2, len(mail.outbox))

    # the resolve email should match the rendered connected template
    template = 'channels/email/connected_alert.txt'
    context = dict(org=self.channel.org, channel=self.channel, now=timezone.now(),
                   branding=self.channel.org.get_branding(),
                   last_seen=self.channel.last_seen, sync=alert.sync_event)

    text_template = loader.get_template(template)
    text = text_template.render(Context(context))

    self.assertEqual(mail.outbox[1].body, text)
def test_m3tech(self):
    """
    Claiming an M3Tech channel stores the posted credentials, normalizes the
    number to E164 and shows all four handler URLs on the configuration page.
    """
    Channel.objects.all().delete()
    self.login(self.admin)

    # fetch the claim page and post back its initial form data
    response = self.client.get(reverse('channels.channel_claim_m3tech'))
    post_data = response.context['form'].initial
    post_data['country'] = 'PK'
    post_data['number'] = '250788123123'
    post_data['username'] = 'user1'
    post_data['password'] = 'pass1'
    response = self.client.post(reverse('channels.channel_claim_m3tech'), post_data)

    # assertEquals is a deprecated alias (removed in Python 3.12) - use assertEqual
    channel = Channel.objects.get()
    self.assertEqual('PK', channel.country)
    self.assertEqual(post_data['username'], channel.config_json()['username'])
    self.assertEqual(post_data['password'], channel.config_json()['password'])
    # number should have been normalized to E164
    self.assertEqual('+250788123123', channel.address)
    self.assertEqual(Channel.TYPE_M3TECH, channel.channel_type)

    # claiming should redirect to the channel configuration page
    config_url = reverse('channels.channel_configuration', args=[channel.pk])
    self.assertRedirect(response, config_url)
    response = self.client.get(config_url)
    self.assertEqual(200, response.status_code)

    # which should list the four callback URLs for this channel
    self.assertContains(response, reverse('handlers.m3tech_handler', args=['received', channel.uuid]))
    self.assertContains(response, reverse('handlers.m3tech_handler', args=['sent', channel.uuid]))
    self.assertContains(response, reverse('handlers.m3tech_handler', args=['failed', channel.uuid]))
    self.assertContains(response, reverse('handlers.m3tech_handler', args=['delivered', channel.uuid]))
def test_infobip(self):
    """
    Claiming an Infobip channel stores the posted credentials, normalizes the
    number to E164 and shows the handler URLs on the configuration page.
    """
    Channel.objects.all().delete()
    self.login(self.admin)

    # fetch the claim page and post back its initial form data
    response = self.client.get(reverse('channels.channel_claim_infobip'))
    post_data = response.context['form'].initial
    post_data['country'] = 'NI'
    post_data['number'] = '250788123123'
    post_data['username'] = 'user1'
    post_data['password'] = 'pass1'
    response = self.client.post(reverse('channels.channel_claim_infobip'), post_data)

    # assertEquals is a deprecated alias (removed in Python 3.12) - use assertEqual
    channel = Channel.objects.get()
    self.assertEqual('NI', channel.country)
    self.assertEqual(post_data['username'], channel.config_json()['username'])
    self.assertEqual(post_data['password'], channel.config_json()['password'])
    # number should have been normalized to E164
    self.assertEqual('+250788123123', channel.address)
    self.assertEqual('IB', channel.channel_type)

    # claiming should redirect to the channel configuration page
    config_url = reverse('channels.channel_configuration', args=[channel.pk])
    self.assertRedirect(response, config_url)
    response = self.client.get(config_url)
    self.assertEqual(200, response.status_code)

    # which should list the receive and delivery callback URLs
    self.assertContains(response, reverse('handlers.infobip_handler', args=['received', channel.uuid]))
    self.assertContains(response, reverse('handlers.infobip_handler', args=['delivered', channel.uuid]))
@override_settings(SEND_EMAILS=True)
def test_sms_alert(self):
    """
    Exercise the SMS (stuck outgoing message) alert lifecycle: an alert is
    raised when a queued message lingers without a recent successful send,
    not duplicated within an hour, and ended once the message goes through.
    """
    contact = self.create_contact("John Doe", '123')
    # create a message from two hours ago
    one_hour_ago = timezone.now() - timedelta(hours=1)
    two_hours_ago = timezone.now() - timedelta(hours=2)
    three_hours_ago = timezone.now() - timedelta(hours=3)
    four_hours_ago = timezone.now() - timedelta(hours=4)
    five_hours_ago = timezone.now() - timedelta(hours=5)
    six_hours_ago = timezone.now() - timedelta(hours=6)
    msg1 = self.create_msg(text="Message One", contact=contact, created_on=five_hours_ago, status='Q')
    # make sure our channel has been seen recently
    self.channel.last_seen = timezone.now()
    self.channel.alert_email = 'fred@unicef.org'
    self.channel.org = self.org
    self.channel.save()
    # ok check on our channel
    check_channels_task()
    # we don't have successfully sent message and we have an alert and only one
    self.assertEquals(Alert.objects.all().count(), 1)
    alert = Alert.objects.get()
    self.assertEquals(self.channel, alert.channel)
    self.assertEquals(Alert.TYPE_SMS, alert.alert_type)
    self.assertFalse(alert.ended_on)
    self.assertTrue(len(mail.outbox) == 1)
    # let's end the alert
    alert = Alert.objects.all()[0]
    alert.ended_on = six_hours_ago
    alert.save()
    dany = self.create_contact("Dany Craig", "765")
    # let have a recent sent message
    sent_msg = self.create_msg(text="SENT Message", contact=dany, created_on=four_hours_ago, sent_on=one_hour_ago, status='D')
    # ok check on our channel
    check_channels_task()
    # if latest_sent_message is after our queued message no alert is created
    self.assertEquals(Alert.objects.all().count(), 1)
    # consider the sent message was sent before our queued msg
    sent_msg.sent_on = three_hours_ago
    sent_msg.save()
    msg1.delete()
    msg1 = self.create_msg(text="Message One", contact=contact, created_on=two_hours_ago, status='Q')
    # check our channel again
    check_channels_task()
    # no new alert created because we sent one in the past hour
    self.assertEquals(Alert.objects.all().count(), 1)
    # age both the sent message and the previous alert past the one-hour window
    sent_msg.sent_on = six_hours_ago
    sent_msg.save()
    alert = Alert.objects.all()[0]
    alert.created_on = six_hours_ago
    alert.save()
    # check our channel again
    check_channels_task()
    # this time we have a new alert and should create only one
    self.assertEquals(Alert.objects.all().count(), 2)
    # get the alert which is not ended
    alert = Alert.objects.get(ended_on=None)
    self.assertEquals(self.channel, alert.channel)
    self.assertEquals(Alert.TYPE_SMS, alert.alert_type)
    self.assertFalse(alert.ended_on)
    self.assertTrue(len(mail.outbox) == 2)
    # run again, nothing should change
    check_channels_task()
    alert = Alert.objects.get(ended_on=None)
    self.assertFalse(alert.ended_on)
    self.assertTrue(len(mail.outbox) == 2)
    # fix our message
    msg1.status = 'D'
    msg1.save()
    # run again, our alert should end
    check_channels_task()
    # still only one alert though, and no new email sent, alert must not be ended before one hour
    alert = Alert.objects.all().latest('ended_on')
    self.assertTrue(alert.ended_on)
    self.assertTrue(len(mail.outbox) == 2)
class ChannelCountTest(TembaTest):
    """Tests for per-channel daily message/IVR counts (ChannelCount)."""

    def assertDailyCount(self, channel, assert_count, count_type, day):
        """Assert the stored daily count for the given channel/type/day."""
        calculated_count = ChannelCount.get_day_count(channel, count_type, day)
        self.assertEquals(assert_count, calculated_count)

    def test_daily_counts(self):
        """Counts track real messages only and are never decremented on delete."""
        # test that messages to test contacts aren't counted
        self.admin.set_org(self.org)
        test_contact = Contact.get_test_contact(self.admin)
        Msg.create_outgoing(self.org, self.admin, test_contact, "Test Message", channel=self.channel)
        # no channel counts
        self.assertFalse(ChannelCount.objects.all())
        # real contact, but no channel
        Msg.create_incoming(None, 'tel:+250788111222', "Test Message", org=self.org)
        # still no channel counts
        self.assertFalse(ChannelCount.objects.all())
        # incoming msg with a channel
        msg = Msg.create_incoming(self.channel, 'tel:+250788111222', "Test Message", org=self.org)
        self.assertDailyCount(self.channel, 1, ChannelCount.INCOMING_MSG_TYPE, msg.created_on.date())
        # insert another
        msg = Msg.create_incoming(self.channel, 'tel:+250788111222', "Test Message", org=self.org)
        self.assertDailyCount(self.channel, 2, ChannelCount.INCOMING_MSG_TYPE, msg.created_on.date())
        # squash our counts
        squash_channelcounts()
        # same count
        self.assertDailyCount(self.channel, 2, ChannelCount.INCOMING_MSG_TYPE, msg.created_on.date())
        # and only one channel count
        self.assertEquals(ChannelCount.objects.all().count(), 1)
        # deleting a message doesn't decrement the count
        msg.delete()
        self.assertDailyCount(self.channel, 2, ChannelCount.INCOMING_MSG_TYPE, msg.created_on.date())
        ChannelCount.objects.all().delete()
        # ok, test outgoing now
        real_contact = Contact.get_or_create(self.org, self.admin, urns=['tel:+250788111222'])
        msg = Msg.create_outgoing(self.org, self.admin, real_contact, "Real Message", channel=self.channel)
        ChannelLog.objects.create(channel=self.channel, msg=msg, description="Unable to send", is_error=True)
        # squash our counts
        squash_channelcounts()
        self.assertDailyCount(self.channel, 1, ChannelCount.OUTGOING_MSG_TYPE, msg.created_on.date())
        self.assertEqual(ChannelCount.objects.filter(count_type=ChannelCount.SUCCESS_LOG_TYPE).count(), 0)
        self.assertEqual(ChannelCount.objects.filter(count_type=ChannelCount.ERROR_LOG_TYPE).count(), 1)
        # deleting a message still doesn't decrement the count
        msg.delete()
        self.assertDailyCount(self.channel, 1, ChannelCount.OUTGOING_MSG_TYPE, msg.created_on.date())
        ChannelCount.objects.all().delete()
        # incoming IVR
        msg = Msg.create_incoming(self.channel, 'tel:+250788111222',
                                  "Test Message", org=self.org, msg_type=IVR)
        self.assertDailyCount(self.channel, 1, ChannelCount.INCOMING_IVR_TYPE, msg.created_on.date())
        # deleting the IVR msg doesn't decrement the count either
        msg.delete()
        self.assertDailyCount(self.channel, 1, ChannelCount.INCOMING_IVR_TYPE, msg.created_on.date())
        ChannelCount.objects.all().delete()
        # outgoing ivr
        msg = Msg.create_outgoing(self.org, self.admin, real_contact, "Real Voice",
                                  channel=self.channel, msg_type=IVR)
        self.assertDailyCount(self.channel, 1, ChannelCount.OUTGOING_IVR_TYPE, msg.created_on.date())
        # deleting the IVR msg doesn't decrement the count either
        msg.delete()
        self.assertDailyCount(self.channel, 1, ChannelCount.OUTGOING_IVR_TYPE, msg.created_on.date())
class AfricasTalkingTest(TembaTest):
    """Tests for the Africa's Talking channel: delivery reports, inbound callback and sending."""

    def setUp(self):
        # swap the default test channel for an Africa's Talking (AT) channel
        super(AfricasTalkingTest, self).setUp()
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'KE', 'AT', None, '+250788123123',
                                      config=dict(username='at-user', api_key='africa-key'),
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_delivery(self):
        """Delivery handler validates channel/message and maps AT statuses to ours."""
        # ok, what happens with an invalid uuid?
        post_data = dict(id="external1", status="Success")
        response = self.client.post(reverse('handlers.africas_talking_handler', args=['delivery', 'not-real-uuid']), post_data)
        self.assertEquals(404, response.status_code)
        # ok, try with a valid uuid, but invalid message id
        delivery_url = reverse('handlers.africas_talking_handler', args=['delivery', self.channel.uuid])
        response = self.client.post(delivery_url, post_data)
        self.assertEquals(404, response.status_code)
        # requires posts
        delivery_url = reverse('handlers.africas_talking_handler', args=['delivery', self.channel.uuid])
        response = self.client.get(delivery_url, post_data)
        self.assertEquals(400, response.status_code)
        # missing status
        del post_data['status']
        response = self.client.post(delivery_url, post_data)
        self.assertEquals(400, response.status_code)
        # ok, lets create an outgoing message to update
        joe = self.create_contact("Joe Biden", "+254788383383")
        msg = joe.send("Hey Joe, it's Obama, pick up!", self.admin)
        msg.external_id = "external1"
        msg.save(update_fields=('external_id',))

        def assertStatus(sms, post_status, assert_status):
            # post an AT status report and verify the message's resulting status
            post_data['status'] = post_status
            response = self.client.post(delivery_url, post_data)
            self.assertEquals(200, response.status_code)
            sms = Msg.objects.get(pk=sms.id)
            self.assertEquals(assert_status, sms.status)

        assertStatus(msg, 'Success', DELIVERED)
        assertStatus(msg, 'Sent', SENT)
        assertStatus(msg, 'Buffered', SENT)
        assertStatus(msg, 'Failed', FAILED)
        assertStatus(msg, 'Rejected', FAILED)

    def test_callback(self):
        """Inbound callback creates a normalized incoming Msg."""
        post_data = {'from': "0788123123", 'text': "Hello World"}
        callback_url = reverse('handlers.africas_talking_handler', args=['callback', self.channel.uuid])
        # missing test data
        response = self.client.post(callback_url, dict())
        self.assertEquals(400, response.status_code)
        response = self.client.post(callback_url, post_data)
        self.assertEquals(200, response.status_code)
        # load our message
        msg = Msg.objects.get()
        self.assertEquals("+254788123123", msg.contact.get_urn(TEL_SCHEME).path)
        self.assertEquals(INCOMING, msg.direction)
        self.assertEquals(self.org, msg.org)
        self.assertEquals(self.channel, msg.channel)
        self.assertEquals("Hello World", msg.text)

    def test_send(self):
        """Sending via AT: success, API error, shared shortcode and exception paths."""
        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("Test message", self.admin, trigger_send=False)
        try:
            settings.SEND_MESSAGES = True
            with patch('requests.post') as mock:
                mock.return_value = MockResponse(200, json.dumps(dict(SMSMessageData=dict(Recipients=[dict(messageId='msg1', status='Success')]))))
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEquals(SENT, msg.status)
                self.assertTrue(msg.sent_on)
                self.assertEquals('msg1', msg.external_id)
                # check that our from was set
                self.assertEquals(self.channel.address, mock.call_args[1]['data']['from'])
                self.clear_cache()
            with patch('requests.post') as mock:
                mock.return_value = MockResponse(200, json.dumps(
                    dict(SMSMessageData=dict(Recipients=[dict(messageId='msg1', status='Could Not Send')]))))
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # check the status of the message is now errored
                msg.refresh_from_db()
                self.assertEquals(ERRORED, msg.status)
                self.assertEquals(1, msg.error_count)
                self.assertTrue(msg.next_attempt)
                self.clear_cache()
            # test with a non-dedicated shortcode
            self.channel.config = json.dumps(dict(username='at-user', api_key='africa-key', is_shared=True))
            self.channel.save()
            with patch('requests.post') as mock:
                mock.return_value = MockResponse(200, json.dumps(dict(SMSMessageData=dict(Recipients=[dict(messageId='msg1', status='Success')]))))
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # assert we didn't send the short code in our data
                self.assertTrue('from' not in mock.call_args[1]['data'])
                self.clear_cache()
            with patch('requests.post') as mock:
                mock.return_value = MockResponse(400, "Error", method='POST')
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEquals(ERRORED, msg.status)
                self.assertEquals(2, msg.error_count)
                self.assertTrue(msg.next_attempt)
            with patch('requests.post') as mock:
                mock.side_effect = Exception('Kaboom!')
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # message should be marked as failed after the final error
                msg.refresh_from_db()
                self.assertEquals(FAILED, msg.status)
                self.assertEquals(2, msg.error_count)
                self.assertTrue(msg.next_attempt)
                # regression check: the error path must not blow up before a response exists
                self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
                                                                                  "referenced before assignment"))
        finally:
            settings.SEND_MESSAGES = False
class ExternalTest(TembaTest):
    """Tests for the generic External (EX) channel: status callbacks, receive and send."""

    def setUp(self):
        # swap the default test channel for an External channel posting to a fixed URL
        super(ExternalTest, self).setUp()
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'BR', 'EX', None, '+250788123123', scheme='tel',
                                      config={Channel.CONFIG_SEND_URL: 'http://foo.com/send', Channel.CONFIG_SEND_METHOD: 'POST'},
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_status(self):
        """Status handler validates ids and maps action names to message statuses."""
        # try with an invalid channel
        response = self.client.post(reverse('handlers.external_handler', args=['sent', 'not-real-uuid']), dict(id="-1"))
        self.assertEqual(response.status_code, 400)
        delivery_url = reverse('handlers.external_handler', args=['sent', self.channel.uuid])
        joe = self.create_contact("Joe Biden", "+254788383383")
        # try with missing message id
        response = self.client.post(delivery_url, {})
        self.assertEqual(response.status_code, 400)
        # try with an invalid message id
        response = self.client.post(delivery_url, {'id': -1234})
        self.assertEqual(response.status_code, 400)
        # try with an incoming message id
        incoming = self.create_msg(direction='I', contact=joe, text="It's me")
        response = self.client.post(delivery_url, {'id': incoming.id})
        self.assertEqual(response.status_code, 400)
        # ok, lets create an outgoing message to update
        msg = joe.send("Hey Joe, it's Obama, pick up!", self.admin)
        payload = {'id': msg.id}

        def assertStatus(sms, status, assert_status):
            # post to the handler action and verify the resulting message status
            resp = self.client.post(reverse('handlers.external_handler', args=[status, self.channel.uuid]), payload)
            self.assertEquals(200, resp.status_code)
            sms = Msg.objects.get(pk=sms.id)
            self.assertEquals(assert_status, sms.status)

        assertStatus(msg, 'delivered', DELIVERED)
        assertStatus(msg, 'sent', SENT)
        assertStatus(msg, 'failed', FAILED)
        # check when called with phone number rather than UUID
        response = self.client.post(reverse('handlers.external_handler', args=['sent', '250788123123']), {'id': msg.pk})
        self.assertEquals(200, response.status_code)
        msg.refresh_from_db()
        self.assertEqual(msg.status, SENT)

    def test_receive(self):
        """Inbound messages are normalized; missing sender is rejected; date is honored."""
        data = {'from': '5511996458779', 'text': 'Hello World!'}
        callback_url = reverse('handlers.external_handler', args=['received', self.channel.uuid])
        response = self.client.post(callback_url, data)
        self.assertEquals(200, response.status_code)
        # load our message
        msg = Msg.objects.get()
        self.assertEquals("+5511996458779", msg.contact.get_urn(TEL_SCHEME).path)
        self.assertEquals(INCOMING, msg.direction)
        self.assertEquals(self.org, msg.org)
        self.assertEquals(self.channel, msg.channel)
        self.assertEquals("Hello World!", msg.text)
        # empty sender should be rejected
        data = {'from': "", 'text': "Hi there"}
        response = self.client.post(callback_url, data)
        self.assertEquals(400, response.status_code)
        Msg.objects.all().delete()
        # receive with a date
        data = {'from': '5511996458779', 'text': 'Hello World!', 'date': '2012-04-23T18:25:43.511Z'}
        callback_url = reverse('handlers.external_handler', args=['received', self.channel.uuid])
        response = self.client.post(callback_url, data)
        self.assertEquals(200, response.status_code)
        # load our message, make sure the date was saved properly
        msg = Msg.objects.get()
        self.assertEquals(2012, msg.sent_on.year)
        self.assertEquals(18, msg.sent_on.hour)

    def test_receive_external(self):
        """A channel with the 'ext' scheme creates contacts with external URNs."""
        self.channel.scheme = 'ext'
        self.channel.save()
        data = {'from': 'lynch24', 'text': 'Beast Mode!'}
        callback_url = reverse('handlers.external_handler', args=['received', self.channel.uuid])
        response = self.client.post(callback_url, data)
        self.assertEquals(200, response.status_code)
        # check our message
        msg = Msg.objects.get()
        self.assertEquals('lynch24', msg.contact.get_urn(EXTERNAL_SCHEME).path)
        self.assertEquals(INCOMING, msg.direction)
        self.assertEquals(self.org, msg.org)
        self.assertEquals(self.channel, msg.channel)
        self.assertEquals('Beast Mode!', msg.text)

    def test_send_replacement(self):
        """Template variables in the send URL/body are substituted per message."""
        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("Test message", self.admin, trigger_send=False)
        # variables embedded in the GET URL are replaced
        self.channel.config = json.dumps({Channel.CONFIG_SEND_URL: 'http://foo.com/send&text={{text}}&to={{to_no_plus}}',
                                          Channel.CONFIG_SEND_METHOD: 'GET'})
        self.channel.save()
        with self.settings(SEND_MESSAGES=True):
            with patch('requests.get') as mock:
                mock.return_value = MockResponse(200, "Sent")
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                self.assertEqual(mock.call_args[0][0], 'http://foo.com/send&text=Test+message&to=250788383383')
        # POST with no explicit body sends the default form-encoded payload
        self.channel.config = json.dumps({Channel.CONFIG_SEND_URL: 'http://foo.com/send',
                                          Channel.CONFIG_SEND_METHOD: 'POST'})
        self.channel.save()
        self.clear_cache()
        with self.settings(SEND_MESSAGES=True):
            with patch('requests.post') as mock:
                mock.return_value = MockResponse(200, "Sent")
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                self.assertEqual(mock.call_args[0][0], 'http://foo.com/send')
                self.assertEqual(mock.call_args[1]['data'], 'id=%d&text=Test+message&to=%%2B250788383383&to_no_plus=250788383383&'
                                                            'from=%%2B250788123123&from_no_plus=250788123123&'
                                                            'channel=%d' % (msg.id, self.channel.id))
        # a custom body template replaces the default POST payload
        self.channel.config = json.dumps({Channel.CONFIG_SEND_URL: 'http://foo.com/send',
                                          Channel.CONFIG_SEND_BODY: 'text={{text}}&to={{to_no_plus}}',
                                          Channel.CONFIG_SEND_METHOD: 'POST'})
        self.channel.save()
        self.clear_cache()
        with self.settings(SEND_MESSAGES=True):
            with patch('requests.post') as mock:
                mock.return_value = MockResponse(200, "Sent")
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                self.assertEqual(mock.call_args[0][0], 'http://foo.com/send')
                self.assertEqual(mock.call_args[1]['data'], 'text=Test+message&to=250788383383')
        # PUT works the same as POST with a body template
        self.channel.config = json.dumps({Channel.CONFIG_SEND_URL: 'http://foo.com/send',
                                          Channel.CONFIG_SEND_BODY: 'text={{text}}&to={{to_no_plus}}',
                                          Channel.CONFIG_SEND_METHOD: 'PUT'})
        self.channel.save()
        self.clear_cache()
        with self.settings(SEND_MESSAGES=True):
            with patch('requests.put') as mock:
                mock.return_value = MockResponse(200, "Sent")
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                self.assertEqual(mock.call_args[0][0], 'http://foo.com/send')
                self.assertEqual(mock.call_args[1]['data'], 'text=Test+message&to=250788383383')

    def test_send(self):
        """Sending: success, HTTP error, exception paths, and channel log visibility."""
        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("Test message", self.admin, trigger_send=False)
        try:
            settings.SEND_MESSAGES = True
            with patch('requests.post') as mock:
                mock.return_value = MockResponse(200, "Sent")
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEquals(WIRED, msg.status)
                self.assertTrue(msg.sent_on)
                self.clear_cache()
            with patch('requests.post') as mock:
                mock.return_value = MockResponse(400, "Error")
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEquals(ERRORED, msg.status)
                self.assertEquals(1, msg.error_count)
                self.assertTrue(msg.next_attempt)
            with patch('requests.post') as mock:
                mock.side_effect = Exception('Kaboom!')
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEquals(ERRORED, msg.status)
                self.assertEquals(2, msg.error_count)
                self.assertTrue(msg.next_attempt)
                # regression check: the error path must not blow up before a response exists
                self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
                                                                                  "referenced before assignment"))
        finally:
            settings.SEND_MESSAGES = False
        # view the log item for our send
        self.login(self.admin)
        log_item = ChannelLog.objects.all().order_by('created_on').first()
        response = self.client.get(reverse('channels.channellog_read', args=[log_item.pk]))
        self.assertEquals(response.context['object'].description, 'Successfully Delivered')
        # make sure we can't see it as anon
        self.org.is_anon = True
        self.org.save()
        response = self.client.get(reverse('channels.channellog_read', args=[log_item.pk]))
        self.assertEquals(302, response.status_code)
        # change our admin to be a CS rep, see if they can see the page
        self.admin.groups.add(Group.objects.get(name='Customer Support'))
        response = self.client.get(reverse('channels.channellog_read', args=[log_item.pk]))
        self.assertEquals(response.context['object'].description, 'Successfully Delivered')
class VerboiceTest(TembaTest):
    """Tests for the Verboice (VB) voice channel status callback handler."""

    def setUp(self):
        # swap the default test channel for a Verboice channel
        super(VerboiceTest, self).setUp()
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'US', 'VB', None, '+250788123123',
                                      config=dict(username='test', password='sesame'),
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_receive(self):
        """Status callbacks require GET, a known call SID, and move the call to RINGING."""
        status_url = reverse('handlers.verboice_handler', args=['status', self.channel.uuid])
        ringing_query = "?From=250788456456&CallStatus=ringing&CallSid=12345"

        # POST is not an accepted method on the status endpoint
        resp = self.client.post(status_url, dict())
        self.assertEqual(resp.status_code, 405)

        # GET without parameters is a bad request
        resp = self.client.get(status_url)
        self.assertEqual(resp.status_code, 400)

        # a status for a call SID we don't know about is also rejected
        resp = self.client.get(status_url + ringing_query)
        self.assertEqual(resp.status_code, 400)

        # now create a pending outgoing call with that external id
        bruno = self.create_contact('Bruno Mars', '+252788123123')
        call = IVRCall.create_outgoing(self.channel, bruno, bruno.get_urn(TEL_SCHEME), self.admin)
        call.external_id = "12345"
        call.save()
        self.assertEqual(call.status, IVRCall.PENDING)

        # the same callback should now succeed and update the call status
        resp = self.client.get(status_url + ringing_query)
        self.assertEqual(resp.status_code, 200)
        call.refresh_from_db()
        self.assertEqual(call.status, IVRCall.RINGING)
class YoTest(TembaTest):
    """Tests for the Yo! (YO) channel: inbound GET handler and the send paths."""

    def setUp(self):
        # swap the default test channel for a Yo! channel
        super(YoTest, self).setUp()
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'BR', 'YO', None, '+250788123123',
                                      config=dict(username='test', password='sesame'),
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_receive(self):
        """Inbound messages arrive as GET query params; both params are required."""
        callback_url = reverse('handlers.yo_handler', args=['received', self.channel.uuid])
        response = self.client.get(callback_url + "?sender=252788123123&message=Hello+World")
        self.assertEquals(200, response.status_code)
        # load our message
        msg = Msg.objects.get()
        self.assertEquals("+252788123123", msg.contact.get_urn(TEL_SCHEME).path)
        self.assertEquals(INCOMING, msg.direction)
        self.assertEquals(self.org, msg.org)
        self.assertEquals(self.channel, msg.channel)
        self.assertEquals("Hello World", msg.text)
        # fails if missing sender
        response = self.client.get(callback_url + "?sender=252788123123")
        self.assertEquals(400, response.status_code)
        # fails if missing message
        response = self.client.get(callback_url + "?message=Hello+World")
        self.assertEquals(400, response.status_code)

    def test_send(self):
        """Sending: success, backup-URL retry, errors, auth failure, and blacklisting."""
        joe = self.create_contact("Joe", "+252788383383")
        msg = joe.send("Test message", self.admin, trigger_send=False)
        try:
            settings.SEND_MESSAGES = True
            with patch('requests.get') as mock:
                mock.return_value = MockResponse(200, "ybs_autocreate_status=OK")
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEquals(SENT, msg.status)
                self.assertTrue(msg.sent_on)
                self.clear_cache()
            with patch('requests.get') as mock:
                mock.side_effect = [MockResponse(401, "Error"), MockResponse(200, 'ybs_autocreate_status=OK')]
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEquals(SENT, msg.status)
                self.assertTrue(msg.sent_on)
                # check that requests was called twice, using the backup URL the second time
                self.assertEquals(2, mock.call_count)
                self.clear_cache()
            with patch('requests.get') as mock:
                mock.return_value = MockResponse(400, "Kaboom")
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEquals(ERRORED, msg.status)
                self.assertEquals(1, msg.error_count)
                self.assertTrue(msg.next_attempt)
                self.clear_cache()
                # regression check: the error path must not blow up before a response exists
                self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
                                                                                  "referenced before assignment"))
            with patch('requests.get') as mock:
                mock.return_value = MockResponse(200, "ybs_autocreate_status=ERROR&ybs_autocreate_message=" +
                                                 "YBS+AutoCreate+Subsystem%3A+Access+denied" +
                                                 "+due+to+wrong+authorization+code")
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEquals(ERRORED, msg.status)
                self.assertEquals(2, msg.error_count)
                self.assertTrue(msg.next_attempt)
                # an auth failure should not stop the contact
                joe.refresh_from_db()
                self.assertFalse(joe.is_stopped)
                self.clear_cache()
            with patch('requests.get') as mock:
                mock.side_effect = Exception('Kaboom!')
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # message should be marked as failed after the final error
                msg.refresh_from_db()
                self.assertEquals(FAILED, msg.status)
                self.assertEquals(2, msg.error_count)
                self.assertTrue(msg.next_attempt)
                # contact should not be stopped
                joe.refresh_from_db()
                self.assertFalse(joe.is_stopped)
                self.clear_cache()
                self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
                                                                                  "referenced before assignment"))
            with patch('requests.get') as mock:
                mock.return_value = MockResponse(200, "ybs_autocreate_status=ERROR&ybs_autocreate_message=" +
                                                 "256794224665%3ABLACKLISTED")
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # message should be marked as a failure
                msg.refresh_from_db()
                self.assertEquals(FAILED, msg.status)
                self.assertEquals(2, msg.error_count)
                self.assertTrue(msg.next_attempt)
                # a BLACKLISTED response should also stop the contact
                joe.refresh_from_db()
                self.assertTrue(joe.is_stopped)
        finally:
            settings.SEND_MESSAGES = False
class ShaqodoonTest(TembaTest):
    """Tests for the Shaqodoon (SQ) channel: inbound handler and send paths."""

    def setUp(self):
        # swap the default test channel for a Shaqodoon channel
        super(ShaqodoonTest, self).setUp()
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'SO', 'SQ', None, '+250788123123',
                                      config={Channel.CONFIG_SEND_URL: 'http://foo.com/send',
                                              Channel.CONFIG_USERNAME: 'username',
                                              Channel.CONFIG_PASSWORD: 'password',
                                              Channel.CONFIG_KEY: 'key'},
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_receive(self):
        """Inbound messages create a normalized incoming Msg."""
        data = {'from': '252788123456', 'text': 'Hello World!'}
        callback_url = reverse('handlers.shaqodoon_handler', args=['received', self.channel.uuid])
        response = self.client.post(callback_url, data)
        self.assertEquals(200, response.status_code)
        # load our message
        msg = Msg.objects.get()
        self.assertEquals("+252788123456", msg.contact.get_urn(TEL_SCHEME).path)
        self.assertEquals(INCOMING, msg.direction)
        self.assertEquals(self.org, msg.org)
        self.assertEquals(self.channel, msg.channel)
        self.assertEquals("Hello World!", msg.text)

    def test_send(self):
        """Sending: success, HTTP error and exception paths."""
        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("Test message ☺", self.admin, trigger_send=False)
        try:
            settings.SEND_MESSAGES = True
            with patch('requests.get') as mock:
                mock.return_value = MockResponse(200, "Sent")
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEquals(WIRED, msg.status)
                self.assertTrue(msg.sent_on)
                self.clear_cache()
            with patch('requests.get') as mock:
                mock.return_value = MockResponse(400, "Error")
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEquals(ERRORED, msg.status)
                self.assertEquals(1, msg.error_count)
                self.assertTrue(msg.next_attempt)
            with patch('requests.get') as mock:
                mock.side_effect = Exception('Kaboom!')
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEquals(ERRORED, msg.status)
                # regression check: the error path must not blow up before a response exists
                self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
                                                                                  "referenced before assignment"))
        finally:
            settings.SEND_MESSAGES = False
class M3TechTest(TembaTest):
    """Tests for the M3Tech (M3) channel: inbound handler and all send paths."""

    def setUp(self):
        # swap the default test channel for an M3Tech channel
        super(M3TechTest, self).setUp()
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'PK', 'M3', None, '+250788123123',
                                      config={Channel.CONFIG_USERNAME: 'username', Channel.CONFIG_PASSWORD: 'password'},
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_receive(self):
        """Inbound messages create a normalized incoming Msg."""
        data = {'from': '252788123456', 'text': 'Hello World!'}
        callback_url = reverse('handlers.m3tech_handler', args=['received', self.channel.uuid])
        response = self.client.post(callback_url, data)
        self.assertEquals(200, response.status_code)
        # load our message
        msg = Msg.objects.get()
        self.assertEquals("+252788123456", msg.contact.get_urn(TEL_SCHEME).path)
        self.assertEquals(INCOMING, msg.direction)
        self.assertEquals(self.org, msg.org)
        self.assertEquals(self.channel, msg.channel)
        self.assertEquals("Hello World!", msg.text)

    def test_send(self):
        """Sending: ASCII vs unicode SMSType, bad JSON, HTTP error, API failure, exception."""
        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("Test message ☺", self.admin, trigger_send=False)
        try:
            settings.SEND_MESSAGES = True
            with patch('requests.get') as mock:
                msg.text = "Test message"
                mock.return_value = MockResponse(200,
                                                 """[{"Response":"0"}]""")
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # plain text should be sent as SMSType '0'
                self.assertEqual(mock.call_args[1]['params']['SMSType'], '0')
                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEquals(WIRED, msg.status)
                self.assertTrue(msg.sent_on)
                self.clear_cache()
            with patch('requests.get') as mock:
                msg.text = "Test message ☺"
                mock.return_value = MockResponse(200,
                                                 """[{"Response":"0"}]""")
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # unicode text should be sent as SMSType '7'
                self.assertEqual(mock.call_args[1]['params']['SMSType'], '7')
                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEquals(WIRED, msg.status)
                self.assertTrue(msg.sent_on)
                self.clear_cache()
            # bogus json
            with patch('requests.get') as mock:
                msg.text = "Test message"
                mock.return_value = MockResponse(200, """["bad json":}]""")
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # an unparseable response should error the message
                msg.refresh_from_db()
                self.assertEquals(ERRORED, msg.status)
                self.clear_cache()
            with patch('requests.get') as mock:
                mock.return_value = MockResponse(400, "Error")
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEquals(ERRORED, msg.status)
                self.assertEquals(2, msg.error_count)
                self.assertTrue(msg.next_attempt)
                self.clear_cache()
            with patch('requests.get') as mock:
                mock.return_value = MockResponse(200, """[{"Response":"1"}]""")
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # a non-zero API response code fails the message
                msg.refresh_from_db()
                self.assertEquals(FAILED, msg.status)
                self.assertEquals(2, msg.error_count)
                self.assertTrue(msg.next_attempt)
                self.clear_cache()
            with patch('requests.get') as mock:
                mock.side_effect = Exception('Kaboom!')
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # message should be marked as failed after the final error
                msg.refresh_from_db()
                self.assertEquals(FAILED, msg.status)
                self.assertEquals(2, msg.error_count)
                self.assertTrue(msg.next_attempt)
                # regression check: the error path must not blow up before a response exists
                self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
                                                                                  "referenced before assignment"))
                self.clear_cache()
        finally:
            settings.SEND_MESSAGES = False
class KannelTest(TembaTest):
    """Tests for the Kannel channel handler: delivery-status callbacks, inbound
    message receipt and outbound sending (including encoding/verify-ssl options).

    Note: uses assertEqual throughout; assertEquals is a deprecated alias that
    was removed in Python 3.12.
    """

    def setUp(self):
        super(KannelTest, self).setUp()

        # replace the default test channel with a Kannel ('KN') channel
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'RW', 'KN', None, '+250788123123',
                                      config=dict(username='kannel-user', password='kannel-pass', send_url='http://foo/'),
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_status(self):
        # ok, what happens with an invalid uuid?
        data = dict(id="-1", status="4")
        response = self.client.post(reverse('handlers.kannel_handler', args=['status', 'not-real-uuid']), data)
        self.assertEqual(400, response.status_code)

        # ok, try with a valid uuid, but invalid message id -1
        delivery_url = reverse('handlers.kannel_handler', args=['status', self.channel.uuid])
        response = self.client.post(delivery_url, data)
        self.assertEqual(400, response.status_code)

        # ok, lets create an outgoing message to update
        joe = self.create_contact("Joe Biden", "+254788383383")
        msg = joe.send("Hey Joe, it's Obama, pick up!", self.admin)
        data['id'] = msg.pk

        def assertStatus(sms, status, assert_status):
            # post the Kannel status code and check the message moved to the expected status
            data['status'] = status
            response = self.client.post(reverse('handlers.kannel_handler', args=['status', self.channel.uuid]), data)
            self.assertEqual(200, response.status_code)
            sms = Msg.objects.get(pk=sms.id)
            self.assertEqual(assert_status, sms.status)

        assertStatus(msg, '4', SENT)
        assertStatus(msg, '1', DELIVERED)
        assertStatus(msg, '16', FAILED)

    def test_receive(self):
        data = {
            'sender': '0788383383',
            'message': 'Hello World!',
            'id': 'external1',
            'ts': int(calendar.timegm(time.gmtime()))
        }
        callback_url = reverse('handlers.kannel_handler', args=['receive', self.channel.uuid])
        response = self.client.post(callback_url, data)

        self.assertEqual(200, response.status_code)

        # load our message
        msg = Msg.objects.get()
        self.assertEqual("+250788383383", msg.contact.get_urn(TEL_SCHEME).path)
        self.assertEqual(INCOMING, msg.direction)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.channel, msg.channel)
        self.assertEqual("Hello World!", msg.text)

    def test_send(self):
        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("Test message", self.admin, trigger_send=False)

        try:
            settings.SEND_MESSAGES = True

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(200, 'Accepted 201')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(WIRED, msg.status)
                self.assertTrue(msg.sent_on)

                # assert verify was set to true
                self.assertTrue(mock.call_args[1]['verify'])
                self.assertEqual('+250788383383', mock.call_args[1]['params']['to'])
                self.clear_cache()

            # smart encoding + national number formatting, no SSL verification
            self.channel.config = json.dumps(dict(username='kannel-user', password='kannel-pass',
                                                  encoding=Channel.ENCODING_SMART, use_national=True,
                                                  send_url='http://foo/', verify_ssl=False))
            self.channel.save()

            msg.text = "No capital accented È!"
            msg.save()

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(200, 'Accepted 201')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(WIRED, msg.status)
                self.assertTrue(msg.sent_on)

                # smart encoding should have stripped the accent and nationalized the number
                self.assertEqual('No capital accented E!', mock.call_args[1]['params']['text'])
                self.assertEqual('788383383', mock.call_args[1]['params']['to'])
                self.assertFalse('coding' in mock.call_args[1]['params'])
                self.clear_cache()

            msg.text = "Unicode. ☺"
            msg.save()

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(200, 'Accepted 201')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(WIRED, msg.status)
                self.assertTrue(msg.sent_on)

                # unicode text can't be smart-encoded, so coding/charset must be set
                self.assertEqual("Unicode. ☺", mock.call_args[1]['params']['text'])
                self.assertEqual('2', mock.call_args[1]['params']['coding'])
                self.assertEqual('utf8', mock.call_args[1]['params']['charset'])
                self.clear_cache()

            msg.text = "Normal"
            msg.save()

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(200, 'Accepted 201')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(WIRED, msg.status)
                self.assertTrue(msg.sent_on)

                # plain GSM text should be sent without coding/charset params
                self.assertEqual("Normal", mock.call_args[1]['params']['text'])
                self.assertFalse('coding' in mock.call_args[1]['params'])
                self.assertFalse('charset' in mock.call_args[1]['params'])
                self.clear_cache()

            # with unicode encoding forced, even plain text goes out as unicode
            self.channel.config = json.dumps(dict(username='kannel-user', password='kannel-pass',
                                                  encoding=Channel.ENCODING_UNICODE,
                                                  send_url='http://foo/', verify_ssl=False))
            self.channel.save()

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(200, 'Accepted 201')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(WIRED, msg.status)
                self.assertTrue(msg.sent_on)

                self.assertEqual("Normal", mock.call_args[1]['params']['text'])
                self.assertEqual('2', mock.call_args[1]['params']['coding'])
                self.assertEqual('utf8', mock.call_args[1]['params']['charset'])
                self.clear_cache()

            self.channel.config = json.dumps(dict(username='kannel-user', password='kannel-pass',
                                                  send_url='http://foo/', verify_ssl=False))
            self.channel.save()

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(400, "Error")

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # assert verify was set to False
                self.assertFalse(mock.call_args[1]['verify'])

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(1, msg.error_count)
                self.assertTrue(msg.next_attempt)

            with patch('requests.get') as mock:
                mock.side_effect = Exception('Kaboom')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # assert verify was set to False
                self.assertFalse(mock.call_args[1]['verify'])

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)

                # regression guard: a raised exception must not surface an
                # UnboundLocalError for 'response' inside the channel code
                self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
                                                                                  "referenced before assignment"))
        finally:
            settings.SEND_MESSAGES = False
class NexmoTest(TembaTest):
    """Tests for the Nexmo channel handler: delivery-status callbacks, inbound
    receive, outbound sending, throttling and retry-on-throughput-exceeded.

    Note: uses assertEqual throughout; assertEquals is a deprecated alias that
    was removed in Python 3.12.
    """

    def setUp(self):
        super(NexmoTest, self).setUp()

        # replace the default test channel with a Nexmo ('NX') channel
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'RW', 'NX', None, '+250788123123',
                                      uuid='00000000-0000-0000-0000-000000001234')

        # store Nexmo credentials on the org config, keyed by a fresh uuid
        self.nexmo_uuid = str(uuid.uuid4())
        nexmo_config = {NEXMO_KEY: '1234', NEXMO_SECRET: '1234', NEXMO_UUID: self.nexmo_uuid,
                        NEXMO_APP_ID: 'nexmo-app-id', NEXMO_APP_PRIVATE_KEY: 'nexmo-private-key'}

        org = self.channel.org

        config = org.config_json()
        config.update(nexmo_config)
        org.config = json.dumps(config)
        org.save()

    def test_status(self):
        # ok, what happens with an invalid uuid and number
        data = dict(to='250788123111', messageId='external1')
        response = self.client.get(reverse('handlers.nexmo_handler', args=['status', 'not-real-uuid']), data)
        self.assertEqual(404, response.status_code)

        # ok, try with a valid uuid, but invalid message id -1, should return 200
        # these are probably multipart message callbacks, which we don't track
        data = dict(to='250788123123', messageId='-1')
        delivery_url = reverse('handlers.nexmo_handler', args=['status', self.nexmo_uuid])
        response = self.client.get(delivery_url, data)
        self.assertEqual(200, response.status_code)

        # ok, lets create an outgoing message to update
        joe = self.create_contact("Joe Biden", "+254788383383")
        msg = joe.send("Hey Joe, it's Obama, pick up!", self.admin)
        msg.external_id = 'external1'
        msg.save(update_fields=('external_id',))

        data['messageId'] = 'external1'

        def assertStatus(sms, status, assert_status):
            # post the Nexmo status string and check the resulting message status
            data['status'] = status
            response = self.client.get(reverse('handlers.nexmo_handler', args=['status', self.nexmo_uuid]), data)
            self.assertEqual(200, response.status_code)
            sms = Msg.objects.get(pk=sms.id)
            self.assertEqual(assert_status, sms.status)

        assertStatus(msg, 'delivered', DELIVERED)
        assertStatus(msg, 'expired', FAILED)
        assertStatus(msg, 'failed', FAILED)
        assertStatus(msg, 'accepted', SENT)
        assertStatus(msg, 'buffered', SENT)

    def test_receive(self):
        data = dict(to='250788123123', msisdn='250788111222', text='Hello World!', messageId='external1')
        callback_url = reverse('handlers.nexmo_handler', args=['receive', self.nexmo_uuid])
        response = self.client.get(callback_url, data)

        self.assertEqual(200, response.status_code)

        # load our message
        msg = Msg.objects.get()
        self.assertEqual("+250788111222", msg.contact.get_urn(TEL_SCHEME).path)
        self.assertEqual(INCOMING, msg.direction)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.channel, msg.channel)
        self.assertEqual("Hello World!", msg.text)
        self.assertEqual('external1', msg.external_id)

    def test_send(self):
        from temba.orgs.models import NEXMO_KEY, NEXMO_SECRET, NEXMO_APP_ID, NEXMO_APP_PRIVATE_KEY
        org_config = self.org.config_json()
        org_config[NEXMO_KEY] = 'nexmo_key'
        org_config[NEXMO_SECRET] = 'nexmo_secret'
        org_config[NEXMO_APP_ID] = 'nexmo-app-id'
        org_config[NEXMO_APP_PRIVATE_KEY] = 'nexmo-private-key'
        self.org.config = json.dumps(org_config)
        self.org.clear_channel_caches()

        self.channel.channel_type = Channel.TYPE_NEXMO
        self.channel.save()

        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("Test message", self.admin, trigger_send=False)

        try:
            settings.SEND_MESSAGES = True
            r = get_redis_connection()

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(200, json.dumps(dict(messages=[{'status': 0, 'message-id': 12}])), method='POST')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(SENT, msg.status)
                self.assertTrue(msg.sent_on)
                self.assertEqual('12', msg.external_id)
                self.clear_cache()

                # test some throttling by sending three messages right after another
                start = time.time()
                for i in range(3):
                    Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                    r.delete(timezone.now().strftime(MSG_SENT_KEY))

                    msg.refresh_from_db()
                    self.assertEqual(SENT, msg.status)

                # assert we sent the messages out in a reasonable amount of time
                end = time.time()
                self.assertTrue(2.5 > end - start > 2, "Sending of three messages took: %f" % (end - start))
                self.clear_cache()

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(200, json.dumps(dict(messages=[{'status': 0, 'message-id': 12}])), method='POST')

                msg.text = u"Unicode ☺"
                msg.save()

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(SENT, msg.status)
                self.assertTrue(msg.sent_on)
                self.assertEqual('12', msg.external_id)

                # assert that we were called with unicode
                mock.assert_called_once_with('https://rest.nexmo.com/sms/json',
                                             params={'from': u'250788123123',
                                                     'api_secret': u'1234',
                                                     'status-report-req': 1,
                                                     'to': u'250788383383',
                                                     'text': u'Unicode \u263a',
                                                     'api_key': u'1234',
                                                     'type': 'unicode'})
                self.clear_cache()

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(401, "Invalid API token", method='POST')

                # clear out our channel log
                ChannelLog.objects.all().delete()

                # then send it
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check status
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)

                # and that we have a decent log
                log = ChannelLog.objects.get(msg=msg)
                self.assertEqual(log.description, "Failed sending message: Invalid API token")

            with patch('requests.get') as mock:
                # this hackery is so that we return a different thing on the second call as the first
                def return_valid(url, params):
                    called = getattr(return_valid, 'called', False)

                    # on the first call we simulate Nexmo telling us to wait
                    if not called:
                        return_valid.called = True
                        err_msg = "Throughput Rate Exceeded - please wait [ 250 ] and retry"
                        return MockResponse(200, json.dumps(dict(messages=[{'status': 1, 'error-text': err_msg}])))

                    # on the second, all is well
                    else:
                        return MockResponse(200, json.dumps(dict(messages=[{'status': 0, 'message-id': 12}])),
                                            method='POST')
                mock.side_effect = return_valid

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # should be sent after the internal retry
                msg.refresh_from_db()
                self.assertEqual(SENT, msg.status)

                self.clear_cache()

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(400, "Error", method='POST')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(2, msg.error_count)
                self.assertTrue(msg.next_attempt)
        finally:
            settings.SEND_MESSAGES = False
class VumiTest(TembaTest):
    """Tests for the Vumi channel handler: inbound receive, delivery reports,
    outbound sending with the Redis send-failsafe, retry/backoff behavior and
    the opt-out (400) failure path.

    Note: uses assertEqual throughout; assertEquals is a deprecated alias that
    was removed in Python 3.12.
    """

    def setUp(self):
        super(VumiTest, self).setUp()

        # replace the default test channel with a Vumi ('VM') channel
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'RW', 'VM', None, '+250788123123',
                                      config=dict(account_key='vumi-key', access_token='vumi-token', conversation_key='key'),
                                      uuid='00000000-0000-0000-0000-000000001234')

        self.trey = self.create_contact("Trey Anastasio", "250788382382")

    def test_receive(self):
        callback_url = reverse('handlers.vumi_handler', args=['receive', self.channel.uuid])

        # GET is not allowed
        response = self.client.get(callback_url)
        self.assertEqual(response.status_code, 405)

        # empty JSON body is rejected
        response = self.client.post(callback_url, json.dumps(dict()), content_type="application/json")
        self.assertEqual(response.status_code, 400)

        # missing 'content' is rejected
        data = dict(timestamp="2014-04-18 03:54:20.570618", message_id="123456", from_addr="+250788383383")
        response = self.client.post(callback_url, json.dumps(data), content_type="application/json")
        self.assertEqual(response.status_code, 400)

        data = dict(timestamp="2014-04-18 03:54:20.570618", message_id="123456", from_addr="+250788383383",
                    content="Hello from Vumi")

        response = self.client.post(callback_url, json.dumps(data), content_type="application/json")
        self.assertEqual(response.status_code, 200)

        msg = Msg.objects.get()
        self.assertEqual(INCOMING, msg.direction)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.channel, msg.channel)
        self.assertEqual("Hello from Vumi", msg.text)
        self.assertEqual('123456', msg.external_id)

    def test_delivery_reports(self):
        msg = self.create_msg(direction='O', text='Outgoing message', contact=self.trey, status=WIRED,
                              external_id=six.text_type(uuid.uuid4()),)

        data = dict(event_type='delivery_report',
                    event_id=six.text_type(uuid.uuid4()),
                    message_type='event',
                    delivery_status='failed',
                    user_message_id=msg.external_id)

        callback_url = reverse('handlers.vumi_handler', args=['event', self.channel.uuid])

        # response = self.client.post(callback_url, json.dumps(data), content_type="application/json")
        # self.assertEquals(200, response.status_code)

        # check that we've become errored
        # sms = Msg.objects.get(pk=sms.pk)
        # self.assertEquals(ERRORED, sms.status)

        # couple more failures should move to failure
        # Msg.objects.filter(pk=sms.pk).update(status=WIRED)
        # self.client.post(callback_url, json.dumps(data), content_type="application/json")

        # Msg.objects.filter(pk=sms.pk).update(status=WIRED)
        # self.client.post(callback_url, json.dumps(data), content_type="application/json")

        # sms = Msg.objects.get(pk=sms.pk)
        # self.assertEquals(FAILED, sms.status)

        # successful deliveries shouldn't stomp on failures
        # del data['delivery_status']
        # self.client.post(callback_url, json.dumps(data), content_type="application/json")
        # sms = Msg.objects.get(pk=sms.pk)
        # self.assertEquals(FAILED, sms.status)

        # if we are wired we can now be successful again
        data['delivery_status'] = 'delivered'
        Msg.objects.filter(pk=msg.pk).update(status=WIRED)
        self.client.post(callback_url, json.dumps(data), content_type="application/json")

        msg.refresh_from_db()
        self.assertEqual(DELIVERED, msg.status)

    def test_send(self):
        joe = self.create_contact("Joe", "+250788383383")
        self.create_group("Reporters", [joe])
        msg = joe.send("Test message", self.admin, trigger_send=False)

        r = get_redis_connection()

        try:
            settings.SEND_MESSAGES = True

            with patch('requests.put') as mock:
                mock.return_value = MockResponse(200, '{ "message_id": "1515" }')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                self.assertEqual(mock.call_args[0][0], 'https://go.vumi.org/api/v1/go/http_api_nostream/key/messages.json')

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(WIRED, msg.status)
                self.assertTrue(msg.sent_on)
                self.assertEqual("1515", msg.external_id)
                self.assertEqual(1, mock.call_count)

                # should have a failsafe that it was sent
                self.assertTrue(r.sismember(timezone.now().strftime(MSG_SENT_KEY), str(msg.id)))

                # try sending again, our failsafe should kick in
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # we shouldn't have been called again
                self.assertEqual(1, mock.call_count)

                # simulate Vumi calling back to us telling us it failed
                data = dict(event_type='delivery_report',
                            event_id=six.text_type(uuid.uuid4()),
                            message_type='event',
                            delivery_status='failed',
                            user_message_id=msg.external_id)
                callback_url = reverse('handlers.vumi_handler', args=['event', self.channel.uuid])
                self.client.post(callback_url, json.dumps(data), content_type="application/json")

                # get the message again
                msg.refresh_from_db()
                self.assertEqual(WIRED, msg.status)
                # self.assertTrue(msg.next_attempt)
                # self.assertFalse(r.sismember(timezone.now().strftime(MSG_SENT_KEY), str(msg.id)))

                self.clear_cache()

            with patch('requests.put') as mock:
                mock.return_value = MockResponse(500, "Error")

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as errored, we'll retry in a bit
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(1, msg.error_count)
                self.assertTrue(msg.next_attempt > timezone.now())
                self.assertEqual(1, mock.call_count)

                self.clear_cache()

            with patch('requests.put') as mock:
                mock.return_value = MockResponse(503, "<html><body><h1>503 Service Unavailable</h1>")

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as errored, we'll retry in a bit
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(2, msg.error_count)
                self.assertTrue(msg.next_attempt > timezone.now())
                self.assertEqual(1, mock.call_count)

                # Joe shouldn't be stopped and should still be in a group
                joe = Contact.objects.get(id=joe.id)
                self.assertFalse(joe.is_stopped)
                self.assertTrue(ContactGroup.user_groups.filter(contacts=joe))

                self.clear_cache()

            with patch('requests.put') as mock:
                # set our next attempt as if we are trying anew
                msg.next_attempt = timezone.now()
                msg.save()

                mock.side_effect = Exception('Kaboom')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as failed
                msg.refresh_from_db()
                self.assertEqual(FAILED, msg.status)
                self.assertEqual(2, msg.error_count)

                # regression guard: the exception path must not surface an
                # UnboundLocalError for 'response' inside the channel code
                self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
                                                                                  "referenced before assignment"))

            with patch('requests.put') as mock:
                # set our next attempt as if we are trying anew
                msg.next_attempt = timezone.now()
                msg.save()

                # a 400 from Vumi means the contact opted out
                mock.return_value = MockResponse(400, "User has opted out")

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as failed
                msg.refresh_from_db()
                self.assertEqual(FAILED, msg.status)
                self.assertEqual(2, msg.error_count)
                self.assertTrue(msg.next_attempt < timezone.now())
                self.assertEqual(1, mock.call_count)

                # contact should now be stopped as well and in no groups
                joe = Contact.objects.get(id=joe.id)
                self.assertTrue(joe.is_stopped)
                self.assertFalse(ContactGroup.user_groups.filter(contacts=joe))

        finally:
            settings.SEND_MESSAGES = False
class ZenviaTest(TembaTest):
    """Tests for the Zenvia channel handler: status callbacks, inbound receive
    (Latin-1 encoded query string) and outbound sending.

    Note: uses assertEqual throughout; assertEquals is a deprecated alias that
    was removed in Python 3.12.
    """

    def setUp(self):
        super(ZenviaTest, self).setUp()

        # replace the default test channel with a Zenvia ('ZV') channel
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'BR', 'ZV', None, '+250788123123',
                                      config=dict(account='zv-account', code='zv-code'),
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_status(self):
        # ok, what happens with an invalid uuid?
        data = dict(id="-1", status="500")
        response = self.client.get(reverse('handlers.zenvia_handler', args=['status', 'not-real-uuid']), data)
        self.assertEqual(404, response.status_code)

        # ok, try with a valid uuid, but invalid message id -1
        delivery_url = reverse('handlers.zenvia_handler', args=['status', self.channel.uuid])
        response = self.client.get(delivery_url, data)
        self.assertEqual(404, response.status_code)

        # ok, lets create an outgoing message to update
        joe = self.create_contact("Joe Biden", "+254788383383")
        msg = joe.send("Hey Joe, it's Obama, pick up!", self.admin)
        data['id'] = msg.pk

        def assertStatus(sms, status, assert_status):
            # post the Zenvia numeric status code and check the message status
            data['status'] = status
            response = self.client.get(delivery_url, data)
            self.assertEqual(200, response.status_code)
            sms = Msg.objects.get(pk=sms.id)
            self.assertEqual(assert_status, sms.status)

        assertStatus(msg, '120', DELIVERED)
        assertStatus(msg, '111', SENT)
        assertStatus(msg, '140', FAILED)
        assertStatus(msg, '999', FAILED)
        assertStatus(msg, '131', FAILED)

    def test_receive(self):
        data = {'from': '5511996458779', 'date': '31/07/2013 14:45:00'}
        # message text arrives percent-encoded as Latin-1 on the query string
        encoded_message = "?msg=H%E9llo World%21"

        callback_url = reverse('handlers.zenvia_handler', args=['receive', self.channel.uuid]) + encoded_message
        response = self.client.post(callback_url, data)

        self.assertEqual(200, response.status_code)

        # load our message
        msg = Msg.objects.get()
        self.assertEqual("+5511996458779", msg.contact.get_urn(TEL_SCHEME).path)
        self.assertEqual(INCOMING, msg.direction)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.channel, msg.channel)
        self.assertEqual("Héllo World!", msg.text)

    def test_send(self):
        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("Test message", self.admin, trigger_send=False)

        try:
            settings.SEND_MESSAGES = True

            with patch('requests.get') as mock:
                # Zenvia signals success with a '000-' prefixed body
                mock.return_value = MockResponse(200, '000-ok', method='GET')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(WIRED, msg.status)
                self.assertTrue(msg.sent_on)

                self.clear_cache()

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(400, "Error", method='POST')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(1, msg.error_count)
                self.assertTrue(msg.next_attempt)

            with patch('requests.get') as mock:
                mock.side_effect = Exception('Kaboom!')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(2, msg.error_count)
                self.assertTrue(msg.next_attempt)

                # regression guard: the exception path must not surface an
                # UnboundLocalError for 'response' inside the channel code
                self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
                                                                                  "referenced before assignment"))

            with patch('requests.get') as mock:
                # a 200 whose body doesn't start with '000' is a hard failure
                mock.return_value = MockResponse(200, '001-error', method='GET')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as failed
                msg.refresh_from_db()
                self.assertEqual(FAILED, msg.status)
                self.assertEqual(2, msg.error_count)
                self.assertTrue(msg.next_attempt)
        finally:
            settings.SEND_MESSAGES = False
class InfobipTest(TembaTest):
    """Tests for the Infobip channel handler: inbound receive, XML delivery
    reports and outbound sending.

    Note: uses assertEqual throughout; assertEquals is a deprecated alias that
    was removed in Python 3.12.
    """

    def setUp(self):
        super(InfobipTest, self).setUp()

        # replace the default test channel with an Infobip ('IB') channel
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'NG', 'IB', None, '+2347030767144',
                                      config=dict(username='ib-user', password='ib-password'),
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_received(self):
        data = {'receiver': '2347030767144', 'sender': '2347030767143', 'text': 'Hello World'}
        encoded_message = urlencode(data)

        callback_url = reverse('handlers.infobip_handler', args=['received', self.channel.uuid]) + "?" + encoded_message
        response = self.client.get(callback_url)

        self.assertEqual(200, response.status_code)

        # load our message
        msg = Msg.objects.get()
        self.assertEqual('+2347030767143', msg.contact.get_urn(TEL_SCHEME).path)
        self.assertEqual(INCOMING, msg.direction)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.channel, msg.channel)
        self.assertEqual("Hello World", msg.text)

        # try it with an invalid receiver, should fail as UUID and receiver id are mismatched
        data['receiver'] = '2347030767145'
        encoded_message = urlencode(data)

        callback_url = reverse('handlers.infobip_handler', args=['received', self.channel.uuid]) + "?" + encoded_message
        response = self.client.get(callback_url)

        # should get 404 as the channel wasn't found
        self.assertEqual(404, response.status_code)

    def test_delivered(self):
        contact = self.create_contact("Joe", '+2347030767143')
        msg = Msg.create_outgoing(self.org, self.user, contact, "Hi Joe")
        msg.external_id = '254021015120766124'
        msg.save(update_fields=('external_id',))

        # Infobip posts delivery reports as XML; the 'STATUS' placeholder is
        # swapped for each status under test
        base_body = '<DeliveryReport><message id="254021015120766124" sentdate="2014/02/10 16:12:07" ' \
                    ' donedate="2014/02/10 16:13:00" status="STATUS" gsmerror="0" price="0.65" /></DeliveryReport>'
        delivery_url = reverse('handlers.infobip_handler', args=['delivered', self.channel.uuid])

        # assert our SENT status
        response = self.client.post(delivery_url, data=base_body.replace('STATUS', 'SENT'), content_type='application/xml')
        self.assertEqual(200, response.status_code)
        msg = Msg.objects.get()
        self.assertEqual(SENT, msg.status)

        # assert our DELIVERED status
        response = self.client.post(delivery_url, data=base_body.replace('STATUS', 'DELIVERED'), content_type='application/xml')
        self.assertEqual(200, response.status_code)
        msg = Msg.objects.get()
        self.assertEqual(DELIVERED, msg.status)

        # assert our FAILED status
        response = self.client.post(delivery_url, data=base_body.replace('STATUS', 'NOT_SENT'), content_type='application/xml')
        self.assertEqual(200, response.status_code)
        msg = Msg.objects.get()
        self.assertEqual(FAILED, msg.status)

    def test_send(self):
        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("Test message", self.admin, trigger_send=False)

        try:
            settings.SEND_MESSAGES = True

            with patch('requests.post') as mock:
                mock.return_value = MockResponse(200, json.dumps(dict(results=[{'status': 0, 'messageid': 12}])))

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(SENT, msg.status)
                self.assertTrue(msg.sent_on)
                self.assertEqual('12', msg.external_id)

                self.clear_cache()

            with patch('requests.post') as mock:
                mock.return_value = MockResponse(400, "Error", method='POST')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(1, msg.error_count)
                self.assertTrue(msg.next_attempt)
        finally:
            settings.SEND_MESSAGES = False
class BlackmynaTest(TembaTest):
    """Tests for the Blackmyna channel handler: inbound receive, outbound
    sending (including a non-JSON response body) and status callbacks.

    Note: uses assertEqual throughout; assertEquals is a deprecated alias that
    was removed in Python 3.12.
    """

    def setUp(self):
        super(BlackmynaTest, self).setUp()

        # replace the default test channel with a Blackmyna ('BM') channel
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'NP', 'BM', None, '1212',
                                      config=dict(username='bm-user', password='bm-password'),
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_received(self):
        data = {'to': '1212', 'from': '+9771488532', 'text': 'Hello World', 'smsc': 'NTNepal5002'}
        encoded_message = urlencode(data)

        callback_url = reverse('handlers.blackmyna_handler', args=['receive', self.channel.uuid]) + "?" + encoded_message
        response = self.client.get(callback_url)

        self.assertEqual(200, response.status_code)

        # load our message
        msg = Msg.objects.get()
        self.assertEqual('+9771488532', msg.contact.get_urn(TEL_SCHEME).path)
        self.assertEqual(INCOMING, msg.direction)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.channel, msg.channel)
        self.assertEqual("Hello World", msg.text)

        # try it with an invalid receiver, should fail as UUID and receiver id are mismatched
        data['to'] = '1515'
        encoded_message = urlencode(data)

        callback_url = reverse('handlers.blackmyna_handler', args=['receive', self.channel.uuid]) + "?" + encoded_message
        response = self.client.get(callback_url)

        # should get 400 as the channel wasn't found
        self.assertEqual(400, response.status_code)

    def test_send(self):
        joe = self.create_contact("Joe", "+9771488532")
        msg = joe.send("Test message", self.admin, trigger_send=False)

        try:
            settings.SEND_MESSAGES = True

            with patch('requests.post') as mock:
                mock.return_value = MockResponse(200, json.dumps([{'recipient': '+9771488532',
                                                                   'id': 'asdf-asdf-asdf-asdf'}]))

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(WIRED, msg.status)
                self.assertTrue(msg.sent_on)
                self.assertEqual('asdf-asdf-asdf-asdf', msg.external_id)

                self.clear_cache()

            # return 400
            with patch('requests.post') as mock:
                mock.return_value = MockResponse(400, "Error", method='POST')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(1, msg.error_count)
                self.assertTrue(msg.next_attempt)

            # return something that isn't JSON
            with patch('requests.post') as mock:
                mock.return_value = MockResponse(200, "Error", method='POST')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(2, msg.error_count)
                self.assertTrue(msg.next_attempt)

                # we should have "Error" in our error log
                log = ChannelLog.objects.filter(msg=msg).order_by('-pk')[0]
                self.assertEqual("Error", log.response)
                self.assertEqual(503, log.response_status)
        finally:
            settings.SEND_MESSAGES = False

    def test_status(self):
        # an invalid uuid
        data = dict(id='-1', status='10')
        response = self.client.get(reverse('handlers.blackmyna_handler', args=['status', 'not-real-uuid']), data)
        self.assertEqual(400, response.status_code)

        # a valid uuid, but invalid data
        status_url = reverse('handlers.blackmyna_handler', args=['status', self.channel.uuid])
        response = self.client.get(status_url, dict())
        self.assertEqual(400, response.status_code)

        response = self.client.get(status_url, data)
        self.assertEqual(400, response.status_code)

        # ok, lets create an outgoing message to update
        joe = self.create_contact("Joe Biden", "+254788383383")
        msg = joe.send("Hey Joe, it's Obama, pick up!", self.admin)
        msg.external_id = 'msg-uuid'
        msg.save(update_fields=('external_id',))

        data['id'] = msg.external_id

        def assertStatus(sms, status, assert_status):
            # reset to WIRED before each callback so transitions are observable
            sms.status = WIRED
            sms.save()
            data['status'] = status
            response = self.client.get(status_url, data)
            self.assertEqual(200, response.status_code)
            sms = Msg.objects.get(external_id=sms.external_id)
            self.assertEqual(assert_status, sms.status)

        assertStatus(msg, '0', WIRED)
        assertStatus(msg, '1', DELIVERED)
        assertStatus(msg, '2', FAILED)
        assertStatus(msg, '3', WIRED)
        assertStatus(msg, '4', WIRED)
        assertStatus(msg, '8', SENT)
        assertStatus(msg, '16', FAILED)
class SMSCentralTest(TembaTest):
def setUp(self):
    """Swap the stock test channel for an SMSCentral ('SC') channel."""
    super(SMSCentralTest, self).setUp()

    self.channel.delete()

    sc_config = dict(username='sc-user', password='sc-password')
    self.channel = Channel.create(self.org, self.user, 'NP', 'SC', None, '1212',
                                  config=sc_config,
                                  uuid='00000000-0000-0000-0000-000000001234')
def test_received(self):
data = {'mobile': '+9771488532', 'message': 'Hello World', 'telco': 'Ncell'}
encoded_message = urlencode(data)
callback_url = reverse('handlers.smscentral_handler', args=['receive', self.channel.uuid]) + "?" + encoded_message
response = self.client.get(callback_url)
self.assertEquals(200, response.status_code)
# load our message
msg = Msg.objects.get()
self.assertEquals('+9771488532', msg.contact.get_urn(TEL_SCHEME).path)
self.assertEquals(INCOMING, msg.direction)
self.assertEquals(self.org, msg.org)
self.assertEquals(self.channel, msg.channel)
self.assertEquals("Hello World", msg.text)
# try it with an invalid channel
callback_url = reverse('handlers.smscentral_handler', args=['receive', '1234-asdf']) + "?" + encoded_message
response = self.client.get(callback_url)
# should get 400 as the channel wasn't found
self.assertEquals(400, response.status_code)
def test_send(self):
joe = self.create_contact("Joe", "+9771488532")
msg = joe.send("Test message", self.admin, trigger_send=False)
try:
settings.SEND_MESSAGES = True
with patch('requests.post') as mock:
mock.return_value = MockResponse(200, '')
# manually send it off
Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
# check the status of the message is now sent
msg.refresh_from_db()
self.assertEquals(WIRED, msg.status)
self.assertTrue(msg.sent_on)
mock.assert_called_with('http://smail.smscentral.com.np/bp/ApiSms.php',
data={'user': 'sc-user', 'pass': 'sc-password',
'mobile': '9771488532', 'content': "Test message"},
headers=TEMBA_HEADERS,
timeout=30)
self.clear_cache()
# return 400
with patch('requests.post') as mock:
mock.return_value = MockResponse(400, "Error", method='POST')
# manually send it off
Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
# message should be marked as an error
msg.refresh_from_db()
self.assertEquals(ERRORED, msg.status)
self.assertEquals(1, msg.error_count)
self.assertTrue(msg.next_attempt)
# return 400
with patch('requests.post') as mock:
mock.side_effect = Exception('Kaboom!')
# manually send it off
Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
# message should be marked as an error
msg.refresh_from_db()
self.assertEquals(ERRORED, msg.status)
self.assertEquals(2, msg.error_count)
self.assertTrue(msg.next_attempt)
self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
"referenced before assignment"))
finally:
settings.SEND_MESSAGES = False
class Hub9Test(TembaTest):
    """Tests for the Hub9 (Indonesia) channel handler: inbound receive and outbound send paths."""

    def setUp(self):
        super(Hub9Test, self).setUp()

        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'ID', 'H9', None, '+6289881134567',
                                      config=dict(username='h9-user', password='h9-password'),
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_received(self):
        """Inbound messages arrive as GET query parameters; 'sendto' must match the channel address."""
        # http://localhost:8000/api/v1/hub9/received/9bbffaeb-3b12-4fe1-bcaa-fd50cce2ada2/?
        # userid=testusr&password=test&original=6289881134567&sendto=6282881134567
        # &messageid=99123635&message=Test+sending+sms
        data = {
            'userid': 'testusr',
            'password': 'test',
            'original': '6289881134560',
            'sendto': '6289881134567',
            'message': 'Hello World'
        }
        encoded_message = urlencode(data)

        callback_url = reverse('handlers.hub9_handler', args=['received', self.channel.uuid]) + "?" + encoded_message
        response = self.client.get(callback_url)

        self.assertEqual(200, response.status_code)

        # load our message
        msg = Msg.objects.get()
        self.assertEqual('+6289881134560', msg.contact.get_urn(TEL_SCHEME).path)
        self.assertEqual(INCOMING, msg.direction)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.channel, msg.channel)
        self.assertEqual("Hello World", msg.text)

        # try it with an invalid receiver, should fail as UUID and receiver id are mismatched
        data['sendto'] = '6289881131111'
        encoded_message = urlencode(data)

        callback_url = reverse('handlers.hub9_handler', args=['received', self.channel.uuid]) + "?" + encoded_message
        response = self.client.get(callback_url)

        # should get 404 as the channel wasn't found
        self.assertEqual(404, response.status_code)

        # the case of 11 digits numer from hub9
        data = {
            'userid': 'testusr',
            'password': 'test',
            'original': '62811999374',
            'sendto': '6289881134567',
            'message': 'Hello Jakarta'
        }
        encoded_message = urlencode(data)

        callback_url = reverse('handlers.hub9_handler', args=['received', self.channel.uuid]) + "?" + encoded_message
        response = self.client.get(callback_url)

        self.assertEqual(200, response.status_code)

        # load our message
        msg = Msg.objects.all().order_by('-pk').first()
        self.assertEqual('+62811999374', msg.contact.raw_tel())
        self.assertEqual(INCOMING, msg.direction)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.channel, msg.channel)
        self.assertEqual("Hello Jakarta", msg.text)

    def test_send(self):
        """Outbound send: success marks msg SENT, HTTP 400 marks it ERRORED."""
        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("Test message", self.admin, trigger_send=False)

        try:
            settings.SEND_MESSAGES = True

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(200, "000")

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(SENT, msg.status)
                self.assertTrue(msg.sent_on)

                # the request must go to the Hub9 endpoint
                self.assertTrue(mock.call_args[0][0].startswith(HUB9_ENDPOINT))
                self.clear_cache()

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(400, "Error", method='POST')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(1, msg.error_count)
                self.assertTrue(msg.next_attempt)
        finally:
            settings.SEND_MESSAGES = False
class DartMediaTest(TembaTest):
    """Tests for the DartMedia (Indonesia) channel handler: inbound receive and outbound send paths."""

    def setUp(self):
        super(DartMediaTest, self).setUp()

        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'ID', 'DA', None, '+6289881134567',
                                      config=dict(username='dartmedia-user', password='dartmedia-password'),
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_received(self):
        """Inbound messages arrive as GET query parameters; covers long codes, short codes and missing params."""
        # http://localhost:8000/api/v1/dartmedia/received/9bbffaeb-3b12-4fe1-bcaa-fd50cce2ada2/?
        # userid=testusr&password=test&original=6289881134567&sendto=6282881134567
        # &messageid=99123635&message=Test+sending+sms
        data = {
            'userid': 'testusr',
            'password': 'test',
            'original': '6289881134560',
            'sendto': '6289881134567',
            'message': 'Hello World'
        }
        encoded_message = urlencode(data)

        callback_url = reverse('handlers.dartmedia_handler', args=['received', self.channel.uuid]) + "?" + encoded_message
        response = self.client.get(callback_url)

        self.assertEqual(200, response.status_code)

        # load our message
        msg = Msg.objects.get()
        self.assertEqual('+6289881134560', msg.contact.get_urn(TEL_SCHEME).path)
        self.assertEqual(INCOMING, msg.direction)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.channel, msg.channel)
        self.assertEqual("Hello World", msg.text)

        # try it with an invalid receiver, should fail as UUID and receiver id are mismatched
        data['sendto'] = '6289881131111'
        encoded_message = urlencode(data)

        callback_url = reverse('handlers.dartmedia_handler', args=['received', self.channel.uuid]) + "?" + encoded_message
        response = self.client.get(callback_url)

        # should get 404 as the channel wasn't found
        self.assertEqual(404, response.status_code)

        # the case of 11 digits number from dartmedia
        data = {
            'userid': 'testusr',
            'password': 'test',
            'original': '62811999374',
            'sendto': '6289881134567',
            'message': 'Hello Jakarta'
        }
        encoded_message = urlencode(data)

        callback_url = reverse('handlers.dartmedia_handler', args=['received', self.channel.uuid]) + "?" + encoded_message
        response = self.client.get(callback_url)

        self.assertEqual(200, response.status_code)

        # load our message
        msg = Msg.objects.all().order_by('-pk').first()
        self.assertEqual('+62811999374', msg.contact.raw_tel())
        self.assertEqual(INCOMING, msg.direction)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.channel, msg.channel)
        self.assertEqual("Hello Jakarta", msg.text)

        # short code do not have + in address
        self.channel.address = '12345'
        self.channel.save()

        # missing parameters
        data = {
            'userid': 'testusr',
            'password': 'test',
            'original': '62811999375',
            'message': 'Hello Indonesia'
        }
        encoded_message = urlencode(data)

        callback_url = reverse('handlers.dartmedia_handler', args=['received', self.channel.uuid]) + "?" + encoded_message
        response = self.client.get(callback_url)

        self.assertEqual(401, response.status_code)
        self.assertEqual(response.content, "Parameters message, original and sendto should not be null.")

        # all needed params
        data = {
            'userid': 'testusr',
            'password': 'test',
            'original': '62811999375',
            'sendto': '12345',
            'message': 'Hello Indonesia'
        }
        encoded_message = urlencode(data)

        callback_url = reverse('handlers.dartmedia_handler', args=['received', self.channel.uuid]) + "?" + encoded_message
        response = self.client.get(callback_url)

        self.assertEqual(200, response.status_code)

        # load our message
        msg = Msg.objects.all().order_by('-pk').first()
        self.assertEqual('+62811999375', msg.contact.raw_tel())
        self.assertEqual(INCOMING, msg.direction)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.channel, msg.channel)
        self.assertEqual("Hello Indonesia", msg.text)

    def test_send(self):
        """Outbound send: success marks msg SENT, HTTP 400 marks it ERRORED."""
        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("Test message", self.admin, trigger_send=False)

        try:
            settings.SEND_MESSAGES = True

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(200, "000")

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(SENT, msg.status)
                self.assertTrue(msg.sent_on)
                self.clear_cache()

                # the request must go to the DartMedia endpoint
                self.assertTrue(mock.call_args[0][0].startswith(DART_MEDIA_ENDPOINT))

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(400, "Error", method='POST')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(1, msg.error_count)
                self.assertTrue(msg.next_attempt)
        finally:
            settings.SEND_MESSAGES = False
class HighConnectionTest(TembaTest):
    """Tests for the HighConnection (France) channel handler: inbound, status callbacks and sending."""

    def setUp(self):
        super(HighConnectionTest, self).setUp()

        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'FR', 'HX', None, '5151',
                                      config=dict(username='hcnx-user', password='hcnx-password'),
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_handler(self):
        """Inbound receive (with RECEPTION_DATE parsing) and a delivery-status callback."""
        # http://localhost:8000/api/v1/hcnx/receive/asdf-asdf-asdf-asdf/?FROM=+33610346460&TO=5151&MESSAGE=Hello+World
        data = {'FROM': '+33610346460', 'TO': '5151', 'MESSAGE': 'Hello World', 'RECEPTION_DATE': '2015-04-02T14:26:06'}

        callback_url = reverse('handlers.hcnx_handler', args=['receive', self.channel.uuid])
        response = self.client.post(callback_url, data)

        self.assertEqual(200, response.status_code)

        # load our message
        msg = Msg.objects.get()
        self.assertEqual('+33610346460', msg.contact.get_urn(TEL_SCHEME).path)
        self.assertEqual(INCOMING, msg.direction)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.channel, msg.channel)
        self.assertEqual("Hello World", msg.text)
        self.assertEqual(14, msg.sent_on.astimezone(pytz.utc).hour)

        # try it with an invalid receiver, should fail as UUID isn't known
        callback_url = reverse('handlers.hcnx_handler', args=['receive', uuid.uuid4()])
        response = self.client.post(callback_url, data)

        # should get 400 as the channel wasn't found
        self.assertEqual(400, response.status_code)

        # create an outgoing message instead
        contact = msg.contact
        Msg.objects.all().delete()

        contact.send("outgoing message", self.admin)
        msg = Msg.objects.get()

        # now update the status via a callback
        data = {'ret_id': msg.id, 'status': '6'}
        encoded_message = urlencode(data)

        callback_url = reverse('handlers.hcnx_handler', args=['status', self.channel.uuid]) + "?" + encoded_message
        response = self.client.get(callback_url)

        self.assertEqual(200, response.status_code)

        msg = Msg.objects.get()
        self.assertEqual(DELIVERED, msg.status)

    def test_send(self):
        """Outbound send: success (200), HTTP error (400) and a raised exception all update msg state."""
        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("Test message", self.admin, trigger_send=False)

        try:
            settings.SEND_MESSAGES = True

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(200, "Sent")

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(WIRED, msg.status)
                self.assertTrue(msg.sent_on)

                self.clear_cache()

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(400, "Error")

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(1, msg.error_count)
                self.assertTrue(msg.next_attempt)

            # raise an exception during the send
            with patch('requests.get') as mock:
                mock.side_effect = Exception('Kaboom!')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(2, msg.error_count)
                self.assertTrue(msg.next_attempt)

                # regression guard: the send path must not hit an unbound 'response' variable
                self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
                                                                                  "referenced before assignment"))
        finally:
            settings.SEND_MESSAGES = False
class TwilioTest(TembaTest):
    """Tests for the Twilio channel handler: MMS receive, SMS receive, status callbacks and sending."""

    def setUp(self):
        super(TwilioTest, self).setUp()

        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'RW', 'T', None, '+250785551212',
                                      uuid='00000000-0000-0000-0000-000000001234')

        # twilio test credentials
        self.account_sid = "ACe54dc36bfd2a3b483b7ed854b2dd40c1"
        self.account_token = "0b14d47901387c03f92253a4e4449d5e"
        self.application_sid = "AP6fe2069df7f9482a8031cb61dc155de2"

        self.channel.org.config = json.dumps({ACCOUNT_SID: self.account_sid,
                                              ACCOUNT_TOKEN: self.account_token,
                                              APPLICATION_SID: self.application_sid})
        self.channel.org.save()

    def signed_request(self, url, data, validator=None):
        """
        Makes a post to the Twilio handler with a computed signature
        """
        if not validator:
            validator = RequestValidator(self.org.get_twilio_client().auth[1])

        signature = validator.compute_signature('https://' + settings.TEMBA_HOST + url, data)
        return self.client.post(url, data, **{'HTTP_X_TWILIO_SIGNATURE': signature})

    @patch('temba.orgs.models.TwilioRestClient', MockTwilioClient)
    @patch('temba.ivr.clients.TwilioClient', MockTwilioClient)
    @patch('twilio.util.RequestValidator', MockRequestValidator)
    def test_receive_mms(self):
        """MMS receive: attached media is downloaded and stored, yielding a media message."""
        post_data = dict(To=self.channel.address, From='+250788383383', Body="Test",
                         NumMedia='1', MediaUrl0='https://yourimage.io/IMPOSSIBLE-HASH',
                         MediaContentType0='audio/x-wav')

        twilio_url = reverse('handlers.twilio_handler')

        client = self.org.get_twilio_client()
        validator = RequestValidator(client.auth[1])
        signature = validator.compute_signature('https://' + settings.TEMBA_HOST + '/handlers/twilio/', post_data)

        with patch('requests.get') as mock_get:
            mock = MockResponse(200, 'Fake Recording Bits')
            mock.add_header('Content-Disposition', 'filename="audio0000.wav"')
            mock.add_header('Content-Type', 'audio/x-wav')
            mock_get.return_value = mock

            response = self.client.post(twilio_url, post_data, **{'HTTP_X_TWILIO_SIGNATURE': signature})
            self.assertEqual(201, response.status_code)

        # we should have two messages, one for the text, the other for the media
        msgs = Msg.objects.all().order_by('-created_on')
        self.assertEqual(2, msgs.count())
        self.assertEqual('Test', msgs[0].text)
        self.assertIsNone(msgs[0].media)
        self.assertTrue(msgs[1].media.startswith('audio/x-wav:https://%s' % settings.AWS_BUCKET_DOMAIN))
        self.assertTrue(msgs[1].media.endswith('.wav'))

        # text should have the url (without the content type)
        self.assertTrue(msgs[1].text.startswith('https://%s' % settings.AWS_BUCKET_DOMAIN))
        self.assertTrue(msgs[1].text.endswith('.wav'))

        Msg.objects.all().delete()

        # try with no message body
        with patch('requests.get') as mock_get:
            mock = MockResponse(200, 'Fake Recording Bits')
            mock.add_header('Content-Disposition', 'filename="audio0000.wav"')
            mock.add_header('Content-Type', 'audio/x-wav')
            mock_get.return_value = mock

            post_data['Body'] = ''
            signature = validator.compute_signature('https://' + settings.TEMBA_HOST + '/handlers/twilio/', post_data)
            response = self.client.post(twilio_url, post_data, **{'HTTP_X_TWILIO_SIGNATURE': signature})

        # just a single message this time
        msg = Msg.objects.get()
        self.assertTrue(msg.media.startswith('audio/x-wav:https://%s' % settings.AWS_BUCKET_DOMAIN))
        self.assertTrue(msg.media.endswith('.wav'))

        Msg.objects.all().delete()

        # media fetch first 404s, then succeeds with an inline vcard
        with patch('requests.get') as mock_get:
            mock1 = MockResponse(404, 'No such file')
            mock2 = MockResponse(200, 'Fake VCF Bits')
            mock2.add_header('Content-Type', 'text/x-vcard')
            mock2.add_header('Content-Disposition', 'inline')
            mock_get.side_effect = (mock1, mock2)

            post_data['Body'] = ''
            signature = validator.compute_signature('https://' + settings.TEMBA_HOST + '/handlers/twilio/', post_data)
            response = self.client.post(twilio_url, post_data, **{'HTTP_X_TWILIO_SIGNATURE': signature})

        msg = Msg.objects.get()
        self.assertTrue(msg.media.startswith('text/x-vcard:https://%s' % settings.AWS_BUCKET_DOMAIN))
        self.assertTrue(msg.media.endswith('.vcf'))

    def test_receive(self):
        """SMS receive: signature validation, number normalization, status callbacks and the TwiML API handler."""
        post_data = dict(To=self.channel.address, From='+250788383383', Body="Hello World")
        twilio_url = reverse('handlers.twilio_handler')

        try:
            self.client.post(twilio_url, post_data)
            self.fail("Invalid signature, should have failed")
        except ValidationError:
            pass

        # this time sign it appropriately, should work
        client = self.org.get_twilio_client()
        validator = RequestValidator(client.auth[1])

        # remove twilio connection
        self.channel.org.config = json.dumps({})
        self.channel.org.save()

        signature = validator.compute_signature('https://' + settings.TEMBA_HOST + '/handlers/twilio/', post_data)
        response = self.client.post(twilio_url, post_data, **{'HTTP_X_TWILIO_SIGNATURE': signature})

        self.assertEqual(400, response.status_code)

        # connect twilio again
        self.channel.org.config = json.dumps({ACCOUNT_SID: self.account_sid,
                                              ACCOUNT_TOKEN: self.account_token,
                                              APPLICATION_SID: self.application_sid})
        self.channel.org.save()

        response = self.signed_request(twilio_url, post_data)
        self.assertEqual(response.status_code, 201)

        # and we should have a new message
        msg1 = Msg.objects.get()
        self.assertEqual("+250788383383", msg1.contact.get_urn(TEL_SCHEME).path)
        self.assertEqual(INCOMING, msg1.direction)
        self.assertEqual(self.org, msg1.org)
        self.assertEqual(self.channel, msg1.channel)
        self.assertEqual("Hello World", msg1.text)

        # try without including number, but with country
        del post_data['To']
        post_data['ToCountry'] = 'RW'

        response = self.signed_request(twilio_url, post_data)
        self.assertEqual(response.status_code, 400)

        # try with non-normalized number
        post_data['To'] = '0785551212'
        post_data['ToCountry'] = 'RW'
        response = self.signed_request(twilio_url, post_data)
        self.assertEqual(response.status_code, 201)

        # and we should have another new message
        msg2 = Msg.objects.exclude(pk=msg1.pk).get()
        self.assertEqual(self.channel, msg2.channel)

        # create an outgoing message instead
        contact = msg2.contact
        Msg.objects.all().delete()

        contact.send("outgoing message", self.admin)
        msg = Msg.objects.get()

        # now update the status via a callback
        post_data['SmsStatus'] = 'sent'
        validator = RequestValidator(self.org.get_twilio_client().auth[1])

        # remove twilio connection
        self.channel.org.config = json.dumps({})
        self.channel.org.save()

        response = self.signed_request(twilio_url + "?action=callback&id=%d" % msg.id, post_data, validator)
        self.assertEqual(response.status_code, 400)

        # connect twilio again
        self.channel.org.config = json.dumps({ACCOUNT_SID: self.account_sid,
                                              ACCOUNT_TOKEN: self.account_token,
                                              APPLICATION_SID: self.application_sid})
        self.channel.org.save()

        response = self.signed_request(twilio_url + "?action=callback&id=%d" % msg.id, post_data)
        self.assertEqual(response.status_code, 200)

        msg = Msg.objects.get()
        self.assertEqual(SENT, msg.status)

        # try it with a failed SMS
        Msg.objects.all().delete()
        contact.send("outgoing message", self.admin)
        msg = Msg.objects.get()

        # now update the status via a callback
        post_data['SmsStatus'] = 'failed'

        response = self.signed_request(twilio_url + "?action=callback&id=%d" % msg.id, post_data)
        self.assertEqual(response.status_code, 200)

        msg = Msg.objects.get()
        self.assertEqual(FAILED, msg.status)

        # no message with id
        Msg.objects.all().delete()

        response = self.signed_request(twilio_url + "?action=callback&id=%d" % msg.id, post_data)
        self.assertEqual(response.status_code, 400)

        # test TwiML Handler...
        self.channel.delete()
        post_data = dict(To=self.channel.address, From='+250788383300', Body="Hello World")

        # try without signing
        twiml_api_url = reverse('handlers.twiml_api_handler', args=['1234-1234-1234-12345'])
        response = self.client.post(twiml_api_url, post_data)
        self.assertEqual(response.status_code, 400)

        # create new channel
        self.channel = Channel.create(self.org, self.user, 'RW', 'TW', None, '+250785551212',
                                      uuid='00000000-0000-0000-0000-000000001234')

        send_url = "https://api.twilio.com"

        self.channel.config = json.dumps({ACCOUNT_SID: self.account_sid, ACCOUNT_TOKEN: self.account_token,
                                          Channel.CONFIG_SEND_URL: send_url})
        self.channel.save()

        post_data = dict(To=self.channel.address, From='+250788383300', Body="Hello World")
        twiml_api_url = reverse('handlers.twiml_api_handler', args=[self.channel.uuid])

        try:
            self.client.post(twiml_api_url, post_data)
            self.fail("Invalid signature, should have failed")
        except ValidationError:
            pass

        client = self.channel.get_twiml_client()
        validator = RequestValidator(client.auth[1])
        signature = validator.compute_signature(
            'https://' + settings.HOSTNAME + '/handlers/twiml_api/' + self.channel.uuid,
            post_data
        )
        response = self.client.post(twiml_api_url, post_data, **{'HTTP_X_TWILIO_SIGNATURE': signature})

        self.assertEqual(201, response.status_code)

        msg1 = Msg.objects.get()
        self.assertEqual("+250788383300", msg1.contact.get_urn(TEL_SCHEME).path)
        self.assertEqual(INCOMING, msg1.direction)
        self.assertEqual(self.org, msg1.org)
        self.assertEqual(self.channel, msg1.channel)
        self.assertEqual("Hello World", msg1.text)

    def test_send(self):
        """Outbound send via the Twilio REST API, delivery callback handling and channel log bookkeeping."""
        from temba.orgs.models import ACCOUNT_SID, ACCOUNT_TOKEN, APPLICATION_SID

        org_config = self.org.config_json()
        org_config[ACCOUNT_SID] = 'twilio_sid'
        org_config[ACCOUNT_TOKEN] = 'twilio_token'
        org_config[APPLICATION_SID] = 'twilio_sid'
        self.org.config = json.dumps(org_config)
        self.org.save()

        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("Test message", self.admin, trigger_send=False)

        with self.settings(SEND_MESSAGES=True):
            with patch('twilio.rest.resources.messages.Messages.create') as mock:
                mock.return_value = "Sent"

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(WIRED, msg.status)
                self.assertTrue(msg.sent_on)

                self.clear_cache()

                # handle the status callback
                callback_url = Channel.build_twilio_callback_url(msg.pk)

                client = self.org.get_twilio_client()
                validator = RequestValidator(client.auth[1])
                post_data = dict(SmsStatus='delivered', To='+250788383383')
                signature = validator.compute_signature(callback_url, post_data)

                response = self.client.post(callback_url, post_data, **{'HTTP_X_TWILIO_SIGNATURE': signature})

                self.assertEqual(response.status_code, 200)
                msg.refresh_from_db()
                self.assertEqual(msg.status, DELIVERED)

            with patch('twilio.rest.resources.messages.Messages.create') as mock:
                mock.side_effect = Exception("Failed to send message")

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(1, msg.error_count)
                self.assertTrue(msg.next_attempt)

            with patch('twilio.rest.resources.messages.Messages.create') as mock:
                mock.side_effect = TwilioRestException(400, "https://twilio.com/", "User has opted out", code=21610)

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as failed and the contact should be stopped
                msg.refresh_from_db()
                self.assertEqual(FAILED, msg.status)
                self.assertTrue(Contact.objects.get(id=msg.contact_id))

        # check that our channel log works as well
        self.login(self.admin)

        response = self.client.get(reverse('channels.channellog_list') + "?channel=%d" % (self.channel.pk))

        # there should be three log items for the three times we sent
        self.assertEqual(3, len(response.context['channellog_list']))

        # number of items on this page should be right as well
        self.assertEqual(3, response.context['paginator'].count)
        self.assertEqual(2, self.channel.get_error_log_count())
        self.assertEqual(1, self.channel.get_success_log_count())

        # view the detailed information for one of them
        response = self.client.get(reverse('channels.channellog_read', args=[ChannelLog.objects.all()[1].pk]))

        # check that it contains the log of our exception
        self.assertContains(response, "Failed to send message")

        # delete our error entries
        ChannelLog.objects.filter(is_error=True).delete()

        # our channel counts should be unaffected
        self.channel = Channel.objects.get(id=self.channel.pk)
        self.assertEqual(2, self.channel.get_error_log_count())
        self.assertEqual(1, self.channel.get_success_log_count())
class TwilioMessagingServiceTest(TembaTest):
    """Tests for the Twilio Messaging Service channel handler: receive, sending and channel logs."""

    def setUp(self):
        super(TwilioMessagingServiceTest, self).setUp()

        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'US', 'TMS', None, None,
                                      config=dict(messaging_service_sid="MSG-SERVICE-SID"),
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_receive(self):
        """Inbound receive requires a valid Twilio signature and a connected Twilio account."""
        # twilio test credentials
        account_sid = "ACe54dc36bfd2a3b483b7ed854b2dd40c1"
        account_token = "0b14d47901387c03f92253a4e4449d5e"
        application_sid = "AP6fe2069df7f9482a8031cb61dc155de2"

        self.channel.org.config = json.dumps({ACCOUNT_SID: account_sid, ACCOUNT_TOKEN: account_token,
                                              APPLICATION_SID: application_sid})
        self.channel.org.save()

        messaging_service_sid = self.channel.config_json()['messaging_service_sid']

        post_data = dict(message_service_sid=messaging_service_sid, From='+250788383383', Body="Hello World")
        twilio_url = reverse('handlers.twilio_messaging_service_handler', args=['receive', self.channel.uuid])

        try:
            self.client.post(twilio_url, post_data)
            self.fail("Invalid signature, should have failed")
        except ValidationError:
            pass

        # this time sign it appropriately, should work
        client = self.org.get_twilio_client()
        validator = RequestValidator(client.auth[1])
        signature = validator.compute_signature(
            'https://' + settings.HOSTNAME + '/handlers/twilio_messaging_service/receive/' + self.channel.uuid,
            post_data
        )
        response = self.client.post(twilio_url, post_data, **{'HTTP_X_TWILIO_SIGNATURE': signature})

        self.assertEqual(201, response.status_code)

        # and we should have a new message
        msg1 = Msg.objects.get()
        self.assertEqual("+250788383383", msg1.contact.get_urn(TEL_SCHEME).path)
        self.assertEqual(INCOMING, msg1.direction)
        self.assertEqual(self.org, msg1.org)
        self.assertEqual(self.channel, msg1.channel)
        self.assertEqual("Hello World", msg1.text)

        # remove twilio connection
        self.channel.org.config = json.dumps({})
        self.channel.org.save()

        signature = validator.compute_signature(
            'https://' + settings.HOSTNAME + '/handlers/twilio_messaging_service/receive/' + self.channel.uuid,
            post_data
        )
        response = self.client.post(twilio_url, post_data, **{'HTTP_X_TWILIO_SIGNATURE': signature})

        self.assertEqual(400, response.status_code)

    def test_send(self):
        """Outbound send via messaging service, delivery callback and channel log bookkeeping."""
        from temba.orgs.models import ACCOUNT_SID, ACCOUNT_TOKEN, APPLICATION_SID

        org_config = self.org.config_json()
        org_config[ACCOUNT_SID] = 'twilio_sid'
        org_config[ACCOUNT_TOKEN] = 'twilio_token'
        org_config[APPLICATION_SID] = 'twilio_sid'
        self.org.config = json.dumps(org_config)
        self.org.save()

        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("Test message", self.admin, trigger_send=False)

        # note: self.settings() already overrides SEND_MESSAGES for the duration of the block,
        # so no direct assignment to settings is needed (a previous direct assignment leaked state)
        with self.settings(SEND_MESSAGES=True):
            with patch('twilio.rest.resources.Messages.create') as mock:
                mock.return_value = "Sent"

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(WIRED, msg.status)
                self.assertTrue(msg.sent_on)

                self.clear_cache()

                # handle the status callback
                callback_url = Channel.build_twilio_callback_url(msg.pk)

                client = self.org.get_twilio_client()
                validator = RequestValidator(client.auth[1])
                post_data = dict(SmsStatus='delivered', To='+250788383383')
                signature = validator.compute_signature(callback_url, post_data)

                response = self.client.post(callback_url, post_data, **{'HTTP_X_TWILIO_SIGNATURE': signature})

                self.assertEqual(response.status_code, 200)
                msg.refresh_from_db()
                self.assertEqual(msg.status, DELIVERED)

            with patch('twilio.rest.resources.Messages.create') as mock:
                mock.side_effect = Exception("Failed to send message")

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(1, msg.error_count)
                self.assertTrue(msg.next_attempt)

        # check that our channel log works as well
        self.login(self.admin)

        response = self.client.get(reverse('channels.channellog_list') + "?channel=%d" % self.channel.pk)

        # there should be two log items for the two times we sent
        self.assertEqual(2, len(response.context['channellog_list']))

        # of items on this page should be right as well
        self.assertEqual(2, response.context['paginator'].count)

        # the counts on our relayer should be correct as well
        self.channel = Channel.objects.get(id=self.channel.pk)
        self.assertEqual(1, self.channel.get_error_log_count())
        self.assertEqual(1, self.channel.get_success_log_count())

        # view the detailed information for one of them
        response = self.client.get(reverse('channels.channellog_read', args=[ChannelLog.objects.all()[1].pk]))

        # check that it contains the log of our exception
        self.assertContains(response, "Failed to send message")

        # delete our error entry
        ChannelLog.objects.filter(is_error=True).delete()

        # our channel counts should be unaffected
        self.channel = Channel.objects.get(id=self.channel.pk)
        self.assertEqual(1, self.channel.get_error_log_count())
        self.assertEqual(1, self.channel.get_success_log_count())
class ClickatellTest(TembaTest):
    """Tests for the Clickatell SMS channel: inbound receive in several charsets, status callbacks and sending."""

    def setUp(self):
        super(ClickatellTest, self).setUp()

        self.channel.delete()
        # replace the default test channel with a Clickatell ('CT') channel carrying the API credentials
        self.channel = Channel.create(self.org, self.user, 'RW', 'CT', None, '+250788123123',
                                      config=dict(username='uname', password='pword', api_id='api1'),
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_receive_utf16(self):
        """An inbound message flagged charset=UTF-16BE is decoded from its %00-escaped byte pairs."""
        self.channel.org.config = json.dumps({Channel.CONFIG_API_ID: '12345', Channel.CONFIG_USERNAME: 'uname', Channel.CONFIG_PASSWORD: 'pword'})
        self.channel.org.save()

        data = {'to': self.channel.address,
                'from': '250788383383',
                'timestamp': '2012-10-10 10:10:10',
                'moMsgId': 'id1234'}

        encoded_message = urlencode(data)
        # text is appended pre-encoded (not via urlencode) so the raw UTF-16BE byte escapes reach the handler untouched
        encoded_message += "&text=%00m%00e%00x%00i%00c%00o%00+%00k%00+%00m%00i%00s%00+%00p%00a%00p%00a%00s%00+%00n%00o%00+%00t%00e%00n%00%ED%00a%00+%00d%00i%00n%00e%00r%00o%00+%00p%00a%00r%00a%00+%00c%00o%00m%00p%00r%00a%00r%00n%00o%00s%00+%00l%00o%00+%00q%00+%00q%00u%00e%00r%00%ED%00a%00m%00o%00s%00.%00."
        encoded_message += "&charset=UTF-16BE"
        receive_url = reverse('handlers.clickatell_handler', args=['receive', self.channel.uuid]) + '?' + encoded_message

        response = self.client.get(receive_url)
        self.assertEquals(200, response.status_code)

        # and we should have a new message
        msg1 = Msg.objects.get()
        self.assertEquals("+250788383383", msg1.contact.get_urn(TEL_SCHEME).path)
        self.assertEquals(INCOMING, msg1.direction)
        self.assertEquals(self.org, msg1.org)
        self.assertEquals(self.channel, msg1.channel)
        self.assertEquals(u"mexico k mis papas no ten\xeda dinero para comprarnos lo q quer\xedamos..", msg1.text)
        self.assertEquals(2012, msg1.sent_on.year)
        self.assertEquals('id1234', msg1.external_id)

    def test_receive_iso_8859_1(self):
        """Inbound messages flagged charset=ISO-8859-1 decode correctly, including control and high-bit bytes."""
        self.channel.org.config = json.dumps({Channel.CONFIG_API_ID: '12345', Channel.CONFIG_USERNAME: 'uname', Channel.CONFIG_PASSWORD: 'pword'})
        self.channel.org.save()

        data = {'to': self.channel.address,
                'from': '250788383383',
                'timestamp': '2012-10-10 10:10:10',
                'moMsgId': 'id1234'}

        # case 1: mix of control chars, UTF-8 replacement-char bytes and plain ASCII
        encoded_message = urlencode(data)
        encoded_message += "&text=%05%EF%BF%BD%EF%BF%BD%034%02%02i+mapfumbamwe+vana+4+kuwacha+handingapedze+izvozvo+ndozvikukonzera+kt+varoorwe+varipwere+ngapaonekwe+ipapo+ndatenda."
        encoded_message += "&charset=ISO-8859-1"
        receive_url = reverse('handlers.clickatell_handler', args=['receive', self.channel.uuid]) + '?' + encoded_message

        response = self.client.get(receive_url)
        self.assertEquals(200, response.status_code)

        # and we should have a new message
        msg1 = Msg.objects.get()
        self.assertEquals("+250788383383", msg1.contact.get_urn(TEL_SCHEME).path)
        self.assertEquals(INCOMING, msg1.direction)
        self.assertEquals(self.org, msg1.org)
        self.assertEquals(self.channel, msg1.channel)
        self.assertEquals(u'\x05\x034\x02\x02i mapfumbamwe vana 4 kuwacha handingapedze izvozvo ndozvikukonzera kt varoorwe varipwere ngapaonekwe ipapo ndatenda.', msg1.text)
        self.assertEquals(2012, msg1.sent_on.year)
        self.assertEquals('id1234', msg1.external_id)

        Msg.objects.all().delete()

        # case 2: a latin-1 accented character (%EC -> ì)
        encoded_message = urlencode(data)
        encoded_message += "&text=Artwell+S%ECbbnda"
        encoded_message += "&charset=ISO-8859-1"
        receive_url = reverse('handlers.clickatell_handler', args=['receive', self.channel.uuid]) + '?' + encoded_message

        response = self.client.get(receive_url)
        self.assertEquals(200, response.status_code)

        # and we should have a new message
        msg1 = Msg.objects.get()
        self.assertEquals("+250788383383", msg1.contact.get_urn(TEL_SCHEME).path)
        self.assertEquals(INCOMING, msg1.direction)
        self.assertEquals(self.org, msg1.org)
        self.assertEquals(self.channel, msg1.channel)
        self.assertEquals("Artwell Sìbbnda", msg1.text)
        self.assertEquals(2012, msg1.sent_on.year)
        self.assertEquals('id1234', msg1.external_id)

        Msg.objects.all().delete()

        # case 3: latin-1 currency symbols (%A3 -> £, %A5 -> ¥)
        encoded_message = urlencode(data)
        encoded_message += "&text=a%3F+%A3irvine+stinta%3F%A5.++"
        encoded_message += "&charset=ISO-8859-1"
        receive_url = reverse('handlers.clickatell_handler', args=['receive', self.channel.uuid]) + '?' + encoded_message

        response = self.client.get(receive_url)
        self.assertEquals(200, response.status_code)

        # and we should have a new message
        msg1 = Msg.objects.get()
        self.assertEquals("+250788383383", msg1.contact.get_urn(TEL_SCHEME).path)
        self.assertEquals(INCOMING, msg1.direction)
        self.assertEquals(self.org, msg1.org)
        self.assertEquals(self.channel, msg1.channel)
        self.assertEquals("a? £irvine stinta?¥. ", msg1.text)
        self.assertEquals(2012, msg1.sent_on.year)
        self.assertEquals('id1234', msg1.external_id)

        Msg.objects.all().delete()

        # case 4: plain ASCII text that went through the normal urlencode path
        data['text'] = 'when? or What? is this '

        encoded_message = urlencode(data)
        encoded_message += "&charset=ISO-8859-1"
        receive_url = reverse('handlers.clickatell_handler', args=['receive', self.channel.uuid]) + '?' + encoded_message

        response = self.client.get(receive_url)
        self.assertEquals(200, response.status_code)

        # and we should have a new message
        msg1 = Msg.objects.get()
        self.assertEquals("+250788383383", msg1.contact.get_urn(TEL_SCHEME).path)
        self.assertEquals(INCOMING, msg1.direction)
        self.assertEquals(self.org, msg1.org)
        self.assertEquals(self.channel, msg1.channel)
        self.assertEquals("when? or What? is this ", msg1.text)
        self.assertEquals(2012, msg1.sent_on.year)
        self.assertEquals('id1234', msg1.external_id)

    def test_receive(self):
        """A plain inbound message creates an INCOMING Msg, with its timestamp interpreted as GMT+2."""
        self.channel.org.config = json.dumps({Channel.CONFIG_API_ID: '12345', Channel.CONFIG_USERNAME: 'uname', Channel.CONFIG_PASSWORD: 'pword'})
        self.channel.org.save()

        data = {'to': self.channel.address,
                'from': '250788383383',
                'text': "Hello World",
                'timestamp': '2012-10-10 10:10:10',
                'moMsgId': 'id1234'}

        encoded_message = urlencode(data)
        receive_url = reverse('handlers.clickatell_handler', args=['receive', self.channel.uuid]) + '?' + encoded_message

        response = self.client.get(receive_url)
        self.assertEquals(200, response.status_code)

        # and we should have a new message
        msg1 = Msg.objects.get()
        self.assertEquals("+250788383383", msg1.contact.get_urn(TEL_SCHEME).path)
        self.assertEquals(INCOMING, msg1.direction)
        self.assertEquals(self.org, msg1.org)
        self.assertEquals(self.channel, msg1.channel)
        self.assertEquals("Hello World", msg1.text)
        self.assertEquals(2012, msg1.sent_on.year)

        # times are sent as GMT+2
        self.assertEquals(8, msg1.sent_on.hour)
        self.assertEquals('id1234', msg1.external_id)

    def test_status(self):
        """Status callbacks map Clickatell codes onto our statuses: '001' -> FAILED, '004' -> DELIVERED."""
        self.channel.org.config = json.dumps({Channel.CONFIG_API_ID: '12345', Channel.CONFIG_USERNAME: 'uname', Channel.CONFIG_PASSWORD: 'pword'})
        self.channel.org.save()

        contact = self.create_contact("Joe", "+250788383383")
        msg = Msg.create_outgoing(self.org, self.user, contact, "test")
        msg.external_id = 'id1234'
        msg.save(update_fields=('external_id',))

        data = {'apiMsgId': 'id1234', 'status': '001'}
        encoded_message = urlencode(data)

        callback_url = reverse('handlers.clickatell_handler', args=['status', self.channel.uuid]) + "?" + encoded_message
        response = self.client.get(callback_url)
        self.assertEquals(200, response.status_code)

        # reload our message
        msg = Msg.objects.get(pk=msg.pk)

        # make sure it is marked as failed
        self.assertEquals(FAILED, msg.status)

        # reset our status to WIRED
        msg.status = WIRED
        msg.save()

        # and do it again with a received state
        data = {'apiMsgId': 'id1234', 'status': '004'}
        encoded_message = urlencode(data)

        callback_url = reverse('handlers.clickatell_handler', args=['status', self.channel.uuid]) + "?" + encoded_message
        response = self.client.get(callback_url)

        # load our message
        msg = Msg.objects.all().order_by('-pk').first()

        # make sure it is marked as delivered
        self.assertEquals(DELIVERED, msg.status)

    def test_send(self):
        """Sending calls the Clickatell HTTP API with the expected params; the unicode flag follows the text content."""
        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("Test message", self.admin, trigger_send=False)

        try:
            settings.SEND_MESSAGES = True

            with patch('requests.get') as mock:
                msg.text = "Test message"
                mock.return_value = MockResponse(200, "000")

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEquals(WIRED, msg.status)
                self.assertTrue(msg.sent_on)

                # plain GSM-compatible text should go out with unicode=0
                params = {'api_id': 'api1',
                          'user': 'uname',
                          'password': 'pword',
                          'from': '250788123123',
                          'concat': 3,
                          'callback': 7,
                          'mo': 1,
                          'unicode': 0,
                          'to': "250788383383",
                          'text': "Test message"}
                mock.assert_called_with('https://api.clickatell.com/http/sendmsg', params=params, headers=TEMBA_HEADERS,
                                        timeout=5)

                self.clear_cache()

            with patch('requests.get') as mock:
                msg.text = "Test message ☺"
                mock.return_value = MockResponse(200, "ID: 15")

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent; "ID: 15" in the body becomes the external id
                msg.refresh_from_db()
                self.assertEquals(WIRED, msg.status)
                self.assertTrue(msg.sent_on)
                self.assertEqual(msg.external_id, "15")

                # non-GSM text flips the unicode flag
                params = {'api_id': 'api1',
                          'user': 'uname',
                          'password': 'pword',
                          'from': '250788123123',
                          'concat': 3,
                          'callback': 7,
                          'mo': 1,
                          'unicode': 1,
                          'to': "250788383383",
                          'text': "Test message ☺"}
                mock.assert_called_with('https://api.clickatell.com/http/sendmsg', params=params, headers=TEMBA_HEADERS,
                                        timeout=5)

                self.clear_cache()

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(400, "Error", method='POST')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEquals(ERRORED, msg.status)
                self.assertEquals(1, msg.error_count)
                self.assertTrue(msg.next_attempt)

            with patch('requests.get') as mock:
                mock.side_effect = Exception('Kaboom!')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEquals(ERRORED, msg.status)
                self.assertEquals(2, msg.error_count)
                self.assertTrue(msg.next_attempt)

                # regression check: the error path must not itself raise inside the channel code
                self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
                                                                                  "referenced before assignment"))
        finally:
            settings.SEND_MESSAGES = False
class TelegramTest(TembaTest):
    """Tests for the Telegram bot channel: webhook receive of text/media/location updates, and sending."""

    def setUp(self):
        super(TelegramTest, self).setUp()

        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, None, Channel.TYPE_TELEGRAM, None, 'RapidBot',
                                      config=dict(auth_token='valid'),
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_receive(self):
        """Webhook updates create incoming messages; media is fetched via the bot file API; bad payloads get a 400."""
        data = """
        {
          "update_id": 174114370,
          "message": {
            "message_id": 41,
            "from": {
              "id": 3527065,
              "first_name": "Nic",
              "last_name": "Pottier"
            },
            "chat": {
              "id": 3527065,
              "first_name": "Nic",
              "last_name": "Pottier",
              "type": "private"
            },
            "date": 1454119029,
            "text": "Hello World"
          }
        }
        """
        receive_url = reverse('handlers.telegram_handler', args=[self.channel.uuid])
        response = self.client.post(receive_url, data, content_type='application/json', post_data=data)
        self.assertEquals(200, response.status_code)

        # and we should have a new message, addressed by the sender's telegram id
        msg1 = Msg.objects.get()
        self.assertEquals('3527065', msg1.contact.get_urn(TELEGRAM_SCHEME).path)
        self.assertEquals(INCOMING, msg1.direction)
        self.assertEquals(self.org, msg1.org)
        self.assertEquals(self.channel, msg1.channel)
        self.assertEquals("Hello World", msg1.text)
        self.assertEqual(msg1.contact.name, 'Nic Pottier')

        def test_file_message(data, file_path, content_type, extension, caption=None):
            # helper: post a media update with Telegram's file APIs mocked out, then check the stored media message
            Msg.objects.all().delete()

            with patch('requests.post') as post:
                with patch('requests.get') as get:
                    post.return_value = MockResponse(200, json.dumps(dict(ok="true", result=dict(file_path=file_path))))
                    get.return_value = MockResponse(200, "Fake image bits", headers={"Content-Type": content_type})

                    response = self.client.post(receive_url, data, content_type='application/json', post_data=data)
                    self.assertEquals(200, response.status_code)

                    # should have a media message now with an image
                    msgs = Msg.objects.all().order_by('-pk')

                    # a captioned media update arrives as two messages: the media itself plus the caption text
                    if caption:
                        self.assertEqual(msgs.count(), 2)
                        self.assertEqual(msgs[1].text, caption)
                    else:
                        self.assertEqual(msgs.count(), 1)

                    self.assertTrue(msgs[0].media.startswith('%s:https://' % content_type))
                    self.assertTrue(msgs[0].media.endswith(extension))
                    self.assertTrue(msgs[0].text.startswith('https://'))
                    self.assertTrue(msgs[0].text.endswith(extension))

        # stickers are allowed
        sticker_data = """
        {
          "update_id":174114373,
          "message":{
            "message_id":44,
            "from":{
              "id":3527065,
              "first_name":"Nic",
              "last_name":"Pottier"
            },
            "chat":{
              "id":3527065,
              "first_name":"Nic",
              "last_name":"Pottier",
              "type":"private"
            },
            "date":1454119668,
            "sticker":{
              "width":436,
              "height":512,
              "thumb":{
                "file_id":"AAQDABNW--sqAAS6easb1s1rNdJYAAIC",
                "file_size":2510,
                "width":77,
                "height":90
              },
              "file_id":"BQADAwADRQADyIsGAAHtBskMy6GoLAI",
              "file_size":38440
            }
          }
        }
        """

        photo_data = """
        {
          "update_id":414383172,
          "message":{
            "message_id":52,
            "from":{
              "id":25028612,
              "first_name":"Eric",
              "last_name":"Newcomer",
              "username":"ericn"
            },
            "chat":{
              "id":25028612,
              "first_name":"Eric",
              "last_name":"Newcomer",
              "username":"ericn",
              "type":"private"
            },
            "date":1460845907,
            "photo":[
              {
                "file_id":"AgADAwADJKsxGwTofQF_vVnL5P2C2P8AAewqAARQoXPLPaJRfrgPAQABAg",
                "file_size":1527,
                "width":90,
                "height":67
              },
              {
                "file_id":"AgADAwADJKsxGwTofQF_vVnL5P2C2P8AAewqAATfgqvLofrK17kPAQABAg",
                "file_size":21793,
                "width":320,
                "height":240
              },
              {
                "file_id":"AgADAwADJKsxGwTofQF_vVnL5P2C2P8AAewqAAQn6a6fBlz_KLcPAQABAg",
                "file_size":104602,
                "width":800,
                "height":600
              },
              {
                "file_id":"AgADAwADJKsxGwTofQF_vVnL5P2C2P8AAewqAARtnUHeihUe-LYPAQABAg",
                "file_size":193145,
                "width":1280,
                "height":960
              }
            ]
          }
        }
        """

        video_data = """
        {
          "update_id":414383173,
          "message":{
            "caption": "Check out this amazeballs video",
            "message_id":54,
            "from":{
              "id":25028612,
              "first_name":"Eric",
              "last_name":"Newcomer",
              "username":"ericn"
            },
            "chat":{
              "id":25028612,
              "first_name":"Eric",
              "last_name":"Newcomer",
              "username":"ericn",
              "type":"private"
            },
            "date":1460848768,
            "video":{
              "duration":5,
              "width":640,
              "height":360,
              "thumb":{
                "file_id":"AAQDABNaEOwqAATL2L1LaefkMyccAAIC",
                "file_size":1903,
                "width":90,
                "height":50
              },
              "file_id":"BAADAwADbgADBOh9ARFryoDddM4bAg",
              "file_size":368568
            }
          }
        }
        """

        audio_data = """
        {
          "update_id":414383174,
          "message":{
            "message_id":55,
            "from":{
              "id":25028612,
              "first_name":"Eric",
              "last_name":"Newcomer",
              "username":"ericn"
            },
            "chat":{
              "id":25028612,
              "first_name":"Eric",
              "last_name":"Newcomer",
              "username":"ericn",
              "type":"private"
            },
            "date":1460849148,
            "voice":{
              "duration":2,
              "mime_type":"audio\/ogg",
              "file_id":"AwADAwADbwADBOh9AYp70sKPJ09pAg",
              "file_size":7748
            }
          }
        }
        """

        test_file_message(sticker_data, 'file/image.webp', "image/webp", "webp")
        test_file_message(photo_data, 'file/image.jpg', "image/jpeg", "jpg")
        test_file_message(video_data, 'file/video.mp4', "video/mp4", "mp4", caption="Check out this amazeballs video")
        test_file_message(audio_data, 'file/audio.oga', "audio/ogg", "oga")

        location_data = """
        {
          "update_id":414383175,
          "message":{
            "message_id":56,
            "from":{
              "id":25028612,
              "first_name":"Eric",
              "last_name":"Newcomer",
              "username":"ericn"
            },
            "chat":{
              "id":25028612,
              "first_name":"Eric",
              "last_name":"Newcomer",
              "username":"ericn",
              "type":"private"
            },
            "date":1460849460,
            "location":{
              "latitude":-2.910574,
              "longitude":-79.000239
            },
            "venue":{
              "location":{
                "latitude":-2.910574,
                "longitude":-79.000239
              },
              "title":"Fogo Mar",
              "address":"Av. Paucarbamba",
              "foursquare_id":"55033319498eed335779a701"
            }
          }
        }
        """

        # location updates need no file fetch, so no mocking required here
        # with patch('requests.post') as post:
        # post.return_value = MockResponse(200, json.dumps(dict(ok="true", result=dict(file_path=file_path))))
        Msg.objects.all().delete()
        response = self.client.post(receive_url, location_data, content_type='application/json', post_data=location_data)
        self.assertEquals(200, response.status_code)

        # should have a media message now with an image
        msgs = Msg.objects.all().order_by('-created_on')
        self.assertEqual(msgs.count(), 1)
        self.assertTrue(msgs[0].media.startswith('geo:'))
        self.assertTrue('Fogo Mar' in msgs[0].text)

        # an update with no "message" key (a channel_post) should be rejected
        no_message = """
        {
          "channel_post": {
            "caption": "@A_caption",
            "chat": {
              "id": -1001091928432,
              "title": "a title",
              "type": "channel",
              "username": "a_username"
            },
            "date": 1479722450,
            "forward_date": 1479712599,
            "forward_from": {},
            "forward_from_chat": {},
            "forward_from_message_id": 532,
            "from": {
              "first_name": "a_first_name",
              "id": 294674412
            },
            "message_id": 1310,
            "voice": {
              "duration": 191,
              "file_id": "AwADBAAD2AYAAoN65QtM8XVBVS7P5Ao",
              "file_size": 1655713,
              "mime_type": "audio/ogg"
            }
          },
          "update_id": 677142491
        }
        """
        response = self.client.post(receive_url, no_message, content_type='application/json', post_data=location_data)
        self.assertEquals(400, response.status_code)

    def test_send(self):
        """Sending via the bot API wires the message on success; an HTTP error marks it ERRORED."""
        joe = self.create_contact("Ernie", urn='telegram:1234')
        msg = joe.send("Test message", self.admin, trigger_send=False)

        try:
            settings.SEND_MESSAGES = True

            with patch('requests.post') as mock:
                mock.return_value = MockResponse(200, json.dumps({"result": {"message_id": 1234}}))

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEquals(WIRED, msg.status)
                self.assertTrue(msg.sent_on)

                self.clear_cache()

            with patch('requests.post') as mock:
                mock.return_value = MockResponse(400, "Error", method='POST')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEquals(ERRORED, msg.status)
                self.assertEquals(1, msg.error_count)
                self.assertTrue(msg.next_attempt)
        finally:
            settings.SEND_MESSAGES = False
class PlivoTest(TembaTest):
    """Tests for the Plivo channel: channel release, inbound receive, status callbacks and sending."""

    def setUp(self):
        super(PlivoTest, self).setUp()

        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'RW', 'PL', None, '+250788123123',
                                      config={Channel.CONFIG_PLIVO_AUTH_ID: 'plivo-auth-id',
                                              Channel.CONFIG_PLIVO_AUTH_TOKEN: 'plivo-auth-token',
                                              Channel.CONFIG_PLIVO_APP_ID: 'plivo-app-id'},
                                      uuid='00000000-0000-0000-0000-000000001234')

        self.joe = self.create_contact("Joe", "+250788383383")

    def test_release(self):
        """Releasing the channel (with the remote Plivo call mocked) deactivates it."""
        with patch('requests.post') as mock:
            mock.return_value = MockResponse(200, "Success", method='POST')
            self.channel.release()
            self.channel.refresh_from_db()
            self.assertFalse(self.channel.is_active)

    def test_receive(self):
        """Inbound requires a real channel uuid and a matching To number before a message is created."""
        # unknown channel uuid => rejected
        response = self.client.get(reverse('handlers.plivo_handler', args=['receive', 'not-real-uuid']), dict())
        self.assertEquals(400, response.status_code)

        # To number doesn't match the channel address => rejected
        data = dict(MessageUUID="msg-uuid", Text="Hey, there", To="254788383383", From="254788383383")
        receive_url = reverse('handlers.plivo_handler', args=['receive', self.channel.uuid])
        response = self.client.get(receive_url, data)
        self.assertEquals(400, response.status_code)

        # matching To (without the leading '+') => accepted
        data = dict(MessageUUID="msg-uuid", Text="Hey, there", To=self.channel.address.lstrip('+'), From="254788383383")
        response = self.client.get(receive_url, data)
        self.assertEquals(200, response.status_code)

        msg1 = Msg.objects.get()
        self.assertEquals("+254788383383", msg1.contact.get_urn(TEL_SCHEME).path)
        self.assertEquals(INCOMING, msg1.direction)
        self.assertEquals(self.org, msg1.org)
        self.assertEquals(self.channel, msg1.channel)
        self.assertEquals('Hey, there', msg1.text)

    def test_status(self):
        """Plivo delivery states map onto our statuses (queued/sent/delivered/undelivered/rejected)."""
        # an invalid uuid
        data = dict(MessageUUID="-1", Status="delivered", From=self.channel.address.lstrip('+'), To="254788383383")
        response = self.client.get(reverse('handlers.plivo_handler', args=['status', 'not-real-uuid']), data)
        self.assertEquals(400, response.status_code)

        # a valid uuid, but invalid data
        delivery_url = reverse('handlers.plivo_handler', args=['status', self.channel.uuid])
        response = self.client.get(delivery_url, dict())
        self.assertEquals(400, response.status_code)

        response = self.client.get(delivery_url, data)
        self.assertEquals(400, response.status_code)

        # ok, lets create an outgoing message to update
        joe = self.create_contact("Joe Biden", "+254788383383")
        msg = joe.send("Hey Joe, it's Obama, pick up!", self.admin)
        msg.external_id = 'msg-uuid'
        msg.save(update_fields=('external_id',))

        data['MessageUUID'] = msg.external_id

        def assertStatus(sms, status, assert_status):
            # reset to WIRED, post the given Plivo status, then assert our resulting status
            sms.status = WIRED
            sms.save()
            data['Status'] = status
            response = self.client.get(delivery_url, data)
            self.assertEquals(200, response.status_code)
            sms = Msg.objects.get(external_id=sms.external_id)
            self.assertEquals(assert_status, sms.status)

        assertStatus(msg, 'queued', WIRED)
        assertStatus(msg, 'sent', SENT)
        assertStatus(msg, 'delivered', DELIVERED)
        assertStatus(msg, 'undelivered', SENT)
        assertStatus(msg, 'rejected', FAILED)

    def test_send(self):
        """Sending wires the message on a 202 from Plivo; HTTP errors and exceptions mark it ERRORED."""
        msg = self.joe.send("Test message", self.admin, trigger_send=False)

        try:
            settings.SEND_MESSAGES = True

            with patch('requests.post') as mock:
                mock.return_value = MockResponse(202,
                                                 json.dumps({"message": "message(s) queued",
                                                             "message_uuid": ["db3ce55a-7f1d-11e1-8ea7-1231380bc196"],
                                                             "api_id": "db342550-7f1d-11e1-8ea7-1231380bc196"}))

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEquals(WIRED, msg.status)
                self.assertTrue(msg.sent_on)

                self.clear_cache()

            with patch('requests.get') as mock:
                mock.return_value = MockResponse(400, "Error", method='POST')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEquals(ERRORED, msg.status)
                self.assertEquals(1, msg.error_count)
                self.assertTrue(msg.next_attempt)

            with patch('requests.get') as mock:
                mock.side_effect = Exception('Kaboom!')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEquals(ERRORED, msg.status)
                self.assertEquals(2, msg.error_count)
                self.assertTrue(msg.next_attempt)

                # regression check: the error path must not itself raise inside the channel code
                self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
                                                                                  "referenced before assignment"))
        finally:
            settings.SEND_MESSAGES = False
class TwitterTest(TembaTest):
    """Tests for the Twitter channel: sending DMs and the contact-stopping behavior of Twitter error codes."""

    def setUp(self):
        super(TwitterTest, self).setUp()

        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, None, 'TT', None, 'billy_bob',
                                      config={'oauth_token': 'abcdefghijklmnopqrstuvwxyz',
                                              'oauth_token_secret': '0123456789'},
                                      uuid='00000000-0000-0000-0000-000000001234')

        self.joe = self.create_contact("Joe", "+250788383383")

    def test_send(self):
        """DMs send in one call; generic errors retry, but specific 403/404 errors fail and stop the contact."""
        joe = self.create_contact("Joe", number="+250788383383", twitter="joe1981")
        testers = self.create_group("Testers", [joe])

        msg = joe.send("This is a long message, longer than just 160 characters, it spans what was before "
                       "more than one message but which is now but one, solitary message, going off into the "
                       "Twitterverse to tweet away.",
                       self.admin, trigger_send=False)

        try:
            settings.SEND_MESSAGES = True

            with patch('twython.Twython.send_direct_message') as mock:
                mock.return_value = dict(id=1234567890)

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # assert we were only called once
                self.assertEquals(1, mock.call_count)

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEquals(WIRED, msg.status)
                self.assertEquals('1234567890', msg.external_id)
                self.assertTrue(msg.sent_on)

                self.clear_cache()

            ChannelLog.objects.all().delete()

            with patch('twython.Twython.send_direct_message') as mock:
                mock.side_effect = TwythonError("Failed to send message")

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEquals(ERRORED, msg.status)
                self.assertEquals(1, msg.error_count)
                self.assertTrue(msg.next_attempt)
                self.assertEquals("Failed to send message", ChannelLog.objects.get(msg=msg).description)

                self.clear_cache()

            ChannelLog.objects.all().delete()

            with patch('twython.Twython.send_direct_message') as mock:
                mock.side_effect = TwythonError("Different 403 error.", error_code=403)

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEquals(ERRORED, msg.status)
                self.assertEquals(2, msg.error_count)
                self.assertTrue(msg.next_attempt)

                # should not fail the contact
                contact = Contact.objects.get(pk=joe.pk)
                self.assertFalse(contact.is_stopped)
                self.assertEqual(contact.user_groups.count(), 1)

                # should record the right error
                self.assertTrue(ChannelLog.objects.get(msg=msg).description.find("Different 403 error") >= 0)

            with patch('twython.Twython.send_direct_message') as mock:
                mock.side_effect = TwythonError("You cannot send messages to users who are not following you.",
                                                error_code=403)

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # should fail the message
                msg.refresh_from_db()
                self.assertEquals(FAILED, msg.status)
                self.assertEquals(2, msg.error_count)

                # should be stopped
                contact = Contact.objects.get(pk=joe.pk)
                self.assertTrue(contact.is_stopped)
                self.assertEqual(contact.user_groups.count(), 0)

                self.clear_cache()

            # un-stop the contact and re-add to the group before the next scenario
            joe.is_stopped = False
            joe.save()
            testers.update_contacts(self.user, [joe], add=True)

            with patch('twython.Twython.send_direct_message') as mock:
                mock.side_effect = TwythonError("There was an error sending your message: You can't send direct messages to this user right now.",
                                                error_code=403)

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # should fail the message
                msg.refresh_from_db()
                self.assertEquals(FAILED, msg.status)
                self.assertEquals(2, msg.error_count)

                # should fail the contact permanently (i.e. removed from groups)
                contact = Contact.objects.get(pk=joe.pk)
                self.assertTrue(contact.is_stopped)
                self.assertEqual(contact.user_groups.count(), 0)

                self.clear_cache()

            # un-stop the contact and re-add to the group before the next scenario
            joe.is_stopped = False
            joe.save()
            testers.update_contacts(self.user, [joe], add=True)

            with patch('twython.Twython.send_direct_message') as mock:
                mock.side_effect = TwythonError("Sorry, that page does not exist.", error_code=404)

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # should fail the message
                msg.refresh_from_db()
                self.assertEqual(msg.status, FAILED)
                self.assertEqual(msg.error_count, 2)

                # should fail the contact permanently (i.e. removed from groups)
                contact = Contact.objects.get(pk=joe.pk)
                self.assertTrue(contact.is_stopped)
                self.assertEqual(contact.user_groups.count(), 0)

                self.clear_cache()
        finally:
            settings.SEND_MESSAGES = False
class MageHandlerTest(TembaTest):
    """Tests for the internal Message Mage handler endpoints that hand off Mage-created messages and contacts."""

    def setUp(self):
        super(MageHandlerTest, self).setUp()

        self.org.webhook = u'{"url": "http://fake.com/webhook.php"}'
        self.org.webhook_events = ALL_EVENTS
        self.org.save()

        self.joe = self.create_contact("Joe", number="+250788383383")

        # dynamic group that a contact named "Bob" should fall into once evaluated
        self.dyn_group = self.create_group("Bobs", query="name has Bob")

    def create_contact_like_mage(self, name, twitter):
        """
        Creates a contact as if it were created in Mage, i.e. no event/group triggering or cache updating
        """
        contact = Contact.objects.create(org=self.org, name=name, is_active=True, is_blocked=False,
                                         uuid=uuid.uuid4(), is_stopped=False,
                                         modified_by=self.user, created_by=self.user,
                                         modified_on=timezone.now(), created_on=timezone.now())
        urn = ContactURN.objects.create(org=self.org, contact=contact,
                                        urn="twitter:%s" % twitter, scheme="twitter", path=twitter, priority="90")
        return contact, urn

    def create_message_like_mage(self, text, contact, contact_urn=None):
        """
        Creates a message as if it were created in Mage, i.e. no topup decrementing or cache updating
        """
        if not contact_urn:
            contact_urn = contact.get_urn(TEL_SCHEME)
        return Msg.objects.create(org=self.org, text=text,
                                  direction=INCOMING, created_on=timezone.now(),
                                  channel=self.channel, contact=contact, contact_urn=contact_urn)

    def test_handle_message(self):
        """handle_message should mark messages handled, assign a topup exactly once, and update counts/groups."""
        url = reverse('handlers.mage_handler', args=['handle_message'])
        headers = dict(HTTP_AUTHORIZATION='Token %s' % settings.MAGE_AUTH_TOKEN)

        msg_counts = SystemLabel.get_counts(self.org)
        self.assertEqual(0, msg_counts[SystemLabel.TYPE_INBOX])
        self.assertEqual(0, msg_counts[SystemLabel.TYPE_FLOWS])

        contact_counts = ContactGroup.get_system_group_counts(self.org)
        self.assertEqual(1, contact_counts[ContactGroup.TYPE_ALL])
        self.assertEqual(1000, self.org.get_credits_remaining())

        # a Mage-created message doesn't affect counts or credits until handled
        msg = self.create_message_like_mage(text="Hello 1", contact=self.joe)

        msg_counts = SystemLabel.get_counts(self.org)
        self.assertEqual(0, msg_counts[SystemLabel.TYPE_INBOX])

        contact_counts = ContactGroup.get_system_group_counts(self.org)
        self.assertEqual(1, contact_counts[ContactGroup.TYPE_ALL])
        self.assertEqual(1000, self.org.get_credits_remaining())

        # check that GET doesn't work
        response = self.client.get(url, dict(message_id=msg.pk), **headers)
        self.assertEqual(405, response.status_code)

        # check that POST does work
        response = self.client.post(url, dict(message_id=msg.pk, new_contact=False), **headers)
        self.assertEqual(200, response.status_code)

        # check that new message is handled and has a topup
        msg = Msg.objects.get(pk=msg.pk)
        self.assertEqual('H', msg.status)
        self.assertEqual(self.welcome_topup, msg.topup)

        # check for a web hook event
        event = json.loads(WebHookEvent.objects.get(org=self.org, event=SMS_RECEIVED).data)
        self.assertEqual(msg.id, event['sms'])

        msg_counts = SystemLabel.get_counts(self.org)
        self.assertEqual(1, msg_counts[SystemLabel.TYPE_INBOX])

        contact_counts = ContactGroup.get_system_group_counts(self.org)
        self.assertEqual(1, contact_counts[ContactGroup.TYPE_ALL])
        self.assertEqual(999, self.org.get_credits_remaining())

        # check that a message that has a topup, doesn't decrement twice
        msg = self.create_message_like_mage(text="Hello 2", contact=self.joe)
        (msg.topup_id, amount) = self.org.decrement_credit()
        msg.save()

        self.client.post(url, dict(message_id=msg.pk, new_contact=False), **headers)
        msg_counts = SystemLabel.get_counts(self.org)
        self.assertEqual(2, msg_counts[SystemLabel.TYPE_INBOX])

        contact_counts = ContactGroup.get_system_group_counts(self.org)
        self.assertEqual(1, contact_counts[ContactGroup.TYPE_ALL])
        self.assertEqual(998, self.org.get_credits_remaining())

        # simulate scenario where Mage has added new contact with name that should put it into a dynamic group
        mage_contact, mage_contact_urn = self.create_contact_like_mage("Bob", "bobby81")
        msg = self.create_message_like_mage(text="Hello via Mage", contact=mage_contact, contact_urn=mage_contact_urn)

        response = self.client.post(url, dict(message_id=msg.pk, new_contact=True), **headers)
        self.assertEqual(200, response.status_code)

        msg = Msg.objects.get(pk=msg.pk)
        self.assertEqual('H', msg.status)
        self.assertEqual(self.welcome_topup, msg.topup)

        msg_counts = SystemLabel.get_counts(self.org)
        self.assertEqual(3, msg_counts[SystemLabel.TYPE_INBOX])

        contact_counts = ContactGroup.get_system_group_counts(self.org)
        self.assertEqual(2, contact_counts[ContactGroup.TYPE_ALL])
        self.assertEqual(997, self.org.get_credits_remaining())

        # check that contact ended up dynamic group
        self.assertEqual([mage_contact], list(self.dyn_group.contacts.order_by('name')))

        # check invalid auth key
        response = self.client.post(url, dict(message_id=msg.pk), **dict(HTTP_AUTHORIZATION='Token xyz'))
        self.assertEqual(401, response.status_code)

        # check rejection of empty or invalid msgId
        response = self.client.post(url, dict(), **headers)
        self.assertEqual(400, response.status_code)

        response = self.client.post(url, dict(message_id='xx'), **headers)
        self.assertEqual(400, response.status_code)

    def test_follow_notification(self):
        """follow_notification should fire the follow trigger's flow and update contact counts."""
        url = reverse('handlers.mage_handler', args=['follow_notification'])
        headers = dict(HTTP_AUTHORIZATION='Token %s' % settings.MAGE_AUTH_TOKEN)

        flow = self.create_flow()

        channel = Channel.create(self.org, self.user, None, 'TT', "Twitter Channel", address="billy_bob")

        Trigger.objects.create(created_by=self.user, modified_by=self.user, org=self.org,
                               trigger_type=Trigger.TYPE_FOLLOW, flow=flow, channel=channel)

        contact = self.create_contact("Mary Jo", twitter='mary_jo')
        urn = contact.get_urn(TWITTER_SCHEME)

        response = self.client.post(url, dict(channel_id=channel.id, contact_urn_id=urn.id), **headers)
        self.assertEqual(200, response.status_code)
        self.assertEqual(1, flow.runs.all().count())

        contact_counts = ContactGroup.get_system_group_counts(self.org)
        self.assertEqual(2, contact_counts[ContactGroup.TYPE_ALL])

        # simulate scenario where Mage has added new contact with name that should put it into a dynamic group
        mage_contact, mage_contact_urn = self.create_contact_like_mage("Bob", "bobby81")

        response = self.client.post(url, dict(channel_id=channel.id,
                                              contact_urn_id=mage_contact_urn.id, new_contact=True), **headers)
        self.assertEqual(200, response.status_code)
        self.assertEqual(2, flow.runs.all().count())

        # check that contact ended up dynamic group
        self.assertEqual([mage_contact], list(self.dyn_group.contacts.order_by('name')))

        # check contact count updated
        contact_counts = ContactGroup.get_system_group_counts(self.org)
        self.assertEqual(contact_counts[ContactGroup.TYPE_ALL], 3)
class StartMobileTest(TembaTest):
def setUp(self):
    super(StartMobileTest, self).setUp()

    self.channel.delete()
    # Start Mobile ('ST') channel on a Ukrainian short code with basic-auth credentials
    self.channel = Channel.create(self.org, self.user, 'UA', 'ST', None, '1212',
                                  config=dict(username='st-user', password='st-password'),
                                  uuid='00000000-0000-0000-0000-000000001234')
def test_received(self):
body = """
<message>
<service type="sms" timestamp="1450450974" auth="asdfasdf" request_id="msg1"/>
<from>+250788123123</from>
<to>1515</to>
<body content-type="content-type" encoding="utf8">Hello World</body>
</message>
"""
callback_url = reverse('handlers.start_handler', args=['receive', self.channel.uuid])
response = self.client.post(callback_url, content_type='application/xml', data=body)
self.assertEquals(200, response.status_code)
# load our message
msg = Msg.objects.get()
self.assertEquals('+250788123123', msg.contact.get_urn(TEL_SCHEME).path)
self.assertEquals(INCOMING, msg.direction)
self.assertEquals(self.org, msg.org)
self.assertEquals(self.channel, msg.channel)
self.assertEquals("Hello World", msg.text)
# try it with an invalid body
response = self.client.post(callback_url, content_type='application/xml', data="invalid body")
# should get a 400, as the body is invalid
self.assertEquals(400, response.status_code)
Msg.objects.all().delete()
# empty text element from Start Mobile we create "" message
body = """
<message>
<service type="sms" timestamp="1450450974" auth="asdfasdf" request_id="msg1"/>
<from>+250788123123</from>
<to>1515</to>
<body content-type="content-type" encoding="utf8"></body>
</message>
"""
response = self.client.post(callback_url, content_type='application/xml', data=body)
self.assertEquals(200, response.status_code)
# load our message
msg = Msg.objects.get()
self.assertEquals('+250788123123', msg.contact.get_urn(TEL_SCHEME).path)
self.assertEquals(INCOMING, msg.direction)
self.assertEquals(self.org, msg.org)
self.assertEquals(self.channel, msg.channel)
self.assertEquals("", msg.text)
# try it with an invalid channel
callback_url = reverse('handlers.start_handler', args=['receive', '1234-asdf'])
response = self.client.post(callback_url, content_type='application/xml', data=body)
# should get 400 as the channel wasn't found
self.assertEquals(400, response.status_code)
def test_send(self):
joe = self.create_contact("Joe", "+977788123123")
msg = joe.send("Вітаємо в U-Report, системі опитувань про майбутнє країни.Зараз невеличка реєстрація.?",
self.admin, trigger_send=False)
try:
settings.SEND_MESSAGES = True
with patch('requests.post') as mock:
mock.return_value = MockResponse(200,
"""<status date='Wed, 25 May 2016 17:29:56 +0300'>
<id>380502535130309161501</id><state>Accepted</state></status>""")
# manually send it off
Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
# check the status of the message is now sent
msg.refresh_from_db()
self.assertEquals(WIRED, msg.status)
self.assertTrue(msg.sent_on)
self.assertEqual(msg.external_id, "380502535130309161501")
# check the call that was made
self.assertEqual('http://bulk.startmobile.com.ua/clients.php', mock.call_args[0][0])
message_el = ET.fromstring(mock.call_args[1]['data'])
self.assertEqual(message_el.find('service').attrib, dict(source='1212', id='single', validity='+12 hours'))
self.assertEqual(message_el.find('body').text, msg.text)
self.clear_cache()
# return 400
with patch('requests.post') as mock:
mock.return_value = MockResponse(400, "Error", method='POST')
# manually send it off
Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
# message should be marked as an error
msg.refresh_from_db()
self.assertEquals(ERRORED, msg.status)
self.assertEquals(1, msg.error_count)
self.assertTrue(msg.next_attempt)
self.clear_cache()
# return invalid XML
with patch('requests.post') as mock:
mock.return_value = MockResponse(200, "<error>This is an error</error>", method='POST')
# manually send it off
Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
# message should be marked as an error
msg.refresh_from_db()
self.assertEquals(ERRORED, msg.status)
self.assertEquals(2, msg.error_count)
self.assertTrue(msg.next_attempt)
self.clear_cache()
# unexpected exception
with patch('requests.post') as mock:
mock.side_effect = Exception('Kaboom!')
# manually send it off
Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
# message should be marked as an error
msg.refresh_from_db()
self.assertEquals(FAILED, msg.status)
self.assertEquals(2, msg.error_count)
self.assertTrue(msg.next_attempt)
self.clear_cache()
self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
"referenced before assignment"))
finally:
settings.SEND_MESSAGES = False
class ChikkaTest(TembaTest):
    """Tests for the Chikka (Philippines) channel: status callbacks, inbound
    messages, and the REPLY-then-SEND fallback on outbound sends.

    Uses assertEqual/assertNotIn; the assertEquals alias is deprecated and
    removed in Python 3.12.
    """

    def setUp(self):
        super(ChikkaTest, self).setUp()

        # replace the default test channel with a Chikka channel
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'PH', Channel.TYPE_CHIKKA, None, '920920',
                                      uuid='00000000-0000-0000-0000-000000001234')

        config = {Channel.CONFIG_USERNAME: 'username', Channel.CONFIG_PASSWORD: 'password'}
        self.channel.config = json.dumps(config)
        self.channel.save()

    def test_status(self):
        """Status callbacks update msg state; bad uuid, message id or status return 400."""
        # try with an invalid channel uuid
        data = dict(message_type='outgoing', message_id=1001, status='FAILED')
        response = self.client.post(reverse('handlers.chikka_handler', args=['not-real-uuid']), data)
        self.assertEqual(400, response.status_code)

        # ok, try with a valid uuid, but invalid message id 1001, should return 400 as well
        response = self.client.post(reverse('handlers.chikka_handler', args=[self.channel.uuid]), data)
        self.assertEqual(400, response.status_code)

        # ok, lets create an outgoing message to update
        joe = self.create_contact("Joe Biden", "+63911231234")
        msg = joe.send("Hey Joe, it's Obama, pick up!", self.admin)
        data['message_id'] = msg.id

        # valid id, invalid status, 400
        data['status'] = 'INVALID'
        response = self.client.post(reverse('handlers.chikka_handler', args=[self.channel.uuid]), data)
        self.assertEqual(400, response.status_code)

        def assertStatus(sms, status, assert_status):
            # reset to WIRED so each callback's effect is observed in isolation
            sms.status = WIRED
            sms.save()

            data['status'] = status
            response = self.client.post(reverse('handlers.chikka_handler', args=[self.channel.uuid]), data)
            self.assertEqual(200, response.status_code)
            updated_sms = Msg.objects.get(pk=sms.id)
            self.assertEqual(assert_status, updated_sms.status)

        assertStatus(msg, 'FAILED', FAILED)
        assertStatus(msg, 'SENT', SENT)

    def test_receive(self):
        """An incoming callback creates a msg with the external request id and timestamp."""
        data = dict(message_type='incoming', mobile_number='639178020779', request_id='4004',
                    message='Hello World!', timestamp='1457670059.69')

        callback_url = reverse('handlers.chikka_handler', args=[self.channel.uuid])
        response = self.client.post(callback_url, data)

        self.assertEqual(200, response.status_code)

        # load our message
        msg = Msg.objects.get()
        self.assertEqual("+639178020779", msg.contact.get_urn(TEL_SCHEME).path)
        self.assertEqual(INCOMING, msg.direction)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.channel, msg.channel)
        self.assertEqual("Hello World!", msg.text)
        self.assertEqual('4004', msg.external_id)
        self.assertEqual(msg.sent_on.date(), date(day=11, month=3, year=2016))

    def test_send(self):
        """Outbound sends: plain SEND, REPLY when responding to an incoming msg, and
        the fallback to SEND when Chikka rejects a used request id."""
        joe = self.create_contact("Joe", '+63911231234')

        # incoming message for a reply test
        incoming = Msg.create_incoming(self.channel, 'tel:+63911231234', "incoming message")
        incoming.external_id = '4004'
        incoming.save()

        msg = joe.send("Test message", self.admin, trigger_send=False)

        with self.settings(SEND_MESSAGES=True):
            with patch('requests.post') as mock:
                mock.return_value = MockResponse(200, "Success", method='POST')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(WIRED, msg.status)
                self.assertTrue(msg.sent_on)

                # check we were called as a send
                self.assertEqual(mock.call_args[1]['data']['message_type'], 'SEND')
                self.clear_cache()

            with patch('requests.post') as mock:
                mock.return_value = MockResponse(200, "Success", method='POST')

                msg.response_to = incoming
                msg.save()

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(WIRED, msg.status)
                self.assertTrue(msg.sent_on)

                # assert that we were called as a reply
                self.assertEqual(mock.call_args[1]['data']['message_type'], 'REPLY')
                self.assertEqual(mock.call_args[1]['data']['request_id'], '4004')
                self.clear_cache()

            with patch('requests.post') as mock:
                error = dict(status=400, message='BAD REQUEST', description='Invalid/Used Request ID')

                # first request (as a reply) is an error, second should be success without request id
                mock.side_effect = [
                    MockResponse(400, json.dumps(error), method='POST'),
                    MockResponse(200, 'Success', method='POST')
                ]

                msg.response_to = incoming
                msg.save()

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(WIRED, msg.status)
                self.assertTrue(msg.sent_on)

                first_call_args = mock.call_args_list[0][1]['data']
                second_call_args = mock.call_args_list[1][1]['data']

                # first request is as a reply
                self.assertEqual(first_call_args['message_type'], 'REPLY')
                self.assertEqual(first_call_args['request_id'], '4004')

                # but when that fails, we should try again as a send
                self.assertEqual(second_call_args['message_type'], 'SEND')
                self.assertNotIn('request_id', second_call_args)

                # our message should be succeeded
                msg.refresh_from_db()
                self.assertEqual(WIRED, msg.status)
                self.assertEqual(0, msg.error_count)
                self.clear_cache()

            # test with an invalid request id, then an unexpected error
            with patch('requests.post') as mock:
                error = dict(status=400, message='BAD REQUEST', description='Invalid/Used Request ID')

                # first request (as a reply) is an error, second should be success without request id
                mock.side_effect = [
                    MockResponse(400, json.dumps(error), method='POST'),
                    Exception("Unexpected Error")
                ]

                msg.response_to = incoming
                msg.save()

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # the exception on the retry marks the message errored
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(1, msg.error_count)
                self.assertTrue(msg.next_attempt)

            with patch('requests.post') as mock:
                mock.return_value = MockResponse(400, "{}", method='POST')

                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(2, msg.error_count)
                self.assertTrue(msg.next_attempt)
                self.clear_cache()

            with patch('requests.post') as mock:
                mock.side_effect = Exception("Couldn't reach server")
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

                # should also have an error
                msg.refresh_from_db()

                # third try, we should be failed now
                self.assertEqual(FAILED, msg.status)
                self.assertEqual(2, msg.error_count)
                self.assertTrue(msg.next_attempt)
class JasminTest(TembaTest):
    """Tests for the Jasmin SMS gateway channel: DLR status callbacks, GSM7-coded
    inbound messages, and outbound sends.

    Uses assertEqual; the assertEquals alias is deprecated and removed in
    Python 3.12.
    """

    def setUp(self):
        super(JasminTest, self).setUp()

        # replace the default test channel with a Jasmin channel
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'RW', 'JS', None, '1234',
                                      config=dict(username='jasmin-user', password='jasmin-pass', send_url='http://foo/'),
                                      uuid='00000000-0000-0000-0000-000000001234')

    def tearDown(self):
        super(JasminTest, self).tearDown()
        settings.SEND_MESSAGES = False

    def test_status(self):
        """dlvrd/err flags in the callback map to WIRED/DELIVERED/FAILED."""
        # ok, what happens with an invalid uuid?
        data = dict(id="-1", dlvr="0", err="0")
        response = self.client.post(reverse('handlers.jasmin_handler', args=['status', 'not-real-uuid']), data)
        self.assertEqual(400, response.status_code)

        # ok, try with a valid uuid, but invalid message id -1
        delivery_url = reverse('handlers.jasmin_handler', args=['status', self.channel.uuid])
        response = self.client.post(delivery_url, data)
        self.assertEqual(400, response.status_code)

        # ok, lets create an outgoing message to update
        joe = self.create_contact("Joe Biden", "+254788383383")
        msg = joe.send("Hey Joe, it's Obama, pick up!", self.admin)
        msg.external_id = "jasmin-external-id"
        msg.save(update_fields=('external_id',))
        data['id'] = msg.external_id

        def assertStatus(sms, dlvrd, err, assert_status):
            data['dlvrd'] = dlvrd
            data['err'] = err
            response = self.client.post(reverse('handlers.jasmin_handler', args=['status', self.channel.uuid]), data)
            self.assertEqual(200, response.status_code)
            sms = Msg.objects.get(pk=sms.id)
            self.assertEqual(assert_status, sms.status)

        assertStatus(msg, 0, 0, WIRED)
        assertStatus(msg, 1, 0, DELIVERED)
        assertStatus(msg, 0, 1, FAILED)

    def test_receive(self):
        """GSM7-encoded inbound content is decoded back to unicode text."""
        from temba.utils import gsm7

        data = {
            'to': '1234',
            'from': '0788383383',
            'coding': '0',
            'content': gsm7.encode("événement")[0],
            'id': 'external1'
        }
        callback_url = reverse('handlers.jasmin_handler', args=['receive', self.channel.uuid])
        response = self.client.post(callback_url, data)

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, "ACK/Jasmin")

        # load our message
        msg = Msg.objects.get()
        self.assertEqual("+250788383383", msg.contact.get_urn(TEL_SCHEME).path)
        self.assertEqual(INCOMING, msg.direction)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.channel, msg.channel)
        self.assertEqual("événement", msg.text)

    def test_send(self):
        """Outbound sends GSM7-encode the text; HTTP errors and exceptions error the msg."""
        from temba.utils import gsm7

        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("événement", self.admin, trigger_send=False)

        settings.SEND_MESSAGES = True

        with patch('requests.get') as mock:
            mock.return_value = MockResponse(200, 'Success "07033084-5cfd-4812-90a4-e4d24ffb6e3d"')

            # manually send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

            # check the status of the message is now sent
            msg.refresh_from_db()
            self.assertEqual(msg.status, WIRED)
            self.assertTrue(msg.sent_on)
            self.assertEqual(msg.external_id, '07033084-5cfd-4812-90a4-e4d24ffb6e3d')

            # assert we were properly encoded
            self.assertEqual(mock.call_args[1]['params']['content'], gsm7.encode('événement')[0])

            self.clear_cache()

        with patch('requests.get') as mock:
            mock.return_value = MockResponse(412, 'Error “No route found”')

            # manually send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

            # check the status of the message now errored
            msg.refresh_from_db()
            self.assertEqual(ERRORED, msg.status)

        with patch('requests.get') as mock:
            # force an exception
            mock.side_effect = Exception('Kaboom!')

            # manually send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

            # check the status of the message now errored
            msg.refresh_from_db()
            self.assertEqual(ERRORED, msg.status)

            # regression check: the send path must not blow up before 'response' is assigned
            self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
                                                                              "referenced before assignment"))
class JunebugTest(TembaTest):
    """Tests for the Junebug channel: event (status) callbacks, inbound messages,
    and outbound sends.

    Uses assertEqual/assertIn; the assertEquals alias is deprecated and removed
    in Python 3.12.
    """

    def setUp(self):
        super(JunebugTest, self).setUp()

        # replace the default test channel with a Junebug channel
        self.channel.delete()
        self.channel = Channel.create(
            self.org, self.user, 'RW', Channel.TYPE_JUNEBUG, None, '1234',
            config=dict(username='junebug-user', password='junebug-pass', send_url='http://example.org/'),
            uuid='00000000-0000-0000-0000-000000001234')

    def tearDown(self):
        super(JunebugTest, self).tearDown()
        settings.SEND_MESSAGES = False

    def mk_event(self, **kwargs):
        """Return a minimal Junebug event payload, overridable via kwargs."""
        default = {
            'event_type': 'submitted',
            'message_id': 'message-id',
            'timestamp': '2017-01-01 00:00:00+0000',
        }
        default.update(kwargs)
        return default

    def mk_msg(self, **kwargs):
        """Return a minimal Junebug inbound message payload, overridable via kwargs."""
        default = {
            "channel_data": {"session_event": "new"},
            "from": "+27123456789",
            "channel_id": "channel-id",
            "timestamp": "2017-01-01 00:00:00+0000",
            "content": "content",
            "to": "to-addr",
            "reply_to": None,
            "message_id": "message-id"
        }
        default.update(kwargs)
        return default

    def test_get_request(self):
        """GET is not supported on the event endpoint."""
        response = self.client.get(
            reverse('handlers.junebug_handler',
                    args=['event', self.channel.uuid]))
        self.assertEqual(response.status_code, 400)

    def test_status_with_invalid_event(self):
        """An event payload missing required keys is rejected."""
        delivery_url = reverse('handlers.junebug_handler',
                               args=['event', self.channel.uuid])
        response = self.client.post(delivery_url, data=json.dumps({}),
                                    content_type='application/json')
        self.assertEqual(400, response.status_code)
        self.assertIn('Missing one of', response.content)

    def test_status(self):
        """Each Junebug event_type maps to the expected msg status."""
        # ok, what happens with an invalid uuid?
        data = self.mk_event()
        response = self.client.post(
            reverse('handlers.junebug_handler',
                    args=['event', 'not-real-uuid']),
            data=json.dumps(data),
            content_type='application/json')
        self.assertEqual(400, response.status_code)

        # ok, try with a valid uuid, but invalid message id -1
        delivery_url = reverse('handlers.junebug_handler',
                               args=['event', self.channel.uuid])
        response = self.client.post(delivery_url, data=json.dumps(data),
                                    content_type='application/json')
        self.assertEqual(400, response.status_code)

        # ok, lets create an outgoing message to update
        joe = self.create_contact("Joe Biden", "+254788383383")
        msg = joe.send("Hey Joe, it's Obama, pick up!", self.admin)
        msg.external_id = data['message_id']
        msg.save(update_fields=('external_id',))

        def assertStatus(sms, event_type, assert_status):
            data['event_type'] = event_type
            response = self.client.post(
                reverse('handlers.junebug_handler',
                        args=['event', self.channel.uuid]),
                data=json.dumps(data),
                content_type='application/json')
            self.assertEqual(200, response.status_code)
            sms = Msg.objects.get(pk=sms.id)
            self.assertEqual(assert_status, sms.status)

        assertStatus(msg, 'submitted', SENT)
        assertStatus(msg, 'delivery_succeeded', DELIVERED)
        assertStatus(msg, 'delivery_failed', FAILED)
        assertStatus(msg, 'rejected', FAILED)

    def test_status_invalid_message_id(self):
        """An event for an unknown external message id returns 400 with a clear error."""
        # ok, what happens with an invalid uuid?
        data = self.mk_event()
        response = self.client.post(
            reverse('handlers.junebug_handler',
                    args=['event', self.channel.uuid]),
            data=json.dumps(data),
            content_type='application/json')
        self.assertEqual(400, response.status_code)
        self.assertEqual(
            response.content,
            "Message with external id of '%s' not found" % (
                data['message_id'],))

    def test_receive_with_invalid_message(self):
        """An inbound payload missing required keys is rejected."""
        callback_url = reverse('handlers.junebug_handler',
                               args=['inbound', self.channel.uuid])
        response = self.client.post(callback_url, json.dumps({}),
                                    content_type='application/json')
        self.assertEqual(400, response.status_code)
        self.assertIn('Missing one of', response.content)

    def test_receive(self):
        """A valid inbound payload creates an incoming msg with unicode text intact."""
        data = self.mk_msg(content="événement")
        callback_url = reverse('handlers.junebug_handler',
                               args=['inbound', self.channel.uuid])
        response = self.client.post(callback_url, json.dumps(data),
                                    content_type='application/json')

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, "OK")

        # load our message
        msg = Msg.objects.get()
        self.assertEqual(data["from"], msg.contact.get_urn(TEL_SCHEME).path)
        self.assertEqual(INCOMING, msg.direction)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.channel, msg.channel)
        self.assertEqual("événement", msg.text)

    def test_send_wired(self):
        """A successful send wires the msg and records the remote id."""
        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("événement", self.admin, trigger_send=False)

        settings.SEND_MESSAGES = True

        with patch('requests.post') as mock:
            mock.return_value = MockResponse(200, json.dumps({
                'result': {
                    'id': '07033084-5cfd-4812-90a4-e4d24ffb6e3d',
                }
            }))

            # manually send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

            # check the status of the message is now sent
            msg.refresh_from_db()
            self.assertEqual(msg.status, WIRED)
            self.assertTrue(msg.sent_on)
            self.assertEqual(
                msg.external_id, '07033084-5cfd-4812-90a4-e4d24ffb6e3d')

            self.clear_cache()

    def test_send_errored_remote(self):
        """A remote HTTP error marks the msg errored."""
        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("événement", self.admin, trigger_send=False)

        settings.SEND_MESSAGES = True

        with patch('requests.post') as mock:
            mock.return_value = MockResponse(499, 'Error')

            # manually send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

            # check the status of the message now errored
            msg.refresh_from_db()
            self.assertEqual(ERRORED, msg.status)

    def test_send_errored_exception(self):
        """An unexpected exception marks the msg errored without a secondary crash."""
        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("événement", self.admin, trigger_send=False)

        settings.SEND_MESSAGES = True

        with patch('requests.post') as mock:
            # force an exception
            mock.side_effect = Exception('Kaboom!')

            # manually send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

            # check the status of the message now errored
            msg.refresh_from_db()
            self.assertEqual(ERRORED, msg.status)

            # regression check: the send path must not blow up before 'response' is assigned
            self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
                                                                              "referenced before assignment"))
class MbloxTest(TembaTest):
    """Tests for the Mblox channel: delivery reports, inbound messages, and sends.

    Uses assertEqual; the assertEquals alias is deprecated and removed in
    Python 3.12.
    """

    def setUp(self):
        super(MbloxTest, self).setUp()

        # replace the default test channel with an Mblox channel
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, 'RW', 'MB', None, '1234',
                                      config=dict(username='mbox-user', password='mblox-pass'),
                                      uuid='00000000-0000-0000-0000-000000001234')

    def tearDown(self):
        super(MbloxTest, self).tearDown()
        settings.SEND_MESSAGES = False

    def test_dlr(self):
        """Delivery reports map Mblox statuses onto msg statuses; malformed
        or unmatched reports return 400."""
        # invalid uuid
        data = dict(batch_id="-1", status="Failed", type="recipient_delivery_report_sms")
        response = self.client.post(reverse('handlers.mblox_handler', args=['not-real-uuid']), json.dumps(data),
                                    content_type="application/json")
        self.assertEqual(400, response.status_code)

        delivery_url = reverse('handlers.mblox_handler', args=[self.channel.uuid])

        # missing batch_id param
        data = dict(status="Failed", type="recipient_delivery_report_sms")
        response = self.client.post(delivery_url, json.dumps(data), content_type="application/json")
        self.assertEqual(400, response.status_code)

        # missing type params
        data = dict(status="Failed")
        response = self.client.post(delivery_url, json.dumps(data), content_type="application/json")
        self.assertEqual(400, response.status_code)

        # valid uuid, invalid batch_id
        data = dict(batch_id="-1", status="Failed", type="recipient_delivery_report_sms")
        response = self.client.post(delivery_url, json.dumps(data), content_type="application/json")
        self.assertEqual(400, response.status_code)

        # create test message to update
        joe = self.create_contact("Joe Biden", "+254788383383")
        msg = joe.send("Hey Joe, it's Obama, pick up!", self.admin)
        msg.external_id = "mblox-id"
        msg.save(update_fields=('external_id',))
        data['batch_id'] = msg.external_id

        def assertStatus(msg, status, assert_status):
            # reset to WIRED so each report's effect is observed in isolation
            Msg.objects.filter(id=msg.id).update(status=WIRED)
            data['status'] = status
            response = self.client.post(delivery_url, json.dumps(data), content_type="application/json")
            self.assertEqual(200, response.status_code)
            self.assertEqual(response.content, "SMS Updated: %d" % msg.id)
            msg = Msg.objects.get(pk=msg.id)
            self.assertEqual(assert_status, msg.status)

        assertStatus(msg, "Delivered", DELIVERED)
        assertStatus(msg, "Dispatched", SENT)
        assertStatus(msg, "Aborted", FAILED)
        assertStatus(msg, "Rejected", FAILED)
        assertStatus(msg, "Failed", FAILED)
        assertStatus(msg, "Expired", FAILED)

    def test_receive(self):
        """A valid mo_text payload creates an incoming msg with the sent_on date parsed."""
        data = {
            "id": "OzQ5UqIOdoY8",
            "from": "12067799294",
            "to": "18444651185",
            "body": "MO",
            "type": "mo_text",
            "received_at": "2016-03-30T19:33:06.643Z"
        }
        callback_url = reverse('handlers.mblox_handler', args=[self.channel.uuid])
        response = self.client.post(callback_url, json.dumps(data), content_type="application/json")

        msg = Msg.objects.get()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, "SMS Accepted: %d" % msg.id)

        # load our message
        self.assertEqual(msg.contact.get_urn(TEL_SCHEME).path, "+12067799294")
        self.assertEqual(msg.direction, INCOMING)
        self.assertEqual(msg.org, self.org)
        self.assertEqual(msg.channel, self.channel)
        self.assertEqual(msg.text, "MO")
        self.assertEqual(msg.sent_on.date(), date(day=30, month=3, year=2016))

    def test_send(self):
        """Outbound sends: success wires the msg; HTTP errors and exceptions error it."""
        joe = self.create_contact("Joe", "+250788383383")
        msg = joe.send("MT", self.admin, trigger_send=False)

        settings.SEND_MESSAGES = True

        with patch('requests.post') as mock:
            mock.return_value = MockResponse(200, '{ "id":"OzYDlvf3SQVc" }')

            # manually send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

            # check the status of the message is now sent
            msg.refresh_from_db()
            self.assertEqual(msg.status, WIRED)
            self.assertTrue(msg.sent_on)
            self.assertEqual(msg.external_id, 'OzYDlvf3SQVc')
            self.clear_cache()

        with patch('requests.get') as mock:
            mock.return_value = MockResponse(412, 'Error')

            # manually send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

            # check the status of the message now errored
            msg.refresh_from_db()
            self.assertEqual(ERRORED, msg.status)

        with patch('requests.get') as mock:
            mock.side_effect = Exception('Kaboom!')

            # manually send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))

            # check the status of the message now errored
            msg.refresh_from_db()
            self.assertEqual(ERRORED, msg.status)

            # regression check: the send path must not blow up before 'response' is assigned
            self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
                                                                              "referenced before assignment"))
class FacebookWhitelistTest(TembaTest):
    """Tests the Facebook domain-whitelisting view for a Facebook channel."""

    def setUp(self):
        super(FacebookWhitelistTest, self).setUp()

        # swap the default test channel for a Facebook one carrying an auth token
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, None, 'FB', None, '1234',
                                      config={Channel.CONFIG_AUTH_TOKEN: 'auth'},
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_whitelist(self):
        """Whitelisting requires login, surfaces FB errors as form errors, and
        posts the expected payload to the Graph API on success."""
        url = reverse('channels.channel_facebook_whitelist', args=[self.channel.uuid])

        # anonymous users are bounced to login
        anon_response = self.client.get(url)
        self.assertLoginRedirect(anon_response)

        # once logged in, the channel read page links to the whitelist view
        self.login(self.admin)
        read_response = self.client.get(reverse('channels.channel_read', args=[self.channel.uuid]))
        self.assertContains(read_response, url)

        # a Graph API error should come back as a form error
        with patch('requests.post') as mock_post:
            mock_post.return_value = MockResponse(400, '{"error": { "message": "FB Error" } }')
            error_response = self.client.post(url, dict(whitelisted_domain='https://foo.bar'))
            self.assertFormError(error_response, 'form', None, 'FB Error')

        # a successful call hits the thread_settings endpoint with our domain
        with patch('requests.post') as mock_post:
            mock_post.return_value = MockResponse(200, '{ "ok": "true" }')
            ok_response = self.client.post(url, dict(whitelisted_domain='https://foo.bar'))

            mock_post.assert_called_once_with('https://graph.facebook.com/v2.6/me/thread_settings?access_token=auth',
                                              json=dict(setting_type='domain_whitelisting',
                                                        whitelisted_domains=['https://foo.bar'],
                                                        domain_action_type='add'))

            self.assertNoFormErrors(ok_response)
class FacebookTest(TembaTest):
TEST_INCOMING = """
{
"entry": [{
"id": "208685479508187",
"messaging": [{
"message": {
"text": "hello world",
"mid": "external_id"
},
"recipient": {
"id": "1234"
},
"sender": {
"id": "5678"
},
"timestamp": 1459991487970
}],
"time": 1459991487970
}]
}
"""
def setUp(self):
super(FacebookTest, self).setUp()
self.channel.delete()
self.channel = Channel.create(self.org, self.user, None, 'FB', None, '1234',
config={Channel.CONFIG_AUTH_TOKEN: 'auth'},
uuid='00000000-0000-0000-0000-000000001234')
def tearDown(self):
super(FacebookTest, self).tearDown()
settings.SEND_MESSAGES = False
def test_dlr(self):
# invalid uuid
body = dict()
response = self.client.post(reverse('handlers.facebook_handler', args=['invalid']), json.dumps(body),
content_type="application/json")
self.assertEquals(400, response.status_code)
# invalid body
response = self.client.post(reverse('handlers.facebook_handler', args=[self.channel.uuid]), json.dumps(body),
content_type="application/json")
self.assertEquals(400, response.status_code)
# no known msgs, gracefully ignore
body = dict(entry=[dict()])
response = self.client.post(reverse('handlers.facebook_handler', args=[self.channel.uuid]), json.dumps(body),
content_type="application/json")
self.assertEquals(200, response.status_code)
# create test message to update
joe = self.create_contact("Joe Biden", urn='facebook:1234')
msg = joe.send("Hey Joe, it's Obama, pick up!", self.admin)
msg.external_id = "fb-message-id-out"
msg.save(update_fields=('external_id',))
body = dict(entry=[dict(messaging=[dict(delivery=dict(mids=[msg.external_id]))])])
response = self.client.post(reverse('handlers.facebook_handler', args=[self.channel.uuid]), json.dumps(body),
content_type='application/json')
self.assertEqual(response.status_code, 200)
msg.refresh_from_db()
self.assertEqual(msg.status, DELIVERED)
# ignore incoming messages delivery reports
msg = self.create_msg(direction=INCOMING, contact=joe, text="Read message")
msg.external_id = "fb-message-id-in"
msg.save(update_fields=('external_id',))
status = msg.status
body = dict(entry=[dict(messaging=[dict(delivery=dict(mids=[msg.external_id]))])])
response = self.client.post(reverse('handlers.facebook_handler', args=[self.channel.uuid]), json.dumps(body),
content_type='application/json')
self.assertEqual(response.status_code, 200)
msg.refresh_from_db()
self.assertEqual(msg.status, status)
def test_affinity(self):
data = json.loads(FacebookTest.TEST_INCOMING)
with patch('requests.get') as mock_get:
mock_get.return_value = MockResponse(200, '{"first_name": "Ben","last_name": "Haggerty"}')
callback_url = reverse('handlers.facebook_handler', args=[self.channel.uuid])
response = self.client.post(callback_url, json.dumps(data), content_type="application/json")
self.assertEqual(response.status_code, 200)
# check the channel affinity for our URN
urn = ContactURN.objects.get(urn='facebook:5678')
self.assertEqual(self.channel, urn.channel)
# create another facebook channel
channel2 = Channel.create(self.org, self.user, None, 'FB', None, '1234',
config={Channel.CONFIG_AUTH_TOKEN: 'auth'},
uuid='00000000-0000-0000-0000-000000012345')
# have to change the message so we don't treat it as a duplicate
data['entry'][0]['messaging'][0]['message']['text'] = '2nd Message'
data['entry'][0]['messaging'][0]['message']['mid'] = 'external_id_2'
callback_url = reverse('handlers.facebook_handler', args=[channel2.uuid])
response = self.client.post(callback_url, json.dumps(data), content_type="application/json")
self.assertEqual(response.status_code, 200)
urn = ContactURN.objects.get(urn='facebook:5678')
self.assertEqual(channel2, urn.channel)
def test_ignored_webhooks(self):
TEST_PAYLOAD = """{
"object": "page",
"entry": [{
"id": "208685479508187",
"time": 1459991487970,
"messaging": []
}]
}"""
READ_ENTRY = """
{
"sender":{ "id":"1001" },
"recipient":{ "id":"%s" },
"timestamp":1458668856463,
"read":{
"watermark":1458668856253,
"seq":38
}
}
"""
ECHO_ENTRY = """{
"sender": {"id": "1001"},
"recipient": {"id": "%s"},
"timestamp": 1467905036620,
"message": {
"is_echo": true,
"app_id": 1077392885670130,
"mid": "mid.1467905036543:c721a8364e45388954",
"seq": 4,
"text": "Echo Test"
}
}
"""
LINK_ENTRY = """{
"sender":{
"id":"1001"
},
"recipient":{
"id":"%s"
},
"timestamp":1234567890,
"account_linking":{
"status":"linked",
"authorization_code":"PASS_THROUGH_AUTHORIZATION_CODE"
}
}
"""
AUTH_ENTRY = """{
"sender":{
"id":"1001"
},
"recipient":{
"id":"%s"
},
"timestamp":1234567890,
"optin":{
"ref":"PASS_THROUGH_PARAM"
}
}
"""
ATTACHMENT_UNAVAILABLE = """{
"sender":{
"id":"1001"
},
"recipient":{
"id":"%s"
},
"timestamp":1234567890,
"message":{
"mid":"mid.1471652393639:4ecd7f5649c8586032",
"seq":"77866",
"attachments":[{
"title":"Attachment Unavailable",
"url":null,
"type":"fallback",
"payload":null
}]
}
}
"""
callback_url = reverse('handlers.facebook_handler', args=[self.channel.uuid])
for entry in (READ_ENTRY, ECHO_ENTRY, LINK_ENTRY, AUTH_ENTRY, ATTACHMENT_UNAVAILABLE):
payload = json.loads(TEST_PAYLOAD)
payload['entry'][0]['messaging'].append(json.loads(entry % self.channel.address))
with patch('requests.get') as mock_get:
mock_get.return_value = MockResponse(200, '{"first_name": "Ben","last_name": "Haggerty"}')
response = self.client.post(callback_url, json.dumps(payload), content_type="application/json")
# ignored but 200
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Ignored")
    def test_referrals(self):
        """
        Facebook opt-in events whose ref matches a referral trigger should
        start the trigger's flow, keyed either on the sender id or, when the
        sender is absent, on a user_ref which becomes an external URN.
        """
        # create two triggers for referrals
        flow = self.get_flow('favorites')
        Trigger.objects.create(org=self.org, trigger_type=Trigger.TYPE_REFERRAL, referrer_id='join',
                               flow=flow, created_by=self.admin, modified_by=self.admin)
        Trigger.objects.create(org=self.org, trigger_type=Trigger.TYPE_REFERRAL, referrer_id='signup',
                               flow=flow, created_by=self.admin, modified_by=self.admin)
        callback_url = reverse('handlers.facebook_handler', args=[self.channel.uuid])
        # opt-in whose ref matches the 'join' trigger; PAGE_ID is substituted below
        optin = """
        {
          "sender": { "id": "1122" },
          "recipient": { "id": "PAGE_ID" },
          "timestamp": 1234567890,
          "optin": {
            "ref": "join"
          }
        }
        """
        data = json.loads(FacebookTest.TEST_INCOMING)
        data['entry'][0]['messaging'][0] = json.loads(optin)
        # recipient doesn't match our channel address, so this is ignored
        response = self.client.post(callback_url, json.dumps(data), content_type='application/json')
        self.assertEqual(200, response.status_code)
        self.assertEqual('Msg Ignored for recipient id: PAGE_ID', response.content)
        # with the right recipient id the opt-in fires the trigger
        response = self.client.post(callback_url, json.dumps(data).replace('PAGE_ID', '1234'), content_type='application/json')
        self.assertEqual(200, response.status_code)
        # check that the user started the flow
        contact1 = Contact.objects.get(org=self.org, urns__path='1122')
        self.assertEqual("What is your favorite color?", contact1.msgs.all().first().text)
        # try an invalid optin (has fields for neither type)
        del data['entry'][0]['messaging'][0]['sender']
        response = self.client.post(callback_url, json.dumps(data).replace('PAGE_ID', '1234'), content_type='application/json')
        self.assertEqual(200, response.status_code)
        self.assertEqual('{"status": ["Ignored opt-in, no user_ref or sender"]}', response.content)
        # ok, use a user_ref optin instead
        entry = json.loads(optin)
        del entry['sender']
        entry['optin']['user_ref'] = 'user_ref2'
        data = json.loads(FacebookTest.TEST_INCOMING)
        data['entry'][0]['messaging'][0] = entry
        with override_settings(SEND_MESSAGES=True):
            with patch('requests.post') as mock:
                # sending the flow's first message resolves the real facebook id
                mock.return_value = MockResponse(200, '{"recipient_id":"1133", "message_id":"mid.external"}')
                response = self.client.post(callback_url, json.dumps(data).replace('PAGE_ID', '1234'),
                                            content_type='application/json')
                self.assertEqual(200, response.status_code)
                contact2 = Contact.objects.get(org=self.org, urns__path='1133')
                self.assertEqual("What is your favorite color?", contact2.msgs.all().first().text)
                # contact should have two URNs now
                fb_urn = contact2.urns.get(scheme=FACEBOOK_SCHEME)
                self.assertEqual(fb_urn.path, '1133')
                self.assertEqual(fb_urn.channel, self.channel)
                # the user_ref is kept as a channel-less external URN
                ext_urn = contact2.urns.get(scheme=EXTERNAL_SCHEME)
                self.assertEqual(ext_urn.path, 'user_ref2')
                self.assertIsNone(ext_urn.channel)
def test_receive(self):
data = json.loads(FacebookTest.TEST_INCOMING)
callback_url = reverse('handlers.facebook_handler', args=[self.channel.uuid])
with patch('requests.get') as mock_get:
mock_get.return_value = MockResponse(200, '{"first_name": "Ben","last_name": "Haggerty"}')
response = self.client.post(callback_url, json.dumps(data), content_type="application/json")
msg = Msg.objects.get()
self.assertEqual(response.status_code, 200)
# load our message
self.assertEqual(msg.contact.get_urn(FACEBOOK_SCHEME).path, "5678")
self.assertEqual(msg.direction, INCOMING)
self.assertEqual(msg.org, self.org)
self.assertEqual(msg.channel, self.channel)
self.assertEqual(msg.text, "hello world")
self.assertEqual(msg.external_id, "external_id")
# make sure our contact's name was populated
self.assertEqual(msg.contact.name, 'Ben Haggerty')
Msg.objects.all().delete()
Contact.all().delete()
# simulate a failure to fetch contact data
with patch('requests.get') as mock_get:
mock_get.return_value = MockResponse(400, '{"error": "Unable to look up profile data"}')
response = self.client.post(callback_url, json.dumps(data), content_type="application/json")
self.assertEqual(response.status_code, 200)
msg = Msg.objects.get()
self.assertEqual(msg.contact.get_urn(FACEBOOK_SCHEME).path, "5678")
self.assertIsNone(msg.contact.name)
Msg.objects.all().delete()
Contact.all().delete()
# simulate an exception
with patch('requests.get') as mock_get:
mock_get.return_value = MockResponse(200, 'Invalid JSON')
response = self.client.post(callback_url, json.dumps(data), content_type="application/json")
self.assertEqual(response.status_code, 200)
msg = Msg.objects.get()
self.assertEqual(msg.contact.get_urn(FACEBOOK_SCHEME).path, "5678")
self.assertIsNone(msg.contact.name)
Msg.objects.all().delete()
Contact.all().delete()
# now with a anon org, shouldn't try to look things up
self.org.is_anon = True
self.org.save()
with patch('requests.get') as mock_get:
response = self.client.post(callback_url, json.dumps(data), content_type="application/json")
self.assertEqual(response.status_code, 200)
msg = Msg.objects.get()
self.assertEqual(msg.contact.get_urn(FACEBOOK_SCHEME).path, "5678")
self.assertIsNone(msg.contact.name)
self.assertEqual(mock_get.call_count, 0)
Msg.objects.all().delete()
self.org.is_anon = False
self.org.save()
# rich media
data = """
{
"entry": [{
"id": 208685479508187,
"messaging": [{
"message": {
"attachments": [{
"payload": { "url": "http://mediaurl.com/img.gif" }
}],
"mid": "external_id"
},
"recipient": {
"id": 1234
},
"sender": {
"id": 5678
},
"timestamp": 1459991487970
}],
"time": 1459991487970
}]}
"""
data = json.loads(data)
response = self.client.post(callback_url, json.dumps(data), content_type="application/json")
msg = Msg.objects.get()
self.assertEqual(response.status_code, 200)
self.assertEqual(msg.text, "http://mediaurl.com/img.gif")
# link attachment
data = """{
"object":"page",
"entry":[{
"id":"32408604530",
"time":1468418021822,
"messaging":[{
"sender":{"id":"5678"},
"recipient":{"id":"1234"},
"timestamp":1468417833159,
"message": {
"mid":"external_id",
"seq":11242,
"attachments":[{
"title":"Get in touch with us.",
"url": "http:\x5c/\x5c/m.me\x5c/",
"type": "fallback",
"payload": null
}]
}
}]
}]
}
"""
Msg.objects.all().delete()
data = json.loads(data)
response = self.client.post(callback_url, json.dumps(data), content_type="application/json")
msg = Msg.objects.get()
self.assertEqual(response.status_code, 200)
self.assertEqual(msg.text, "Get in touch with us.\nhttp://m.me/")
# link attachment without title
data = """{
"object":"page",
"entry":[{
"id":"32408604530",
"time":1468418021822,
"messaging":[{
"sender":{"id":"5678"},
"recipient":{"id":"1234"},
"timestamp":1468417833159,
"message": {
"mid":"external_id",
"seq":11242,
"attachments":[{
"title": null,
"url": "http:\x5c/\x5c/m.me\x5c/",
"type": "fallback",
"payload": null
}]
}
}]
}]
}
"""
Msg.objects.all().delete()
data = json.loads(data)
response = self.client.post(callback_url, json.dumps(data), content_type="application/json")
msg = Msg.objects.get()
self.assertEqual(response.status_code, 200)
self.assertEqual(msg.text, "http://m.me/")
def test_send(self):
joe = self.create_contact("Joe", urn="facebook:1234")
msg = joe.send("Facebook Msg", self.admin, trigger_send=False)
settings.SEND_MESSAGES = True
with patch('requests.post') as mock:
mock.return_value = MockResponse(200, '{"recipient_id":"1234", '
'"message_id":"mid.external"}')
# manually send it off
Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
# check the status of the message is now sent
msg.refresh_from_db()
self.assertEqual(msg.status, WIRED)
self.assertTrue(msg.sent_on)
self.assertEqual(msg.external_id, 'mid.external')
self.clear_cache()
with patch('requests.get') as mock:
mock.return_value = MockResponse(412, 'Error')
# manually send it off
Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
# check the status of the message now errored
msg.refresh_from_db()
self.assertEquals(ERRORED, msg.status)
with patch('requests.post') as mock:
mock.side_effect = Exception('Kaboom!')
# manually send it off
Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
# check the status of the message now errored
msg.refresh_from_db()
self.assertEquals(ERRORED, msg.status)
self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
"referenced before assignment"))
class GlobeTest(TembaTest):
    """Tests for the Globe Labs channel: inbound handler and outbound sending."""

    def setUp(self):
        super(GlobeTest, self).setUp()
        self.channel.delete()
        # Globe channels authenticate with an app id/secret and a passphrase
        self.channel = Channel.create(self.org, self.user, 'PH', 'GL', None, '21586380',
                                      config=dict(app_id='AppId', app_secret='AppSecret', passphrase='Passphrase'),
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_receive(self):
        """Inbound Globe messages: reject malformed payloads, accept valid ones."""
        # invalid UUID
        response = self.client.post(reverse('handlers.globe_handler', args=['receive', '00000000-0000-0000-0000-000000000000']))
        self.assertEqual(response.status_code, 400)
        data = {
            "inboundSMSMessageList": {
                "inboundSMSMessage": [{
                    "dateTime": "Fri Nov 22 2013 12:12:13 GMT+0000 (UTC)",
                    "destinationAddress": "tel:21586380",
                    "messageId": None,
                    "message": "Hello",
                    "resourceURL": None,
                    "senderAddress": "tel:9171234567"
                }]
            }
        }
        callback_url = reverse('handlers.globe_handler', args=['receive', self.channel.uuid])
        # try a GET
        response = self.client.get(callback_url)
        self.assertEqual(response.status_code, 405)
        # POST invalid JSON data
        response = self.client.post(callback_url, "not json", content_type="application/json")
        self.assertEqual(response.status_code, 400)
        # POST missing data
        response = self.client.post(callback_url, json.dumps({}), content_type="application/json")
        self.assertEqual(response.status_code, 400)
        # POST missing fields in msg
        bad_data = copy.deepcopy(data)
        del bad_data['inboundSMSMessageList']['inboundSMSMessage'][0]['message']
        response = self.client.post(callback_url, json.dumps(bad_data), content_type="application/json")
        self.assertEqual(response.status_code, 400)
        # POST, invalid sender address
        bad_data = copy.deepcopy(data)
        bad_data['inboundSMSMessageList']['inboundSMSMessage'][0]['senderAddress'] = '9999'
        response = self.client.post(callback_url, json.dumps(bad_data), content_type="application/json")
        self.assertEqual(response.status_code, 400)
        # POST, invalid destination address
        bad_data = copy.deepcopy(data)
        bad_data['inboundSMSMessageList']['inboundSMSMessage'][0]['destinationAddress'] = '9999'
        response = self.client.post(callback_url, json.dumps(bad_data), content_type="application/json")
        self.assertEqual(response.status_code, 400)
        # POST, different destination address accepted (globe does mapping on their side)
        bad_data = copy.deepcopy(data)
        bad_data['inboundSMSMessageList']['inboundSMSMessage'][0]['destinationAddress'] = 'tel:9999'
        response = self.client.post(callback_url, json.dumps(bad_data), content_type="application/json")
        self.assertEqual(response.status_code, 200)
        msg = Msg.objects.get()
        self.assertEqual(msg.channel, self.channel)
        self.assertEqual(response.content, "Msgs Accepted: %d" % msg.id)
        Msg.objects.all().delete()
        # another valid post on the right address
        response = self.client.post(callback_url, json.dumps(data), content_type="application/json")
        self.assertEqual(response.status_code, 200)
        msg = Msg.objects.get()
        self.assertEqual(response.content, "Msgs Accepted: %d" % msg.id)
        # load our message
        self.assertEqual(msg.contact.get_urn(TEL_SCHEME).path, "+639171234567")
        self.assertEqual(msg.direction, INCOMING)
        self.assertEqual(msg.org, self.org)
        self.assertEqual(msg.channel, self.channel)
        self.assertEqual(msg.text, "Hello")
        self.assertEqual(msg.sent_on.date(), date(day=22, month=11, year=2013))

    def test_send(self):
        """Outbound Globe sends: WIRED on 200, ERRORED on failures, FAILED after three errors."""
        joe = self.create_contact("Joe", "+639171234567")
        msg = joe.send("MT", self.admin, trigger_send=False)
        settings.SEND_MESSAGES = True
        with patch('requests.post') as mock:
            mock.return_value = MockResponse(200, '{ "status":"accepted" }')
            # manually send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
            mock.assert_called_once_with('https://devapi.globelabs.com.ph/smsmessaging/v1/outbound/21586380/requests',
                                         headers={'User-agent': 'RapidPro'},
                                         data={'message': 'MT', 'app_secret': 'AppSecret', 'app_id': 'AppId',
                                               'passphrase': 'Passphrase', 'address': '639171234567'},
                                         timeout=5)
            # check the status of the message is now sent
            msg.refresh_from_db()
            self.assertEqual(msg.status, WIRED)
            self.assertTrue(msg.sent_on)
            self.clear_cache()
        # fix: sending uses POST (see assert_called_once_with above); patching
        # requests.get left the real HTTP call unmocked
        with patch('requests.post') as mock:
            mock.return_value = MockResponse(401, 'Error')
            # manually send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
            # check the status of the message now errored
            msg.refresh_from_db()
            self.assertEqual(ERRORED, msg.status)
            self.clear_cache()
        with patch('requests.post') as mock:
            mock.side_effect = Exception("Unable to reach host")
            # manually send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
            # check the status of the message now errored
            msg.refresh_from_db()
            self.assertEqual(ERRORED, msg.status)
            self.clear_cache()
        with patch('requests.post') as mock:
            mock.side_effect = Exception('Kaboom!')
            # manually send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
            # third consecutive error fails the message permanently
            msg.refresh_from_db()
            self.assertEqual(FAILED, msg.status)
            self.clear_cache()
            # regression check: no UnboundLocalError logged from the send path
            self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
                                                                              "referenced before assignment"))
class ViberTest(TembaTest):
    """Tests for the (legacy) Viber channel: status callbacks, receiving and sending."""

    def setUp(self):
        super(ViberTest, self).setUp()
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, None, Channel.TYPE_VIBER, None, '1001',
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_status(self):
        """Delivery reports update outgoing messages and ignore incoming ones."""
        data = {
            "message_token": 99999,
            "message_status": 0
        }
        # ok, what happens with an invalid uuid?
        response = self.client.post(reverse('handlers.viber_handler', args=['status', 'not-real-uuid']), json.dumps(data),
                                    content_type="application/json")
        self.assertEqual(400, response.status_code)
        # ok, try with a valid uuid, but invalid message id (no msg yet)
        status_url = reverse('handlers.viber_handler', args=['status', self.channel.uuid])
        response = self.client.post(status_url, json.dumps(data), content_type="application/json")
        self.assertEqual(200, response.status_code)
        self.assertContains(response, 'not found')
        # ok, lets create an outgoing message to update
        joe = self.create_contact("Joe Biden", "+254788383383")
        msg = joe.send("Hey Joe, it's Obama, pick up!", self.admin)
        msg.external_id = "99999"
        msg.save(update_fields=('external_id',))
        response = self.client.post(status_url, json.dumps(data), content_type="application/json")
        self.assertNotContains(response, 'not found')
        self.assertEqual(200, response.status_code)
        msg = Msg.objects.get(pk=msg.id)
        self.assertEqual(DELIVERED, msg.status)
        # ignore status report from viber for incoming message
        incoming = self.create_msg(direction=INCOMING, contact=joe, text="Read message")
        incoming.external_id = "88888"
        incoming.save(update_fields=('external_id',))
        data['message_token'] = 88888
        response = self.client.post(status_url, json.dumps(data), content_type="application/json")
        self.assertEqual(200, response.status_code)

    def test_receive(self):
        """Inbound Viber messages: reject malformed payloads, accept valid ones."""
        # invalid UUID
        response = self.client.post(reverse('handlers.viber_handler', args=['receive', '00000000-0000-0000-0000-000000000000']))
        self.assertEqual(response.status_code, 400)
        data = {
            "message_token": 44444444444444,
            "phone_number": "972512222222",
            "time": 1471906585,
            "message": {
                "text": "a message to the service",
                "tracking_data": "tracking_id:100035"
            }
        }
        callback_url = reverse('handlers.viber_handler', args=['receive', self.channel.uuid])
        # try a GET
        response = self.client.get(callback_url)
        self.assertEqual(response.status_code, 405)
        # POST invalid JSON data
        response = self.client.post(callback_url, "not json", content_type="application/json")
        self.assertEqual(response.status_code, 400)
        # POST missing data
        response = self.client.post(callback_url, json.dumps({}), content_type="application/json")
        self.assertEqual(response.status_code, 400)
        # ok, valid post
        response = self.client.post(callback_url, json.dumps(data), content_type="application/json")
        self.assertEqual(response.status_code, 200)
        msg = Msg.objects.get()
        self.assertEqual(response.content, "Msg Accepted: %d" % msg.id)
        # load our message
        self.assertEqual(msg.contact.get_urn(TEL_SCHEME).path, "+972512222222")
        self.assertEqual(msg.direction, INCOMING)
        self.assertEqual(msg.org, self.org)
        self.assertEqual(msg.channel, self.channel)
        self.assertEqual(msg.text, "a message to the service")
        self.assertEqual(msg.sent_on.date(), date(day=22, month=8, year=2016))
        self.assertEqual(msg.external_id, "44444444444444")

    def test_send(self):
        """Outbound Viber sends: WIRED, permanently FAILED on status 3, ERRORED otherwise."""
        joe = self.create_contact("Joe", "+639171234567")
        msg = joe.send("MT", self.admin, trigger_send=False)
        settings.SEND_MESSAGES = True
        with patch('requests.post') as mock:
            mock.return_value = MockResponse(200, '{ "status":0, "seq": 123456, "message_token": "999" }')
            # manually send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
            # check the status of the message is now sent
            msg.refresh_from_db()
            self.assertEqual(msg.status, WIRED)
            self.assertTrue(msg.sent_on)
            self.assertEqual(msg.external_id, "999")
            self.clear_cache()
        with patch('requests.post') as mock:
            mock.return_value = MockResponse(200, '{"status":3}')
            # send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
            # message should have failed permanently
            msg.refresh_from_db()
            self.assertEqual(msg.status, FAILED)
            self.clear_cache()
        with patch('requests.post') as mock:
            mock.return_value = MockResponse(401, '{"status":"error"}')
            # manually send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
            # check the status of the message now errored
            msg.refresh_from_db()
            self.assertEqual(ERRORED, msg.status)
            self.clear_cache()
        with patch('requests.post') as mock:
            mock.side_effect = Exception("Unable to reach host")
            # manually send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
            # check the status of the message now errored
            msg.refresh_from_db()
            self.assertEqual(ERRORED, msg.status)
            self.clear_cache()
class LineTest(TembaTest):
    """Tests for the LINE channel: receiving events and sending messages."""

    def setUp(self):
        super(LineTest, self).setUp()
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, None, Channel.TYPE_LINE, '123456789', '123456789',
                                      config=dict(channel_id='1234', channel_secret='1234', channel_mid='1234', auth_token='abcdefgij'),
                                      uuid='00000000-0000-0000-0000-000000001234')

    def test_receive(self):
        """Text events create Msgs; non-text events and GETs are rejected with 400."""
        data = {
            "events": [{
                "replyToken": "abcdefghij",
                "type": "message",
                "timestamp": 1451617200000,
                "source": {
                    "type": "user",
                    "userId": "uabcdefghij"
                },
                "message": {
                    "id": "100001",
                    "type": "text",
                    "text": "Hello, world"
                }
            }, {
                "replyToken": "abcdefghijklm",
                "type": "message",
                "timestamp": 1451617210000,
                "source": {
                    "type": "user",
                    "userId": "uabcdefghij"
                },
                "message": {
                    "id": "100002",
                    "type": "sticker",
                    "packageId": "1",
                    "stickerId": "1"
                }
            }]
        }
        callback_url = reverse('handlers.line_handler', args=[self.channel.uuid])
        response = self.client.post(callback_url, json.dumps(data), content_type="application/json")
        self.assertEqual(200, response.status_code)
        # load our message - only the text event becomes a Msg
        msg = Msg.objects.get()
        self.assertEqual("uabcdefghij", msg.contact.get_urn(LINE_SCHEME).path)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.channel, msg.channel)
        self.assertEqual("Hello, world", msg.text)
        # GETs are not allowed
        response = self.client.get(callback_url)
        self.assertEqual(400, response.status_code)
        # an event without a message body is rejected
        data = {
            "events": [{
                "replyToken": "abcdefghij",
                "type": "message",
                "timestamp": 1451617200000,
                "source": {
                    "type": "user",
                    "userId": "uabcdefghij"
                }
            }]
        }
        callback_url = reverse('handlers.line_handler', args=[self.channel.uuid])
        response = self.client.post(callback_url, json.dumps(data), content_type="application/json")
        self.assertEqual(400, response.status_code)

    def test_send(self):
        """Outbound LINE sends: WIRED on 200, ERRORED (with retry) on error/exception."""
        joe = self.create_contact("Joe", urn="line:uabcdefghijkl")
        msg = joe.send("Hello, world!", self.admin, trigger_send=False)
        with self.settings(SEND_MESSAGES=True):
            with patch('requests.post') as mock:
                mock.return_value = MockResponse(200, '{}')
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # check the status of the message is now sent
                msg.refresh_from_db()
                self.assertEqual(msg.status, WIRED)
                self.assertTrue(msg.sent_on)
                self.clear_cache()
            with patch('requests.post') as mock:
                mock.return_value = MockResponse(400, "Error", method='POST')
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(1, msg.error_count)
                self.assertTrue(msg.next_attempt)
            with patch('requests.post') as mock:
                mock.side_effect = Exception('Kaboom!')
                # manually send it off
                Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
                # message should be marked as an error
                msg.refresh_from_db()
                self.assertEqual(ERRORED, msg.status)
                self.assertEqual(2, msg.error_count)
                self.assertTrue(msg.next_attempt)
                # regression check: no UnboundLocalError logged from the send path
                self.assertFalse(ChannelLog.objects.filter(description__icontains="local variable 'response' "
                                                                                  "referenced before assignment"))
class ViberPublicTest(TembaTest):
    """Tests for Viber public channels: signed webhooks, event handling and sending."""

    def setUp(self):
        super(ViberPublicTest, self).setUp()
        self.channel.delete()
        self.channel = Channel.create(self.org, self.user, None, Channel.TYPE_VIBER_PUBLIC, None, '1001',
                                      uuid='00000000-0000-0000-0000-000000001234',
                                      config={Channel.CONFIG_AUTH_TOKEN: "auth_token"})
        self.callback_url = reverse('handlers.viber_public_handler', args=[self.channel.uuid])

    def test_receive_on_anon(self):
        """For anonymous orgs the sender's display name must not be stored."""
        with AnonymousOrg(self.org):
            data = {
                "event": "message",
                "timestamp": 1481142112807,
                "message_token": 4987381189870374000,
                "sender": {
                    "id": "xy5/5y6O81+/kbWHpLhBoA==",
                    "name": "ET3",
                },
                "message": {
                    "text": "incoming msg",
                    "type": "text",
                    "tracking_data": "3055"
                }
            }
            response = self.client.post(self.callback_url, json.dumps(data), content_type="application/json",
                                        HTTP_X_VIBER_CONTENT_SIGNATURE='ab4ea2337c1bb9a49eff53dd182f858817707df97cbc82368769e00c56d38419')
            self.assertEqual(response.status_code, 200)
            msg = Msg.objects.get()
            self.assertEqual(response.content, "Msg Accepted: %d" % msg.id)
            self.assertEqual(msg.contact.get_urn(VIBER_SCHEME).path, "xy5/5y6O81+/kbWHpLhBoA==")
            # name must stay empty on anon orgs
            self.assertEqual(msg.contact.name, None)
            self.assertEqual(msg.direction, INCOMING)
            self.assertEqual(msg.org, self.org)
            self.assertEqual(msg.channel, self.channel)
            self.assertEqual(msg.text, "incoming msg")
            self.assertEqual(msg.sent_on.date(), date(day=7, month=12, year=2016))
            self.assertEqual(msg.external_id, "4987381189870374000")

    def test_receive(self):
        """Inbound messages: signature checks, malformed payloads and a valid post."""
        # invalid UUID
        response = self.client.post(reverse('handlers.viber_public_handler', args=['00000000-0000-0000-0000-000000000000']))
        self.assertEqual(response.status_code, 200)
        data = {
            "event": "message",
            "timestamp": 1481142112807,
            "message_token": 4987381189870374000,
            "sender": {
                "id": "xy5/5y6O81+/kbWHpLhBoA==",
                "name": "ET3",
            },
            "message": {
                "text": "incoming msg",
                "type": "text",
                "tracking_data": "3055"
            }
        }
        # try a GET
        response = self.client.get(self.callback_url)
        self.assertEqual(response.status_code, 405)
        # POST invalid JSON data
        response = self.client.post(self.callback_url, "not json", content_type="application/json")
        self.assertEqual(response.status_code, 400)
        # Invalid signature
        response = self.client.post(self.callback_url, json.dumps({}), content_type="application/json",
                                    HTTP_X_VIBER_CONTENT_SIGNATURE='bad_sig')
        self.assertEqual(response.status_code, 400)
        # POST missing data
        response = self.client.post(self.callback_url, json.dumps({}), content_type="application/json",
                                    HTTP_X_VIBER_CONTENT_SIGNATURE='a182e13e58cbe9bb893cc03c055a1218fba31e8efa6f3ab74a54d4f8542ae376')
        self.assertEqual(response.status_code, 400)
        # ok, valid post
        response = self.client.post(self.callback_url, json.dumps(data), content_type="application/json",
                                    HTTP_X_VIBER_CONTENT_SIGNATURE='ab4ea2337c1bb9a49eff53dd182f858817707df97cbc82368769e00c56d38419')
        self.assertEqual(response.status_code, 200)
        msg = Msg.objects.get()
        self.assertEqual(response.content, "Msg Accepted: %d" % msg.id)
        self.assertEqual(msg.contact.get_urn(VIBER_SCHEME).path, "xy5/5y6O81+/kbWHpLhBoA==")
        self.assertEqual(msg.contact.name, "ET3")
        self.assertEqual(msg.direction, INCOMING)
        self.assertEqual(msg.org, self.org)
        self.assertEqual(msg.channel, self.channel)
        self.assertEqual(msg.text, "incoming msg")
        self.assertEqual(msg.sent_on.date(), date(day=7, month=12, year=2016))
        self.assertEqual(msg.external_id, "4987381189870374000")

    def assertSignedRequest(self, payload):
        """POST payload with a signature computed from the channel's auth token; expect 200."""
        from temba.channels.handlers import ViberPublicHandler
        signature = ViberPublicHandler.calculate_sig(payload, "auth_token")
        response = self.client.post(self.callback_url, payload, content_type="application/json",
                                    HTTP_X_VIBER_CONTENT_SIGNATURE=signature)
        self.assertEqual(response.status_code, 200, response.content)

    def assertMessageReceived(self, msg_type, payload_name, payload_value, assert_text, assert_media=None):
        """Send a message event of msg_type with the given payload and check the resulting Msg."""
        data = {
            "event": "message",
            "timestamp": 1481142112807,
            "message_token": 4987381189870374000,
            "sender": {
                "id": "xy5/5y6O81+/kbWHpLhBoA==",
                "name": "ET3",
            },
            "message": {
                "text": "incoming msg",
                "type": "undefined",
                "tracking_data": "3055",
            }
        }
        data['message']['type'] = msg_type
        data['message'][payload_name] = payload_value
        self.assertSignedRequest(json.dumps(data))
        msg = Msg.objects.get()
        self.assertEqual(msg.text, assert_text)
        if assert_media:
            self.assertEqual(msg.media, assert_media)

    def test_receive_contact(self):
        """Contact messages are rendered as 'name: phone'."""
        self.assertMessageReceived('contact', 'contact', dict(name="Alex", phone_number="+12067799191"), 'Alex: +12067799191')

    def test_receive_url(self):
        """URL messages keep the URL as text."""
        self.assertMessageReceived('url', 'media', 'http://foo.com/', 'http://foo.com/')

    def test_receive_gps(self):
        """Location messages are rendered as a geo: URI."""
        self.assertMessageReceived('location', 'location', dict(lat='1.2', lon='-1.3'), 'geo:1.2,-1.3')

    def test_webhook_check(self):
        """Viber's webhook validation event must be accepted."""
        data = {
            "event": "webhook",
            "timestamp": 4987034606158369000,
            "message_token": 1481059480858
        }
        self.assertSignedRequest(json.dumps(data))

    def test_subscribed(self):
        """Subscribe events create contacts; unsubscribe stops known contacts only."""
        data = {
            "event": "subscribed",
            "timestamp": 1457764197627,
            "user": {
                "id": "01234567890A=",
                "name": "yarden",
                "avatar": "http://avatar_url",
                "country": "IL",
                "language": "en",
                "api_version": 1
            },
            "message_token": 4912661846655238145
        }
        self.assertSignedRequest(json.dumps(data))
        # check that the contact was created
        contact = Contact.objects.get(org=self.org, urns__path='01234567890A=', urns__scheme=VIBER_SCHEME)
        self.assertEqual(contact.name, "yarden")
        data = {
            "event": "unsubscribed",
            "timestamp": 1457764197627,
            "user_id": "01234567890A=",
            "message_token": 4912661846655238145
        }
        self.assertSignedRequest(json.dumps(data))
        contact.refresh_from_db()
        self.assertTrue(contact.is_stopped)
        # use a user id we haven't seen before
        data['user_id'] = "01234567890B="
        self.assertSignedRequest(json.dumps(data))
        # should not create contacts we don't already know about
        self.assertIsNone(Contact.from_urn(self.org, URN.from_viber("01234567890B=")))

    def test_subscribed_on_anon(self):
        """On anonymous orgs subscribe events must not store the user's name."""
        with AnonymousOrg(self.org):
            data = {
                "event": "subscribed",
                "timestamp": 1457764197627,
                "user": {
                    "id": "01234567890A=",
                    "name": "yarden",
                    "avatar": "http://avatar_url",
                    "country": "IL",
                    "language": "en",
                    "api_version": 1
                },
                "message_token": 4912661846655238145
            }
            self.assertSignedRequest(json.dumps(data))
            # check that the contact was created
            contact = Contact.objects.get(org=self.org, urns__path='01234567890A=', urns__scheme=VIBER_SCHEME)
            self.assertEqual(contact.name, None)

    def test_conversation_started(self):
        # this is a no-op
        data = {
            "event": "conversation_started",
            "timestamp": 1457764197627,
            "message_token": 4912661846655238145,
            "type": "open",
            "context": "context information",
            "user": {
                "id": "01234567890A=",
                "name": "yarden",
                "avatar": "http://avatar_url",
                "country": "IL",
                "language": "en",
                "api_version": 1
            }
        }
        self.assertSignedRequest(json.dumps(data))

    def test_send(self):
        """Outbound sends: WIRED on status 0, FAILED on status 3, ERRORED otherwise."""
        joe = self.create_contact("Joe", urn="viber:xy5/5y6O81+/kbWHpLhBoA==")
        msg = joe.send("MT", self.admin, trigger_send=False)
        settings.SEND_MESSAGES = True
        with patch('requests.post') as mock:
            mock.return_value = MockResponse(200, '{"status":0,"status_message":"ok","message_token":4987381194038857789}')
            # manually send it off
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
            mock.assert_called_with('https://chatapi.viber.com/pa/send_message',
                                    headers={'Accept': u'application/json', u'User-agent': u'RapidPro'},
                                    json={'text': u'MT',
                                          'auth_token': u'auth_token',
                                          'tracking_data': msg.id,
                                          'type': u'text',
                                          'receiver': u'xy5/5y6O81+/kbWHpLhBoA=='},
                                    timeout=5)
            msg.refresh_from_db()
            self.assertEqual(msg.status, WIRED)
            self.assertTrue(msg.sent_on)
            self.assertEqual(msg.external_id, "4987381194038857789")
            self.clear_cache()
        with patch('requests.post') as mock:
            # status 3 is an invalid auth token - fail permanently
            mock.return_value = MockResponse(200, '{"status":3, "status_message":"invalidAuthToken"}')
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
            msg.refresh_from_db()
            self.assertEqual(msg.status, FAILED)
            self.clear_cache()
        with patch('requests.post') as mock:
            mock.return_value = MockResponse(401, '{"status":"5"}')
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
            msg.refresh_from_db()
            self.assertEqual(ERRORED, msg.status)
            self.clear_cache()
        with patch('requests.post') as mock:
            mock.side_effect = Exception("Unable to reach host")
            Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
            msg.refresh_from_db()
            self.assertEqual(ERRORED, msg.status)
            self.clear_cache()
| tsotetsi/textily-web | temba/channels/tests.py | Python | agpl-3.0 | 393,025 | [
"VisIt"
] | 341c30e7b426b757c9945c76900fff7d9334261c66b6f5bce1eb7eb03959c8cf |
#
# Copyright 2018-2019, 2021 Jan Griesser (U. Freiburg)
# 2021 Lars Pastewka (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Simple pair potential.
"""
#
# Coding convention
# * All numpy arrays are suffixed with the array dimensions
# * The suffix stands for a certain type of dimension:
# - n: Atomic index, i.e. array dimension of length nb_atoms
# - p: Pair index, i.e. array dimension of length nb_pairs
# - c: Cartesian index, array dimension of length 3
#
from abc import ABC, abstractmethod
import numpy as np
from scipy.sparse import bsr_matrix
from ...neighbours import neighbour_list, first_neighbours
from ..calculator import MatscipyCalculator
from ...numpy_tricks import mabincount
class CutoffInteraction(ABC):
    """Abstract base class for pair interactions with a finite cutoff radius."""

    def __init__(self, cutoff):
        """Store the cutoff radius for this interaction."""
        self._cutoff = cutoff

    @property
    def cutoff(self):
        """Physical cutoff distance for pair interaction."""
        return self._cutoff

    @cutoff.setter
    def cutoff(self, value):
        # negative cutoffs make no physical sense; clamp to zero
        self._cutoff = np.clip(value, 0, None)

    def get_cutoff(self):
        """Get cutoff. Deprecated."""
        return self.cutoff

    @abstractmethod
    def __call__(self, r, qi, qj):
        """Compute interaction energy."""

    @abstractmethod
    def first_derivative(self, r, qi, qj):
        """Compute derivative w/r to distance."""

    @abstractmethod
    def second_derivative(self, r, qi, qj):
        """Compute second derivative w/r to distance."""

    def derivative(self, n=1):
        """Return the function computing the n-th derivative (n = 1 or 2)."""
        if n == 1:
            return self.first_derivative
        if n == 2:
            return self.second_derivative
        raise ValueError(
            "Don't know how to compute {}-th derivative.".format(n)
        )
class LennardJonesCut(CutoffInteraction):
    """
    12-6 Lennard-Jones potential with a hard cutoff.

    Only the energy is shifted to zero at the cutoff; derivatives are those of
    the unshifted potential and jump discontinuously to zero beyond it.
    """

    def __init__(self, epsilon, sigma, cutoff):
        super().__init__(cutoff)
        self.epsilon = epsilon
        self.sigma = sigma
        # value of (sigma/r)^12 - (sigma/r)^6 at the cutoff, subtracted from
        # the energy so that it vanishes there
        self.offset = (sigma / cutoff) ** 12 - (sigma / cutoff) ** 6

    def __call__(self, r, *args):
        """Potential energy at distance r (shifted to zero at the cutoff)."""
        sr6 = (self.sigma / r) ** 6
        return 4 * self.epsilon * ((sr6 - 1) * sr6 - self.offset)

    def first_derivative(self, r, *args):
        """First derivative of the energy with respect to r."""
        sr = self.sigma / r
        sr6 = sr**6
        return -24 * self.epsilon / self.sigma * (2 * sr6 - 1) * sr6 * sr

    def second_derivative(self, r, *args):
        """Second derivative of the energy with respect to r."""
        sr2 = (self.sigma / r) ** 2
        sr6 = sr2**3
        return 24 * self.epsilon / self.sigma**2 * (26 * sr6 - 7) * sr6 * sr2
###
class LennardJonesQuadratic(CutoffInteraction):
    """
    12-6 Lennard-Jones potential with a smooth cutoff.

    Energy, force and second derivative are all shifted to zero at the
    cutoff by subtracting a quadratic Taylor expansion around the cutoff.
    """

    def __init__(self, epsilon, sigma, cutoff):
        super().__init__(cutoff)
        self.epsilon = epsilon
        self.sigma = sigma
        sr = sigma / cutoff
        # Values of V/(4 eps), V'/(4 eps) and V''/(4 eps) at the cutoff.
        self.offset_energy = sr ** 12 - sr ** 6
        self.offset_force = (
            6 / cutoff * (-2 * sr ** 12 + sr ** 6)
        )
        self.offset_dforce = (1 / cutoff**2) * (
            156 * sr ** 12 - 42 * sr ** 6
        )

    def __call__(self, r, *args):
        """Return the (smoothly shifted) potential energy at distance r."""
        sr6 = (self.sigma / r) ** 6
        dr = r - self.cutoff
        shifted = (
            (sr6 - 1) * sr6
            - self.offset_energy
            - dr * self.offset_force
            - (dr ** 2 / 2) * self.offset_dforce
        )
        return 4 * self.epsilon * shifted

    def first_derivative(self, r, *args):
        """dV/dr at distance r; vanishes at the cutoff."""
        sr6 = (self.sigma / r) ** 6
        dr = r - self.cutoff
        shifted = (
            (6 / r) * (-2 * sr6 + 1) * sr6
            - self.offset_force
            - dr * self.offset_dforce
        )
        return 4 * self.epsilon * shifted

    def second_derivative(self, r, *args):
        """d2V/dr2 at distance r; vanishes at the cutoff."""
        sr6 = (self.sigma / r) ** 6
        return (
            4
            * self.epsilon
            * ((1 / r**2) * (156 * sr6 - 42) * sr6 - self.offset_dforce)
        )
###
class LennardJonesLinear(CutoffInteraction):
    """
    12-6 Lennard-Jones potential with a force-shifted cutoff.

    Energy and force are shifted to zero at the cutoff; the second
    derivative is left unshifted.
    """

    def __init__(self, epsilon, sigma, cutoff):
        super().__init__(cutoff)
        self.epsilon = epsilon
        self.sigma = sigma
        sr = sigma / cutoff
        # Values of V/(4 eps) and V'/(4 eps) at the cutoff.
        self.offset_energy = sr ** 12 - sr ** 6
        self.offset_force = (
            6 / cutoff * (-2 * sr ** 12 + sr ** 6)
        )

    def __call__(self, r, *args):
        """Return the (force-shifted) potential energy at distance r."""
        sr6 = (self.sigma / r) ** 6
        dr = r - self.cutoff
        shifted = (
            (sr6 - 1) * sr6
            - self.offset_energy
            - dr * self.offset_force
        )
        return 4 * self.epsilon * shifted

    def first_derivative(self, r, *args):
        """dV/dr at distance r; vanishes at the cutoff."""
        sr6 = (self.sigma / r) ** 6
        return (
            4
            * self.epsilon
            * ((6 / r) * (-2 * sr6 + 1) * sr6 - self.offset_force)
        )

    def second_derivative(self, r, *args):
        """d2V/dr2 at distance r (not shifted)."""
        sr6 = (self.sigma / r) ** 6
        return 4 * self.epsilon * ((1 / r**2) * (156 * sr6 - 42) * sr6)
###
class FeneLJCut(LennardJonesCut):
    """
    Finite extensible nonlinear elastic (FENE) potential for a bead-spring
    polymer model, combined with a hard-cutoff 12-6 Lennard-Jones
    interaction.  The LJ cutoff rc = 2^(1/6)*sigma sits at the minimum of
    the LJ potential, which makes energy and force continuous there.
    """

    def __init__(self, K, R0, epsilon, sigma):
        """
        Parameters
        ----------
        K : float
            Spring constant of the FENE bond.
        R0 : float
            Maximum bond extension (divergence of the FENE term).
        epsilon, sigma : float
            Lennard-Jones energy and length scales.
        """
        # BUG FIX: LennardJonesCut.__init__ takes (epsilon, sigma, cutoff);
        # the previous code passed only the cutoff, which raised a
        # TypeError on construction and never set the LJ energy offset.
        super().__init__(epsilon, sigma, 2 ** (1 / 6) * sigma)
        self.K = K
        self.R0 = R0

    def __call__(self, r, *args):
        """FENE bond energy plus shifted LJ energy at distance r."""
        return -0.5 * self.K * self.R0**2 * np.log(
            1 - (r / self.R0) ** 2
        ) + super().__call__(r)

    def first_derivative(self, r, *args):
        """dV/dr at distance r (diverges as r approaches R0)."""
        return self.K * r / (
            1 - (r / self.R0) ** 2
        ) + super().first_derivative(r)

    def second_derivative(self, r, *args):
        """d2V/dr2 at distance r."""
        invLength = 1 / (1 - (r / self.R0) ** 2)
        return (
            self.K * invLength
            + 2 * self.K * r**2 * invLength**2 / self.R0**2
            + super().second_derivative(r)
        )
###
class LennardJones84(CutoffInteraction):
    """
    8-4 Lennard-Jones potential, used to model the structure of CuZr.

    Kobayashi, Shinji et. al. "Computer simulation of atomic structure of
    Cu57Zr43 amorphous alloy." Journal of the Physical Society of Japan
    48.4 (1980): 1147-1152.
    """

    def __init__(self, C1, C2, C3, C4, cutoff):
        super().__init__(cutoff)
        self.C1 = C1
        self.C2 = C2
        self.C3 = C3
        self.C4 = C4

    def __call__(self, r, *args):
        """V(r) = C2/r^8 - C1/r^4 + C3*r + C4."""
        inv4 = (1 / r) ** 4
        return (self.C2 * inv4 - self.C1) * inv4 + self.C3 * r + self.C4

    def first_derivative(self, r, *args):
        """dV/dr = -8*C2/r^9 + 4*C1/r^5 + C3."""
        inv4 = (1 / r) ** 4
        return (-8 * self.C2 * inv4 / r + 4 * self.C1 / r) * inv4 + self.C3

    def second_derivative(self, r, *args):
        """d2V/dr2 = 72*C2/r^10 - 20*C1/r^6."""
        inv4 = (1 / r) ** 4
        return (72 * self.C2 * inv4 / r**2 - 20 * self.C1 / r**2) * inv4
class BeestKramerSanten(CutoffInteraction):
"""
Beest, Kramer, van Santen (BKS) potential.
Buckingham:
Energy is shifted to zero at the cutoff.
References
----------
B. W. Van Beest, G. J. Kramer and R. A. Van Santen, Phys. Rev. Lett. 64.16 (1990)
"""
def __init__(self, A, B, C, cutoff):
super().__init__(cutoff)
self.A, self.B, self.C = A, B, C
self.buck_offset_energy = A * np.exp(-B * cutoff) - C / cutoff**6
def __call__(self, r, *args):
return (
self.A * np.exp(-self.B * r)
- self.C / r**6
- self.buck_offset_energy
)
def first_derivative(self, r, *args):
return -self.A * self.B * np.exp(-self.B * r) + 6 * self.C / r**7
def second_derivative(self, r, *args):
return (
self.A * self.B**2 * np.exp(-self.B * r) - 42 * self.C / r**8
)
# Broadcast slices: append one (_c) or two (_cc) trailing axes so that
# per-pair scalar arrays broadcast against Cartesian vectors/matrices,
# e.g. de_p[_c] * r_pc or dde_p[_cc] * nn_pcc.
_c, _cc = np.s_[..., np.newaxis], np.s_[..., np.newaxis, np.newaxis]
class PairPotential(MatscipyCalculator):
    """ASE calculator for generic pair potentials.

    ``f`` maps a pair of atomic numbers (a 2-tuple) to a cutoff
    interaction object providing ``cutoff``, ``derivative(n)`` and
    ``__call__(r, qi, qj)``.
    """

    implemented_properties = [
        "energy",
        "free_energy",
        "stress",
        "forces",
        "hessian",
        "nonaffine_forces",
        "birch_coefficients",
        "nonaffine_elastic_contribution",
        "stress_elastic_contribution",
        "born_constants",
    ]
    default_parameters = {}
    name = "PairPotential"

    class _dummy_charge:
        """Dummy object for when system has no charge."""

        def __getitem__(self, x):
            # Any indexing (including boolean masks) yields None, so
            # charge-less systems pass qi = qj = None to the potentials.
            return None

    def __init__(self, f, cutoff=None):
        """Construct calculator.

        Parameters
        ----------
        f : dict
            Mapping of pairs of atomic numbers to interaction objects.
        cutoff : optional
            Unused; cutoffs are taken from the interaction objects.
        """
        MatscipyCalculator.__init__(self)
        self.f = f
        self.reset()

    def reset(self):
        """Reset cached state and rebuild per-pair lookup tables."""
        super().reset()
        # Per-pair cutoff distances; also serves as the cutoff dictionary
        # handed to neighbour_list().
        self.dict = {x: obj.cutoff for x, obj in self.f.items()}
        # First and second distance derivatives of each pair interaction.
        self.df = {x: obj.derivative(1) for x, obj in self.f.items()}
        self.df2 = {x: obj.derivative(2) for x, obj in self.f.items()}

    def _mask_pairs(self, i_p, j_p):
        """Iterate over pair masks.

        Yields (mask, pair) where ``mask`` selects the neighbour-list
        entries whose species match ``pair`` in either order.
        """
        numi_p, numj_p = self.atoms.numbers[i_p], self.atoms.numbers[j_p]
        for pair in self.dict:
            mask = (numi_p == pair[0]) & (numj_p == pair[1])
            if pair[0] != pair[1]:
                # Hetero pair: also match the swapped species order.
                mask |= (numi_p == pair[1]) & (numj_p == pair[0])
            yield mask, pair

    def _get_charges(self, i_p, j_p):
        """Return per-pair charges [qi_p, qj_p] if available.

        Falls back to dummy objects whose indexing yields None.
        """
        if self.atoms.has("charge"):
            return [self.atoms.get_array("charge")[i] for i in (i_p, j_p)]
        return [self._dummy_charge(), self._dummy_charge()]

    def calculate(self, atoms, properties, system_changes):
        """Calculate energy, forces and stress for the configuration."""
        super().calculate(atoms, properties, system_changes)

        nb_atoms = len(self.atoms)

        i_p, j_p, r_p, r_pc = neighbour_list("ijdD", atoms, self.dict)
        qi_p, qj_p = self._get_charges(i_p, j_p)

        # Per-pair energies and first derivatives.
        e_p = np.zeros_like(r_p)
        de_p = np.zeros_like(r_p)
        for mask, pair in self._mask_pairs(i_p, j_p):
            e_p[mask] = self.f[pair](r_p[mask], qi_p[mask], qj_p[mask])
            de_p[mask] = self.df[pair](r_p[mask], qi_p[mask], qj_p[mask])

        # Each bond appears twice in the neighbour list (i->j and j->i).
        epot = 0.5 * np.sum(e_p)

        # Forces
        df_pc = -0.5 * de_p[_c] * r_pc / r_p[_c]
        f_nc = mabincount(j_p, df_pc, nb_atoms) - mabincount(
            i_p, df_pc, nb_atoms
        )

        # Virial, in Voigt order (xx, yy, zz, yz, xz, xy).
        virial_v = -np.array(
            [
                r_pc[:, 0] * df_pc[:, 0],  # xx
                r_pc[:, 1] * df_pc[:, 1],  # yy
                r_pc[:, 2] * df_pc[:, 2],  # zz
                r_pc[:, 1] * df_pc[:, 2],  # yz
                r_pc[:, 0] * df_pc[:, 2],  # xz
                r_pc[:, 0] * df_pc[:, 1],
            ]
        ).sum(
            axis=1
        )  # xy

        self.results.update(
            {
                "energy": epot,
                "free_energy": epot,
                "stress": virial_v / atoms.get_volume(),
                "forces": f_nc,
            }
        )

    ###

    def get_hessian(self, atoms, format="dense", divide_by_masses=False):
        """
        Calculate the Hessian matrix for a pair potential.

        For an atomic configuration with N atoms in d dimensions the hessian
        matrix is a symmetric, hermitian matrix with a shape of (d*N, d*N).
        The matrix is in general a sparse matrix, which consists of dense
        blocks of shape (d, d), which are the mixed second derivatives. The
        result of the derivation for a pair potential can be found e.g. in:
        L. Pastewka et. al. "Seamless elastic boundaries for atomistic
        calculations", Phys. Rev. B 86, 075459 (2012).

        Parameters
        ----------
        atoms : ase.Atoms
            Atomic configuration in a local or global minima.
        format : "dense", "sparse" or "neighbour-list"
            Output format of the hessian matrix.
        divide_by_masses : bool
            If true return the dynamical matrix, else the hessian matrix.

        Restrictions
        ------------
        This method is currently only implemented for three dimensional
        systems.
        """
        if self.atoms is None:
            self.atoms = atoms

        f = self.f
        df = self.df
        df2 = self.df2

        nb_atoms = len(atoms)

        i_p, j_p, r_p, r_pc = neighbour_list("ijdD", atoms, self.dict)
        first_i = first_neighbours(nb_atoms, i_p)

        qi_p, qj_p = self._get_charges(i_p, j_p)

        # Per-pair energy and its first two distance derivatives.
        e_p = np.zeros_like(r_p)
        de_p = np.zeros_like(r_p)
        dde_p = np.zeros_like(r_p)
        for mask, pair in self._mask_pairs(i_p, j_p):
            e_p[mask] = f[pair](r_p[mask], qi_p[mask], qj_p[mask])
            de_p[mask] = df[pair](r_p[mask], qi_p[mask], qj_p[mask])
            dde_p[mask] = df2[pair](r_p[mask], qi_p[mask], qj_p[mask])

        # Unit bond vectors and their outer products n (x) n.
        n_pc = r_pc / r_p[_c]
        nn_pcc = n_pc[..., :, np.newaxis] * n_pc[..., np.newaxis, :]

        # Off-diagonal (pair) 3x3 blocks of the Hessian.
        H_pcc = -(dde_p[_cc] * nn_pcc)
        H_pcc += -((de_p / r_p)[_cc] * (np.eye(3, dtype=n_pc.dtype) - nn_pcc))

        # Sparse BSR-matrix
        if format == "sparse":
            if divide_by_masses:
                # Dynamical matrix: scale each pair block by
                # 1/sqrt(m_i * m_j).
                masses_n = atoms.get_masses()
                geom_mean_mass_p = np.sqrt(masses_n[i_p] * masses_n[j_p])
                H = bsr_matrix(
                    ((H_pcc.T / geom_mean_mass_p).T, j_p, first_i),
                    shape=(3 * nb_atoms, 3 * nb_atoms),
                )
            else:
                H = bsr_matrix(
                    (H_pcc, j_p, first_i), shape=(3 * nb_atoms, 3 * nb_atoms)
                )

            # Diagonal blocks follow from the acoustic sum rule:
            # H_ii = -sum_j H_ij.
            Hdiag_icc = np.empty((nb_atoms, 3, 3))
            for x in range(3):
                for y in range(3):
                    Hdiag_icc[:, x, y] = -np.bincount(
                        i_p, weights=H_pcc[:, x, y], minlength=nb_atoms
                    )

            if divide_by_masses:
                H += bsr_matrix(
                    (
                        (Hdiag_icc.T / masses_n).T,
                        np.arange(nb_atoms),
                        np.arange(nb_atoms + 1),
                    ),
                    shape=(3 * nb_atoms, 3 * nb_atoms),
                )
            else:
                H += bsr_matrix(
                    (Hdiag_icc, np.arange(nb_atoms), np.arange(nb_atoms + 1)),
                    shape=(3 * nb_atoms, 3 * nb_atoms),
                )

            return H

        # Dense matrix format
        elif format == "dense":
            H = np.zeros((3 * nb_atoms, 3 * nb_atoms))
            # Scatter the pair blocks into the full matrix.
            for atom in range(len(i_p)):
                H[
                    3 * i_p[atom] : 3 * i_p[atom] + 3,
                    3 * j_p[atom] : 3 * j_p[atom] + 3,
                ] += H_pcc[atom]

            # Diagonal blocks from the acoustic sum rule (see above).
            Hdiag_icc = np.empty((nb_atoms, 3, 3))
            for x in range(3):
                for y in range(3):
                    Hdiag_icc[:, x, y] = -np.bincount(
                        i_p, weights=H_pcc[:, x, y], minlength=nb_atoms
                    )

            Hdiag_ncc = np.zeros((3 * nb_atoms, 3 * nb_atoms))
            for atom in range(nb_atoms):
                Hdiag_ncc[
                    3 * atom : 3 * atom + 3, 3 * atom : 3 * atom + 3
                ] += Hdiag_icc[atom]

            H += Hdiag_ncc

            if divide_by_masses:
                masses_p = (atoms.get_masses()).repeat(3)
                H /= np.sqrt(masses_p.reshape(-1, 1) * masses_p.reshape(1, -1))
                return H
            else:
                return H

        # Neighbour list format
        elif format == "neighbour-list":
            return H_pcc, i_p, j_p, r_pc, r_p
| libAtoms/matscipy | matscipy/calculators/pair_potential/calculator.py | Python | lgpl-2.1 | 16,909 | [
"ASE",
"Matscipy"
] | 1dc3dd2f65e0f06206606a31671d06908e659a57b417db0de3b86901b266b15e |
import os
import sys
import types
import re
from numpy.core.numerictypes import obj2sctype, generic, issubclass_, \
issubsctype, issubdtype
from numpy.core.multiarray import dtype as _dtype
from numpy.core import product, ndarray
__all__ = ['issubclass_', 'get_numpy_include', 'issubsctype',
'issubdtype', 'deprecate', 'deprecate_with_doc',
'get_numarray_include',
'get_include', 'info', 'source', 'who', 'lookfor',
'byte_bounds', 'may_share_memory', 'safe_eval']
def get_include():
    """
    Return the directory that contains the numpy \\*.h header files.

    Extension modules that need to compile against numpy should use this
    function to locate the appropriate include directory.

    Notes
    -----
    When using ``distutils``, for example in ``setup.py``::

        import numpy as np
        ...
        Extension('extension_name', ...
                  include_dirs=[np.get_include()])
        ...

    """
    import numpy
    if numpy.show_config is None:
        # running from numpy source directory
        return os.path.join(os.path.dirname(numpy.__file__),
                            'core', 'include')
    # using installed numpy core headers
    import numpy.core as core
    return os.path.join(os.path.dirname(core.__file__), 'include')
def get_numarray_include(type=None):
    """
    Return the directory that contains the numarray \\*.h header files.

    Extension modules that need to compile against numarray should use this
    function to locate the appropriate include directory.

    Parameters
    ----------
    type : any, optional
        If not None, the numpy include directory is appended to the
        returned list; the value itself is otherwise unused.

    Notes
    -----
    When using ``distutils``, for example in ``setup.py``.
    ::

        import numpy as np
        ...
        Extension('extension_name', ...
                  include_dirs=[np.get_numarray_include()])
        ...

    """
    from numpy.numarray import get_numarray_include_dirs
    include_dirs = get_numarray_include_dirs()
    if type is None:
        # Default: only the numarray compatibility headers.
        return include_dirs[0]
    else:
        # Also hand back the core numpy include directory.
        return include_dirs + [get_include()]
# Helper to rename a function object, with a fallback for Python 2.3
# where ``func.__name__`` is read-only.
if sys.version_info < (2, 4):
    # Can't set __name__ in 2.3
    import new
    def _set_function_name(func, name):
        # Rebuild the function object from its parts with the new name.
        func = new.function(func.func_code, func.func_globals,
                            name, func.func_defaults, func.func_closure)
        return func
else:
    def _set_function_name(func, name):
        # 2.4+: __name__ is simply writable.
        func.__name__ = name
        return func
def deprecate(func, oldname=None, newname=None):
    """Deprecate old functions.

    Issues a DeprecationWarning, adds warning to oldname's docstring,
    rebinds oldname.__name__ and returns new function object.

    Example:
      oldfunc = deprecate(newfunc, 'oldfunc', 'newfunc')

    """
    import warnings
    if oldname is None:
        # Prefer the Python 2 attribute, fall back to __name__.
        try:
            oldname = func.func_name
        except AttributeError:
            oldname = func.__name__
    if newname is None:
        str1 = "%s is deprecated" % (oldname,)
        depdoc = "%s is DEPRECATED!!" % (oldname,)
    else:
        # BUG FIX: the original line ended with a stray comma, which made
        # str1 a 1-tuple; warnings.warn() then failed with a TypeError
        # whenever the wrapper was called.
        str1 = "%s is deprecated, use %s" % (oldname, newname)
        depdoc = '%s is DEPRECATED!! -- use %s instead' % (oldname, newname,)

    def newfunc(*args, **kwds):
        """Wrapper that warns about the deprecation, then delegates."""
        warnings.warn(str1, DeprecationWarning)
        return func(*args, **kwds)

    newfunc = _set_function_name(newfunc, oldname)
    # Prepend the deprecation notice to the wrapped function's docstring.
    doc = func.__doc__
    if doc is None:
        doc = depdoc
    else:
        doc = '\n\n'.join([depdoc, doc])
    newfunc.__doc__ = doc
    # Preserve any function attributes (builtins have no __dict__).
    try:
        d = func.__dict__
    except AttributeError:
        pass
    else:
        newfunc.__dict__.update(d)
    return newfunc
def deprecate_with_doc(somestr):
    """Decorator to deprecate functions and provide detailed documentation
    with 'somestr' that is added to the functions docstring.

    Example:
      depmsg = 'function scipy.foo has been merged into numpy.foobar'
      @deprecate_with_doc(depmsg)
      def foo():
          pass

    """

    def _decorator(func):
        # Wrap via deprecate(), then append the extra documentation.
        wrapped = deprecate(func)
        wrapped.__doc__ += "\n" + somestr
        return wrapped

    return _decorator
# Deprecated spelling of get_include(), kept as a warning-emitting alias.
get_numpy_include = deprecate(get_include, 'get_numpy_include', 'get_include')

#--------------------------------------------
# Determine if two arrays can share memory
#--------------------------------------------
def byte_bounds(a):
    """Return (low, high) pointers to the end-points of the array.

    ``low`` is the address of the first byte; ``high`` points just *past*
    the last byte.  If the array is not single-segment, then it may not
    actually use every byte between these bounds.

    The array provided must conform to the Python-side of the array
    interface.
    """
    ai = a.__array_interface__
    base = ai['data'][0]
    strides = ai['strides']
    shape = ai['shape']
    itemsize = int(ai['typestr'][2:])

    low = high = base
    if strides is None:  # contiguous case
        high += product(shape, dtype=int) * itemsize
    else:
        # A negative stride extends the block below the data pointer,
        # a positive one above it.
        for count, stride in zip(shape, strides):
            span = (count - 1) * stride
            if stride < 0:
                low += span
            else:
                high += span
        high += itemsize
    return low, high
def may_share_memory(a, b):
    """Determine if two arrays can share memory.

    The memory bounds of ``a`` and ``b`` are computed; True is returned if
    they overlap, False otherwise.  A return of True does not necessarily
    mean the two arrays share any element -- only that they *might*.
    """
    a_low, a_high = byte_bounds(a)
    b_low, b_high = byte_bounds(b)
    # Overlap iff neither block lies entirely before the other.
    return not (b_low >= a_high or a_low >= b_high)
#-----------------------------------------------------------------------------
# Function for output and information on the variables used.
#-----------------------------------------------------------------------------
def who(vardict=None):
    """
    Print the Numpy arrays in the given dictionary.

    If there is no dictionary passed in or `vardict` is None then returns
    Numpy arrays in the globals() dictionary (all Numpy arrays in the
    namespace).

    Parameters
    ----------
    vardict : dict, optional
        A dictionary possibly containing ndarrays.  Default is globals().

    Returns
    -------
    out : None
        Returns 'None'.

    Notes
    -----
    Prints out the name, shape, bytes and type of all of the ndarrays
    present in `vardict`.

    Examples
    --------
    >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str', 'idx': 5}
    >>> np.who(d)
    Name            Shape            Bytes            Type
    ===========================================================
    <BLANKLINE>
    y               3                24               float64
    x               2                16               float64
    <BLANKLINE>
    Upper bound on total bytes  =       40

    """
    if vardict is None:
        # Default to the caller's global namespace.
        frame = sys._getframe().f_back
        vardict = frame.f_globals
    sta = []
    cache = {}
    for name in vardict.keys():
        if isinstance(vardict[name], ndarray):
            var = vardict[name]
            idv = id(var)
            if idv in cache.keys():
                # Same array object seen under another name: mark as alias
                # so its bytes are not double-counted below.
                namestr = name + " (%s)" % cache[idv]
                original = 0
            else:
                cache[idv] = name
                namestr = name
                original = 1
            shapestr = " x ".join(map(str, var.shape))
            bytestr = str(var.itemsize*product(var.shape))
            sta.append([namestr, shapestr, bytestr, var.dtype.name,
                        original])

    # Column widths and the byte total (aliases excluded via val[4]).
    maxname = 0
    maxshape = 0
    maxbyte = 0
    totalbytes = 0
    for k in range(len(sta)):
        val = sta[k]
        if maxname < len(val[0]):
            maxname = len(val[0])
        if maxshape < len(val[1]):
            maxshape = len(val[1])
        if maxbyte < len(val[2]):
            maxbyte = len(val[2])
        if val[4]:
            totalbytes += int(val[2])

    if len(sta) > 0:
        sp1 = max(10, maxname)
        sp2 = max(10, maxshape)
        sp3 = max(10, maxbyte)
        prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ')
        print prval + "\n" + "="*(len(prval)+5) + "\n"

    for k in range(len(sta)):
        val = sta[k]
        print "%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
                                        val[1], ' '*(sp2-len(val[1])+5),
                                        val[2], ' '*(sp3-len(val[2])+5),
                                        val[3])
    print "\nUpper bound on total bytes = %d" % totalbytes
    return
#-----------------------------------------------------------------------------
# NOTE: pydoc defines a help function which works simliarly to this
# except it uses a pager to take over the screen.
# combine name and arguments and split to multiple lines of
# width characters. End lines on a comma and begin argument list
# indented with the rest of the arguments.
def _split_line(name, arguments, width):
firstwidth = len(name)
k = firstwidth
newstr = name
sepstr = ", "
arglist = arguments.split(sepstr)
for argument in arglist:
if k == firstwidth:
addstr = ""
else:
addstr = sepstr
k = k + len(argument) + len(addstr)
if k > width:
k = firstwidth + 1 + len(argument)
newstr = newstr + ",\n" + " "*(firstwidth+2) + argument
else:
newstr = newstr + addstr + argument
return newstr
# Lazily-built caches used by info(): mapping of module name -> module
# __dict__, and the breadth-first traversal order of those names.
_namedict = None
_dictlist = None

# Traverse all module directories underneath globals
# to see if something is defined
def _makenamedict(module='numpy'):
module = __import__(module, globals(), locals(), [])
thedict = {module.__name__:module.__dict__}
dictlist = [module.__name__]
totraverse = [module.__dict__]
while 1:
if len(totraverse) == 0:
break
thisdict = totraverse.pop(0)
for x in thisdict.keys():
if isinstance(thisdict[x],types.ModuleType):
modname = thisdict[x].__name__
if modname not in dictlist:
moddict = thisdict[x].__dict__
dictlist.append(modname)
totraverse.append(moddict)
thedict[modname] = moddict
return thedict, dictlist
def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
    """
    Get help information for a function, class, or module.

    Parameters
    ----------
    object : optional
        Input object to get information about.
    maxwidth : int, optional
        Printing width.
    output : file like object open for writing, optional
        Write into file like object.
    toplevel : string, optional
        Start search at this level.

    Examples
    --------
    >>> np.info(np.polyval) # doctest: +SKIP

    polyval(p, x)

    Evaluate the polynomial p at x.

    ...

    """
    global _namedict, _dictlist
    # Local import to speed up numpy's import time.
    import pydoc, inspect

    # Unwrap lazily-imported (ppimport) proxy objects before inspecting.
    if hasattr(object, '_ppimport_importer') or \
       hasattr(object, '_ppimport_module'):
        object = object._ppimport_module
    elif hasattr(object, '_ppimport_attr'):
        object = object._ppimport_attr

    if object is None:
        # No argument: show help on info() itself.
        info(info)
    elif isinstance(object, ndarray):
        # Arrays are described by the numarray-compatible helper.
        import numpy.numarray as nn
        nn.info(object, output=output, numpy=1)
    elif isinstance(object, str):
        # A name: search every cached module namespace for it and print
        # help for each distinct object found.
        if _namedict is None:
            _namedict, _dictlist = _makenamedict(toplevel)
        numfound = 0
        objlist = []
        for namestr in _dictlist:
            try:
                obj = _namedict[namestr][object]
                if id(obj) in objlist:
                    print >> output, "\n *** Repeat reference found in %s *** " % namestr
                else:
                    objlist.append(id(obj))
                    print >> output, " *** Found in %s ***" % namestr
                    info(obj)
                    print >> output, "-"*maxwidth
                numfound += 1
            except KeyError:
                pass
        if numfound == 0:
            print >> output, "Help for %s not found." % object
        else:
            print >> output, "\n *** Total of %d references found. ***" % numfound
    elif inspect.isfunction(object):
        # Plain function: signature (wrapped if too long), then docstring.
        name = object.func_name
        arguments = inspect.formatargspec(*inspect.getargspec(object))
        if len(name+arguments) > maxwidth:
            argstr = _split_line(name, arguments, maxwidth)
        else:
            argstr = name + arguments
        print >> output, " " + argstr + "\n"
        print >> output, inspect.getdoc(object)
    elif inspect.isclass(object):
        # Class: use __init__'s signature with `self` stripped.
        name = object.__name__
        arguments = "()"
        try:
            if hasattr(object, '__init__'):
                arguments = inspect.formatargspec(*inspect.getargspec(object.__init__.im_func))
                arglist = arguments.split(', ')
                if len(arglist) > 1:
                    arglist[1] = "("+arglist[1]
                    arguments = ", ".join(arglist[1:])
        except:
            pass
        if len(name+arguments) > maxwidth:
            argstr = _split_line(name, arguments, maxwidth)
        else:
            argstr = name + arguments
        print >> output, " " + argstr + "\n"
        doc1 = inspect.getdoc(object)
        if doc1 is None:
            # Fall back to the constructor's docstring.
            if hasattr(object, '__init__'):
                print >> output, inspect.getdoc(object.__init__)
        else:
            print >> output, inspect.getdoc(object)
        # List public methods with their one-line summaries.
        methods = pydoc.allmethods(object)
        if methods != []:
            print >> output, "\n\nMethods:\n"
            for meth in methods:
                if meth[0] == '_':
                    continue
                thisobj = getattr(object, meth, None)
                if thisobj is not None:
                    methstr, other = pydoc.splitdoc(inspect.getdoc(thisobj) or "None")
                print >> output, " %s -- %s" % (meth, methstr)
    elif type(object) is types.InstanceType: ## check for __call__ method
        # Old-style class instance (Python 2 only).
        print >> output, "Instance of class: ", object.__class__.__name__
        print >> output
        if hasattr(object, '__call__'):
            arguments = inspect.formatargspec(*inspect.getargspec(object.__call__.im_func))
            arglist = arguments.split(', ')
            if len(arglist) > 1:
                arglist[1] = "("+arglist[1]
                arguments = ", ".join(arglist[1:])
            else:
                arguments = "()"
            if hasattr(object, 'name'):
                name = "%s" % object.name
            else:
                name = "<name>"
            if len(name+arguments) > maxwidth:
                argstr = _split_line(name, arguments, maxwidth)
            else:
                argstr = name + arguments
            print >> output, " " + argstr + "\n"
            doc = inspect.getdoc(object.__call__)
            if doc is not None:
                print >> output, inspect.getdoc(object.__call__)
            print >> output, inspect.getdoc(object)
        else:
            print >> output, inspect.getdoc(object)
    elif inspect.ismethod(object):
        # Bound/unbound method: strip `self` from the signature.
        name = object.__name__
        arguments = inspect.formatargspec(*inspect.getargspec(object.im_func))
        arglist = arguments.split(', ')
        if len(arglist) > 1:
            arglist[1] = "("+arglist[1]
            arguments = ", ".join(arglist[1:])
        else:
            arguments = "()"
        if len(name+arguments) > maxwidth:
            argstr = _split_line(name, arguments, maxwidth)
        else:
            argstr = name + arguments
        print >> output, " " + argstr + "\n"
        print >> output, inspect.getdoc(object)
    elif hasattr(object, '__doc__'):
        # Anything else: just show its docstring.
        print >> output, inspect.getdoc(object)
def source(object, output=sys.stdout):
    """
    Print or write to a file the source code for a Numpy object.

    Parameters
    ----------
    object : numpy object
        Input object.
    output : file object, optional
        If `output` not supplied then source code is printed to screen
        (sys.stdout).  File object must be created with either write 'w' or
        append 'a' modes.

    """
    # Local import to speed up numpy's import time.
    import inspect
    try:
        print >> output, "In file: %s\n" % inspect.getsourcefile(object)
        print >> output, inspect.getsource(object)
    except:
        # Builtins and C extensions have no retrievable Python source.
        print >> output, "Not available for this object."
# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}
# where kind: "func", "class", "module", "object"
# and index: index in breadth-first namespace traversal
_lookfor_caches = {}

# regexp whose match indicates that the string may contain a function
# signature (used to skip signature lines when picking a short summary)
_function_signature_re = re.compile(r"[a-z_]+\(.*[,=].*\)", re.I)
def lookfor(what, module=None, import_modules=True, regenerate=False):
    """
    Do a keyword search on docstrings.

    A list of objects that matched the search is displayed,
    sorted by relevance.

    Parameters
    ----------
    what : str
        String containing words to look for.
    module : str, module
        Module whose docstrings to go through.
    import_modules : bool
        Whether to import sub-modules in packages.
        Will import only modules in ``__all__``.
    regenerate : bool
        Whether to re-generate the docstring cache.

    Examples
    --------
    >>> np.lookfor('binary representation')
    Search results for 'binary representation'
    ------------------------------------------
    numpy.binary_repr
        Return the binary representation of the input number as a string.

    """
    import pydoc

    # Cache
    cache = _lookfor_generate_cache(module, import_modules, regenerate)

    # Search
    # XXX: maybe using a real stemming search engine would be better?
    # Keep only names whose docstring contains every keyword.
    found = []
    whats = str(what).lower().split()
    if not whats: return

    for name, (docstring, kind, index) in cache.iteritems():
        if kind in ('module', 'object'):
            # don't show modules or objects
            continue
        ok = True
        doc = docstring.lower()
        for w in whats:
            if w not in doc:
                ok = False
                break
        if ok:
            found.append(name)

    # Relevance sort
    # XXX: this is full Harrison-Stetson heuristics now,
    # XXX: it probably could be improved
    kind_relevance = {'func': 1000, 'class': 1000,
                      'module': -1000, 'object': -1000}

    def relevance(name, docstr, kind, index):
        # Heuristic score: keywords early in the docstring or in the name
        # are good; long/deep names, non-callables and late discovery are
        # penalized.
        r = 0
        # do the keywords occur within the start of the docstring?
        first_doc = "\n".join(docstr.lower().strip().split("\n")[:3])
        r += sum([200 for w in whats if w in first_doc])
        # do the keywords occur in the function name?
        r += sum([30 for w in whats if w in name])
        # is the full name long?
        r += -len(name) * 5
        # is the object of bad type?
        r += kind_relevance.get(kind, -1000)
        # is the object deep in namespace hierarchy?
        r += -name.count('.') * 10
        r += max(-index / 100, -100)
        return r

    def relevance_sort(a, b):
        # Highest relevance first; ties broken alphabetically.
        dr = relevance(b, *cache[b]) - relevance(a, *cache[a])
        if dr != 0: return dr
        else: return cmp(a, b)
    found.sort(relevance_sort)

    # Pretty-print
    s = "Search results for '%s'" % (' '.join(whats))
    help_text = [s, "-"*len(s)]
    for name in found:
        doc, kind, ix = cache[name]
        doclines = [line.strip() for line in doc.strip().split("\n")
                    if line.strip()]
        # find a suitable short description
        try:
            first_doc = doclines[0].strip()
            if _function_signature_re.search(first_doc):
                first_doc = doclines[1].strip()
        except IndexError:
            first_doc = ""
        help_text.append("%s\n %s" % (name, first_doc))

    # Output: page long result lists, print short ones directly.
    if len(help_text) > 10:
        pager = pydoc.getpager()
        pager("\n".join(help_text))
    else:
        print "\n".join(help_text)
def _lookfor_generate_cache(module, import_modules, regenerate):
    """
    Generate docstring cache for given module.

    Parameters
    ----------
    module : str, None, module
        Module for which to generate docstring cache
    import_modules : bool
        Whether to import sub-modules in packages.
        Will import only modules in __all__
    regenerate: bool
        Re-generate the docstring cache

    Returns
    -------
    cache : dict {obj_full_name: (docstring, kind, index), ...}
        Docstring cache for the module, either cached one (regenerate=False)
        or newly generated.

    """
    global _lookfor_caches
    # Local import to speed up numpy's import time.
    import inspect

    if module is None:
        module = "numpy"
    if isinstance(module, str):
        module = __import__(module)

    if id(module) in _lookfor_caches and not regenerate:
        return _lookfor_caches[id(module)]

    # walk items and collect docstrings
    cache = {}
    _lookfor_caches[id(module)] = cache
    seen = {}
    index = 0
    # Breadth-first traversal of modules and classes starting at `module`.
    stack = [(module.__name__, module)]
    while stack:
        name, item = stack.pop(0)
        if id(item) in seen: continue
        seen[id(item)] = True

        index += 1
        kind = "object"

        if inspect.ismodule(item):
            kind = "module"
            try:
                _all = item.__all__
            except AttributeError:
                _all = None
            # import sub-packages
            if import_modules and hasattr(item, '__path__'):
                for pth in item.__path__:
                    for mod_path in os.listdir(pth):
                        init_py = os.path.join(pth, mod_path, '__init__.py')
                        # Only real sub-packages, restricted to __all__.
                        if not os.path.isfile(init_py):
                            continue
                        if _all is not None and mod_path not in _all:
                            continue
                        try:
                            __import__("%s.%s" % (name, mod_path))
                        except ImportError:
                            continue
            for n, v in inspect.getmembers(item):
                if _all is not None and n not in _all:
                    continue
                stack.append(("%s.%s" % (name, n), v))
        elif inspect.isclass(item):
            kind = "class"
            for n, v in inspect.getmembers(item):
                stack.append(("%s.%s" % (name, n), v))
        elif callable(item):
            kind = "func"

        doc = inspect.getdoc(item)
        if doc is not None:
            cache[name] = (doc, kind, index)

    return cache
#-----------------------------------------------------------------------------
# The following SafeEval class and company are adapted from Michael Spencer's
# ASPN Python Cookbook recipe:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/364469
# Accordingly it is mostly Copyright 2006 by Michael Spencer.
# The recipe, like most of the other ASPN Python Cookbook recipes was made
# available under the Python license.
# http://www.python.org/license
# It has been modified to:
# * handle unary -/+
# * support True/False/None
# * raise SyntaxError instead of a custom exception.
class SafeEval(object):
    """
    Visitor for the (Python 2) ``compiler`` module's AST that evaluates
    literal expressions only: constants, dicts/tuples/lists, unary +/-,
    and the names True/False/None.  Any other construct raises
    SyntaxError.
    """

    def visit(self, node, **kw):
        # Dispatch to visit<NodeClassName>, falling back to `default`.
        cls = node.__class__
        meth = getattr(self, 'visit'+cls.__name__, self.default)
        return meth(node, **kw)

    def default(self, node, **kw):
        # Any node without an explicit handler is not a safe literal.
        raise SyntaxError("Unsupported source construct: %s" % node.__class__)

    def visitExpression(self, node, **kw):
        # An Expression node wraps a single child; evaluate it.
        for child in node.getChildNodes():
            return self.visit(child, **kw)

    def visitConst(self, node, **kw):
        return node.value

    def visitDict(self, node, **kw):
        return dict([(self.visit(k), self.visit(v)) for k, v in node.items])

    def visitTuple(self, node, **kw):
        return tuple([self.visit(i) for i in node.nodes])

    def visitList(self, node, **kw):
        return [self.visit(i) for i in node.nodes]

    def visitUnaryAdd(self, node, **kw):
        return +self.visit(node.getChildNodes()[0])

    def visitUnarySub(self, node, **kw):
        return -self.visit(node.getChildNodes()[0])

    def visitName(self, node, **kw):
        # Only the three literal-like names are permitted.
        if node.name == 'False':
            return False
        elif node.name == 'True':
            return True
        elif node.name == 'None':
            return None
        else:
            raise SyntaxError("Unknown name: %s" % node.name)
def safe_eval(source):
    """
    Protected string evaluation.

    Evaluate a string containing a Python literal expression without
    allowing the execution of arbitrary non-literal code.

    Parameters
    ----------
    source : str

    Returns
    -------
    obj : object

    Raises
    ------
    SyntaxError
        If the code has invalid Python syntax, or if it contains
        non-literal code.

    Examples
    --------
    >>> from numpy.lib.utils import safe_eval
    >>> safe_eval('1')
    1
    >>> safe_eval('[1, 2, 3]')
    [1, 2, 3]
    >>> safe_eval('{"foo": ("bar", 10.0)}')
    {'foo': ('bar', 10.0)}
    >>> safe_eval('import os')
    Traceback (most recent call last):
      ...
    SyntaxError: invalid syntax
    >>> safe_eval('open("/home/user/.ssh/id_dsa").read()')
    Traceback (most recent call last):
      ...
    SyntaxError: Unsupported source construct: compiler.ast.CallFunc
    >>> safe_eval('dict')
    Traceback (most recent call last):
      ...
    SyntaxError: Unknown name: dict

    """
    # Local import to speed up numpy's import time.
    import compiler
    walker = SafeEval()
    # Parse in 'eval' mode (a single expression), then walk the AST with
    # the literal-only visitor; both steps surface SyntaxError unchanged.
    try:
        ast = compiler.parse(source, "eval")
    except SyntaxError, err:
        raise
    try:
        return walker.visit(ast)
    except SyntaxError, err:
        raise
#-----------------------------------------------------------------------------
| houseind/robothon | GlyphProofer/dist/GlyphProofer.app/Contents/Resources/lib/python2.6/numpy/lib/utils.py | Python | mit | 26,219 | [
"VisIt"
] | 9a50e8ac25493281f8ee93dac905475bab0ce463a55c5088b6d2bd2825de893e |
#!/usr/bin/env python
"""
"""
import vtk
def main():
    """Compare median and hybrid-median filtering of a noisy image.

    Reads an image file (name from the command line), corrupts a copy with
    shot noise, then displays four synchronized views in a 2x2 grid:
    original, noisy, twice hybrid-median filtered, and 5x5x1 median filtered.
    """
    # colors = vtk.vtkNamedColors()
    fileName = get_program_parameters()
    # Read the image.
    readerFactory = vtk.vtkImageReader2Factory()
    reader = readerFactory.CreateImageReader2(fileName)
    reader.SetFileName(fileName)
    reader.Update()
    # Scalar range of the input, used later to derive window/level.
    scalarRange = [0] * 2
    scalarRange[0] = reader.GetOutput().GetPointData().GetScalars().GetRange()[0]
    scalarRange[1] = reader.GetOutput().GetPointData().GetScalars().GetRange()[1]
    print("Range:", scalarRange)
    # Index of the middle slice along the z extent (integer division).
    middleSlice = (reader.GetOutput().GetExtent()[5] - reader.GetOutput().GetExtent()[4]) // 2
    # Work with double images.
    cast = vtk.vtkImageCast()
    cast.SetInputConnection(reader.GetOutputPort())
    cast.SetOutputScalarTypeToDouble()
    cast.Update()
    originalData = vtk.vtkImageData()
    originalData.DeepCopy(cast.GetOutput())
    # Corrupt a copy of the original with shot noise (amplitude 2000, 10%).
    noisyData = vtk.vtkImageData()
    AddShotNoise(originalData, noisyData, 2000.0, 0.1, reader.GetOutput().GetExtent())
    median = vtk.vtkImageMedian3D()
    median.SetInputData(noisyData)
    median.SetKernelSize(5, 5, 1)
    # The hybrid-median filter is applied twice, chained back to back.
    hybridMedian1 = vtk.vtkImageHybridMedian2D()
    hybridMedian1.SetInputData(noisyData)
    hybridMedian = vtk.vtkImageHybridMedian2D()
    hybridMedian.SetInputConnection(hybridMedian1.GetOutputPort())
    colorWindow = (scalarRange[1] - scalarRange[0]) * 0.8
    colorLevel = colorWindow / 2
    originalActor = vtk.vtkImageActor()
    originalActor.GetMapper().SetInputData(originalData)
    originalActor.GetProperty().SetColorWindow(colorWindow)
    originalActor.GetProperty().SetColorLevel(colorLevel)
    originalActor.GetProperty().SetInterpolationTypeToNearest()
    # All actors show the same (middle) slice.
    originalActor.SetDisplayExtent(reader.GetDataExtent()[0], reader.GetDataExtent()[1], reader.GetDataExtent()[2],
                                   reader.GetDataExtent()[3], middleSlice, middleSlice)
    noisyActor = vtk.vtkImageActor()
    noisyActor.GetMapper().SetInputData(noisyData)
    noisyActor.GetProperty().SetColorWindow(colorWindow)
    noisyActor.GetProperty().SetColorLevel(colorLevel)
    noisyActor.GetProperty().SetInterpolationTypeToNearest()
    noisyActor.SetDisplayExtent(originalActor.GetDisplayExtent())
    hybridMedianActor = vtk.vtkImageActor()
    hybridMedianActor.GetMapper().SetInputConnection(hybridMedian.GetOutputPort())
    hybridMedianActor.GetProperty().SetColorWindow(colorWindow)
    hybridMedianActor.GetProperty().SetColorLevel(colorLevel)
    hybridMedianActor.GetProperty().SetInterpolationTypeToNearest()
    hybridMedianActor.SetDisplayExtent(originalActor.GetDisplayExtent())
    medianActor = vtk.vtkImageActor()
    medianActor.GetMapper().SetInputConnection(median.GetOutputPort())
    medianActor.GetProperty().SetColorWindow(colorWindow)
    medianActor.GetProperty().SetColorLevel(colorLevel)
    medianActor.GetProperty().SetInterpolationTypeToNearest()
    # Setup the renderers.
    originalRenderer = vtk.vtkRenderer()
    originalRenderer.AddActor(originalActor)
    noisyRenderer = vtk.vtkRenderer()
    noisyRenderer.AddActor(noisyActor)
    hybridRenderer = vtk.vtkRenderer()
    hybridRenderer.AddActor(hybridMedianActor)
    medianRenderer = vtk.vtkRenderer()
    medianRenderer.AddActor(medianActor)
    renderers = list()
    renderers.append(originalRenderer)
    renderers.append(noisyRenderer)
    renderers.append(hybridRenderer)
    renderers.append(medianRenderer)
    # Setup viewports for the renderers: a 2x2 grid of 400px cells.
    rendererSize = 400
    xGridDimensions = 2
    yGridDimensions = 2
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.SetSize(
        rendererSize * xGridDimensions, rendererSize * yGridDimensions)
    for row in range(0, yGridDimensions):
        for col in range(xGridDimensions):
            index = row * xGridDimensions + col
            # (xmin, ymin, xmax, ymax)
            viewport = [float(col) / xGridDimensions, float(yGridDimensions - (row + 1)) / yGridDimensions,
                        float(col + 1) / xGridDimensions, float(yGridDimensions - row) / yGridDimensions]
            renderers[index].SetViewport(viewport)
            renderWindow.AddRenderer(renderers[index])
    renderWindowInteractor = vtk.vtkRenderWindowInteractor()
    style = vtk.vtkInteractorStyleImage()
    renderWindowInteractor.SetInteractorStyle(style)
    renderWindowInteractor.SetRenderWindow(renderWindow)
    # The renderers share one camera.
    renderWindow.Render()
    for r in range(1, len(renderers)):
        renderers[r].SetActiveCamera(renderers[0].GetActiveCamera())
    renderWindowInteractor.Initialize()
    renderWindowInteractor.Start()
def get_program_parameters():
    """Parse the command line and return the required image file name."""
    import argparse
    description = 'Comparison of median and hybrid-median filters.'
    epilogue = '''
    The hybrid filter preserves corners and thin lines, better than the median filter.
    '''
    parser = argparse.ArgumentParser(description=description, epilog=epilogue,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('filename', help='TestPattern.png.')
    return parser.parse_args().filename
def AddShotNoise(inputImage, outputImage, noiseAmplitude, noiseFraction, extent):
    """Add shot (salt-and-pepper style) noise to inputImage into outputImage.

    A uniform random field over ``extent`` is thresholded twice: one pass
    contributes +noiseAmplitude for the top ``noiseFraction`` of random
    values, the other contributes (1 - noiseAmplitude) for the bottom
    ``noiseFraction``; both are summed onto the input image.
    """
    noise_field = vtk.vtkImageNoiseSource()
    noise_field.SetWholeExtent(extent)
    noise_field.SetMinimum(0.0)
    noise_field.SetMaximum(1.0)
    # Positive spikes: random values above (1 - noiseFraction).
    upper_spikes = vtk.vtkImageThreshold()
    upper_spikes.SetInputConnection(noise_field.GetOutputPort())
    upper_spikes.ThresholdByLower(1.0 - noiseFraction)
    upper_spikes.SetInValue(0)
    upper_spikes.SetOutValue(noiseAmplitude)
    # Opposite spikes: random values below noiseFraction.
    lower_spikes = vtk.vtkImageThreshold()
    lower_spikes.SetInputConnection(noise_field.GetOutputPort())
    lower_spikes.ThresholdByLower(noiseFraction)
    lower_spikes.SetInValue(1.0 - noiseAmplitude)
    lower_spikes.SetOutValue(0.0)
    # Combine both spike images into one noise image.
    combined_noise = vtk.vtkImageMathematics()
    combined_noise.SetInputConnection(0, upper_spikes.GetOutputPort())
    combined_noise.SetInputConnection(1, lower_spikes.GetOutputPort())
    combined_noise.SetOperationToAdd()
    # Add the noise image onto the input and copy the result out.
    summed = vtk.vtkImageMathematics()
    summed.SetInputData(0, inputImage)
    summed.SetInputConnection(1, combined_noise.GetOutputPort())
    summed.SetOperationToAdd()
    summed.Update()
    outputImage.DeepCopy(summed.GetOutput())
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
| lorensen/VTKExamples | src/Python/ImageProcessing/HybridMedianComparison.py | Python | apache-2.0 | 6,417 | [
"VTK"
] | 7146f5c1d169e5f22e6d372b4b8e370c76acf652f2ee7a3f281794f381c4079a |
"""
Instructor Dashboard Views
"""
import logging
import datetime
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import uuid
import pytz
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.http import Http404, HttpResponseServerError
from django.conf import settings
from util.json_request import JsonResponse
from mock import patch
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from openedx.core.lib.xblock_utils import wrap_xblock
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR
from student.models import CourseEnrollment
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegCodeItem
from course_modes.models import CourseMode, CourseModesArchive
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole
from certificates.models import (
CertificateGenerationConfiguration,
CertificateWhitelist,
GeneratedCertificate,
CertificateStatuses,
CertificateGenerationHistory,
CertificateInvalidation,
)
from certificates import api as certs_api
from util.date_utils import get_default_time_display
from class_dashboard.dashboard_data import get_section_display_name, get_array_section_has_problem
from .tools import get_units_with_due_date, title_or_url, bulk_email_is_enabled_for_course
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
class InstructorDashboardTab(CourseTab):
    """
    Defines the Instructor Dashboard view type that is shown as a course tab.
    """
    type = "instructor"
    title = ugettext_noop('Instructor')
    view_name = "instructor_dashboard"
    # The "Instructor" tab is instead dynamically added when it is enabled.
    is_dynamic = True

    @classmethod
    def is_enabled(cls, course, user=None):
        """
        Returns true if the specified user has staff access.
        """
        if not user:
            return False
        return bool(has_access(user, 'staff', course, course.id))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
    """ Display the instructor dashboard for a course.

    Builds the list of dashboard sections the requesting user may see
    (based on staff/instructor/finance/sales/forum roles) and renders the
    instructor_dashboard_2 template. Raises Http404 for non-staff users.
    """
    try:
        course_key = CourseKey.from_string(course_id)
    except InvalidKeyError:
        log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
        return HttpResponseServerError()
    course = get_course_by_id(course_key, depth=0)
    # Role flags that gate individual dashboard sections below.
    access = {
        'admin': request.user.is_staff,
        'instructor': bool(has_access(request.user, 'instructor', course)),
        'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
        'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
        'staff': bool(has_access(request.user, 'staff', course)),
        'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
    }
    if not access['staff']:
        raise Http404()
    is_white_label = CourseMode.is_white_label(course_key)
    # Sections every staff member sees; optional sections are appended below.
    sections = [
        _section_course_info(course, access),
        _section_membership(course, access, is_white_label),
        _section_cohort_management(course, access),
        _section_student_admin(course, access),
        _section_data_download(course, access),
    ]
    analytics_dashboard_message = None
    if settings.ANALYTICS_DASHBOARD_URL:
        # Construct a URL to the external analytics dashboard
        analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
        link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
        analytics_dashboard_message = _(
            "To gain insights into student enrollment and participation {link_start}"
            "visit {analytics_dashboard_name}, our new course analytics product{link_end}."
        )
        analytics_dashboard_message = analytics_dashboard_message.format(
            link_start=link_start, link_end="</a>", analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
        # Temporarily show the "Analytics" section until we have a better way of linking to Insights
        sections.append(_section_analytics(course, access))
    # Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
    course_mode_has_price = False
    paid_modes = CourseMode.paid_modes_for_course(course_key)
    if len(paid_modes) == 1:
        course_mode_has_price = True
    elif len(paid_modes) > 1:
        # More than one paid mode is a misconfiguration; eCommerce stays off.
        log.error(
            u"Course %s has %s course modes with payment options. Course must only have "
            u"one paid course mode to enable eCommerce options.",
            unicode(course_key), len(paid_modes)
        )
    if settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']:
        sections.insert(3, _section_extensions(course))
    # Gate access to course email by feature flag & by course-specific authorization
    if bulk_email_is_enabled_for_course(course_key):
        sections.append(_section_send_email(course, access))
    # Gate access to Metrics tab by feature flag and staff authorization
    if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
        sections.append(_section_metrics(course, access))
    # Gate access to Ecommerce tab
    if course_mode_has_price and (access['finance_admin'] or access['sales_admin']):
        sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label, is_white_label))
    # Gate access to Special Exam tab depending if either timed exams or proctored exams
    # are enabled in the course
    # NOTE: For now, if we only have procotred exams enabled, then only platform Staff
    # (user.is_staff) will be able to view the special exams tab. This may
    # change in the future
    can_see_special_exams = (
        ((course.enable_proctored_exams and request.user.is_staff) or course.enable_timed_exams) and
        settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False)
    )
    if can_see_special_exams:
        sections.append(_section_special_exams(course, access))
    # Certificates panel
    # This is used to generate example certificates
    # and enable self-generated certificates for a course.
    certs_enabled = CertificateGenerationConfiguration.current().enabled
    if certs_enabled and access['admin']:
        sections.append(_section_certificates(course))
    # Instructor buttons are disabled for "large" courses to avoid slow bulk operations.
    disable_buttons = not _is_small_course(course_key)
    certificate_white_list = CertificateWhitelist.get_certificate_white_list(course_key)
    generate_certificate_exceptions_url = reverse(  # pylint: disable=invalid-name
        'generate_certificate_exceptions',
        kwargs={'course_id': unicode(course_key), 'generate_for': ''}
    )
    generate_bulk_certificate_exceptions_url = reverse(  # pylint: disable=invalid-name
        'generate_bulk_certificate_exceptions',
        kwargs={'course_id': unicode(course_key)}
    )
    certificate_exception_view_url = reverse(
        'certificate_exception_view',
        kwargs={'course_id': unicode(course_key)}
    )
    certificate_invalidation_view_url = reverse(  # pylint: disable=invalid-name
        'certificate_invalidation_view',
        kwargs={'course_id': unicode(course_key)}
    )
    certificate_invalidations = CertificateInvalidation.get_certificate_invalidations(course_key)
    context = {
        'course': course,
        'studio_url': get_studio_url(course, 'course'),
        'sections': sections,
        'disable_buttons': disable_buttons,
        'analytics_dashboard_message': analytics_dashboard_message,
        'certificate_white_list': certificate_white_list,
        'certificate_invalidations': certificate_invalidations,
        'generate_certificate_exceptions_url': generate_certificate_exceptions_url,
        'generate_bulk_certificate_exceptions_url': generate_bulk_certificate_exceptions_url,
        'certificate_exception_view_url': certificate_exception_view_url,
        'certificate_invalidation_view_url': certificate_invalidation_view_url,
    }
    return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
def _section_e_commerce(course, access, paid_mode, coupons_enabled, reports_enabled):
    """ Provide data for the corresponding dashboard section.

    Returns the template context for the E-Commerce tab: coupon list,
    course price from the single paid mode, total sales (finance admins
    only), and the AJAX/report endpoint URLs the tab's JavaScript uses.
    """
    course_key = course.id
    coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
    course_price = paid_mode.min_price
    total_amount = None
    # Revenue totals are only computed for finance admins (see access dict).
    if access['finance_admin']:
        single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
        bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_key)
        total_amount = single_purchase_total + bulk_purchase_total
    section_data = {
        'section_key': 'e-commerce',
        'section_display_name': _('E-Commerce'),
        'access': access,
        'course_id': unicode(course_key),
        'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
        'ajax_remove_coupon_url': reverse('remove_coupon', kwargs={'course_id': unicode(course_key)}),
        'ajax_get_coupon_info': reverse('get_coupon_info', kwargs={'course_id': unicode(course_key)}),
        'get_user_invoice_preference_url': reverse('get_user_invoice_preference', kwargs={'course_id': unicode(course_key)}),
        'sale_validation_url': reverse('sale_validation', kwargs={'course_id': unicode(course_key)}),
        'ajax_update_coupon': reverse('update_coupon', kwargs={'course_id': unicode(course_key)}),
        'ajax_add_coupon': reverse('add_coupon', kwargs={'course_id': unicode(course_key)}),
        'get_sale_records_url': reverse('get_sale_records', kwargs={'course_id': unicode(course_key)}),
        'get_sale_order_records_url': reverse('get_sale_order_records', kwargs={'course_id': unicode(course_key)}),
        'instructor_url': reverse('instructor_dashboard', kwargs={'course_id': unicode(course_key)}),
        'get_registration_code_csv_url': reverse('get_registration_codes', kwargs={'course_id': unicode(course_key)}),
        'generate_registration_code_csv_url': reverse('generate_registration_codes', kwargs={'course_id': unicode(course_key)}),
        'active_registration_code_csv_url': reverse('active_registration_codes', kwargs={'course_id': unicode(course_key)}),
        'spent_registration_code_csv_url': reverse('spent_registration_codes', kwargs={'course_id': unicode(course_key)}),
        'set_course_mode_url': reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)}),
        'download_coupon_codes_url': reverse('get_coupon_codes', kwargs={'course_id': unicode(course_key)}),
        'enrollment_report_url': reverse('get_enrollment_report', kwargs={'course_id': unicode(course_key)}),
        'exec_summary_report_url': reverse('get_exec_summary_report', kwargs={'course_id': unicode(course_key)}),
        'list_financial_report_downloads_url': reverse('list_financial_report_downloads',
                                                       kwargs={'course_id': unicode(course_key)}),
        'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
        'look_up_registration_code': reverse('look_up_registration_code', kwargs={'course_id': unicode(course_key)}),
        'coupons': coupons,
        'sales_admin': access['sales_admin'],
        'coupons_enabled': coupons_enabled,
        'reports_enabled': reports_enabled,
        'course_price': course_price,
        'total_amount': total_amount
    }
    return section_data
def _section_special_exams(course, access):
    """ Provide data for the corresponding dashboard section """
    return {
        'section_key': 'special_exams',
        'section_display_name': _('Special Exams'),
        'access': access,
        'course_id': unicode(course.id)
    }
def _section_certificates(course):
    """Section information for the certificates panel.
    The certificates panel allows global staff to generate
    example certificates and enable self-generated certificates
    for a course.
    Arguments:
        course (Course)
    Returns:
        dict
    """
    example_cert_status = None
    html_cert_enabled = certs_api.has_html_certificates_enabled(course.id, course)
    if html_cert_enabled:
        # Web (HTML) certificates need no example-PDF validation step.
        can_enable_for_course = True
    else:
        example_cert_status = certs_api.example_certificates_status(course.id)
        # Allow the user to enable self-generated certificates for students
        # *only* once a set of example certificates has been successfully generated.
        # If certificates have been misconfigured for the course (for example, if
        # the PDF template hasn't been uploaded yet), then we don't want
        # to turn on self-generated certificates for students!
        can_enable_for_course = (
            example_cert_status is not None and
            all(
                cert_status['status'] == 'success'
                for cert_status in example_cert_status
            )
        )
    instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)
    # Map each certificate status present in this course to its count.
    certificate_statuses_with_count = {
        certificate['status']: certificate['count']
        for certificate in GeneratedCertificate.get_unique_statuses(course_key=course.id)
    }
    return {
        'section_key': 'certificates',
        'section_display_name': _('Certificates'),
        'example_certificate_status': example_cert_status,
        'can_enable_for_course': can_enable_for_course,
        'enabled_for_course': certs_api.cert_generation_enabled(course.id),
        'instructor_generation_enabled': instructor_generation_enabled,
        'html_cert_enabled': html_cert_enabled,
        'active_certificate': certs_api.get_active_web_certificate(course),
        'certificate_statuses_with_count': certificate_statuses_with_count,
        'status': CertificateStatuses,
        'certificate_generation_history':
            CertificateGenerationHistory.objects.filter(course_id=course.id).order_by("-created"),
        'urls': {
            'generate_example_certificates': reverse(
                'generate_example_certificates',
                kwargs={'course_id': course.id}
            ),
            'enable_certificate_generation': reverse(
                'enable_certificate_generation',
                kwargs={'course_id': course.id}
            ),
            'start_certificate_generation': reverse(
                'start_certificate_generation',
                kwargs={'course_id': course.id}
            ),
            'start_certificate_regeneration': reverse(
                'start_certificate_regeneration',
                kwargs={'course_id': course.id}
            ),
            'list_instructor_tasks_url': reverse(
                'list_instructor_tasks',
                kwargs={'course_id': course.id}
            ),
        }
    }
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
    """
    set the new course price and add new entry in the CourseModesArchive Table

    Expects POST params 'course_price' (integer) and 'currency'. Archives the
    current honor-mode pricing before applying the new values. Returns a JSON
    message; 400 on a non-numeric price or a missing honor mode.
    """
    try:
        course_price = int(request.POST['course_price'])
    except ValueError:
        return JsonResponse(
            {'message': _("Please Enter the numeric value for the course price")},
            status=400)  # status code 400: Bad Request
    currency = request.POST['currency']
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
    if not course_honor_mode:
        return JsonResponse(
            {'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
            status=400)  # status code 400: Bad Request
    # Snapshot the outgoing price/currency before overwriting it.
    CourseModesArchive.objects.create(
        course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
        min_price=course_honor_mode[0].min_price, currency=course_honor_mode[0].currency,
        expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
    )
    course_honor_mode.update(
        min_price=course_price,
        currency=currency
    )
    return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_course_info(course, access):
    """ Provide data for the corresponding dashboard section.

    Returns the Course Info tab context: display name, start/end dates,
    grade cutoffs, optional enrollment counts, and any course import errors.
    """
    course_key = course.id
    section_data = {
        'section_key': 'course_info',
        'section_display_name': _('Course Info'),
        'access': access,
        'course_id': course_key,
        'course_display_name': course.display_name,
        'has_started': course.has_started(),
        'has_ended': course.has_ended(),
        'start_date': get_default_time_display(course.start),
        'end_date': get_default_time_display(course.end) or _('No end date set'),
        'num_sections': len(course.children),
        'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
    }
    if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
        section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
    if settings.ANALYTICS_DASHBOARD_URL:
        dashboard_link = _get_dashboard_link(course_key)
        message = _("Enrollment data is now available in {dashboard_link}.").format(dashboard_link=dashboard_link)
        section_data['enrollment_message'] = message
    if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
        section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})
    try:
        # Render cutoffs as "A: 0.9, B: 0.8, ..." (highest first); the
        # reduce prepends, so sort ascending then fold. NOTE: the lambda uses
        # Python 2-only tuple-parameter unpacking.
        sorted_cutoffs = sorted(course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True)
        advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
        section_data['grade_cutoffs'] = reduce(advance, sorted_cutoffs, "")[:-2]
    except Exception:  # pylint: disable=broad-except
        section_data['grade_cutoffs'] = "Not Available"
    # section_data['offline_grades'] = offline_grades_available(course_key)
    try:
        section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
    except Exception:  # pylint: disable=broad-except
        section_data['course_errors'] = [('Error fetching errors', '')]
    return section_data
def _section_membership(course, access, is_white_label):
    """ Provide data for the corresponding dashboard section.

    Returns the Membership tab context: enrollment/unenrollment, beta-tester,
    course-role and forum-role management endpoint URLs, plus CCX/white-label
    feature flags.
    """
    course_key = course.id
    ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
    section_data = {
        'section_key': 'membership',
        'section_display_name': _('Membership'),
        'access': access,
        'ccx_is_enabled': ccx_enabled,
        'is_white_label': is_white_label,
        'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
        'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
        'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs={'course_id': unicode(course_key)}),
        'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': unicode(course_key)}),
        'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': unicode(course_key)}),
        'modify_access_url': reverse('modify_access', kwargs={'course_id': unicode(course_key)}),
        'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': unicode(course_key)}),
        'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': unicode(course_key)}),
    }
    return section_data
def _section_cohort_management(course, access):
    """ Provide data for the corresponding cohort management section.

    Returns the Cohorts tab context: settings, cohort listing, CSV upload
    and discussion-topic endpoint URLs.
    """
    course_key = course.id
    section_data = {
        'section_key': 'cohort_management',
        'section_display_name': _('Cohorts'),
        'access': access,
        'course_cohort_settings_url': reverse(
            'course_cohort_settings',
            kwargs={'course_key_string': unicode(course_key)}
        ),
        'cohorts_url': reverse('cohorts', kwargs={'course_key_string': unicode(course_key)}),
        'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': unicode(course_key)}),
        'discussion_topics_url': reverse('cohort_discussion_topics', kwargs={'course_key_string': unicode(course_key)}),
    }
    return section_data
def _is_small_course(course_key):
    """ Compares against MAX_ENROLLMENT_INSTR_BUTTONS to determine if course enrollment is considered small. """
    enrollment_count = CourseEnrollment.objects.num_enrolled_in(course_key)
    max_enrollment_for_buttons = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
    if max_enrollment_for_buttons is None:
        # No limit configured: never treated as small.
        return False
    return enrollment_count <= max_enrollment_for_buttons
def _section_student_admin(course, access):
    """ Provide data for the corresponding dashboard section.

    Returns the Student Admin tab context: per-student progress, enrollment,
    attempt-reset, rescore and entrance-exam management endpoint URLs, plus
    the small-course flag that enables the bulk-action buttons.
    """
    course_key = course.id
    is_small_course = _is_small_course(course_key)
    section_data = {
        'section_key': 'student_admin',
        'section_display_name': _('Student Admin'),
        'access': access,
        'is_small_course': is_small_course,
        'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': unicode(course_key)}),
        'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
        'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': unicode(course_key)}),
        'reset_student_attempts_for_entrance_exam_url': reverse(
            'reset_student_attempts_for_entrance_exam',
            kwargs={'course_id': unicode(course_key)},
        ),
        'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': unicode(course_key)}),
        'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': unicode(course_key)}),
        'student_can_skip_entrance_exam_url': reverse(
            'mark_student_can_skip_entrance_exam',
            kwargs={'course_id': unicode(course_key)},
        ),
        'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
        'list_entrace_exam_instructor_tasks_url': reverse('list_entrance_exam_instructor_tasks',
                                                          kwargs={'course_id': unicode(course_key)}),
        'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
    }
    return section_data
def _section_extensions(course):
    """ Provide data for the corresponding dashboard section.

    Returns the Extensions tab context: (title, location) pairs for every
    unit with a due date, plus the due-date change/reset/report endpoint URLs.
    """
    section_data = {
        'section_key': 'extensions',
        'section_display_name': _('Extensions'),
        'units_with_due_dates': [(title_or_url(unit), unicode(unit.location))
                                 for unit in get_units_with_due_date(course)],
        'change_due_date_url': reverse('change_due_date', kwargs={'course_id': unicode(course.id)}),
        'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': unicode(course.id)}),
        'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': unicode(course.id)}),
        'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': unicode(course.id)}),
    }
    return section_data
def _section_data_download(course, access):
    """ Provide data for the corresponding dashboard section.

    Returns the Data Download tab context: report-generation endpoint URLs
    (grades, enrollment features, certificates, ORA2, surveys, proctored
    exams) and the flags controlling which report buttons are shown.
    """
    course_key = course.id
    # The proctored-exam report button requires both the platform feature
    # flag and the course-level setting.
    show_proctored_report_button = (
        settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
        course.enable_proctored_exams
    )
    section_data = {
        'section_key': 'data_download',
        'section_display_name': _('Data Download'),
        'access': access,
        'show_generate_proctored_exam_report_button': show_proctored_report_button,
        'get_problem_responses_url': reverse('get_problem_responses', kwargs={'course_id': unicode(course_key)}),
        'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': unicode(course_key)}),
        'get_students_features_url': reverse('get_students_features', kwargs={'course_id': unicode(course_key)}),
        'get_issued_certificates_url': reverse(
            'get_issued_certificates', kwargs={'course_id': unicode(course_key)}
        ),
        'get_students_who_may_enroll_url': reverse(
            'get_students_who_may_enroll', kwargs={'course_id': unicode(course_key)}
        ),
        'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': unicode(course_key)}),
        'list_proctored_results_url': reverse('get_proctored_exam_results', kwargs={'course_id': unicode(course_key)}),
        'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
        'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
        'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}),
        'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
        # bool(...) replaces the redundant "True if ... else False" idiom.
        'course_has_survey': bool(course.course_survey_name),
        'course_survey_results_url': reverse('get_course_survey_results', kwargs={'course_id': unicode(course_key)}),
        'export_ora2_data_url': reverse('export_ora2_data', kwargs={'course_id': unicode(course_key)}),
    }
    return section_data
def null_applicable_aside_types(block):  # pylint: disable=unused-argument
    """
    Stand-in for ``applicable_aside_types`` used while rendering an
    HtmlDescriptor for email text editing: reports that no asides apply.
    """
    return list()
def _section_send_email(course, access):
    """ Provide data for the corresponding bulk email section.

    Renders an HtmlDescriptor's studio view (asides suppressed) to obtain a
    rich-text editor fragment, then returns the Email tab context with that
    editor HTML and the send/history endpoint URLs.
    """
    course_key = course.id
    # Monkey-patch applicable_aside_types to return no asides for the duration of this render
    with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
        # This HtmlDescriptor is only being used to generate a nice text editor.
        html_module = HtmlDescriptor(
            course.system,
            DictFieldData({'data': ''}),
            ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
        )
        fragment = course.system.render(html_module, 'studio_view')
    fragment = wrap_xblock(
        'LmsRuntime', html_module, 'studio_view', fragment, None,
        extra_data={"course-id": unicode(course_key)},
        usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
        # Generate a new request_token here at random, because this module isn't connected to any other
        # xblock rendering.
        request_token=uuid.uuid1().get_hex()
    )
    email_editor = fragment.content
    section_data = {
        'section_key': 'send_email',
        'section_display_name': _('Email'),
        'access': access,
        'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
        'editor': email_editor,
        'list_instructor_tasks_url': reverse(
            'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
        ),
        'email_background_tasks_url': reverse(
            'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
        ),
        'email_content_history_url': reverse(
            'list_email_content', kwargs={'course_id': unicode(course_key)}
        ),
    }
    return section_data
def _get_dashboard_link(course_key):
    """Return an HTML anchor linking the course to the external analytics dashboard."""
    analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
    return u"<a href=\"{0}\" target=\"_blank\">{1}</a>".format(analytics_dashboard_url,
                                                               settings.ANALYTICS_DASHBOARD_NAME)
def _section_analytics(course, access):
    """Build the payload for the instructor dashboard's external-analytics section."""
    course_key = course.id
    dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
    # Open the anchor here; the translated message closes it after the name.
    anchor_open = "<a href=\"{}\" target=\"_blank\">".format(dashboard_url)
    insights_message = _("For analytics about your course, go to {analytics_dashboard_name}.").format(
        analytics_dashboard_name=u'{0}{1}</a>'.format(anchor_open, settings.ANALYTICS_DASHBOARD_NAME)
    )
    return {
        'section_key': 'instructor_analytics',
        'section_display_name': _('Analytics'),
        'access': access,
        'insights_message': insights_message,
    }
def _section_metrics(course, access):
    """Build the payload for the instructor dashboard's metrics section."""
    course_key = course.id
    # Assemble via dict() keywords; keys/values are identical to the template contract.
    section_data = dict(
        section_key='metrics',
        section_display_name=_('Metrics'),
        access=access,
        course_id=unicode(course_key),
        sub_section_display_name=get_section_display_name(course_key),
        section_has_problem=get_array_section_has_problem(course_key),
        get_students_opened_subsection_url=reverse('get_students_opened_subsection'),
        get_students_problem_grades_url=reverse('get_students_problem_grades'),
        post_metrics_data_csv_url=reverse('post_metrics_data_csv'),
    )
    return section_data
| CourseTalk/edx-platform | lms/djangoapps/instructor/views/instructor_dashboard.py | Python | agpl-3.0 | 31,181 | [
"VisIt"
] | 0e78a332f1d8575c5cddd96b4a7322b4bc56c36d9ca99f3d93026c6483074029 |
import numpy as np
import pandas as pd
import math as math
from scipy.stats import norm
import statsmodels.api as sm
import matplotlib.pyplot as plt
import scripts.common_functions as cmfunc
import sklearn.neighbors as nb
from sets import Set
from sklearn.neighbors import DistanceMetric
import trollius
import warnings
warnings.simplefilter('ignore')
# Ground-truth labels the detection results are scored against below:
# groud_trust[0] holds the known change-point indices,
# groud_trust[1] holds the known anomaly-point indices.
# (Name kept as-is -- "groud" [sic] -- because it is referenced throughout.)
groud_trust = [[350, 832],[732, 733, 734, 745, 736, 755, 762, 773, 774, 795]]
#groud_trust = [[594],[435, 459, 557, 558, 559, 560, 561, 562, 563, 564, 570, 571, 572, 573, 574, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 1194, 1195, 1403, 1438, 1443]]
def getCSVData(dataPath):
    """Read a CSV file into a pandas DataFrame.

    :param dataPath: path to the CSV file
    :return: the parsed DataFrame, or None if the file could not be read
    """
    # Bug fix: the original used ``except IOError("Invalid path..."):``,
    # which matches against an exception *instance* instead of the class,
    # so read failures were never actually caught (and raise TypeError on
    # Python 3).  Catch the class and report the offending path instead.
    try:
        data = pd.read_csv(dataPath)
    except IOError:
        print("Invalid path to data file: %s" % dataPath)
        return None
    return data
def anomaly_detection(result_dta, raw_dta, filed_name,alpha ,data_file = 'dta_tsing', debug_mode = 0):
    """Post-process anomaly scores and separate change points from anomalies.

    Pipeline: (1) seed points are those whose score exceeds 3 standard
    deviations; (2) each seed spreads its score to inverse nearest
    neighbours (accumulator Y), decayed by ``alpha``; (3) confidently
    normal points (bottom 20th percentile) spread "normality" the same way
    (accumulator Z), which is subtracted; (4) surviving high-score points
    are grouped by neighbourhood, and each group is labelled anomaly vs
    change point by comparing the local standard deviation with and
    without the group; (5) precision/recall/F are printed against the
    module-level ``groud_trust`` labels.

    Parameters (types inferred from usage -- confirm against callers):
      result_dta -- DataFrame with 'timestamp', 'value', 'anomaly_score';
                    NOTE: 'anomaly_score' is modified in place.
      raw_dta    -- DataFrame with the raw 'value' series (re-read from
                    disk when debug_mode == 1).
      filed_name -- nested sequence describing the score metric; used only
                    in the summary printout.
      alpha      -- decay rate per neighbour rank; also sets neighbourhood
                    size limit_size = int(1 / alpha).
      data_file  -- base name of the CSV result files loaded in debug mode.
      debug_mode -- 1 enables competitor-result loading, prints and plots.

    Returns the mean of the change-point F-score and anomaly F-score
    (both on a 0..100 scale).
    """
    if debug_mode == 1:
        # Debug only: load the outputs of competing NAB detectors so they
        # can be plotted next to ours.
        dataPath_result_bayes = './results/bayesChangePt/realKnownCause/bayesChangePt_'+ data_file +'.csv'
        dataPath_result_relativeE = './results/relativeEntropy/realKnownCause/relativeEntropy_'+ data_file +'.csv'
        dataPath_result_numenta = './results/numenta/realKnownCause/numenta_'+ data_file +'.csv'
        dataPath_result_knncad = './results/knncad/realKnownCause/knncad_'+ data_file +'.csv'
        dataPath_result_WindowGaussian = './results/windowedGaussian/realKnownCause/windowedGaussian_'+ data_file +'.csv'
        dataPath_result_contextOSE = './results/contextOSE/realKnownCause/contextOSE_'+ data_file +'.csv'
        dataPath_result_skyline = './results/skyline/realKnownCause/skyline_'+ data_file +'.csv'
        dataPath_result_ODIN = './results/ODIN_result.csv'
        # dataPath_result = './results/skyline/realKnownCause/skyline_data_compare_1.csv'
        dataPath_raw = './data/realKnownCause/'+ data_file +'.csv'
        result_dta_bayes = getCSVData(dataPath_result_bayes) if dataPath_result_bayes else None
        result_dta_numenta = getCSVData(dataPath_result_numenta) if dataPath_result_numenta else None
        result_dta_knncad = getCSVData(dataPath_result_knncad) if dataPath_result_knncad else None
        result_dta_odin = getCSVData(dataPath_result_ODIN) if dataPath_result_ODIN else None
        result_dta_relativeE = getCSVData(dataPath_result_relativeE) if dataPath_result_relativeE else None
        result_dta_WindowGaussian = getCSVData(
            dataPath_result_WindowGaussian) if dataPath_result_WindowGaussian else None
        result_dta_contextOSE = getCSVData(dataPath_result_contextOSE) if dataPath_result_contextOSE else None
        result_dta_skyline = getCSVData(dataPath_result_skyline) if dataPath_result_skyline else None
        raw_dta = getCSVData(dataPath_raw) if dataPath_raw else None
        # result_dta_numenta.anomaly_score[0:150] = np.min(result_dta_numenta.anomaly_score)
    # first derivative (original comment: "dao ham bac 1")
    der = cmfunc.change_after_k_seconds(raw_dta.value, k=1)
    # second derivative (original comment: "dao ham bac 2")
    # NOTE(review): computed with k=1 again, identical to ``der`` above --
    # k=2 may have been intended; confirm before changing.
    sec_der = cmfunc.change_after_k_seconds(raw_dta.value, k=1)
    median_sec_der = np.median(sec_der)
    std_sec_der = np.std(sec_der)
    # Breakpoint candidates: positive part of (sec_der - median - |std|),
    # min-max normalised to [0, 1], shifted right by one sample (the
    # derivative series is one element shorter than the raw data).
    breakpoint_candidates = list(map(
        lambda x: (x[1] - median_sec_der) - np.abs(std_sec_der) if (x[1] - median_sec_der) - np.abs(
            std_sec_der) > 0 else 0,
        enumerate(sec_der)))
    breakpoint_candidates = (breakpoint_candidates - np.min(breakpoint_candidates)) / (
        np.max(breakpoint_candidates) - np.min(breakpoint_candidates))
    breakpoint_candidates = np.insert(breakpoint_candidates, 0, 0)
    dta_full = result_dta
    dta_full.value.index = result_dta.timestamp
    std_anomaly_set = np.std(result_dta['anomaly_score'])
    # NOTE(review): result of this argsort is unused -- left in place.
    np.argsort(result_dta['anomaly_score'])
    # Get 5% anomaly point
    # anomaly_index = np.array(np.argsort(result_dta['anomaly_score']))[-five_percentage:]
    # Seeds: points whose score exceeds three standard deviations.
    anomaly_index = np.array([i for i, value in enumerate(result_dta['anomaly_score']) if value > 3 * std_anomaly_set])
    #print("Anomaly Point Found", anomaly_index)
    # Decay value is 5%
    #alpha = 0.1
    # Neighbourhood size is the number of alpha-decay steps until zero.
    limit_size = int(1 / alpha)
    # Y is the anomaly spreding and Z is the normal spreading.
    Y = np.zeros(len(result_dta['anomaly_score']))
    Z = np.zeros(len(result_dta['anomaly_score']))
    # Points embedded as [index, score] pairs for the KD-tree queries.
    X = list(map(lambda x: [x, result_dta.values[x][1]], np.arange(len(result_dta.values))))
    # dt=DistanceMetric.get_metric('pyfunc',func=mydist)
    tree = nb.KDTree(X, leaf_size=20)
    # tree = nb.BallTree(X, leaf_size=20, metric=dt)
    # Calculate Y
    for anomaly_point in anomaly_index:
        anomaly_neighboor = np.array(cmfunc.find_inverneghboor_of_point(tree, X, anomaly_point, limit_size),
                                     dtype=np.int32)
        for NN_pair in anomaly_neighboor:
            # NN_pair is (rank, neighbour_index): add the seed's score to the
            # neighbour, decayed by rank * alpha; skip if it would go negative.
            Y[NN_pair[1]] = Y[NN_pair[1]] + result_dta['anomaly_score'][anomaly_point] - NN_pair[0] * alpha if \
                result_dta['anomaly_score'][anomaly_point] - NN_pair[0] * alpha > 0 else Y[NN_pair[1]]
    backup_draw = result_dta.copy()
    # Calculate final score
    result_dta.anomaly_score = result_dta.anomaly_score + Y
    # Find normal point
    # normal_index = np.array(np.argsort(result_dta['anomaly_score']))[:int((0.4 * len(result_dta['anomaly_score'])))]
    # Confidently normal points: bottom 20th percentile of scores.
    normal_index = [i for i, value in enumerate(result_dta['anomaly_score']) if
                    value <= np.percentile(result_dta['anomaly_score'], 20)]
    if (debug_mode == 1):
        print("Correct Point Found", normal_index)
        cmfunc.plot_data_all('Normal Choosing Result BEFORE',
                             [[range(0, len(raw_dta.value)), raw_dta.value],
                              [normal_index, raw_dta.value[normal_index]]],
                             ['lines', 'markers'], ['a', 'b'])
    # Random half of the normal points -- NOTE(review): makes the result
    # non-deterministic between runs.
    normal_index = np.random.choice(normal_index, int(len(normal_index) * 0.5), replace=False)
    if (debug_mode == 1):
        cmfunc.plot_data_all('Normal Choosing Result AFTER',
                             [[range(0, len(raw_dta.value)), raw_dta.value],
                              [normal_index, raw_dta.value[normal_index]]],
                             ['lines', 'markers'], ['a', 'b'])
    # Calculate Z
    for normal_point in normal_index:
        nomaly_neighboor = np.array(cmfunc.find_inverneghboor_of_point(tree, X, normal_point, limit_size),
                                    dtype=np.int32)
        for NN_pair in nomaly_neighboor:
            # Symmetric to Y: spread "normality" (1 - score), decayed by
            # rank * alpha, skipping non-positive contributions.
            Z[NN_pair[1]] = Z[NN_pair[1]] + (1 - result_dta['anomaly_score'][normal_point]) - NN_pair[0] * alpha if (1 -
                result_dta[
                    'anomaly_score'][
                    normal_point]) - \
                NN_pair[
                    0] * alpha > 0 else \
                Z[NN_pair[1]]
    result_dta.anomaly_score = result_dta.anomaly_score - Z
    # Clamp negatives to zero, then min-max normalise to [0, 1].
    final_score = map(lambda x: 0 if x < 0 else x, result_dta.anomaly_score);
    final_score = (final_score - np.min(final_score)) / (
        np.max(final_score) - np.min(final_score))
    ### Draw final result
    #### Draw step result ####
    if debug_mode == 1:
        cmfunc.plot_data('Final Result',
                         [raw_dta.value, result_dta_bayes.anomaly_score, breakpoint_candidates,
                          result_dta_relativeE.anomaly_score, result_dta_skyline.anomaly_score,
                          result_dta_numenta.anomaly_score, result_dta_contextOSE.anomaly_score, final_score], [],
                         ('Raw Data', 'Bayes Result', 'EDGE Result', 'Relative Entropy Result',
                          'Skyline Gaussian Result',
                          'Numenta Result', 'ContextOSE Result', 'Our Result'),
                         ['Raw Data', 'Bayes Result', 'EDGE Result', 'Relative Entropy Result', 'Skyline Result',
                          'Numenta Result', 'ContextOSE Result', 'Our Result'])
        cmfunc.plot_data('Zoomed Final Result',
                         [raw_dta.value[720:800], result_dta_bayes.anomaly_score[720:800],
                          breakpoint_candidates[720:800],
                          result_dta_relativeE.anomaly_score[720:800], result_dta_skyline.anomaly_score[720:800],
                          result_dta_numenta.anomaly_score[720:800], result_dta_contextOSE.anomaly_score[720:800],
                          final_score[720:800]], [],
                         ('Raw Data[720:800]', 'Bayes Result[720:800]', 'EDGE Result [720:800]',
                          'Relative Entropy Result [720:800]', 'Skyline Result [720:800]', 'Numenta Result[720:800]',
                          'ContextOSE Result [720:800]',
                          'Our Result[720:800]'),
                         ['Raw Data', 'Bayes Result', 'EDGE Result', 'Relative Entropy Result', 'Skyline Result',
                          'Numenta Result', 'ContextOSE Result', 'Our Result'])
        cmfunc.plot_data('Step Result', [raw_dta.value, backup_draw.anomaly_score, Y, Z, final_score], [],
                         (
                             'Raw Data', 'Metric of Score', 'Spreading Anomaly Score', 'Spreading Normal Score',
                             'Final Score'),
                         ['Raw Data', 'Metric of Score', 'Spreading Anomaly Score', 'Spreading Normal Score',
                          'Final Score'])
    ### Find potential anomaly point
    std_final_point = np.std(final_score)
    anomaly_set = [i for i, v in enumerate(final_score) if v > 3 * std_final_point]
    # draw the whole data with potential anomaly point.
    if debug_mode == 1:
        cmfunc.plot_data_all('Potential Final Result',
                             [[range(0, len(raw_dta.value)), raw_dta.value], [anomaly_set, raw_dta.value[anomaly_set]]],
                             ['lines', 'markers'], ('Raw Data', 'High Potential Anomaly'))
    # The algorithm to seperate anomaly point and change point.
    # Group neighbouring candidates via inverse-neighbour queries on an
    # index-only KD-tree, merging groups that share members.
    X = list(map(lambda x: [x, x], np.arange(len(result_dta.values))))
    newX = list(np.array(X)[anomaly_set])
    newtree = nb.KDTree(X, leaf_size=20)
    anomaly_group_set = []
    new_small_x = 0
    sliding_index = 1
    for index_value, new_small_x in enumerate(anomaly_set):
        anomaly_neighboor = np.array(cmfunc.find_inverneghboor_of_point_1(newtree, X, new_small_x, anomaly_set),
                                     dtype=np.int32)
        tmp_array = list(map(lambda x: x[1], anomaly_neighboor))
        if index_value > 0:
            common_array = list(set(tmp_array).intersection(anomaly_group_set[index_value - sliding_index]))
            # anomaly_group_set = np.concatenate((anomaly_group_set, tmp_array))
            if len(common_array) != 0:
                # Overlaps the previous group: fold the new members in and
                # widen the lookback offset for the next iteration.
                union_array = list(set(tmp_array).union(anomaly_group_set[index_value - sliding_index]))
                anomaly_group_set[index_value - sliding_index] = np.append(
                    anomaly_group_set[index_value - sliding_index],
                    list(set(tmp_array).difference(anomaly_group_set[
                        index_value - sliding_index])))
                sliding_index = sliding_index + 1
            else:
                anomaly_group_set.append(np.sort(tmp_array))
        else:
            anomaly_group_set.append(np.sort(tmp_array))
    new_array = [tuple(row) for row in anomaly_group_set]
    uniques = new_array
    std_example_data = []
    std_example_outer = []
    # detect_final_result[0]: change-point start indices;
    # detect_final_result[1]: anomaly-point indices.
    detect_final_result = [[],[]]
    for detect_pattern in uniques:
        #rest_anomaly_set = [i for i in anomaly_set if i not in list(detect_pattern)]
        # 10 context samples on each side of the candidate group.
        example_data = [i for i in (
            list(raw_dta.value.values[int(min(detect_pattern) - 10): int(min(detect_pattern))]) + list(
                raw_dta.value.values[int(max(detect_pattern) + 1): int(max(detect_pattern) + 11)]))]
        in_std_with_Anomaly = np.std(example_data + list(raw_dta.value.values[int(min(detect_pattern)): int(max(detect_pattern) + 1)]))
        std_example_data.append(in_std_with_Anomaly)
        example_data_iner = list(raw_dta.value.values[int(min(detect_pattern)): int(max(detect_pattern)) + 1])
        example_data_outer = []
        # Context values not present inside the group (multiset difference).
        for j in example_data:
            if j not in example_data_iner:
                example_data_outer.append(j)
            else:
                example_data_iner.remove(j)
        in_std_with_NonAnomaly = np.std(example_data_outer)
        # If including the group more than doubles the local std, label the
        # whole group anomalous; otherwise record its start as a change point.
        if (in_std_with_Anomaly > 2* in_std_with_NonAnomaly):
            detect_final_result[1].extend(np.array(detect_pattern, dtype=np.int))
        else:
            detect_final_result[0].append(int(np.min(detect_pattern)))
        std_example_outer.append(in_std_with_NonAnomaly)
    final_changepoint_set = detect_final_result[0]
    # Precision / recall / F (x100) for change points vs groud_trust[0].
    result_precision = 100 * len(set(final_changepoint_set).intersection(set(groud_trust[0]))) / len(set(final_changepoint_set)) if len(set(final_changepoint_set)) != 0 else 0
    result_recall = 100 * len(set(final_changepoint_set).intersection(set(groud_trust[0]))) / len(set(groud_trust[0]))
    result_f = float(2*result_precision*result_recall/(result_precision+result_recall)) if (result_precision+result_recall) != 0 else 0
    ####################################################################################################################
    # Same metrics for anomaly points vs groud_trust[1].
    result_precision_AL = 100 * len(set(detect_final_result[1]).intersection(set(groud_trust[1]))) / len(set(detect_final_result[1])) if len(set(detect_final_result[1])) != 0 else 0
    result_recall_AL = 100 * len(set(detect_final_result[1]).intersection(set(groud_trust[1]))) / len(set(groud_trust[1]))
    result_f_AL = float(2*result_precision_AL*result_recall_AL/(result_precision_AL+result_recall_AL)) if (result_precision_AL+result_recall_AL) != 0 else 0
    ##################################################################################################
    # Python 2 print statements (the module also imports ``sets``, so it
    # targets Python 2 only).
    print "Metric: %f * %s + %f * %s " %(filed_name[1][0], filed_name[0][0], filed_name[1][1], filed_name[0][1])
    print "Change Point Detection - Precision: %d %%, Recall: %d %%, F: %f" %(result_precision, result_recall, result_f)
    print "Anomaly Detection - Precision: %d %%, Recall: %d %%, F: %f" %(result_precision_AL, result_recall_AL, result_f_AL)
    print "Total Point: %f" %(np.mean([result_f, result_f_AL]))
    print("_________________________________________________________________________________________")
    Grouping_Anomaly_Points_Result = [[range(0, len(raw_dta.value)), raw_dta.value]]
    Grouping_Anomaly_Points_Result_type = ['lines']
    bar_group_name = ['Raw Data']
    for j, value in enumerate(uniques):
        Grouping_Anomaly_Points_Result.append(
            list([list(map(int, value)), raw_dta.value.values[list(map(int, value))]]))
        Grouping_Anomaly_Points_Result_type.append('markers')
        bar_group_name.append("Group_" + str(j))
    # # Plot the grouping process.
    if debug_mode == 1:
        cmfunc.plot_data_all('Grouping Anomaly Points Result', Grouping_Anomaly_Points_Result,
                             Grouping_Anomaly_Points_Result_type, bar_group_name)
    # Plot the comparasion of std.
    cmfunc.plot_data_barchart("Anomaly Detection using Standard Deviation Changing",
                              [[bar_group_name, std_example_data], [bar_group_name, std_example_outer]],
                              name=['With potential anomaly', 'Non potential anomaly'])
    return np.mean([result_f, result_f_AL])
# def anomaly_detection_v2(result_dta, raw_dta, filed_name,data_file = 'dta_tsing', debug_mode = 0):
#
# if debug_mode == 1:
# dataPath_result_bayes = './results/bayesChangePt/realKnownCause/bayesChangePt_'+ data_file +'.csv'
# dataPath_result_relativeE = './results/relativeEntropy/realKnownCause/relativeEntropy_'+ data_file +'.csv'
# dataPath_result_numenta = './results/numenta/realKnownCause/numenta_'+ data_file +'.csv'
# dataPath_result_knncad = './results/knncad/realKnownCause/knncad_'+ data_file +'.csv'
# dataPath_result_WindowGaussian = './results/windowedGaussian/realKnownCause/windowedGaussian_'+ data_file +'.csv'
# dataPath_result_contextOSE = './results/contextOSE/realKnownCause/contextOSE_'+ data_file +'.csv'
# dataPath_result_skyline = './results/skyline/realKnownCause/skyline_'+ data_file +'.csv'
# dataPath_result_ODIN = './results/ODIN_result.csv'
# # dataPath_result = './results/skyline/realKnownCause/skyline_data_compare_1.csv'
# dataPath_raw = './data/realKnownCause/'+ data_file +'.csv'
#
# result_dta_bayes = getCSVData(dataPath_result_bayes) if dataPath_result_bayes else None
# result_dta_numenta = getCSVData(dataPath_result_numenta) if dataPath_result_numenta else None
# result_dta_knncad = getCSVData(dataPath_result_knncad) if dataPath_result_knncad else None
# result_dta_odin = getCSVData(dataPath_result_ODIN) if dataPath_result_ODIN else None
# result_dta_relativeE = getCSVData(dataPath_result_relativeE) if dataPath_result_relativeE else None
# result_dta_WindowGaussian = getCSVData(
# dataPath_result_WindowGaussian) if dataPath_result_WindowGaussian else None
# result_dta_contextOSE = getCSVData(dataPath_result_contextOSE) if dataPath_result_contextOSE else None
# result_dta_skyline = getCSVData(dataPath_result_skyline) if dataPath_result_skyline else None
# raw_dta = getCSVData(dataPath_raw) if dataPath_raw else None
#
# # result_dta_numenta.anomaly_score[0:150] = np.min(result_dta_numenta.anomaly_score)
#
# # dao ham bac 1
# der = cmfunc.change_after_k_seconds(raw_dta.value, k=1)
# # dao ham bac 2
# sec_der = cmfunc.change_after_k_seconds(raw_dta.value, k=1)
#
# median_sec_der = np.median(sec_der)
# std_sec_der = np.std(sec_der)
#
# breakpoint_candidates = list(map(
# lambda x: (x[1] - median_sec_der) - np.abs(std_sec_der) if (x[1] - median_sec_der) - np.abs(
# std_sec_der) > 0 else 0,
# enumerate(sec_der)))
# breakpoint_candidates = (breakpoint_candidates - np.min(breakpoint_candidates)) / (
# np.max(breakpoint_candidates) - np.min(breakpoint_candidates))
#
# breakpoint_candidates = np.insert(breakpoint_candidates, 0, 0)
#
# dta_full = result_dta
#
# dta_full.value.index = result_dta.timestamp
#
# std_anomaly_set = np.std(result_dta['anomaly_score'])
# np.argsort(result_dta['anomaly_score'])
#
# # Get 5% anomaly point
# # anomaly_index = np.array(np.argsort(result_dta['anomaly_score']))[-five_percentage:]
# anomaly_index = np.array([i for i, value in enumerate(result_dta['anomaly_score']) if value > 3 * std_anomaly_set])
#
# #print("Anomaly Point Found", anomaly_index)
# # Decay value is 5%
# alpha = 0.1
# limit_size = int(1 / alpha)
# # Y is the anomaly spreding and Z is the normal spreading.
# Y = np.zeros(len(result_dta['anomaly_score']))
# Z = np.zeros(len(result_dta['anomaly_score']))
# X = list(map(lambda x: [x, result_dta.values[x][1]], np.arange(len(result_dta.values))))
# # dt=DistanceMetric.get_metric('pyfunc',func=mydist)
# tree = nb.KDTree(X, leaf_size=20)
# # tree = nb.BallTree(X, leaf_size=20, metric=dt)
#
# # Calculate Y
# for anomaly_point in anomaly_index:
# anomaly_neighboor = np.array(cmfunc.find_inverneghboor_of_point(tree, X, anomaly_point, limit_size),
# dtype=np.int32)
# for NN_pair in anomaly_neighboor:
# Y[NN_pair[1]] = Y[NN_pair[1]] + result_dta['anomaly_score'][anomaly_point] - NN_pair[0] * alpha if \
# result_dta['anomaly_score'][anomaly_point] - NN_pair[0] * alpha > 0 else Y[NN_pair[1]]
#
# backup_draw = result_dta.copy()
#
#
#
# # Find normal point
# # normal_index = np.array(np.argsort(result_dta['anomaly_score']))[:int((0.4 * len(result_dta['anomaly_score'])))]
# normal_index = [i for i, value in enumerate(result_dta['anomaly_score']) if
# value <= np.percentile(result_dta['anomaly_score'], 5)]
#
# if (debug_mode == 1):
# print("Correct Point Found", normal_index)
# cmfunc.plot_data_all('Normal Choosing Result BEFORE',
# [[range(0, len(raw_dta.value)), raw_dta.value],
# [normal_index, raw_dta.value[normal_index]]],
# ['lines', 'markers'], ['a', 'b'])
#
# normal_index = np.random.choice(normal_index, int(len(normal_index) * 0.5), replace=False)
#
# if (debug_mode == 1):
# cmfunc.plot_data_all('Normal Choosing Result AFTER',
# [[range(0, len(raw_dta.value)), raw_dta.value],
# [normal_index, raw_dta.value[normal_index]]],
# ['lines', 'markers'], ['a', 'b'])
#
# # Calculate Z
# for normal_point in normal_index:
# nomaly_neighboor = np.array(cmfunc.find_inverneghboor_of_point(tree, X, normal_point, limit_size),
# dtype=np.int32)
# for NN_pair in nomaly_neighboor:
# Z[NN_pair[1]] = Z[NN_pair[1]] + (1 - result_dta['anomaly_score'][normal_point]) - NN_pair[0] * alpha if (1 -
# result_dta[
# 'anomaly_score'][
# normal_point]) - \
# NN_pair[
# 0] * alpha > 0 else \
# Z[NN_pair[1]]
# # Calculate final score
#
# result_dta.anomaly_score = result_dta.anomaly_score + Y - Z
#
# final_score = map(lambda x: 0 if x < 0 else x, result_dta.anomaly_score);
# final_score = (final_score - np.min(final_score)) / (
# np.max(final_score) - np.min(final_score))
#
# ### Draw final result
# #### Draw step result ####
#
# if debug_mode == 1:
# cmfunc.plot_data('Final Result',
# [raw_dta.value, result_dta_bayes.anomaly_score, breakpoint_candidates,
# result_dta_relativeE.anomaly_score, result_dta_skyline.anomaly_score,
# result_dta_numenta.anomaly_score, result_dta_contextOSE.anomaly_score, final_score], [],
# ('Raw Data', 'Bayes Result', 'EDGE Result', 'Relative Entropy Result',
# 'Skyline Gaussian Result',
# 'Numenta Result', 'ContextOSE Result', 'Our Result'),
# ['Raw Data', 'Bayes Result', 'EDGE Result', 'Relative Entropy Result', 'Skyline Result',
# 'Numenta Result', 'ContextOSE Result', 'Our Result'])
# cmfunc.plot_data('Zoomed Final Result',
# [raw_dta.value[720:800], result_dta_bayes.anomaly_score[720:800],
# breakpoint_candidates[720:800],
# result_dta_relativeE.anomaly_score[720:800], result_dta_skyline.anomaly_score[720:800],
# result_dta_numenta.anomaly_score[720:800], result_dta_contextOSE.anomaly_score[720:800],
# final_score[720:800]], [],
# ('Raw Data[720:800]', 'Bayes Result[720:800]', 'EDGE Result [720:800]',
# 'Relative Entropy Result [720:800]', 'Skyline Result [720:800]', 'Numenta Result[720:800]',
# 'ContextOSE Result [720:800]',
# 'Our Result[720:800]'),
# ['Raw Data', 'Bayes Result', 'EDGE Result', 'Relative Entropy Result', 'Skyline Result',
# 'Numenta Result', 'ContextOSE Result', 'Our Result'])
# cmfunc.plot_data('Step Result', [raw_dta.value, backup_draw.anomaly_score, Y, Z, final_score], [],
# (
# 'Raw Data', 'Metric of Score', 'Spreading Anomaly Score', 'Spreading Normal Score',
# 'Final Score'),
# ['Raw Data', 'Metric of Score', 'Spreading Anomaly Score', 'Spreading Normal Score',
# 'Final Score'])
#
# ### Find potential anomaly point
# std_final_point = np.std(final_score)
# anomaly_set = [i for i, v in enumerate(final_score) if v > 3 * std_final_point]
#
# # draw the whole data with potential anomaly point.
# if debug_mode == 1:
# cmfunc.plot_data_all('Potential Final Result',
# [[range(0, len(raw_dta.value)), raw_dta.value], [anomaly_set, raw_dta.value[anomaly_set]]],
# ['lines', 'markers'], ('Raw Data', 'High Potential Anomaly'))
#
# # The algorithm to seperate anomaly point and change point.
# X = list(map(lambda x: [x, x], np.arange(len(result_dta.values))))
# newX = list(np.array(X)[anomaly_set])
# newtree = nb.KDTree(X, leaf_size=20)
#
# anomaly_group_set = []
# new_small_x = 0
# sliding_index = 1
# for index_value, new_small_x in enumerate(anomaly_set):
# anomaly_neighboor = np.array(cmfunc.find_inverneghboor_of_point_1(newtree, X, new_small_x, anomaly_set),
# dtype=np.int32)
# tmp_array = list(map(lambda x: x[1], anomaly_neighboor))
# if index_value > 0:
# common_array = list(set(tmp_array).intersection(anomaly_group_set[index_value - sliding_index]))
# # anomaly_group_set = np.concatenate((anomaly_group_set, tmp_array))
# if len(common_array) != 0:
# union_array = list(set(tmp_array).union(anomaly_group_set[index_value - sliding_index]))
# anomaly_group_set[index_value - sliding_index] = np.append(
# anomaly_group_set[index_value - sliding_index],
# list(set(tmp_array).difference(anomaly_group_set[
# index_value - sliding_index])))
# sliding_index = sliding_index + 1
# else:
# anomaly_group_set.append(np.sort(tmp_array))
# else:
# anomaly_group_set.append(np.sort(tmp_array))
#
# new_array = [tuple(row) for row in anomaly_group_set]
# uniques = new_array
# std_example_data = []
# std_example_outer = []
# detect_final_result = [[],[]]
# for detect_pattern in uniques:
# #rest_anomaly_set = [i for i in anomaly_set if i not in list(detect_pattern)]
# example_data = [i for i in (
# list(raw_dta.value.values[int(min(detect_pattern) - 10): int(min(detect_pattern))]) + list(
# raw_dta.value.values[int(max(detect_pattern) + 1): int(max(detect_pattern) + 11)]))]
# in_std_with_Anomaly = np.std(example_data + list(raw_dta.value.values[int(min(detect_pattern)): int(max(detect_pattern) + 1)]))
# std_example_data.append(in_std_with_Anomaly)
# example_data_iner = list(raw_dta.value.values[int(min(detect_pattern)): int(max(detect_pattern)) + 1])
# example_data_outer = []
# for j in example_data:
# if j not in example_data_iner:
# example_data_outer.append(j)
# else:
# example_data_iner.remove(j)
#
# in_std_with_NonAnomaly = np.std(example_data_outer)
# if (in_std_with_Anomaly > 2* in_std_with_NonAnomaly):
# detect_final_result[1].extend(np.array(detect_pattern, dtype=np.int))
# else:
# detect_final_result[0].extend(np.array(detect_pattern, dtype=np.int))
# std_example_outer.append(in_std_with_NonAnomaly)
#
# #print("std with anomaly: ", std_example_data, " Std non anomaly", std_example_outer)
# #print("Final result: ", detect_final_result)
# result_precision = 100 * len(set(detect_final_result[0]).intersection(set(groud_trust[0]))) / len(set(detect_final_result[0])) if len(set(detect_final_result[0])) != 0 else 0
# result_recall = 100 * len(set(detect_final_result[0]).intersection(set(groud_trust[0]))) / len(set(groud_trust[0]))
# result_f = float(2*result_precision*result_recall/(result_precision+result_recall)) if (result_precision+result_recall) != 0 else 0
# ####################################################################################################################
# result_precision_AL = 100 * len(set(detect_final_result[1]).intersection(set(groud_trust[1]))) / len(set(detect_final_result[1])) if len(set(detect_final_result[1])) != 0 else 0
# result_recall_AL = 100 * len(set(detect_final_result[1]).intersection(set(groud_trust[1]))) / len(set(groud_trust[1]))
# result_f_AL = float(2*result_precision_AL*result_recall_AL/(result_precision_AL+result_recall_AL)) if (result_precision_AL+result_recall_AL) != 0 else 0
# ##################################################################################################
# print "Metric: %f * %s + %f * %s " %(filed_name[1][0], filed_name[0][0], filed_name[1][1], filed_name[0][1])
# print "Change Point Detection - Precision: %d %%, Recall: %d %%, F: %f" %(result_precision, result_recall, result_f)
# print "Anomaly Detection - Precision: %d %%, Recall: %d %%, F: %f" %(result_precision_AL, result_recall_AL, result_f_AL)
# print "Total Point: %f" %(np.mean([result_f, result_f_AL]))
# print("_________________________________________________________________________________________")
# Grouping_Anomaly_Points_Result = [[range(0, len(raw_dta.value)), raw_dta.value]]
# Grouping_Anomaly_Points_Result_type = ['lines']
# bar_group_name = ['Raw Data']
# for j, value in enumerate(uniques):
# Grouping_Anomaly_Points_Result.append(
# list([list(map(int, value)), raw_dta.value.values[list(map(int, value))]]))
# Grouping_Anomaly_Points_Result_type.append('markers')
# bar_group_name.append("Group_" + str(j))
#
# # # Plot the grouping process.
# if debug_mode == 1:
# cmfunc.plot_data_all('Grouping Anomaly Points Result', Grouping_Anomaly_Points_Result,
# Grouping_Anomaly_Points_Result_type, bar_group_name)
#
# # Plot the comparasion of std.
# cmfunc.plot_data_barchart("Anomaly Detection using Standard Deviation Changing",
# [[bar_group_name, std_example_data], [bar_group_name, std_example_outer]],
# name=['With potential anomaly', 'Non potential anomaly'])
# return np.mean([result_f, result_f_AL])
| kimhungGCZ/combinedAL | detection_engine.py | Python | agpl-3.0 | 31,609 | [
"Gaussian"
] | b5d52e19d0cf629cb3899fe5c7fe0a4d53039da0c30d148b3c78bf5c2b82dffa |
# IPython log file
import numpy as np
import os
import sys
sys.path.append('/Users/jni/projects/unfold-embryo')
sys.path.append('/Users/jni/projects/skan')
sys.path.append('/Users/jni/projects/storm-cluster')
from skimage import filters, morphology, io
from gala import imio
import unfold
# Embryo-unfolding pipeline (IPython session log): segment the membrane
# probability volume, define the mesoderm axis, and resample the raw
# image into an unfolded 2D view.
os.chdir('/Users/jni/Dropbox/data1/drosophila-embryo/')
# Ilastik-style probability stack; channel 0 is used below --
# presumably the membrane probability channel (confirm).
v = imio.read_h5_stack('embA_0.3um_Probabilities.h5')
smoothed_vm = filters.gaussian(v[..., 0], sigma=4)
# Threshold at 0.5 and downsample by 2 along every axis.
b = (smoothed_vm > 0.5)[::2, ::2, ::2]
# Drop connected components smaller than 1000 voxels.
b2 = morphology.remove_small_objects(b, 1000)
g, idxs, path = unfold.define_mesoderm_axis(b2)
sources, ids, idxs = unfold.source_id_volume(b2, idxs, path)
# Two unfolding coordinates used to flatten the volume into 2D.
c0 = unfold.coord0_volume(sources, idxs)
c1 = unfold.coord1_volume(b2)
# Raw image, downsampled to match the binarised volume.
image = io.imread('embA_0.3um.tif')[::2, ::2, ::2]
# Resample each of the 3 channels onto the unfolded (c0, c1) grid.
channels = [unfold.sample2d(c0, c1, image[..., c])
            for c in range(3)]
import stormcluster as sc
# NOTE(review): relies on a private helper (_stretchlim) for contrast
# stretching of the stacked RGB result.
image = sc._stretchlim(np.stack(channels, axis=2))
import matplotlib.pyplot as plt
plt.imshow(image)
plt.show()
| jni/useful-histories | embryo-interpolation.py | Python | bsd-3-clause | 989 | [
"Gaussian"
] | 1ec4405a3257d4556176031388d29cfe42f6f3c787949b7f96e57d45136c6f31 |
""" DISET request handler base class for the TransformationDB.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities.DEncode import ignoreEncodeWarning
from DIRAC.TransformationSystem.DB.TransformationDB import TransformationDB
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
# Types accepted wherever a transformation may be referenced either by
# name (string) or by numeric ID.
transTypes = list(six.string_types) + list(six.integer_types)
__RCSID__ = "$Id$"
# Task-counter column names reported per transformation (task/job states
# plus the aggregate 'TotalCreated').
TASKS_STATE_NAMES = [
    'TotalCreated',
    'Created',
    'Running',
    'Submitted',
    'Failed',
    'Waiting',
    'Done',
    'Completed',
    'Stalled',
    'Killed',
    'Staging',
    'Checking',
    'Rescheduled',
    'Scheduled']
# File-counter column names reported per transformation.
FILES_STATE_NAMES = ['PercentProcessed', 'Processed', 'Unused',
                     'Assigned', 'Total', 'Problematic',
                     'ApplicationCrash', 'MaxReset']
class TransformationManagerHandler(RequestHandler):
  @classmethod
  def initializeHandler(cls, serviceInfoDict):
    """ Initialization of DB object.

        Called once by the DISET framework when the service starts; the
        TransformationDB instance is shared by all handler instances.

        :param serviceInfoDict: service description dictionary (unused here)
        :return: S_OK()
    """
    cls.transformationDB = TransformationDB()
    return S_OK()
  # DISET argument-type declaration for export_getCounters.
  types_getCounters = [six.string_types, list, dict]

  @classmethod
  def export_getCounters(cls, table, attrList, condDict, older=None, newer=None, timeStamp=None):
    """ Count distinct combinations of ``attrList`` values in ``table``
        matching ``condDict``, optionally restricted to a time window on
        the ``timeStamp`` column.  Delegates to TransformationDB.getCounters.
    """
    return cls.transformationDB.getCounters(table, attrList, condDict, older=older, newer=newer, timeStamp=timeStamp)
####################################################################
#
# These are the methods to manipulate the transformations table
#
  # DISET argument-type declaration for export_addTransformation
  # (the seven mandatory string arguments).
  types_addTransformation = [six.string_types, six.string_types,
                             six.string_types, six.string_types,
                             six.string_types, six.string_types,
                             six.string_types]

  def export_addTransformation(self,
                               transName, description, longDescription,
                               transType, plugin, agentType, fileMask,
                               transformationGroup='General',
                               groupSize=1,
                               inheritedFrom=0,
                               body='',
                               maxTasks=0,
                               eventsPerTask=0,
                               addFiles=True,
                               inputMetaQuery=None,
                               outputMetaQuery=None):
    """ Create a new transformation.

        The author DN (falling back to CN) and group are taken from the
        caller's remote credentials, never from the arguments, so clients
        cannot impersonate another author.

        :return: S_OK(transID)/S_ERROR structure from TransformationDB
    """
    # authorDN = self._clientTransport.peerCredentials['DN']
    # authorGroup = self._clientTransport.peerCredentials['group']
    credDict = self.getRemoteCredentials()
    authorDN = credDict.get('DN', credDict.get('CN'))
    authorGroup = credDict.get('group')
    res = self.transformationDB.addTransformation(
        transName, description, longDescription,
        authorDN, authorGroup, transType, plugin,
        agentType, fileMask,
        transformationGroup=transformationGroup,
        groupSize=groupSize,
        inheritedFrom=inheritedFrom,
        body=body,
        maxTasks=maxTasks,
        eventsPerTask=eventsPerTask,
        addFiles=addFiles,
        inputMetaQuery=inputMetaQuery,
        outputMetaQuery=outputMetaQuery)
    if res['OK']:
      self.log.info("Added transformation", res['Value'])
    return res
types_deleteTransformation = [transTypes]
def export_deleteTransformation(self, transName):
credDict = self.getRemoteCredentials()
authorDN = credDict.get('DN', credDict.get('CN'))
# authorDN = self._clientTransport.peerCredentials['DN']
return self.transformationDB.deleteTransformation(transName, author=authorDN)
types_cleanTransformation = [transTypes]
def export_cleanTransformation(self, transName):
credDict = self.getRemoteCredentials()
authorDN = credDict.get('DN', credDict.get('CN'))
# authorDN = self._clientTransport.peerCredentials['DN']
return self.transformationDB.cleanTransformation(transName, author=authorDN)
types_setTransformationParameter = [transTypes, six.string_types]
def export_setTransformationParameter(self, transName, paramName, paramValue):
credDict = self.getRemoteCredentials()
authorDN = credDict.get('DN', credDict.get('CN'))
# authorDN = self._clientTransport.peerCredentials['DN']
return self.transformationDB.setTransformationParameter(transName, paramName, paramValue, author=authorDN)
types_deleteTransformationParameter = [transTypes, six.string_types]

@classmethod
def export_deleteTransformationParameter(cls, transName, paramName):
    """Remove one parameter from a transformation (no author is recorded)."""
    db = cls.transformationDB
    return db.deleteTransformationParameter(transName, paramName)
types_getTransformations = []

@classmethod
def export_getTransformations(cls, condDict=None, older=None, newer=None, timeStamp='CreationDate',
                              orderAttribute=None, limit=None, extraParams=False, offset=None):
    """Select transformations matching the given criteria and time window."""
    # An absent or empty condition dict means "no filtering".
    selection = condDict or {}
    return cls.transformationDB.getTransformations(condDict=selection,
                                                   older=older,
                                                   newer=newer,
                                                   timeStamp=timeStamp,
                                                   orderAttribute=orderAttribute,
                                                   limit=limit,
                                                   extraParams=extraParams,
                                                   offset=offset)
types_getTransformation = [transTypes]

@classmethod
def export_getTransformation(cls, transName, extraParams=False):
    """Return the record of a single transformation."""
    db = cls.transformationDB
    return db.getTransformation(transName, extraParams=extraParams)
types_getTransformationParameters = [transTypes, [six.string_types, list]]

@classmethod
def export_getTransformationParameters(cls, transName, parameters):
    """Fetch the requested parameter(s) of a transformation."""
    db = cls.transformationDB
    return db.getTransformationParameters(transName, parameters)
types_getTransformationWithStatus = [[six.string_types, list, tuple]]

@classmethod
def export_getTransformationWithStatus(cls, status):
    """List the transformations currently in the given status(es)."""
    db = cls.transformationDB
    return db.getTransformationWithStatus(status)
####################################################################
#
# These are the methods to manipulate the TransformationFiles tables
#
types_addFilesToTransformation = [transTypes, [list, tuple]]

@classmethod
def export_addFilesToTransformation(cls, transName, lfns):
    """Attach the given LFNs to a transformation."""
    db = cls.transformationDB
    return db.addFilesToTransformation(transName, lfns)
types_addTaskForTransformation = [transTypes]

@classmethod
def export_addTaskForTransformation(cls, transName, lfns=None, se='Unknown'):
    """Add a new task to a transformation.

    :param transName: name or ID of the transformation
    :param lfns: optional list of input LFNs for the task (default: none)
    :param se: storage element associated with the task
    """
    # Fixed: the default was a shared mutable list ([]); normalise None instead.
    if lfns is None:
        lfns = []
    return cls.transformationDB.addTaskForTransformation(
        transName, lfns=lfns, se=se)
types_setFileStatusForTransformation = [transTypes, dict]

@classmethod
@ignoreEncodeWarning
def export_setFileStatusForTransformation(cls, transName, dictOfNewFilesStatus):
    """ Sets the file status for the transformation.

    The dictOfNewFilesStatus is a dictionary with the form:
    {12345: ('StatusA', errorA), 6789: ('StatusB',errorB), ... } where the keys are fileIDs
    The tuple may be a string with only the status if the client was from an older version
    """
    # Nothing to update: succeed immediately with an empty result.
    if not dictOfNewFilesStatus:
        return S_OK({})
    # Inspect one value to validate the payload format; only the
    # (status, error) two-tuple/list form is accepted by this endpoint.
    statusSample = list(dictOfNewFilesStatus.values())[0]
    if isinstance(statusSample, (list, tuple)) and len(statusSample) == 2:
        newStatusForFileIDs = dictOfNewFilesStatus
    else:
        return S_ERROR("Status field should be two values")
    # Resolve the transformation ID and reuse the same DB connection for
    # the subsequent bulk status update.
    res = cls.transformationDB._getConnectionTransID(False, transName)
    if not res['OK']:
        return res
    connection = res['Value']['Connection']
    transID = res['Value']['TransformationID']
    return cls.transformationDB.setFileStatusForTransformation(transID, newStatusForFileIDs, connection=connection)
types_getTransformationStats = [transTypes]

@classmethod
def export_getTransformationStats(cls, transName):
    """Return the file statistics of a transformation."""
    db = cls.transformationDB
    return db.getTransformationStats(transName)
types_getTransformationFilesCount = [transTypes, six.string_types]

@classmethod
def export_getTransformationFilesCount(cls, transName, field, selection=None):
    """Count transformation files grouped by *field*, optionally filtered.

    :param transName: name or ID of the transformation
    :param field: column whose values the counts are grouped by
    :param selection: optional condition dict restricting the counted files
    """
    # Fixed: the default was a shared mutable dict ({}); normalise None instead.
    if selection is None:
        selection = {}
    return cls.transformationDB.getTransformationFilesCount(transName, field, selection=selection)
types_getTransformationFiles = []

@classmethod
def export_getTransformationFiles(cls, condDict=None, older=None, newer=None, timeStamp='LastUpdate',
                                  orderAttribute=None, limit=None, offset=None):
    """Select transformation files matching the given criteria."""
    selection = condDict or {}
    # connection=False is forwarded so the DB layer handles its own connection.
    return cls.transformationDB.getTransformationFiles(
        condDict=selection, older=older, newer=newer, timeStamp=timeStamp,
        orderAttribute=orderAttribute, limit=limit, offset=offset,
        connection=False)
####################################################################
#
# These are the methods to manipulate the TransformationTasks table
#
types_getTransformationTasks = []

@classmethod
def export_getTransformationTasks(cls, condDict=None, older=None, newer=None, timeStamp='CreationTime',
                                  orderAttribute=None, limit=None, inputVector=False, offset=None):
    """Select transformation tasks matching the given criteria."""
    selection = condDict or {}
    return cls.transformationDB.getTransformationTasks(
        condDict=selection, older=older, newer=newer, timeStamp=timeStamp,
        orderAttribute=orderAttribute, limit=limit, inputVector=inputVector,
        offset=offset)
types_setTaskStatus = [transTypes, [list] + list(six.integer_types), six.string_types]

@classmethod
def export_setTaskStatus(cls, transName, taskID, status):
    """Set the status of one task (or a list of tasks) of a transformation."""
    db = cls.transformationDB
    return db.setTaskStatus(transName, taskID, status)
types_setTaskStatusAndWmsID = [transTypes, list(six.integer_types), six.string_types, six.string_types]

@classmethod
def export_setTaskStatusAndWmsID(cls, transName, taskID, status, taskWmsID):
    """Set both the status and the external (WMS) ID of a task."""
    db = cls.transformationDB
    return db.setTaskStatusAndWmsID(transName, taskID, status, taskWmsID)
types_getTransformationTaskStats = [transTypes]

@classmethod
def export_getTransformationTaskStats(cls, transName):
    """Return the task statistics of a transformation."""
    db = cls.transformationDB
    return db.getTransformationTaskStats(transName)
types_deleteTasks = [transTypes, list(six.integer_types), list(six.integer_types)]

def export_deleteTasks(self, transName, taskMin, taskMax):
    """Delete the tasks between taskMin and taskMax, recording the author."""
    credentials = self.getRemoteCredentials()
    author = credentials.get('DN', credentials.get('CN'))
    return self.transformationDB.deleteTasks(transName, taskMin, taskMax, author=author)
types_extendTransformation = [transTypes, list(six.integer_types)]

def export_extendTransformation(self, transName, nTasks):
    """Extend a transformation by nTasks tasks, recording the author."""
    credentials = self.getRemoteCredentials()
    author = credentials.get('DN', credentials.get('CN'))
    return self.transformationDB.extendTransformation(transName, nTasks, author=author)
types_getTasksToSubmit = [transTypes, list(six.integer_types)]

def export_getTasksToSubmit(self, transName, numTasks, site=''):
    """ Get information necessary for submission for a given number of tasks for a given transformation """
    # Fetch the transformation description itself first.
    res = self.transformationDB.getTransformation(transName)
    if not res['OK']:
        return res
    transDict = res['Value']
    submitDict = {}
    # Only tasks still in 'Created' state are eligible for submission.
    res = self.transformationDB.getTasksForSubmission(transName, numTasks=numTasks, site=site, statusList=['Created'])
    if not res['OK']:
        return res
    tasksDict = res['Value']
    for taskID, taskDict in tasksDict.items():
        # Reserve each task before handing it out; abort on the first
        # reservation failure so no task is returned un-reserved.
        res = self.transformationDB.reserveTask(transName, int(taskID))
        if not res['OK']:
            return res
        else:
            submitDict[taskID] = taskDict
    # The reserved tasks ride along with the transformation description.
    transDict['JobDictionary'] = submitDict
    return S_OK(transDict)
####################################################################
#
# These are the methods for TransformationMetaQueries table. It replaces methods
# for the old TransformationInputDataQuery table
#
types_createTransformationMetaQuery = [transTypes, dict, six.string_types]

def export_createTransformationMetaQuery(self, transName, queryDict, queryType):
    """Create a meta query of the given type for a transformation."""
    credentials = self.getRemoteCredentials()
    author = credentials.get('DN', credentials.get('CN'))
    return self.transformationDB.createTransformationMetaQuery(transName, queryDict, queryType, author=author)
types_deleteTransformationMetaQuery = [transTypes, six.string_types]

def export_deleteTransformationMetaQuery(self, transName, queryType):
    """Delete the meta query of the given type from a transformation."""
    credentials = self.getRemoteCredentials()
    author = credentials.get('DN', credentials.get('CN'))
    return self.transformationDB.deleteTransformationMetaQuery(transName, queryType, author=author)
types_getTransformationMetaQuery = [transTypes, six.string_types]

def export_getTransformationMetaQuery(self, transName, queryType):
    """Return the meta query of the given type for a transformation."""
    db = self.transformationDB
    return db.getTransformationMetaQuery(transName, queryType)
####################################################################
#
# These are the methods for transformation logging manipulation
#
types_getTransformationLogging = [transTypes]

def export_getTransformationLogging(self, transName):
    """Return the logging records of a transformation."""
    db = self.transformationDB
    return db.getTransformationLogging(transName)
####################################################################
#
# These are the methods for transformation additional parameters
#
types_getAdditionalParameters = [transTypes]

def export_getAdditionalParameters(self, transName):
    """Return the additional parameters stored for a transformation."""
    db = self.transformationDB
    return db.getAdditionalParameters(transName)
####################################################################
#
# These are the methods for file manipulation
#
types_getFileSummary = [list]

@classmethod
def export_getFileSummary(cls, lfns):
    """Return the summary information for the given LFNs."""
    db = cls.transformationDB
    return db.getFileSummary(lfns)
types_addDirectory = [six.string_types]

@classmethod
def export_addDirectory(cls, path, force=False):
    """Add the given directory path to the transformation database."""
    db = cls.transformationDB
    return db.addDirectory(path, force=force)
types_exists = [list]

@classmethod
def export_exists(cls, lfns):
    """Check which of the given LFNs are known to the database."""
    db = cls.transformationDB
    return db.exists(lfns)
types_addFile = [[list, dict, six.string_types]]

@classmethod
def export_addFile(cls, fileDicts, force=False):
    """ Interface provides { LFN1 : { PFN1, SE1, ... }, LFN2 : { PFN2, SE2, ... } }
    """
    db = cls.transformationDB
    return db.addFile(fileDicts, force=force)
types_removeFile = [[list, dict]]

@classmethod
def export_removeFile(cls, lfns):
    """ Interface provides [ LFN1, LFN2, ... ]

    A dict is tolerated as well; only its keys (the LFNs) are used.
    """
    payload = list(lfns) if isinstance(lfns, dict) else lfns
    return cls.transformationDB.removeFile(payload)
types_setMetadata = [six.string_types, dict]

@classmethod
def export_setMetadata(cls, path, querydict):
    """ Set metadata to a file or to a directory (path)
    """
    db = cls.transformationDB
    return db.setMetadata(path, querydict)
####################################################################
#
# These are the methods used for web monitoring
#
# TODO Get rid of this (talk to Matvey)
types_getDistinctAttributeValues = [six.string_types, dict]

@classmethod
def export_getDistinctAttributeValues(cls, attribute, selectDict):
    """Return the distinct values of one column of the Transformations table."""
    res = cls.transformationDB.getTableDistinctAttributeValues('Transformations', [attribute], selectDict)
    if not res['OK']:
        return res
    # Unwrap the per-attribute dict down to the single requested attribute.
    return S_OK(res['Value'][attribute])
types_getTableDistinctAttributeValues = [six.string_types, list, dict]

@classmethod
def export_getTableDistinctAttributeValues(cls, table, attributes, selectDict):
    """Return the distinct values of the given columns of *table*."""
    db = cls.transformationDB
    return db.getTableDistinctAttributeValues(table, attributes, selectDict)
types_getTransformationStatusCounters = []

@classmethod
def export_getTransformationStatusCounters(cls):
    """Count the transformations grouped by their Status."""
    res = cls.transformationDB.getCounters('Transformations', ['Status'], {})
    if not res['OK']:
        return res
    # getCounters yields ({'Status': <status>}, <count>) pairs.
    return S_OK({attrDict['Status']: count for attrDict, count in res['Value']})
types_getTransformationSummary = []

def export_getTransformationSummary(self):
    """ Get the summary of the currently existing transformations """
    res = self.transformationDB.getTransformations()
    if not res['OK']:
        return res
    transList = res['Value']
    resultDict = {}
    for transDict in transList:
        transID = transDict['TransformationID']
        # Attach the task statistics; a transformation whose statistics
        # cannot be obtained is skipped entirely (with a warning).
        res = self.transformationDB.getTransformationTaskStats(transID)
        if not res['OK']:
            self.log.warn('Failed to get job statistics for transformation', transID)
            continue
        transDict['JobStats'] = res['Value']
        # The file count is best-effort: -1 signals "unknown" to the caller.
        res = self.transformationDB.getTransformationStats(transID)
        if not res['OK']:
            transDict['NumberOfFiles'] = -1
        else:
            transDict['NumberOfFiles'] = res['Value']['Total']
        resultDict[transID] = transDict
    return S_OK(resultDict)
types_getTabbedSummaryWeb = [six.string_types, dict, dict, list, int, int]

def export_getTabbedSummaryWeb(self, table, requestedTables, selectDict, sortList, startItem, maxItems):
    """Build the web summary of a primary *table* plus related tables.

    The primary table is summarised first with *selectDict*; the distinct
    selections found there (e.g. TransformationID) are then applied as
    filters to each table listed in *requestedTables*, keeping the tabs
    consistent with each other.
    """
    # Which columns of each table identify matching rows in the other tables.
    tableDestinations = {'Transformations': {'TransformationFiles': ['TransformationID'],
                                             'TransformationTasks': ['TransformationID']},
                         'TransformationFiles': {'Transformations': ['TransformationID'],
                                                 'TransformationTasks': ['TransformationID', 'TaskID']},
                         'TransformationTasks': {'Transformations': ['TransformationID'],
                                                 'TransformationFiles': ['TransformationID', 'TaskID']}}
    # Columns whose distinct values feed the selection widgets, per table.
    tableSelections = {'Transformations': ['TransformationID', 'AgentType', 'Type', 'TransformationGroup',
                                           'Plugin'],
                       'TransformationFiles': ['TransformationID', 'TaskID', 'Status', 'UsedSE', 'TargetSE'],
                       'TransformationTasks': ['TransformationID', 'TaskID', 'ExternalStatus', 'TargetSE']}
    # Time-stamp column used for the FromDate/ToDate window, per table.
    tableTimeStamps = {'Transformations': 'CreationDate',
                       'TransformationFiles': 'LastUpdate',
                       'TransformationTasks': 'CreationTime'}
    # Column holding the row status, per table.
    tableStatusColumn = {'Transformations': 'Status',
                         'TransformationFiles': 'Status',
                         'TransformationTasks': 'ExternalStatus'}
    resDict = {}
    res = self.__getTableSummaryWeb(table, selectDict, sortList, startItem, maxItems,
                                    selectColumns=tableSelections[table], timeStamp=tableTimeStamps[table],
                                    statusColumn=tableStatusColumn[table])
    if not res['OK']:
        self.log.error("Failed to get Summary for table", "%s %s" % (table, res['Message']))
        return res
    resDict[table] = res['Value']
    selections = res['Value']['Selections']
    tableSelection = {}
    # Propagate the primary table's selections as filters for the other tables.
    for destination in tableDestinations[table].keys():
        tableSelection[destination] = {}
        for parameter in tableDestinations[table][destination]:
            tableSelection[destination][parameter] = selections.get(parameter, [])
    # NOTE(review): this loop rebinds `table`, `sortList`, `startItem` and
    # `maxItems`, shadowing the primary-table arguments. Harmless here since
    # the primary result is already stored, but worth being aware of.
    for table, paramDict in requestedTables.items():
        sortList = paramDict.get('SortList', [])
        startItem = paramDict.get('StartItem', 0)
        maxItems = paramDict.get('MaxItems', 50)
        res = self.__getTableSummaryWeb(table, tableSelection[table], sortList, startItem, maxItems,
                                        selectColumns=tableSelections[table], timeStamp=tableTimeStamps[table],
                                        statusColumn=tableStatusColumn[table])
        if not res['OK']:
            self.log.error("Failed to get Summary for table", "%s %s" % (table, res['Message']))
            return res
        resDict[table] = res['Value']
    return S_OK(resDict)
types_getTransformationsSummaryWeb = [dict, list, int, int]

def export_getTransformationsSummaryWeb(self, selectDict, sortList, startItem, maxItems):
    """Paged web summary of the Transformations table."""
    columns = ['TransformationID', 'AgentType', 'Type', 'Group', 'Plugin']
    return self.__getTableSummaryWeb(
        'Transformations', selectDict, sortList, startItem, maxItems,
        selectColumns=columns, timeStamp='CreationDate', statusColumn='Status')
types_getTransformationTasksSummaryWeb = [dict, list, int, int]

def export_getTransformationTasksSummaryWeb(self, selectDict, sortList, startItem, maxItems):
    """Paged web summary of the TransformationTasks table."""
    columns = ['TransformationID', 'ExternalStatus', 'TargetSE']
    return self.__getTableSummaryWeb(
        'TransformationTasks', selectDict, sortList, startItem, maxItems,
        selectColumns=columns, timeStamp='CreationTime', statusColumn='ExternalStatus')
types_getTransformationFilesSummaryWeb = [dict, list, int, int]

def export_getTransformationFilesSummaryWeb(self, selectDict, sortList, startItem, maxItems):
    """Paged web summary of the TransformationFiles table."""
    columns = ['TransformationID', 'Status', 'UsedSE', 'TargetSE']
    return self.__getTableSummaryWeb(
        'TransformationFiles', selectDict, sortList, startItem, maxItems,
        selectColumns=columns, timeStamp='LastUpdate', statusColumn='Status')
def __getTableSummaryWeb(self, table, selectDict, sortList, startItem, maxItems, selectColumns=None,
                         timeStamp=None, statusColumn='Status'):
    """Generic paging/summary helper behind the *SummaryWeb endpoints.

    Selects rows through the table-specific ``get<table>`` DB method, pages
    them with startItem/maxItems, counts the statuses of the visible page
    and attaches the distinct values of *selectColumns* for the selection
    widgets. Returns S_OK with TotalRecords / ParameterNames / Records /
    Extras / Selections, or S_ERROR.
    """
    # Fixed: `selectColumns` previously used a shared mutable default ([]).
    if selectColumns is None:
        selectColumns = []
    # Extract the optional time window; the keys are removed so they are
    # not treated as column conditions below.
    fromDate = selectDict.get('FromDate', None)
    if fromDate:
        del selectDict['FromDate']
    toDate = selectDict.get('ToDate', None)
    if toDate:
        del selectDict['ToDate']
    # Sorting instructions. Only one for the moment.
    if sortList:
        orderAttribute = sortList[0][0] + ":" + sortList[0][1]
    else:
        orderAttribute = None
    # Resolve the table-specific selection method (e.g. getTransformations).
    fcn = None
    fcnName = "get%s" % table
    if hasattr(self.transformationDB, fcnName) and callable(getattr(self.transformationDB, fcnName)):
        fcn = getattr(self.transformationDB, fcnName)
    if not fcn:
        return S_ERROR("Unable to invoke gTransformationDB.%s, it isn't a member function" % fcnName)
    res = fcn(condDict=selectDict, older=toDate, newer=fromDate, timeStamp=timeStamp,
              orderAttribute=orderAttribute)
    if not res['OK']:
        return res
    # The full list of rows is contained here.
    allRows = res['Records']
    resultDict = {}
    resultDict['TotalRecords'] = len(allRows)
    resultDict['ParameterNames'] = res['ParameterNames']
    # Find which element in the row tuple contains the requested status.
    if statusColumn not in resultDict['ParameterNames']:
        return S_ERROR("Provided status column not present")
    statusColumnIndex = resultDict['ParameterNames'].index(statusColumn)
    # Page window [ini, last) over the selected rows.
    if resultDict['TotalRecords'] == 0:
        return S_OK(resultDict)
    ini = startItem
    last = ini + maxItems
    if ini >= resultDict['TotalRecords']:
        return S_ERROR('Item number out of range')
    if last > resultDict['TotalRecords']:
        last = resultDict['TotalRecords']
    selectedRows = allRows[ini:last]
    resultDict['Records'] = selectedRows
    # Count statuses within the visible page only.
    statusDict = {}
    for row in selectedRows:
        status = row[statusColumnIndex]
        statusDict[status] = statusDict.setdefault(status, 0) + 1
    resultDict['Extras'] = statusDict
    # Obtain the distinct values of the selection parameters (best-effort).
    res = self.transformationDB.getTableDistinctAttributeValues(
        table, selectColumns, selectDict, older=toDate, newer=fromDate)
    # Fixed: the failure fallback used to be zip(selectColumns, []), which on
    # Python 3 is a lazy zip object (and empty anyway); use an empty list,
    # matching the Python 2 behaviour and keeping the value serialisable.
    distinctSelections = []
    if res['OK']:
        distinctSelections = res['Value']
    resultDict['Selections'] = distinctSelections
    return S_OK(resultDict)
types_getTransformationSummaryWeb = [dict, list, int, int]

def export_getTransformationSummaryWeb(self, selectDict, sortList, startItem, maxItems):
    """ Get the summary of the transformation information for a given page in the generic format """
    # Obtain the timing information from the selectDict
    # Legacy clients pass the window start as 'CreationDate'; it is used as
    # the FromDate fallback below.
    last_update = selectDict.get('CreationDate', None)
    if last_update:
        del selectDict['CreationDate']
    fromDate = selectDict.get('FromDate', None)
    if fromDate:
        del selectDict['FromDate']
    if not fromDate:
        fromDate = last_update
    toDate = selectDict.get('ToDate', None)
    if toDate:
        del selectDict['ToDate']
    # Sorting instructions. Only one for the moment.
    if sortList:
        orderAttribute = []
        for i in sortList:
            orderAttribute += [i[0] + ":" + i[1]]
    else:
        orderAttribute = None
    # Get the transformations that match the selection
    res = self.transformationDB.getTransformations(
        condDict=selectDict, older=toDate, newer=fromDate,
        orderAttribute=orderAttribute)
    if not res['OK']:
        return res
    ops = Operations()
    # Prepare the standard structure now within the resultDict dictionary
    resultDict = {}
    trList = res['Records']
    # Create the total records entry
    nTrans = len(trList)
    resultDict['TotalRecords'] = nTrans
    # Create the ParameterNames entry
    # As this list is a reference to the list in the DB, we cannot extend it, therefore copy it
    resultDict['ParameterNames'] = list(res['ParameterNames'])
    # Add the job states to the ParameterNames entry
    taskStateNames = TASKS_STATE_NAMES + ops.getValue('Transformations/AdditionalTaskStates', [])
    resultDict['ParameterNames'] += ['Jobs_' + x for x in taskStateNames]
    # Add the file states to the ParameterNames entry
    fileStateNames = FILES_STATE_NAMES + ops.getValue('Transformations/AdditionalFileStates', [])
    resultDict['ParameterNames'] += ['Files_' + x for x in fileStateNames]
    # Get the transformations which are within the selected window
    if nTrans == 0:
        return S_OK(resultDict)
    ini = startItem
    last = ini + maxItems
    if ini >= nTrans:
        return S_ERROR('Item number out of range')
    if last > nTrans:
        last = nTrans
    transList = trList[ini:last]
    statusDict = {}
    extendableTranfs = ops.getValue('Transformations/ExtendableTransfTypes',
                                    ['Simulation', 'MCsimulation'])
    givenUpFileStatus = ops.getValue('Transformations/GivenUpFileStatus',
                                     ['MissingInFC'])
    problematicStatuses = ops.getValue('Transformations/ProblematicStatuses',
                                       ['Problematic'])
    # Add specific information for each selected transformation
    for trans in transList:
        transDict = dict(zip(resultDict['ParameterNames'], trans))
        # Update the status counters
        status = transDict['Status']
        statusDict[status] = statusDict.setdefault(status, 0) + 1
        # Get the statistics on the number of jobs for the transformation
        transID = transDict['TransformationID']
        res = self.transformationDB.getTransformationTaskStats(transID)
        taskDict = {}
        if res['OK'] and res['Value']:
            taskDict = res['Value']
        for state in taskStateNames:
            # Extend the row in place so it lines up with ParameterNames.
            trans.append(taskDict.get(state, 0))
        # Get the statistics for the number of files for the transformation
        fileDict = {}
        transType = transDict['Type']
        # NOTE(review): transType.lower() is compared against the default list
        # entries 'Simulation'/'MCsimulation', which are capitalised and thus
        # can never match; verify the intended CS values are lower-cased.
        if transType.lower() in extendableTranfs:
            fileDict['PercentProcessed'] = '-'
        else:
            res = self.transformationDB.getTransformationStats(transID)
            if res['OK']:
                fileDict = res['Value']
                total = fileDict['Total']
                # Files in "given up" states are excluded from the denominator.
                for stat in givenUpFileStatus:
                    total -= fileDict.get(stat, 0)
                processed = fileDict.get('Processed', 0)
                # NOTE(review): produces a string like "12.3" when total > 0
                # but the float 0. when total == 0 — inconsistent value types;
                # confirm the web consumer copes with both.
                fileDict['PercentProcessed'] = "%.1f" % (int(processed * 1000. / total) / 10.) if total else 0.
        problematic = 0
        for stat in problematicStatuses:
            problematic += fileDict.get(stat, 0)
        fileDict['Problematic'] = problematic
        for state in fileStateNames:
            trans.append(fileDict.get(state, 0))
    resultDict['Records'] = transList
    resultDict['Extras'] = statusDict
    return S_OK(resultDict)
###########################################################################
| yujikato/DIRAC | src/DIRAC/TransformationSystem/Service/TransformationManagerHandler.py | Python | gpl-3.0 | 28,266 | [
"DIRAC"
] | f0773f1fd6a3611970b9f36e7a4e183b75b298c78a2f5d0b7cb079620b1a85a1 |
"""
Acceptance tests for Studio related to the container page.
The container page is used both for displaying units, and
for displaying containers within units.
"""
import datetime
import ddt
import six
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.create_mode import ModeCreationPage
from common.test.acceptance.pages.lms.staff_view import StaffCoursewarePage
from common.test.acceptance.pages.studio.container import ContainerPage
from common.test.acceptance.pages.studio.html_component_editor import HtmlXBlockEditorView
from common.test.acceptance.pages.studio.move_xblock import MoveModalView
from common.test.acceptance.pages.studio.utils import add_discussion
from common.test.acceptance.pages.studio.xblock_editor import XBlockEditorView, XBlockVisibilityEditorView
from common.test.acceptance.tests.helpers import create_user_partition_json
from openedx.core.lib.tests import attr
from xmodule.partitions.partitions import ENROLLMENT_TRACK_PARTITION_ID, MINIMUM_STATIC_PARTITION_ID, Group
from .base_studio_test import ContainerBase
class NestedVerticalTest(ContainerBase):
    """
    Base fixture for container-page tests: a unit containing a container
    with three nested verticals (Group A with two HTML items, an empty
    group, and Group B with two HTML items).
    """

    def populate_course_fixture(self, course_fixture):
        """
        Sets up a course structure with nested verticals.
        """
        # Display names referenced by the assertions in the subclasses.
        self.container_title = ""
        self.group_a = "Group A"
        self.group_b = "Group B"
        self.group_empty = "Group Empty"
        self.group_a_item_1 = "Group A Item 1"
        self.group_a_item_2 = "Group A Item 2"
        self.group_b_item_1 = "Group B Item 1"
        self.group_b_item_2 = "Group B Item 2"
        # Drag handle indices, in document order of the xblocks.
        self.group_a_handle = 0
        self.group_a_item_1_handle = 1
        self.group_a_item_2_handle = 2
        self.group_empty_handle = 3
        self.group_b_handle = 4
        self.group_b_item_1_handle = 5
        self.group_b_item_2_handle = 6
        # Action-button indices of the two items inside Group A.
        self.group_a_item_1_action_index = 0
        self.group_a_item_2_action_index = 1
        # Label Studio gives to a duplicated component.
        self.duplicate_label = u"Duplicate of '{0}'"
        self.discussion_label = "Discussion"
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('vertical', 'Test Container').add_children(
                            XBlockFixtureDesc('vertical', 'Group A').add_children(
                                XBlockFixtureDesc('html', self.group_a_item_1),
                                XBlockFixtureDesc('html', self.group_a_item_2)
                            ),
                            XBlockFixtureDesc('vertical', 'Group Empty'),
                            XBlockFixtureDesc('vertical', 'Group B').add_children(
                                XBlockFixtureDesc('html', self.group_b_item_1),
                                XBlockFixtureDesc('html', self.group_b_item_2)
                            )
                        )
                    )
                )
            )
        )
@attr(shard=1)
class AddComponentTest(NestedVerticalTest):
    """
    Tests of adding a component to the container page.
    """

    def _expected(self, container_items, group_a_items, group_b_items, group_empty_items):
        """Build the expected ordering from per-group item lists."""
        return [
            {self.container_title: container_items},
            {self.group_a: group_a_items},
            {self.group_b: group_b_items},
            {self.group_empty: group_empty_items},
        ]

    def add_and_verify(self, menu_index, expected_ordering):
        """Add a discussion via the menu at *menu_index* and check ordering."""
        self.do_action_and_verify(
            lambda container: add_discussion(container, menu_index),
            expected_ordering
        )

    def test_add_component_in_group(self):
        """A component added inside Group B appears at the end of that group."""
        group_b_menu = 2
        self.add_and_verify(
            group_b_menu,
            self._expected(
                [self.group_a, self.group_empty, self.group_b],
                [self.group_a_item_1, self.group_a_item_2],
                [self.group_b_item_1, self.group_b_item_2, self.discussion_label],
                [],
            ),
        )

    def test_add_component_in_empty_group(self):
        """A component added to the empty group becomes its only child."""
        group_empty_menu = 1
        self.add_and_verify(
            group_empty_menu,
            self._expected(
                [self.group_a, self.group_empty, self.group_b],
                [self.group_a_item_1, self.group_a_item_2],
                [self.group_b_item_1, self.group_b_item_2],
                [self.discussion_label],
            ),
        )

    def test_add_component_in_container(self):
        """A component added at container level lands after the groups."""
        container_menu = 3
        self.add_and_verify(
            container_menu,
            self._expected(
                [self.group_a, self.group_empty, self.group_b, self.discussion_label],
                [self.group_a_item_1, self.group_a_item_2],
                [self.group_b_item_1, self.group_b_item_2],
                [],
            ),
        )
@attr(shard=1)
class DuplicateComponentTest(NestedVerticalTest):
    """
    Tests of duplicating a component on the container page.
    """

    def _expected(self, group_a_items):
        """Expected ordering where only Group A's contents vary."""
        return [
            {self.container_title: [self.group_a, self.group_empty, self.group_b]},
            {self.group_a: group_a_items},
            {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
            {self.group_empty: []},
        ]

    def duplicate_and_verify(self, source_index, expected_ordering):
        """Duplicate the component at *source_index* and check ordering."""
        self.do_action_and_verify(
            lambda container: container.duplicate(source_index),
            expected_ordering
        )

    def test_duplicate_first_in_group(self):
        """Duplicating the first item places the copy right after it."""
        copy_label = self.duplicate_label.format(self.group_a_item_1)
        self.duplicate_and_verify(
            self.group_a_item_1_action_index,
            self._expected([self.group_a_item_1, copy_label, self.group_a_item_2]),
        )

    def test_duplicate_second_in_group(self):
        """Duplicating the last item appends the copy to the group."""
        copy_label = self.duplicate_label.format(self.group_a_item_2)
        self.duplicate_and_verify(
            self.group_a_item_2_action_index,
            self._expected([self.group_a_item_1, self.group_a_item_2, copy_label]),
        )

    def test_duplicate_the_duplicate(self):
        """Duplicating a duplicate inserts the new copy after the first copy."""
        first_copy = self.duplicate_label.format(self.group_a_item_1)
        second_copy = self.duplicate_label.format(first_copy)

        def duplicate_twice(container):
            # The first duplicate shifts subsequent action indices by one.
            container.duplicate(self.group_a_item_1_action_index)
            container.duplicate(self.group_a_item_1_action_index + 1)

        self.do_action_and_verify(
            duplicate_twice,
            self._expected([self.group_a_item_1, first_copy, second_copy, self.group_a_item_2]),
        )
@attr(shard=1)
class DeleteComponentTest(NestedVerticalTest):
    """
    Tests of deleting a component from the container page.
    """

    def delete_and_verify(self, source_index, expected_ordering):
        """Delete the component at *source_index* and check ordering."""
        self.do_action_and_verify(
            lambda container: container.delete(source_index),
            expected_ordering
        )

    def test_delete_first_in_group(self):
        """Deleting Group A's first item leaves only its second item."""
        expected_ordering = [
            {self.container_title: [self.group_a, self.group_empty, self.group_b]},
            {self.group_a: [self.group_a_item_2]},
            {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
            {self.group_empty: []},
        ]
        # Group A itself has a delete icon now, so item_1 is index 1 instead of 0.
        group_a_item_1_delete_index = 1
        self.delete_and_verify(group_a_item_1_delete_index, expected_ordering)
@attr(shard=19)
class EditContainerTest(NestedVerticalTest):
    """
    Tests of editing a container.
    """

    def modify_display_name_and_verify(self, component):
        """
        Change *component*'s display name through the editor and assert it stuck.
        """
        new_name = 'modified'
        self.assertNotEqual(component.name, new_name)
        component.edit()
        editor = XBlockEditorView(self.browser, component.locator)
        editor.set_field_value_and_save('Display Name', new_name)
        self.assertEqual(component.name, new_name)

    def test_edit_container_on_unit_page(self):
        """
        Test the "edit" button on a container appearing on the unit page.
        """
        unit_page = self.go_to_unit_page()
        self.modify_display_name_and_verify(unit_page.xblocks[1])

    def test_edit_container_on_container_page(self):
        """
        Test the "edit" button on a container appearing on the container page.
        """
        self.modify_display_name_and_verify(self.go_to_nested_container_page())
class BaseGroupConfigurationsTest(ContainerBase):
ALL_LEARNERS_AND_STAFF = XBlockVisibilityEditorView.ALL_LEARNERS_AND_STAFF
CHOOSE_ONE = "Select a group type"
CONTENT_GROUP_PARTITION = XBlockVisibilityEditorView.CONTENT_GROUP_PARTITION
ENROLLMENT_TRACK_PARTITION = XBlockVisibilityEditorView.ENROLLMENT_TRACK_PARTITION
MISSING_GROUP_LABEL = 'Deleted Group\nThis group no longer exists. Choose another group or remove the access restriction.'
VALIDATION_ERROR_LABEL = 'This component has validation issues.'
VALIDATION_ERROR_MESSAGE = "Error:\nThis component's access settings refer to deleted or invalid groups."
GROUP_VISIBILITY_MESSAGE = 'Access to some content in this unit is restricted to specific groups of learners.'
MODAL_NOT_RESTRICTED_MESSAGE = "Access is not restricted"
def setUp(self):
    """Create the course fixture and add a cohort-schemed user partition."""
    super(BaseGroupConfigurationsTest, self).setUp()
    # Set up a cohort-schemed user partition.
    # Group IDs start at MINIMUM_STATIC_PARTITION_ID so they do not clash
    # with dynamic partitions such as enrollment tracks.
    self.id_base = MINIMUM_STATIC_PARTITION_ID
    self.course_fixture._update_xblock(self.course_fixture._course_location, {
        "metadata": {
            u"user_partitions": [
                create_user_partition_json(
                    self.id_base,
                    self.CONTENT_GROUP_PARTITION,
                    'Content Group Partition',
                    [
                        Group(self.id_base + 1, 'Dogs'),
                        Group(self.id_base + 2, 'Cats')
                    ],
                    scheme="cohort"
                )
            ],
        },
    })
    self.container_page = self.go_to_unit_page()
    # xblocks[1] is the HTML component created by populate_course_fixture
    # (index 0 is presumably the unit itself — confirm against the page object).
    self.html_component = self.container_page.xblocks[1]
def populate_course_fixture(self, course_fixture):
    """
    Populate a simple course: one section, subsection, unit, and HTML component.
    """
    course_fixture.add_children(
        XBlockFixtureDesc('chapter', 'Test Section').add_children(
            XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                    XBlockFixtureDesc('html', 'Html Component')
                )
            )
        )
    )
def edit_component_visibility(self, component):
    """
    Open the visibility editor for *component* and return its view object.
    """
    component.edit_visibility()
    locator = component.locator
    return XBlockVisibilityEditorView(self.browser, locator)
def edit_unit_visibility(self, unit):
    """
    Open the visibility editor for *unit* and return its view object.
    """
    unit.edit_visibility()
    locator = unit.locator
    return XBlockVisibilityEditorView(self.browser, locator)
def verify_current_groups_message(self, visibility_editor, expected_current_groups):
    """
    Assert the dialog header reflects the expected visibility restriction.
    """
    if expected_current_groups == self.ALL_LEARNERS_AND_STAFF:
        expected = "Access is not restricted"
    else:
        expected = u"Access is restricted to: {groups}".format(groups=expected_current_groups)
    self.assertEqual(expected, visibility_editor.current_groups_message)
def verify_selected_partition_scheme(self, visibility_editor, expected_scheme):
    """
    Assert that *expected_scheme* is the currently selected partition scheme.
    """
    actual_scheme = visibility_editor.selected_partition_scheme
    six.assertCountEqual(self, expected_scheme, actual_scheme)
def verify_selected_groups(self, visibility_editor, expected_groups):
    """
    Assert that exactly *expected_groups* are selected in the dialog.
    """
    selected_labels = [group.text for group in visibility_editor.selected_groups]
    six.assertCountEqual(self, expected_groups, selected_labels)
def select_and_verify_saved(self, component, partition_label, groups=None):
    """
    Edit the visibility of an xblock on the container page and
    verify that the edit persists.

    Arguments:
        component: the xblock whose visibility is edited.
        partition_label: label of the partition scheme to select.
        groups: labels which should be clicked, but not necessarily
            checked; defaults to no groups.
    """
    # Fix: avoid the shared mutable default argument ([]); normalize
    # a None default to a fresh empty list instead.
    groups = groups or []
    # Make initial edit(s) and save.
    visibility_editor = self.edit_component_visibility(component)
    visibility_editor.select_groups_in_partition_scheme(partition_label, groups)
    # Re-open the modal and inspect its selected inputs. If no groups were
    # selected, "All Learners" should be the selected partition scheme, and
    # "Select a group type" is shown in the select.
    if not groups:
        partition_label = self.CHOOSE_ONE
    visibility_editor = self.edit_component_visibility(component)
    self.verify_selected_partition_scheme(visibility_editor, partition_label)
    self.verify_selected_groups(visibility_editor, groups)
    visibility_editor.save()
def select_and_verify_unit_group_access(self, unit, partition_label, groups=None):
    """
    Edit the visibility of an xblock on the unit page and
    verify that the edit persists.

    Arguments:
        unit: the unit whose access settings are edited.
        partition_label: label of the partition scheme to select.
        groups: labels which should be clicked, but not necessarily
            checked; defaults to no groups.
    """
    # Fix: avoid the shared mutable default argument ([]); normalize
    # a None default to a fresh empty list instead.
    groups = groups or []
    unit_access_editor = self.edit_unit_visibility(unit)
    unit_access_editor.select_groups_in_partition_scheme(partition_label, groups)
    # Re-open the modal: with no groups selected the scheme select falls
    # back to the "Select a group type" placeholder.
    if not groups:
        partition_label = self.CHOOSE_ONE
    unit_access_editor = self.edit_unit_visibility(unit)
    self.verify_selected_partition_scheme(unit_access_editor, partition_label)
    self.verify_selected_groups(unit_access_editor, groups)
    unit_access_editor.save()
def verify_component_validation_error(self, component):
    """
    Assert that ``component`` shows the expected validation error
    label and message.
    """
    self.assertTrue(component.has_validation_error)
    self.assertEqual(self.VALIDATION_ERROR_LABEL, component.validation_error_text)
    self.assertEqual(component.validation_error_messages, [self.VALIDATION_ERROR_MESSAGE])
def verify_visibility_set(self, component, is_set):
    """
    Assert that the container page reflects whether the component's
    visibility settings have been edited: shows the group visibility
    message when ``is_set`` is True, and no such information otherwise.
    """
    sidebar_message = self.container_page.sidebar_visibility_message
    if is_set:
        self.assertIn(self.GROUP_VISIBILITY_MESSAGE, sidebar_message)
        self.assertTrue(component.has_group_visibility_set)
    else:
        self.assertNotIn(self.GROUP_VISIBILITY_MESSAGE, sidebar_message)
        self.assertFalse(component.has_group_visibility_set)
def verify_unit_visibility_set(self, unit, set_groups=None):
    """
    Verify that the unit access modal shows that visibility settings
    have been edited when ``set_groups`` are given; otherwise verify
    that the modal shows that access is not restricted.

    Arguments:
        unit: the unit whose visibility modal is inspected.
        set_groups: group names expected in the modal's current-groups
            message; defaults to none.
    """
    # Fix: avoid the shared mutable default argument ([]). None and an
    # empty list are treated identically below (both falsy).
    unit_access_editor = self.edit_unit_visibility(unit)
    if set_groups:
        # Group names are rendered as a comma-separated list.
        self.assertIn(", ".join(set_groups), unit_access_editor.current_groups_message)
    else:
        self.assertEqual(self.MODAL_NOT_RESTRICTED_MESSAGE, unit_access_editor.current_groups_message)
    unit_access_editor.cancel()
def update_component(self, component, metadata):
    """
    Push new metadata to a component via the course fixture, then
    reload the browser and wait for the container page.
    """
    payload = {'metadata': metadata}
    self.course_fixture._update_xblock(component.locator, payload)
    self.browser.refresh()
    self.container_page.wait_for_page()
def remove_missing_groups(self, visibility_editor, component):
    """
    Deselect every missing ("Deleted Group") entry for a component and
    save. Afterwards, verify that the modal no longer lists any missing
    group and that the component shows no validation error.
    """
    missing_options = [
        option for option in visibility_editor.all_group_options
        if option.text == self.MISSING_GROUP_LABEL
    ]
    for option in missing_options:
        option.click()
    visibility_editor.save()
    visibility_editor = self.edit_component_visibility(component)
    remaining_labels = [item.text for item in visibility_editor.all_group_options]
    self.assertNotIn(self.MISSING_GROUP_LABEL, remaining_labels)
    visibility_editor.cancel()
    self.assertFalse(component.has_validation_error)
@attr(shard=21)
class UnitAccessContainerTest(BaseGroupConfigurationsTest):
    """
    Tests unit level access: restricting a whole unit (rather than an
    individual component) on the course outline, and checking the access
    message shown on the unit's container page.
    """
    # Message expected on the container page when the unit itself is
    # restricted to the 'Dogs' content group (created in the base fixture).
    GROUP_RESTRICTED_MESSAGE = 'Access to this unit is restricted to: Dogs'

    def _toggle_container_unit_access(self, group_ids, unit):
        """
        Toggle the unit level access on the course outline page
        for the given content group ids.
        """
        unit.toggle_unit_access('Content Groups', group_ids)

    def _verify_container_unit_access_message(self, group_ids, expected_message):
        """
        Check that the container page displays the correct unit
        access message after restricting the first unit to `group_ids`.
        """
        self.outline.visit()
        self.outline.expand_all_subsections()
        # The base fixture creates exactly one section/subsection/unit.
        unit = self.outline.section_at(0).subsection_at(0).unit_at(0)
        self._toggle_container_unit_access(group_ids, unit)
        container_page = self.go_to_unit_page()
        self.assertEqual(str(container_page.get_xblock_access_message()), expected_message)

    def test_default_selection(self):
        """
        Tests that no message is displayed when there are no
        restrictions on the unit or components.
        """
        self._verify_container_unit_access_message([], '')

    def test_restricted_components_message(self):
        """
        Test that the proper message is displayed when access to
        some components is restricted.
        """
        container_page = self.go_to_unit_page()
        html_component = container_page.xblocks[1]
        # Initially set visibility to Dog group.
        # id_base/id_base+1 come from the base class's content group setup.
        self.update_component(
            html_component,
            {'group_access': {self.id_base: [self.id_base + 1]}}
        )
        self._verify_container_unit_access_message([], self.GROUP_VISIBILITY_MESSAGE)

    def test_restricted_access_message(self):
        """
        Test that the proper message is displayed when access to the
        unit is restricted to a particular group.
        """
        self._verify_container_unit_access_message([self.id_base + 1], self.GROUP_RESTRICTED_MESSAGE)
@attr(shard=9)
class ContentGroupVisibilityModalTest(BaseGroupConfigurationsTest):
    """
    Tests of the visibility settings modal for components on the unit
    page (content groups).

    Relies on helpers and fixture data (self.html_component,
    self.container_page, 'Dogs'/'Cats' groups) set up by the base class.
    """

    def test_default_selection(self):
        """
        Scenario: The component visibility modal selects visible to all by default.
        Given I have a unit with one component
        When I go to the container page for that unit
        And I open the visibility editor modal for that unit's component
        Then the default visibility selection should be 'All Students and Staff'
        And the container page should not display the content visibility warning
        """
        visibility_dialog = self.edit_component_visibility(self.html_component)
        self.verify_current_groups_message(visibility_dialog, self.ALL_LEARNERS_AND_STAFF)
        self.verify_selected_partition_scheme(visibility_dialog, self.CHOOSE_ONE)
        visibility_dialog.cancel()
        self.verify_visibility_set(self.html_component, False)

    def test_reset_to_all_students_and_staff(self):
        """
        Scenario: The component visibility modal can be set to be visible to all students and staff.
        Given I have a unit with one component
        When I go to the container page for that unit
        Then the container page should not display the content visibility warning by default.
        If I then restrict access and save, and then I open the visibility editor modal for that unit's component
        And I select 'All Students and Staff'
        And I save the modal
        Then the visibility selection should be 'All Students and Staff'
        And the container page should still not display the content visibility warning
        """
        # Restrict first, then reset to all learners and staff.
        self.select_and_verify_saved(self.html_component, self.CONTENT_GROUP_PARTITION, ['Dogs'])
        self.select_and_verify_saved(self.html_component, self.ALL_LEARNERS_AND_STAFF)
        self.verify_visibility_set(self.html_component, False)

    def test_reset_unit_access_to_all_students_and_staff(self):
        """
        Scenario: The unit visibility modal can be set to be visible to all students and staff.
        Given I have a unit
        When I go to the container page for that unit
        And I open the visibility editor modal for that unit
        And I select 'Dogs'
        And I save the modal
        Then I re-open the modal, the unit access modal should display the content visibility settings
        Then after re-opening the modal again
        And I select 'All Learners and Staff'
        And I save the modal
        And I re-open the modal, the unit access modal should display that no content is restricted
        """
        self.select_and_verify_unit_group_access(self.container_page, self.CONTENT_GROUP_PARTITION, ['Dogs'])
        self.verify_unit_visibility_set(self.container_page, set_groups=["Dogs"])
        self.select_and_verify_unit_group_access(self.container_page, self.ALL_LEARNERS_AND_STAFF)
        self.verify_unit_visibility_set(self.container_page)

    def test_select_single_content_group(self):
        """
        Scenario: The component visibility modal can be set to be visible to one content group.
        Given I have a unit with one component
        When I go to the container page for that unit
        And I open the visibility editor modal for that unit's component
        And I select 'Dogs'
        And I save the modal
        Then the visibility selection should be 'Dogs' and 'Specific Content Groups'
        """
        self.select_and_verify_saved(self.html_component, self.CONTENT_GROUP_PARTITION, ['Dogs'])

    def test_select_multiple_content_groups(self):
        """
        Scenario: The component visibility modal can be set to be visible to multiple content groups.
        Given I have a unit with one component
        When I go to the container page for that unit
        And I open the visibility editor modal for that unit's component
        And I select 'Dogs' and 'Cats'
        And I save the modal
        Then the visibility selection should be 'Dogs', 'Cats', and 'Specific Content Groups'
        """
        self.select_and_verify_saved(self.html_component, self.CONTENT_GROUP_PARTITION, ['Dogs', 'Cats'])

    def test_missing_groups(self):
        """
        Scenario: The component visibility modal shows a validation error when visibility is set to multiple unknown
        group ids.
        Given I have a unit with one component
        And that component's group access specifies multiple invalid group ids
        When I go to the container page for that unit
        Then I should see a validation error message on that unit's component
        And I open the visibility editor modal for that unit's component
        Then I should see that I have selected multiple deleted groups
        And the container page should display the content visibility warning
        And I de-select the missing groups
        And I save the modal
        Then the visibility selection should be 'All Students and Staff'
        And I should not see any validation errors on the component
        And the container page should not display the content visibility warning
        """
        # id_base + 3 and id_base + 4 do not correspond to any existing
        # group (only +1 'Dogs' and +2 'Cats' exist).
        self.update_component(
            self.html_component,
            {'group_access': {self.id_base: [self.id_base + 3, self.id_base + 4]}}
        )
        self._verify_and_remove_missing_content_groups(
            "Deleted Group, Deleted Group",
            [self.MISSING_GROUP_LABEL] * 2
        )
        self.verify_visibility_set(self.html_component, False)

    def test_found_and_missing_groups(self):
        """
        Scenario: The component visibility modal shows a validation error when visibility is set to multiple unknown
        group ids and multiple known group ids.
        Given I have a unit with one component
        And that component's group access specifies multiple invalid and valid group ids
        When I go to the container page for that unit
        Then I should see a validation error message on that unit's component
        And I open the visibility editor modal for that unit's component
        Then I should see that I have selected multiple deleted groups
        And then if I de-select the missing groups
        And I save the modal
        Then the visibility selection should be the names of the valid groups.
        And I should not see any validation errors on the component
        """
        # Mix of valid (+1, +2) and invalid (+3, +4) group ids.
        self.update_component(
            self.html_component,
            {'group_access': {self.id_base: [self.id_base + 1, self.id_base + 2, self.id_base + 3, self.id_base + 4]}}
        )
        self._verify_and_remove_missing_content_groups(
            'Dogs, Cats, Deleted Group, Deleted Group',
            ['Dogs', 'Cats'] + [self.MISSING_GROUP_LABEL] * 2
        )
        # After removal, only the valid groups should remain selected.
        visibility_editor = self.edit_component_visibility(self.html_component)
        self.verify_selected_partition_scheme(visibility_editor, self.CONTENT_GROUP_PARTITION)
        expected_groups = ['Dogs', 'Cats']
        self.verify_current_groups_message(visibility_editor, ", ".join(expected_groups))
        self.verify_selected_groups(visibility_editor, expected_groups)

    def _verify_and_remove_missing_content_groups(self, current_groups_message, all_group_labels):
        """
        Verify the validation error and the listed (possibly deleted) groups,
        then deselect the missing groups and confirm the error clears.
        """
        self.verify_component_validation_error(self.html_component)
        visibility_editor = self.edit_component_visibility(self.html_component)
        self.verify_selected_partition_scheme(visibility_editor, self.CONTENT_GROUP_PARTITION)
        self.verify_current_groups_message(visibility_editor, current_groups_message)
        self.verify_selected_groups(visibility_editor, all_group_labels)
        self.remove_missing_groups(visibility_editor, self.html_component)
@attr(shard=20)
class EnrollmentTrackVisibilityModalTest(BaseGroupConfigurationsTest):
    """
    Tests of the visibility settings modal for components on the unit
    page (enrollment tracks).
    """
    # Display names for the two course modes created in setUp.
    AUDIT_TRACK = "Audit Track"
    VERIFIED_TRACK = "Verified Track"

    def setUp(self):
        """
        Create audit and verified course modes, then restrict the HTML
        component to the Verified track as the starting state for tests.
        """
        super(EnrollmentTrackVisibilityModalTest, self).setUp()
        # Add an audit mode to the course
        ModeCreationPage(self.browser, self.course_id, mode_slug=u'audit', mode_display_name=self.AUDIT_TRACK).visit()
        # Add a verified mode to the course
        ModeCreationPage(
            self.browser, self.course_id, mode_slug=u'verified',
            mode_display_name=self.VERIFIED_TRACK, min_price=10
        ).visit()
        self.container_page = self.go_to_unit_page()
        self.html_component = self.container_page.xblocks[1]
        # Initially set visibility to Verified track.
        self.update_component(
            self.html_component,
            {'group_access': {ENROLLMENT_TRACK_PARTITION_ID: [2]}}  # "2" is Verified
        )

    # NOTE(review): method name carries a pre-existing typo ("messsage");
    # kept as-is because tests below call it by this name.
    def verify_component_group_visibility_messsage(self, component, expected_groups):
        """
        Verifies that the group visibility message below the component display name is correct.
        A None/empty `expected_groups` means no message should be shown.
        """
        if not expected_groups:
            self.assertIsNone(component.get_partition_group_message)
        else:
            self.assertEqual("Access restricted to: " + expected_groups, component.get_partition_group_message)

    def test_setting_enrollment_tracks(self):
        """
        Test that enrollment track groups can be selected.
        """
        # Verify that the "Verified" Group is shown on the unit page (under the unit display name).
        self.verify_component_group_visibility_messsage(self.html_component, "Verified Track")
        # Open dialog with "Verified" already selected.
        visibility_editor = self.edit_component_visibility(self.html_component)
        self.verify_current_groups_message(visibility_editor, self.VERIFIED_TRACK)
        self.verify_selected_partition_scheme(
            visibility_editor,
            self.ENROLLMENT_TRACK_PARTITION
        )
        self.verify_selected_groups(visibility_editor, [self.VERIFIED_TRACK])
        visibility_editor.cancel()
        # Select "All Learners and Staff". The helper method saves the change,
        # then reopens the dialog to verify that it was persisted.
        self.select_and_verify_saved(self.html_component, self.ALL_LEARNERS_AND_STAFF)
        self.verify_component_group_visibility_messsage(self.html_component, None)
        # Select "Audit" enrollment track. The helper method saves the change,
        # then reopens the dialog to verify that it was persisted.
        self.select_and_verify_saved(self.html_component, self.ENROLLMENT_TRACK_PARTITION, [self.AUDIT_TRACK])
        self.verify_component_group_visibility_messsage(self.html_component, "Audit Track")
@attr(shard=16)
class UnitPublishingTest(ContainerBase):
    """
    Tests of the publishing control and related widgets on the Unit page.
    """
    # Full expected text of the publishing-status sidebar for each state.
    PUBLISHED_STATUS = "Publishing Status\nPublished (not yet released)"
    PUBLISHED_LIVE_STATUS = "Publishing Status\nPublished and Live"
    DRAFT_STATUS = "Publishing Status\nDraft (Unpublished changes)"
    LOCKED_STATUS = "Publishing Status\nVisible to Staff Only"
    # Release-date title prefixes shown in the publishing sidebar.
    RELEASE_TITLE_RELEASED = "RELEASED:"
    RELEASE_TITLE_RELEASE = "RELEASE:"
    # Prefixes of the "last published" / "last saved" sidebar messages.
    LAST_PUBLISHED = 'Last published'
    LAST_SAVED = 'Draft saved on'

    def populate_course_fixture(self, course_fixture):
        """
        Sets up a course structure with a unit and a single HTML child.
        Also creates sections covering the release/lock states the tests
        need: unlocked (past release), locked, and unreleased (future).
        """
        self.html_content = '<p><strong>Body of HTML Unit.</strong></p>'
        self.courseware = CoursewarePage(self.browser, self.course_id)
        past_start_date = datetime.datetime(1974, 6, 22)
        self.past_start_date_text = "Jun 22, 1974 at 00:00 UTC"
        future_start_date = datetime.datetime(2100, 9, 13)
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('html', 'Test html', data=self.html_content)
                    )
                )
            ),
            XBlockFixtureDesc(
                'chapter',
                'Unlocked Section',
                metadata={'start': past_start_date.isoformat()}
            ).add_children(
                XBlockFixtureDesc('sequential', 'Unlocked Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Unlocked Unit').add_children(
                        XBlockFixtureDesc('problem', '<problem></problem>', data=self.html_content)
                    )
                )
            ),
            XBlockFixtureDesc('chapter', 'Section With Locked Unit').add_children(
                XBlockFixtureDesc(
                    'sequential',
                    'Subsection With Locked Unit',
                    metadata={'start': past_start_date.isoformat()}
                ).add_children(
                    XBlockFixtureDesc(
                        'vertical',
                        'Locked Unit',
                        metadata={'visible_to_staff_only': True}
                    ).add_children(
                        XBlockFixtureDesc('discussion', '', data=self.html_content)
                    )
                )
            ),
            XBlockFixtureDesc(
                'chapter',
                'Unreleased Section',
                metadata={'start': future_start_date.isoformat()}
            ).add_children(
                XBlockFixtureDesc('sequential', 'Unreleased Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Unreleased Unit')
                )
            )
        )

    def test_publishing(self):
        """
        Scenario: The publish title changes based on whether or not draft content exists
        Given I have a published unit with no unpublished changes
        When I go to the unit page in Studio
        Then the title in the Publish information box is "Published and Live"
        And the Publish button is disabled
        And the last published text contains "Last published"
        And the last saved text contains "Last published"
        And when I add a component to the unit
        Then the title in the Publish information box is "Draft (Unpublished changes)"
        And the last saved text contains "Draft saved on"
        And the Publish button is enabled
        And when I click the Publish button
        Then the title in the Publish information box is "Published and Live"
        And the last published text contains "Last published"
        And the last saved text contains "Last published"
        """
        unit = self.go_to_unit_page()
        unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
        # Start date set in course fixture to 1970.
        self._verify_release_date_info(
            unit, self.RELEASE_TITLE_RELEASED, 'Jan 01, 1970 at 00:00 UTC\nwith Section "Test Section"'
        )
        self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_PUBLISHED)
        # Should not be able to click on Publish action -- but I don't know how to test that it is not clickable.
        # TODO: continue discussion with Muhammad and Jay about this.
        # Add a component to the page so it will have unpublished changes.
        add_discussion(unit)
        unit.verify_publish_title(self.DRAFT_STATUS)
        self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_SAVED)
        unit.publish()
        unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
        self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_PUBLISHED)

    def test_discard_changes(self):
        """
        Scenario: The publish title changes after "Discard Changes" is clicked
        Given I have a published unit with no unpublished changes
        When I go to the unit page in Studio
        Then the Discard Changes button is disabled
        And I add a component to the unit
        Then the title in the Publish information box is "Draft (Unpublished changes)"
        And the Discard Changes button is enabled
        And when I click the Discard Changes button
        Then the title in the Publish information box is "Published and Live"
        """
        unit = self.go_to_unit_page()
        add_discussion(unit)
        unit.verify_publish_title(self.DRAFT_STATUS)
        unit.discard_changes()
        unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)

    def test_view_live_no_changes(self):
        """
        Scenario: "View Live" shows published content in LMS
        Given I have a published unit with no unpublished changes
        When I go to the unit page in Studio
        Then the View Live button is enabled
        And when I click on the View Live button
        Then I see the published content in LMS
        """
        unit = self.go_to_unit_page()
        self._view_published_version(unit)
        self._verify_components_visible(['html'])

    def test_view_live_changes(self):
        """
        Scenario: "View Live" does not show draft content in LMS
        Given I have a published unit with no unpublished changes
        When I go to the unit page in Studio
        And when I add a component to the unit
        And when I click on the View Live button
        Then I see the published content in LMS
        And I do not see the unpublished component
        """
        unit = self.go_to_unit_page()
        add_discussion(unit)
        self._view_published_version(unit)
        self._verify_components_visible(['html'])
        self.assertEqual(self.html_content, self.courseware.xblock_component_html_content(0))

    def test_view_live_after_publish(self):
        """
        Scenario: "View Live" shows newly published content
        Given I have a published unit with no unpublished changes
        When I go to the unit page in Studio
        And when I add a component to the unit
        And when I click the Publish button
        And when I click on the View Live button
        Then I see the newly published component
        """
        unit = self.go_to_unit_page()
        add_discussion(unit)
        unit.publish()
        self._view_published_version(unit)
        self._verify_components_visible(['html', 'discussion'])

    def test_initially_unlocked_visible_to_students(self):
        """
        Scenario: An unlocked unit with release date in the past is visible to students
        Given I have a published unlocked unit with release date in the past
        When I go to the unit page in Studio
        Then the unit has a warning that it is visible to students
        And it is marked as "RELEASED" with release date in the past visible
        And when I click on the View Live Button
        And when I view the course as a student
        Then I see the content in the unit
        """
        unit = self.go_to_unit_page("Unlocked Section", "Unlocked Subsection", "Unlocked Unit")
        unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
        self.assertTrue(unit.currently_visible_to_students)
        self._verify_release_date_info(
            unit, self.RELEASE_TITLE_RELEASED, self.past_start_date_text + '\n' + 'with Section "Unlocked Section"'
        )
        self._view_published_version(unit)
        self._verify_student_view_visible(['problem'])

    def test_locked_visible_to_staff_only(self):
        """
        Scenario: After locking a unit with release date in the past, it is only visible to staff
        Given I have a published unlocked unit with release date in the past
        When I go to the unit page in Studio
        And when I select "Hide from students"
        Then the unit does not have a warning that it is visible to students
        And the unit does not display inherited staff lock
        And when I click on the View Live Button
        Then I see the content in the unit when logged in as staff
        And when I view the course as a student
        Then I do not see any content in the unit
        """
        unit = self.go_to_unit_page("Unlocked Section", "Unlocked Subsection", "Unlocked Unit")
        checked = unit.toggle_staff_lock()
        self.assertTrue(checked)
        self.assertFalse(unit.currently_visible_to_students)
        self.assertFalse(unit.shows_inherited_staff_lock())
        unit.verify_publish_title(self.LOCKED_STATUS)
        self._view_published_version(unit)
        # Will initially be in staff view, locked component should be visible.
        self._verify_components_visible(['problem'])
        # Switch to student view and verify not visible
        self._verify_student_view_locked()

    def test_initially_locked_not_visible_to_students(self):
        """
        Scenario: A locked unit with release date in the past is not visible to students
        Given I have a published locked unit with release date in the past
        When I go to the unit page in Studio
        Then the unit does not have a warning that it is visible to students
        And it is marked as "RELEASE" with release date in the past visible
        And when I click on the View Live Button
        And when I view the course as a student
        Then I do not see any content in the unit
        """
        unit = self.go_to_unit_page("Section With Locked Unit", "Subsection With Locked Unit", "Locked Unit")
        unit.verify_publish_title(self.LOCKED_STATUS)
        self.assertFalse(unit.currently_visible_to_students)
        self._verify_release_date_info(
            unit, self.RELEASE_TITLE_RELEASE,
            self.past_start_date_text + '\n' + 'with Subsection "Subsection With Locked Unit"'
        )
        self._view_published_version(unit)
        self._verify_student_view_locked()

    def test_unlocked_visible_to_all(self):
        """
        Scenario: After unlocking a unit with release date in the past, it is visible to both students and staff
        Given I have a published unlocked unit with release date in the past
        When I go to the unit page in Studio
        And when I deselect "Hide from students"
        Then the unit does have a warning that it is visible to students
        And when I click on the View Live Button
        Then I see the content in the unit when logged in as staff
        And when I view the course as a student
        Then I see the content in the unit
        """
        unit = self.go_to_unit_page("Section With Locked Unit", "Subsection With Locked Unit", "Locked Unit")
        checked = unit.toggle_staff_lock()
        self.assertFalse(checked)
        unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
        self.assertTrue(unit.currently_visible_to_students)
        self._view_published_version(unit)
        # Will initially be in staff view, components always visible.
        self._verify_components_visible(['discussion'])
        # Switch to student view and verify visible.
        self._verify_student_view_visible(['discussion'])

    def test_explicit_lock_overrides_implicit_subsection_lock_information(self):
        """
        Scenario: A unit's explicit staff lock hides its inherited subsection staff lock information
        Given I have a course with sections, subsections, and units
        And I have enabled explicit staff lock on a subsection
        When I visit the unit page
        Then the unit page shows its inherited staff lock
        And I enable explicit staff locking
        Then the unit page does not show its inherited staff lock
        And when I disable explicit staff locking
        Then the unit page now shows its inherited staff lock
        """
        self.outline.visit()
        self.outline.expand_all_subsections()
        subsection = self.outline.section_at(0).subsection_at(0)
        unit = subsection.unit_at(0)
        subsection.set_staff_lock(True)
        unit_page = unit.go_to()
        self._verify_explicit_lock_overrides_implicit_lock_information(unit_page)

    def test_explicit_lock_overrides_implicit_section_lock_information(self):
        """
        Scenario: A unit's explicit staff lock hides its inherited subsection staff lock information
        Given I have a course with sections, subsections, and units
        And I have enabled explicit staff lock on a section
        When I visit the unit page
        Then the unit page shows its inherited staff lock
        And I enable explicit staff locking
        Then the unit page does not show its inherited staff lock
        And when I disable explicit staff locking
        Then the unit page now shows its inherited staff lock
        """
        self.outline.visit()
        self.outline.expand_all_subsections()
        section = self.outline.section_at(0)
        unit = section.subsection_at(0).unit_at(0)
        section.set_staff_lock(True)
        unit_page = unit.go_to()
        self._verify_explicit_lock_overrides_implicit_lock_information(unit_page)

    def test_cancel_does_not_create_draft(self):
        """
        Scenario: Editing a component and then canceling does not create a draft version (TNL-399)
        Given I have a published unit with no unpublished changes
        When I go to the unit page in Studio
        And edit the content of an HTML component and then press cancel
        Then the content does not change
        And the title in the Publish information box is "Published and Live"
        And when I reload the page
        Then the title in the Publish information box is "Published and Live"
        """
        unit = self.go_to_unit_page()
        component = unit.xblocks[1]
        component.edit()
        HtmlXBlockEditorView(self.browser, component.locator).set_content_and_cancel("modified content")
        self.assertEqual(component.student_content, "Body of HTML Unit.")
        unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
        # Reload to make sure no draft was persisted server-side.
        self.browser.refresh()
        unit.wait_for_page()
        unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)

    def test_delete_child_in_published_unit(self):
        """
        Scenario: A published unit can be published again after deleting a child
        Given I have a published unit with no unpublished changes
        When I go to the unit page in Studio
        And delete the only component
        Then the title in the Publish information box is "Draft (Unpublished changes)"
        And when I click the Publish button
        Then the title in the Publish information box is "Published and Live"
        And when I click the View Live button
        Then I see an empty unit in LMS
        """
        unit = self.go_to_unit_page()
        unit.delete(0)
        unit.verify_publish_title(self.DRAFT_STATUS)
        unit.publish()
        unit.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
        self._view_published_version(unit)
        self.assertEqual(0, self.courseware.num_xblock_components)

    def test_published_not_live(self):
        """
        Scenario: The publish title displays correctly for units that are not live
        Given I have a published unit with no unpublished changes that releases in the future
        When I go to the unit page in Studio
        Then the title in the Publish information box is "Published (not yet released)"
        And when I add a component to the unit
        Then the title in the Publish information box is "Draft (Unpublished changes)"
        And when I click the Publish button
        Then the title in the Publish information box is "Published (not yet released)"
        """
        unit = self.go_to_unit_page('Unreleased Section', 'Unreleased Subsection', 'Unreleased Unit')
        unit.verify_publish_title(self.PUBLISHED_STATUS)
        add_discussion(unit)
        unit.verify_publish_title(self.DRAFT_STATUS)
        unit.publish()
        unit.verify_publish_title(self.PUBLISHED_STATUS)

    def _view_published_version(self, unit):
        """
        Goes to the published version, then waits for the browser to load the page.
        """
        unit.view_published_version()
        # "View Live" opens a second browser window/tab.
        self.assertEqual(len(self.browser.window_handles), 2)
        self.courseware.wait_for_page()

    def _verify_and_return_staff_page(self):
        """
        Verifies that the browser is on the staff page and returns a StaffCoursewarePage.
        """
        page = StaffCoursewarePage(self.browser, self.course_id)
        page.wait_for_page()
        return page

    def _verify_student_view_locked(self):
        """
        Verifies no component is visible when viewing as a student.
        """
        page = self._verify_and_return_staff_page()
        page.set_staff_view_mode('Learner')
        page.wait_for(lambda: self.courseware.num_xblock_components == 0, 'No XBlocks visible')

    def _verify_student_view_visible(self, expected_components):
        """
        Verifies expected components are visible when viewing as a student.
        """
        self._verify_and_return_staff_page().set_staff_view_mode('Learner')
        self._verify_components_visible(expected_components)

    def _verify_components_visible(self, expected_components):
        """
        Verifies the expected components are visible (and there are no extras).
        """
        self.assertEqual(len(expected_components), self.courseware.num_xblock_components)
        for index, component in enumerate(expected_components):
            self.assertEqual(component, self.courseware.xblock_component_type(index))

    def _verify_release_date_info(self, unit, expected_title, expected_date):
        """
        Verifies how the release date is displayed in the publishing sidebar.
        """
        self.assertEqual(expected_title, unit.release_title)
        self.assertEqual(expected_date, unit.release_date)

    def _verify_last_published_and_saved(self, unit, expected_published_prefix, expected_saved_prefix):
        """
        Verifies that last published and last saved messages respectively contain the given strings.
        """
        self.assertIn(expected_published_prefix, unit.last_published_text)
        self.assertIn(expected_saved_prefix, unit.last_saved_text)

    def _verify_explicit_lock_overrides_implicit_lock_information(self, unit_page):
        """
        Verifies that a unit with inherited staff lock does not display inherited information when explicitly locked.
        """
        self.assertTrue(unit_page.shows_inherited_staff_lock())
        unit_page.toggle_staff_lock(inherits_staff_lock=True)
        self.assertFalse(unit_page.shows_inherited_staff_lock())
        unit_page.toggle_staff_lock(inherits_staff_lock=True)
        self.assertTrue(unit_page.shows_inherited_staff_lock())

    # TODO: need to work with Jay/Christine to get testing of "Preview" working.
    # def test_preview(self):
    #     unit = self.go_to_unit_page()
    #     add_discussion(unit)
    #     unit.preview()
    #     self.assertEqual(2, self.courseware.num_xblock_components)
    #     self.assertEqual('html', self.courseware.xblock_component_type(0))
    #     self.assertEqual('discussion', self.courseware.xblock_component_type(1))
@attr(shard=20)
class DisplayNameTest(ContainerBase):
    """
    Test consistent use of display_name_with_default between the unit
    page and the container page.
    """

    def populate_course_fixture(self, course_fixture):
        """
        Sets up a course structure with nested verticals.
        """
        inner_vertical = XBlockFixtureDesc('vertical', None)
        unit = XBlockFixtureDesc('vertical', 'Test Unit').add_children(inner_vertical)
        subsection = XBlockFixtureDesc('sequential', 'Test Subsection').add_children(unit)
        section = XBlockFixtureDesc('chapter', 'Test Section').add_children(subsection)
        course_fixture.add_children(section)

    def test_display_name_default(self):
        """
        Scenario: Given that an XBlock with a dynamic display name has been added to the course,
        When I view the unit page and note the display name of the block,
        Then I see the dynamically generated display name,
        And when I then go to the container page for that same block,
        Then I see the same generated display name.
        """
        # No core-platform block implements display_name_with_default in an
        # interesting way, so this only checks consistency, not the value.
        unit_page = self.go_to_unit_page()
        block = unit_page.xblocks[1]
        name_on_unit_page = block.name
        container_page = block.go_to_container()
        self.assertEqual(container_page.name, name_on_unit_page)
@attr(shard=3)
class ProblemCategoryTabsTest(ContainerBase):
    """
    Test to verify tabs in problem category.
    """
    def setUp(self, is_staff=True):
        super(ProblemCategoryTabsTest, self).setUp(is_staff=is_staff)

    def populate_course_fixture(self, course_fixture):
        """
        Sets up course structure.
        """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit')
                )
            )
        )

    def test_correct_tabs_present(self):
        """
        Scenario: Verify that correct tabs are present in problem category.
        Given I am a staff user
        When I go to unit page
        Then I only see `Common Problem Types` and `Advanced` tabs in `problem` category
        """
        self.go_to_unit_page()
        page = ContainerPage(self.browser, None)
        self.assertEqual(page.get_category_tab_names('problem'), ['Common Problem Types', 'Advanced'])

    def test_common_problem_types_tab(self):
        """
        Scenario: Verify that correct components are present in Common Problem Types tab.
        Given I am a staff user
        When I go to unit page
        Then I see correct components under `Common Problem Types` tab in `problem` category
        """
        self.go_to_unit_page()
        page = ContainerPage(self.browser, None)
        expected_components = [
            "Blank Common Problem",
            "Checkboxes",
            "Dropdown",
            "Multiple Choice",
            "Numerical Input",
            "Text Input",
            "Checkboxes with Hints and Feedback",
            "Dropdown with Hints and Feedback",
            "Multiple Choice with Hints and Feedback",
            "Numerical Input with Hints and Feedback",
            "Text Input with Hints and Feedback",
        ]
        # NOTE(review): tab index 1 is presumably the `Common Problem Types`
        # tab (1-based indexing) -- confirm against the page object.
        self.assertEqual(page.get_category_tab_components('problem', 1), expected_components)
@attr(shard=16)
@ddt.ddt
class MoveComponentTest(ContainerBase):
    """
    Tests of moving an XBlock to another XBlock.
    """
    # Expected publishing-status sidebar texts.
    PUBLISHED_LIVE_STATUS = "Publishing Status\nPublished and Live"
    DRAFT_STATUS = "Publishing Status\nDraft (Unpublished changes)"

    def setUp(self, is_staff=True):
        super(MoveComponentTest, self).setUp(is_staff=is_staff)
        self.container = ContainerPage(self.browser, None)
        self.move_modal_view = MoveModalView(self.browser)
        # Move target inside the modal: first section/subsection, second unit.
        self.navigation_options = {
            'section': 0,
            'subsection': 0,
            'unit': 1,
        }
        self.source_component_display_name = 'HTML 11'
        self.source_xblock_category = 'component'
        # Confirmation banners shown after move / undo-move.
        self.message_move = u'Success! "{display_name}" has been moved.'
        self.message_undo = u'Move cancelled. "{display_name}" has been moved back to its original location.'

    def populate_course_fixture(self, course_fixture):
        """
        Sets up a course structure.
        """
        # pylint: disable=attribute-defined-outside-init
        self.unit_page1 = XBlockFixtureDesc('vertical', 'Test Unit 1').add_children(
            XBlockFixtureDesc('html', 'HTML 11'),
            XBlockFixtureDesc('html', 'HTML 12')
        )
        self.unit_page2 = XBlockFixtureDesc('vertical', 'Test Unit 2').add_children(
            XBlockFixtureDesc('html', 'HTML 21'),
            XBlockFixtureDesc('html', 'HTML 22')
        )
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    self.unit_page1,
                    self.unit_page2
                )
            )
        )

    def verify_move_opertions(self, unit_page, source_component, operation, component_display_names_after_operation,
                              should_verify_publish_title=True):
        """
        Verify move operations.

        Arguments:
            unit_page (Object) Unit container page.
            source_component (Object) Source XBlock object to be moved.
            operation (str), `move` or `undo move` operation.
            component_display_names_after_operation (dict) Display names of components after operation in source/dest
            should_verify_publish_title (Boolean) Should verify publish title ot not. Default is True.
        """
        source_component.open_move_modal()
        self.move_modal_view.navigate_to_category(self.source_xblock_category, self.navigation_options)
        self.assertEqual(self.move_modal_view.is_move_button_enabled, True)
        # Verify unit is in published state before move operation
        if should_verify_publish_title:
            self.container.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
        self.move_modal_view.click_move_button()
        self.container.verify_confirmation_message(
            self.message_move.format(display_name=self.source_component_display_name)
        )
        # One of the two components has been moved away.
        self.assertEqual(len(unit_page.displayed_children), 1)
        # Verify unit in draft state now
        if should_verify_publish_title:
            self.container.verify_publish_title(self.DRAFT_STATUS)
        if operation == 'move':
            self.container.click_take_me_there_link()
        elif operation == 'undo_move':
            self.container.click_undo_move_link()
            self.container.verify_confirmation_message(
                self.message_undo.format(display_name=self.source_component_display_name)
            )
        # Re-read the current container page and check the resulting order.
        unit_page = ContainerPage(self.browser, None)
        components = unit_page.displayed_children
        self.assertEqual(
            [component.name for component in components],
            component_display_names_after_operation
        )

    def verify_state_change(self, unit_page, operation):
        """
        Verify that after state change, confirmation message is hidden.

        Arguments:
            unit_page (Object) Unit container page.
            operation (String) Publish or discard changes operation.
        """
        # Verify unit in draft state now
        self.container.verify_publish_title(self.DRAFT_STATUS)
        # Now click publish/discard button
        if operation == 'publish':
            unit_page.publish()
        else:
            unit_page.discard_changes()
        # Now verify success message is hidden
        self.container.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
        self.container.verify_confirmation_message(
            message=self.message_move.format(display_name=self.source_component_display_name),
            verify_hidden=True
        )

    def test_move_component_successfully(self):
        """
        Test if we can move a component successfully.
        Given I am a staff user
        And I go to unit page in first section
        And I open the move modal
        And I navigate to unit in second section
        And I see move button is enabled
        When I click on the move button
        Then I see move operation success message
        And When I click on take me there link
        Then I see moved component there.
        """
        unit_page = self.go_to_unit_page(unit_name='Test Unit 1')
        components = unit_page.displayed_children
        self.assertEqual(len(components), 2)
        self.verify_move_opertions(
            unit_page=unit_page,
            source_component=components[0],
            operation='move',
            component_display_names_after_operation=['HTML 21', 'HTML 22', 'HTML 11']
        )

    @ddt.data('publish', 'discard')
    def test_publish_discard_changes_afer_move(self, operation):
        """
        Test if success banner is hidden when we discard changes or publish the unit after a move operation.
        Given I am a staff user
        And I go to unit page in first section
        And I open the move modal
        And I navigate to unit in second section
        And I see move button is enabled
        When I click on the move button
        Then I see move operation success message
        And When I click on publish or discard changes button
        Then I see move operation success message is hidden.
        """
        unit_page = self.go_to_unit_page(unit_name='Test Unit 1')
        components = unit_page.displayed_children
        self.assertEqual(len(components), 2)
        components[0].open_move_modal()
        self.move_modal_view.navigate_to_category(self.source_xblock_category, self.navigation_options)
        self.assertEqual(self.move_modal_view.is_move_button_enabled, True)
        # Verify unit is in published state before move operation
        self.container.verify_publish_title(self.PUBLISHED_LIVE_STATUS)
        self.move_modal_view.click_move_button()
        self.container.verify_confirmation_message(
            self.message_move.format(display_name=self.source_component_display_name)
        )
        self.assertEqual(len(unit_page.displayed_children), 1)
        self.verify_state_change(unit_page, operation)

    def test_content_experiment(self):
        """
        Test if we can move a component of content experiment successfully.
        Given that I am a staff user
        And I go to content experiment page
        And I open the move dialogue modal
        When I navigate to the unit in second section
        Then I see move button is enabled
        And when I click on the move button
        Then I see move operation success message
        And when I click on take me there link
        Then I see moved component there
        And when I undo move a component
        Then I see that undo move operation success message
        """
        # Add content experiment support to course.
        self.course_fixture.add_advanced_settings({
            u'advanced_modules': {'value': ['split_test']},
        })
        # Create group configurations
        # pylint: disable=protected-access
        self.course_fixture._update_xblock(self.course_fixture._course_location, {
            'metadata': {
                u'user_partitions': [
                    create_user_partition_json(
                        0,
                        'Test Group Configuration',
                        'Description of the group configuration.',
                        [Group('0', 'Group A'), Group('1', 'Group B')]
                    ),
                ],
            },
        })
        # Add split test to unit_page1 and assign newly created group configuration to it
        split_test = XBlockFixtureDesc('split_test', 'Test Content Experiment', metadata={'user_partition_id': 0})
        self.course_fixture.create_xblock(self.unit_page1.locator, split_test)
        # Visit content experiment container page.
        unit_page = ContainerPage(self.browser, split_test.locator)
        unit_page.visit()
        group_a_locator = unit_page.displayed_children[0].locator
        # Add some components to Group A.
        self.course_fixture.create_xblock(
            group_a_locator, XBlockFixtureDesc('html', 'HTML 311')
        )
        self.course_fixture.create_xblock(
            group_a_locator, XBlockFixtureDesc('html', 'HTML 312')
        )
        # Go to group page to move it's component.
        group_container_page = ContainerPage(self.browser, group_a_locator)
        group_container_page.visit()
        # Verify content experiment block has correct groups and components.
        components = group_container_page.displayed_children
        self.assertEqual(len(components), 2)
        self.source_component_display_name = 'HTML 311'
        # Verify undo move operation for content experiment.
        self.verify_move_opertions(
            unit_page=group_container_page,
            source_component=components[0],
            operation='undo_move',
            component_display_names_after_operation=['HTML 311', 'HTML 312'],
            should_verify_publish_title=False
        )
        # Verify move operation for content experiment.
        self.verify_move_opertions(
            unit_page=group_container_page,
            source_component=components[0],
            operation='move',
            component_display_names_after_operation=['HTML 21', 'HTML 22', 'HTML 311'],
            should_verify_publish_title=False
        )

    # Ideally this test should be decorated with @attr('a11y') so that it should run in a11y jenkins job
    # But for some reason it always fails in a11y jenkins job and passes always locally on devstack as well
    # as in bokchoy jenkins job. Due to this reason, test is marked to run under bokchoy jenkins job.
    def test_a11y(self):
        """
        Verify move modal a11y.
        """
        unit_page = self.go_to_unit_page(unit_name='Test Unit 1')
        unit_page.a11y_audit.config.set_scope(
            include=[".modal-window.move-modal"]
        )
        unit_page.a11y_audit.config.set_rules({
            'ignore': [
                'color-contrast',  # TODO: AC-716
                'link-href',  # TODO: AC-716
            ]
        })
        unit_page.displayed_children[0].open_move_modal()
        # Audit the modal at each navigation level.
        for category in ['section', 'subsection', 'component']:
            self.move_modal_view.navigate_to_category(category, self.navigation_options)
            unit_page.a11y_audit.check_for_accessibility_errors()
| cpennington/edx-platform | common/test/acceptance/tests/studio/test_studio_container.py | Python | agpl-3.0 | 66,869 | [
"VisIt"
] | 4f047f3c837c61c4e0def4bb19f8e8d7b9ef7837a4ae138c101362f5a91c4fd5 |
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SearchIO objects to model similarity search program outputs.
The SearchIO object model consists of a hierarchy of four nested objects:
* QueryResult, to represent a search query.
This is the top-level object returned by the main SearchIO ``parse`` and
``read`` functions. QueryResult objects may contain zero or more Hit
objects, each accessible by its ID string (like in Python dictionaries)
or integer index (like in Python lists).
* Hit, to represent a database entry containing a full or partial sequence
match with the query sequence.
Hit objects contain one or more HSP objects, each accessible by its integer
index. They behave very similar to a Python list.
* HSP, to represent a region of significant alignment(s) between the query
and hit sequences.
HSP objects contain one or more HSPFragment objects, each accessible by
its integer index. In most cases, the HSP objects are where the bulk of
search result statistics (e.g. e-value, bitscore) are stored. Like Hit
objects, HSPs also behave very similar to a Python list.
* HSPFragment, to represent a single contiguous alignment between the query
and hit sequences.
HSPFragment objects may store hit and query sequences resulting from the
sequence search. If present, these sequences are stored as SeqRecord
objects (see SeqRecord). If both of them are present, HSPFragment will
create a MultipleSeqAlignment object from both sequences.
Most search programs only have HSPs with one HSPFragment in them, making
these two objects inseparable. However, there are programs (e.g. BLAT and
Exonerate) which may have more than one HSPFragment objects in any given
HSP. If you are not using these programs, you can safely consider HSP and
HSPFragment as a single union.
"""
from .query import QueryResult
from .hit import Hit
from .hsp import HSP, HSPFragment
# Public API of this subpackage: the four-level SearchIO object model.
__all__ = ['QueryResult', 'Hit', 'HSP', 'HSPFragment']

__docformat__ = "restructuredtext en"

# if not used as a module, run the doctest
if __name__ == "__main__":
    from Bio._utils import run_doctest
    run_doctest()
| updownlife/multipleK | dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/SearchIO/_model/__init__.py | Python | gpl-2.0 | 2,435 | [
"Biopython"
] | ddd5dda2c080d8cc969355404f55c33cb7483cb53b881c9430def9c0f7571af3 |
# some functions
from __future__ import print_function
from __future__ import absolute_import
# Python 2/3 compatibility shim: ensure the name `basestring` exists so that
# isinstance checks against string types (e.g. in flatten()) work under both
# interpreters. On Python 3 it aliases `str`.
try:
    # For Python 2
    basestring = basestring
except NameError:
    basestring = str
def poly(c, x):
    """Evaluate the polynomial y = Sum { c(i)*x^i }, i=0..len(c)-1."""
    import numpy as N
    result = N.zeros(len(x))
    for power, coeff in enumerate(c):
        result += coeff * x ** power
    return result
def sp_in(c, x):
    """Spectral index model in freq-flux space.

    order 1: y = c0 * x^c1
    order 2: y = c0 * x^c1 * x^(c2*log(x))   (curved power law)

    Returns None (after printing a warning) for unsupported orders.
    """
    import numpy as N
    order = len(c) - 1
    if order == 1:
        y = c[0] * N.power(x, c[1])
    elif order == 2:
        y = c[0] * N.power(x, c[1]) * N.power(x, c[2] * N.log(x))
    else:
        # Bug fix: the original left `y` unassigned here, so the final
        # `return y` raised NameError. Return None explicitly instead.
        print('Not yet implemented')
        y = None
    return y
def wenss_fit(c, x):
    """WENSS noise model: sqrt(c0^2 + c1^2/x^2)."""
    import numpy as N
    quad_sum = c[0] * c[0] + (c[1] / x) * (c[1] / x)
    return N.sqrt(quad_sum)
def nanmean(x):
    """Mean of an array, ignoring NaN entries; NaN if all entries are NaN."""
    import numpy as N
    valid_count = N.sum(~N.isnan(x))
    if valid_count == 0:
        return float("NaN")
    return N.nansum(x) / valid_count
def shapeletfit(cf, Bset, cfshape):
    """Evaluate a shapelet model as a weighted sum of flattened basis images.

    cf      : 1-D coefficient vector, reshaped to cfshape for indexing.
    Bset    : basis set indexed as Bset[i, j, ...]; only i + j < ordermax
              terms are used (triangular truncation).
    cfshape : 2-D shape of the coefficient grid.

    Returns the flattened model image.
    """
    import numpy as N

    ordermax = Bset.shape[0]
    # Use the first basis image only to obtain the flattened output shape.
    y = (Bset[0,0,::]).flatten()
    y = N.zeros(y.shape)
    index = [(i,j) for i in range(ordermax) for j in range(ordermax-i)]  # i=0->nmax, j=0-nmax-i
    for coord in index:
        linbasis = (Bset[coord[0], coord[1], ::]).flatten()
        y += cf.reshape(cfshape)[coord]*linbasis
    return y
def func_poly2d(ord, p, x, y):
    """2d polynomial.

    ord=0 : z=p[0]
    ord=1 : z=p[0]+p[1]*x+p[2]*y
    ord=2 : z=p[0]+p[1]*x+p[2]*y+p[3]*x*x+p[4]*y*y+p[5]*x*y
    ord=3 : z=p[0]+p[1]*x+p[2]*y+p[3]*x*x+p[4]*y*y+p[5]*x*y+
              p[6]*x*x*x+p[7]*x*x*y+p[8]*x*y*y+p[9]*y*y*y
    Orders above 3 are refused (returns None).
    """
    if ord == 0:
        z = p[0]
    if ord == 1:
        z = p[0] + p[1] * x + p[2] * y
    if ord == 2:
        z = (p[0] + p[1] * x + p[2] * y
             + p[3] * x * x + p[4] * y * y + p[5] * x * y)
    if ord == 3:
        z = (p[0] + p[1] * x + p[2] * y
             + p[3] * x * x + p[4] * y * y + p[5] * x * y
             + p[6] * x * x * x + p[7] * x * x * y
             + p[8] * x * y * y + p[9] * y * y * y)
    if ord > 3:
        print(" We do not trust polynomial fits > 3 ")
        z = None
    return z
def func_poly2d_ini(ord, av):
    """Initial-guess parameters for func_poly2d: a flat plane at level av.

    Returns a numpy array [av, 0, 0, ...] whose length matches the number of
    coefficients for the given order (1, 3, 6 or 10), or None for orders > 3.
    """
    # Bug fix: `N` was used below but numpy was never imported in this
    # function (every call raised NameError).
    import numpy as N
    if ord == 0:
        p0 = N.asarray([av])
    if ord == 1:
        p0 = N.asarray([av] + [0.0]*2)
    if ord == 2:
        p0 = N.asarray([av] + [0.0]*5)
    if ord == 3:
        p0 = N.asarray([av] + [0.0]*9)
    if ord > 3:
        p0 = None
    return p0
def ilist(x):
    """Round each float in a list to the nearest integer."""
    return [int(round(value)) for value in x]
def cart2polar(cart, cen):
    """ convert cartesian coordinates to polar coordinates around cen. theta is
    zero for +ve xaxis and goes counter clockwise. cart is a numpy array [x,y] where
    x and y are numpy arrays of all the (>0) values of coordinates.

    Returns a numpy array [r, theta_deg] with theta in (0, 360].
    """
    import math
    # Bug fix: `N` was used below but numpy was never imported here
    # (every call raised NameError).
    import numpy as N
    polar = N.zeros(cart.shape)
    pi = math.pi
    rad = 180.0/pi

    # Radius: squared offsets from the centre, summed per point.
    cc = N.transpose(cart)
    cc = (cc-cen)*(cc-cen)
    polar[0] = N.sqrt(N.sum(cc,1))
    # Angle in degrees, mapped from (-180, 180] to (0, 360].
    th = N.arctan2(cart[1]-cen[1],cart[0]-cen[0])*rad
    polar[1] = N.where(th > 0, th, 360+th)
    return polar
def polar2cart(polar, cen):
    """ convert polar coordinates around cen to cartesian coordinates. theta is
    zero for +ve xaxis and goes counter clockwise. polar is a numpy array of [r], [theta]
    and cart is a numpy array [x,y] where x and y are numpy arrays of all the (>0)
    values of coordinates."""
    import math
    # Bug fix: `N` was used below but numpy was never imported here
    # (every call raised NameError).
    import numpy as N
    cart = N.zeros(polar.shape)
    pi = math.pi
    rad = 180.0/pi
    # theta is in degrees; convert to radians for cos/sin.
    cart[0]=polar[0]*N.cos(polar[1]/rad)+cen[0]
    cart[1]=polar[0]*N.sin(polar[1]/rad)+cen[1]
    return cart
def gaus_pixval(g, pix):
    """ Calculates the value at a pixel pix due to a gaussian object g.

    g   : gaussian object with centre_pix, peak_flux and size_pix
          (major FWHM, minor FWHM, PA) attributes.
    pix : (x, y) pixel coordinate.
    """
    from .const import fwsig, pi
    from math import sin, cos, exp

    cen = g.centre_pix
    peak = g.peak_flux
    bmaj_p, bmin_p, bpa_p = g.size_pix
    # Convert FWHM axes to sigmas; rotate the PA by 90 deg into the frame
    # used by the rotation below.
    a4 = bmaj_p/fwsig; a5 = bmin_p/fwsig
    a6 = (bpa_p+90.0)*pi/180.0
    spa = sin(a6); cpa = cos(a6)
    # Rotated offsets from the centre, normalised by the sigmas.
    dr1 = ((pix[0]-cen[0])*cpa + (pix[1]-cen[1])*spa)/a4
    dr2 = ((pix[1]-cen[1])*cpa - (pix[0]-cen[0])*spa)/a5
    pixval = peak*exp(-0.5*(dr1*dr1+dr2*dr2))
    return pixval
def atanproper(dumr, dx, dy):
    """Map a first-quadrant angle dumr to the quadrant given by the signs of
    (dx, dy); returns the angle in [0, 2*pi) counter-clockwise from +x.
    """
    from math import pi
    y_nonneg = dy >= 0.0
    x_nonneg = dx >= 0.0
    if y_nonneg and not x_nonneg:        # second quadrant
        return pi - dumr
    if not y_nonneg and not x_nonneg:    # third quadrant
        return pi + dumr
    if not y_nonneg and x_nonneg:        # fourth quadrant
        return 2.0 * pi - dumr
    return dumr                          # first quadrant: unchanged
def gdist_pa(pix1, pix2, gsize):
    """ Computes FWHM in degrees in the direction towards second source, of an elliptical gaussian.

    pix1, pix2 : (x, y) positions of the gaussian centre and the second source.
    gsize      : (major FWHM, minor FWHM, PA in degrees).
    NOTE(review): the returned FWHM is in the units of gsize[0:2], not
    degrees as the summary suggests -- confirm against callers.
    """
    from math import atan, pi, sqrt, cos, sin, tan

    # Direction angle from pix1 towards pix2, in [0, 2*pi).
    dx = pix2[0] - pix1[0]
    dy = pix2[1] - pix1[1]
    if dx == 0.0:
        val = pi/2.0
    else:
        dumr = atan(abs(dy/dx))
        val = atanproper(dumr, dx, dy)

    # Angle between that direction and the gaussian major axis.
    psi = val - (gsize[2]+90.0)/180.0*pi

    # convert angle to eccentric anomaly
    if approx_equal(gsize[1], 0.0):
        psi = pi/2.0
    else:
        psi=atan(gsize[0]/gsize[1]*tan(psi))
    # Radius of the FWHM ellipse in that direction.
    dumr2 = gsize[0]*cos(psi)
    dumr3 = gsize[1]*sin(psi)
    fwhm = sqrt(dumr2*dumr2+dumr3*dumr3)
    return fwhm
def gaus_2d(c, x, y):
    """Elliptical 2-D gaussian.

    c = [amplitude, x0, y0, sigma_x, sigma_y, angle_deg]; x and y are 2-D
    arrays of positions. Returns the gaussian evaluated at each (x, y).
    """
    import math
    import numpy as N
    angle = c[5] * math.pi / 180.0
    cs = math.cos(angle)
    sn = math.sin(angle)
    # Rotate the offsets into the gaussian frame and normalise by the sigmas.
    u = ((x - c[1]) * cs + (y - c[2]) * sn) / c[3]
    v = ((y - c[2]) * cs - (x - c[1]) * sn) / c[4]
    return c[0] * N.exp(-0.5 * (u * u + v * v))
def gaus_2d_itscomplicated(c, x, y, p_tofix, ind):
    """ x and y are 2d arrays with the x and y positions. c is a list (of lists) of gaussian parameters to fit, p_tofix
    are gaussian parameters to fix. ind is a list with 0, 1; 1 = fit; 0 = fix.

    Merges the free (c) and fixed (p_tofix) parameters according to ind and
    returns the sum of all the resulting 2-D gaussians evaluated at (x, y).
    """
    import math
    import numpy as N

    val = N.zeros(x.shape)
    indx = N.array(ind)
    # Each gaussian takes exactly 6 parameters.
    if len(indx) % 6 != 0:
        print(" Something wrong with the parameters passed - need multiples of 6 !")
    else:
        ngaus = int(len(indx)/6)
        params = N.zeros(6*ngaus)
        # Scatter free and fixed values back into their full positions.
        params[N.where(indx==1)[0]] = c
        params[N.where(indx==0)[0]] = p_tofix
        for i in range(ngaus):
            gau = params[i*6:i*6+6]
            val = val + gaus_2d(gau, x, y)
    return val
def g2param(g, adj=False):
    """Convert gaussian object g to param list [amp, cenx, ceny, sigx, sigy, theta].

    Sizes are converted from FWHM to sigma; theta is the stored PA plus
    90 degrees. When adj is True and the object has size_pix_adj, that
    (adjusted) size is used instead of size_pix.
    """
    from .const import fwsig
    from math import pi

    A = g.peak_flux
    if adj and hasattr(g, 'size_pix_adj'):
        sigx, sigy, th = g.size_pix_adj
    else:
        sigx, sigy, th = g.size_pix
    cenx, ceny = g.centre_pix
    sigx = sigx/fwsig; sigy = sigy/fwsig; th = th+90.0
    params = [A, cenx, ceny, sigx, sigy, th]
    return params
def g2param_err(g, adj=False):
    """Convert errors on gaussian object g to param list [Eamp, Ecenx, Eceny, Esigx, Esigy, Etheta].

    Mirrors g2param but reads the error attributes (peak_fluxE, size_pixE,
    centre_pixE). No 90-degree offset is added to the angle error.
    """
    from .const import fwsig
    from math import pi

    A = g.peak_fluxE
    # NOTE(review): when adj is True this reads size_pix_adj (values), not an
    # adjusted *error* attribute -- confirm this is intentional.
    if adj and hasattr(g, 'size_pix_adj'):
        sigx, sigy, th = g.size_pix_adj
    else:
        sigx, sigy, th = g.size_pixE
    cenx, ceny = g.centre_pixE
    sigx = sigx/fwsig; sigy = sigy/fwsig
    params = [A, cenx, ceny, sigx, sigy, th]
    return params
def corrected_size(size):
    """Convert (sigma_maj, sigma_min, angle) to (FWHM_maj, FWHM_min, PA),
    with the position angle folded into [0, 180].
    """
    from .const import fwsig
    # Angle measured from horizontal -> position angle.
    pa = size[2] - 90.0
    pa = pa % 360
    if pa < 0.0:
        pa = pa + 360.0
    if pa > 180.0:
        pa = pa - 180.0
    return [size[0] * fwsig, size[1] * fwsig, pa]
def drawellipse(g):
    """Return x, y arrays tracing the FWHM ellipse of a gaussian.

    g may be a Gaussian object or a 6-element parameter list
    [amp, cenx, ceny, sigx, sigy, theta]; raises RuntimeError otherwise.
    """
    import numpy as N
    from .gausfit import Gaussian

    rad = 180.0/N.pi
    if isinstance(g, Gaussian):
        param = g2param(g)
    else:
        if isinstance(g, list) and len(g)>=6:
            param = g
        else:
            raise RuntimeError("Input to drawellipse neither Gaussian nor list")

    # Convert sigmas/angle to FWHM axes and position angle.
    size = [param[3], param[4], param[5]]
    size_fwhm = corrected_size(size)
    # Sample the ellipse every 10 degrees, then rotate and translate it
    # to the gaussian centre.
    th=N.arange(0, 370, 10)
    x1=size_fwhm[0]*N.cos(th/rad)
    y1=size_fwhm[1]*N.sin(th/rad)
    x2=x1*N.cos(param[5]/rad)-y1*N.sin(param[5]/rad)+param[1]
    y2=x1*N.sin(param[5]/rad)+y1*N.cos(param[5]/rad)+param[2]
    return x2, y2
def drawsrc(src):
    """Build a matplotlib Path outlining a source.

    The outline is traced around the union of the FWHM ellipses of all
    gaussians in src.gaussians, approximated by 10 horizontal bands.
    """
    import math
    import numpy as N
    import matplotlib.path as mpath
    Path = mpath.Path

    paths = []
    xmin = []
    xmax = []
    ymin = []
    ymax = []
    ellx = []
    elly = []
    # Collect the ellipse outline points of every gaussian in the source.
    for indx, g in enumerate(src.gaussians):
        gellx, gelly = drawellipse(g)
        ellx += gellx.tolist()
        elly += gelly.tolist()
    yarr = N.array(elly)
    minyarr = N.min(yarr)
    maxyarr = N.max(yarr)
    xarr = N.array(ellx)
    # Slice the y-range into 10 bands; record the x-extent (padded by 1 px)
    # and mean y of the outline points in each band.
    for i in range(10):
        inblock = N.where(yarr > minyarr + float(i)*(maxyarr-minyarr)/10.0)
        yarr = yarr[inblock]
        xarr = xarr[inblock]
        inblock = N.where(yarr < minyarr + float(i+1)*(maxyarr-minyarr)/10.0)
        xmin.append(N.min(xarr[inblock])-1.0)
        xmax.append(N.max(xarr[inblock])+1.0)
        ymin.append(N.mean(yarr[inblock]))
        ymax.append(N.mean(yarr[inblock]))
    # Walk up the left edge, across the top, down the right edge, and close.
    xmax.reverse()
    ymax.reverse()
    pathdata = [(Path.MOVETO, (xmin[0], ymin[0]))]
    for i in range(10):
        pathdata.append((Path.LINETO, (xmin[i], ymin[i])))
        pathdata.append((Path.CURVE3, (xmin[i], ymin[i])))
    pathdata.append((Path.LINETO, ((xmin[9]+xmax[0])/2.0, (ymin[9]+ymax[0])/2.0+1.0)))
    for i in range(10):
        pathdata.append((Path.LINETO, (xmax[i], ymax[i])))
        pathdata.append((Path.CURVE3, (xmax[i], ymax[i])))
    pathdata.append((Path.LINETO, ((xmin[0]+xmax[9])/2.0, (ymin[0]+ymax[9])/2.0-1.0)))
    pathdata.append((Path.CLOSEPOLY, (xmin[0], ymin[0])))
    codes, verts = zip(*pathdata)
    path = Path(verts, codes)
    return path
def mask_fwhm(g, fac1, fac2, delc, shap):
    """ take gaussian object g and make a mask (as True) for pixels which are outside (less flux)
    fac1*FWHM and inside (more flux) fac2*FWHM. Also returns the values as well.

    delc : offset subtracted from the gaussian centre (sub-image origin).
    shap : shape of the output mask/image.
    Returns (mask, masked_gaussian_image).
    """
    import math
    import numpy as N
    from .const import fwsig

    x, y = N.indices(shap)
    params = g2param(g)
    # Shift the centre into the sub-image frame.
    params[1] -= delc[0]; params[2] -= delc[1]
    gau = gaus_2d(params, x, y)
    # Flux levels corresponding to fac1*FWHM and fac2*FWHM radii.
    dumr1 = 0.5*fac1*fwsig
    dumr2 = 0.5*fac2*fwsig
    flux1= params[0]*math.exp(-0.5*dumr1*dumr1)
    flux2 = params[0]*math.exp(-0.5*dumr2*dumr2)
    # True for pixels in the annulus between the two levels.
    mask = (gau <= flux1) * (gau > flux2)
    gau = gau * mask
    return mask, gau
def flatten(x):
    """flatten(sequence) -> list

    Recursively flatten nested iterables (lists, tuples, ...) into one flat
    list. Strings are treated as atoms and are not iterated into characters.

    Example:
    >>> flatten([[[1, 2, 3], (42, None)], [4, 5], [6], 7])
    [1, 2, 3, 42, None, 4, 5, 6, 7]
    """
    out = []
    for item in x:
        is_nested = hasattr(item, "__iter__") and not isinstance(item, basestring)
        if is_nested:
            out.extend(flatten(item))
        else:
            out.append(item)
    return out
def moment(x, mask=None):
    """
    Calculates first 3 moments of numpy array x. Only those values of x
    for which mask is False are used, if mask is given. Works for any
    dimension of x.

    Returns (m1, m2, m3): total, centroid per axis and, when the second
    moment is positive definite, the standard deviation per axis.
    """
    import numpy as N

    if mask is None:
        mask=N.zeros(x.shape, dtype=bool)
    m1=N.zeros(1)
    m2=N.zeros(x.ndim)
    m3=N.zeros(x.ndim)
    # Accumulate value-weighted sums over all unmasked pixels.
    for i, val in N.ndenumerate(x):
        if not mask[i]:
            m1 += val
            m2 += val*N.array(i)
            m3 += val*N.array(i)*N.array(i)
    m2 /= m1
    # Convert the raw second moment to a spread only when it is positive
    # definite along every axis; otherwise leave the raw accumulation.
    if N.all(m3/m1 > m2*m2):
        m3 = N.sqrt(m3/m1-m2*m2)
    return m1, m2, m3
def fit_mask_1d(x, y, sig, mask, funct, do_err, order=0, p0 = None):
    """
    Calls scipy.optimise.leastsq for a 1d function with a mask.
    Takes values only where mask=False.

    Returns (params, errors); both are [0, 0] when fewer than two points
    are unmasked or no valid initial guess can be built.
    """
    from scipy.optimize import leastsq
    from math import sqrt, pow
    import numpy as N
    import sys

    # Indices of the unmasked points actually used in the fit.
    ind=N.where(~N.array(mask))[0]
    if len(ind) > 1:
        n=sum(mask)
        if isinstance(x, list): x = N.array(x)
        if isinstance(y, list): y = N.array(y)
        if isinstance(sig, list): sig = N.array(sig)
        xfit=x[ind]; yfit=y[ind]; sigfit=sig[ind]

        # Build an initial guess suited to the fitted function when the
        # caller did not supply one.
        if p0 is None:
            if funct == poly:
                # Straight line through the two end points.
                p0=N.array([0]*(order+1))
                p0[1]=(yfit[0]-yfit[-1])/(xfit[0]-xfit[-1])
                p0[0]=yfit[0]-p0[1]*xfit[0]
            if funct == wenss_fit:
                p0=N.array([yfit[N.argmax(xfit)]] + [1.])
            if funct == sp_in:
                # Power-law guess from the first and last positive points.
                ind1 = N.where(yfit > 0.)[0]
                if len(ind1) >= 2:
                    low = ind1[0]; hi = ind1[-1]
                    sp = N.log(yfit[low]/yfit[hi])/N.log(xfit[low]/xfit[hi])
                    p0=N.array([yfit[low]/pow(xfit[low], sp), sp] + [0.]*(order-1))
                elif len(ind1) == 1:
                    p0=N.array([ind1[0], -0.8] + [0.]*(order-1))
                else:
                    # No positive data: the fit is impossible.
                    return [0, 0], [0, 0]
        # Residuals weighted by the supplied uncertainties.
        res=lambda p, xfit, yfit, sigfit: (yfit-funct(p, xfit))/sigfit
        try:
            (p, cov, info, mesg, flag)=leastsq(res, p0, args=(xfit, yfit, sigfit), full_output=True, warning=False)
        except TypeError:
            # This error means no warning argument is available, so redirect stdout to a null device
            # to suppress printing of (unnecessary) warning messages
            original_stdout = sys.stdout  # keep a reference to STDOUT
            sys.stdout = NullDevice()  # redirect the real STDOUT
            (p, cov, info, mesg, flag)=leastsq(res, p0, args=(xfit, yfit, sigfit), full_output=True)
            sys.stdout = original_stdout  # turn STDOUT back on

        if do_err:
            if cov is not None:
                if N.sum(sig != 1.) > 0:
                    # Real uncertainties supplied: take errors directly from
                    # the covariance diagonal.
                    err = N.array([sqrt(abs(cov[i,i])) for i in range(len(p))])
                else:
                    # Unit weights: scale the covariance by the reduced
                    # chi-square of the fit.
                    chisq=sum(info["fvec"]*info["fvec"])
                    dof=len(info["fvec"])-len(p)
                    err = N.array([sqrt(abs(cov[i,i])*chisq/dof) for i in range(len(p))])
            else:
                p, err = [0, 0], [0, 0]
        else: err = [0]
    else:
        p, err = [0, 0], [0, 0]
    return p, err
def dist_2pt(p1, p2):
    """Euclidean distance between two points given as (x, y) tuples."""
    from math import sqrt
    delta_x = p1[0] - p2[0]
    delta_y = p1[1] - p2[1]
    return sqrt(delta_x * delta_x + delta_y * delta_y)
def angsep(ra1, dec1, ra2, dec2):
    """Returns angular separation between two coordinates (all in degrees)"""
    import math
    deg2rad = math.pi / 180.
    # Cartesian unit vectors on the sphere for both positions.
    v1 = (math.cos(ra1 * deg2rad) * math.cos(dec1 * deg2rad),
          math.sin(ra1 * deg2rad) * math.cos(dec1 * deg2rad),
          math.sin(dec1 * deg2rad))
    v2 = (math.cos(ra2 * deg2rad) * math.cos(dec2 * deg2rad),
          math.sin(ra2 * deg2rad) * math.cos(dec2 * deg2rad),
          math.sin(dec2 * deg2rad))
    # Quarter of the squared chord length between the unit vectors.
    w = sum((a - b) ** 2 for a, b in zip(v1, v2)) / 4.0
    x = math.sqrt(w)
    y = math.sqrt(max(0.0, 1.0 - w))
    # Haversine-style two-argument arctangent, converted back to degrees.
    return 2.0 * math.atan2(x, y) / deg2rad
def std(y):
    """Unbiased (sample) standard deviation of y; falls back to the biased
    value for a single element."""
    from math import sqrt
    import numpy as N
    n = len(y)
    biased = N.std(y)
    if n == 1:
        return biased
    # Apply Bessel's correction to numpy's population estimate.
    return biased * sqrt(float(n) / (n - 1))
def imageshift(image, shift):
    """ Shifts a 2d-image by the tuple (shift). Positive shift is to the right and upwards.
    This is done by fourier shifting. """
    import scipy.fft
    from scipy import ndimage

    shape=image.shape
    # Forward FFT, one axis at a time.
    f1=scipy.fft.fft(image, shape[0], axis=0)
    f2=scipy.fft.fft(f1, shape[1], axis=1)
    # Apply the shift as a phase ramp in the fourier domain.
    # NOTE(review): ndimage.fourier_shift's keyword `axis` selects the axis
    # of a real-transform, not the shift axis -- confirm axis=0 here is
    # intended for this complex 2-D input.
    s=ndimage.fourier_shift(f2,shift, axis=0)
    # Inverse FFT back to the image domain; keep only the real part.
    y1=scipy.fft.ifft(s, shape[1], axis=1)
    y2=scipy.fft.ifft(y1, shape[0], axis=0)
    return y2.real
def trans_gaul(q):
    """Transpose a sequence of rows: returns a list of columns (as lists)."""
    return [list(column) for column in zip(*q)]
def momanalmask_gaus(subim, mask, isrc, bmar_p, allpara=True):
    """ Compute 2d gaussian parameters from moment analysis, for an island with
    multiple gaussians. Compute only for gaussian with index (mask value) isrc.
    Returns normalised peak, centroid, fwhm and P.A. assuming North is top.

    subim  : 2-D image array.
    mask   : integer array of the same shape; pixels with mask == isrc are used.
    bmar_p : beam area in pixels, used to normalise the total flux.
    allpara: when False, only total flux and centroid are computed.
    """
    from math import sqrt, atan, pi
    from .const import fwsig
    import numpy as N
    N.seterr(all='ignore')

    m1 = N.zeros(2); m2 = N.zeros(2); m11 = 0.0; tot = 0.0
    mompara = N.zeros(6)
    n, m = subim.shape[0], subim.shape[1]
    index = [(i, j) for i in range(n) for j in range(m) if mask[i,j]==isrc]
    # Zeroth and first moments: total flux and flux-weighted centroid.
    for coord in index:
        tot += subim[coord]
        m1 += N.array(coord)*subim[coord]
    mompara[0] = tot/bmar_p
    mompara[1:3] = m1/tot

    if allpara:
        # Second moments give the FWHM axes and the position angle.
        for coord in index:
            co = N.array(coord)
            m2 += (co - mompara[1:3])*(co - mompara[1:3])*subim[coord]
            # Fix: N.prod replaces N.product, which was deprecated and
            # removed in NumPy 2.0.
            m11 += N.prod(co - mompara[1:3])*subim[coord]

        mompara[3] = sqrt((m2[0]+m2[1]+sqrt((m2[0]-m2[1])*(m2[0]-m2[1])+4.0*m11*m11))/(2.0*tot))*fwsig
        mompara[4] = sqrt((m2[0]+m2[1]-sqrt((m2[0]-m2[1])*(m2[0]-m2[1])+4.0*m11*m11))/(2.0*tot))*fwsig
        dumr = atan(abs(2.0*m11/(m2[0]-m2[1])))
        dumr = atanproper(dumr, m2[0]-m2[1], 2.0*m11)
        mompara[5] = 0.5*dumr*180.0/pi - 90.0
        if mompara[5] < 0.0: mompara[5] += 180.0
    return mompara
def fit_gaus2d(data, p_ini, x, y, mask = None, err = None):
    """ Fit 2d gaussian to data with x and y also being 2d numpy arrays with x and y positions.
    Takes an optional error array and a mask array (True => pixel is masked).

    Returns (params, success_flag) as produced by scipy.optimize.leastsq.
    """
    from scipy.optimize import leastsq
    import numpy as N
    import sys

    # Silently drop auxiliary arrays whose shape does not match the data.
    if mask is not None and mask.shape != data.shape:
        print('Data and mask array dont have the same shape, ignoring mask')
        mask = None
    if err is not None and err.shape != data.shape:
        print('Data and error array dont have the same shape, ignoring error')
        err = None

    if mask is None: mask = N.zeros(data.shape, bool)
    # Fit only the unmasked pixels.
    g_ind = N.where(~N.ravel(mask))[0]
    if err is None:
        errorfunction = lambda p: N.ravel(gaus_2d(p, x, y) - data)[g_ind]
    else:
        # Weight residuals by the per-pixel uncertainties.
        errorfunction = lambda p: N.ravel((gaus_2d(p, x, y) - data)/err)[g_ind]
    try:
        p, success = leastsq(errorfunction, p_ini, warning=False)
    except TypeError:
        # This error means no warning argument is available, so redirect stdout to a null device
        # to suppress printing of warning messages
        original_stdout = sys.stdout  # keep a reference to STDOUT
        sys.stdout = NullDevice()  # redirect the real STDOUT
        p, success = leastsq(errorfunction, p_ini)
        sys.stdout = original_stdout  # turn STDOUT back on

    return p, success
def deconv(gaus_bm, gaus_c):
    """ Deconvolves gaus_bm from gaus_c to give gaus_dc.
    Stolen shamelessly from aips DECONV.FOR.
    All PA is in degrees.

    Each gaussian is a (major, minor, pa) triple; returns the deconvolved
    (major, minor, pa) as a list.
    """
    from math import pi, cos, sin, atan, sqrt

    rad = 180.0/pi
    gaus_d = [0.0, 0.0, 0.0]

    # Normalise the position angles into [0, 180). Bug fix: the original
    # wrote "x + 900.0 % 180", which Python parses as "x + (900.0 % 180)"
    # == x + 0.0, a no-op; the intended expression is "(x + 900.0) % 180"
    # (adding 900 first makes the operand positive for PAs down to -900).
    phi_c = (gaus_c[2]+900.0) % 180
    phi_bm = (gaus_bm[2]+900.0) % 180

    maj2_bm = gaus_bm[0]*gaus_bm[0]; min2_bm = gaus_bm[1]*gaus_bm[1]
    maj2_c = gaus_c[0]*gaus_c[0]; min2_c = gaus_c[1]*gaus_c[1]
    # Work with the doubled position-angle difference.
    theta=2.0*(phi_c-phi_bm)/rad
    cost = cos(theta)
    sint = sin(theta)
    rhoc = (maj2_c-min2_c)*cost-(maj2_bm-min2_bm)
    if rhoc == 0.0:
        sigic = 0.0
        rhoa = 0.0
    else:
        sigic = atan((maj2_c-min2_c)*sint/rhoc)  # in radians
        rhoa = ((maj2_bm-min2_bm)-(maj2_c-min2_c)*cost)/(2.0*cos(sigic))

    gaus_d[2] = sigic*rad/2.0+phi_bm
    dumr = ((maj2_c+min2_c)-(maj2_bm+min2_bm))/2.0
    gaus_d[0] = dumr-rhoa
    gaus_d[1] = dumr+rhoa
    # Count how many squared axes came out negative (unresolved source).
    error = 0
    if gaus_d[0] < 0.0: error += 1
    if gaus_d[1] < 0.0: error += 1

    # Clip negative squared axes at zero before taking square roots.
    gaus_d[0] = max(0.0,gaus_d[0])
    gaus_d[1] = max(0.0,gaus_d[1])
    gaus_d[0] = sqrt(abs(gaus_d[0]))
    gaus_d[1] = sqrt(abs(gaus_d[1]))
    # Ensure major >= minor, rotating the PA by 90 deg when swapped.
    if gaus_d[0] < gaus_d[1]:
        sint = gaus_d[0]
        gaus_d[0] = gaus_d[1]
        gaus_d[1] = sint
        gaus_d[2] = gaus_d[2]+90.0
    # Same precedence fix as above: fold the output PA into [0, 180).
    gaus_d[2] = (gaus_d[2]+900.0) % 180
    if gaus_d[0] == 0.0:
        gaus_d[2] = 0.0
    else:
        if gaus_d[1] == 0.0:
            # Degenerate (line-like) result: snap the PA when it is nearly
            # perpendicular to the convolved gaussian's PA.
            if (abs(gaus_d[2]-phi_c) > 45.0) and (abs(gaus_d[2]-phi_c) < 135.0):
                gaus_d[2] = (gaus_d[2]+450.0) % 180
    return gaus_d
def deconv2(gaus_bm, gaus_c):
""" Deconvolves gaus_bm from gaus_c to give gaus_dc.
Stolen shamelessly from Miriad gaupar.for.
All PA is in degrees.
Returns deconvolved gaussian parameters and flag:
0 All OK.
1 Result is pretty close to a point source.
2 Illegal result.
"""
from math import pi, cos, sin, atan2, sqrt
rad = 180.0/pi
phi_c = gaus_c[2]+900.0 % 180.0
phi_bm = gaus_bm[2]+900.0 % 180.0
theta1 = phi_c / rad
theta2 = phi_bm / rad
bmaj1 = gaus_c[0]
bmaj2 = gaus_bm[0]
bmin1 = gaus_c[1]
bmin2 = gaus_bm[1]
alpha = ( (bmaj1*cos(theta1))**2 + (bmin1*sin(theta1))**2 -
(bmaj2*cos(theta2))**2 - (bmin2*sin(theta2))**2 )
beta = ( (bmaj1*sin(theta1))**2 + (bmin1*cos(theta1))**2 -
(bmaj2*sin(theta2))**2 - (bmin2*cos(theta2))**2 )
gamma = 2.0 * ( (bmin1**2-bmaj1**2)*sin(theta1)*cos(theta1) -
(bmin2**2-bmaj2**2)*sin(theta2)*cos(theta2) )
s = alpha + beta
t = sqrt((alpha-beta)**2 + gamma**2)
limit = min(bmaj1, bmin1, bmaj2, bmin2)
limit = 0.1*limit*limit
if alpha < 0.0 or beta < 0.0 or s < t:
if alpha < 0.0 or beta < 0.0:
bmaj = 0.0
bpa = 0.0
else:
bmaj = sqrt(0.5*(s+t))
bpa = rad * 0.5 * atan2(-gamma, alpha-beta)
bmin = 0.0
if 0.5*(s-t) < limit and alpha > -limit and beta > -limit:
ifail = 1
else:
ifail = 2
else:
bmaj = sqrt(0.5*(s+t))
bmin = sqrt(0.5*(s-t))
if abs(gamma) + abs(alpha-beta) == 0.0:
bpa = 0.0
else:
bpa = rad * 0.5 * atan2(-gamma, alpha-beta)
ifail = 0
return (bmaj, bmin, bpa), ifail
def get_errors(img, p, stdav, bm_pix=None):
    """ Returns errors from Condon 1997.

        p is a flat sequence of 7n gaussian parameters
        (peak, x0, y0, maj, min, pa, total) for n gaussians; stdav is the
        average map noise. bm_pix, if given, overrides the beam (in pixels).

        Returned list includes errors on:
                 peak flux [Jy/beam]
                 x_0 [pix]
                 y_0 [pix]
                 e_maj [pix]
                 e_min [pix]
                 e_pa [deg]
                 e_tot [Jy]
    """
    from .const import fwsig
    from math import sqrt, log, pow, pi
    from . import mylogger
    import numpy as N

    mylog = mylogger.logging.getLogger("PyBDSM.Compute")

    if len(p) % 7 > 0:
        mylog.error("Gaussian parameters passed have to have 7n numbers")
    ngaus = int(len(p)/7)
    errors = []
    for i in range(ngaus):
        pp = p[i*7:i*7+7]
        ### Now do error analysis as in Condon (and fBDSM)
        size = pp[3:6]
        size = corrected_size(size) # angle is now degrees CCW from +y-axis
        if size[0] == 0.0 or size[1] == 0.0:
            # Degenerate size: no meaningful errors can be computed.
            errors = errors + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        else:
            sq2 = sqrt(2.0)
            if bm_pix is None:
                bm_pix = N.array([img.pixel_beam()[0]*fwsig, img.pixel_beam()[1]*fwsig, img.pixel_beam()[2]])
            dumr = sqrt(abs(size[0] * size[1] / (4.0 * bm_pix[0] * bm_pix[1])))
            dumrr1 = 1.0 + bm_pix[0] * bm_pix[1] / (size[0] * size[0])
            dumrr2 = 1.0 + bm_pix[0] * bm_pix[1] / (size[1] * size[1])
            dumrr3 = dumr * pp[0] / stdav      # SNR-like factor
            d1 = sqrt(8.0 * log(2.0))          # FWHM <-> sigma conversion
            d2 = (size[0] * size[0] - size[1] * size[1]) / (size[0] * size[0])
            try:
                e_peak = pp[0] * sq2 / (dumrr3 * pow(dumrr1, 0.75) * pow(dumrr2, 0.75))
                e_maj = size[0] * sq2 / (dumrr3 * pow(dumrr1, 1.25) * pow(dumrr2, 0.25))
                e_min = size[1] * sq2 / (dumrr3 * pow(dumrr1, 0.25) * pow(dumrr2, 1.25)) # in fw
                pa_rad = size[2] * pi / 180.0
                e_x0 = sqrt( (e_maj * N.sin(pa_rad))**2 + (e_min * N.cos(pa_rad))**2 ) / d1
                e_y0 = sqrt( (e_maj * N.cos(pa_rad))**2 + (e_min * N.sin(pa_rad))**2 ) / d1
                e_pa = 2.0 / (d2 * dumrr3 * pow(dumrr1, 0.25) * pow(dumrr2, 1.25))
                e_pa = e_pa * 180.0/pi
                e_tot = pp[6] * sqrt(e_peak * e_peak / (pp[0] * pp[0]) + (0.25 / dumr / dumr) * (e_maj * e_maj / (size[0] * size[0]) + e_min * e_min / (size[1] * size[1])))
            except Exception:
                # Degenerate geometry (e.g. circular source => d2 == 0) or a
                # numerical error; fall back to zero errors. Was previously a
                # bare "except:", which also swallowed KeyboardInterrupt.
                e_peak = 0.0
                e_x0 = 0.0
                e_y0 = 0.0
                e_maj = 0.0
                e_min = 0.0
                e_pa = 0.0
                e_tot = 0.0
            if abs(e_pa) > 180.0: e_pa = 180.0  # dont know why i did this
            errors = errors + [e_peak, e_x0, e_y0, e_maj, e_min, e_pa, e_tot]

    return errors
def fit_chisq(x, p, ep, mask, funct, order):
    """Reduced chi-square of a (possibly masked) 1-d fit to data p.

    For order == 0 the model is the constant funct(p); otherwise the data
    are fitted with fit_mask_1d and the model is funct evaluated on the
    fitted parameters. Masked channels are excluded from the sum.
    """
    import numpy as N

    unmasked = N.where(N.array(mask) == False)[0]
    if order == 0:
        model = [funct(p)]*len(p)
    else:
        fit_params, _fit_err = fit_mask_1d(x, p, ep, mask, funct, True, order)
        model = funct(fit_params, x)

    sq_dev = (p - model)*(p - model)/(ep*ep)
    n_params = order + 1
    return N.sum(sq_dev[unmasked])/(len(model) - n_params - 1)
def calc_chisq(x, y, ey, p, mask, funct, order):
    """Reduced chi-square of data y (errors ey) against a given model.

    order == 0 compares against the constant funct(y); otherwise against
    funct(p, x). Masked channels are excluded from the sum.
    """
    import numpy as N

    if order == 0:
        model = [funct(y)]*len(y)
    else:
        model = funct(p, x)

    sq_dev = (y - model)*(y - model)/(ey*ey)
    keep = N.where(~N.array(mask))
    dof = len(mask) - (order + 1) - 1
    return N.sum(sq_dev[keep])/dof
def get_windowsize_av(S_i, rms_i, chanmask, K, minchan):
    """Return the smallest channel-averaging window for which every
    unmasked windowed weighted mean has SNR above K*1.1 (the margin
    allows the fitted peak to be lower than the weighted peak).
    Returns 0 if no window size qualifies."""
    import numpy as N

    win_size = 0
    for window in N.arange(2, int(len(S_i)/minchan)+1):
        fluxes, win_vars, win_mask = variance_of_wted_windowedmean(S_i, rms_i, chanmask, window)
        min_snr = N.min(fluxes[~win_mask]/win_vars[~win_mask])
        if min_snr > K*1.1:
            # First (smallest) qualifying window wins.
            win_size = window
            break
    return win_size
def variance_of_wted_windowedmean(S_i, rms_i, chanmask, window_size):
    """ Weighted mean flux and noise per averaging window.

        S_i: per-channel fluxes
        rms_i: per-channel rms values
        chanmask: per-channel boolean mask (True = bad channel)
        window_size: number of channels per window

        Returns (fluxes, vars, mask), one entry per window. The last window
        absorbs any leftover channels; a window is masked only when all of
        its channels are masked.
    """
    from math import sqrt
    import numpy as N

    nchan = len(S_i)
    # Integer division: plain '/' yields a float in Python 3, which breaks
    # N.zeros() and range() below.
    nwin = nchan // window_size
    wt = 1/rms_i/rms_i
    wt = wt/N.median(wt)
    fluxes = N.zeros(nwin); vars = N.zeros(nwin); mask = N.zeros(nwin, bool)
    for i in range(nwin):
        strt = i*window_size; stp = (i+1)*window_size
        if i == nwin-1: stp = nchan  # last window takes the leftover channels
        ind = N.arange(strt, stp)
        m = chanmask[ind]
        index = [arg for ii, arg in enumerate(ind) if not m[ii]]
        if len(index) > 0:
            s = S_i[index]; r = rms_i[index]; w = wt[index]
            fluxes[i] = N.sum(s*w)/N.sum(w)
            vars[i] = 1.0/sqrt(N.sum(1.0/r/r))
            mask[i] = N.prod(m)  # N.product was removed in NumPy 2.0
        else:
            fluxes[i] = 0
            vars[i] = 0
            mask[i] = True

    return fluxes, vars, mask
def fit_mulgaus2d(image, gaus, x, y, mask = None, fitfix = None, err = None, adj=False):
    """ Fit multiple 2-D gaussians to an image simultaneously.

    fitcode : 0=fit all; 1=fit amp; 2=fit amp, posn; 3=fit amp, size

    Returns (para, success): the 6*ngaus fitted parameters
    (amp, x0, y0, major, minor, pa per gaussian) and the leastsq status.
    """
    from scipy.optimize import leastsq
    import numpy as N
    import sys

    if mask is not None and mask.shape != image.shape:
        print('Data and mask array dont have the same shape, ignoring mask')
        mask = None
    if err is not None and err.shape != image.shape:
        print('Data and error array dont have the same shape, ignoring error')
        err = None
    if mask is None: mask = N.zeros(image.shape, bool)
    g_ind = N.where(~N.ravel(mask))[0]

    ngaus = len(gaus)
    if ngaus == 0:
        # Nothing to fit. (This path previously crashed with a NameError:
        # 'success' was misspelled 'sucess' and 'ind'/'p_tofix' were never
        # defined before being used below.)
        return N.zeros(0), 1

    p_ini = []
    for g in gaus:
        p_ini = p_ini + g2param(g, adj)
    p_ini = N.array(p_ini)
    if fitfix is None: fitfix = [0]*ngaus
    ind = N.ones(6*ngaus)                                 # 1 => fit ; 0 => fix
    for i in range(ngaus):
        if fitfix[i] == 1: ind[i*6+1:i*6+6] = 0
        if fitfix[i] == 2: ind[i*6+3:i*6+6] = 0
        if fitfix[i] == 3: ind[i*6+1:i*6+3] = 0
    ind = N.array(ind)
    p_tofit = p_ini[N.where(ind==1)[0]]
    p_tofix = p_ini[N.where(ind==0)[0]]
    if err is None: err = N.ones(image.shape)

    errorfunction = lambda p, x, y, p_tofix, ind, image, err, g_ind: \
                    N.ravel((gaus_2d_itscomplicated(p, x, y, p_tofix, ind)-image)/err)[g_ind]
    try:
        p, success = leastsq(errorfunction, p_tofit, args=(x, y, p_tofix, ind, image, err, g_ind))
    except TypeError:
        # This error means no warning argument is available, so redirect stdout to a null device
        # to suppress printing of warning messages
        original_stdout = sys.stdout  # keep a reference to STDOUT
        sys.stdout = NullDevice()     # redirect the real STDOUT
        p, success = leastsq(errorfunction, p_tofit, args=(x, y, p_tofix, ind, image, err, g_ind))
        sys.stdout = original_stdout  # turn STDOUT back on

    # Merge fitted and fixed parameters back into one array.
    para = N.zeros(6*ngaus)
    para[N.where(ind==1)[0]] = p
    para[N.where(ind==0)[0]] = p_tofix

    # Sizes must be positive; leastsq may have wandered negative.
    for igaus in range(ngaus):
        para[igaus*6+3] = abs(para[igaus*6+3])
        para[igaus*6+4] = abs(para[igaus*6+4])
    return para, success
def gaussian_fcn(g, x1, x2):
    """Evaluate Gaussian on the given grid.

    Parameters:
    x1, x2: grid (as produced by numpy.mgrid f.e.)
    g: Gaussian object or list of Gaussian paramters
    """
    from math import radians, sin, cos
    from .const import fwsig
    import numpy as N

    if isinstance(g, list):
        amp, cen1, cen2, sig1, sig2, theta_deg = g
    else:
        amp = g.peak_flux
        cen1, cen2 = g.centre_pix
        sig1, sig2, theta_deg = g.size_pix

    # Convert FWHM to sigma; theta is defined as 0 along the x-axis.
    sig1 = sig1/fwsig
    sig2 = sig2/fwsig
    th = radians(theta_deg + 90.0)
    cs = cos(th)
    sn = sin(th)

    u = ((x1-cen1)*cs + (x2-cen2)*sn)/sig1
    v = ((x2-cen2)*cs - (x1-cen1)*sn)/sig2
    return amp*N.exp(-(u*u + v*v)/2)
def mclean(im1, c, beam):
    """ Simple image plane clean of one gaussian at posn c and size=beam """
    import numpy as N

    peak = im1[c]
    bmaj, bmin, bpa = beam
    # gaus_2d uses a PA convention rotated by 90 degrees.
    model_par = [peak, c[0], c[1], bmaj, bmin, bpa + 90.0]
    xgrid, ygrid = N.indices(im1.shape)
    model = gaus_2d(model_par, xgrid, ygrid)

    return im1 - model
def arrstatmask(im, mask):
    """ Basic statistics for a masked array. dont wanna use numpy.ma """
    import numpy as N

    good = im[N.where(~mask)]
    av = N.mean(good)
    std = N.std(good)
    maxv = N.max(good)
    minv = N.min(good)
    # NOTE(review): the positions are located by searching the FULL image,
    # so if a masked pixel happens to equal the unmasked extremum the
    # reported position can fall inside the mask — confirm this is intended.
    xs, ys = N.where(im == maxv)
    pos_max = (xs[0], ys[0])
    xs, ys = N.where(im == minv)
    pos_min = (xs[0], ys[0])

    return (av, std, maxv, pos_max, minv, pos_min)
def get_maxima(im, mask, thr, shape, beam, im_pos=None):
    """ Gets the peaks in an image.

    A peak is an unmasked pixel above thr (in im_pos) that is strictly
    greater than all of its in-bounds 8-neighbours. Each peak found is
    subtracted from a copy of im with mclean; the residual is returned
    along with the peak values and positions."""
    from copy import deepcopy as cp
    import numpy as N

    if im_pos is None:
        im_pos = im
    resid = cp(im)
    nx, ny = shape

    candidates = [tuple(co) for co in N.array(N.where(~mask)).transpose()
                  if im_pos[tuple(co)] > thr]
    peak_vals = []
    peak_posns = []
    for cand in candidates:
        neighbours = [im_pos[i, j] for i in range(cand[0]-1, cand[0]+2)
                      for j in range(cand[1]-1, cand[1]+2)
                      if 0 <= i < nx and 0 <= j < ny and (i, j) != cand]
        if all(im_pos[cand] > v for v in neighbours):
            peak_posns.append(cand)
            peak_vals.append(im[cand])
            resid = mclean(resid, cand, beam)

    return peak_vals, peak_posns, resid
def watershed(image, mask=None, markers=None, beam=None, thr=None):
    """Segment an image with scipy's IFT watershed.

    If markers is None, peaks above thr are found (via get_maxima) and
    seeded as markers 2..n+1, with the global minimum marked 1 as the
    background. Returns (watershed labels, markers)."""
    import numpy as N
    from copy import deepcopy as cp
    import scipy.ndimage as nd

    if thr is None:
        thr = -1e9
    if mask is None:
        mask = N.zeros(image.shape, bool)
    if beam is None:
        beam = (2.0, 2.0, 0.0)
    if markers is None:
        inipeak, iniposn, im1 = get_maxima(image, mask, thr, image.shape, beam)
        markers = N.zeros(image.shape, int)
        for idx, posn in enumerate(iniposn):
            markers[posn] = idx + 2
        # Background seed at the global minimum.
        markers[N.unravel_index(N.argmin(image), image.shape)] = 1

    # Invert and rescale to 0-255 so peaks become basins for the IFT.
    scaled = cp(image)
    if scaled.min() < 0.:
        scaled = scaled - scaled.min()
    scaled = 255 - scaled/scaled.max()*255
    opw = nd.watershed_ift(N.array(scaled, N.uint16), markers)

    return opw, markers
def get_kwargs(kwargs, key, typ, default):
    """Return kwargs[key] if present and an instance of typ, else default.

    The previous implementation initialised the result to True, so a
    missing key leaked True through the isinstance check whenever typ was
    bool or int (True is an int instance) instead of returning default.
    """
    obj = kwargs.get(key, default)
    if not isinstance(obj, typ):
        obj = default
    return obj
def read_image_from_file(filename, img, indir, quiet=False):
    """ Reads data and header from indir/filename.

    We can use either pyfits or python-casacore depending on the value
    of img.use_io = 'fits'/'rap'

    PyFITS is required, as it is used to standardize the header format. python-casacore
    is optional.

    Returns (data, hdr) with data shaped as (npol, nchan, x, y), or None on
    failure (the reason is stored in img._reason).
    """
    from . import mylogger
    import os
    import numpy as N
    from copy import deepcopy as cp
    from distutils.version import StrictVersion
    import warnings

    mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Readfile")
    if indir is None or indir == './':
        prefix = ''
    else:
        prefix = indir + '/'
    image_file = prefix + filename

    # Check that file exists
    if not os.path.exists(image_file):
        img._reason = 'File does not exist'
        return None

    # If img.use_io is set, then use appropriate io module
    if img.use_io != '':
        if img.use_io == 'fits':
            try:
                from astropy.io import fits as pyfits
                old_pyfits = False
                use_sections = True
            except ImportError as err:
                import pyfits
                if StrictVersion(pyfits.__version__) < StrictVersion('2.2'):
                    old_pyfits = True
                    use_sections = False
                elif StrictVersion(pyfits.__version__) < StrictVersion('2.4'):
                    old_pyfits = False
                    use_sections = False
                else:
                    old_pyfits = False
                    # Bug fix: use_sections was previously left unset in this
                    # branch, raising a NameError later when trim_box was used.
                    use_sections = True
            try:
                if not old_pyfits:
                    fits = pyfits.open(image_file, mode="readonly", ignore_missing_end=True)
                else:
                    fits = pyfits.open(image_file, mode="readonly")
            except IOError as err:
                img._reason = 'Problem reading file.\nOriginal error: {0}'.format(str(err))
                return None
        if img.use_io == 'rap':
            import casacore.images as pim
            try:
                inputimage = pim.image(image_file)
            except IOError as err:
                img._reason = 'Problem reading file.\nOriginal error: {0}'.format(str(err))
                return None
    else:
        # Simple check of whether casacore and pyfits are available
        # We need pyfits version 2.2 or greater to use the
        # "ignore_missing_end" argument to pyfits.open().
        try:
            try:
                from astropy.io import fits as pyfits
                old_pyfits = False
                use_sections = True
            except ImportError as err:
                import pyfits
                if StrictVersion(pyfits.__version__) < StrictVersion('2.2'):
                    old_pyfits = True
                    use_sections = False
                elif StrictVersion(pyfits.__version__) < StrictVersion('2.4'):
                    old_pyfits = False
                    use_sections = False
                else:
                    old_pyfits = False
                    use_sections = True
            has_pyfits = True
        except ImportError as err:
            raise RuntimeError("Astropy or PyFITS is required.")
        try:
            import casacore.images as pim
            has_casacore = True
        except ImportError as err:
            has_casacore = False
            e_casacore = str(err)

        # First assume image is a fits file, and use pyfits to open it (if
        # available). If that fails, try to use casacore if available.
        failed_read = False
        reason = 0
        try:
            if not old_pyfits:
                fits = pyfits.open(image_file, mode="readonly", ignore_missing_end=True)
            else:
                fits = pyfits.open(image_file, mode="readonly")
            img.use_io = 'fits'
        except IOError as err:
            e_pyfits = str(err)
            if has_casacore:
                try:
                    inputimage = pim.image(image_file)
                    img.use_io = 'rap'
                except IOError as err:
                    e_casacore = str(err)
                    failed_read = True
                    img._reason = 'File is not a valid FITS, CASA, or HDF5 image.'
            else:
                failed_read = True
                e_casacore = "Casacore unavailable"
                img._reason = 'Problem reading file.'
        if failed_read:
            img._reason += '\nOriginal error: {0}\n {1}'.format(e_pyfits, e_casacore)
            return None

    # Now that image has been read in successfully, get header (data is loaded
    # later to take advantage of sectioning if trim_box is specified).
    if not quiet:
        mylogger.userinfo(mylog, "Opened '"+image_file+"'")
    if img.use_io == 'rap':
        tmpdir = img.parentname+'_tmp'
        hdr = convert_casacore_header(inputimage, tmpdir)
        coords = inputimage.coordinates()
        img.coords_dict = coords.dict()
        if 'telescope' in img.coords_dict:
            img._telescope = img.coords_dict['telescope']
        else:
            img._telescope = None
    if img.use_io == 'fits':
        hdr = fits[0].header
        img.coords_dict = None
        if 'TELESCOP' in hdr:
            img._telescope = hdr['TELESCOP']
        else:
            img._telescope = None

    # Make sure data is in proper order. Final order is [pol, chan, x (RA), y (DEC)],
    # so we need to rearrange dimensions if they are not in this order. Use the
    # ctype FITS keywords to determine order of dimensions. Note that both PyFITS
    # and casacore reverse the order of the axes relative to NAXIS, so we must too.
    naxis = hdr['NAXIS']
    data_shape = []
    for i in range(naxis):
        data_shape.append(hdr['NAXIS'+str(i+1)])
    data_shape.reverse()
    data_shape = tuple(data_shape)
    mylog.info("Original data shape of " + image_file +': ' +str(data_shape))
    ctype_in = []
    for i in range(naxis):
        key_val_raw = hdr['CTYPE' + str(i+1)]
        key_val = key_val_raw.split('-')[0]
        ctype_in.append(key_val.strip())
    if 'RA' not in ctype_in or 'DEC' not in ctype_in:
        if 'GLON' not in ctype_in or 'GLAT' not in ctype_in:
            raise RuntimeError("Image data not found")
        else:
            lat_lon = True
    else:
        lat_lon = False

    # Check for incorrect spectral units. For example, "M/S" is not
    # recognized by PyWCS as velocity ("S" is actually Siemens, not
    # seconds). Note that we check CUNIT3 and CUNIT4 even if the
    # image has only 2 axes, as the header may still have these
    # entries.
    for i in range(4):
        key_val_raw = hdr.get('CUNIT' + str(i+1))
        if key_val_raw is not None:
            if 'M/S' in key_val_raw or 'm/S' in key_val_raw or 'M/s' in key_val_raw:
                hdr['CUNIT' + str(i+1)] = 'm/s'
            if 'HZ' in key_val_raw or 'hZ' in key_val_raw or 'hz' in key_val_raw:
                hdr['CUNIT' + str(i+1)] = 'Hz'
            if 'DEG' in key_val_raw or 'Deg' in key_val_raw:
                hdr['CUNIT' + str(i+1)] = 'deg'

    # Make sure that the spectral axis has been identified properly
    if len(ctype_in) > 2 and 'FREQ' not in ctype_in:
        try:
            from astropy.wcs import FITSFixedWarning
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore",category=DeprecationWarning)
                warnings.filterwarnings("ignore",category=FITSFixedWarning)
                from astropy.wcs import WCS
                t = WCS(hdr)
                t.wcs.fix()
        except ImportError as err:
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore",category=DeprecationWarning)
                from pywcs import WCS
                t = WCS(hdr)
                t.wcs.fix()
        spec_indx = t.wcs.spec
        if spec_indx != -1:
            ctype_in[spec_indx] = 'FREQ'

    # Now reverse the axes order to match PyFITS/casacore order and define the
    # final desired order (cytpe_out) and shape (shape_out).
    ctype_in.reverse()
    if lat_lon:
        ctype_out = ['STOKES', 'FREQ', 'GLON', 'GLAT']
    else:
        ctype_out = ['STOKES', 'FREQ', 'RA', 'DEC']
    indx_out = [-1, -1, -1, -1]
    indx_in = range(naxis)
    for i in indx_in:
        for j in range(4):
            if ctype_in[i] == ctype_out[j]:
                indx_out[j] = i
    shape_out = [1, 1, data_shape[indx_out[2]], data_shape[indx_out[3]]]
    if indx_out[0] != -1:
        shape_out[0] = data_shape[indx_out[0]]
    if indx_out[1] != -1:
        shape_out[1] = data_shape[indx_out[1]]
    indx_out = [a for a in indx_out if a >= 0] # trim unused axes

    # Read in data. If only a subsection of the image is desired (as defined
    # by the trim_box option), we can try to use PyFITS to read only that section.
    img._original_naxis = data_shape
    img._original_shape = (shape_out[2], shape_out[3])
    img._xy_hdr_shift = (0, 0)
    if img.opts.trim_box is not None:
        img.trim_box = [int(b) for b in img.opts.trim_box]
        xmin, xmax, ymin, ymax = img.trim_box
        if xmin < 0: xmin = 0
        if ymin < 0: ymin = 0
        if xmax > shape_out[2]: xmax = shape_out[2]
        if ymax > shape_out[3]: ymax = shape_out[3]
        if xmin >= xmax or ymin >= ymax:
            raise RuntimeError("The trim_box option does not specify a valid part of the image.")
        shape_out_untrimmed = shape_out[:]
        shape_out[2] = xmax-xmin
        shape_out[3] = ymax-ymin

        if img.use_io == 'fits':
            sx = slice(int(xmin),int(xmax))
            sy = slice(int(ymin),int(ymax))
            sn = slice(None)
            s_array = [sx, sy]
            for i in range(naxis-2):
                s_array.append(sn)
            s_array.reverse() # to match ordering of data array returned by PyFITS
            if not old_pyfits and use_sections:
                if naxis == 2:
                    data = fits[0].section[s_array[0], s_array[1]]
                elif naxis == 3:
                    data = fits[0].section[s_array[0], s_array[1], s_array[2]]
                elif naxis == 4:
                    data = fits[0].section[s_array[0], s_array[1], s_array[2], s_array[3]]
                else:
                    # If more than 4 axes, just read in the whole image and
                    # do the trimming after reordering.
                    data = fits[0].data
            else:
                data = fits[0].data
            fits.close()
            data = data.transpose(*indx_out) # transpose axes to final order
            data.shape = data.shape[0:4] # trim unused dimensions (if any)
            if naxis > 4 or not use_sections:
                data = data.reshape(shape_out_untrimmed) # Add axes if needed
                data = data[:, :, xmin:xmax, ymin:ymax] # trim to trim_box
            else:
                data = data.reshape(shape_out) # Add axes if needed
        else:
            # With casacore, just read in the whole image and then trim
            data = inputimage.getdata()
            data = data.transpose(*indx_out) # transpose axes to final order
            data.shape = data.shape[0:4] # trim unused dimensions (if any)
            data = data.reshape(shape_out_untrimmed) # Add axes if needed
            data = data[:, :, xmin:xmax, ymin:ymax] # trim to trim_box

        # Adjust WCS keywords for trim_box starting x and y.
        hdr['crpix1'] -= xmin
        hdr['crpix2'] -= ymin
        img._xy_hdr_shift = (xmin, ymin)
    else:
        if img.use_io == 'fits':
            data = fits[0].data
            fits.close()
        else:
            data = inputimage.getdata()
        data = data.transpose(*indx_out) # transpose axes to final order
        data.shape = data.shape[0:4] # trim unused dimensions (if any)
        data = data.reshape(shape_out) # Add axes if needed

    mylog.info("Final data shape (npol, nchan, x, y): " + str(data.shape))

    return data, hdr
def convert_casacore_header(casacore_image, tmpdir):
    """Converts a casacore header to a PyFITS header."""
    import tempfile
    import os
    import atexit
    import shutil
    try:
        from astropy.io import fits as pyfits
    except ImportError as err:
        import pyfits

    # Round-trip through a temporary FITS file: casacore writes the image
    # out as FITS and PyFITS reads the header back in a standard form.
    if not os.path.exists(tmpdir):
        os.makedirs(tmpdir)
    tmp_fits = tempfile.NamedTemporaryFile(delete=False, dir=tmpdir)
    casacore_image.tofits(tmp_fits.name)
    header = pyfits.getheader(tmp_fits.name)
    if os.path.isfile(tmp_fits.name):
        os.remove(tmp_fits.name)

    # Register deletion of temp directory at exit to be sure it is deleted
    atexit.register(shutil.rmtree, tmpdir, ignore_errors=True)
    return header
def write_image_to_file(use, filename, image, img, outdir=None,
                        pad_image=False, clobber=True, is_mask=False):
    """ Writes image array to outdir/filename.

    use: output format, 'fits' or 'rap' (CASA image)
    filename: output file name; the special value 'SAMP' broadcasts the
        image to a SAMP hub instead of writing to disk
    image: 2-D image array to write
    img: Image object supplying WCS, beam, frequency, equinox, etc.
    outdir: output directory (defaults to img.indir)
    pad_image: if True and the image was trimmed, pad back to original size
    clobber: overwrite an existing file/directory
    is_mask: if True, write as a mask (no beam keywords; CASA coords copied)
    """
    import numpy as N
    import os
    from . import mylogger
    mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Writefile")
    wcs_obj = img.wcs_obj
    if pad_image and img.opts.trim_box is not None:
        # Pad image to original size
        xsize, ysize = img._original_shape
        xmin, ymin = img._xy_hdr_shift
        image_pad = N.zeros((xsize, ysize), dtype=N.float32)
        image_pad[xmin:xmin+image.shape[0], ymin:ymin+image.shape[1]] = image
        image = image_pad
    else:
        xmin = 0
        ymin = 0
    # Older Image objects may predate the _telescope attribute.
    if not hasattr(img, '_telescope'):
        telescope = None
    else:
        telescope = img._telescope
    if filename == 'SAMP':
        import tempfile
        # Lazily start a SAMP proxy on first use and cache it on img.
        if not hasattr(img,'samp_client'):
            s, private_key = start_samp_proxy()
            img.samp_client = s
            img.samp_key = private_key
        # Broadcast image to SAMP Hub
        temp_im = make_fits_image(N.transpose(image), wcs_obj, img.beam,
                                  img.frequency, img.equinox, telescope, xmin=xmin, ymin=ymin,
                                  is_mask=is_mask)
        tfile = tempfile.NamedTemporaryFile(delete=False)
        try:
            temp_im.writeto(tfile.name, overwrite=clobber)
        except TypeError:
            # The "overwrite" argument was added in astropy v1.3, so fall back to "clobber"
            # if it doesn't work
            temp_im.writeto(tfile.name, clobber=clobber)
        send_fits_image(img.samp_client, img.samp_key, 'PyBDSM image', tfile.name)
    else:
        # Write image to FITS file
        if outdir is None:
            outdir = img.indir
        if not os.path.exists(outdir) and outdir != '':
            os.makedirs(outdir)
        # NOTE(review): outdir+filename assumes outdir ends with a path
        # separator (or is '') — verify against the callers.
        if os.path.isfile(outdir+filename):
            if clobber:
                os.remove(outdir+filename)
            else:
                return
        if os.path.isdir(outdir+filename):
            if clobber:
                os.system("rm -rf "+outdir+filename)
            else:
                return
        temp_im = make_fits_image(N.transpose(image), wcs_obj, img.beam,
                                  img.frequency, img.equinox, telescope, xmin=xmin, ymin=ymin,
                                  is_mask=is_mask, shape=(img.shape[1], img.shape[0], image.shape[1],
                                  image.shape[0]))
        # For CASA output, write a temporary FITS file first, then convert.
        if use == 'rap':
            outfile = outdir + filename + '.fits'
        else:
            outfile = outdir + filename
        try:
            temp_im.writeto(outfile, overwrite=clobber)
        except TypeError:
            # The "overwrite" argument was added in astropy v1.3, so fall back to "clobber"
            # if it doesn't work
            temp_im.writeto(outfile, clobber=clobber)
        temp_im.close()
        if use == 'rap':
            # For CASA images, read in FITS image and convert
            try:
                import casacore.images as pim
                import casacore.tables as pt
                import os
                outimage = pim.image(outfile)
                outimage.saveas(outdir+filename, overwrite=clobber)
                # For masks, use the coordinates dictionary from the input
                # image, as this is needed in order for the
                # image to work as a clean mask in CASA.
                if is_mask:
                    if img.coords_dict is None:
                        mylog.warning('Mask header information may be incomplete.')
                    else:
                        outtable = pt.table(outdir+filename, readonly=False, ack=False)
                        outtable.putkeywords({'coords': img.coords_dict})
                        outtable.done()
            except ImportError as err:
                import os
                os.remove(outfile)
                raise RuntimeError("Error importing python-casacore. CASA image could not "
                                   "be writen. Use img_format = 'fits' instead.")
def make_fits_image(imagedata, wcsobj, beam, freq, equinox, telescope, xmin=0, ymin=0,
                    is_mask=False, shape=None):
    """Makes a simple FITS hdulist appropriate for single-channel images.

    The spatial WCS of the first two axes is copied from wcsobj (with the
    reference pixel shifted by xmin/ymin for trimmed images); axes 3 and 4
    are STOKES and FREQ. Beam keywords are omitted for masks. For masks with
    a given shape, the data are expanded to all channels/Stokes for CASA
    compatibility.
    """
    from distutils.version import StrictVersion
    try:
        from astropy.io import fits as pyfits
        use_header_update = False
    except ImportError as err:
        import pyfits
        # Due to changes in the way pyfits handles headers from version 3.1 on,
        # we need to check for older versions and change the setting of header
        # keywords accordingly.
        if StrictVersion(pyfits.__version__) < StrictVersion('3.1'):
            use_header_update = True
        else:
            use_header_update = False
    import numpy as np

    # If mask, expand to all channels and Stokes for compatibility with casa
    if is_mask and shape is not None:
        shape_out = shape
    else:
        shape_out = [1, 1, imagedata.shape[0], imagedata.shape[1]]
    hdu = pyfits.PrimaryHDU(np.resize(imagedata, shape_out))
    hdulist = pyfits.HDUList([hdu])
    header = hdulist[0].header

    # Add WCS info
    if use_header_update:
        header.update('CRVAL1', wcsobj.wcs.crval[0])
        header.update('CDELT1', wcsobj.wcs.cdelt[0])
        header.update('CRPIX1', wcsobj.wcs.crpix[0] + xmin)
        header.update('CUNIT1', str(wcsobj.wcs.cunit[0]).strip().lower()) # needed due to bug in pywcs/astropy
        header.update('CTYPE1', wcsobj.wcs.ctype[0])
        header.update('CRVAL2', wcsobj.wcs.crval[1])
        header.update('CDELT2', wcsobj.wcs.cdelt[1])
        header.update('CRPIX2', wcsobj.wcs.crpix[1] + ymin)
        header.update('CUNIT2', str(wcsobj.wcs.cunit[1]).strip().lower())
        header.update('CTYPE2', wcsobj.wcs.ctype[1])
    else:
        header['CRVAL1'] = wcsobj.wcs.crval[0]
        header['CDELT1'] = wcsobj.wcs.cdelt[0]
        header['CRPIX1'] = wcsobj.wcs.crpix[0] + xmin
        header['CUNIT1'] = str(wcsobj.wcs.cunit[0]).strip().lower() # needed due to bug in pywcs/astropy
        header['CTYPE1'] = wcsobj.wcs.ctype[0]
        header['CRVAL2'] = wcsobj.wcs.crval[1]
        header['CDELT2'] = wcsobj.wcs.cdelt[1]
        header['CRPIX2'] = wcsobj.wcs.crpix[1] + ymin
        header['CUNIT2'] = str(wcsobj.wcs.cunit[1]).strip().lower()
        header['CTYPE2'] = wcsobj.wcs.ctype[1]

    # Add STOKES info
    if use_header_update:
        header.update('CRVAL3', 1.0)
        header.update('CDELT3', 1.0)
        header.update('CRPIX3', 1.0)
        header.update('CUNIT3', ' ')
        header.update('CTYPE3', 'STOKES')
    else:
        header['CRVAL3'] = 1.0
        header['CDELT3'] = 1.0
        header['CRPIX3'] = 1.0
        header['CUNIT3'] = ''
        header['CTYPE3'] = 'STOKES'

    # Add frequency info. Use 'Hz' (case-sensitive per the FITS standard);
    # 'HZ' was previously written here, which read_image_from_file then has
    # to normalize back to 'Hz'.
    if use_header_update:
        header.update('RESTFRQ', freq)
        header.update('CRVAL4', freq)
        header.update('CDELT4', 3e8)
        header.update('CRPIX4', 1.0)
        header.update('CUNIT4', 'Hz')
        header.update('CTYPE4', 'FREQ')
        header.update('SPECSYS', 'TOPOCENT')
    else:
        header['RESTFRQ'] = freq
        header['CRVAL4'] = freq
        header['CDELT4'] = 3e8
        header['CRPIX4'] = 1.0
        header['CUNIT4'] = 'Hz'
        header['CTYPE4'] = 'FREQ'
        header['SPECSYS'] = 'TOPOCENT'

    # Add beam info (not meaningful for masks)
    if not is_mask:
        if use_header_update:
            header.update('BMAJ', beam[0])
            header.update('BMIN', beam[1])
            header.update('BPA', beam[2])
        else:
            header['BMAJ'] = beam[0]
            header['BMIN'] = beam[1]
            header['BPA'] = beam[2]

    # Add equinox
    if use_header_update:
        header.update('EQUINOX', equinox)
    else:
        header['EQUINOX'] = equinox

    # Add telescope
    if telescope is not None:
        if use_header_update:
            header.update('TELESCOP', telescope)
        else:
            header['TELESCOP'] = telescope

    hdulist[0].header = header
    return hdulist
def retrieve_map(img, map_name):
    """Returns a map cached on disk, or None if not cached."""
    import numpy as N
    import os

    cache_file = get_name(img, map_name)
    if not os.path.isfile(cache_file):
        return None
    with open(cache_file, 'rb') as infile:
        return N.load(infile)
def store_map(img, map_name, map_data):
    """Caches a map to disk."""
    import numpy as N

    with open(get_name(img, map_name), 'wb') as outfile:
        N.save(outfile, map_data)
def del_map(img, map_name):
    """Deletes a cached map, if present."""
    import os

    cache_file = get_name(img, map_name)
    if os.path.isfile(cache_file):
        os.remove(cache_file)
def get_name(img, map_name):
    """Returns name of cache file (creating the cache directory if needed)."""
    import os

    stokes = 'pi' if img._pi else 'I'
    # One cache subdirectory per wavelet scale and Stokes selection.
    cache_dir = img.tempdir + '/w%i_%s/' % (img.j, stokes)
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    return cache_dir + map_name + '.bin'
def connect(mask):
    """ Find if a mask is singly or multiply connected """
    import scipy.ndimage as nd

    # 8-connectivity: diagonal neighbours count as connected.
    struct = nd.generate_binary_structure(2, 2)
    labels, count = nd.label(mask, struct)
    connected = 'multiple' if count > 1 else 'single'
    return connected, count
def area_polygon(points):
    """ Given an ANGLE ORDERED array points of [[x], [y]], find the total
        area by summing each successive triangle with the centre """
    import numpy as N

    x, y = points
    centre = N.array([N.mean(x), N.mean(y)])
    total = 0.0
    for k in range(len(x) - 1):
        edge_a = N.array([x[k], y[k]]) - centre
        edge_b = N.array([x[k+1], y[k+1]]) - centre
        # Area of triangle (centre, p_k, p_{k+1}) via the cross product.
        total += N.linalg.norm(N.cross(edge_a, edge_b))/2.
    return total
def convexhull_deficiency(isl):
    """ Finds the convex hull for the island and returns the deficiency.
    Code taken from http://code.google.com/p/milo-lab/source/browse/trunk/src/toolbox/convexhull.py?spec=svn140&r=140

    The deficiency is the ratio of the hull area to the island area, with
    the island area corrected by half the number of hull boundary points.
    """
    import random
    import time
    import numpy as N
    import scipy.ndimage as nd
    def _angle_to_point(point, centre):
        """calculate angle in 2-D between points and x axis"""
        delta = point - centre
        if delta[0] == 0.0:
            # Vertical direction: arctan would divide by zero.
            res = N.pi/2.0
        else:
            res = N.arctan(delta[1] / delta[0])
            if delta[0] < 0:
                # Shift into the correct half-plane.
                res += N.pi
        return res
    def area_of_triangle(p1, p2, p3):
        """calculate area of any triangle given co-ordinates of the corners"""
        return N.linalg.norm(N.cross((p2 - p1), (p3 - p1)))/2.
    def convex_hull(points):
        """Calculate subset of points that make a convex hull around points
        Recursively eliminates points that lie inside two neighbouring points until only convex hull is remaining.
        points : ndarray (2 x m) array of points for which to find hull
        Returns: hull_points : ndarray (2 x n), convex hull surrounding points """
        n_pts = points.shape[1]
        #assert(n_pts > 5)
        centre = points.mean(1)
        # Sort boundary points by angle around the centroid.
        angles = N.apply_along_axis(_angle_to_point, 0, points, centre)
        pts_ord = points[:,angles.argsort()]
        pts = [x[0] for x in zip(pts_ord.transpose())]
        prev_pts = len(pts) + 1
        k = 0
        # Repeatedly drop a point whenever the triangles (centre, i, i+1)
        # and (centre, i+1, i+2) together are smaller than (centre, i, i+2)
        # — i.e. point i+1 lies inside its two neighbours — until no more
        # points can be removed.
        while prev_pts > n_pts:
            prev_pts = n_pts
            n_pts = len(pts)
            i = -2
            while i < (n_pts - 2):
                Aij = area_of_triangle(centre, pts[i], pts[(i + 1) % n_pts])
                Ajk = area_of_triangle(centre, pts[(i + 1) % n_pts], \
                                       pts[(i + 2) % n_pts])
                Aik = area_of_triangle(centre, pts[i], pts[(i + 2) % n_pts])
                if Aij + Ajk < Aik:
                    del pts[i+1]
                i += 1
                n_pts = len(pts)
            k += 1
        return N.asarray(pts)
    mask = ~isl.mask_active
    # Boundary pixels of the island: mask XOR its binary erosion.
    points = N.asarray(N.where(mask ^ nd.binary_erosion(mask)))
    hull_pts = list(convex_hull(points))   # these are already in angle-sorted order
    # Close the polygon by repeating the first hull point.
    hull_pts.append(hull_pts[0])
    hull_pts = N.transpose(hull_pts)
    isl_area = isl.size_active
    hull_area = area_polygon(hull_pts)
    # Correct the island area by half the hull point count (pixel-edge effect).
    ratio1 = hull_area/(isl_area - 0.5*len(hull_pts[0]))
    return ratio1
def open_isl(mask, index):
    """ Do an opening on a mask, divide left over pixels among opened sub islands. Mask = True => masked pixel """
    import scipy.ndimage as nd
    import numpy as N

    struct = nd.generate_binary_structure(2, 2)
    footprint = N.ones((index, index), int)
    opened = nd.binary_opening(~mask, footprint)
    # Removing a single pixel can split a sub-island into two pieces.
    opened = check_1pixcontacts(opened)
    # label = 0 corresponds to masked pixels.
    labels, n_subisl = nd.label(opened, struct)
    # Attach the pixels lost by the opening to one of the sub-islands.
    labels, mask = assign_leftovers(mask, opened, n_subisl, labels)

    if labels is not None:
        sizes = N.array([len(N.where(labels == i)[0]) for i in range(1, n_subisl+1)])
        isl_pixs = sizes/float(N.sum(sizes))
    else:
        isl_pixs = None

    return n_subisl, labels, isl_pixs
def check_1pixcontacts(open):
    """Zero out any pixel that is a one-pixel bridge in the opened image.

    If the 8-neighbourhood of a pixel (with the pixel itself removed) splits
    into more than one connected piece, the pixel alone connects those
    pieces and is removed. Operates in place and returns the array."""
    import scipy.ndimage as nd
    import numpy as N
    from copy import deepcopy as cp

    struct = nd.generate_binary_structure(2, 2)
    # Skip the image border so every pixel has a full 3x3 neighbourhood.
    pixels = N.transpose(N.where(open[1:-1, 1:-1] > 0)) + [1, 1]
    for pix in pixels:
        px, py = pix
        patch = cp(open[px-1:px+2, py-1:py+2])
        patch[1, 1] = 0
        patch = N.where(patch == open[tuple(pix)], 1, 0)
        dum, npieces = nd.label(patch, struct)
        if npieces > 1:
            open[tuple(pix)] = 0

    return open
def assign_leftovers(mask, open, nisl, labels):
    """
    Given isl and the image of the mask after opening (open) and the number of new independent islands n,
    connect up the left over pixels to the new islands if they connect to only one island and not more.
    Assign the remaining to an island. We need to assign the leftout pixels to either of many sub islands.
    Easiest is to assign to the sub island with least size.

    Returns (labels, mask); returns (None, sub-island mask) if a group of
    left-over pixels borders no sub-island at all.
    """
    import scipy.ndimage as nd
    import numpy as N
    from copy import deepcopy as cp

    n, m = mask.shape
    # Pixels that were inside the island but removed by the opening.
    leftout = ~mask ^ open
    connectivity = nd.generate_binary_structure(2,2)
    # Label the connected groups of left-over pixels.
    mlabels, count = nd.label(leftout, connectivity)
    # Current pixel count of each sub-island (for the smallest-island rule).
    npix = [len(N.where(labels==b)[0]) for b in range(1,nisl+1)]
    for i_subisl in range(count):
        c_list = []    # is list of all bordering pixels of the sub island
        ii = i_subisl+1
        coords = N.transpose(N.where(mlabels==ii))  # the coordinates of island i of left-out pixels
        for co in coords:
            # 3x3 neighbourhood of each left-over pixel, clipped to the image.
            co8 = [[x,y] for x in range(co[0]-1,co[0]+2) for y in range(co[1]-1,co[1]+2) if x >=0 and y >=0 and x <n and y<m]
            c_list.extend([tuple(cc) for cc in co8 if mlabels[tuple(cc)] == 0])
        c_list = list(set(c_list))     # to avoid duplicates
        vals = N.array([labels[c] for c in c_list])
        # Sub-island labels that this group of left-over pixels touches.
        belongs = list(set(vals[N.nonzero(vals)]))
        if len(belongs) == 0:
            # No suitable islands found => mask pixels
            # NOTE(review): this returns from inside the loop and rebinds
            # 'mask' to the current sub-island only, so any remaining
            # left-over groups are never processed — confirm this is intended.
            for cc in coords:
                mask = (mlabels == ii)
            # mask[cc] = True
            return None, mask
        if len(belongs) == 1:
            # Unambiguous: attach all pixels of this group to that island.
            for cc in coords:
                labels[tuple(cc)] = belongs[0]
        else:             # get the border pixels of the islands
            # Ambiguous: attach to the smallest bordering sub-island.
            nn = [npix[b-1] for b in belongs]
            addto = belongs[N.argmin(nn)]
            for cc in coords:
                labels[tuple(cc)] = addto

    return labels, mask
def _float_approx_equal(x, y, tol=1e-18, rel=1e-7):
if tol is rel is None:
raise TypeError('cannot specify both absolute and relative errors are None')
tests = []
if tol is not None: tests.append(tol)
if rel is not None: tests.append(rel*abs(x))
assert tests
return abs(x - y) <= max(tests)
def approx_equal(x, y, *args, **kwargs):
    """approx_equal(float1, float2[, tol=1e-18, rel=1e-7]) -> True|False
    approx_equal(obj1, obj2[, *args, **kwargs]) -> True|False
    Return True if x and y are approximately equal, otherwise False.
    If x and y are floats, return True if y is within either absolute error
    tol or relative error rel of x. You can disable either the absolute or
    relative check by passing None as tol or rel (but not both).
    For any other objects, x and y are checked in that order for a method
    __approx_equal__, and the result of that is returned as a bool. Any
    optional arguments are passed to the __approx_equal__ method.
    __approx_equal__ can return NotImplemented to signal that it doesn't know
    how to perform that specific comparison, in which case the other object is
    checked instead. If neither object have the method, or both defer by
    returning NotImplemented, approx_equal falls back on the same numeric
    comparison used for floats.
    >>> approx_equal(1.2345678, 1.2345677)
    True
    >>> approx_equal(1.234, 1.235)
    False
    """
    # fix: the doctest examples above previously called a nonexistent
    # 'almost_equal' and could never pass under doctest.
    if not (type(x) is type(y) is float):
        # Skip checking for __approx_equal__ in the common case of two floats.
        methodname = '__approx_equal__'
        # Allow the objects to specify what they consider "approximately equal",
        # giving precedence to x. If either object has the appropriate method, we
        # pass on any optional arguments untouched.
        for a, b in ((x, y), (y, x)):
            try:
                method = getattr(a, methodname)
            except AttributeError:
                continue
            else:
                result = method(b, *args, **kwargs)
                if result is NotImplemented:
                    continue
                return bool(result)
    # If we get here without returning, then neither x nor y knows how to do an
    # approximate equal comparison (or are both floats). Fall back to a numeric
    # comparison.
    return _float_approx_equal(x, y, *args, **kwargs)
def isl_tosplit(isl, opts):
    """ Splits an island and sends back parameters

    Tries morphological openings of the island mask with 3x3 and 5x5
    footprints and decides which (if either) breaks the island into
    acceptable sub-islands.  Returns [index, n_subisl, labels] where
    index is 0 (don't split), 3 or 5.
    """
    import numpy as N
    # user-option thresholds controlling when an opening is accepted
    size_extra5 = opts.splitisl_size_extra5
    frac_bigisl3 = opts.splitisl_frac_bigisl3
    connected, count = connect(isl.mask_active)
    index = 0
    # try openings with 3x3 and 5x5 footprints
    n_subisl3, labels3, isl_pixs3 = open_isl(isl.mask_active, 3)
    n_subisl5, labels5, isl_pixs5 = open_isl(isl.mask_active, 5)
    isl_pixs3, isl_pixs5 = N.array(isl_pixs3), N.array(isl_pixs5)
    # take open 3 or 5
    open3, open5 = False, False
    # NOTE(review): if the first branch below is not taken, max_sub3 is never
    # bound, yet the open-5 branch compares against 0.75*max_sub3 — a latent
    # UnboundLocalError when only the 5x5 opening splits the island. Confirm
    # whether n_subisl5 > 0 implies n_subisl3 > 0 before relying on this.
    if n_subisl3 > 0 and isl_pixs3 is not None: # open 3 breaks up island
      max_sub3 = N.max(isl_pixs3)
      if max_sub3 < frac_bigisl3 : open3 = True # if biggest sub island isnt too big
    if n_subisl5 > 0 and isl_pixs5 is not None: # open 5 breaks up island
      max_sub5 = N.max(isl_pixs5) # if biggest subisl isnt too big OR smallest extra islands add upto 10 %
      if (max_sub5 < 0.75*max_sub3) or (N.sum(N.sort(isl_pixs5)[:len(isl_pixs5)-n_subisl3]) > size_extra5):
        open5 = True
    # index=0 => dont split
    if open5: index = 5; n_subisl = n_subisl5; labels = labels5
    else:
      if open3: index = 3; n_subisl = n_subisl3; labels = labels3
      else: index = 0
    convex_def = convexhull_deficiency(isl)
    #print 'CONVEX = ',convex_def
    # optional diagnostic plots of the island and both label images
    if opts.plot_islands:
        try:
            import matplotlib.pyplot as pl
            pl.figure()
            pl.suptitle('Island '+str(isl.island_id))
            pl.subplot(2,2,1); pl.imshow(N.transpose(isl.image*~isl.mask_active), origin='lower', interpolation='nearest'); pl.title('Image')
            pl.subplot(2,2,2); pl.imshow(N.transpose(labels3), origin='lower', interpolation='nearest'); pl.title('labels3')
            pl.subplot(2,2,3); pl.imshow(N.transpose(labels5), origin='lower', interpolation='nearest'); pl.title('labels5')
        except ImportError:
            print("\033[31;1mWARNING\033[0m: Matplotlib not found. Plotting disabled.")
    if index == 0: return [index, n_subisl5, labels5]
    else: return [index, n_subisl, labels]
class NullDevice():
    """Null device to suppress stdout, etc.

    Minimal file-like object whose methods discard everything; suitable
    for temporarily replacing sys.stdout.
    """
    def write(self, s):
        """Discard the string *s*."""
        pass
    def flush(self):
        """No-op, so callers that flush the stream (e.g. print) don't fail."""
        pass
def ch0_aperture_flux(img, posn_pix, aperture_pix):
    """Measure ch0 flux inside radius aperture_pix pixels centered on posn_pix.
    Returns [flux, fluxE]
    """
    import numpy as N
    if aperture_pix is None:
        return [0.0, 0.0]
    # Cut mean-subtracted ch0 and rms subimages around the source position,
    # clipped to the image boundaries.
    ch0 = img.ch0_arr
    nx, ny = ch0.shape
    xc = int(posn_pix[0])
    yc = int(posn_pix[1])
    half = int(aperture_pix) + 1
    xlo = max(0, xc - half)
    xhi = min(nx, xc + half)
    ylo = max(0, yc - half)
    yhi = min(ny, yc + half)
    mean = img.mean_arr
    rms = img.rms_arr
    aper_im = ch0[xlo:xhi, ylo:yhi] - mean[xlo:xhi, ylo:yhi]
    aper_rms = rms[xlo:xhi, ylo:yhi]
    # source position relative to the subimage origin
    posn_pix_new = [xc - xlo, yc - ylo]
    pixel_beamarea = img.pixel_beamarea()
    return aperture_flux(aperture_pix, posn_pix_new, aper_im, aper_rms, pixel_beamarea)
def aperture_flux(aperture_pix, posn_pix, aper_im, aper_rms, beamarea):
    """Returns aperture flux and error"""
    import numpy as N
    circle = generate_aperture(aper_im.shape[0], aper_im.shape[1],
                               posn_pix[0], posn_pix[1], aperture_pix)
    inside = N.where(circle.astype(bool))
    if N.size(inside) == 0:
        # aperture lies entirely outside the subimage
        return [0.0, 0.0]
    flux = N.nansum(aper_im[inside]) / beamarea  # Jy
    # number of unmasked pixels assigned to the current source
    n_unmasked = N.sum(~N.isnan(aper_im[inside]))
    flux_err = nanmean(aper_rms[inside]) * N.sqrt(n_unmasked / beamarea)  # Jy
    return [flux, flux_err]
def generate_aperture(xsize, ysize, xcenter, ycenter, radius):
    """Makes a mask (1 = inside aperture) for a circular aperture"""
    import numpy
    # pixel centres at 0.5, 1.5, ... so the test is against the cell centre
    xg, yg = numpy.mgrid[0.5:xsize, 0.5:ysize]
    dist2 = (xg - xcenter) ** 2 + (yg - ycenter) ** 2
    return (dist2 <= radius ** 2).astype(int)
def make_src_mask(mask_size, posn_pix, aperture_pix):
    """Makes an island mask (1 = inside aperture) for a given source position.

    Parameters: mask_size = (xsize, ysize) of the output array; posn_pix =
    (x, y) source position in pixels; aperture_pix = aperture radius in
    pixels, or None to get an all-zero mask.
    """
    import numpy as N
    xsize, ysize = mask_size
    if aperture_pix is None:
        # fix: use the builtin int — the N.int alias was removed in NumPy 1.24
        # and raised AttributeError on modern NumPy.
        return N.zeros((xsize, ysize), dtype=int)
    # Bounding box of the aperture, clipped to the mask
    xlo = int(posn_pix[0]-int(aperture_pix)-1)
    if xlo < 0:
        xlo = 0
    xhi = int(posn_pix[0]+int(aperture_pix)+1)
    if xhi > xsize:
        xhi = xsize
    ylo = int(posn_pix[1]-int(aperture_pix)-1)
    if ylo < 0:
        ylo = 0
    yhi = int(posn_pix[1]+int(aperture_pix)+1)
    if yhi > ysize:
        yhi = ysize
    mask = N.zeros((xsize, ysize), dtype=int)
    # Build the circular aperture on the subimage only, then paste it in.
    posn_pix_new = [posn_pix[0]-xlo, posn_pix[1]-ylo]
    submask_xsize = xhi - xlo
    submask_ysize = yhi - ylo
    submask = generate_aperture(submask_xsize, submask_ysize, posn_pix_new[0], posn_pix_new[1], aperture_pix)
    submask_slice = (slice(int(xlo), int(xhi)), slice(int(ylo), int(yhi)))
    mask[submask_slice] = submask
    return mask
def getTerminalSize():
    """
    returns (lines:int, cols:int)

    Tries, in order: a TIOCGWINSZ ioctl on stdin/stdout/stderr, the
    controlling terminal, the `stty size` command, and the LINES/COLUMNS
    environment variables.  Returns (0, 0) if every method fails.
    """
    import os, struct
    def ioctl_GWINSZ(fd):
        import fcntl, termios
        # TIOCGWINSZ fills a struct winsize; the first two shorts are rows, cols
        return struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234"))
    # try stdin, stdout, stderr
    for fd in (0, 1, 2):
        try:
            return ioctl_GWINSZ(fd)
        except Exception:
            pass
    # try os.ctermid()
    try:
        fd = os.open(os.ctermid(), os.O_RDONLY)
        try:
            return ioctl_GWINSZ(fd)
        finally:
            os.close(fd)
    except Exception:
        pass
    # try `stty size`
    try:
        size = tuple(int(x) for x in os.popen("stty size", "r").read().split())
        # fix: on failure stty prints nothing (stderr only), which used to
        # make this branch return an empty tuple instead of falling through
        if len(size) == 2:
            return size
    except Exception:
        pass
    # try environment variables
    try:
        return tuple(int(os.getenv(var)) for var in ("LINES", "COLUMNS"))
    except Exception:
        pass
    # Give up. return 0.
    return (0, 0)
def eval_func_tuple(f_args):
    """Evaluate a (function, arg1, arg2, ...) tuple and return the result.

    Used with itertools to work around multiprocessing's lack of support
    for multi-argument callables.
    """
    func = f_args[0]
    return func(*f_args[1:])
def start_samp_proxy():
    """Starts (registers) and returns a SAMP proxy"""
    import os
    try:
        from xmlrpc.client import ServerProxy  # Python 3
    except ImportError:
        from xmlrpclib import ServerProxy  # Python 2
    lockfile = os.path.expanduser('~/.samp')
    if not os.path.exists(lockfile):
        raise RuntimeError("A running SAMP hub was not found.")
    # Parse the hub lockfile (key=value lines, '#' for comments)
    hub_params = {}
    for line in open(lockfile):
        if not line.startswith('#'):
            key, value = line.split('=', 1)
            hub_params[key] = value.strip()
    # Set up the XML-RPC proxy and register with the hub
    proxy = ServerProxy(hub_params['samp.hub.xmlrpc.url'])
    metadata = {"samp.name": 'PyBDSM', "samp.description.text": 'PyBDSM: the Python Blob Detection and Source Measurement software'}
    result = proxy.samp.hub.register(hub_params['samp.secret'])
    private_key = result['samp.private-key']
    proxy.samp.hub.declareMetadata(private_key, metadata)
    return proxy, private_key
def stop_samp_proxy(img):
    """Stops (unregisters) a SAMP proxy"""
    import os
    if not hasattr(img, 'samp_client'):
        return
    # only unregister while a hub lockfile is still present
    if os.path.exists(os.path.expanduser('~/.samp')):
        img.samp_client.samp.hub.unregister(img.samp_key)
def _samp_notify_all(s, private_key, mtype, params):
    """Broadcast a SAMP notification of type *mtype* with *params* via proxy *s*.

    Shared helper for the send_* functions below.  Raises RuntimeError when
    no SAMP hub lockfile (~/.samp) is present.
    """
    import os
    lockfile = os.path.expanduser('~/.samp')
    if not os.path.exists(lockfile):
        raise RuntimeError("A running SAMP hub was not found.")
    message = {'samp.mtype': mtype, 'samp.params': params}
    s.samp.hub.notifyAll(private_key, message)
def send_fits_image(s, private_key, name, file_path):
    """Send a SAMP notification to load a fits image."""
    import os
    _samp_notify_all(s, private_key, "image.load.fits",
                     {'url': 'file://' + os.path.abspath(file_path), 'name': name})
def send_fits_table(s, private_key, name, file_path):
    """Send a SAMP notification to load a fits table."""
    import os
    _samp_notify_all(s, private_key, "table.load.fits",
                     {'url': 'file://' + os.path.abspath(file_path), 'name': name})
def send_highlight_row(s, private_key, url, row_id):
    """Send a SAMP notification to highlight a row in a table."""
    _samp_notify_all(s, private_key, "table.highlight.row",
                     {'row': str(row_id), 'url': url})
def send_coords(s, private_key, coords):
    """Send a SAMP notification to point at given coordinates."""
    _samp_notify_all(s, private_key, "coord.pointAt.sky",
                     {'ra': str(coords[0]), 'dec': str(coords[1])})
def make_curvature_map(subim):
    """Makes a curvature map with the Aegean curvature algorithm
    (Hancock et al. 2012)
    The Aegean algorithm uses a curvature map to identify regions of negative
    curvature. These regions then define distinct sources.
    """
    import scipy.signal as sg
    import numpy as N
    import sys
    # Make average curavature map: discrete Laplacian-style kernel
    curv_kernal = N.array([[1, 1, 1],[1, -8, 1],[1, 1, 1]])
    # The convolution prints meaningless warnings, so suppress them
    original_stdout = sys.stdout  # keep a reference to STDOUT
    sys.stdout = NullDevice()  # redirect the real STDOUT
    try:
        curv_map = sg.convolve2d(subim, curv_kernal)
    finally:
        # fix: restore stdout even if convolve2d raises; previously an
        # exception left sys.stdout pointing at the null device
        sys.stdout = original_stdout
    return curv_map
def bstat(indata, mask, kappa_npixbeam):
"""Numpy version of the c++ bstat routine
Uses the PySE method for calculating the clipped mean and rms of an array.
This method is superior to the c++ bstat routine (see section 2.7.3 of
http://dare.uva.nl/document/174052 for details) and, since the Numpy
functions used here are written in c, there should be no big computational
penalty in using Python code.
"""
import numpy
from scipy.special import erf, erfcinv
# Flatten array
skpix = indata.flatten()
if mask is not None:
msk_flat = mask.flatten()
unmasked = numpy.where(~msk_flat)
skpix = skpix[unmasked]
ct = skpix.size
iter = 0
c1 = 1.0
c2 = 0.0
maxiter = 200
converge_num = 1e-6
m_raw = numpy.mean(skpix)
r_raw = numpy.std(skpix, ddof=1)
while (c1 >= c2) and (iter < maxiter):
npix = skpix.size
if kappa_npixbeam > 0.0:
kappa = kappa_npixbeam
else:
npixbeam = abs(kappa_npixbeam)
kappa = numpy.sqrt(2.0)*erfcinv(1.0 / (2.0*npix/npixbeam))
if kappa < 3.0:
kappa = 3.0
lastct = ct
medval = numpy.median(skpix)
sig = numpy.std(skpix)
wsm = numpy.where(abs(skpix-medval) < kappa*sig)
ct = len(wsm[0])
if ct > 0:
skpix = skpix[wsm]
c1 = abs(ct - lastct)
c2 = converge_num * lastct
iter += 1
mean = numpy.mean(skpix)
median = numpy.median(skpix)
sigma = numpy.std(skpix, ddof=1)
mode = 2.5*median - 1.5*mean
if sigma > 0.0:
skew_par = abs(mean - median)/sigma
else:
raise RuntimeError("A region with an unphysical rms value has been found. "
"Please check the input image.")
if skew_par <= 0.3:
m = mode
else:
m = median
r1 = numpy.sqrt(2.0*numpy.pi)*erf(kappa/numpy.sqrt(2.0))
r = numpy.sqrt(sigma**2 * (r1 / (r1 - 2.0*kappa*numpy.exp(-kappa**2/2.0))))
return m_raw, r_raw, m, r, iter
def centered(arr, newshape):
    """Return the center newshape portion of the array
    This function is a copy of the private _centered() function in
    scipy.signal.signaltools
    """
    import numpy as np
    target = np.asarray(newshape)
    current = np.array(arr.shape)
    start = (current - target) // 2
    stop = start + target
    # one slice per axis, centred on the middle of the array
    window = tuple(slice(lo, hi) for lo, hi in zip(start, stop))
    return arr[window]
| lofar-astron/PyBDSF | bdsf/functions.py | Python | gpl-3.0 | 76,587 | [
"Gaussian"
] | 6fc4a118561d4df22dc9bf0258619671ed4ce470891b442114c5df0a1bfddb1d |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the 'Time' and 'IndicatorPart' models.

    The ``models`` dict below is the frozen ORM state auto-generated by
    South; it must not be edited by hand.
    """
    def forwards(self, orm):
        """Apply the migration: create the profiles_time and
        profiles_indicatorpart tables."""
        # Adding model 'Time'
        db.create_table('profiles_time', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=20)),
            ('sort', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=1)),
        ))
        db.send_create_signal('profiles', ['Time'])
        # Adding model 'IndicatorPart'
        db.create_table('profiles_indicatorpart', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('indicator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.Indicator'])),
            ('time', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.Time'])),
            ('data_source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.DataSource'])),
            ('formula', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('profiles', ['IndicatorPart'])
    def backwards(self, orm):
        """Reverse the migration: drop the tables created in forwards()."""
        # Deleting model 'Time'
        db.delete_table('profiles_time')
        # Deleting model 'IndicatorPart'
        db.delete_table('profiles_indicatorpart')
    # Frozen ORM state (South-generated) used to build the 'orm' argument above.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'profiles.datadomain': {
            'Meta': {'object_name': 'DataDomain'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Indicator']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'})
        },
        'profiles.datasource': {
            'Meta': {'object_name': 'DataSource'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        'profiles.geolevel': {
            'Meta': {'object_name': 'GeoLevel'},
            'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataSource']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
        },
        'profiles.geomapping': {
            'Meta': {'object_name': 'GeoMapping'},
            'from_record': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mappings_as_from'", 'to': "orm['profiles.GeoRecord']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'to_record': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_as_to'", 'symmetrical': 'False', 'to': "orm['profiles.GeoRecord']"})
        },
        'profiles.georecord': {
            'Meta': {'unique_together': "(('level', 'geo_id', 'custom_name', 'owner'),)", 'object_name': 'GeoRecord'},
            'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
            'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
        },
        'profiles.indicator': {
            'Meta': {'object_name': 'Indicator'},
            'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
            'formula': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
        },
        'profiles.indicatordata': {
            'Meta': {'unique_together': "(('indicator', 'time', 'feature'),)", 'object_name': 'IndicatorData'},
            'feature': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
            'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'time': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'})
        },
        'profiles.indicatordomain': {
            'Meta': {'object_name': 'IndicatorDomain'},
            'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataDomain']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"})
        },
        'profiles.indicatorpart': {
            'Meta': {'object_name': 'IndicatorPart'},
            'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
            'formula': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
            'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']"})
        },
        'profiles.time': {
            'Meta': {'object_name': 'Time'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
        }
    }
    complete_apps = ['profiles']
| ProvidencePlan/Profiles | communityprofiles/profiles/oldmigrations/0002_add_indicator_part_and_time.py | Python | mit | 11,055 | [
"MOE"
] | 9f6ea43017d955c0e94cbfb61d468c69db964bc6018c369dc13d77260fc36fe5 |
# A python3 code
# This is the main module operating the other two modules IGIMF and OSGIMF.
# The IGIMF model calculates an analytically integrated galaxy-wide IMF;
# The OSGIMF model samples all the star cluster mass and all the stellar mass in each star cluster
# and then combind the stars in all star clusters to give the galaxy stellar population.
# --------------------------------------------------------------------------------------------------------------------------------
# importing modules and libraries
import math
import csv # csv and izip/zip are used to create output files
try:
from itertools import izip as zip
except ImportError: # will be python 3.x series
pass
# --------------------------------------------------------------------------------------------------------------------------------
# The star mass resolution is the lower resolution among
# the resolution of histogram (resolution_histogram_relative)
# and the resolution of star generation (resolution_star_... in the file IMF_schulz.py)
resolution_histogram_relative = 0.01  # The star mass resolution of histogram, star mass * resolution_histogram_relative
# also re-defined in a test file, it scales automatically with the SFR
# function_galimf takes in I/OS-GMF parameters and create output files
def function_galimf(IorS, R14orNOT, SFR, alpha3_model, delta_t, M_over_H, I_ecl, M_ecl_U, M_ecl_L, beta_model,
                    I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model, M_turn2, M_str_U, printout=False):
    """Top-level driver: run the IGIMF (IorS == "I") or OSGIMF (IorS == "OS") model.

    Results are communicated through module-level global lists; when
    printout is True they are additionally written to a text file in the
    working directory.  Parameter meanings follow the comments at the top
    of this module (SFR, metallicity M_over_H, ECMF/IMF limits and slopes).
    """
    if IorS == "I":
        global List_xi, List_M_str_for_xi_str
        # analytically integrate the galaxy-wide IMF
        function_draw_igimf(R14orNOT, SFR, alpha3_model, beta_model, delta_t, M_over_H,
                            I_ecl, M_ecl_U, M_ecl_L, I_str, M_str_L, alpha_1, alpha1_model,
                            M_turn, alpha_2, alpha2_model, M_turn2, M_str_U)
        if printout is True:
            # write data for GalIMF_Result/IGIMF_shape
            with open('Galaxy_wide_IMF.txt', 'w') as f:
                writer = csv.writer(f, delimiter=' ')
                f.write("# Galaxy-wide IMF output file.\n# The columns are:\n# mass xi\n# where xi=dN/dm ("
                        "see Yan et.al 2017 A&A...607A.126Y)\n\n")
                writer.writerows(
                    zip(List_M_str_for_xi_str, List_xi))
            print("\n ### Galaxy-wide IMF data saved in the file Galaxy_wide_IMF.txt ###\n")
        return
    elif IorS == "OS":
        global mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number
        # optimally sample all clusters and their stars for one epoch
        sample_for_one_epoch(R14orNOT, SFR, alpha3_model, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_model,
                             I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model, M_turn2, M_over_H, M_str_U)
        function_draw(SFR, M_str_L, M_str_U, M_ecl_L, resolution_histogram_relative)
        function_make_drop_line()
        # write data for GalIMF_Result/histogram
        function_draw_histogram()
        if printout is True:
            with open('Galaxy_stellar_mass_histogram.txt', 'w') as f:
                writer = csv.writer(f, delimiter=' ')
                f.write(
                    "# Stellar mass histogram output file. It gives the generated number of stars in different "
                    "mass range.\n# The columns are:\n# mass_range_center mass_range mass_range_upper_limit mass_"
                    "range_lower_limit star_number_in_the_mass_range\n\n")
                writer.writerows(
                    zip(mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number))
            print("\n ### Stellar mass histogram data saved in the file Galaxy_stellar_mass_histogram.txt ###\n")
        return
    else:
        print("Input wrong parameter for 'IorS'!")
        return
######## IGIMF #########
# This module compute IGIMF as described in Yan et al 2017
# --------------------------------------------------------------------------------------------------------------------------------
# initialization of floating length arrays
List_M_ecl_for_xi_ecl = []   # embedded-cluster masses sampled for the ECMF
List_xi_ecl = []             # ECMF values at those cluster masses
List_M_str_for_xi_str = []   # stellar masses sampled for the stellar IMF
List_xi_str = []             # stellar IMF values at those stellar masses
List_xi = []                 # accumulated galaxy-wide IMF (dN/dm)
# --------------------------------------------------------------------------------------------------------------------------------
# function_dar_IGIMF computes the IGIMF by combining function_ecmf (embedded cluster mass
# function) and function_IMF (stellar mass function in individual embedded clusters)
# equation (1) from Yan et al. 2017
# function returns values of global lists:
# List_M_ecl_for_xi_ecl - list of masses, M_ecl, of embedded clusters for ECMF
# List_xi IGIMF (xi_IGIMF = dN/dm, dN number of star in a mass bin dm) values
# by default normalized to total mass in Msun units (= SFR*10Myr)
# List_M_str_for_xi_str list of stellar masses for stellar IMF in Msun units
# List_xi_L logarithmic IGIMF (xi_IGIMF_L = dN/d log_10 m)
# List_Log_M_str - natural logarithm
def function_draw_igimf(R14orNOT, SFR, alpha3_model, beta_model, delta_t, M_over_H, I_ecl, M_ecl_U, M_ecl_L,
                        I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model, M_turn2, M_str_U):
    """Compute the IGIMF (eq. 1 of Yan et al. 2017) into module globals.

    Combines the ECMF (function_ecmf) with the per-cluster stellar IMF
    (function_IMF); fills List_M_str_for_xi_str / List_xi and their
    logarithmic counterparts List_Log_M_str / List_xi_L.  If SFR == 0 a
    trivial zero IMF is returned instead.
    """
    if SFR != 0:
        global List_M_ecl_for_xi_ecl, List_xi, List_M_str_for_xi_str, List_xi_L, List_Log_M_str, x_IMF, y_IMF, List_xi_str
        # build the embedded-cluster mass function first
        function_ecmf(R14orNOT, SFR, beta_model, delta_t, I_ecl, M_ecl_U, M_ecl_L, M_over_H)
        x_IMF = []
        y_IMF = []
        # IMF slopes for the current metallicity (alpha_3 also depends on M_ecl)
        alpha_1_change = function_alpha_1_change(alpha_1, alpha1_model, M_over_H)
        alpha_2_change = function_alpha_2_change(alpha_2, alpha2_model, M_over_H)
        alpha_3_change = function_alpha_3_change(alpha3_model, List_M_ecl_for_xi_ecl[-1], M_over_H)
        function_draw_xi_str(M_str_L, List_M_ecl_for_xi_ecl[-1], I_str, M_str_L, alpha_1_change,
                             M_turn, alpha_2_change, M_turn2, alpha_3_change, M_str_U)
        # pre-size the output grids with placeholder values
        maximum_number_of_mass_grid = function_maximum_number_of_mass_grid(M_str_L, M_str_U)
        List_xi = [1e-10] * maximum_number_of_mass_grid
        List_M_str_for_xi_str = [M_str_U] * maximum_number_of_mass_grid
        List_xi_str = [1e-10] * maximum_number_of_mass_grid
        number_of_ecl = len(List_M_ecl_for_xi_ecl) - 1
        # accumulate each cluster's IMF, weighted by the ECMF, into List_xi
        function_IMF(alpha3_model, M_over_H, I_str, M_str_L, alpha_1_change, M_turn, alpha_2_change, M_turn2, M_str_U,
                     number_of_ecl, 0)
        x_IMF = []
        y_IMF = []
        # redraw the mass grid of the most massive cluster for the x axis
        function_draw_xi_str(M_str_L, List_M_ecl_for_xi_ecl[-1], I_str, M_str_L, alpha_1_change,
                             M_turn, alpha_2_change, M_turn2, alpha_3_change, M_str_U)
        for i in range(len(x_IMF)):
            List_M_str_for_xi_str[i] = x_IMF[i]
        lenth = len(List_M_str_for_xi_str)
        List_xi_L = [0] * lenth
        List_Log_M_str = [0] * lenth
        # convert to logarithmic form (normalization taken from List_xi[0])
        function_xi_to_xiL(lenth - 1, List_xi[0])
        # flatten trailing placeholder entries still sitting at M_str_U
        for eee in range(len(List_M_str_for_xi_str)):
            if List_M_str_for_xi_str[eee] == M_str_U:
                List_xi[eee] = List_xi[eee-1]
                List_M_str_for_xi_str[eee] = List_M_str_for_xi_str[eee-1]
                List_xi_str[eee] = List_xi_str[eee-1]
    else:
        # no star formation: empty IMF over a token mass range
        List_M_str_for_xi_str = [0, 1000]
        List_xi = [0, 0]
    return
# function_ecmf computes IMF of star clusters (i.e. ECMF - embedded cluster mass function)
# The assumed shape of ECMF is single powerlaw with slope beta (function of SFR)
# the empirical lower limit for star cluster mass is 5 Msun
# the hypotetical upper mass limit is 10^9 Msun, but the M_ecl^max is computed, eq (12) in Yan et al. 2017
def function_ecmf(R14orNOT, SFR, beta_model, delta_t, I_ecl, M_ecl_U, M_ecl_L, M_over_H):
    """Build the embedded-cluster mass function (single power law of slope
    beta) into the module globals List_M_ecl_for_xi_ecl / List_xi_ecl.

    With R14orNOT True a fixed slope beta = 2 is used (Recchi et al. 2014
    style option); otherwise beta follows beta_model via
    function_beta_change.
    """
    global List_M_ecl_for_xi_ecl, List_xi_ecl, x_ECMF, y_ECMF
    x_ECMF = []
    y_ECMF = []
    if R14orNOT == True:
        beta_change = 2
    else:
        beta_change = function_beta_change(beta_model, SFR, M_over_H)
    # fills the x_ECMF / y_ECMF globals
    function_draw_xi_ecl(R14orNOT, M_ecl_L, SFR, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_change)
    # drop the first and last grid points (boundary values)
    List_M_ecl_for_xi_ecl = x_ECMF
    del List_M_ecl_for_xi_ecl[0]
    del List_M_ecl_for_xi_ecl[-1]
    List_xi_ecl = y_ECMF
    del List_xi_ecl[0]
    del List_xi_ecl[-1]
    return
# function_IMF computes stellar IMF in individual embedded star clusters
def function_IMF(alpha3_model, M_over_H, I_str, M_str_L, alpha_1_change, M_turn, alpha_2_change, M_turn2, M_str_U,
                 number_of_ecl, i):
    """Accumulate the galaxy-wide IMF by looping over all embedded clusters.

    For each cluster index i < number_of_ecl: tabulate its analytic IMF into
    x_IMF/y_IMF, copy those into the pre-allocated working lists
    List_M_str_for_xi_str / List_xi_str, then add the ECMF-weighted
    contribution into the running total List_xi (function_update_list_xi).
    """
    while i < number_of_ecl:
        global List_M_str_for_xi_str, List_xi_str, List_M_ecl_for_xi_ecl, x_IMF, y_IMF
        x_IMF = []
        y_IMF = []
        M_ecl = List_M_ecl_for_xi_ecl[i]  # mass of the i-th embedded cluster
        alpha_3_change = function_alpha_3_change(alpha3_model, M_ecl, M_over_H)
        # Here only alpha_3_change is recalculated as alpha1(2)_change do not depend on M_ecl thus do not change.
        function_draw_xi_str(M_str_L, M_ecl, I_str, M_str_L, alpha_1_change, M_turn, alpha_2_change, M_turn2,
                             alpha_3_change, M_str_U)
        # Copy the freshly drawn grid into the fixed-size working lists;
        # min() guards against the draw producing more points than allocated.
        for qqq in range(min(len(x_IMF), len(List_M_str_for_xi_str))):
            List_M_str_for_xi_str[qqq] = x_IMF[qqq]
        for www in range(min(len(y_IMF), len(List_xi_str))):
            List_xi_str[www] = y_IMF[www]
        number_of_str = len(List_M_str_for_xi_str)
        function_update_list_xi(i, number_of_str, 0)
        (i) = (i+1)
    return
def function_update_list_xi(i, number_of_str, j):
    """Add the i-th cluster's stellar IMF into the galaxy-wide IMF total.

    For every stellar-mass index from j to number_of_str - 1, List_xi is
    incremented by the cluster IMF value weighted by the ECMF value of
    cluster i and that cluster's mass-bin width.
    """
    global List_xi, List_xi_str, List_xi_ecl, List_M_ecl_for_xi_ecl
    cluster_weight = List_xi_ecl[i] * (List_M_ecl_for_xi_ecl[i + 1] - List_M_ecl_for_xi_ecl[i])
    for mass_index in range(j, number_of_str):
        List_xi[mass_index] += List_xi_str[mass_index] * cluster_weight
    return
def function_xi_to_xiL(i, unit):
    """Convert the number-based IMF List_xi into log10 form.

    Walks indices i, i-1, ..., 0, filling List_xi_L with
    log10(xi * ln(10) * m / unit * 1800) and List_Log_M_str with log10(m).
    Zero xi values are floored at 1e-5 so the logarithm is defined.
    """
    global List_xi_L, List_xi, List_M_str_for_xi_str, List_Log_M_str
    for idx in range(i, -1, -1):
        if List_xi[idx] == 0:
            List_xi[idx] = 10 ** (-5)
        mass = List_M_str_for_xi_str[idx]
        List_xi_L[idx] = math.log(List_xi[idx] * math.log(10) * mass / unit * 1800, 10)
        List_Log_M_str[idx] = math.log(mass, 10)
    return
############ OSGIMF #############
# -----------------------------------------------------------------------------------------
# initialization of open-length arrays
# -----------------------------------------------------------------------------------------
List_M_str_all_i = []  # per-cluster lists of sampled stellar masses (one sub-list per cluster, descending)
List_n_str_all_i = []  # per-cluster lists of star counts matching List_M_str_all_i
List_mass_grid_x_axis = []  # step-plot x values (all stars)
List_star_number_in_mass_grid_y_axis = []  # step-plot y values (all stars)
List_star_number_in_mass_grid_y_axis2 = []  # y values for star index 0 per cluster (the most massive star)
List_star_number_in_mass_grid_y_axis3 = []  # y values for star index 1 per cluster (presumably 2nd most massive)
List_star_number_in_mass_grid_y_axis4 = []  # y values for star index 2 per cluster (presumably 3rd most massive)
List_mass_grid = []  # histogram mass-grid edges, in descending order
List_star_number_in_mass_grid = []  # weighted star count in each grid bin
# -----------------------------------------------------------------------------------------
# This function gives the stellar masses in entire galaxy in unsorted manner
# i.e. the stars are grouped in parent clusters
def sample_for_one_epoch(R14orNOT, SFR, alpha3_model, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_model,
                         I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model, M_turn2, M_over_H, M_str_U):
    """Sample (OSGIMF) all stellar masses formed in one star-formation epoch.

    First draws the embedded-cluster masses (function_sample_cluster), then
    the stars of every cluster; results end up grouped per parent cluster in
    the module-level lists List_M_str_all_i / List_n_str_all_i.
    """
    global List_M_str_all_i, List_n_str_all_i, list_M_ecl_i
    # ECMF slope for this SFR/metallicity
    beta_change = function_beta_change(beta_model, SFR, M_over_H)
    function_sample_cluster(R14orNOT, SFR, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_change)
    len_of_M_ecl_list = len(list_M_ecl_i)
    List_M_str_all_i = []
    List_n_str_all_i = []
    function_sample_star_from_clusters(alpha3_model, I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model,
                                       M_turn2, M_over_H, M_str_U, len_of_M_ecl_list, 0)
    return
# Masses of formed clusters
def function_sample_cluster(R14orNOT, SFR, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_change):
    """Sample the masses of all embedded clusters formed in one epoch.

    Resets the module-level cluster lists and the running maximum, then
    delegates the optimal sampling to function_sample_from_ecmf, which
    refills them.
    """
    global list_m_ecl_i, list_n_ecl_i, list_M_ecl_i, M_max_ecl
    list_m_ecl_i, list_n_ecl_i, list_M_ecl_i = [], [], []
    M_max_ecl = 0
    function_sample_from_ecmf(R14orNOT, SFR, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_change)
    return
# Stellar masses in a given star cluster
def function_sample_star_from_clusters(alpha3_model, I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model,
                                       M_turn2, M_over_H, M_str_U, len_of_M_ecl_list, i):
    """Sample the stellar content of every cluster in list_M_ecl_i.

    For each cluster index from i to len_of_M_ecl_list - 1: evaluate the
    (metallicity- and cluster-mass-dependent) IMF slopes, optimally sample
    the cluster's stars, and append the per-cluster results to
    List_M_str_all_i / List_n_str_all_i.
    """
    global List_M_str_all_i, List_n_str_all_i, list_m_str_i, list_n_str_i, list_M_str_i
    for cluster_index in range(i, len_of_M_ecl_list):
        list_m_str_i = []
        list_n_str_i = []
        list_M_str_i = []
        slope_1 = function_alpha_1_change(alpha_1, alpha1_model, M_over_H)
        slope_2 = function_alpha_2_change(alpha_2, alpha2_model, M_over_H)
        slope_3 = function_alpha_3_change(alpha3_model, list_M_ecl_i[cluster_index], M_over_H)
        function_sample_from_imf(list_M_ecl_i[cluster_index],
                                 I_str, M_str_L, slope_1, M_turn, slope_2, M_turn2, slope_3, M_str_U)
        # save this cluster's stars into the "all_i" lists
        List_M_str_all_i += [list_M_str_i]
        List_n_str_all_i += [list_n_str_i]
    return
##################################################################################
## The sampling is finished here. Below are just sorting, binning, and plotting.##
##################################################################################
# Now star mass are recorded in individual star clusters in the "List_M_str_all_i" and "List_n_str_all_i"
# we have for the whole galaxy: cluster mass, number of cluster with certain mass
# and for each cluster: star mass, number of stars with certain mass
# Sort out all star masses in an epoch into a mass grid
# The main purpose here is to sort the stellar masses in preparation for plotting the output
def function_draw(SFR, M_str_low, M_str_up, M_ecl_low, resolution_histogram_relative):
    """Bin the sampled stellar masses into a mass grid and build the
    step-plot x/y arrays for four histograms: all stars, and the stars at
    per-cluster indices 0, 1 and 2 (the three most massive stars).

    Sentinel values 0.0000001 are prepended/appended so the plots can be
    drawn on logarithmic axes.
    """
    M_low = min(M_str_low, M_ecl_low)
    global List_mass_grid, List_star_number_in_mass_grid, List_mass_grid_x_axis, List_star_number_in_mass_grid_y_axis
    # for all stars
    List_mass_grid = []
    function_mass_grid(SFR, M_str_up, M_low, resolution_histogram_relative)
    List_mass_grid += [M_low]
    List_star_number_in_mass_grid = [0] * (len(List_mass_grid) - 1)
    function_sort_out_star_mass(0)
    ##########
    List_mass_grid_x_axis = [M_str_up]
    make_mass_grid_x_axis(1)
    List_mass_grid_x_axis += [M_low]
    List_star_number_in_mass_grid_y_axis = []
    make_star_number_in_mass_grid_y_axis(0)
    # duplicate the outermost x values and add tiny y sentinels to close the step plot
    List_mass_grid_x_axis = [List_mass_grid_x_axis[0]] + List_mass_grid_x_axis
    List_mass_grid_x_axis += [List_mass_grid_x_axis[-1]]
    List_star_number_in_mass_grid_y_axis = [0.0000001] + List_star_number_in_mass_grid_y_axis
    List_star_number_in_mass_grid_y_axis += [0.0000001]
    # for most massive star
    global List_mass_grid2, List_star_number_in_mass_grid2, List_mass_grid_x_axis2, List_star_number_in_mass_grid_y_axis2
    # NOTE: this is an alias of List_mass_grid, not a copy (same for grids 3 and 4)
    List_mass_grid2 = List_mass_grid
    List_star_number_in_mass_grid2 = [0] * (len(List_mass_grid2) - 1)
    function_sort_out_star_mass2(0)
    ##########
    List_star_number_in_mass_grid_y_axis2 = []
    make_star_number_in_mass_grid_y_axis2(0)
    List_star_number_in_mass_grid_y_axis2 = [0.0000001] + List_star_number_in_mass_grid_y_axis2
    List_star_number_in_mass_grid_y_axis2 += [0.0000001]
    ###################################
    global List_mass_grid3, List_star_number_in_mass_grid3, List_mass_grid_x_axis3, List_star_number_in_mass_grid_y_axis3
    List_mass_grid3 = List_mass_grid
    List_star_number_in_mass_grid3 = [0] * (len(List_mass_grid3) - 1)
    function_sort_out_star_mass3(0)
    ##########
    List_star_number_in_mass_grid_y_axis3 = []
    make_star_number_in_mass_grid_y_axis3(0)
    List_star_number_in_mass_grid_y_axis3 = [0.0000001] + List_star_number_in_mass_grid_y_axis3
    List_star_number_in_mass_grid_y_axis3 += [0.0000001]
    ###################################
    global List_mass_grid4, List_star_number_in_mass_grid4, List_mass_grid_x_axis4, List_star_number_in_mass_grid_y_axis4
    List_mass_grid4 = List_mass_grid
    List_star_number_in_mass_grid4 = [0] * (len(List_mass_grid4) - 1)
    function_sort_out_star_mass4(0)
    ##########
    List_star_number_in_mass_grid_y_axis4 = []
    make_star_number_in_mass_grid_y_axis4(0)
    List_star_number_in_mass_grid_y_axis4 = [0.0000001] + List_star_number_in_mass_grid_y_axis4
    List_star_number_in_mass_grid_y_axis4 += [0.0000001]
    return
### make a mass grid ###
def function_mass_grid(SFR, mass, M_str_low, resolution_histogram_relative):
    """Extend the global List_mass_grid with a geometrically shrinking grid.

    Starting at `mass`, each value is appended and then reduced by the
    relative resolution, until it is no longer above M_str_low.  `SFR` is
    unused by the default spacing; it is kept for the alternative
    SFR-dependent spacing shown in the comment below.
    """
    global List_mass_grid
    while mass > M_str_low:
        List_mass_grid.append(mass)
        mass = mass * (1 - resolution_histogram_relative)
        # Alternative SFR-dependent grid spacing that is sometimes useful.
        # To use it, comment out the line above and uncomment:
        # mass = (mass * (0.967 + math.log(SFR, 10) / 400) / (math.log(mass + 1) ** 2 /
        #         (2 ** (math.log(SFR, 10) + 6.85) - 1) + 1))
    return
# count the number of star in each grid
ls = 0  # module-level cursor: index of the mass-grid bin last found by function_find_k
def function_sort_out_star_mass(i):
    """Bin every sampled star of every cluster (from cluster index i on)
    into List_star_number_in_mass_grid."""
    global ls
    for cluster_index in range(i, len(List_M_str_all_i)):
        ls = 0  # restart the grid cursor for each cluster
        subfunction_sort_out(cluster_index, 0)
    return
def function_sort_out_star_mass2(i):
    """Bin only star index 0 (the most massive star) of each cluster, from
    cluster index i on, into List_star_number_in_mass_grid2."""
    global ls
    for cluster_index in range(i, len(List_M_str_all_i)):
        ls = 0
        subfunction_sort_out2(cluster_index, 0)
    return
def function_sort_out_star_mass3(i):
    """Bin only star index 1 of each cluster, from cluster index i on, into
    List_star_number_in_mass_grid3."""
    global ls
    for cluster_index in range(i, len(List_M_str_all_i)):
        ls = 0
        subfunction_sort_out3(cluster_index, 1)
    return
def function_sort_out_star_mass4(i):
    """Bin only star index 2 of each cluster, from cluster index i on, into
    List_star_number_in_mass_grid4."""
    global ls
    for cluster_index in range(i, len(List_M_str_all_i)):
        ls = 0
        subfunction_sort_out4(cluster_index, 2)
    return
def subfunction_sort_out(i, j):
    """Add the weighted star counts of cluster i (star indices j onward)
    into the matching bins of List_star_number_in_mass_grid."""
    global ls, List_n_str_all_i
    for star_index in range(j, len(List_M_str_all_i[i])):
        function_find_k(i, star_index, ls)  # advances cursor `ls` to the right bin
        List_star_number_in_mass_grid[ls] += List_n_str_all_i[i][star_index] * list_n_ecl_i[i]
    return
def subfunction_sort_out2(i, j):
    """Bin a single star (index j) of cluster i into
    List_star_number_in_mass_grid2, weighted by the cluster multiplicity."""
    global ls
    if j >= len(List_M_str_all_i[i]):
        return
    function_find_k(i, j, ls)
    List_star_number_in_mass_grid2[ls] += list_n_ecl_i[i]
    return
def subfunction_sort_out3(i, j):
    """Bin a single star (index j) of cluster i into
    List_star_number_in_mass_grid3, weighted by the cluster multiplicity."""
    global ls
    if j >= len(List_M_str_all_i[i]):
        return
    function_find_k(i, j, ls)
    List_star_number_in_mass_grid3[ls] += list_n_ecl_i[i]
    return
def subfunction_sort_out4(i, j):
    """Bin a single star (index j) of cluster i into
    List_star_number_in_mass_grid4, weighted by the cluster multiplicity."""
    global ls
    if j >= len(List_M_str_all_i[i]):
        return
    function_find_k(i, j, ls)
    List_star_number_in_mass_grid4[ls] += list_n_ecl_i[i]
    return
def function_find_k(i, j, k):
    """Advance k through the descending mass grid until the (k+1)-th edge is
    no longer above the mass of star (i, j); the final position is stored in
    the module-level cursor `ls`."""
    global ls
    target_mass = List_M_str_all_i[i][j]
    while List_mass_grid[k + 1] > target_mass:
        k = k + 1
        ls = k
    return
# prepare for the breaking line plot
def make_mass_grid_x_axis(i):
    """Append each interior grid edge twice (from index i to the
    second-to-last) to List_mass_grid_x_axis — the x values of a step plot."""
    global List_mass_grid_x_axis, List_mass_grid
    for edge_index in range(i, len(List_mass_grid) - 1):
        List_mass_grid_x_axis += [List_mass_grid[edge_index]] * 2
    return
def make_star_number_in_mass_grid_y_axis(i):
    """Append, twice per bin, the star count divided by the bin width (a
    number density) — the y values matching make_mass_grid_x_axis."""
    global List_star_number_in_mass_grid_y_axis, List_star_number_in_mass_grid, List_mass_grid
    for bin_index in range(i, len(List_star_number_in_mass_grid)):
        density = List_star_number_in_mass_grid[bin_index] / (
            List_mass_grid[bin_index] - List_mass_grid[bin_index + 1])
        List_star_number_in_mass_grid_y_axis += [density] * 2
    return
def make_star_number_in_mass_grid_y_axis2(i):
    """Per-bin number density (appended twice) for the most-massive-star
    histogram."""
    global List_star_number_in_mass_grid_y_axis2, List_star_number_in_mass_grid2, List_mass_grid2
    for bin_index in range(i, len(List_star_number_in_mass_grid2)):
        density = List_star_number_in_mass_grid2[bin_index] / (
            List_mass_grid2[bin_index] - List_mass_grid2[bin_index + 1])
        List_star_number_in_mass_grid_y_axis2 += [density] * 2
    return
def make_star_number_in_mass_grid_y_axis3(i):
    """Per-bin number density (appended twice) for the star-index-1
    histogram."""
    global List_star_number_in_mass_grid_y_axis3, List_star_number_in_mass_grid3, List_mass_grid3
    for bin_index in range(i, len(List_star_number_in_mass_grid3)):
        density = List_star_number_in_mass_grid3[bin_index] / (
            List_mass_grid3[bin_index] - List_mass_grid3[bin_index + 1])
        List_star_number_in_mass_grid_y_axis3 += [density] * 2
    return
def make_star_number_in_mass_grid_y_axis4(i):
    """Per-bin number density (appended twice) for the star-index-2
    histogram."""
    global List_star_number_in_mass_grid_y_axis4, List_star_number_in_mass_grid4, List_mass_grid4
    for bin_index in range(i, len(List_star_number_in_mass_grid4)):
        density = List_star_number_in_mass_grid4[bin_index] / (
            List_mass_grid4[bin_index] - List_mass_grid4[bin_index + 1])
        List_star_number_in_mass_grid_y_axis4 += [density] * 2
    return
def function_make_drop_line1(i):
    """Floor zero y values (from index i to the second-to-last) at 1e-7 so
    the all-stars histogram can be drawn on a logarithmic axis."""
    for idx in range(i, len(List_star_number_in_mass_grid_y_axis) - 1):
        if List_star_number_in_mass_grid_y_axis[idx] == 0:
            List_star_number_in_mass_grid_y_axis[idx] = 0.0000001
def function_make_drop_line2(i):
    """Floor zero y values at 1e-7 for the most-massive-star histogram."""
    for idx in range(i, len(List_star_number_in_mass_grid_y_axis2) - 1):
        if List_star_number_in_mass_grid_y_axis2[idx] == 0:
            List_star_number_in_mass_grid_y_axis2[idx] = 0.0000001
def function_make_drop_line3(i):
    """Floor zero y values at 1e-7 for the star-index-1 histogram."""
    for idx in range(i, len(List_star_number_in_mass_grid_y_axis3) - 1):
        if List_star_number_in_mass_grid_y_axis3[idx] == 0:
            List_star_number_in_mass_grid_y_axis3[idx] = 0.0000001
def function_make_drop_line4(i):
    """Floor zero y values at 1e-7 for the star-index-2 histogram."""
    for idx in range(i, len(List_star_number_in_mass_grid_y_axis4) - 1):
        if List_star_number_in_mass_grid_y_axis4[idx] == 0:
            List_star_number_in_mass_grid_y_axis4[idx] = 0.0000001
def function_make_drop_line():
    """Apply the zero-flooring to all four histogram y-value lists."""
    for floor_zeros in (function_make_drop_line1, function_make_drop_line2,
                        function_make_drop_line3, function_make_drop_line4):
        floor_zeros(0)
    return
######################## histogram ########################
mass_range_center = []  # center of each histogram bin
mass_range = []  # width of each histogram bin
mass_range_upper_limit = []  # all mass-grid edges (one more entry than bins)
mass_range_lower_limit = []  # lower edge of each bin
star_number = []  # star count per bin (a copy of List_star_number_in_mass_grid)
def function_draw_histogram():
    """Derive the histogram-plotting arrays (bin centers, widths, edges and
    star counts) from List_mass_grid / List_star_number_in_mass_grid.

    List_mass_grid holds the bin edges in descending order, so edge i is
    each bin's upper limit and edge i+1 its lower limit.
    """
    global mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number
    upper_edges = List_mass_grid[:-1]
    lower_edges = List_mass_grid[1:]
    mass_range_center = [0.5 * (hi + lo) for hi, lo in zip(upper_edges, lower_edges)]
    mass_range = [hi - lo for hi, lo in zip(upper_edges, lower_edges)]
    mass_range_upper_limit = list(List_mass_grid)  # note: keeps ALL edges
    mass_range_lower_limit = list(lower_edges)
    star_number = list(List_star_number_in_mass_grid)  # shallow copy
    return
############## IMF #################
# use equations in "supplementary-document-galimf.pdf"
# The star mass resolution is the lower resolution among "relative resolution" and "absolute resolution" where
# the relative resolution = star mass * resolution_star_relative
# the absolute resolution = resolution_star_absolute
resolution_star_relative = 0.01  # relative star-mass resolution: m * resolution_star_relative
resolution_star_absolute = 0.01  # absolute star-mass resolution floor
mass_grid_index = 1.01  # geometric ratio between consecutive mass-grid points
list_m_str_i = []  # integration limits m_i of the star sampling (descending)
list_n_str_i = []  # number of stars between consecutive limits
list_M_str_i = []  # representative stellar mass of each interval
def function_sample_from_imf(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):
    """Optimally sample the stars of one cluster of mass M_ecl.

    Steps (see supplementary-document-galimf.pdf):
      1. solve for the most massive star M_max;
      2. compute the IMF normalization constants k1, k2, k3;
      3. compute the integration limits m_i and star counts n_i (eq. 18);
      4. compute the representative mass M_i of each interval (eq. 20).
    Results are left in the globals list_m_str_i, list_n_str_i, list_M_str_i.
    """
    global list_m_str_i, list_n_str_i, list_M_str_i, M_max, M_max_function, k3, k2, k1, resolution_star_relative, resolution_star_absolute
    M_max = 0
    M_max_function = 0
    function_M_max(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)
    k3 = 0
    k2 = 0
    k1 = 0
    function_k321(I_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)
    list_m_str_i = []
    list_n_str_i = []
    function_m_i_str(k1, k2, k3, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_max, resolution_star_relative, resolution_star_absolute) # equation 18
    list_M_str_i = []
    length_n = len(list_n_str_i)
    function_M_i(k1, k2, k3, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U, length_n) # equation 20
    # drop the leading count so list_n_str_i pairs with intervals, not limits
    del list_n_str_i[0]
    return
# M_max is computed by solving simultaneously equations (3) and (4) from Yan et al 2017
def function_M_max(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):
    """Solve for the most massive star M_max of a cluster of mass M_ecl.

    Solves equations (3) and (4) of Yan et al. 2017 simultaneously, first
    assuming M_max lies in the alpha_3 segment (above M_turn2); when the
    solution falls below M_turn2 (or below M_turn) the corresponding lower
    segment's equation is solved instead.  Sets the globals:
      M_max          -- the solution,
      M_max_function -- 1/2/3 for the segment the solution lies in,
                        0 when the solve failed (M_max < M_L).

    Fix: the `global` statement previously listed M_max_function twice.
    """
    global M_max_function, M_max
    # equation 16: constant term assuming M_max is in the alpha_3 segment
    M_constant = M_ecl * M_U ** (1 - alpha_3) / I_str / (1 - alpha_3) - M_turn2 ** (alpha_2 - alpha_3) * M_turn ** (
        alpha_1 - alpha_2) * (M_turn ** (2 - alpha_1) - M_L ** (2 - alpha_1)) / (2 - alpha_1) - M_turn2 ** (
        alpha_2 - alpha_3) * (M_turn2 ** (2 - alpha_2) - M_turn ** (
            2 - alpha_2)) / (2 - alpha_2) + M_turn2 ** (2 - alpha_3) / (2 - alpha_3)  # equation 16
    function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, M_U/2, 10, -1)  # equation 16
    M_max_function = 1
    if M_max < M_turn2:
        # solution fell below the alpha_3 segment: redo with equation 25
        M_constant2 = M_ecl * M_turn2 ** (1 - alpha_2) / I_str / (1 - alpha_2) + M_ecl * M_turn2 ** (
            alpha_3 - alpha_2) * (M_U ** (
                1 - alpha_3) - M_turn2 ** (1 - alpha_3)) / I_str / (1 - alpha_3) - M_turn ** (alpha_1 - alpha_2) * (
            M_turn ** (2 - alpha_1) - M_L ** (
                2 - alpha_1)) / (2 - alpha_1) + M_turn ** (2 - alpha_2) / (2 - alpha_2)  # equation 25
        function_M_max_2(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, 0.75, 0.1, -1)  # equation 25
        M_max_function = 2
        if M_max < M_turn:
            # solution fell below the alpha_2 segment: redo with equation 29
            M_constant3 = M_ecl * M_turn ** (1 - alpha_1) / I_str / (1 - alpha_1) + M_ecl * M_turn ** (
                alpha_2 - alpha_1) * (M_turn2 ** (
                    1 - alpha_2) - M_turn ** (1 - alpha_2)) / I_str / (1 - alpha_2) + M_ecl * M_turn2 ** (
                alpha_3 - alpha_2) * M_turn ** (
                    alpha_2 - alpha_1) * (M_U ** (1 - alpha_3) - M_turn2 ** (1 - alpha_3)) / I_str / (1 - alpha_3) + M_L ** (
                2 - alpha_1) / (2 - alpha_1)  # equation 29
            function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, 100, 10, -1)  # equation 29
            M_max_function = 3
            if M_max < M_L:
                M_max_function = 0
                print("M_max < M_L")
    return
def function_k321(I_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):
    """Compute the IMF normalization constants k1, k2, k3 (globals).

    The formula for k3 depends on which segment M_max was found in by
    function_M_max (M_max_function = 1, 2 or 3); k2 and k1 then follow from
    continuity at the turnover masses (equation 2).  If M_max_function has
    any other value the function prints a diagnostic and returns early,
    leaving k1 and k2 unset (still 0 from the caller's reset).
    """
    global M_max_function, k3, k2, k1, M_max
    if M_max_function == 1:
        k3 = I_str*(1-alpha_3)/(M_U**(1-alpha_3)-M_max**(1-alpha_3))
        # equation 14
    elif M_max_function == 2:
        k3 = I_str/(M_turn2**(alpha_2-alpha_3)*(M_turn2**(1-alpha_2)-M_max**(1-alpha_2))/(1-alpha_2) + (
            M_U**(1-alpha_3)-M_turn2**(1-alpha_3))/(1-alpha_3))
        # equation 23
    elif M_max_function == 3:
        k3 = I_str/(M_turn2**(alpha_2-alpha_3) * M_turn**(alpha_1-alpha_2) * (M_turn**(1-alpha_1)-M_max**(1-alpha_1)) / (
            1-alpha_1) + M_turn2**(alpha_2-alpha_3)*(M_turn2**(1-alpha_2)-M_turn**(1-alpha_2))/(1-alpha_2) + (M_U**(
                1-alpha_3)-M_turn2**(1-alpha_3))/(1-alpha_3))
        # equation 27
    else:
        print("function_M_max went wrong")
        return
    k2 = k3*M_turn2**(alpha_2-alpha_3)  # equation 2
    k1 = k2*M_turn**(alpha_1-alpha_2)  # equation 2
    return
def function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, m_1, step, pm):  # equation 16
    """Iterative search for M_max in the alpha_3 segment (equation 16).

    m_1 is the current guess, step the current step size, pm the previous
    search direction (+1 up, -1 down); the step is halved on every direction
    reversal.  The result is stored in the global M_max.

    NOTE(review): the relative tolerance 10**(-50) is far below double
    precision, so the loop in practice terminates via the step-size or
    m_1 > 1 guards rather than the tolerance itself.
    """
    m_1 = round(m_1, 10)  # round
    M_x = m_1**(2-alpha_3)/(2-alpha_3) + M_ecl*m_1**(1-alpha_3)/I_str/(1-alpha_3)
    while abs(M_x-M_constant) > abs(M_constant) * 10 ** (-50) and m_1 > 1 and step > 0.00000001:
        if m_1 - step <= M_L or m_1 + step >= M_U:
            # a full step would leave [M_L, M_U]: shrink it
            step = step / 2
        elif M_x > M_constant and pm == -1:
            m_1 = m_1 - step
            pm = -1
            M_x = m_1 ** (2 - alpha_3) / (2 - alpha_3) + M_ecl * m_1 ** (1 - alpha_3) / I_str / (1 - alpha_3)
        elif M_x > M_constant and pm == 1:
            # direction reversal: halve the step
            m_1 = m_1 - step / 2
            step = step / 2
            pm = -1
            M_x = m_1 ** (2 - alpha_3) / (2 - alpha_3) + M_ecl * m_1 ** (1 - alpha_3) / I_str / (1 - alpha_3)
        elif M_x < M_constant and pm == 1:
            m_1 = m_1 + step
            pm = 1
            M_x = m_1 ** (2 - alpha_3) / (2 - alpha_3) + M_ecl * m_1 ** (1 - alpha_3) / I_str / (1 - alpha_3)
        elif M_x < M_constant and pm == -1:
            # direction reversal: halve the step
            m_1 = m_1 + step / 2
            step = step / 2
            pm = 1
            M_x = m_1 ** (2 - alpha_3) / (2 - alpha_3) + M_ecl * m_1 ** (1 - alpha_3) / I_str / (1 - alpha_3)
    global M_max
    M_max = m_1
    return
def function_M_max_2(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, m_1, step, pm):  # equation 25
    """Iterative search for M_max in the alpha_2 segment (equation 25).

    Same step-halving scheme as function_M_max_1, but with a looser relative
    tolerance (1e-7) and looser guards (m_1 > 0.5, step > 0.002).  The
    result is stored in the global M_max.
    """
    m_1 = round(m_1, 10)  # round
    M_x = m_1 ** (2 - alpha_2) / (2 - alpha_2) + M_ecl * m_1 ** (1 - alpha_2) / I_str / (1 - alpha_2)
    while abs(M_x-M_constant2) > abs(M_constant2) * 10 ** (-7) and m_1 > 0.5 and step > 0.002:
        if m_1 - step <= M_L or m_1 + step >= M_U:
            # a full step would leave [M_L, M_U]: shrink it
            step = step / 2
        elif M_x > M_constant2 and pm == -1:
            m_1 = m_1 - step
            pm = -1
            M_x = m_1 ** (2 - alpha_2) / (2 - alpha_2) + M_ecl * m_1 ** (1 - alpha_2) / I_str / (1 - alpha_2)
        elif M_x > M_constant2 and pm == 1:
            # direction reversal: halve the step
            m_1 = m_1 - step / 2
            step = step / 2
            pm = -1
            M_x = m_1 ** (2 - alpha_2) / (2 - alpha_2) + M_ecl * m_1 ** (1 - alpha_2) / I_str / (1 - alpha_2)
        elif M_x < M_constant2 and pm == 1:
            m_1 = m_1 + step
            pm = 1
            M_x = m_1 ** (2 - alpha_2) / (2 - alpha_2) + M_ecl * m_1 ** (1 - alpha_2) / I_str / (1 - alpha_2)
        elif M_x < M_constant2 and pm == -1:
            # direction reversal: halve the step
            m_1 = m_1 + step / 2
            step = step / 2
            pm = 1
            M_x = m_1 ** (2 - alpha_2) / (2 - alpha_2) + M_ecl * m_1 ** (1 - alpha_2) / I_str / (1 - alpha_2)
    global M_max
    M_max = m_1
    return
def function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1, step, pm):  # equation 29
    """Recursive search for M_max in the alpha_1 segment (equation 29).

    Same step-halving scheme as function_M_max_1/2, written recursively:
    each call either accepts m_1 (tolerance met or step exhausted) or
    recurses with an adjusted guess/step/direction.  The result is stored in
    the global M_max.
    """
    m_1 = round(m_1, 10)  # round
    M_x = m_1 ** (2 - alpha_1) / (2 - alpha_1) + M_ecl * m_1 ** (1 - alpha_1) / I_str / (1 - alpha_1)
    if abs(M_x-M_constant3) < abs(M_constant3) * 10 ** (-7) or step < 0.001:
        global M_max
        M_max = m_1
    elif m_1 - step <= M_L or m_1 + step >= M_U:
        # a full step would leave [M_L, M_U]: retry with a smaller step
        function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1, step / 2, pm)
    elif M_x > M_constant3 and pm == -1:
        function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1 - step, step, -1)
    elif M_x > M_constant3 and pm == 1:
        # direction reversal: halve the step
        function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1 - step / 2, step / 2, -1)
    elif M_x < M_constant3 and pm == 1:
        function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1 + step, step, 1)
    elif M_x < M_constant3 and pm == -1:
        # direction reversal: halve the step
        function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1 + step / 2, step / 2, 1)
    return
def function_m_i_str(k1, k2, k3, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_max, resolution_star_relative, resolution_star_absolute):  # equation 18
    """Compute the optimal-sampling integration limits m_i and star counts
    n_i (equation 18), walking down from M_max through the IMF segments.

    Each branch covers a different starting segment (depending on where
    M_max lies); cross_M_turn handles the transition between segments and
    cross_M_L closes the sampling at the lower limit.  For very massive
    clusters (M_max > 100) the first three stars are sampled one at a time
    via loop_m_i_first_three.  Results accumulate in list_m_str_i /
    list_n_str_i.
    """
    global list_m_str_i
    if M_max > 100:
        loop_m_i_first_three(k3, M_turn2, alpha_3, M_max, 0, resolution_star_relative, resolution_star_absolute, 0)
        (m_str_i, n_str_i) = cross_M_turn(k3, k2, M_turn2, alpha_3, alpha_2, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)
        loop_m_i(k2, M_turn, alpha_2, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)
        (m_str_i, n_str_i) = cross_M_turn(k2, k1, M_turn, alpha_2, alpha_1, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)
        loop_m_i(k1, M_L, alpha_1, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)
        cross_M_L(k1, M_L, alpha_1, list_m_str_i[-1])
        return
    elif M_max > M_turn2:
        loop_m_i(k3, M_turn2, alpha_3, M_max, 0, resolution_star_relative, resolution_star_absolute)
        (m_str_i, n_str_i) = cross_M_turn(k3, k2, M_turn2, alpha_3, alpha_2, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)
        loop_m_i(k2, M_turn, alpha_2, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)
        (m_str_i, n_str_i) = cross_M_turn(k2, k1, M_turn, alpha_2, alpha_1, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)
        loop_m_i(k1, M_L, alpha_1, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)
        cross_M_L(k1, M_L, alpha_1, list_m_str_i[-1])
        return
    elif M_max > M_turn:
        loop_m_i(k2, M_turn, alpha_2, M_max, 0, resolution_star_relative, resolution_star_absolute)
        (m_str_i, n_str_i) = cross_M_turn(k2, k1, M_turn, alpha_2, alpha_1, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)
        loop_m_i(k1, M_L, alpha_1, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)
        cross_M_L(k1, M_L, alpha_1, list_m_str_i[-1])
        return
    else:
        loop_m_i(k1, M_L, alpha_1, M_max, 0, resolution_star_relative, resolution_star_absolute)
        cross_M_L(k1, M_L, alpha_1, list_m_str_i[-1])
        return
def function_get_n_new_str(m_i, k, alpha, m_i_plus_n, n_i, resolution_star_relative, resolution_star_absolute):
    """Grow the per-interval star count until the mass step m_i - m_i_plus_n
    reaches the required resolution.

    The count is inflated geometrically (factor mass_grid_index, plus one)
    and the matching lower integration limit recomputed, until the interval
    is wide enough.  Returns the new lower limit and count.
    """
    required_width = max(resolution_star_relative * m_i, resolution_star_absolute)
    while m_i - m_i_plus_n < required_width:
        n_i = round(n_i * mass_grid_index + 1)
        m_i_plus_n = (m_i ** (1 - alpha) - n_i * (1 - alpha) / k) ** (1 / (1 - alpha))
    return m_i_plus_n, n_i
def loop_m_i_first_three(k, M_low, alpha, m_i, n_i, resolution_star_relative, resolution_star_absolute, count):
    """Like loop_m_i, but the first three intervals each contain exactly one
    star (count < 3 branch), so the most massive stars are sampled
    individually; afterwards it falls back to the resolution-driven scheme.

    Appends the limits/counts to list_m_str_i / list_n_str_i and keeps the
    last count in the global n_turn.
    """
    while m_i > M_low:
        global list_m_str_i, list_n_str_i, n_turn
        list_m_str_i += [m_i]
        list_n_str_i += [n_i]
        m_i_plus_n = (m_i ** (1 - alpha) - n_i * (1 - alpha) / k) ** (1 / (1 - alpha))
        if count < 3:
            # force exactly one star in this interval
            m_i_plus_n = (m_i ** (1 - alpha) - (1 - alpha) / k) ** (1 / (1 - alpha))
            n_turn = n_i
            (m_i, n_i, count) = (m_i_plus_n, 1, (count+1))
        elif m_i - m_i_plus_n > max(resolution_star_relative * m_i, resolution_star_absolute):
            # interval already wide enough: keep the current count
            n_turn = n_i
            (m_i, n_i) = (m_i_plus_n, n_i)
        else:
            # interval too narrow: grow the count until resolution is met
            (m_i_plus_n_new, n_turn) = function_get_n_new_str(m_i, k, alpha, m_i_plus_n, n_i, resolution_star_relative, resolution_star_absolute)
            (m_i, n_i) = (m_i_plus_n_new, n_turn)
def loop_m_i(k, M_low, alpha, m_i, n_i, resolution_star_relative, resolution_star_absolute):
    """Walk down one power-law IMF segment, appending integration limits m_i
    and star counts n_i (equation 18) until the next limit would fall below
    M_low (or the power-law base goes non-positive, at which point the
    segment is exhausted and the function returns early).
    """
    while m_i > M_low:
        global list_m_str_i, list_n_str_i, n_turn
        list_m_str_i += [m_i]
        list_n_str_i += [n_i]
        a = m_i ** (1 - alpha) - n_i * (1 - alpha) / k
        if a > 0:
            b = 1 / (1 - alpha)
            m_i_plus_n = a ** b
            if m_i - m_i_plus_n > max(resolution_star_relative * m_i, resolution_star_absolute):
                (m_i, n_i) = (m_i_plus_n, n_i)
            else:
                # interval too narrow: grow the count until resolution is met
                (m_i_plus_n_new, n_turn) = function_get_n_new_str(m_i, k, alpha, m_i_plus_n, n_i, resolution_star_relative, resolution_star_absolute)
                (m_i, n_i) = (m_i_plus_n_new, n_turn)
        else:
            # no further stars fit in this segment
            return
def cross_M_turn(k_before, k_after, M_cross, alpha_before, alpha_after, m_i, resolution_star_relative, resolution_star_absolute):
    """Handle the sampling interval that straddles a turnover mass M_cross.

    Integrates the remaining stars of the segment above M_cross (slope
    alpha_before, normalization k_before), then continues the same interval
    into the segment below (alpha_after, k_after).  Returns the next lower
    integration limit and the total star count of the straddling interval,
    or (0, 0) when the continuation has no valid solution.
    """
    global n_turn
    # whole stars still above M_cross
    n_before = int(k_before/(1-alpha_before)*(m_i**(1-alpha_before)-M_cross**(1-alpha_before)))
    m_before_cross = (m_i ** (1 - alpha_before) - n_before * (1 - alpha_before) / k_before) ** (1 / (1 - alpha_before))
    # continuation of the interval below M_cross in the new segment
    a = (M_cross**(1-alpha_after)+k_before/k_after*(1-alpha_after)/(1-alpha_before)*(m_before_cross**(
        1-alpha_after)-M_cross**(1-alpha_before))-(1-alpha_after)/k_after) if False else (M_cross**(1-alpha_after)+k_before/k_after*(1-alpha_after)/(1-alpha_before)*(m_before_cross**(
        1-alpha_before)-M_cross**(1-alpha_before))-(1-alpha_after)/k_after)
    if a > 0:
        m_after_cross = a ** (1/(1-alpha_after))
        # initial guess for the count below the crossing
        n_after = int(0.9*(n_turn - n_before - 1))
        m_after_cross_plus_n_after = (m_after_cross ** (1 - alpha_after) - n_after * (1 - alpha_after) / k_after) ** (1 / (1 - alpha_after))
        if m_i - m_after_cross_plus_n_after > max(resolution_star_relative * m_i, resolution_star_absolute):
            return (m_after_cross_plus_n_after, n_before + 1 + n_after)
        else:
            # interval too narrow: grow the count until resolution is met
            (m_after_cross_plus_n_new, n_after_new) = function_get_n_new_str_cross(
                m_i, m_after_cross, k_after, alpha_after, m_after_cross_plus_n_after, n_after, resolution_star_relative, resolution_star_absolute)
            return (m_after_cross_plus_n_new, n_before + 1 + n_after_new)
    else:
        return (0, 0)
def function_get_n_new_str_cross(m_i, m_after_cross, k, alpha, m_after_cross_plus_n, n_i, resolution_star_relative, resolution_star_absolute):
    """Variant of function_get_n_new_str for the interval that straddles a
    turnover mass: grow the below-crossing star count until the full interval
    width m_i - m_after_cross_plus_n reaches the required resolution.
    Returns the new lower limit and count.
    """
    required_width = max(resolution_star_relative * m_i, resolution_star_absolute)
    while m_i - m_after_cross_plus_n < required_width:
        n_i = round(n_i * mass_grid_index + 1)
        m_after_cross_plus_n = (m_after_cross ** (1 - alpha) - n_i * (1 - alpha) / k) ** (1 / (1 - alpha))
    return m_after_cross_plus_n, n_i
def cross_M_L(k_1, M_L, alpha_1, m_i):  # equation 21
    """Close the sampling at the lower mass limit: count the whole stars
    remaining between m_i and M_L and append M_L as the final limit."""
    global list_m_str_i, list_n_str_i
    remaining_stars = int(k_1 / (1 - alpha_1) * (m_i ** (1 - alpha_1) - M_L ** (1 - alpha_1)))
    list_m_str_i.append(M_L)
    list_n_str_i.append(remaining_stars)
    return
def function_M_i(k1, k2, k3, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U, length_n):  # equation 20
    """Compute the representative mass M_i of every sampling interval
    (equation 20), segment by segment, filling list_M_str_i.

    The branch mirrors function_m_i_str: start in the segment containing
    M_max, use cross_M_turn2 at each turnover, and finish in the alpha_1
    segment.  Guards on list lengths / zero counts stop early when the
    sampling ended before reaching a lower segment.
    """
    global list_m_str_i, new_i, list_M_str_i, M_max, list_n_str_i
    new_i = 0
    if M_max > M_turn2:
        loop_M_i(k3, M_turn2, alpha_3, new_i)
        cross_M_turn2(k3, k2, M_turn2, alpha_3, alpha_2, new_i)
        if new_i + 1 < len(list_m_str_i):
            loop_M_i(k2, M_turn, alpha_2, new_i)
            if list_n_str_i[new_i + 1] > 0:
                cross_M_turn2(k2, k1, M_turn, alpha_2, alpha_1, new_i)
                if new_i + 1 < len(list_m_str_i):
                    loop_M_i(k1, M_L, alpha_1, new_i)
                    if list_n_str_i[new_i+1] == 0:
                        return
                    else:
                        # final (partial) interval down to M_L
                        M_i = k1 / (2 - alpha_1) * (list_m_str_i[new_i] ** (2 - alpha_1) - list_m_str_i[new_i + 1] ** (2 - alpha_1)) / \
                            list_n_str_i[new_i + 1]
                        list_M_str_i += [M_i]
        return
    elif M_max > M_turn:
        loop_M_i(k2, M_turn, alpha_2, new_i)
        cross_M_turn2(k2, k1, M_turn, alpha_2, alpha_1, new_i)
        loop_M_i(k1, M_L, alpha_1, new_i)
        if list_n_str_i[new_i+1] == 0:
            return
        else:
            M_i = k1 / (2 - alpha_1) * (list_m_str_i[new_i] ** (2 - alpha_1) - list_m_str_i[new_i + 1] ** (
                2 - alpha_1)) / list_n_str_i[new_i + 1]
            list_M_str_i += [M_i]
            return
    else:
        loop_M_i(k1, M_L, alpha_1, new_i)
        if list_n_str_i[new_i+1] == 0:
            return
        else:
            M_i = k1 / (2 - alpha_1) * (list_m_str_i[new_i] ** (2 - alpha_1) - list_m_str_i[new_i + 1] ** (
                2 - alpha_1)) / list_n_str_i[new_i + 1]
            list_M_str_i += [M_i]
            return
def loop_M_i(k, M_low, alpha, i):
    """Compute the representative mass of each interval within one power-law
    segment (equation 20) while the next lower limit stays above M_low;
    appends to list_M_str_i and advances the global cursor new_i.
    """
    global list_m_str_i, list_n_str_i, list_M_str_i, new_i
    while list_m_str_i[i+1] > M_low:
        # mean mass of the interval: integral of m*xi over the interval / n
        M_i = k/(2-alpha)*(list_m_str_i[i]**(2-alpha)-list_m_str_i[i+1]**(2-alpha))/list_n_str_i[i+1]
        list_M_str_i += [M_i]
        new_i = i + 1
        (i)=(new_i)
def cross_M_turn2(k_before, k_after, M_cross, alpha_before, alpha_after, i):
    """Representative mass of the single interval that straddles the turnover
    mass M_cross: the mass integral is split into the part above M_cross
    (slope alpha_before) and the part below (alpha_after), divided by the
    interval's star count.  Appends to list_M_str_i and advances new_i.
    """
    global list_m_str_i, list_n_str_i, list_M_str_i, new_i
    M_i = k_before / (2 - alpha_before) * (list_m_str_i[i] ** (2 - alpha_before) - M_cross ** (2 - alpha_before)
                                           ) / list_n_str_i[i + 1] + k_after / (2 - alpha_after) * (M_cross ** (2 - alpha_after
                                                                                                                ) - list_m_str_i[i + 1] ** (2 - alpha_after)) / list_n_str_i[i + 1]
    list_M_str_i += [M_i]
    new_i = i + 1
    return
################# draw IMF without sampling #################
# k_str is a normalization factor.
# The IMF is normalized to the total mass of the star cluster (M_ecl)
# The normalization is done by first calculate the M_max (with function function_M_max),
# then k_str (function_k321) as described by the Part I of supplementary-document-galimf.pdf
def k_str(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):
    """Normalize the IMF to the cluster mass M_ecl.

    Computes the most massive star M_max and then the normalization
    constants k1, k2, k3 (see Part I of supplementary-document-galimf.pdf);
    all results are stored in module-level globals.
    """
    global M_max, M_max_function, k3, k2, k1
    M_max = 0
    M_max_function = 0
    function_M_max(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)
    k3 = k2 = k1 = 0
    function_k321(I_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)
    return
x_IMF = []  # stellar masses of the most recently tabulated IMF (filled by function_draw_xi_str)
y_IMF = []  # the corresponding xi(m) values
def function_draw_xi_str(M_str_L, M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):
    """Tabulate the analytic (unsampled) IMF of a cluster of mass M_ecl into
    x_IMF / y_IMF, from M_str_L up to the computed M_max.

    k_str sets the globals M_max and k1/k2/k3 that the tabulation loop uses.
    """
    global x_IMF, y_IMF, k1, k2, k3, M_max
    k_str(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)
    function_draw_xi_str_loop(M_str_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3)
    return
def function_draw_xi_str_loop(M_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3):
    """Walk a geometric mass grid (ratio mass_grid_index) from M_str up to
    M_max, appending each mass to x_IMF and the matching segmented
    power-law value xi(m) = k * m**(-alpha) to y_IMF."""
    global x_IMF, y_IMF, k1, k2, k3, M_max, mass_grid_index
    while M_str < M_max:
        x_IMF.append(M_str)
        if M_str > M_turn2:
            segment_k, segment_alpha = k3, alpha_3
        elif M_str > M_turn:
            segment_k, segment_alpha = k2, alpha_2
        else:
            segment_k, segment_alpha = k1, alpha_1
        y_IMF.append(segment_k * M_str ** (-segment_alpha))
        M_str = mass_grid_index * M_str
    return
def function_maximum_number_of_mass_grid(M_str_min, M_str_max):
    """Return an upper bound on the number of points a geometric mass grid
    (ratio mass_grid_index) can have between M_str_min and M_str_max, plus a
    safety margin of 4 slots.  Used to pre-allocate the galaxy-wide IMF
    lists.

    Fix: the loop previously compared against the global M_max (the most
    recently computed cluster M_max) and ignored the M_str_max parameter,
    which could under-size the pre-allocated lists whenever
    M_max < M_str_max at call time.
    """
    global mass_grid_index
    maximum_number_of_mass_grid = 4  # safety margin
    M_str = M_str_min
    while M_str < M_str_max:
        maximum_number_of_mass_grid += 1
        M_str = mass_grid_index * M_str
    return maximum_number_of_mass_grid
########### alpha ###########
def function_alpha_1_change(alpha_1, alpha1_model, M_over_H):
    """Return the (possibly metallicity-corrected) low-mass IMF slope.

    Models: 0 -> unchanged; 1 -> +0.5*[M/H]; 'IGIMF2.5' -> +0.12*[M/H];
    'Z' -> correction proportional to the metal mass fraction.  An unknown
    model prints a diagnostic and returns None.
    """
    if alpha1_model == 0:
        return alpha_1
    if alpha1_model == 1:
        return alpha_1 + 0.5 * M_over_H
    if alpha1_model == 'IGIMF2.5':
        return alpha_1 + 0.12 * M_over_H
    if alpha1_model == 'Z':
        return alpha_1 + 63 * (10**M_over_H - 1) * 0.0142
    print("alpha1_model: %s, do not exist.\nCheck file 'alpha1.py'" % (alpha1_model))
    return
def function_alpha_2_change(alpha_2, alpha2_model, M_over_H):
    """Return the (possibly metallicity-corrected) intermediate-mass IMF slope.

    Models: 0 -> unchanged; 1 -> +0.5*[M/H]; 'Z' -> metal-mass-fraction
    correction (warns for [M/H] > 1); 'IGIMF2.5' -> +0.12*[M/H];
    'R14' -> 2.3 + 0.0572*[M/H].  An unknown model prints a diagnostic and
    returns None.
    """
    if alpha2_model == 0:
        return alpha_2
    if alpha2_model == 1:
        return alpha_2 + 0.5 * M_over_H
    if alpha2_model == 'Z':
        corrected = alpha_2 + 63 * (10**M_over_H - 1) * 0.0142
        if M_over_H > 1:
            print("Warning: Abnormally high gas metallicity leading to an unrealistic IMF shape according to the assumed variation law: alpha2_model == 'Z'. Please check your galaxy evolution settings or change to a different IMF variation assumption.")
        return corrected
    if alpha2_model == 'IGIMF2.5':
        return alpha_2 + 0.12 * M_over_H
    if alpha2_model == 'R14':
        return 2.3 + 0.0572 * M_over_H
    print("alpha2_model: %s, do not exist.\nCheck file 'alpha2.py'" % (alpha2_model))
    return
def function_alpha_3_change(alpha3_model, M_ecl, M_over_H):
    """Return the high-mass IMF slope alpha_3 for a cluster of mass M_ecl.

    Models:
      0      -> constant 2.3
      1      -> density-dependent relation (prints a warning below the
                tested range of Marks et al. 2012)
      2      -> density- and metallicity-dependent relation
      'R14'  -> 2.3 + 0.0572*[M/H]
      other  -> the model value itself is used as a constant slope
    """
    if alpha3_model == 0:
        return 2.3
    if alpha3_model == 1:
        # cluster density from the mass--density relation
        rho = 10 ** (0.61 * math.log(M_ecl, 10) + 2.85)
        if rho < 9.5 * 10 ** 4:
            slope = 2.3
        else:
            slope = 1.86 - 0.43 * math.log(rho / 10 ** 6, 10)
        if slope < 0.5:
            print("IMF alpha_3 being", slope, "out of the tested range from Marks et al. 2012.")
        return slope
    if alpha3_model == 2:
        rho = 10 ** (0.61 * math.log(M_ecl, 10) + 2.85)
        x = -0.1405 * M_over_H + 0.99 * math.log(rho / 10 ** 6, 10)
        return 2.3 if x < -0.87 else -0.41 * x + 1.94
    if alpha3_model == 'R14':
        return 2.3 + 0.0572 * M_over_H
    # any other value is treated as a user-supplied constant slope
    return alpha3_model
########## ECMF #########
# This part gives the cluster masses according to file "supplementary-document-galimf.pdf".
# The code is only valid when SFR > 3 * 10^(-10) solar / year.
# Inputs:
# SFR,delta_t, I, M_U, M_L, \beta
# step 1
# use equation 13 or 17
# give first integration limit m_1 i.e. M_max_ecl
# step 2
# use equation 10 or 14
# give k
# step 3
# use equation 21
# give every integration limit m_i and the number of stars in this region n_i
# step 4
# use equation 22 or 23
# give every cluster mass M_i
# Outputs:
# list of cluster masses "list_M_ecl_i"
# and the number of clusters with each mass "list_n_ecl_i"
################### sample cluster from ECMF #####################
resolution_cluster_relative = 0.01  # The mass resolution of a embedded cluster with mass M is: M * resolution_cluster_relative.
list_m_ecl_i = []  # integration limits m_i of the cluster-mass sampling (descending)
list_n_ecl_i = []  # number of clusters between consecutive limits
list_M_ecl_i = []  # representative cluster mass of each interval
M_max_ecl = 0  # most massive embedded cluster of the current epoch
def function_sample_from_ecmf(R14orNOT, SFR, delta_t, I_ecl, M_U, M_L, beta):
    """Sample embedded-cluster masses from the ECMF (optimal sampling).

    Fills the module-level lists list_m_ecl_i (cluster-mass bin edges),
    list_n_ecl_i (number of clusters per bin) and list_M_ecl_i (mean cluster
    mass per bin), and sets the module-level M_max_ecl.

    R14orNOT -- if True, use the fixed SFR--M_max relation below with beta
        forced to 2 (presumably the 'R14' variant; confirm against paper)
    SFR -- star formation rate [Msun/yr]; delta_t -- epoch length [Myr]
    I_ecl, M_U, M_L, beta -- ECMF normalisation constant, upper/lower
        cluster mass limits and power-law index
    """
    global list_m_ecl_i, list_n_ecl_i, list_M_ecl_i, M_max_ecl, resolution_cluster_relative
    M_tot = SFR * delta_t * 10**6 # units in Myr
    if R14orNOT == True:
        # SFR-dependent most-massive-cluster relation, beta fixed to 2
        M_max_ecl = 10**(4.83+0.75*math.log(SFR, 10))
        k = I_ecl / (1 / M_max_ecl - 1 / M_U)  # equation 41
        list_m_ecl_i = [M_max_ecl]
        list_n_ecl_i = []
        beta = 2
        function_m_i_ecl(M_max_ecl, M_L, k, beta, 1)  # equation 48
        list_M_ecl_i = []
        length_n = len(list_n_ecl_i)
        function_M_i_2(k, 0, length_n)  # equation 50
    else:
        if beta == 2:
            # beta == 2 needs the logarithmic (special-case) formulas
            M_max_ecl = 0
            function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, 10**8, 10**7, -1)  # equation 44
            k = I_ecl / (1 / M_max_ecl - 1 / M_U)  # equation 41
            list_m_ecl_i = [M_max_ecl]
            list_n_ecl_i = []
            function_m_i_ecl(M_max_ecl, M_L, k, beta, 1)  # equation 48
            list_M_ecl_i = []
            length_n = len(list_n_ecl_i)
            function_M_i_2(k, 0, length_n)  # equation 50
        else:
            # general power-law case
            M_max_ecl = 0
            function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, 10**8, 10**7, -1)  # equation 40
            k = I_ecl * (1 - beta) / (M_U ** (1 - beta) - M_max_ecl ** (1 - beta))  # equation 37
            list_m_ecl_i = [M_max_ecl]
            list_n_ecl_i = []
            function_m_i_ecl(M_max_ecl, M_L, k, beta, 1)  # equation 48
            list_M_ecl_i = []
            length_n = len(list_n_ecl_i)
            function_M_i_not_2(k, beta, 0, length_n)  # equation 49
    return
def function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1, step, pm):  # equation 44
    """Recursive search for M_max_ecl when beta == 2.

    Moves m_1 up or down by `step` (direction pm = +1/-1), shrinking the
    step whenever the search direction flips or a mass limit would be
    crossed, until the implied total mass M_x matches M_tot to within a
    relative 1e-5.  The result is stored in the module-level M_max_ecl
    (no return value).
    """
    m_1 = round(m_1, 10)  # round makes the code only valid when SFR > 3 * 10^(-10) solar / year
    # total stellar mass implied if the most massive cluster is m_1
    M_x = I_ecl * (math.log(m_1) - math.log(M_L)) / (1 / m_1 - 1 / M_U)
    if M_tot * (1. + 10 ** (-5)) > M_x > M_tot * (1- 10 ** (-5)):
        # converged: record the result
        global M_max_ecl
        M_max_ecl = m_1
    elif m_1 - step < M_L or m_1 + step > M_U:
        # next step would leave [M_L, M_U]: retry with a finer step
        function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1, step/10, pm)
    elif M_x > M_tot and pm == -1:
        function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1 - step, step, -1)
    elif M_x > M_tot and pm == 1:
        # overshot while moving up: reverse direction with a finer step
        function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1 - step/10, step/10, -1)
    elif M_x < M_tot and pm == 1:
        function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1 + step, step, 1)
    elif M_x < M_tot and pm == -1:
        # overshot while moving down: reverse direction with a finer step
        function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1 + step/10, step/10, 1)
def function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1, step, pm):  # equation 40
    """Recursive search for M_max_ecl when beta != 2.

    Same step-halving walk as function_M_max_ecl_2 but with the general
    power-law mass integral; note steps here shrink by /2 rather than /10.
    Stores the result in the module-level M_max_ecl.
    """
    m_1 = round(m_1, 10)  # round makes the code only valid when SFR > 3 * 10^(-10) solar / year
    # total stellar mass implied if the most massive cluster is m_1
    M_x = I_ecl * (1 - beta) / (2 - beta) * (m_1 ** (2 - beta) - M_L ** (2 - beta)) / (
        M_U ** (1 - beta) - m_1 ** (1 - beta))
    if M_tot * (1.+10**(-5)) > M_x > M_tot * (1-10**(-5)):
        # converged: record the result
        global M_max_ecl
        M_max_ecl = m_1
    elif m_1 - step <= M_L or m_1 + step >= M_U:
        # next step would leave [M_L, M_U]: retry with a finer step
        function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1, step/2, pm)
    elif M_x > M_tot and pm == -1:
        function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1 - step, step, -1)
    elif M_x > M_tot and pm == 1:
        # overshot while moving up: reverse with a finer step
        function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1 - step/2, step/2, -1)
    elif M_x < M_tot and pm == 1:
        function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1 + step, step, 1)
    elif M_x < M_tot and pm == -1:
        # overshot while moving down: reverse with a finer step
        function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1 + step/2, step/2, 1)
def function_m_i_ecl(m_i, M_L, k, beta, n_i):  # equation 48
    """Build the optimal-sampling mass bins from M_max_ecl down to M_L.

    Starting at m_i (the most massive cluster), each iteration finds the
    next-lower bin edge m_i_plus_n that contains n_i clusters.  Bin edges
    are appended to the module-level list_m_ecl_i and per-bin counts to
    list_n_ecl_i.
    """
    while m_i > M_L:
        global list_m_ecl_i, list_n_ecl_i, resolution_cluster_relative
        # next-lower bin edge such that the bin holds n_i clusters
        m_i_plus_n = (m_i**(1-beta) - n_i * (1-beta) / k)**(1/(1-beta))
        if m_i_plus_n < M_L:
            # final (truncated) bin down to the lower mass limit
            list_m_ecl_i += [M_L]
            n_L = int((m_i**(1-beta) - M_L**(1-beta)) * k / (1-beta))
            if n_L == 0:
                # no whole cluster fits in the last bin: drop it
                return
            else:
                list_n_ecl_i += [n_L]
            return
        elif m_i - m_i_plus_n > resolution_cluster_relative * m_i:
            # bin is wide enough relative to m_i: accept it
            list_m_ecl_i += [m_i_plus_n]
            list_n_ecl_i += [n_i]
            (m_i, n_i) = (m_i_plus_n, n_i)
        else:
            # bin too narrow: grow the cluster count until the bin spans the resolution
            (m_i_plus_n_new, n_new) = function_get_n_new_ecl(m_i, k, beta, m_i_plus_n, n_i)
            list_m_ecl_i += [m_i_plus_n_new]
            list_n_ecl_i += [n_new]
            (m_i, n_i) = (m_i_plus_n_new, n_new)
    return
def function_get_n_new_ecl(m_i, k, beta, m_i_plus_n, n_i):
    """Grow the per-bin cluster count until the bin spans at least the
    relative mass resolution; returns the new (bin edge, count) pair."""
    while m_i - m_i_plus_n < resolution_cluster_relative * m_i:
        # enlarge the count geometrically; mass_grid_index is a module-level
        # growth factor defined elsewhere in this file -- TODO confirm value
        n_new = round(n_i * mass_grid_index + 1)
        m_i_plus_n_new = (m_i ** (1 - beta) - n_new * (1 - beta) / k) ** (1 / (1 - beta))
        (m_i_plus_n, n_i) = (m_i_plus_n_new, n_new)
    return m_i_plus_n, n_i
def function_M_i_2(k, i, length_n):  # equation 50
    """Append the mean cluster mass of each bin (beta == 2, logarithmic form)
    to the module-level list_M_ecl_i, for bins i .. length_n-1."""
    global list_m_ecl_i, list_n_ecl_i, list_M_ecl_i
    while i < length_n:
        mean_bin_mass = k * (math.log(list_m_ecl_i[i]) - math.log(list_m_ecl_i[i + 1])) / list_n_ecl_i[i]
        list_M_ecl_i.append(mean_bin_mass)
        i += 1
    return
def function_M_i_not_2(k, beta, i, length_n):  # equation 49
    """Append the mean cluster mass of each bin (general beta != 2 power-law
    form) to the module-level list_M_ecl_i, for bins i .. length_n-1."""
    global list_m_ecl_i, list_n_ecl_i, list_M_ecl_i
    exponent = 2 - beta
    while i < length_n:
        mean_bin_mass = k / exponent * (list_m_ecl_i[i] ** exponent - list_m_ecl_i[i + 1] ** exponent) / list_n_ecl_i[i]
        list_M_ecl_i.append(mean_bin_mass)
        i += 1
    return
################### draw ECMF without sampling #####################
# k_ecl is a normalization factor.
# The ECMF is normalized to the total mass of the cluster population in a 10 Myr star formation epoch (M_tot)
# That is M_tot = SFR [Msun/yr] * 10^7 [yr]
# The normalization is done by first calculate the M_max_ecl then k_ecl as described by the Part II of supplementary-document-galimf.pdf
def k_ecl(R14orNOT, M_ecl, SFR, delta_t, I_ecl, M_U, M_L, beta):
    """Return the ECMF normalisation factor k.

    First determines M_max_ecl (stored as a module-level global), then
    derives k from the appropriate formula for the beta regime.

    NOTE(review): the M_ecl parameter is accepted but never used here --
    kept for signature compatibility with callers; confirm before removing.
    """
    global M_max_ecl
    M_tot = SFR * delta_t * 10 ** 6  # units in Myr
    if R14orNOT == True:
        # fixed SFR--M_max relation, floored at 5 Msun
        M_max_ecl = 10 ** (4.83 + 0.75 * math.log(SFR, 10))
        if M_max_ecl < 5:
            M_max_ecl = 5
        k = I_ecl / (1 / M_max_ecl - 1 / M_U)  # equation 45
    else:
        if beta == 2:
            M_max_ecl = 0
            function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, 10**8, 10**7, -1)  # equation 48
            k = I_ecl / (1 / M_max_ecl - 1 / M_U)  # equation 45
        else:
            M_max_ecl = 0
            function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, M_U/10, M_U/100, -1)  # equation 44
            k = I_ecl * (1 - beta) / (M_U ** (1 - beta) - M_max_ecl ** (1 - beta))  # equation 41
    return k
# Mass / xi coordinate arrays of the ECMF curve for plotting
# (filled by function_draw_xi_ecl below).
x_ECMF = []
y_ECMF = []
def function_draw_xi_ecl(R14orNOT, M_ecl, SFR, delta_t, I_ecl, M_U, M_L, beta):
    """Fill the module-level x_ECMF / y_ECMF arrays with the ECMF curve
    from M_ecl up to M_max_ecl.

    A near-zero y value is prepended and appended so a log-log plot of the
    curve drops to the axis at both ends.
    """
    global x_ECMF, y_ECMF
    k = k_ecl(R14orNOT, M_ecl, SFR, delta_t, I_ecl, M_U, M_L, beta)
    function_draw_xi_ecl_loop(M_ecl, k, M_U, beta)
    # duplicate the end-point masses with ~zero xi to close the curve
    x_ECMF = [x_ECMF[0]] + x_ECMF
    x_ECMF += [x_ECMF[-1]]
    y_ECMF = [0.000000001] + y_ECMF
    y_ECMF += [0.000000001]
    return
def function_draw_xi_ecl_loop(M_ecl, k, M_U, beta):
    """Append (mass, xi) samples on a geometric mass grid up to M_max_ecl."""
    global x_ECMF, y_ECMF, M_max_ecl
    while M_ecl < M_max_ecl:
        x_ECMF += [M_ecl]
        # ECMF value xi(M) = k * M^(-beta)
        xi = k * M_ecl ** (-beta)
        y_ECMF += [xi]
        # advance on the geometric grid; mass_grid_index is a module-level
        # factor defined elsewhere in this file -- TODO confirm value
        (M_ecl) = (mass_grid_index * M_ecl)
    return
########## beta ###########
def function_beta_change(beta_model, SFR, M_over_H):
    """Return the ECMF power-law index beta for the chosen model.

    beta_model -- 0: fixed default; 1: SFR-dependent, clamped to [1.5, 2.5];
        2: SFR-dependent above SFR = 1, otherwise a fixed value;
        any other value is returned unchanged as a user-supplied constant.
    SFR -- star formation rate; M_over_H -- metallicity (currently unused
        by all models; kept for interface stability).
    """
    if beta_model == 0:
        # default, slightly offset from 2 to avoid the beta == 2 special case
        return 2.00000001
    if beta_model == 1:
        beta_value = -0.106 * math.log(SFR, 10) + 2.000001 #+ 0.5*M_over_H
        # clamp to the physically motivated range [1.5, 2.5]
        return min(max(beta_value, 1.5), 2.5)
    if beta_model == 2:
        if SFR > 1:
            return -0.106 * math.log(SFR, 10) + 2.00000001
        return 2.0000001
    # any other input is treated as a fixed beta value
    return beta_model
| Azeret/galIMF | galimf.py | Python | gpl-3.0 | 55,263 | [
"Galaxy"
] | 4d77ee0d44cd6c68ca0fdc52d4191187f19aa33f9d8cbd291543b49474435beb |
import numpy as np
import deepautoencoder.utils as utils
import tensorflow as tf
# Configuration values accepted by StackedAutoEncoder (checked in assertions())
allowed_activations = ['sigmoid', 'tanh', 'softmax', 'relu', 'linear']
allowed_noises = [None, 'gaussian', 'mask']
allowed_losses = ['rmse', 'cross-entropy']
class StackedAutoEncoder:
    """A deep autoencoder with denoising capability"""
    def assertions(self):
        # Validate the constructor arguments; raises AssertionError on bad config.
        global allowed_activations, allowed_noises, allowed_losses
        assert self.loss in allowed_losses, 'Incorrect loss given'
        assert 'list' in str(
            type(self.dims)), 'dims must be a list even if there is one layer.'
        assert len(self.epoch) == len(
            self.dims), "No. of epochs must equal to no. of hidden layers"
        assert len(self.activations) == len(
            self.dims), "No. of activations must equal to no. of hidden layers"
        assert all(
            True if x > 0 else False
            for x in self.epoch), "No. of epoch must be atleast 1"
        assert set(self.activations + allowed_activations) == set(
            allowed_activations), "Incorrect activation given."
        assert utils.noise_validator(
            self.noise, allowed_noises), "Incorrect noise given"
    def __init__(self, dims, activations, epoch=1000, noise=None, loss='rmse',
                 lr=0.001, batch_size=100, print_step=50):
        """
        dims -- list of hidden-layer sizes, one per layer
        activations -- activation name per layer (see allowed_activations)
        epoch -- training epochs per layer; NOTE(review): assertions()
            requires this to be a list the same length as dims, so the
            int default of 1000 would fail validation -- confirm intent
        noise -- None, 'gaussian', or 'mask-<fraction>' for denoising
        loss -- 'rmse' or 'cross-entropy'
        lr, batch_size, print_step -- training-loop settings
        """
        self.print_step = print_step
        self.batch_size = batch_size
        self.lr = lr
        self.loss = loss
        self.activations = activations
        self.noise = noise
        self.epoch = epoch
        self.dims = dims
        self.assertions()
        self.depth = len(dims)
        # learned per-layer encoder parameters, filled by run()
        self.weights, self.biases = [], []
    def add_noise(self, x):
        # Corrupt input x according to self.noise.  NOTE(review): assumes
        # self.noise is a string here; fit() only calls this when noise
        # is not None.
        if self.noise == 'gaussian':
            n = np.random.normal(0, 0.1, (len(x), len(x[0])))
            return x + n
        if 'mask' in self.noise:
            # 'mask-<frac>': zero out a random fraction of each sample
            frac = float(self.noise.split('-')[1])
            temp = np.copy(x)
            for i in temp:
                n = np.random.choice(len(i), round(
                    frac * len(i)), replace=False)
                i[n] = 0
            return temp
        if self.noise == 'sp':
            # salt-and-pepper noise: not implemented
            pass
    def fit(self, x):
        # Greedy layer-wise training: each trained layer's encoded output
        # becomes the input of the next layer.
        for i in range(self.depth):
            print('Layer {0}'.format(i + 1))
            if self.noise is None:
                x = self.run(data_x=x, activation=self.activations[i],
                             data_x_=x,
                             hidden_dim=self.dims[i], epoch=self.epoch[
                                 i], loss=self.loss,
                             batch_size=self.batch_size, lr=self.lr,
                             print_step=self.print_step)
            else:
                # denoising: train on a corrupted copy, reconstruct the clean x
                temp = np.copy(x)
                x = self.run(data_x=self.add_noise(temp),
                             activation=self.activations[i], data_x_=x,
                             hidden_dim=self.dims[i],
                             epoch=self.epoch[
                                 i], loss=self.loss,
                             batch_size=self.batch_size,
                             lr=self.lr, print_step=self.print_step)
    def transform(self, data):
        # Encode `data` through all trained layers using the frozen
        # weights/biases captured during fit().  (TensorFlow 1.x graph API.)
        tf.reset_default_graph()
        sess = tf.Session()
        x = tf.constant(data, dtype=tf.float32)
        for w, b, a in zip(self.weights, self.biases, self.activations):
            weight = tf.constant(w, dtype=tf.float32)
            bias = tf.constant(b, dtype=tf.float32)
            layer = tf.matmul(x, weight) + bias
            x = self.activate(layer, a)
        return x.eval(session=sess)
    def fit_transform(self, x):
        # Convenience: train, then return the encoded representation of x.
        self.fit(x)
        return self.transform(x)
    def run(self, data_x, data_x_, hidden_dim, activation, loss, lr,
            print_step, epoch, batch_size=100):
        # Train a single tied-weight autoencoder layer: encode data_x,
        # reconstruct data_x_, store the learned encoder parameters, and
        # return the encoded (hidden) representation of data_x_.
        tf.reset_default_graph()
        input_dim = len(data_x[0])
        sess = tf.Session()
        x = tf.placeholder(dtype=tf.float32, shape=[None, input_dim], name='x')
        x_ = tf.placeholder(dtype=tf.float32, shape=[
            None, input_dim], name='x_')
        encode = {'weights': tf.Variable(tf.truncated_normal(
            [input_dim, hidden_dim], dtype=tf.float32)),
            'biases': tf.Variable(tf.truncated_normal([hidden_dim],
                                                      dtype=tf.float32))}
        # decoder shares (transposed) weights with the encoder
        decode = {'biases': tf.Variable(tf.truncated_normal([input_dim],
                                                            dtype=tf.float32)),
                  'weights': tf.transpose(encode['weights'])}
        encoded = self.activate(
            tf.matmul(x, encode['weights']) + encode['biases'], activation)
        decoded = tf.matmul(encoded, decode['weights']) + decode['biases']
        # reconstruction loss
        if loss == 'rmse':
            loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(x_, decoded))))
        elif loss == 'cross-entropy':
            # NOTE(review): decoded is a raw linear output here (no sigmoid),
            # so this can produce log of non-positive values -- confirm
            loss = -tf.reduce_mean(x_ * tf.log(decoded))
        train_op = tf.train.AdamOptimizer(lr).minimize(loss)
        sess.run(tf.global_variables_initializer())
        for i in range(epoch):
            b_x, b_x_ = utils.get_batch(
                data_x, data_x_, batch_size)
            sess.run(train_op, feed_dict={x: b_x, x_: b_x_})
            if (i + 1) % print_step == 0:
                l = sess.run(loss, feed_dict={x: data_x, x_: data_x_})
                print('epoch {0}: global loss = {1}'.format(i, l))
        # self.loss_val = l
        # debug
        # print('Decoded', sess.run(decoded, feed_dict={x: self.data_x_})[0])
        # keep the trained encoder parameters for transform()
        self.weights.append(sess.run(encode['weights']))
        self.biases.append(sess.run(encode['biases']))
        return sess.run(encoded, feed_dict={x: data_x_})
    def activate(self, linear, name):
        # Apply the named activation to a linear pre-activation tensor.
        if name == 'sigmoid':
            return tf.nn.sigmoid(linear, name='encoded')
        elif name == 'softmax':
            return tf.nn.softmax(linear, name='encoded')
        elif name == 'linear':
            return linear
        elif name == 'tanh':
            return tf.nn.tanh(linear, name='encoded')
        elif name == 'relu':
            return tf.nn.relu(linear, name='encoded')
| rajarsheem/libsdae-autoencoder-tensorflow | deepautoencoder/stacked_autoencoder.py | Python | mit | 6,154 | [
"Gaussian"
] | 5095ac48d64ac23b27f340b2f307a21ecd395e3abe67ded139fdb090c5327c2e |
import datetime
from glob import glob
import netCDF4 as NET
import numpy as np
import os
import re
from shutil import rmtree
from sqlalchemy import and_
#tethys imports
from tethys_dataset_services.engines import GeoServerSpatialDatasetEngine
#local import
from model import SettingsSessionMaker, MainSettings, Watershed, Geoserver
from sfpt_dataset_manager.dataset_manager import CKANDatasetManager
def check_shapefile_input_files(shp_files):
    """
    Check that a shapefile upload contains all required component files.

    shp_files -- iterable of uploaded file objects exposing a ``name`` attribute

    Returns the list of required extensions (.shp, .shx, .prj, .dbf) that are
    missing, in canonical order; an empty list means the upload is complete.

    Fixes the original implementation, which removed items from the required
    list while iterating over it (skipping elements) and kept an unused
    accumulator list.
    """
    required_extensions = ['.shp', '.shx', '.prj', '.dbf']
    found_extensions = set()
    for shp_file in shp_files:
        found_extensions.add(os.path.splitext(shp_file.name)[1])
    # preserve canonical ordering of the missing extensions
    return [ext for ext in required_extensions if ext not in found_extensions]
def rename_shapefile_input_files(shp_files, new_file_name):
    """
    Rename every uploaded shapefile component to a common base name,
    keeping each file's original extension.
    """
    for shp_file in shp_files:
        extension = os.path.splitext(shp_file.name)[1]
        shp_file.name = new_file_name + extension
def delete_old_watershed_prediction_files(watershed, forecast="all"):
    """
    Removes old watershed prediction files from system if no other watershed has them

    watershed -- Watershed ORM object whose ECMWF/WRF-Hydro prediction files
        should be removed
    forecast -- "all", "ecmwf" or "wrf_hydro" (case-insensitive)
    """
    def delete_prediciton_files(main_folder_name, sub_folder_name, local_prediction_files_location):
        """
        Removes predicitons from folder and folder if not empty
        """
        prediciton_folder = os.path.join(local_prediction_files_location,
                                         main_folder_name,
                                         sub_folder_name)
        #remove watersheds subbsasins folders/files
        if main_folder_name and sub_folder_name and \
        local_prediction_files_location and os.path.exists(prediciton_folder):
            #remove all prediction files from watershed/subbasin
            try:
                rmtree(prediciton_folder)
            except OSError:
                pass
            #remove watershed folder if no other subbasins exist
            try:
                os.rmdir(os.path.join(local_prediction_files_location,
                                      main_folder_name))
            except OSError:
                # rmdir fails when other subbasin folders remain -- expected
                pass
    #initialize session
    session = SettingsSessionMaker()
    main_settings = session.query(MainSettings).order_by(MainSettings.id).first()
    forecast = forecast.lower()
    #Remove ECMWF Forecasta
    if forecast == "all" or forecast == "ecmwf":
        #Make sure that you don't delete if another watershed is using the
        #same predictions
        num_ecmwf_watersheds_with_forecast  = session.query(Watershed) \
            .filter(
                and_(
                    Watershed.ecmwf_data_store_watershed_name == watershed.ecmwf_data_store_watershed_name,
                    Watershed.ecmwf_data_store_subbasin_name == watershed.ecmwf_data_store_subbasin_name
                )
            ) \
            .filter(Watershed.id != watershed.id) \
            .count()
        if num_ecmwf_watersheds_with_forecast <= 0:
            delete_prediciton_files(watershed.ecmwf_data_store_watershed_name,
                                    watershed.ecmwf_data_store_subbasin_name,
                                    main_settings.ecmwf_rapid_prediction_directory)
    #Remove WRF-Hydro Forecasts
    if forecast == "all" or forecast == "wrf_hydro":
        #Make sure that you don't delete if another watershed is using the
        #same predictions
        num_wrf_hydro_watersheds_with_forecast  = session.query(Watershed) \
            .filter(
                and_(
                    Watershed.wrf_hydro_data_store_watershed_name == watershed.wrf_hydro_data_store_watershed_name,
                    Watershed.wrf_hydro_data_store_subbasin_name == watershed.wrf_hydro_data_store_subbasin_name
                )
            ) \
            .filter(Watershed.id != watershed.id) \
            .count()
        if num_wrf_hydro_watersheds_with_forecast <= 0:
            delete_prediciton_files(watershed.wrf_hydro_data_store_watershed_name,
                                    watershed.wrf_hydro_data_store_subbasin_name,
                                    main_settings.wrf_hydro_rapid_prediction_directory)
    session.close()
def delete_old_watershed_kml_files(watershed):
    """
    Removes old watershed kml files (drainage line, catchment, gage) from
    the local server, then removes the watershed's kml folder if empty.

    watershed -- object with folder_name, kml_drainage_line_layer,
        kml_catchment_layer and kml_gage_layer attributes (layer names may
        be None/empty when a layer was never uploaded)

    Refactors three copy-pasted try/remove blocks into one loop; behavior
    is unchanged (missing files and a non-empty folder are ignored).
    """
    old_kml_file_location = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                         'public', 'kml', watershed.folder_name)
    # remove each layer kml file if present (missing files are ignored)
    for layer_file in (watershed.kml_drainage_line_layer,
                       watershed.kml_catchment_layer,
                       watershed.kml_gage_layer):
        if layer_file:
            try:
                os.remove(os.path.join(old_kml_file_location, layer_file))
            except OSError:
                pass
    # remove the watershed folder (rmdir only succeeds when it is empty)
    try:
        os.rmdir(old_kml_file_location)
    except OSError:
        pass
def purge_remove_geoserver_layer(layer_id, engine):
    """
    completely remove geoserver layer

    layer_id -- GeoServer layer identifier
    engine -- GeoServerSpatialDatasetEngine instance

    Deletes the layer, then its resource, then its store; purge=True also
    removes the underlying files and recurse=True removes dependent objects.
    The deletion order (layer -> resource -> store) matters to GeoServer.
    """
    engine.delete_layer(layer_id, purge=True, recurse=True)
    engine.delete_resource(layer_id, purge=True, recurse=True)
    engine.delete_store(layer_id, purge=True, recurse=True)
def delete_old_watershed_geoserver_files(watershed):
    """
    Removes old watershed geoserver files from system

    Connects to the watershed's GeoServer and purges every layer type that
    was uploaded through this app (drainage line, catchment, gage, AHPS
    station); layers only registered (not uploaded) are left alone.
    """
    engine = GeoServerSpatialDatasetEngine(endpoint="%s/rest" % watershed.geoserver.url,
                                           username=watershed.geoserver.username,
                                           password=watershed.geoserver.password)
    if watershed.geoserver_drainage_line_uploaded:
        purge_remove_geoserver_layer(watershed.geoserver_drainage_line_layer,
                                     engine)
    if watershed.geoserver_catchment_uploaded:
        purge_remove_geoserver_layer(watershed.geoserver_catchment_layer,
                                     engine)
    if watershed.geoserver_gage_uploaded:
        purge_remove_geoserver_layer(watershed.geoserver_gage_layer,
                                     engine)
    if watershed.geoserver_ahps_station_uploaded:
        purge_remove_geoserver_layer(watershed.geoserver_ahps_station_layer,
                                     engine)
def delete_old_watershed_files(watershed, ecmwf_local_prediction_files_location,
                               wrf_hydro_local_prediction_files_location):
    """
    Removes old watershed files from system

    Cleans up everything associated with a watershed: local kml files,
    GeoServer layers, ECMWF/WRF-Hydro prediction files, and (for CKAN data
    stores) the RAPID input resource.

    NOTE(review): the two *_local_prediction_files_location parameters are
    not used here -- prediction locations come from MainSettings inside
    delete_old_watershed_prediction_files; confirm before removing them.
    """
    #remove old kml files
    delete_old_watershed_kml_files(watershed)
    #remove old geoserver files
    delete_old_watershed_geoserver_files(watershed)
    #remove old ECMWF and WRF-Hydro prediction files
    delete_old_watershed_prediction_files(watershed, forecast="all")
    #remove RAPID input files on CKAN
    data_store = watershed.data_store
    if 'ckan' == data_store.data_store_type.code_name and watershed.ecmwf_rapid_input_resource_id:
        #get dataset managers
        data_manager = CKANDatasetManager(data_store.api_endpoint,
                                          data_store.api_key,
                                          "ecmwf"
                                          )
        data_manager.dataset_engine.delete_resource(watershed.ecmwf_rapid_input_resource_id)
def ecmwf_find_most_current_files(path_to_watershed_files, start_folder):
    """
    Finds the current output from downscaled ECMWF forecasts.

    path_to_watershed_files -- directory containing one dated sub-directory
        (named YYYYMMDD.HHMM) per forecast run
    start_folder -- a specific run directory name, or "most_recent" to pick
        the newest run containing netCDF files

    Returns (list_of_nc_files, forecast_start_datetime), or (None, None)
    when nothing is found.

    Fixes the Python-2-only ``print ex`` statements and uses explicit floor
    division so the hour arithmetic is identical on Python 2 and 3.
    """
    if start_folder == "most_recent":
        if not os.path.exists(path_to_watershed_files):
            return None, None
        # newest run first (YYYYMMDD.HHMM names sort chronologically)
        directories = sorted([d for d in os.listdir(path_to_watershed_files)
                              if os.path.isdir(os.path.join(path_to_watershed_files, d))],
                             reverse=True)
    else:
        directories = [start_folder]
    for directory in directories:
        try:
            date = datetime.datetime.strptime(directory.split(".")[0], "%Y%m%d")
            time = directory.split(".")[-1]
            path_to_files = os.path.join(path_to_watershed_files, directory)
            if os.path.exists(path_to_files):
                basin_files = glob(os.path.join(path_to_files, "*.nc"))
                if len(basin_files) > 0:
                    # HHMM -> whole hours; // keeps py2/py3 behavior identical
                    hour = int(time) // 100
                    return basin_files, date + datetime.timedelta(hours=hour)
        except Exception as ex:
            # skip directories whose names are not valid timestamps
            print(ex)
    # there are no files found
    return None, None
def wrf_hydro_find_most_current_file(path_to_watershed_files, date_string):
    """
    Finds the current output from downscaled WRF-Hydro forecasts.

    path_to_watershed_files -- directory containing RapidResult_*_CF.nc files
    date_string -- a timestamp like 20150405T2300Z identifying a specific
        forecast file, or "most_recent" to pick the newest *.nc file

    Returns the full path to the forecast file, or None if nothing is found.

    Fixes two defects in the original: the Python-2-only ``print ex``
    statement, and a double-join bug -- glob() already returns full paths,
    which the old code joined onto the base directory again (breaking any
    call made with a relative path).
    """
    if date_string == "most_recent":
        if not os.path.exists(path_to_watershed_files):
            return None
        # glob returns full paths; newest first by name
        prediction_files = sorted(glob(os.path.join(path_to_watershed_files, "*.nc")),
                                  reverse=True)
    else:
        # specific file, e.g. RapidResult_20150405T2300Z_CF.nc
        prediction_files = [os.path.join(path_to_watershed_files,
                                         "RapidResult_%s_CF.nc" % date_string)]
    for path_to_file in prediction_files:
        if os.path.exists(path_to_file):
            return path_to_file
    # there are no files found
    return None
def format_name(string):
    """
    Sanitize a watershed/subbasin name for internal use: lower-cased,
    spaces replaced by underscores, all other characters outside
    [a-zA-Z0-9_-] removed, and leading '-'/'_' stripped.
    Falsy input (None, "") yields "".
    """
    if not string:
        return ""
    cleaned = re.sub(r'[^a-zA-Z0-9_-]', '', string.strip().replace(" ", "_").lower())
    return cleaned.lstrip("-_")
def format_watershed_title(watershed, subbasin):
    """
    Build the navigation title "Watershed (Subbasin)", truncating with
    an ellipsis so the whole title fits in roughly 30 characters.
    """
    limit = 30
    watershed = watershed.strip()
    subbasin = subbasin.strip()
    # watershed alone already too long: truncate it and drop the subbasin
    if len(watershed) > limit:
        return watershed[:limit - 1].strip() + "..."
    remaining = limit - len(watershed)
    # truncate the subbasin to whatever space is left
    if len(subbasin) > remaining:
        return "%s (%s ...)" % (watershed, subbasin[:remaining - 3].strip())
    return "%s (%s)" % (watershed, subbasin)
def get_cron_command():
    """
    Build the command for cron to run the dataset download script
    (load_datasets.py next to this module, executed with the virtualenv's
    python four directories up).  Returns None if no path delimiter can
    be determined.
    """
    local_directory = os.path.dirname(os.path.abspath(__file__))
    # detect the platform's path delimiter from the directory string itself
    if "/" in local_directory:
        delimiter = "/"
    elif "\\" in local_directory:
        delimiter = "\\"
    else:
        delimiter = ""
    if not (delimiter and local_directory):
        return None
    # virtualenv root is assumed to be four levels above this module
    virtual_env_path = delimiter.join(local_directory.split(delimiter)[:-4])
    return '%s %s' % (os.path.join(virtual_env_path, 'bin', 'python'),
                      os.path.join(local_directory, 'load_datasets.py'))
def get_reach_index(reach_id, prediction_file, guess_index=None):
    """
    Gets the index of the reach from the COMID.

    reach_id -- COMID of the river reach to locate
    prediction_file -- path to a netCDF file containing a 'COMID' variable
    guess_index -- optional index to verify first, avoiding a full search

    Returns the index of reach_id within the COMID variable, or None if
    the reach is not present.

    Fixes the Python-2-only ``print ex`` statements, and checks the guess
    with ``is not None`` so a valid guess of index 0 is no longer skipped
    by the old truthiness test (the fallback search still guarantees a
    correct result either way).
    """
    data_nc = NET.Dataset(prediction_file, mode="r")
    com_ids = data_nc.variables['COMID'][:]
    data_nc.close()
    # fast path: verify the caller's guessed index first
    if guess_index is not None:
        try:
            if int(reach_id) == int(com_ids[int(guess_index)]):
                return int(guess_index)
        except Exception as ex:
            print(ex)
    try:
        return np.where(com_ids == int(reach_id))[0][0]
    except Exception as ex:
        # reach_id absent (empty where-result) or unparsable input
        print(ex)
        return None
def get_comids_in_lookup_comid_list(search_reach_id_list, lookup_reach_id_list):
    """
    Gets the subset comid_index_list, reordered_comid_list from the netcdf file.

    search_reach_id_list -- array of COMIDs to test for membership
    lookup_reach_id_list -- numpy array of COMIDs to look up in

    Returns (indices into search_reach_id_list whose COMIDs appear in the
    lookup list, lookup_reach_id_list indexed by those positions).

    NOTE(review): the second value indexes the LOOKUP array with indices
    derived from the SEARCH array, matching the original code -- this is
    only meaningful when the two arrays are aligned; confirm with callers.

    Fixes the original's Python-2-only ``print ex`` and the NameError it
    raised on the exception path (search_reach_indices_list was referenced
    before assignment); an empty index array is now returned instead.
    """
    try:
        # positions in search list whose COMIDs appear in the lookup list
        search_reach_indices_list = np.where(np.in1d(search_reach_id_list, lookup_reach_id_list))[0]
    except Exception as ex:
        print(ex)
        search_reach_indices_list = np.array([], dtype=int)
    return search_reach_indices_list, lookup_reach_id_list[search_reach_indices_list]
def get_subbasin_list(file_path):
    """
    Gets a sorted list of unique subbasin names in the watershed, derived
    from the *drainage_line.kml and *catchment.kml file names in file_path
    (the name is everything before the final '-' in the base file name).
    """
    subbasin_names = set()
    for pattern in ('*drainage_line.kml', '*catchment.kml'):
        for kml_path in glob(os.path.join(file_path, pattern)):
            subbasin_names.add("-".join(os.path.basename(kml_path).split("-")[:-1]))
    return sorted(subbasin_names)
def get_watershed_info(app_instance_id, session, watersheds_group):
    """
    Build the watershed lists needed by the navigation UI.

    session -- SQLAlchemy session used to resolve each watershed's Geoserver
    watersheds_group -- iterable of Watershed ORM objects

    Returns (outline_watersheds_list, dropdown_watershed_list):
    - outline entries: [ecmwf name, geoserver url, watershed id, outline layer]
    - dropdown entries: ("Watershed (Subbasin)", watershed id)

    NOTE(review): app_instance_id is unused here -- confirm with callers
    before removing it from the signature.
    """
    # maybe make this one list?
    #list of names and ids in group for dropdown list
    dropdown_watershed_list = []
    for watershed in watersheds_group:
        dropdown_watershed_list.append(("%s (%s)" % (watershed.watershed_name, watershed.subbasin_name),
                                        watershed.id))
    outline_watersheds_list = []
    #list of names, geoservers, app_id, ids for loading outlines
    for watershed in watersheds_group:
        geoserver = session.query(Geoserver).filter(Geoserver.id == watershed.geoserver_id).all()[0]
        geoserver_url = geoserver.url
        outline_watersheds_list.append(
            [watershed.ecmwf_data_store_watershed_name, geoserver_url, watershed.id, watershed.geoserver_outline_layer])
    return outline_watersheds_list, dropdown_watershed_list
def handle_uploaded_file(f, file_path, file_name):
    """
    Write a Django-style uploaded file (an object exposing ``chunks()``)
    to file_path/file_name, replacing any existing file and creating the
    directory if needed.
    """
    destination_path = os.path.join(file_path, file_name)
    # drop any previous copy of the file (ignored when absent)
    try:
        os.remove(destination_path)
    except OSError:
        pass
    # ensure the target directory exists
    if not os.path.exists(file_path):
        os.mkdir(file_path)
    # stream the upload to disk chunk by chunk
    with open(destination_path, 'wb+') as destination:
        for chunk in f.chunks():
            destination.write(chunk)
def user_permission_test(user):
    """
    Access check for admin pages: the user must be a superuser or staff.
    """
    allowed = user.is_superuser or user.is_staff
    return allowed
"NetCDF"
] | 098e00acf067700d9997e9bfa8fead1b105f7fa17f159bfa363718cd3eaa0747 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RXde(RPackage):
    """Multi-level model for cross-study detection of differential gene
    expression."""
    # Bioconductor package metadata
    homepage = "https://www.bioconductor.org/packages/XDE/"
    url = "https://git.bioconductor.org/packages/XDE"
    # version pinned to an exact commit of the Bioconductor git mirror
    version('2.22.0', git='https://git.bioconductor.org/packages/XDE', commit='25bcec965ae42a410dd285a9db9be46d112d8e81')
    # R package dependencies (needed at both build and run time)
    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-biocgenerics', type=('build', 'run'))
    depends_on('r-genefilter', type=('build', 'run'))
    depends_on('r-gtools', type=('build', 'run'))
    depends_on('r-mergemaid', type=('build', 'run'))
    depends_on('r-mvtnorm', type=('build', 'run'))
    # R interpreter version constraint for this package version
    depends_on('r@3.4.0:3.4.9', when='@2.22.0')
| EmreAtes/spack | var/spack/repos/builtin/packages/r-xde/package.py | Python | lgpl-2.1 | 1,980 | [
"Bioconductor"
] | 62078db4e3314de0442e079777a77021c3947c1a5826cbc35ef35c6c3b278b07 |
import sys
from os import path
import webbrowser
from PyQt4 import QtGui
from PyQt4 import QtCore
# Directory containing this script; handles frozen (py2exe/cx_Freeze) builds
# where __file__ is unavailable.  (Python 2 source: uses unicode().)
if hasattr(sys, 'frozen'):
    scriptDir = path.dirname(unicode(sys.executable, sys.getfilesystemencoding()))
else:
    scriptDir = path.dirname(unicode(__file__, sys.getfilesystemencoding()))
# The question shown to the user, plus documentation/project links
theQuestion = 'How am I experiencing this moment of being alive?'
methodURL = 'http://www.actualfreedom.com.au/richard/articles/thismomentofbeingalive.htm'
projectURL = 'http://github.com/srid/haietmoba-reminder'
# %d is filled in with the reminder gap (minutes) for the welcome balloon
welcomeMsg = '''This application will remind you to ask HAIETMOBA every %d minutes. \
For each reminder, answer yourself how you are experiencing this moment of being alive; \
then click one of the buttons depending on how you are generally feeling.
'''
class MainWindow(QtGui.QWidget):
    """Always-on-top reminder window that asks the HAIETMOBA question."""
    def __init__(self):
        # NOTE(review): subclasses QWidget but initialises via
        # QMainWindow.__init__ -- confirm this is intentional.
        QtGui.QMainWindow.__init__(self)
        self.resize(1, 1) # adding widgets will expand to fit size
        self.setWindowTitle('HAIETMOBA?')
        self.setToolTip(theQuestion)
        self.setWindowFlags(
            QtCore.Qt.Window |
            QtCore.Qt.WindowMinimizeButtonHint | # only minimize (no maximize)
            QtCore.Qt.WindowStaysOnTopHint)
        self.createInterface()
        self.gap = 10 # in minutes
        # `app` is the module-level Application instance created at startup
        self.quitAction = QtGui.QAction("&Quit", self,
                                        triggered=app.quit)
    def setGap(self, gap):
        """Set the gap between reminders in minutes"""
        self.gap = gap
    def show(self):
        # show, then re-center on screen
        super(MainWindow, self).show()
        self.center()
    def createInterface(self):
        """Create the UI elements of our main window"""
        # The reason for using three buttons (instead of just one called 'OK')
        # is to help prevent the habituation. At least, one has to invest in
        # a few thoughts when there are more buttons ("which one to click? ah,
        # that requires me to first answer the question!")
        good = QtGui.QPushButton(":-&)")
        good.setToolTip('Feeling good (generally)')
        meh = QtGui.QPushButton(":-&|")
        meh.setToolTip('Feeling OK/neutral (generally) -- what is preventing me from feeling good now?')
        bad = QtGui.QPushButton(":-&(")
        bad.setToolTip('Feeling bad (generally) -- should investigate the issue')
        # any answer hides the window until the next reminder
        good.clicked.connect(self.receiveAnswer)
        meh.clicked.connect(self.receiveAnswer)
        bad.clicked.connect(self.receiveAnswer)
        # The question itself in a big/bold text
        lbl = QtGui.QLabel()
        lbl.setText(theQuestion)
        lbl.setFont(QtGui.QFont('Verdana', 16, 100))
        # Qt layout boilerplate
        hbox = QtGui.QHBoxLayout()
        hbox.addStretch(1)
        hbox.addWidget(good)
        hbox.addWidget(meh)
        hbox.addWidget(bad)
        hbox.addStretch(1)
        vbox = QtGui.QVBoxLayout()
        vbox.addStretch(1)
        vbox.addWidget(lbl)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        self.setLayout(vbox)
    def receiveAnswer(self):
        """On receiving the answer, hide the window till next reminder"""
        self.hide()
        interval = 1000*60*self.gap
        QtCore.QTimer.singleShot(interval, self.show)
    def center(self):
        """Center the window on screen"""
        screen = QtGui.QDesktopWidget().screenGeometry()
        size = self.geometry()
        self.move(
            (screen.width()-size.width())/2,
            (screen.height()-size.height())/2)
    def closeEvent(self, event):
        # Confirm before quitting; on confirmation also remove the tray icon.
        reply = QtGui.QMessageBox.question(
            self, 'Message', 'Are you sure to quit?',
            QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
        if reply == QtGui.QMessageBox.Yes:
            event.accept()
            app.trayIcon.hide()
        else:
            event.ignore()
class Application(QtGui.QApplication):
    """Qt application owning the main window and the system-tray icon."""
    def __init__(self, *args, **kw):
        QtGui.QApplication.__init__(self, *args, **kw)
        # shared icon for window and tray (data/icon.png next to the script)
        self.icon = QtGui.QIcon(path.join(scriptDir, 'data/icon.png'))
    def createInterface(self):
        # Build the main window and the tray icon with its context menu.
        self.mainWindow = MainWindow()
        self.mainWindow.setWindowIcon(self.icon)
        self.createSystemTrayIcon()
    def show(self):
        # Show window + tray, then pop a one-minute welcome balloon.
        self.mainWindow.show()
        self.mainWindow.center()
        self.trayIcon.show()
        self.trayIcon.showMessage(
            'Welcome',
            welcomeMsg % self.mainWindow.gap,
            QtGui.QSystemTrayIcon.Information,
            1000*60)
    def quit(self, *a, **k):
        # Hide the tray icon on quit so it does not linger in the tray.
        super(Application, self).quit()
        self.trayIcon.hide()
    def createSystemTrayIcon(self):
        """Create a systray icon with a context menu"""
        self.trayIcon = QtGui.QSystemTrayIcon(self.icon, self.mainWindow)
        # systray context menu
        menu = QtGui.QMenu(self.mainWindow)
        # mutually exclusive reminder-frequency choices
        self.frequency = QtGui.QActionGroup(self.mainWindow)
        for (mins, choice) in [(1, 'Every minute'),
                               (2, 'Every 2 minutes'),
                               (3, 'Every 3 minutes'),
                               (4, 'Every 4 minutes'),
                               (5, 'Every 5 minutes'),
                               (10, 'Every 10 minutes (recommended)'),
                               (15, 'Every 15 minutes'),
                               (20, 'Every 20 minutes'),
                               (30, 'Every 30 minutes'),
                               (60, 'Every hour')]:
            a = self.frequency.addAction(choice)
            a.setCheckable(True)
            menu.addAction(a)
            # factory binds the current `mins` (avoids the late-binding
            # closure pitfall in this loop)
            def getGapSetter(m):
                return lambda: self.mainWindow.setGap(m)
            a.triggered.connect(getGapSetter(mins))
            if 'recommended' in choice:
                a.setChecked(True) # default
                self.mainWindow.setGap(mins)
        menu.addSeparator()
        aboutAction = menu.addAction('About the actualism method')
        aboutAction.triggered.connect(lambda: webbrowser.open(methodURL))
        aboutAppAction = menu.addAction('Visit the application home page')
        aboutAppAction.triggered.connect(lambda: webbrowser.open(projectURL))
        menu.addSeparator()
        menu.addAction(self.mainWindow.quitAction)
        self.trayIcon.setContextMenu(menu)
        self.trayIcon.setToolTip(theQuestion)
        # clicking the welcome balloon opens the method documentation
        self.trayIcon.messageClicked.connect(
            lambda : webbrowser.open(methodURL))
# Application entry point: refuse to run without a system tray, then show UI.
app = Application(sys.argv)
if not QtGui.QSystemTrayIcon.isSystemTrayAvailable():
    QtGui.QMessageBox.critical(None, "Systray",
                               "I couldn't detect any system tray on this system.")
    sys.exit(1)
app.createInterface()
app.show()
sys.exit(app.exec_())
| srid/haietmoba-reminder | haietmoba-reminder.py | Python | mit | 7,079 | [
"VisIt"
] | 6e19064d9c094b398c488a31e20f840e9719592b287e51f590745b223497af91 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2016 David Emms
#
# This program (OrthoFinder) is distributed under the terms of the GNU General Public License v3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# When publishing work that uses OrthoFinder please cite:
# Emms, D.M. and Kelly, S. (2015) OrthoFinder: solving fundamental biases in whole genome comparisons dramatically
# improves orthogroup inference accuracy, Genome Biology 16:157
#
# For any enquiries send an email to David Emms
# david_emms@hotmail.comhor: david
"""
Handles location of all input and output files
Users:
1. Call InitialiseFileHandler
2. Interact with FileHandler object (which is initialised by the above method)
- it is an instance of __Files_new_dont_manually_create__
The code also contains the help class: PreviousFilesLocator (and child classes of it)
"""
import os
import sys
import glob
import time
import shutil
import datetime
from . import util
class SpeciesInfo(object):
    """Bookkeeping for which species are included in the current analysis."""
    def __init__(self):
        # Indices of the species to include in this analysis
        self.speciesToUse = []
        # Total number of species known: valid indices are 0, 1, ..., nSpAll - 1
        self.nSpAll = None
        # First index belonging to newly-added species; indices below it come
        # from a previous run, indices from it upwards are new in this run
        self.iFirstNewSpecies = None
    def __str__(self):
        summary = (self.speciesToUse, self.nSpAll, self.iFirstNewSpecies)
        return str(summary)
""" ************************************************************************************************************************* """
""" ************************************************************************************************************************* """
""" ************************************************************************************************************************* """
class __Files_new_dont_manually_create__(object):
    """Central registry of every input/output path used by an OrthoFinder run.

    Do not instantiate this class directly: use the module-level singleton
    ``FileHandler`` (created below) and initialise it with one of the
    ``CreateOutputDir.../StartFrom...`` methods before requesting paths.
    """
    def __init__(self):
        self.baseOgFormat = "OG%07d"  # printf-style template for orthogroup identifiers
        self.wd_base = []             # Base: blast, species & sequence IDs, species fasta files - should not request this and then write here
        self.wd_current = None        # Location to write out any new files
        self.wd_trees = None          # Location of working dir containing tree files
        self.rd1 = None               # Results directory for this run
        self.fileIdentifierString = "OrthoFinder"
        self.clustersFilename = None  # clusters file path, without the "_id_pairs.txt" suffix
        self.iResultsVersion = None   # integer suffix disambiguating results files
        self.nondefaultPickleDir = None  # user-specified pickle directory override, if any
        self.speciesTreeRootedIDsFN = None
        self.multipleRootedSpeciesTreesDir = None
        self.species_ids_corrected = None  # corrected SpeciesIDs.txt recovered from a previous log
        # to be modified as appropriate

    """ ========================================================================================== """
    # RefactorDS - FileHandler
    def CreateOutputDirFromStart_new(self, fasta_dir, base, user_name = None, old_wd_base_list=None):
        """
        The initial difference will be that results will go in OrthoFinder/Results_DATE or USER_SPECIFIED/RESULTS_DATE
        whereas before they went in Results_DATE or USER_SPECIFIED.
        If this is a composite analysis (-f + -b) then old_wd_base_list != None
        old_wd_base_list - first item is the WD from a previous analysis to be extended. If this extended other
        ones itself then there will be other items in the list.
        """
        if user_name == None:
            self.rd1 = util.CreateNewWorkingDirectory(base + "Results_")
        else:
            # qDate=False: the user-supplied name replaces the date suffix
            self.rd1 = util.CreateNewWorkingDirectory(base + "Results_" + user_name, qDate=False)
        self.wd_current = self.rd1 + "WorkingDirectory/"
        os.mkdir(self.wd_current)
        self.wd_base = [self.wd_current]
        if old_wd_base_list != None:
            # Composite -f + -b analysis: carry the ID files over from the previous run
            shutil.copy(old_wd_base_list[0] + "SpeciesIDs.txt", self.wd_current + "SpeciesIDs.txt")
            shutil.copy(old_wd_base_list[0] + "SequenceIDs.txt", self.wd_current + "SequenceIDs.txt")
            # Log the first wd in list, this can then be followed back to previous ones
            # Log file - point to WD at start of chain which contains the new species
            # wd_base_list - should contain current directory and then previous linked directories
            with open(self.wd_current + "previous_wd.txt", 'w') as outfile: outfile.write(old_wd_base_list[0] + "\n")
            self.wd_base.extend(old_wd_base_list)
        self.wd_trees = self.wd_current
        self.StartLog()

    # RefactorDS - PreviousFilesLocator
    def StartFromOrthogroupsOrSequenceSearch(self, wd_base_list, base, clustersFilename_pairs=None, user_name = None, userSpeciesTree=None):
        """
        Need to initialise:
        wd_base
        wd_trees
        wd_current
        """
        if len(self.wd_base) != 0: raise Exception("Changing WorkingDirectory1")
        self.wd_base = wd_base_list
        if clustersFilename_pairs != None: self.clustersFilename = clustersFilename_pairs[:-len("_id_pairs.txt")]
        if user_name == None:
            self.rd1 = util.CreateNewWorkingDirectory(base + "Results_")
        else:
            self.rd1 = util.CreateNewWorkingDirectory(base + "Results_" + user_name, qDate=False)
        self.wd_current = self.rd1 + "WorkingDirectory/"
        os.mkdir(self.wd_current)
        with open(self.rd1 + "Log.txt", 'w'):
            pass  # create an empty log file for this run
        self.wd_trees = self.wd_current
        self.StartLog()

    def StartFromTrees(self,
                       wd1_list,
                       wd2,
                       base,
                       clustersFilename_pairs,
                       speciesTreeFN,
                       qIsUSerSpeciesTree,
                       user_name=None):
        """
        Convert user species tree here if necessary
        For OF species tree copy it to location given by FileHandler
        For user species tree, this must be done immediately by OF code
        """
        self.wd_base = wd1_list
        self.wd_trees = wd2
        if user_name == None:
            self.rd1 = util.CreateNewWorkingDirectory(base + "Results_")
        else:
            self.rd1 = util.CreateNewWorkingDirectory(base + "Results_" + user_name, qDate=False)
        self.wd_current = self.rd1 + "WorkingDirectory/"
        os.mkdir(self.wd_current)
        self.clustersFilename = clustersFilename_pairs[:-len("_id_pairs.txt")]
        self.StartLog()
        if not qIsUSerSpeciesTree:
            # OrthoFinder-inferred tree: copy it into this run's working directory
            shutil.copy(speciesTreeFN, self.GetSpeciesTreeIDsRootedFN())
        self.WriteToLog("Species Tree: %s\n" % speciesTreeFN)
        self.LogWorkingDirectoryTrees()

    def CreateOutputDirectories(self, options, previous_files_locator, base_dir, fastaDir=None):
        """Dispatch to the appropriate initialiser based on the requested start point."""
        if options.qStartFromFasta and options.qStartFromBlast:
            wd1 = previous_files_locator.GetStartFromBlast()
            self.CreateOutputDirFromStart_new(fastaDir, base_dir, user_name=options.name, old_wd_base_list = wd1)
        elif options.qStartFromFasta:
            self.CreateOutputDirFromStart_new(fastaDir, base_dir, user_name=options.name)
        elif options.qStartFromBlast:
            wd1 = previous_files_locator.GetStartFromBlast()
            self.StartFromOrthogroupsOrSequenceSearch(wd1,
                                                      base_dir,
                                                      user_name=options.name)
        elif options.qStartFromGroups:
            wd1, clustersFilename_pairs = previous_files_locator.GetStartFromOGs()
            self.StartFromOrthogroupsOrSequenceSearch(wd1,
                                                      base_dir,
                                                      clustersFilename_pairs,
                                                      user_name=options.name)
        elif options.qStartFromTrees:
            wd1, clustersFilename_pairs, wd_trees, speciesTreeFN = previous_files_locator.GetStartFromTrees()
            if options.speciesTreeFN != None:
                # A tree supplied on the command line takes precedence
                qIsUserSpeciesTree = True
                speciesTreeFN = options.speciesTreeFN
            elif speciesTreeFN != None:
                qIsUserSpeciesTree = False
            else:
                print("ERROR: Could not find species tree")
                util.Fail()
            self.StartFromTrees(wd1,
                                wd_trees,
                                base_dir,
                                clustersFilename_pairs,
                                speciesTreeFN,
                                qIsUserSpeciesTree,
                                user_name=options.name)
        if (options.qStartFromGroups or options.qStartFromTrees) and previous_files_locator.species_ids_lines != None:
            # In only these cases, it's possible that the SpeciesIDs.txt file is out of sync and the version in the previous log should be used instead
            self.CreateCorrectedSpeciesIDsFile(previous_files_locator.species_ids_lines)

    def CreateCorrectedSpeciesIDsFile(self, species_ids_lines):
        """Write the SpeciesIDs.txt text recovered from a previous run's log into wd_current."""
        self.species_ids_corrected = self.wd_current + "SpeciesIDs.txt"
        with open(self.species_ids_corrected, 'w') as outfile:
            outfile.write(species_ids_lines)

    """ ========================================================================================== """
    # RefactorDS - FileHandler
    def SetNondefaultPickleDir(self, d):
        """Override the default pickle directory with a user-supplied one.

        Bug fix: previously only ``self.pickleDir`` was set, but GetPickleDir
        checks ``self.nondefaultPickleDir``, so the override was silently
        ignored. ``self.pickleDir`` is still set for backward compatibility.
        """
        self.pickleDir = d
        self.nondefaultPickleDir = d

    def GetPickleDir(self):
        """Return the pickle directory, creating it if it does not exist."""
        if self.nondefaultPickleDir != None:
            d = self.nondefaultPickleDir
        else:
            d = self.wd_current + "pickle/"
        if not os.path.exists(d): os.mkdir(d)
        return d

    """ Standard Methods
        ========================================================================================== """
    def LogSpecies(self):
        """Append the list of species used (contents of SpeciesIDs.txt) to the log."""
        text = "\nSpecies used: \n"
        fn = self.GetSpeciesIDsFN()
        with open(fn, 'r') as infile:
            text += "".join(infile.readlines())
        self.WriteToLog(text + "\n")

    """ Standard Directories
        ========================================================================================== """
    def GetWorkingDirectory1_Read(self):
        """Return the list of working directories to read from (current first, then previous)."""
        if len(self.wd_base) == 0: raise Exception("No wd1")
        return self.wd_base

    def GetWorkingDirectory_Write(self):
        """Return the single working directory new files should be written to."""
        if self.wd_current == None: raise Exception("No wd_current")
        return self.wd_current

    def GetResultsDirectory1(self):
        if self.rd1 == None: raise Exception("No rd1")
        return self.rd1

    def GetResultsDirectory2(self):
        if self.rd1 == None: raise Exception("No rd1")
        return self.rd1

    def GetOrthologuesDirectory(self):
        """Where the directories of species orthologues are"""
        if self.rd1 == None: raise Exception("No rd1")
        d = self.rd1 + "Orthologues/"
        if not os.path.exists(d): os.mkdir(d)
        return d

    """ Orthogroups files
        ========================================================================================== """
    def GetSpeciesIDsFN(self):
        # Prefer the corrected file recovered from a previous run's log, if any
        if self.species_ids_corrected != None:
            return self.species_ids_corrected
        return self.wd_base[0] + "SpeciesIDs.txt"

    def GetSequenceIDsFN(self):
        # It is always in the first of the 'extension' directories (as this is the relevant one)
        return self.wd_base[0] + "SequenceIDs.txt"

    def GetSpeciesSeqsDir(self):
        if len(self.wd_base) == 0: raise Exception("No wd1")
        return self.wd_base

    def GetSpeciesFastaFN(self, iSpecies, qForCreation=False):
        """
        qForCreation: A path is required at which the file should be created (don't search for it)
        """
        if len(self.wd_base) == 0: raise Exception("No wd1")
        if qForCreation:
            return "%sSpecies%d.fa" % (self.wd_base[0], iSpecies)
        for d in self.wd_base:
            fn = "%sSpecies%d.fa" % (d, iSpecies)
            if os.path.exists(fn): return fn
        raise Exception(fn + " not found")

    def GetSortedSpeciesFastaFiles(self):
        """Return all Species*.fa files across the working directories, sorted by species index."""
        if len(self.wd_base) == 0: raise Exception("No wd1")
        fastaFilenames = []
        for d in self.wd_base:
            fastaFilenames.extend(glob.glob(d + "Species*.fa"))
        speciesIndices = []
        for f in fastaFilenames:
            # extract N from ".../SpeciesN.fa": skip the "Species" prefix, drop ".fa"
            start = f.rfind("Species")
            speciesIndices.append(int(f[start+7:-3]))
        indices, sortedFasta = util.SortArrayPairByFirst(speciesIndices, fastaFilenames)
        return sortedFasta

    def GetSpeciesDatabaseN(self, iSpecies, program="Blast"):
        """Name (no extension) of the sequence-search database for species iSpecies."""
        return "%s%sDBSpecies%d" % (self.wd_current, program, iSpecies)

    def GetBlastResultsDir(self):
        return self.wd_base

    def GetBlastResultsFN(self, iSpeciesSearch, jSpeciesDB, qForCreation=False):
        if len(self.wd_base) == 0: raise Exception("No wd1")
        if qForCreation: return "%sBlast%d_%d.txt" % (self.wd_base[0], iSpeciesSearch, jSpeciesDB)
        for d in self.wd_base:
            fn = "%sBlast%d_%d.txt" % (d, iSpeciesSearch, jSpeciesDB)
            # accept gzipped results files too
            if os.path.exists(fn) or os.path.exists(fn + ".gz"): return fn
        raise Exception(fn + " not found")

    def GetGraphFilename(self):
        if self.wd_current == None: raise Exception("No wd_current")
        return self.wd_current + "%s_graph.txt" % self.fileIdentifierString

    def CreateUnusedClustersFN(self, mclInflation):
        """Reserve an unused clusters filename; returns (clusters_fn, clusters_id_pairs_fn)."""
        if self.wd_current == None: raise Exception("No wd_current")
        self.clustersFilename, self.iResultsVersion = util.GetUnusedFilename(self.wd_current + "clusters_%s_I%0.1f" % (self.fileIdentifierString, mclInflation), ".txt")
        return self.clustersFilename, self.clustersFilename + "_id_pairs.txt"

    def SetClustersFN(self, pairsFN):
        self.clustersFilename = pairsFN[:-len("_id_pairs.txt")]
        log = "Orthogroups used: %s\n\n" % self.clustersFilename
        self.WriteToLog(log)

    def GetClustersFN(self):
        return self.clustersFilename + "_id_pairs.txt"

    """ Orthologues files
        ========================================================================================== """
    def GetResultsSeqsDir_SingleCopy(self):
        d = self.rd1 + "Single_Copy_Orthologue_Sequences/"
        if not os.path.exists(d): os.mkdir(d)
        return d

    def GetResultsSeqsDir(self):
        return self.rd1 + "Orthogroup_Sequences/"

    def GetResultsAlignDir(self):
        return self.rd1 + "MultipleSequenceAlignments/"

    def GetResultsTreesDir(self):
        return self.rd1 + "Gene_Trees/"

    def GetOGsSeqFN(self, iOG, qResults=False):
        # qResults: user-facing results file (accessions) rather than internal-IDs file
        if qResults:
            return self.rd1 + "Orthogroup_Sequences/" + (self.baseOgFormat % iOG) + ".fa"
        else:
            return self.wd_current + "Sequences_ids/" + (self.baseOgFormat % iOG) + ".fa"

    def GetOGsAlignFN(self, iOG, qResults=False):
        if qResults:
            return self.rd1 + "MultipleSequenceAlignments/" + (self.baseOgFormat % iOG) + ".fa"
        else:
            return self.wd_current + "Alignments_ids/" + (self.baseOgFormat % iOG) + ".fa"

    def GetOGsTreeFN(self, iOG, qResults=False):
        if qResults:
            return self.rd1 + "Gene_Trees/" + (self.baseOgFormat % iOG) + "_tree.txt"
        else:
            return self.wd_trees + "Trees_ids/" + (self.baseOgFormat % iOG) + "_tree_id.txt"

    def GetSpeciesTreeConcatAlignFN(self, qResults=False):
        if qResults:
            return self.rd1 + "MultipleSequenceAlignments/" + "SpeciesTreeAlignment.fa"
        else:
            return self.wd_current + "Alignments_ids/SpeciesTreeAlignment.fa"

    def GetSpeciesTreeMatrixFN(self, qPutInWorkingDir = False):
        if qPutInWorkingDir:
            return self.wd_current + "SpeciesMatrix.phy"
        else:
            return self.wd_current + "Distances/SpeciesMatrix.phy"

    def GetSpeciesTreeUnrootedFN(self, qAccessions=False):
        if qAccessions:
            return self.wd_trees + "SpeciesTree_unrooted.txt"
        else:
            return self.wd_trees + "SpeciesTree_unrooted_ids.txt"

    def GetSpeciesTreeIDsRootedFN(self):
        return self.wd_current + "SpeciesTree_rooted_ids.txt"

    def GetSpeciesTreeResultsFN(self, i, qUnique):
        """
        The results species tree (rooted, accessions, support values)
        i: index for species tree, starting at 0
        qUnique: bool, has a unique root been identified (as it may not be known exactly which branch the root belongs on)
        E.g. if there were just one species tree, the correct call would be GetSpeciesTreeResultsFN(0,True)
        """
        d = self.rd1 + "Species_Tree/"
        if not os.path.exists(d): os.mkdir(d)
        if qUnique:
            return d + "SpeciesTree_rooted.txt"
        else:
            if not self.multipleRootedSpeciesTreesDir:
                self.multipleRootedSpeciesTreesDir = d + "Potential_Rooted_Species_Trees/"
                if not os.path.exists(self.multipleRootedSpeciesTreesDir): os.mkdir(self.multipleRootedSpeciesTreesDir)
            return self.multipleRootedSpeciesTreesDir + "SpeciesTree_rooted_at_outgroup_%d.txt" % i

    def GetSpeciesTreeResultsNodeLabelsFN(self):
        # Same filename as the unique rooted tree, with a "_node_labels" suffix
        return self.GetSpeciesTreeResultsFN(0, True)[:-4] + "_node_labels.txt"

    def GetOGsDistMatFN(self, iOG):
        return self.wd_current + "Distances/OG%07d.phy" % iOG

    def GetSpeciesDict(self):
        d = util.FullAccession(self.GetSpeciesIDsFN()).GetIDToNameDict()
        # strip the trailing filename extension from each species name
        return {k:v.rsplit(".",1)[0] for k,v in d.items()}

    def GetHierarchicalOrthogroupsFN(self, sp_node_name):
        return self.rd1 + "Phylogenetic_Hierarchical_Orthogroups/%s.tsv" % sp_node_name

    """ ========================================================================================== """
    def GetOGsTreeDir(self, qResults=False):
        if qResults:
            return self.rd1 + "Gene_Trees/"
        else:
            return self.wd_trees + "Trees_ids/"

    def GetOGsReconTreeDir(self, qResults=False):
        if qResults:
            d = self.rd1 + "Resolved_Gene_Trees/"
            if not os.path.exists(d): os.mkdir(d)
            return d
        else:
            # Bug fix: the original raised NotImplemented(), but NotImplemented
            # is a singleton (not an exception class); calling it raises a
            # confusing TypeError instead of the intended error.
            raise NotImplementedError()

    def GetOGsReconTreeFN(self, iOG):
        return self.rd1 + "Resolved_Gene_Trees/OG%07d_tree.txt" % iOG

    def GetPhyldogWorkingDirectory(self):
        d = self.wd_current + "phyldog/"
        if not os.path.exists(d): os.mkdir(d)
        return d

    def GetPhyldogOGResultsTreeFN(self, i):
        return self.wd_current + "phyldog/OG%07d.ReconciledTree.txt" % i

    """ ========================================================================================== """
    def CleanWorkingDir2(self):
        """Delete bulky intermediate directories that are no longer needed."""
        dirs = ['Distances/']
        for d in dirs:
            dFull = self.wd_current + d
            if os.path.exists(dFull):
                try:
                    shutil.rmtree(dFull)
                except OSError:
                    time.sleep(1)
                    shutil.rmtree(dFull, True)  # shutil / NFS bug - ignore errors, it's less crucial that the files are deleted

    """ ************************************************************************************************************************* """
    # RefactorDS - FileHandler
    """ Standard Methods ========================================================================================== """
    def LogFailAndExit(self, text=""):
        """Print and log an error message, then terminate the program."""
        if text != "": print(text)
        self.WriteToLog("\nERROR: An error occurred\n" + text)
        util.Fail()

    def WriteToLog(self, text, qWithTime=False):
        """Append text to this run's Log.txt, optionally prefixed with a timestamp."""
        prepend = ""
        if qWithTime:
            prepend = str(datetime.datetime.now()).rsplit(".", 1)[0] + " : "
        with open(self.rd1 + "Log.txt", 'a') as outfile:
            outfile.write(prepend + text)

    def StartLog(self):
        """Write the run header (version, command line, base working directory) to the log."""
        self.WriteToLog("Started OrthoFinder version " + util.version + "\n", True)
        text = "Command Line: " + " ".join(sys.argv) + "\n\n"
        text += "WorkingDirectory_Base: %s\n" % self.wd_base[0]
        self.WriteToLog(text)
        if self.clustersFilename != None: self.LogOGs()

    def LogOGs(self):
        self.WriteToLog("FN_Orthogroups: %s\n" % (self.clustersFilename + "_id_pairs.txt"))

    def LogWorkingDirectoryTrees(self):
        self.WriteToLog("WorkingDirectory_Trees: %s\n" % self.wd_trees)

    def MakeResultsDirectory2(self, tree_generation_method, stop_after="", append_name=""):
        """
        Args
        tree_method: msa, dendroblast, phyldog (determines the directory structure that will be created)
        stop_after: seqs, align
        """
        # RefactorDS - need to change where it puts things
        if self.rd1 == None: raise Exception("No rd1")
        self.wd_trees = self.wd_current
        os.mkdir(self.rd1 + "Orthologues/")
        if tree_generation_method == "msa":
            for i, d in enumerate([self.GetResultsSeqsDir(), self.wd_current + "Sequences_ids/", self.GetResultsAlignDir(), self.wd_current + "Alignments_ids/", self.GetResultsTreesDir(), self.wd_current + "Trees_ids/"]):
                # stop early if only part of the msa pipeline will be run
                if stop_after == "seqs" and i == 2: break
                if stop_after == "align" and i == 4: break
                if not os.path.exists(d): os.mkdir(d)
        elif tree_generation_method == "dendroblast":
            for i, d in enumerate([self.wd_current + "Distances/", self.GetResultsTreesDir(), self.wd_current + "Trees_ids/"]):
                if not os.path.exists(d): os.mkdir(d)

    def GetOrthogroupResultsFNBase(self):
        """Base filename (no extension) for the Orthogroups results files."""
        if self.rd1 == None:
            raise Exception("No rd1")
        if self.iResultsVersion == None:
            raise Exception("Base results identifier has not been created")
        d = self.rd1 + "Orthogroups/"
        if not os.path.exists(d): os.mkdir(d)
        return d + "Orthogroups" + ("" if self.iResultsVersion == 0 else "_%d" % self.iResultsVersion)

    def GetOGsStatsResultsDirectory(self):
        d = self.rd1 + "Comparative_Genomics_Statistics/"
        if not os.path.exists(d): os.mkdir(d)
        return d

    def GetDuplicationsFN(self):
        d = self.rd1 + "Gene_Duplication_Events/"
        if not os.path.exists(d): os.mkdir(d)
        return d + "Duplications.tsv"

    def GetSuspectGenesDir(self):
        d = self.rd1 + "Phylogenetically_Misplaced_Genes/"
        if not os.path.exists(d): os.mkdir(d)
        return d

    def GetPutativeXenelogsDir(self):
        d = self.rd1 + "Putative_Xenologs/"
        if not os.path.exists(d): os.mkdir(d)
        return d

# Module-level singleton through which all other modules access file locations
FileHandler = __Files_new_dont_manually_create__()
""" ************************************************************************************************************************* """
""" ************************************************************************************************************************* """
""" ************************************************************************************************************************* """
class Unprocessable(Exception):
    """Signals that an input directory cannot be handled by a given locator."""
class PreviousFilesLocator(object):
    """Base class recording the locations of files produced by a previous run.

    Subclasses fill in the attributes; the Get* accessors hand the recorded
    locations to FileHandler's StartFrom* initialisers.
    """
    def __init__(self):
        self.wd_base_prev = []             # chain of previous working directories
        self.clustersFilename_pairs = None # previous clusters *_id_pairs.txt file
        self.wd_trees = None               # directory containing previous tree files
        self.home_for_results = None       # where new results should be rooted
        self.speciesTreeRootedIDsFN = None # previous rooted species tree (IDs)
        self.species_ids_lines = None      # SpeciesIDs.txt text recovered from a log

    def GetHomeForResults(self):
        """Directory under which the new Results_* directory should be created."""
        return self.home_for_results

    def GetStartFromBlast(self):
        """Locations needed to restart from pre-computed sequence-search results."""
        return self.wd_base_prev

    def GetStartFromOGs(self):
        """Locations needed to restart from previously inferred orthogroups."""
        return self.wd_base_prev, self.clustersFilename_pairs

    def GetStartFromTrees(self):
        """Locations needed to restart from previously inferred gene trees."""
        return self.wd_base_prev, self.clustersFilename_pairs, self.wd_trees, self.speciesTreeRootedIDsFN
""" ************************************************************************************************************************* """
class PreviousFilesLocator_new(PreviousFilesLocator):
    """Locate files from a previous run that used the new directory structure
    (a Results_* directory containing a Log.txt). Raises Unprocessable if the
    directory does not look like a new-structure results directory, so the
    caller can fall back to PreviousFilesLocator_old."""
    def __init__(self, options, continuationDir):
        PreviousFilesLocator.__init__(self)
        if not continuationDir.endswith("/"): continuationDir += "/"
        # New results go alongside the previous Results_* directory
        self.home_for_results = continuationDir + "../"
        if (options.qStartFromFasta and not options.qStartFromBlast):
            # there are no files to find
            return
        if not self._IsNewDirStructure(continuationDir): raise Unprocessable("Input directory structure is not processable as new structure")
        self._ProcessLog(continuationDir + "/Log.txt")

    def _IsNewDirStructure(self, inputDir):
        # The new structure is identified by the presence of a top-level Log.txt
        return os.path.exists(inputDir + "/Log.txt")

    def _ProcessLog(self, logFN):
        """
        Get all relevant data from log file.
        Checks the paths saved do exist still
        Should work with relevant paths to allow directory to move
        Other methods can then check that the data required for a particular run is available
        """
        with open(logFN, 'r') as infile:
            for line in infile:
                if line.startswith("Species used:"):
                    # Capture the verbatim SpeciesIDs lines up to the blank line
                    self.species_ids_lines = ""
                    line = next(infile)
                    while line.rstrip() != "":
                        self.species_ids_lines += line
                        line = next(infile)
                wd_base_str = "WorkingDirectory_Base: "
                wd_trees_str = "WorkingDirectory_Trees: "
                clusters_str = "FN_Orthogroups: "
                if line.startswith(wd_base_str):
                    wd_base_anchor = line.rstrip()[len(wd_base_str):]
                    if not os.path.exists(wd_base_anchor):
                        # try to see if it's a relative directory to current one
                        path, d_wd = os.path.split(wd_base_anchor[:-1])
                        path, d_res = os.path.split(path)
                        wd_base_anchor = os.path.split(logFN)[0] + ("/../%s/%s/" % (d_res, d_wd))
                        if not os.path.exists(wd_base_anchor):
                            print("ERROR: Missing directory: %s" % wd_base_anchor)
                            util.Fail()
                    self.wd_base_prev = self.GetWDBaseChain(wd_base_anchor)
                    # default; overwritten if a WorkingDirectory_Trees line follows
                    self.wd_trees = self.wd_base_prev[0]
                if line.startswith(clusters_str):
                    clusters_fn_full_path = line.rstrip()[len(clusters_str):]
                    self.clustersFilename_pairs = clusters_fn_full_path
                    if not os.path.exists(self.clustersFilename_pairs):
                        # try to see if it's a relative directory to current one
                        path, clusters_fn = os.path.split(self.clustersFilename_pairs)
                        path, d_wd = os.path.split(path)
                        path, d_res = os.path.split(path)
                        self.clustersFilename_pairs = os.path.split(logFN)[0] + ("/../%s/%s/%s" %(d_res, d_wd, clusters_fn))
                        if not os.path.exists(self.clustersFilename_pairs):
                            print("ERROR: Missing orthogroups file: %s or %s" % (self.clustersFilename_pairs, clusters_fn_full_path))
                            util.Fail()
                    # self._GetOGsFile(wd_ogs_path)
                if line.startswith(wd_trees_str):
                    self.wd_trees = line.rstrip()[len(wd_trees_str):]
                    if not os.path.exists(self.wd_trees):
                        # try to see if it's a relative directory to current one
                        path, d_wd = os.path.split(self.wd_trees[:-1])
                        path, d_res = os.path.split(path)
                        self.wd_trees = os.path.split(logFN)[0] + ("/../%s/%s/" % (d_res, d_wd))
                        if not os.path.exists(self.wd_trees):
                            print("ERROR: Missing directory: %s" % self.wd_trees)
                            util.Fail()
        # Rooted species tree lives in whichever trees directory was found above
        self.speciesTreeRootedIDsFN = self.wd_trees + "SpeciesTree_rooted_ids.txt"

    @staticmethod
    def GetWDBaseChain(wd_base_anchor):
        # Follow the previous_wd.txt links back through the chain of working
        # directories from earlier, extended analyses
        chain = [wd_base_anchor]
        while os.path.exists(chain[-1] + "previous_wd.txt"):
            with open(chain[-1] + "previous_wd.txt", 'r') as infile:
                wd = infile.readline().rstrip()
                if not os.path.exists(wd):
                    # try to see if it's a relative directory to current one
                    path, d_wd = os.path.split(wd[:-1])
                    path, d_res = os.path.split(path)
                    wd = wd_base_anchor + ("/../../%s/%s/" % (d_res, d_wd))
                chain.append(wd)
        return chain
""" ************************************************************************************************************************* """
class PreviousFilesLocator_old(PreviousFilesLocator):
    """Locate files from a previous run that used the old directory layout,
    where results and working files sit directly in the user-supplied
    directory or in its WorkingDirectory/ subdirectory."""
    def __init__(self, options, continuationDir):
        PreviousFilesLocator.__init__(self)
        if not continuationDir.endswith("/"): continuationDir += "/"
        # New-style results will be created under an OrthoFinder/ subdirectory
        self.home_for_results = continuationDir + "OrthoFinder/"
        if options.qStartFromGroups or options.qStartFromTrees:
            # User can specify it using clusters_id_pairs file, process this first to get the workingDirectory
            ogs_dir = continuationDir + "../" if options.qStartFromTrees else continuationDir
            self.wd_base_prev, self.orthofinderResultsDir, self.clustersFilename_pairs = self._GetOGsFile(ogs_dir)
            if options.qStartFromTrees:
                self._FindFromTrees(continuationDir, options.speciesTreeFN)
        elif options.qStartFromBlast:
            # Accept either the working directory itself or its parent
            if self._IsWorkingDirectory(continuationDir):
                self.wd_base_prev = continuationDir
            elif self._IsWorkingDirectory(continuationDir + "WorkingDirectory/"):
                self.wd_base_prev = continuationDir + "WorkingDirectory/"
            else:
                self.wd_base_prev = continuationDir # nothing much to do, set this as the one to try and fail later
        # Callers expect a list of working directories
        self.wd_base_prev = [self.wd_base_prev]

    def _GetOGsFile(self, userArg):
        """returns the WorkingDirectory, ResultsDirectory and clusters_id_pairs filename"""
        qSpecifiedResultsFile = False
        if userArg == None:
            print("ERROR: orthofinder_results_directory has not been specified")
            util.Fail()
        if os.path.isfile(userArg):
            # userArg names a clusters file directly rather than a directory
            fn = os.path.split(userArg)[1]
            if ("clusters_OrthoFinder_" not in fn) or ("txt_id_pairs.txt" not in fn):
                print("ERROR:\n %s\nis neither a directory or a clusters_OrthoFinder_*.txt_id_pairs.txt file." % userArg)
                util.Fail()
            qSpecifiedResultsFile = True
            # user has specified specific results file
        elif userArg[-1] != os.path.sep:
            userArg += os.path.sep
        # find required files
        if qSpecifiedResultsFile:
            orthofinderWorkingDir = os.path.split(userArg)[0] + os.sep
            if not self._IsWorkingDirectory(orthofinderWorkingDir):
                print("ERROR: cannot find files from OrthoFinder run in directory:\n %s" % orthofinderWorkingDir)
                util.Fail()
        else:
            # qSpecifiedResultsFile is False here, so this is just userArg
            orthofinderWorkingDir = os.path.split(userArg)[0] if qSpecifiedResultsFile else userArg
            if not self._IsWorkingDirectory(orthofinderWorkingDir):
                # fall back to the WorkingDirectory/ subdirectory
                orthofinderWorkingDir = userArg + "WorkingDirectory" + os.sep
                if not self._IsWorkingDirectory(orthofinderWorkingDir):
                    print("ERROR: cannot find files from OrthoFinder run in directory:\n %s\nor\n %s\n" % (userArg, orthofinderWorkingDir))
                    util.Fail()
        if qSpecifiedResultsFile:
            print("\nUsing orthogroups in file:\n %s" % userArg)
            return orthofinderWorkingDir, orthofinderWorkingDir, userArg
        else:
            # identify orthogroups file
            clustersFiles = glob.glob(orthofinderWorkingDir + "clusters_OrthoFinder_*.txt_id_pairs.txt")
            orthogroupFiles = glob.glob(orthofinderWorkingDir + "OrthologousGroups*.txt") + glob.glob(orthofinderWorkingDir + "Orthogroups*.txt")
            if orthofinderWorkingDir != userArg:
                orthogroupFiles += glob.glob(userArg + "OrthologousGroups*.txt")
                orthogroupFiles += glob.glob(userArg + "Orthogroups*.txt")
            # User may have specified a WorkingDirectory and results could be in directory above
            if len(orthogroupFiles) < len(clustersFiles):
                orthogroupFiles += glob.glob(userArg + ".." + os.sep + "OrthologousGroups*.txt")
                orthogroupFiles += glob.glob(userArg + ".." + os.sep + "Orthogroups*.txt")
            clustersFiles = sorted(clustersFiles)
            orthogroupFiles = sorted(orthogroupFiles)
            if len(clustersFiles) > 1 or len(orthogroupFiles) > 1:
                # ambiguous: multiple previous runs in the same directory
                print("ERROR: Results from multiple OrthoFinder runs found\n")
                print("Tab-delimiter Orthogroups*.txt/OrthologousGroups*.txt files:")
                for fn in orthogroupFiles:
                    print(" " + fn)
                print("With corresponding cluster files:")
                for fn in clustersFiles:
                    print(" " + fn)
                print("\nPlease run with only one set of results in directories or specifiy the specific clusters_OrthoFinder_*.txt_id_pairs.txt file on the command line")
                util.Fail()
            if len(clustersFiles) != 1 or len(orthogroupFiles) != 1:
                # nothing found at all
                print("ERROR: Results not found in <orthofinder_results_directory> or <orthofinder_results_directory>/WorkingDirectory")
                print("\nCould not find:\n Orthogroups*.txt/OrthologousGroups*.txt\nor\n clusters_OrthoFinder_*.txt_id_pairs.txt")
                util.Fail()
            print("\nUsing orthogroups in file:\n %s" % orthogroupFiles[0])
            print("and corresponding clusters file:\n %s" % clustersFiles[0])
            return orthofinderWorkingDir, userArg, clustersFiles[0]

    def _IsWorkingDirectory(self, orthofinderWorkingDir):
        # A working directory must contain both a clusters file and species fasta files
        ok = True
        ok = ok and len(glob.glob(orthofinderWorkingDir + "clusters_OrthoFinder_*.txt_id_pairs.txt")) > 0
        ok = ok and len(glob.glob(orthofinderWorkingDir + "Species*.fa")) > 0
        return ok

    def _FindFromTrees(self, orthologuesDir, userSpeciesTree):
        """
        if userSpeciesTree == None: Use existing tree
        """
        print("\nFind from trees:")
        print((orthologuesDir, userSpeciesTree))
        self.wd_trees = orthologuesDir + "WorkingDirectory/"
        # Find species tree
        if userSpeciesTree == None:
            # Known filenames a rooted species tree may have been saved under;
            # only needs to be exhaustive enough to determine uniqueness
            possibilities = ["SpeciesTree_ids_0_rooted.txt", "SpeciesTree_ids_1_rooted.txt", "SpeciesTree_user_ids.txt", "SpeciesTree_unrooted_0_rooted.txt", "STAG_SpeciesTree_ids_0_rooted.txt"] # etc (only need to determine if unique)
            nTrees = 0
            for p in possibilities:
                for d in [self.wd_trees, self.wd_trees + "Trees_ids/"]:
                    fn = d + p
                    if os.path.exists(fn):
                        nTrees += 1
                        speciesTree_fn = fn
            if nTrees == 0:
                print("\nERROR: There is a problem with the specified directory. The rooted species tree %s or %s is not present." % (possibilities[0], possibilities[2]))
                print("Please rectify the problem or alternatively use the -s option to specify the species tree to use.\n")
                util.Fail()
            if nTrees > 1:
                print("\nERROR: There is more than one rooted species tree in the specified directory structure. Please use the -s option to specify which species tree should be used\n")
                util.Fail()
            self.speciesTreeRootedIDsFN = speciesTree_fn
        else:
            if not os.path.exists(userSpeciesTree):
                print("\nERROR: %s does not exist\n" % userSpeciesTree)
                util.Fail()
            self.speciesTreeRootedIDsFN = userSpeciesTree
""" ************************************************************************************************************************* """
""" ************************************************************************************************************************* """
""" ************************************************************************************************************************* """
def InitialiseFileHandler(options, fastaDir=None, continuationDir=None, resultsDir_nonDefault=None, pickleDir_nonDefault=None):
    """
    Creates a file handler object which will determine the location of all the files:
    Results will be under the user specified directory or the default results location. Defaults:
    - New, from start:
        FastaDir/OrthoFinder/Results_Date
      or
        resultsDir_nonDefault/Results_Date
    - New, continuation: Existing_OrthoFinder_Dir/Results_Date
    - Old, continuation:
        ContinuationDir/OrthoFinder/Results_Date
      or
        resultsDir_nonDefault/Results_Date
    Implementation
    1. Working out if an old directory structure is being used
    2. Construct an appropriate PreviousFilesLocator if necessary - this locates all required files
    3. Pass this to FileHandler - this creates the directory structure required for this run
    4. if error: print and exit
    5. Return FileHandler
    Tasks:
    - Switch this round, I can tell if it's an old or new directory right from the start - read log and check info present,
      perhaps just pass it to the new file handler and let it decide if everything is there
    """
    # 1 & 2
    # If starting from scratch, no need for a PreviousFileLocator
    if options.qStartFromFasta and not options.qStartFromBlast:
        pfl = None
        base_dir = resultsDir_nonDefault if resultsDir_nonDefault != None else fastaDir + "OrthoFinder/"
    else:
        try:
            # Try to process these as the new directory structure
            pfl = PreviousFilesLocator_new(options, continuationDir)
            # don't create any new directory, it already exists
            base_dir = pfl.GetHomeForResults()
        except Unprocessable:
            # Fall back to the old (pre-Results_DATE) directory structure
            pfl = PreviousFilesLocator_old(options, continuationDir)
            base_dir = resultsDir_nonDefault if resultsDir_nonDefault != None else pfl.GetHomeForResults()
    if not os.path.exists(base_dir): os.mkdir(base_dir)
    # 3
    # RefactorDS - this might be suitable as a constructor now
    # base_dir - should now exist
    """The previous file locator should decide where the output directory should be rooted?
    Rules:
    - If starting from Fasta then Fasta/OrthoFinder/Results_Date
    - Or, SpecifiedDirectory/Results_Date if user specified
    - If starting from a previous new-structure directory (TopLevel/Results_X) then TopLevel/Results_Date
    - If starting from a previous old-structure directory then, as high up as we can go and still be in the directory structure:
      - Fasta/Results_OldDate/OrthoFinder/Results_Date
    """
    FileHandler.CreateOutputDirectories(options, pfl, base_dir, fastaDir)
| davidemms/OrthoFinder | scripts_of/files.py | Python | gpl-3.0 | 41,519 | [
"BLAST"
] | da031b5190b76f3edc5c4a3c39e42bf97ae99d91c280a2741ed4fe96f50e38b8 |
# !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-08-04 14:25:03
# @Last modified by: Brian Cherinka
# @Last Modified time: 2017-08-10 23:08:13
from __future__ import print_function, division, absolute_import
from setuptools import setup, find_packages
import os
# Build install_requires from the adjacent requirements.txt, relaxing exact
# pins (``==``) to minimum versions (``>=``); comment and blank lines are
# skipped.  The file is opened in a ``with`` block so the handle is closed
# deterministically (the previous open() call leaked the file object).
requirements_file = os.path.join(os.path.dirname(__file__), 'requirements.txt')
with open(requirements_file) as requirements:
    install_requires = [
        line.strip().replace('==', '>=')
        for line in requirements
        if not line.strip().startswith('#') and line.strip() != ''
    ]
# Distribution identity.  The 'dev' suffix marks VERSION as a development
# (pre-release) build.
NAME = 'sciserver'
VERSION = '1.11.0dev'
# Register the distribution with setuptools.  The code lives under the
# python/ directory (hence the package_dir mapping), and install_requires
# is built above from requirements.txt.
setup(
    name=NAME,
    version=VERSION,
    license='BSD3',
    description='Python toolsuite for the SciServer product',
    author='SciServer Team',
    keywords='sdss sciserver',
    url='https://github.com/havok2063/SciScript-Python',
    packages=find_packages(where='python', exclude=['*egg-info']),
    package_dir={'': 'python'},
    install_requires=install_requires,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: MacOS X',
        'Framework :: Jupyter',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Database :: Front-Ends',
        'Topic :: Documentation :: Sphinx',
        'Topic :: Education :: Computer Aided Instruction (CAI)',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Scientific/Engineering :: Astronomy',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: User Interfaces'
    ],
)
| havok2063/SciScript-Python | setup.py | Python | apache-2.0 | 2,021 | [
"Brian"
] | 762a9597d511019ab67bfeab5765433da51238270037968e6a29eea0fc55dce7 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Brian Coca <bcoca@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: systemd
author:
- Ansible Core Team
version_added: "2.2"
short_description: Manage services
description:
- Controls systemd services on remote hosts.
options:
name:
description:
- Name of the service. This parameter takes the name of exactly one service to work with.
- When using in a chroot environment you always need to specify the full name i.e. (crond.service).
type: str
aliases: [ service, unit ]
state:
description:
- C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
C(restarted) will always bounce the service. C(reloaded) will always reload.
type: str
choices: [ reloaded, restarted, started, stopped ]
enabled:
description:
- Whether the service should start on boot. B(At least one of state and enabled are required.)
type: bool
force:
description:
- Whether to override existing symlinks.
type: bool
version_added: 2.6
masked:
description:
- Whether the unit should be masked or not, a masked unit is impossible to start.
type: bool
daemon_reload:
description:
- Run daemon-reload before doing any other operations, to make sure systemd has read any changes.
- When set to C(yes), runs daemon-reload even if the module does not start or stop anything.
type: bool
default: no
aliases: [ daemon-reload ]
daemon_reexec:
description:
- Run daemon_reexec command before doing any other operations, the systemd manager will serialize the manager state.
type: bool
default: no
aliases: [ daemon-reexec ]
version_added: "2.8"
scope:
description:
- run systemctl within a given service manager scope, either as the default system scope (system),
the current user's scope (user), or the scope of all users (global).
- "For systemd to work with 'user', the executing user must have its own instance of dbus started (systemd requirement).
The user dbus process is normally started during normal login, but not during the run of Ansible tasks.
Otherwise you will probably get a 'Failed to connect to bus: no such file or directory' error."
type: str
choices: [ system, user, global ]
default: system
version_added: "2.7"
no_block:
description:
- Do not synchronously wait for the requested operation to finish.
Enqueued job will continue without Ansible blocking on its completion.
type: bool
default: no
version_added: "2.3"
notes:
- Since 2.4, one of the following options is required 'state', 'enabled', 'masked', 'daemon_reload', ('daemon_reexec' since 2.8),
and all except 'daemon_reload' (and 'daemon_reexec' since 2.8) also require 'name'.
- Before 2.4 you always required 'name'.
- Globs are not supported in name, i.e ``postgres*.service``.
requirements:
- A system managed by systemd.
'''
EXAMPLES = '''
- name: Make sure a service is running
systemd:
state: started
name: httpd
- name: Stop service cron on debian, if running
systemd:
name: cron
state: stopped
- name: Restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
systemd:
state: restarted
daemon_reload: yes
name: crond
- name: Reload service httpd, in all cases
systemd:
name: httpd
state: reloaded
- name: Enable service httpd and ensure it is not masked
systemd:
name: httpd
enabled: yes
masked: no
- name: Enable a timer for dnf-automatic
systemd:
name: dnf-automatic.timer
state: started
enabled: yes
- name: Just force systemd to reread configs (2.4 and above)
systemd:
daemon_reload: yes
- name: Just force systemd to re-execute itself (2.8 and above)
systemd:
daemon_reexec: yes
'''
RETURN = '''
status:
description: A dictionary with the key=value pairs returned from `systemctl show`
returned: success
type: complex
sample: {
"ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ActiveEnterTimestampMonotonic": "8135942",
"ActiveExitTimestampMonotonic": "0",
"ActiveState": "active",
"After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice",
"AllowIsolate": "no",
"Before": "shutdown.target multi-user.target",
"BlockIOAccounting": "no",
"BlockIOWeight": "1000",
"CPUAccounting": "no",
"CPUSchedulingPolicy": "0",
"CPUSchedulingPriority": "0",
"CPUSchedulingResetOnFork": "no",
"CPUShares": "1024",
"CanIsolate": "no",
"CanReload": "yes",
"CanStart": "yes",
"CanStop": "yes",
"CapabilityBoundingSet": "18446744073709551615",
"ConditionResult": "yes",
"ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ConditionTimestampMonotonic": "7902742",
"Conflicts": "shutdown.target",
"ControlGroup": "/system.slice/crond.service",
"ControlPID": "0",
"DefaultDependencies": "yes",
"Delegate": "no",
"Description": "Command Scheduler",
"DevicePolicy": "auto",
"EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)",
"ExecMainCode": "0",
"ExecMainExitTimestampMonotonic": "0",
"ExecMainPID": "595",
"ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ExecMainStartTimestampMonotonic": "8134990",
"ExecMainStatus": "0",
"ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"FragmentPath": "/usr/lib/systemd/system/crond.service",
"GuessMainPID": "yes",
"IOScheduling": "0",
"Id": "crond.service",
"IgnoreOnIsolate": "no",
"IgnoreOnSnapshot": "no",
"IgnoreSIGPIPE": "yes",
"InactiveEnterTimestampMonotonic": "0",
"InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"InactiveExitTimestampMonotonic": "8135942",
"JobTimeoutUSec": "0",
"KillMode": "process",
"KillSignal": "15",
"LimitAS": "18446744073709551615",
"LimitCORE": "18446744073709551615",
"LimitCPU": "18446744073709551615",
"LimitDATA": "18446744073709551615",
"LimitFSIZE": "18446744073709551615",
"LimitLOCKS": "18446744073709551615",
"LimitMEMLOCK": "65536",
"LimitMSGQUEUE": "819200",
"LimitNICE": "0",
"LimitNOFILE": "4096",
"LimitNPROC": "3902",
"LimitRSS": "18446744073709551615",
"LimitRTPRIO": "0",
"LimitRTTIME": "18446744073709551615",
"LimitSIGPENDING": "3902",
"LimitSTACK": "18446744073709551615",
"LoadState": "loaded",
"MainPID": "595",
"MemoryAccounting": "no",
"MemoryLimit": "18446744073709551615",
"MountFlags": "0",
"Names": "crond.service",
"NeedDaemonReload": "no",
"Nice": "0",
"NoNewPrivileges": "no",
"NonBlocking": "no",
"NotifyAccess": "none",
"OOMScoreAdjust": "0",
"OnFailureIsolate": "no",
"PermissionsStartOnly": "no",
"PrivateNetwork": "no",
"PrivateTmp": "no",
"RefuseManualStart": "no",
"RefuseManualStop": "no",
"RemainAfterExit": "no",
"Requires": "basic.target",
"Restart": "no",
"RestartUSec": "100ms",
"Result": "success",
"RootDirectoryStartOnly": "no",
"SameProcessGroup": "no",
"SecureBits": "0",
"SendSIGHUP": "no",
"SendSIGKILL": "yes",
"Slice": "system.slice",
"StandardError": "inherit",
"StandardInput": "null",
"StandardOutput": "journal",
"StartLimitAction": "none",
"StartLimitBurst": "5",
"StartLimitInterval": "10000000",
"StatusErrno": "0",
"StopWhenUnneeded": "no",
"SubState": "running",
"SyslogLevelPrefix": "yes",
"SyslogPriority": "30",
"TTYReset": "no",
"TTYVHangup": "no",
"TTYVTDisallocate": "no",
"TimeoutStartUSec": "1min 30s",
"TimeoutStopUSec": "1min 30s",
"TimerSlackNSec": "50000",
"Transient": "no",
"Type": "simple",
"UMask": "0022",
"UnitFileState": "enabled",
"WantedBy": "multi-user.target",
"Wants": "system.slice",
"WatchdogTimestampMonotonic": "0",
"WatchdogUSec": "0",
}
''' # NOQA
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.facts.system.chroot import is_chroot
from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing
from ansible.module_utils._text import to_native
def is_running_service(service_status):
    """Return True when the unit's ActiveState says it is up.

    Both 'active' and 'activating' count as running for idempotency
    decisions made in main().
    """
    return service_status['ActiveState'] in ('active', 'activating')
def is_deactivating_service(service_status):
    """Return True when the unit is in the middle of shutting down."""
    return service_status['ActiveState'] == 'deactivating'
def request_was_ignored(out):
    """Return True when systemctl acknowledged but ignored the request.

    Real `systemctl show` output is key=value lines, so the presence of
    '=' means we got usable data; otherwise look for the wording systemd
    uses when it ignores a request or command.
    """
    if '=' in out:
        return False
    return 'ignoring request' in out or 'ignoring command' in out
def parse_systemctl_show(lines):
    """Convert ``systemctl show`` output lines into a dict.

    Values are normally single ``Key=value`` lines.  The only values known
    to span several lines are the ``Exec*`` entries, whose values are
    wrapped in ``{ ... }``.  Treating *every* ``{``-prefixed value as
    multi-line is unsafe: a value such as Description= may start with ``{``
    and never close it, which would swallow all remaining output and make
    Ansible report (cryptically) that the service file could not be found.
    Continuation is therefore only accepted for keys starting with 'Exec'.
    """
    parsed = {}
    pending_key = None   # key whose multi-line Exec value is being collected
    pending_lines = []   # continuation lines collected for pending_key
    for line in lines:
        if pending_key is not None:
            # Inside a multi-line Exec value: accumulate until a line that
            # ends with the closing brace.
            pending_lines.append(line)
            if line.rstrip().endswith('}'):
                parsed[pending_key] = '\n'.join(pending_lines).strip()
                pending_key = None
                pending_lines = []
            continue
        if '=' not in line:
            # Not a key=value line and not inside a multi-line value: skip.
            continue
        key, value = line.split('=', 1)
        if (key.startswith('Exec') and value.lstrip().startswith('{')
                and not value.rstrip().endswith('}')):
            # Opening line of a multi-line Exec value.
            pending_key = key
            pending_lines = [value]
        else:
            parsed[key] = value.strip()
    return parsed
# ===========================================
# Main control flow
def main():
    """Module entry point: manage one systemd unit per the module params.

    Flow: build the systemctl command line (scope / no-block / force
    switches), optionally run daemon-reload / daemon-reexec, then — for a
    named unit — gather its status via `systemctl show` and apply the
    requested masked / enabled / state changes, reporting through
    exit_json/fail_json.
    """
    # initialize
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', aliases=['service', 'unit']),
            state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped']),
            enabled=dict(type='bool'),
            force=dict(type='bool'),
            masked=dict(type='bool'),
            daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
            daemon_reexec=dict(type='bool', default=False, aliases=['daemon-reexec']),
            scope=dict(type='str', default='system', choices=['system', 'user', 'global']),
            no_block=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        required_one_of=[['state', 'enabled', 'masked', 'daemon_reload', 'daemon_reexec']],
        required_by=dict(
            state=('name', ),
            enabled=('name', ),
            masked=('name', ),
        ),
    )
    unit = module.params['name']
    # Glob characters are rejected outright: 'name' must identify exactly
    # one unit (documented limitation).
    if unit is not None:
        for globpattern in (r"*", r"?", r"["):
            if globpattern in unit:
                module.fail_json(msg="This module does not currently support using glob patterns, found '%s' in service name: %s" % (globpattern, unit))
    systemctl = module.get_bin_path('systemctl', True)
    # Provide a default XDG_RUNTIME_DIR when the environment lacks one
    # (presumably needed for --user scope systemctl calls — see the 'scope'
    # option docs about requiring a user dbus instance).
    if os.getenv('XDG_RUNTIME_DIR') is None:
        os.environ['XDG_RUNTIME_DIR'] = '/run/user/%s' % os.geteuid()
    ''' Set CLI options depending on params '''
    # if scope is 'system' or None, we can ignore as there is no extra switch.
    # The other choices match the corresponding switch
    if module.params['scope'] != 'system':
        systemctl += " --%s" % module.params['scope']
    if module.params['no_block']:
        systemctl += " --no-block"
    if module.params['force']:
        systemctl += " --force"
    rc = 0
    out = err = ''
    result = dict(
        name=unit,
        changed=False,
        status=dict(),
    )
    # Run daemon-reload first, if requested
    if module.params['daemon_reload'] and not module.check_mode:
        (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
        if rc != 0:
            module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))
    # Run daemon-reexec
    if module.params['daemon_reexec'] and not module.check_mode:
        (rc, out, err) = module.run_command("%s daemon-reexec" % (systemctl))
        if rc != 0:
            module.fail_json(msg='failure %d during daemon-reexec: %s' % (rc, err))
    if unit:
        found = False
        is_initd = sysv_exists(unit)
        is_systemd = False
        # check service data, cannot error out on rc as it changes across versions, assume not found
        (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
        if rc == 0 and not (request_was_ignored(out) or request_was_ignored(err)):
            # load return of systemctl show into dictionary for easy access and return
            if out:
                result['status'] = parse_systemctl_show(to_native(out).split('\n'))
                is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'
                is_masked = 'LoadState' in result['status'] and result['status']['LoadState'] == 'masked'
                # Check for loading error
                if is_systemd and not is_masked and 'LoadError' in result['status']:
                    module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))
        else:
            # 'systemctl show' gave nothing usable; probe with is-enabled,
            # then list-unit-files, before giving up.
            # list taken from man systemctl(1) for systemd 244
            valid_enabled_states = [
                "enabled",
                "enabled-runtime",
                "linked",
                "linked-runtime",
                "masked",
                "masked-runtime",
                "static",
                "indirect",
                "disabled",
                "generated",
                "transient"]
            (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
            if out.strip() in valid_enabled_states:
                is_systemd = True
            else:
                # fallback list-unit-files as show does not work on some systems (chroot)
                # not used as primary as it skips some services (like those using init.d) and requires .service/etc notation
                (rc, out, err) = module.run_command("%s list-unit-files '%s'" % (systemctl, unit))
                if rc == 0:
                    is_systemd = True
                else:
                    # Check for systemctl command
                    module.run_command(systemctl, check_rc=True)
        # Does service exist?
        found = is_systemd or is_initd
        if is_initd and not is_systemd:
            module.warn('The service (%s) is actually an init script but the system is managed by systemd' % unit)
        # mask/unmask the service, if requested, can operate on services before they are installed
        if module.params['masked'] is not None:
            # state is not masked unless systemd affirms otherwise
            (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
            masked = out.strip() == "masked"
            if masked != module.params['masked']:
                result['changed'] = True
                if module.params['masked']:
                    action = 'mask'
                else:
                    action = 'unmask'
                if not module.check_mode:
                    (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        # some versions of system CAN mask/unmask non existing services, we only fail on missing if they don't
                        fail_if_missing(module, found, unit, msg='host')
        # Enable/disable service startup at boot if requested
        if module.params['enabled'] is not None:
            if module.params['enabled']:
                action = 'enable'
            else:
                action = 'disable'
            fail_if_missing(module, found, unit, msg='host')
            # do we need to enable the service?
            enabled = False
            (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
            # check systemctl result or if it is a init script
            if rc == 0:
                enabled = True
            elif rc == 1:
                # if not a user or global user service and both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries
                if module.params['scope'] == 'system' and \
                    is_initd and \
                    not out.strip().endswith('disabled') and \
                    sysv_is_enabled(unit):
                    enabled = True
            # default to current state
            result['enabled'] = enabled
            # Change enable/disable if needed
            if enabled != module.params['enabled']:
                result['changed'] = True
                if not module.check_mode:
                    (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))
                result['enabled'] = not enabled
        # set service state if requested
        if module.params['state'] is not None:
            fail_if_missing(module, found, unit, msg="host")
            # default to desired state
            result['state'] = module.params['state']
            # What is current service state?
            if 'ActiveState' in result['status']:
                action = None
                if module.params['state'] == 'started':
                    if not is_running_service(result['status']):
                        action = 'start'
                elif module.params['state'] == 'stopped':
                    if is_running_service(result['status']) or is_deactivating_service(result['status']):
                        action = 'stop'
                else:
                    # restarted/reloaded: a stopped unit is simply started
                    if not is_running_service(result['status']):
                        action = 'start'
                    else:
                        action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
                    result['state'] = 'started'
                if action:
                    result['changed'] = True
                    if not module.check_mode:
                        (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                        if rc != 0:
                            module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
            # check for chroot
            elif is_chroot(module) or os.environ.get('SYSTEMD_OFFLINE') == '1':
                module.warn("Target is a chroot or systemd is offline. This can lead to false positives or prevent the init system tools from working.")
            else:
                # this should not happen?
                module.fail_json(msg="Service is in unknown state", status=result['status'])
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| j-carl/ansible | lib/ansible/modules/systemd.py | Python | gpl-3.0 | 21,464 | [
"Brian"
] | 6738fcd4b78cb0198c2ea2eaa42e9c8302b0279b17bd7e0b622bf7c98b852d8a |
# -*- coding: utf-8 -*-
# see:
# https://github.com/carthage-college/django-djpsilobus/blob/2841d7aa2e9a7e41fcbfd12533f3266b1966778b/djpsilobus/core/data.py
# for last stable version of this file before the archivist moved all of the collections
# around and all of the collection IDs changed as a result.
#
# Academic division codes mapped to their display names.
DIVISIONS = {
    'ACPR': 'All College Programs',
    'ARHU': 'Arts and Humanities',
    'NSSS': 'Natural and Social Science',
    'PRST': 'Professional Studies',
}
DEPARTMENTS = {
'ACC': 'b4cc11fa-a628-4640-a6e2-9ed3c71cb034', # Accounting
'AFR': '5de10907-54b1-4067-9a2b-3108bf013534', # African Studies
'AHS': '1bca3f3f-fccb-4e31-a688-e25b5ae09f2b', # Allied Health Science (major in EXS department)
#'AHS': '21e9a99f-2c4e-4f22-8adb-16b039c1209c4', # Allied Health Science
'ARH': '0cf2bd73-5d8d-47fc-a98a-5bc2ba149941', # Art History
'ART': '0e8724db-2d2b-4eb7-8c33-080817c8432b', # Art
'ASN': '650cb058-c518-42eb-b31f-3344e3903fd4', # Asian Studies
'_ASN': '650cb058-c518-42eb-b31f-3344e3903fd4', # Asian Studies
'ATH': 'd3316ea5-11a1-495d-bc38-760ba223042e', # Athletic Training
'_ATH': 'd3316ea5-11a1-495d-bc38-760ba223042e', # Athletic Training
'BIO': 'ba717219-9369-4373-a52b-f73850312fbc', # Biology
'BUS': 'abf1f60c-feff-4e9c-bb0f-588091438195', # Business
'CHM': '7776a795-79a3-4250-b9f0-ef9d3430e4d6', # Chemistry
'CHN': '2187bca6-4cd6-48f1-b809-04c94962b8c2', # Chinese
'CLS': 'ecb00450-5c35-440e-bb1b-71da8914e684', # Classics
'CDM': '171c42e9-5bbd-4132-bec2-9bf6685430fd', # Communications and Digital Media
'CSC': 'ac5859fb-6268-4c78-810d-0f1a385e899b', # Computer Science
'CONF': 'Conference', # Conference
'ADUL': 'Continuing Studies', # Continuing Studies
'COR': '8bb7b320-fa7d-472b-acbf-861e76964327', # Core
'CRJ': '73b6452b-8cce-476f-8648-1caa277d147d', # Criminal Justice
'_CRJ': '73b6452b-8cce-476f-8648-1caa277d147d', # Criminal Justice
'DIS': 'Discovery Program', # Discovery Program
'_DIS': 'Discovery Program', # Discovery Program
'DNC': 'b8269663-1155-4ab9-994d-3b9c664fb5bb', # Dance (major in theatre dept)
'ECN': '54ef63b3-d80d-4267-8421-e9358dea4187', # Economics
'EDU': 'c72ad5e9-363d-48b5-a254-6802876e7a21', # Education
'EGR': '86f61367-10fd-425d-b5f4-f816a408ef65', # Engineering Science
'ENG': 'b233ed81-0a4c-4ee2-b2bd-60f23e51d4c7', # English
'ENV': '25c88996-ba22-403d-8193-953660142df1', # Environmental Science
'_ENV': '25c88996-ba22-403d-8193-953660142df1', # Environmental Science
'EXS': '1bca3f3f-fccb-4e31-a688-e25b5ae09f2b', # Exercise and Sport Science
'FAC': 'Finance and Accounting', # Sub-Community
'FAR': '0e8724db-2d2b-4eb7-8c33-080817c8432b', # Fine Arts (sub of Art)
'FIN': '3b86d78f-cda2-4721-9b23-d3c4101da98d', # Finance
'FRN': 'd2b861a4-5b17-4c20-bc6b-4092a1d3bb2b', # French
'GBL': 'a97cfedb-8ab6-4f9c-aabb-1a1672cb51ac', # Global Heritage Program
'_GBL': 'a97cfedb-8ab6-4f9c-aabb-1a1672cb51ac', # Global Heritage Program
'GNR': '900adebd-8466-4d97-b350-d8b087820333', # General
'_GNR': '900adebd-8466-4d97-b350-d8b087820333', # General
'GEO': '0f251de2-f815-419c-adb2-75b614399362', # Geospatial Science
'GRM': 'e6cdde3d-0322-42bc-9f9c-0fbf7c89dfad', # German
'GFW': 'c3e3d79f-04d6-4c3b-a667-3edb8d4d383a', # Great Ideas
'_GFW': 'c3e3d79f-04d6-4c3b-a667-3edb8d4d383a', # Great Ideas
'GRK': '6f2dfb0a-25a0-4cdb-805e-a3e9dc2707ab', # Greek
'HIS': '262c408e-d666-445d-835b-cd6aafa98f22', # History
'HON': 'ff05194e-addb-47f2-a4a0-fdce0ccaa5c6', # Honors Program
'_HON': 'ff05194e-addb-47f2-a4a0-fdce0ccaa5c6', # Honors Program
'IPE': 'e63824f8-2468-4171-917d-d8b3ccc11074', # International Political Economy
'_IPE': 'e63824f8-2468-4171-917d-d8b3ccc11074', # International Political Economy
'JPN': '6cfa8b16-d819-46c5-8054-e6bac0c4c42b', # Japanese
'LTN': 'c2e11797-940d-4637-ab7d-bd2cb524b09b', # Latin
'MGT': '2023a6b2-105a-4c7e-97a4-28c185043778', # Management
'MKT': 'fcae1391-4f18-4fd9-8d32-f83f35bcb455', # Marketing
'MTH': 'c2c0e59c-dfa5-42d4-9a29-d9c447f45595', # Mathematics
'MLA': 'a4b425d0-1c63-4966-89a7-4291f127d3dd', # Modern Languages
'MUS': '165ddae4-e3d9-4110-b478-d85f2d70dde1', # Music
'NEU': '78f7d0c0-eb53-41c5-bb0f-5e6ea28eed87', # Neuroscience Program
'_NEU': '78f7d0c0-eb53-41c5-bb0f-5e6ea28eed87', # Neuroscience Program
'NSG': 'f70fa889-311d-4e97-84cc-dfa80a2c5c2a', # Nursing
'PARA': 'Paralegal Program', # Paralegal
'PEH': '51486552-ecdf-47ba-aceb-d5797048e63f', # Physical Education/Health
'_PEH': '51486552-ecdf-47ba-aceb-d5797048e63f', # Physical Education/Health
'PHL': 'cdef6494-533b-40f0-a095-e14a9c450090', # Philosophy
'PHY': '80c78d08-a4d8-427f-bcf4-dded67fbe0be', # Physics and Astronomy
'POL': 'd7c186d4-e028-4f18-ae3a-beb831993b8a', # Political Science
'PYC': 'd66882fd-f7e1-4ee4-8bbf-a31e4e7ddce7', # Psychological Science
'REL': '0e1c1944-1255-4191-a74a-52572def589f', # Religion
'ESN': 'Science Works Program', # Science Works
'_ESN': 'Science Works Program', # Science Works
'SSC': '5bc1aa3d-3e90-40d4-9297-64262acf2dd7', # Social Science Program
'_SSC': '5bc1aa3d-3e90-40d4-9297-64262acf2dd7', # Social Science Program
'SWK': '691ca37c-819c-464d-adb8-79de4f29bab0', # Social Work
'SOC': '4f50e65a-5a0e-4141-8620-bef33fbd6101', # Sociology
'SPN': 'cc6cc373-6bb6-43d9-9822-7362c0af337e', # Spanish
'THR': 'b8269663-1155-4ab9-994d-3b9c664fb5bb', # Theatre
'WHE': '88cc91a4-8004-4513-8446-7b12ee1becde', # Western Heritage Program
'_WHE': '88cc91a4-8004-4513-8446-7b12ee1becde', # Western Heritage Program
'WMG': 'e352433f-b40b-4a14-ad84-9ce95d4f7a41', # Women's and Gender Studies
'_WMG': 'e352433f-b40b-4a14-ad84-9ce95d4f7a41', # Women's and Gender Studies
}
# each name maps to a value that should be used as the Department code
# e.g. FRN (French) is an MLA (Modern Languages) Department.
# used only for file paths and where to store the file locally.
# see ~207 line number: sendero = os.path.join()
# we should remove this bit of code since we really do not need to store
# the files locally.
# (Keys are subject/department prefixes; values are the department codes
# under which files are stored — many point at the underscore-prefixed
# aliases that also appear in DEPARTMENTS above.)
DEPARTMENT_EXCEPTIONS = {
    'AHS': 'EXS',
    'COR': '_WHE',
    'ESN': '_ESN',
    'EDUC': 'EDU',
    'MGT': 'MMK',
    'ARH': 'ART',
    'JPN': 'MLA',
    'FRN': 'MLA',
    'GRM': 'MLA',
    'SPN': 'MLA',
    'CHN': 'MLA',
    'MKT': 'MMK',
    'GRK': 'CLS',
    'LTN': 'CLS',
    'IPE': '_IPE',
    'ACC': 'FAC',
    'FIN': 'FAC',
    'SSC': '_SSC',
    'ASN': '_ASN',
    'NEU': '_NEU',
    'NSG': 'NUR',
    'DIS': '_DIS',
    'GFW': '_GFW',
    'WHE': '_WHE',
    'WMG': '_WMG',
    'PEH': '_PEH',
    'GBL': '_GBL',
    'CRJ': '_CRJ',
    'ATH': '_ATH',
    'GNR': '_GNR',
    'DNC': 'THR',
    'ENV': '_ENV',
    'FAR': 'ART',
    'NAT': '_GNR',
}
# metadata for creating a new item in a collection
# Skeleton Dublin Core payload: callers fill in the empty 'value' fields
# before submission.
ITEM_METADATA = {
    'metadata': [
        {
            'key': 'dc.contributor.author',
            'value': '',
        },
        {
            'key': 'dc.description',
            'language': 'en_US',
            'value': '',
        },
        {
            'key': 'dc.title',
            'language': 'en_US',
            'value': '',
        },
        {
            'key': 'dc.title.alternative',
            'language': 'en_US',
            'value': '',
        },
        {
            'key': 'dc.subject',
            'language': 'en_US',
            'value': '',
        },
        # NOTE(review): 'dc.subject' appears twice — presumably two subject
        # slots are expected by the consumer; confirm before deduplicating.
        {
            'key': 'dc.subject',
            'language': 'en_US',
            'value': '',
        },
    ],
}
# Column headers for the course/faculty export.
HEADERS = [
    'Course Number',
    # NOTE(review): 'Catelog' (sic) kept as-is — if consumers match this
    # header text verbatim, correcting the spelling would break them; confirm.
    'Catelog Year',
    'Year',
    'Session',
    'Section',
    'Sub-Session',
    'Course Title',
    'Section Title',
    'Faculty ID',
    'Faculty First name',
    'Faculty Lastname',
    'Faculty Full Name',
    'Needs Syllabus',
    'Status',
]
| carthage-college/django-djpsilobus | djpsilobus/core/data.py | Python | mit | 8,227 | [
"FEFF"
] | e0981fd9a27f0156b40bcb505e0041d37657fe8a6a97a4757fa9c2d913c8c8b1 |
"""
"""
import os, sys, posixpath
import py
# Moved from local.py.
iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt')
class Checkers:
    """Named predicate collection backing :meth:`PathBase.check`.

    Keyword arguments passed to ``path.check(...)`` are resolved to methods
    on this class by name; a ``not`` prefix (e.g. ``notfile=1``) inverts the
    checker's result.  Filesystem-specific checkers (dir/file/exists — and
    'link', which is not defined here and is presumably provided by
    subclasses) are implemented by concrete subclasses.
    """
    # Checkers whose outcome depends on the path actually existing; used as
    # a fallback in _evaluate() when stat'ing the path raises.
    _depend_on_existence = 'exists', 'link', 'dir', 'file'
    def __init__(self, path):
        self.path = path
    def dir(self):
        # implemented by concrete subclasses
        raise NotImplementedError
    def file(self):
        # implemented by concrete subclasses
        raise NotImplementedError
    def dotfile(self):
        # basename starts with '.' (unix hidden-file convention)
        return self.path.basename.startswith('.')
    def ext(self, arg):
        # normalize 'py' -> '.py' so both spellings are accepted
        if not arg.startswith('.'):
            arg = '.' + arg
        return self.path.ext == arg
    def exists(self):
        # implemented by concrete subclasses
        raise NotImplementedError
    def basename(self, arg):
        return self.path.basename == arg
    def basestarts(self, arg):
        return self.path.basename.startswith(arg)
    def relto(self, arg):
        return self.path.relto(arg)
    def fnmatch(self, arg):
        return self.path.fnmatch(arg)
    def endswith(self, arg):
        return str(self.path).endswith(arg)
    def _evaluate(self, kw):
        """Return True only if every checker kwarg in *kw* holds."""
        for name, value in kw.items():
            invert = False
            meth = None
            try:
                meth = getattr(self, name)
            except AttributeError:
                # 'notfoo=1' means: invert the result of checker 'foo'
                if name[:3] == 'not':
                    invert = True
                    try:
                        meth = getattr(self, name[3:])
                    except AttributeError:
                        pass
            if meth is None:
                raise TypeError(
                    "no %r checker available for %r" % (name, self.path))
            try:
                # Checkers taking an argument receive the kwarg value;
                # parameterless checkers are compared against its truthiness.
                if py.code.getrawcode(meth).co_argcount > 1:
                    if (not meth(value)) ^ invert:
                        return False
                else:
                    if bool(value) ^ bool(meth()) ^ invert:
                        return False
            except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY):
                # EBUSY feels not entirely correct,
                # but its kind of necessary since ENOMEDIUM
                # is not accessible in python
                # The path cannot be stat'ed: decide existence-dependent
                # checkers directly from what the caller asked for.
                for name in self._depend_on_existence:
                    if name in kw:
                        if kw.get(name):
                            return False
                    name = 'not' + name
                    if name in kw:
                        if not kw.get(name):
                            return False
        return True
class NeverRaised(Exception):
    """Exception class that nothing ever raises.

    Used as the default ``ignore`` argument of :meth:`PathBase.visit`, so
    that by default no exception is suppressed during traversal.
    """
    pass
class PathBase(object):
""" shared implementation for filesystem path objects."""
Checkers = Checkers
def __div__(self, other):
return self.join(str(other))
__truediv__ = __div__ # py3k
def basename(self):
""" basename part of path. """
return self._getbyspec('basename')[0]
basename = property(basename, None, None, basename.__doc__)
def dirname(self):
""" dirname part of path. """
return self._getbyspec('dirname')[0]
dirname = property(dirname, None, None, dirname.__doc__)
def purebasename(self):
""" pure base name of the path."""
return self._getbyspec('purebasename')[0]
purebasename = property(purebasename, None, None, purebasename.__doc__)
def ext(self):
""" extension of the path (including the '.')."""
return self._getbyspec('ext')[0]
ext = property(ext, None, None, ext.__doc__)
def dirpath(self, *args, **kwargs):
""" return the directory path joined with any given path arguments. """
return self.new(basename='').join(*args, **kwargs)
def read_binary(self):
""" read and return a bytestring from reading the path. """
with self.open('rb') as f:
return f.read()
def read_text(self, encoding):
""" read and return a Unicode string from reading the path. """
with self.open("r", encoding=encoding) as f:
return f.read()
def read(self, mode='r'):
""" read and return a bytestring from reading the path. """
with self.open(mode) as f:
return f.read()
def readlines(self, cr=1):
""" read and return a list of lines from the path. if cr is False, the
newline will be removed from the end of each line. """
if not cr:
content = self.read('rU')
return content.split('\n')
else:
f = self.open('rU')
try:
return f.readlines()
finally:
f.close()
def load(self):
""" (deprecated) return object unpickled from self.read() """
f = self.open('rb')
try:
return py.error.checked_call(py.std.pickle.load, f)
finally:
f.close()
def move(self, target):
""" move this path to target. """
if target.relto(self):
raise py.error.EINVAL(target,
"cannot move path into a subdirectory of itself")
try:
self.rename(target)
except py.error.EXDEV: # invalid cross-device link
self.copy(target)
self.remove()
def __repr__(self):
""" return a string representation of this path. """
return repr(str(self))
def check(self, **kw):
""" check a path for existence and properties.
Without arguments, return True if the path exists, otherwise False.
valid checkers::
file=1 # is a file
file=0 # is not a file (may not even exist)
dir=1 # is a dir
link=1 # is a link
exists=1 # exists
You can specify multiple checker definitions, for example::
path.check(file=1, link=1) # a link pointing to a file
"""
if not kw:
kw = {'exists' : 1}
return self.Checkers(self)._evaluate(kw)
def fnmatch(self, pattern):
"""return true if the basename/fullname matches the glob-'pattern'.
valid pattern characters::
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
If the pattern contains a path-separator then the full path
is used for pattern matching and a '*' is prepended to the
pattern.
if the pattern doesn't contain a path-separator the pattern
is only matched against the basename.
"""
return FNMatcher(pattern)(self)
def relto(self, relpath):
""" return a string which is the relative part of the path
to the given 'relpath'.
"""
if not isinstance(relpath, (str, PathBase)):
raise TypeError("%r: not a string or path object" %(relpath,))
strrelpath = str(relpath)
if strrelpath and strrelpath[-1] != self.sep:
strrelpath += self.sep
#assert strrelpath[-1] == self.sep
#assert strrelpath[-2] != self.sep
strself = str(self)
if sys.platform == "win32" or getattr(os, '_name', None) == 'nt':
if os.path.normcase(strself).startswith(
os.path.normcase(strrelpath)):
return strself[len(strrelpath):]
elif strself.startswith(strrelpath):
return strself[len(strrelpath):]
return ""
def ensure_dir(self, *args):
""" ensure the path joined with args is a directory. """
return self.ensure(*args, **{"dir": True})
def bestrelpath(self, dest):
""" return a string which is a relative path from self
(assumed to be a directory) to dest such that
self.join(bestrelpath) == dest and if not such
path can be determined return dest.
"""
try:
if self == dest:
return os.curdir
base = self.common(dest)
if not base: # can be the case on windows
return str(dest)
self2base = self.relto(base)
reldest = dest.relto(base)
if self2base:
n = self2base.count(self.sep) + 1
else:
n = 0
l = [os.pardir] * n
if reldest:
l.append(reldest)
target = dest.sep.join(l)
return target
except AttributeError:
return str(dest)
def exists(self):
    """ return True if the path exists (any kind of entry). """
    return self.check()
def isdir(self):
    """ return True if the path points to a directory. """
    return self.check(dir=1)
def isfile(self):
    """ return True if the path points to a regular file. """
    return self.check(file=1)
def parts(self, reverse=False):
    """Return all ancestor directories plus the path itself.

    The list is ordered root-first unless ``reverse`` is true.
    """
    chain = [self]
    node = self
    while True:
        parent = node.dirpath()
        if parent == node:
            # dirpath() of the root returns itself -- stop there.
            break
        chain.append(parent)
        node = parent
    if not reverse:
        chain.reverse()
    return chain
def common(self, other):
    """Return the deepest ancestor path shared with ``other``,
    or None if the two paths have no common part.
    """
    shared = None
    for own_part, other_part in zip(self.parts(), other.parts()):
        if own_part != other_part:
            break
        shared = own_part
    return shared
def __add__(self, other):
    """ return new path object with 'other' added to the basename"""
    return self.new(basename=self.basename+str(other))
def __cmp__(self, other):
    """ return sort value (-1, 0, +1). """
    # Python 2 only: relies on the builtin cmp(); on Python 3 ordering
    # comes from __lt__ below instead.
    try:
        return cmp(self.strpath, other.strpath)
    except AttributeError:
        # 'other' has no strpath -- fall back to string comparison.
        return cmp(str(self), str(other)) # self.path, other.path)
def __lt__(self, other):
    # Order by the underlying path string.
    try:
        return self.strpath < other.strpath
    except AttributeError:
        return str(self) < str(other)
def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
    """Yield every path below the current one.

    ``fil``: filter (glob pattern or callable); paths not matching it
    are not yielded.  Default None: everything is yielded.
    ``rec``: filter (glob pattern or callable) that controls whether a
    directory is descended into.  Default None: descend everywhere.
    ``ignore``: exception class that is ignored when calling dirlist()
    on any of the paths (by default, all exceptions are reported).
    ``bf``: if True, walk breadth-first instead of the default
    depth-first.
    ``sort``: if True, sort entries within each directory level.
    """
    walker = Visitor(fil, rec, ignore, bf, sort)
    for entry in walker.gen(self):
        yield entry
def _sortlist(self, res, sort):
    # Sort 'res' in place if requested.  A callable 'sort' is passed as
    # the comparison function (Python 2 style list.sort(cmp)).
    if sort:
        if hasattr(sort, '__call__'):
            res.sort(sort)
        else:
            res.sort()
def samefile(self, other):
    """ return True if other refers to the same stat object as self. """
    # NOTE(review): this base implementation only compares path strings,
    # not stat identity -- presumably filesystem subclasses override it
    # with a real os-level check; confirm before relying on it.
    return self.strpath == str(other)
class Visitor:
    """Directory-tree walker backing PathBase.visit()."""
    def __init__(self, fil, rec, ignore, bf, sort):
        # Glob-pattern strings are promoted to FNMatcher callables.
        if isinstance(fil, str):
            fil = FNMatcher(fil)
        if isinstance(rec, str):
            self.rec = FNMatcher(rec)
        elif not hasattr(rec, '__call__') and rec:
            # Any other truthy non-callable means "recurse everywhere".
            self.rec = lambda path: True
        else:
            # A callable is used as-is; a falsy value stays as-is and is
            # treated by gen() as "no recursion filter".
            self.rec = rec
        self.fil = fil
        self.ignore = ignore
        self.breadthfirst = bf
        # Python 2 ternary idiom: sorted() when sort is truthy, identity
        # function otherwise.
        self.optsort = sort and sorted or (lambda x: x)
    def gen(self, path):
        # Recursively yield entries below 'path'; exceptions of class
        # self.ignore raised by listdir() silently prune that subtree.
        try:
            entries = path.listdir()
        except self.ignore:
            return
        rec = self.rec
        # Subdirectories to descend into, filtered by the rec predicate.
        dirs = self.optsort([p for p in entries
                             if p.check(dir=1) and (rec is None or rec(p))])
        if not self.breadthfirst:
            # depth-first: recurse before yielding this level's entries
            for subdir in dirs:
                for p in self.gen(subdir):
                    yield p
        for p in self.optsort(entries):
            if self.fil is None or self.fil(p):
                yield p
        if self.breadthfirst:
            # breadth-first: this level first, then recurse
            for subdir in dirs:
                for p in self.gen(subdir):
                    yield p
class FNMatcher:
    """Callable that matches a path object against a glob pattern."""
    def __init__(self, pattern):
        self.pattern = pattern
    def __call__(self, path):
        """Return True if ``path`` matches the stored pattern."""
        pattern = self.pattern
        if (path.sep not in pattern and
                iswin32 and
                posixpath.sep in pattern):
            # Running on Windows with a pattern written using posix
            # separators only: normalize to the native separator.
            pattern = pattern.replace(posixpath.sep, path.sep)
        if path.sep in pattern:
            # The pattern addresses the full path; anchor relative
            # patterns with a leading '*' so any prefix matches.
            name = str(path) # path.strpath # XXX svn?
            if not os.path.isabs(pattern):
                pattern = '*' + path.sep + pattern
        else:
            name = path.basename
        return py.std.fnmatch.fnmatch(name, pattern)
| WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/py/_path/common.py | Python | bsd-3-clause | 13,045 | [
"VisIt"
] | 04209f58299fcdd5bc17c5892d1ca8ae24c9d0a233173645d083c821a9d9b40c |
"""
Extracted the state propagation bits to individual functions
"""
import logging
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spl
from .utils import block_diag
class NoHessianMethod(Exception):
    """An exception triggered when the forward model isn't able to provide an
    estimation of the Hessian"""
    def __init__(self, message):
        # Forward the message to Exception so that str(exc) and exc.args
        # work as expected (the original skipped this, so str(exc) was
        # always empty); keep .message for backwards compatibility.
        super().__init__(message)
        self.message = message
def band_selecta(band):
    """Return the state-vector indices active for the given band.
    Index 6 appears in both sets -- NOTE(review): confirm the parameter
    ordering against the 7-element TIP state vector."""
    selection = [0, 1, 6, 2] if band == 0 else [3, 4, 6, 5]
    return np.array(selection)


def hessian_correction_pixel(gp, x0, C_obs_inv, innovation, band, nparams):
    """Embed the emulator Hessian over one pixel's active parameters into
    the full (nparams x nparams) block, weighted by the observational
    precision and the innovation."""
    active = band_selecta(band)
    ddH = gp.hessian(np.atleast_2d(x0[active]))
    ddH_small = ddH.squeeze()
    ddH_full = np.zeros((nparams, nparams))
    for i, row in enumerate(active):
        for j, col in enumerate(active):
            ddH_full[row, col] = ddH_small[i, j]
    return ddH_full * C_obs_inv * innovation
def hessian_correction(gp, x0, R_mat, innovation, mask, state_mask, band,
                       nparams):
    """Calculates higher order Hessian correction for the likelihood term.
    Needs the GP, the Observational uncertainty, the mask....

    Returns a block-diagonal matrix with one (nparams x nparams) block per
    pixel inside `state_mask`; masked-out pixels contribute zero blocks.
    Returns 0. when the forward model has no Hessian method.
    """
    if not hasattr(gp, "hessian"):
        # The observation operator does not provide a Hessian method. We just
        # return 0, meaning no Hessian correction.
        return 0.
    # Per-pixel observational precision, restricted to the state mask.
    C_obs_inv = R_mat.diagonal()[state_mask.flatten()]
    mask = mask[state_mask].flatten()
    little_hess = []
    for i, (innov, C, m) in enumerate(zip(innovation, C_obs_inv, mask)):
        if not m:
            # Pixel is masked
            hessian_corr = np.zeros((nparams, nparams))
        else:
            # Get state for current pixel
            x0_pixel = x0.squeeze()[(nparams*i):(nparams*(i + 1))]
            # Calculate the Hessian correction for this pixel
            # (m is truthy here, so `m *` leaves the block unchanged up to
            # the mask value's numeric cast).
            hessian_corr = m * hessian_correction_pixel(gp, x0_pixel, C,
                                                        innov, band, nparams)
        little_hess.append(hessian_corr)
    hessian_corr = block_diag(little_hess)
    return hessian_corr
def hessian_correction_multiband(gp, x0, R_mats, innovations, masks, state_mask, n_bands,
                                 nparams):
    """Non-linear (Hessian) correction of the cost function handling
    several bands: the per-band corrections are summed."""
    per_band = [hessian_correction(gp, x0, R, innov, mask, state_mask,
                                   band, nparams)
                for R, innov, mask, band in zip(R_mats, innovations, masks,
                                                range(n_bands))]
    return sum(per_band)
def blend_prior(prior_mean, prior_cov_inverse, x_forecast, P_forecast_inverse):
    """
    Combine prior mean/inverse covariance with the mean/inverse covariance
    from the previous timestep as the product of Gaussian distributions.

    :param prior_mean: 1D array
        The prior mean
    :param prior_cov_inverse: sparse array
        The inverse covariance matrix of the prior
    :param x_forecast: 1D array propagated from the previous time step
    :param P_forecast_inverse: sparse inverse covariance of the forecast
    :return: the combined mean and inverse covariance matrix
    """
    # For a product of Gaussians the precisions add...
    combined_cov_inv = P_forecast_inverse + prior_cov_inverse
    # ...and each mean is weighted by its *own* precision:
    #   b = C_prior^-1 mu_prior + C_forecast^-1 mu_forecast.
    # BUG FIX: the original paired each precision with the *other*
    # distribution's mean, which biased the blended state.
    b = prior_cov_inverse.dot(prior_mean) + P_forecast_inverse.dot(x_forecast)
    b = b.astype(np.float32)
    # Solve combined_cov_inv @ x = b for the combined mean.
    AI = sp.linalg.splu(combined_cov_inv.tocsc())
    x_combined = AI.solve(b)
    return x_combined, combined_cov_inv
def tip_prior():
    """The JRC-TIP prior in a convenient function which is fun for the
    whole family.  The effective LAI is defined in transformed space,
    TLAI = exp(-0.5*LAIe).

    Returns
    -------
    The mean prior vector, the covariance matrix and its inverse.
    """
    # 1-sigma spreads (TLAI roughly spans 0..7 at 1 sigma)
    sigma = np.array([0.12, 0.7, 0.0959, 0.15, 1.5, 0.2, 0.5])
    mean = np.array([0.17, 1.0, 0.1, 0.7, 2.0, 0.18, np.exp(-0.5 * 1.5)])
    covariance = np.diag(sigma ** 2).astype(np.float32)
    # Correlation of 0.8862 between parameters 2 and 5 (symmetric entry).
    covariance[5, 2] = covariance[2, 5] = 0.8862 * 0.0959 * 0.2
    inverse_covariance = np.linalg.inv(covariance)
    return mean, covariance, inverse_covariance
def tip_prior_noLAI(prior):
    """Placeholder for a TIP prior without the LAI component.

    The original body called ``tip_prior(prior)`` -- which takes no
    arguments -- and unpacked two values from its three-element return,
    so every call raised ``TypeError`` and nothing was returned.  Until
    the intended behaviour is defined, fail loudly and explicitly.
    """
    n_pixels = prior['n_pixels']  # preserve the original's key access
    raise NotImplementedError(
        "tip_prior_noLAI is not implemented yet (requested %d pixels)"
        % n_pixels)
def tip_prior_full(prior):
    """Build the TIP prior replicated over every pixel.

    This is yet to be properly defined: for now ``prior`` only needs to
    provide 'n_pixels', and the single-pixel TIP prior is tiled over the
    whole grid.  It will be replaced once the real priors are known.
    """
    x_single, _, c_inv_single = tip_prior()
    n_pixels = prior['n_pixels']
    mean = np.tile(x_single, n_pixels)
    inverse_blocks = [c_inv_single] * n_pixels
    prior_cov_inverse = block_diag(inverse_blocks, dtype=np.float32)
    return mean, prior_cov_inverse
def propagate_and_blend_prior(x_analysis, P_analysis, P_analysis_inverse,
                              M_matrix, Q_matrix,
                              prior=None, state_propagator=None, date=None):
    """Propagate the state (if a propagator is given), fetch the prior
    for `date` (if a prior object is given), and blend the two as a
    product of Gaussians.

    :param x_analysis: analysis state vector
    :param P_analysis: analysis covariance matrix (may be None)
    :param P_analysis_inverse: analysis inverse covariance matrix
    :param M_matrix: linear state propagation model
    :param Q_matrix: additive state-uncertainty inflation matrix
    :param prior: object exposing ``process_prior(date, inv_cov=True)``
        returning (mean, inverse covariance) for that date
    :param state_propagator: callable with the propagator signature
    :param date: time-step identifier handed to the prior
    :return: (mean, covariance or None, inverse covariance) from
        whichever of prior/propagator is available, blended if both;
        (None, None, None) if neither is given.
    """
    if state_propagator is not None:
        x_forecast, P_forecast, P_forecast_inverse = state_propagator(
            x_analysis, P_analysis, P_analysis_inverse, M_matrix, Q_matrix)
    if prior is not None:
        # Prior should call `process_prior` method of prior object
        # this requires a list of parameters, the date and the state grid (a GDAL-
        # readable file)
        prior_mean, prior_cov_inverse = prior.process_prior(date, inv_cov=True)
    if prior is not None and state_propagator is not None:
        # Both available: blend them as a product of Gaussians.
        x_combined, combined_cov_inv = blend_prior(prior_mean, prior_cov_inverse,
                                                   x_forecast, P_forecast_inverse)
        return x_combined, None, combined_cov_inv
    elif prior is not None:
        return prior_mean, None, prior_cov_inverse
    elif state_propagator is not None:
        return x_forecast, P_forecast, P_forecast_inverse
    else:
        # Clearly not getting a prior here
        return None, None, None
def propagate_standard_kalman(x_analysis, P_analysis, P_analysis_inverse,
                              M_matrix, Q_matrix,
                              prior=None, state_propagator=None, date=None):
    """Standard Kalman-filter time update with a linear state model.

    The state is pushed through ``M_matrix`` and the covariance is
    inflated by ``Q_matrix``.  The forecast *inverse* covariance is not
    computed, so ``None`` is returned in its place.

    Parameters
    ----------
    x_analysis : array
        Analysis state vector (from the assimilation or a previously
        propagated state).
    P_analysis : 2D (sparse) array
        Analysis covariance matrix.
    P_analysis_inverse : 2D (sparse) array
        Unused here; kept so all propagators share one signature.
    M_matrix : 2D array
        Linear state propagation model.
    Q_matrix : 2D (sparse) array
        State-uncertainty inflation added to the covariance.
    prior, state_propagator, date
        Unused; present for API compatibility.

    Returns
    -------
    (x_forecast, P_forecast, None)
    """
    forecast_mean = M_matrix.dot(x_analysis)
    inflated_cov = P_analysis + Q_matrix
    return forecast_mean, inflated_cov, None
def propagate_information_filter_SLOW(x_analysis, P_analysis, P_analysis_inverse,
                                      M_matrix, Q_matrix,
                                      prior=None, state_propagator=None, date=None):
    """Information filter state propagation using the INVERSE state covariance
    matrix and a linear state transition model. This function returns `None`
    for the forecast covariance matrix (as this takes forever).

    Parameters
    -----------
    x_analysis : array
        The analysis state vector. This comes either from the assimilation
        or directly from a previously propagated state.
    P_analysis : 2D sparse array
        The analysis covariance matrix. Unused here (pass `None`).
    P_analysis_inverse : 2D sparse array
        The INVERSE analysis covariance matrix (typically a sparse matrix).
    M_matrix : 2D array
        The linear state propagation model.
    Q_matrix : 2D array (sparse)
        The state uncertainty inflation matrix that is added to the
        covariance matrix.
    prior, state_propagator, date
        Unused; present so all propagators share one signature.

    Returns
    -------
    x_forecast (forecast state vector), `None` and P_forecast_inverse
    (forecast inverse covariance matrix)"""
    logging.info("Starting the propagation...")
    x_forecast = M_matrix.dot(x_analysis)
    # (I + P^-1 Q)^-1 P^-1 == (P + Q)^-1: the standard KF covariance
    # inflation, expressed directly on the inverse covariance.
    n = P_analysis_inverse.shape[0]  # was `n, n = shape`, which shadowed n
    S = P_analysis_inverse.dot(Q_matrix)
    A = (sp.eye(n) + S).tocsc()
    P_forecast_inverse = spl.spsolve(A, P_analysis_inverse)
    logging.info("DOne with propagation")
    return x_forecast, None, P_forecast_inverse
def propagate_information_filter_approx_SLOW(x_analysis, P_analysis, P_analysis_inverse,
                                             M_matrix, Q_matrix,
                                             prior=None, state_propagator=None, date=None):
    """Approximate information-filter state propagation on the INVERSE
    state covariance with a linear state transition model.  Returns
    `None` for the forecast covariance matrix.  The approximation (cf.
    Terejanu's notes) inflates the precision using only the main
    diagonal of the analysis precision matrix.

    Parameters
    -----------
    x_analysis : array
        The analysis state vector (from the assimilation or a
        previously propagated state).
    P_analysis : 2D sparse array
        The analysis covariance matrix. Unused here (pass `None`).
    P_analysis_inverse : 2D sparse array
        The INVERSE analysis covariance matrix.
    M_matrix : 2D array
        The linear state propagation model.
    Q_matrix : 2D array (sparse)
        The state uncertainty inflation matrix.
    prior, state_propagator, date
        Unused; present so all propagators share one signature.

    Returns
    -------
    x_forecast, `None`, P_forecast_inverse
    """
    x_forecast = M_matrix.dot(x_analysis)
    # Diagonal-only approximation of (P + Q)^-1:
    # new precision_i = precision_i / (1 + precision_i * q_i).
    diag_precision = P_analysis_inverse.diagonal()
    inflation = 1. / (1. + diag_precision * Q_matrix.diagonal())
    shape = P_analysis_inverse.shape
    precision_diag = sp.dia_matrix((diag_precision, 0), shape=shape)
    P_forecast_inverse = precision_diag.dot(
        sp.dia_matrix((inflation, 0), shape=shape))
    return x_forecast, None, P_forecast_inverse
def propagate_information_filter_LAI(x_analysis, P_analysis,
                                     P_analysis_inverse,
                                     M_matrix, Q_matrix,
                                     prior=None, state_propagator=None, date=None):
    """Propagate the state but reset every parameter except LAI to the
    JRC-TIP prior.

    The forecast mean is the TIP prior mean with the propagated LAI
    (element 6 of each 7-element pixel block) spliced in.  The forecast
    inverse covariance is the per-pixel TIP prior precision with the
    LAI precision inflated by Q.
    """
    x_forecast = M_matrix.dot(x_analysis)
    x_prior, c_prior, c_inv_prior = tip_prior()
    n_pixels = len(x_analysis)//7
    # Tile the single-pixel prior mean over the grid.
    x0 = np.array([x_prior for i in range(n_pixels)]).flatten()
    x0[6::7] = x_forecast[6::7] # Update LAI
    # NOTE(review): despite the name, lai_post_cov holds diagonal entries
    # of the *inverse* covariance (precisions); 1/precision + q below is
    # the inflated variance, whose reciprocal is the new precision.
    lai_post_cov = P_analysis_inverse.diagonal()[6::7]
    lai_Q = Q_matrix.diagonal()[6::7]
    c_inv_prior_mat = []
    for n in range(n_pixels):
        # inflate uncertainty
        lai_inv_cov = 1.0/((1.0/lai_post_cov[n])+lai_Q[n])
        little_P_forecast_inverse = c_inv_prior.copy()
        little_P_forecast_inverse[6, 6] = lai_inv_cov
        c_inv_prior_mat.append(little_P_forecast_inverse)
    P_forecast_inverse=block_diag(c_inv_prior_mat, dtype=np.float32)
    return x0, None, P_forecast_inverse
def no_propagation(x_analysis, P_analysis,
                   P_analysis_inverse,
                   M_matrix, Q_matrix,
                   prior=None, state_propagator=None, date=None):
    """No propagation: return the original TIP prior, tiled over the
    grid.  As the information-filter behaviour is the standard one in
    KaFKA, only the inverse covariance matrix is returned.  **NOTE**:
    the input parameters exist only to comply with the propagator API
    and are **UNUSED**, except that ``len(x_analysis)`` determines the
    number of pixels.

    Parameters
    -----------
    x_analysis : array
        Analysis state vector; only its length (7 * n_pixels) is used.
    P_analysis, P_analysis_inverse, M_matrix, Q_matrix
        Unused; kept for API compatibility.
    prior, state_propagator, date
        Unused; kept for API compatibility.

    Returns
    -------
    x_forecast (prior mean vector), `None` and P_forecast_inverse
    (block-diagonal prior inverse covariance)"""
    x_single, _, c_inv_single = tip_prior()
    n_pixels = len(x_analysis) // 7
    x_forecast = np.tile(x_single, n_pixels)
    P_forecast_inverse = block_diag([c_inv_single] * n_pixels,
                                    dtype=np.float32)
    return x_forecast, None, P_forecast_inverse
| jgomezdans/KaFKA | kafka/inference/kf_tools.py | Python | gpl-3.0 | 14,447 | [
"Gaussian"
] | bf2595ced91b731564f777dda82616218bb7bbd55bdaa15e5c4a022873e4f865 |
#! /usr/bin/env python
"""For every frame of the trajectory named on the command line, write the
distance between the centres of mass of residue 108 in segments A and B
to '<trajectory basename>_bsite_dist.dat', one value per line."""
from MDAnalysis import *
#from MDAnalysis.analysis.align import *
import numpy
import math
import sys

traj_path = sys.argv[1]
suffix_pos = traj_path.find('.pdb')
universe = Universe("init.pdb", traj_path)
# Loaded as in the original workflow; not used below.
reference = Universe("init.pdb")
# Centre-of-mass selections for residue 108 on each chain.
sel_a = universe.selectAtoms("segid A and (resid 108)")
sel_b = universe.selectAtoms("segid B and resid 108")
out_name = traj_path[0:suffix_pos] + '_bsite_dist.dat'
out = open(out_name, 'w')
for frame in universe.trajectory:
    separation = numpy.linalg.norm(sel_a.centerOfMass() - sel_b.centerOfMass())
    out.write('%7.3f\n' % (separation))
out.close()
"MDAnalysis"
] | e2750c62be291da2258511af4f7b72ec12179fe0d84a097d8f6836caaf2cd6fc |
"""
Numerical python functions written for compatibility with MATLAB
commands with the same names.
MATLAB compatible functions
-------------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
Cross spectral density using Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
Power spectral density using Welch's average periodogram
:func:`rk4`
A 4th order runge kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (power spectral density over segments of time)
Miscellaneous functions
-------------------------
Functions that don't exist in MATLAB, but are useful anyway:
:meth:`cohere_pairs`
Coherence over all pairs. This is not a MATLAB function, but we
compute coherence a lot in my lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:meth:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
:meth:`contiguous_regions`
return the indices of the regions spanned by some logical mask
:meth:`cross_from_below`
return the indices where a 1D array crosses a threshold from below
:meth:`cross_from_above`
return the indices where a 1D array crosses a threshold from above
record array helper functions
-------------------------------
A collection of helper methods for numpy record arrays
.. _htmlonly:
See :ref:`misc-examples-index`
:meth:`rec2txt`
pretty print a record array
:meth:`rec2csv`
store record array in CSV file
:meth:`csv2rec`
import record array from CSV file with type inspection
:meth:`rec_append_fields`
adds field(s)/array(s) to record array
:meth:`rec_drop_fields`
drop fields from record array
:meth:`rec_join`
join two record arrays on sequence of fields
:meth:`recs_join`
a simple join of multiple recarrays using a single column as a key
:meth:`rec_groupby`
summarize data by groups (similar to SQL GROUP BY)
:meth:`rec_summarize`
helper code to filter rec array fields into new fields
For the rec viewer functions(e rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
Deprecated functions
---------------------
The following are deprecated; please import directly from numpy (with
care--function signatures may differ):
:meth:`load`
load ASCII file - use numpy.loadtxt
:meth:`save`
save ASCII file - use numpy.savetxt
"""
import csv, warnings, copy, os, operator
import numpy as np
ma = np.ma
from matplotlib import verbose
import matplotlib.cbook as cbook
from matplotlib import docstring
from matplotlib.path import Path
def logspace(xmin,xmax,N):
return np.exp(np.linspace(np.log(xmin), np.log(xmax), N))
def _norm(x):
"return sqrt(x dot x)"
return np.sqrt(np.dot(x,x))
def window_hanning(x):
    """Return x weighted by a Hanning window of the same length."""
    return np.hanning(len(x)) * x

def window_none(x):
    """Identity window: return x unchanged."""
    return x
def detrend(x, key=None):
    """Remove a trend from *x*.

    *key* selects the detrending: None or 'constant' subtracts the
    mean; 'linear' subtracts the best-fit straight line.

    Raises ValueError for any other *key* (the original silently fell
    through and returned None, which surfaced later as confusing
    errors in callers).
    """
    if key is None or key=='constant':
        return detrend_mean(x)
    elif key=='linear':
        return detrend_linear(x)
    raise ValueError(
        "Unknown detrend key %r: use None, 'constant' or 'linear'" % (key,))

def demean(x, axis=0):
    "Return x minus its mean along the specified axis"
    x = np.asarray(x)
    if axis == 0 or axis is None or x.ndim <= 1:
        return x - x.mean(axis)
    # Re-insert the reduced axis so the mean broadcasts against x.
    # A tuple index is required: indexing with a list of slices is
    # deprecated/removed in modern numpy.
    ind = [slice(None)] * x.ndim
    ind[axis] = np.newaxis
    return x - x.mean(axis)[tuple(ind)]

def detrend_mean(x):
    "Return x minus the mean(x)"
    return x - x.mean()

def detrend_none(x):
    "Return x: no detrending"
    return x

def detrend_linear(y):
    "Return y minus best fit line; 'linear' detrending "
    # Faster than an algorithm based on linalg.lstsq: the slope is
    # cov(x, y) / var(x) for the implicit abscissa 0..N-1.
    # (dtype=float replaces np.float_, which numpy 2.0 removed.)
    x = np.arange(len(y), dtype=float)
    C = np.cov(x, y, bias=1)
    b = C[0,1]/C[0,0]
    a = y.mean() - b*x.mean()
    return y - (b*x + a)
#This is a helper function that implements the commonality between the
#psd, csd, and spectrogram. It is *NOT* meant to be used outside of mlab
def _spectral_helper(x, y, NFFT=256, Fs=2, detrend=detrend_none,
        window=window_hanning, noverlap=0, pad_to=None, sides='default',
        scale_by_freq=None):
    """Compute the windowed, per-segment cross-spectral matrix of *x*
    and *y* plus the matching frequency and segment-midpoint time
    vectors.  Returns the *unaveraged* Pxy (one column per segment),
    freqs and t; psd(), csd() and specgram() post-process this.
    """
    #The checks for if y is x are so that we can use the same function to
    #implement the core of psd(), csd(), and spectrogram() without doing
    #extra calculations. We return the unaveraged Pxy, freqs, and t.
    same_data = y is x
    #Make sure we're dealing with a numpy array. If y and x were the same
    #object to start with, keep them that way
    x = np.asarray(x)
    if not same_data:
        y = np.asarray(y)
    else:
        y = x
    # zero pad x and y up to NFFT if they are shorter than NFFT
    if len(x)<NFFT:
        n = len(x)
        x = np.resize(x, (NFFT,))
        x[n:] = 0
    if not same_data and len(y)<NFFT:
        n = len(y)
        y = np.resize(y, (NFFT,))
        y[n:] = 0
    if pad_to is None:
        pad_to = NFFT
    if scale_by_freq is None:
        scale_by_freq = True
    # For real x, ignore the negative frequencies unless told otherwise
    if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided':
        numFreqs = pad_to
        scaling_factor = 1.
    elif sides in ('default', 'onesided'):
        numFreqs = pad_to//2 + 1
        scaling_factor = 2.
    else:
        raise ValueError("sides must be one of: 'default', 'onesided', or "
            "'twosided'")
    # A vector window is used as-is; a window function is evaluated on a
    # ones-vector of length NFFT.
    if cbook.iterable(window):
        assert(len(window) == NFFT)
        windowVals = window
    else:
        windowVals = window(np.ones((NFFT,), x.dtype))
    # Segment start indices, advancing by NFFT - noverlap samples.
    step = NFFT - noverlap
    ind = np.arange(0, len(x) - NFFT + 1, step)
    n = len(ind)
    Pxy = np.zeros((numFreqs, n), np.complex_)
    # do the ffts of the slices
    for i in range(n):
        thisX = x[ind[i]:ind[i]+NFFT]
        thisX = windowVals * detrend(thisX)
        fx = np.fft.fft(thisX, n=pad_to)
        if same_data:
            fy = fx
        else:
            thisY = y[ind[i]:ind[i]+NFFT]
            thisY = windowVals * detrend(thisY)
            fy = np.fft.fft(thisY, n=pad_to)
        Pxy[:,i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs]
    # Scale the spectrum by the norm of the window to compensate for
    # windowing loss; see Bendat & Piersol Sec 11.5.2.
    Pxy /= (np.abs(windowVals)**2).sum()
    # Also include scaling factors for one-sided densities and dividing by the
    # sampling frequency, if desired. Scale everything, except the DC component
    # and the NFFT/2 component:
    Pxy[1:-1] *= scaling_factor
    # MATLAB divides by the sampling frequency so that density function
    # has units of dB/Hz and can be integrated by the plotted frequency
    # values. Perform the same scaling here.
    if scale_by_freq:
        Pxy /= Fs
    # Segment midpoints in time units, and the frequency axis.
    t = 1./Fs * (ind + NFFT / 2.)
    freqs = float(Fs) / pad_to * np.arange(numFreqs)
    if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided':
        # center the frequency range at zero
        freqs = np.concatenate((freqs[numFreqs//2:] - Fs, freqs[:numFreqs//2]))
        Pxy = np.concatenate((Pxy[numFreqs//2:, :], Pxy[:numFreqs//2, :]), 0)
    return Pxy, freqs, t
#Split out these keyword docs so that they can be used elsewhere
docstring.interpd.update(PSD=cbook.dedent("""
Keyword arguments:
*NFFT*: integer
The number of data points used in each block for the FFT.
Must be even; a power 2 is most efficient. The default value is 256.
This should *NOT* be used to get zero padding, or the scaling of the
result will be incorrect. Use *pad_to* for this instead.
*Fs*: scalar
The sampling frequency (samples per time unit). It is used
to calculate the Fourier frequencies, freqs, in cycles per time
unit. The default value is 2.
*detrend*: callable
The function applied to each segment before fft-ing,
designed to remove the mean or linear trend. Unlike in
MATLAB, where the *detrend* parameter is a vector, in
matplotlib is it a function. The :mod:`~matplotlib.pylab`
module defines :func:`~matplotlib.pylab.detrend_none`,
:func:`~matplotlib.pylab.detrend_mean`, and
:func:`~matplotlib.pylab.detrend_linear`, but you can use
a custom function as well.
*window*: callable or ndarray
A function or a vector of length *NFFT*. To create window
vectors see :func:`window_hanning`, :func:`window_none`,
:func:`numpy.blackman`, :func:`numpy.hamming`,
:func:`numpy.bartlett`, :func:`scipy.signal`,
:func:`scipy.signal.get_window`, etc. The default is
:func:`window_hanning`. If a function is passed as the
argument, it must take a data segment as an argument and
return the windowed version of the segment.
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. This can be different from *NFFT*, which
specifies the number of data points used. While not increasing
the actual resolution of the psd (the minimum distance between
resolvable peaks), this can give more points in the plot,
allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to*
equal to *NFFT*
*sides*: [ 'default' | 'onesided' | 'twosided' ]
Specifies which sides of the PSD to return. Default gives the
default behavior, which returns one-sided for real data and both
for complex data. 'onesided' forces the return of a one-sided PSD,
while 'twosided' forces two-sided.
*scale_by_freq*: boolean
Specifies whether the resulting density values should be scaled
by the scaling frequency, which gives density in units of Hz^-1.
This allows for integration over the returned frequency values.
The default is True for MATLAB compatibility.
"""))
@docstring.dedent_interpd
def psd(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
        noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
    """
    The power spectral density by Welch's average periodogram method.
    The vector *x* is divided into *NFFT* length blocks.  Each block
    is detrended by the function *detrend* and windowed by the function
    *window*.  *noverlap* gives the length of the overlap between blocks.
    The absolute(fft(block))**2 of each segment are averaged to compute
    *Pxx*, with a scaling to correct for power loss due to windowing.

    If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.

    *x*
        Array or sequence containing the data

    %(PSD)s

      *noverlap*: integer
          The number of points of overlap between blocks.  The default value
          is 0 (no overlap).

    Returns the tuple (*Pxx*, *freqs*).

    Refs:

        Bendat & Piersol -- Random Data: Analysis and Measurement
        Procedures, John Wiley & Sons (1986)
    """
    # The auto-spectrum is csd() of the signal with itself; Pxx is real
    # up to rounding, so only the real part is kept.
    Pxx,freqs = csd(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
        scale_by_freq)
    return Pxx.real,freqs
@docstring.dedent_interpd
def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
        noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
    """
    The cross power spectral density by Welch's average periodogram
    method.  The vectors *x* and *y* are divided into *NFFT* length
    blocks.  Each block is detrended by the function *detrend* and
    windowed by the function *window*.  *noverlap* gives the length
    of the overlap between blocks.  The product of the direct FFTs
    of *x* and *y* are averaged over each segment to compute *Pxy*,
    with a scaling to correct for power loss due to windowing.

    If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
    padded to *NFFT*.

    *x*, *y*
        Array or sequence containing the data

    %(PSD)s

      *noverlap*: integer
          The number of points of overlap between blocks.  The default value
          is 0 (no overlap).

    Returns the tuple (*Pxy*, *freqs*).

    Refs:

        Bendat & Piersol -- Random Data: Analysis and Measurement
        Procedures, John Wiley & Sons (1986)
    """
    Pxy, freqs, t = _spectral_helper(x, y, NFFT, Fs, detrend, window,
        noverlap, pad_to, sides, scale_by_freq)
    # Average the per-segment cross spectra over segments (columns).
    if len(Pxy.shape) == 2 and Pxy.shape[1]>1:
        Pxy = Pxy.mean(axis=1)
    return Pxy, freqs
@docstring.dedent_interpd
def specgram(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
        noverlap=128, pad_to=None, sides='default', scale_by_freq=None):
    """
    Compute a spectrogram of data in *x*.  Data are split into *NFFT*
    length segments and the PSD of each section is computed.  The
    windowing function *window* is applied to each segment, and the
    amount of overlap of each segment is specified with *noverlap*.

    If *x* is real (i.e. non-complex) only the spectrum of the positive
    frequencie is returned.  If *x* is complex then the complete
    spectrum is returned.

    %(PSD)s

      *noverlap*: integer
          The number of points of overlap between blocks.  The default value
          is 128.

    Returns a tuple (*Pxx*, *freqs*, *t*):

         - *Pxx*: 2-D array, columns are the periodograms of
           successive segments

         - *freqs*: 1-D array of frequencies corresponding to the rows
           in Pxx

         - *t*: 1-D array of times corresponding to midpoints of
           segments.

    .. seealso::

        :func:`psd`
            :func:`psd` differs in the default overlap; in returning
            the mean of the segment periodograms; and in not returning
            times.
    """
    # Segments must advance by at least one sample.
    assert(NFFT > noverlap)
    Pxx, freqs, t = _spectral_helper(x, x, NFFT, Fs, detrend, window,
        noverlap, pad_to, sides, scale_by_freq)
    Pxx = Pxx.real #Needed since helper implements generically
    return Pxx, freqs, t
_coh_error = """Coherence is calculated by averaging over *NFFT*
length segments. Your signal is too short for your choice of *NFFT*.
"""
@docstring.dedent_interpd
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
        noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
    """
    The coherence between *x* and *y*.  Coherence is the normalized
    cross spectral density:

    .. math::

        C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}

    *x*, *y*
        Array or sequence containing the data

    %(PSD)s

      *noverlap*: integer
          The number of points of overlap between blocks.  The default value
          is 0 (no overlap).

    The return value is the tuple (*Cxy*, *f*), where *f* are the
    frequencies of the coherence vector.  For cohere, scaling the
    individual densities by the sampling frequency has no effect,
    since the factors cancel out.

    .. seealso::

        :func:`psd` and :func:`csd`
            For information about the methods used to compute
            :math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
    """
    # Coherence needs at least two full segments to average; with a
    # single segment Cxy would be identically 1.
    if len(x)<2*NFFT:
        raise ValueError(_coh_error)
    Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
        scale_by_freq)
    Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
        scale_by_freq)
    Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
        scale_by_freq)
    # Normalized magnitude-squared cross spectrum.
    Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy)
    Cxy.shape = (len(f),)
    return Cxy, f
def donothing_callback(*args):
    """Default progress callback: accept any arguments and do nothing."""
    pass
def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none,
                  window=window_hanning, noverlap=0,
                  preferSpeedOverMemory=True,
                  progressCallback=donothing_callback,
                  returnPxx=False):
    """
    Call signature::

      Cxy, Phase, freqs = cohere_pairs( X, ij, ...)

    Compute the coherence and phase for all pairs *ij*, in *X*.

    *X* is a *numSamples* * *numCols* array

    *ij* is a list of tuples.  Each tuple is a pair of indexes into
    the columns of X for which you want to compute coherence.  For
    example, if *X* has 64 columns, and you want to compute all
    nonredundant pairs, define *ij* as::

      ij = []
      for i in range(64):
          for j in range(i+1,64):
              ij.append( (i,j) )

    *preferSpeedOverMemory* is an optional bool.  Defaults to true.  If
    False, limits the caching by only making one, rather than two,
    complex cache arrays.  This is useful if memory becomes critical.
    Even when *preferSpeedOverMemory* is False, :func:`cohere_pairs`
    will still give significant performance gains over calling
    :func:`cohere` for each pair, and will use substantially less
    memory than if *preferSpeedOverMemory* is True.

    Returns::

       (Cxy, Phase, freqs)

    where:

      - *Cxy*: dictionary of (*i*, *j*) tuples -> coherence vector for
        that pair.  I.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``.
        Number of dictionary keys is ``len(ij)``.

      - *Phase*: dictionary of phases of the cross spectral density at
        each frequency for each pair.  Keys are (*i*, *j*).

      - *freqs*: vector of frequencies, equal in length to either the
        coherence or phase vectors for any (*i*, *j*) key.

    If *returnPxx* is True, the dictionary of per-column power spectral
    densities *Pxx* is appended to the returned tuple.

    For a large number of pairs, :func:`cohere_pairs` can be much more
    efficient than just calling :func:`cohere` for each pair, because
    it caches most of the intensive computations.  If :math:`N` is the
    number of pairs, this function is :math:`O(N)` for most of the
    heavy lifting, whereas calling cohere for each pair is
    :math:`O(N^2)`.  However, because of the caching, it is also more
    memory intensive, making 2 additional complex arrays with
    approximately the same number of elements as *X*.

    See :file:`test/cohere_pairs_test.py` in the src tree for an
    example script that shows that this :func:`cohere_pairs` and
    :func:`cohere` give the same results for a given pair.

    .. seealso::

        :func:`psd`
            For information about the methods used to compute
            :math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
    """
    numRows, numCols = X.shape

    # zero pad if X is too short to hold one NFFT-length segment
    if numRows < NFFT:
        tmp = X
        X = np.zeros( (NFFT, numCols), X.dtype)
        X[:numRows,:] = tmp
        del tmp

    numRows, numCols = X.shape

    # collect only the columns referenced by the ij pairs, so we do not
    # cache FFTs for unused channels
    allColumns = set()
    for i,j in ij:
        allColumns.add(i); allColumns.add(j)
    Ncols = len(allColumns)

    # for real X, ignore the negative frequencies
    if np.iscomplexobj(X): numFreqs = NFFT
    else: numFreqs = NFFT//2+1

    # cache the FFT of every windowed, detrended NFFT length segment
    # of every channel.  If preferSpeedOverMemory, cache the conjugate
    # as well
    if cbook.iterable(window):
        # raise instead of assert: asserts are stripped under -O
        if len(window) != NFFT:
            raise ValueError('length of window must equal NFFT')
        windowVals = window
    else:
        windowVals = window(np.ones(NFFT, X.dtype))
    ind = list(range(0, numRows-NFFT+1, NFFT-noverlap))
    numSlices = len(ind)
    FFTSlices = {}
    FFTConjSlices = {}
    Pxx = {}
    slices = list(range(numSlices))
    normVal = np.linalg.norm(windowVals)**2
    # BUGFIX: the progress fraction previously used the stale loop
    # variable ``i`` left over from the ij loop above; enumerate the
    # columns so the reported fraction is meaningful.
    # (np.complex_ was removed in NumPy 2.0; builtin complex is the
    # same dtype.)
    for iDone, iCol in enumerate(allColumns):
        progressCallback(iDone/Ncols, 'Cacheing FFTs')
        Slices = np.zeros( (numSlices,numFreqs), dtype=complex)
        for iSlice in slices:
            thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
            thisSlice = windowVals*detrend(thisSlice)
            Slices[iSlice,:] = np.fft.fft(thisSlice)[:numFreqs]

        FFTSlices[iCol] = Slices
        if preferSpeedOverMemory:
            FFTConjSlices[iCol] = np.conjugate(Slices)
        Pxx[iCol] = np.divide(np.mean(abs(Slices)**2, axis=0), normVal)
    del Slices, ind, windowVals

    # compute the coherences and phases for all pairs using the
    # cached FFTs
    Cxy = {}
    Phase = {}
    count = 0
    N = len(ij)
    for i,j in ij:
        count +=1
        if count%10==0:
            progressCallback(count/N, 'Computing coherences')

        if preferSpeedOverMemory:
            Pxy = FFTSlices[i] * FFTConjSlices[j]
        else:
            Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])
        if numSlices>1: Pxy = np.mean(Pxy, axis=0)
        Pxy /= normVal
        Cxy[i,j] = abs(Pxy)**2 / (Pxx[i]*Pxx[j])
        Phase[i,j] = np.arctan2(Pxy.imag, Pxy.real)

    freqs = Fs/NFFT*np.arange(numFreqs)
    if returnPxx:
        return Cxy, Phase, freqs, Pxx
    else:
        return Cxy, Phase, freqs
def entropy(y, bins):
    r"""
    Return the entropy of the data in *y*, estimated from a histogram.

    .. math::

      S = -\sum p_i \ln(p_i) + \ln(\Delta)

    where :math:`p_i` is the probability of observing *y* in the
    :math:`i^{th}` bin of *bins* and :math:`\Delta` is the bin width.
    (The previous docstring showed :math:`\sum p_i \log_2(p_i)`, which
    did not match the code: the computation uses the natural log and a
    negated sum, plus the bin-width correction term.)  *bins* can be a
    number of bins or a range of bins; see :func:`numpy.histogram`.

    Compare *S* with analytic calculation for a Gaussian::

      x = mu + sigma * randn(200000)
      Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )
    """
    n, bins = np.histogram(y, bins)
    # np.float_ was removed in NumPy 2.0; builtin float is equivalent
    n = n.astype(float)
    n = np.take(n, np.nonzero(n)[0])   # keep only the occupied bins
    p = np.divide(n, len(y))

    delta = bins[1] - bins[0]
    S = -1.0 * np.sum(p * np.log(p)) + np.log(delta)
    return S
def normpdf(x, *args):
    "Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
    mu, sigma = args
    z = (x - mu) / sigma
    coeff = 1.0 / (np.sqrt(2 * np.pi) * sigma)
    return coeff * np.exp(-0.5 * z ** 2)
def levypdf(x, gamma, alpha):
    """
    Return the levy pdf evaluated at *x* for params *gamma*, *alpha*.

    The pdf is computed as the inverse Fourier transform of the
    characteristic function ``exp(-gamma*|2*pi*f|**alpha)``; *x* must
    be an evenly spaced array of even length.
    """
    N = len(x)

    if N % 2 != 0:
        # fixed typo in the message: "event" -> "even"
        raise ValueError('x must be an even length array; try\n' +
                         'x = np.linspace(minx, maxx, N), where N is even')

    dx = x[1] - x[0]

    # BUGFIX: the original passed the dtype positionally where
    # np.arange expects the *step*, which raises a TypeError; use
    # explicit dtype= keywords and integer division instead.
    f = 1 / (N * dx) * np.arange(-N // 2, N // 2, dtype=float)

    # index order that swaps the two halves of the spectrum
    # (fftshift-style) so f=0 lands at position 0 for the FFT
    ind = np.concatenate([np.arange(N // 2, N, dtype=int),
                          np.arange(0, N // 2, dtype=int)])
    df = f[1] - f[0]
    cfl = np.exp(-gamma * np.absolute(2 * np.pi * f) ** alpha)

    # the pdf is real; take the real part of the inverse transform
    px = np.fft.fft(np.take(cfl, ind) * df).real
    return np.take(px, ind)
def find(condition):
    "Return the indices where ravel(condition) is true"
    (indices,) = np.nonzero(np.ravel(condition))
    return indices
def longest_contiguous_ones(x):
    """
    Return the indices of the longest stretch of contiguous ones in *x*,
    assuming *x* is a vector of zeros and ones.  If there are two
    equally long stretches, pick the first.
    """
    x = np.ravel(x)
    if len(x) == 0:
        return np.array([])

    zero_pos = (x == 0).nonzero()[0]
    if len(zero_pos) == 0:
        # no zeros: the whole vector is a single run of ones
        return np.arange(len(x))
    if len(zero_pos) == len(x):
        # no ones at all
        return np.array([])

    # pad with a zero at each end so every run of ones produces both a
    # rising (+1) and a falling (-1) edge in the difference signal
    padded = np.zeros((len(x) + 2,), x.dtype)
    padded[1:-1] = x
    edges = np.diff(padded)
    run_starts = (edges == 1).nonzero()[0]
    run_ends = (edges == -1).nonzero()[0]
    lengths = run_ends - run_starts
    # first index achieving the max implements the "pick the first" rule
    best = (lengths == lengths.max()).nonzero()[0][0]
    return np.arange(run_starts[best], run_ends[best])
def longest_ones(x):
    """Alias for :func:`longest_contiguous_ones`."""
    return longest_contiguous_ones(x)
def prepca(P, frac=0):
    """
    WARNING: this function is deprecated -- please see class PCA instead

    Compute the principal components of *P*.  *P* is a (*numVars*,
    *numObs*) array.  *frac* is the minimum fraction of variance that a
    component must contain to be included.

    Return value is a tuple of the form (*Pcomponents*, *Trans*,
    *fracVar*) where:

      - *Pcomponents* : a (numVars, numObs) array

      - *Trans* : the weights matrix, ie, *Pcomponents* = *Trans* * *P*

      - *fracVar* : the fraction of the variance accounted for by each
        component returned

    A similar function of the same name was in the MATLAB
    R13 Neural Network Toolbox but is not found in later versions;
    its successor seems to be called "processpcs".
    """
    warnings.warn('This function is deprecated -- see class PCA instead')
    U, s, v = np.linalg.svd(P)
    # variance carried by each component, normalized to fractions
    varEach = s ** 2 / P.shape[1]
    fracVar = varEach / varEach.sum()
    # keep the leading components that each hold at least `frac` of
    # the variance
    keep = slice((fracVar >= frac).sum())
    Trans = U[:, keep].transpose()
    Pcomponents = np.dot(Trans, P)
    return Pcomponents, Trans, fracVar[keep]
class PCA:
    def __init__(self, a):
        """
        Compute the SVD of *a* and store the results for PCA.  Use
        :meth:`project` to project data onto a reduced set of
        dimensions.

        Inputs:

          *a*: a numobservations x numdims array

        Attrs:

          *a* : a centered, unit-sigma version of the input *a*

          *numrows*, *numcols* : the dimensions of *a*

          *mu* : a numdims array of means of *a*

          *sigma* : a numdims array of standard deviations of *a*

          *fracs* : the proportion of variance of each of the principal
          components

          *Wt* : the weight vector for projecting a numdims point or
          array into PCA space

          *Y* : *a* projected into PCA space

        The factor loadings are in the Wt factor, i.e. the factor
        loadings for the first principal component are given by Wt[0].
        """
        nobs, ndims = a.shape
        if nobs < ndims:
            raise RuntimeError('we assume data in a is organized with numrows>numcols')

        self.numrows, self.numcols = nobs, ndims
        self.mu = a.mean(axis=0)
        self.sigma = a.std(axis=0)

        a = self.center(a)
        self.a = a

        U, s, Vh = np.linalg.svd(a, full_matrices=False)

        # squared singular values give the variance captured by each
        # direction; normalize to fractions of the total
        variances = s ** 2 / float(len(s))
        self.fracs = variances / variances.sum()

        self.Wt = Vh
        self.Y = np.dot(Vh, a.T).T

    def project(self, x, minfrac=0.):
        """
        Project *x* onto the principal axes, dropping any axes whose
        fraction of variance is below *minfrac*.
        """
        x = np.asarray(x)
        if x.shape[-1] != self.numcols:
            raise ValueError('Expected an array with dims[-1]==%d'%self.numcols)
        projected = np.dot(self.Wt, self.center(x).T).T
        keep = self.fracs >= minfrac
        if x.ndim == 2:
            return projected[:, keep]
        return projected[keep]

    def center(self, x):
        """Center *x* using the mean and sigma from the training set *a*."""
        return (x - self.mu) / self.sigma

    @staticmethod
    def _get_colinear():
        """Return a small fixed test array whose columns are linearly
        dependent (c2 and c3 are combinations of c0 and c1)."""
        c0 = np.array([
            0.19294738, 0.6202667 , 0.45962655, 0.07608613, 0.135818  ,
            0.83580842, 0.07218851, 0.48318321, 0.84472463, 0.18348462,
            0.81585306, 0.96923926, 0.12835919, 0.35075355, 0.15807861,
            0.837437  , 0.10824303, 0.1723387 , 0.43926494, 0.83705486])

        c1 = np.array([
            -1.17705601, -0.513883  , -0.26614584,  0.88067144,  1.00474954,
            -1.1616545 ,  0.0266109 ,  0.38227157,  1.80489433,  0.21472396,
            -1.41920399, -2.08158544, -0.10559009,  1.68999268,  0.34847107,
            -0.4685737 ,  1.23980423, -0.14638744, -0.35907697,  0.22442616])

        c2 = c0 + 2*c1
        c3 = -3*c0 + 4*c1
        a = np.array([c3, c0, c1, c2]).T
        return a
def prctile(x, p = (0.0, 25.0, 50.0, 75.0, 100.0)):
    """
    Return the percentiles of *x*.  *p* can either be a sequence of
    percentile values or a scalar.  If *p* is a sequence, the ith
    element of the return sequence is the *p*(i)-th percentile of *x*.
    If *p* is a scalar, the largest value of *x* less than or equal to
    the *p* percentage point in the sequence is returned.
    """
    # This implementation derived from scipy.stats.scoreatpercentile
    def _interpolate(a, b, fraction):
        """Returns the point at the given fraction between a and b, where
        'fraction' must be between 0 and 1.
        """
        return a + (b - a) * fraction

    # np.iterable is the public numpy equivalent of cbook.iterable
    scalar = not np.iterable(p)
    per = np.array(p)
    values = np.array(x).ravel()  # copy
    values.sort()

    idxs = per / 100. * (values.shape[0] - 1)
    # BUGFIX: np.int was removed from numpy (1.24+); the builtin int
    # is what it always aliased
    ai = idxs.astype(int)
    bi = ai + 1
    frac = idxs % 1

    # handle cases where attempting to interpolate past last index:
    # step back one index and carry the fraction over 1 so the
    # interpolation still lands on the last value
    cond = bi >= len(values)
    if scalar:
        if cond:
            ai -= 1
            bi -= 1
            frac += 1
    else:
        ai[cond] -= 1
        bi[cond] -= 1
        frac[cond] += 1

    return _interpolate(values[ai], values[bi], frac)
def prctile_rank(x, p):
    """
    Return the rank for each element in *x*, return the rank
    0..len(*p*).  e.g., if *p* = (25, 50, 75), the return value will be
    a len(*x*) array with values in [0,1,2,3] where 0 indicates the
    value is less than the 25th percentile, 1 indicates the value is
    >= the 25th and < 50th percentile, ... and 3 indicates the value
    is above the 75th percentile cutoff.

    *p* is either an array of percentiles in [0..100] or a scalar which
    indicates how many quantiles of data you want ranked.
    """
    if not cbook.iterable(p):
        # a scalar p means "split into p equal-frequency bins"
        p = np.arange(100.0 / p, 100.0, 100.0 / p)
    else:
        p = np.asarray(p)

    if p.max() <= 1 or p.min() < 0 or p.max() > 100:
        raise ValueError('percentiles should be in range 0..100, not 0..1')

    cutoffs = prctile(x, p)
    return np.searchsorted(cutoffs, x)
def center_matrix(M, dim=0):
    """
    Return the matrix *M* with each row having zero mean and unit std.

    If *dim* = 1 operate on columns instead of rows.  (*dim* is
    opposite to the numpy axis kwarg.)
    """
    # np.float_ was removed in NumPy 2.0; builtin float is equivalent
    M = np.asarray(M, float)
    if dim:
        M = (M - M.mean(axis=0)) / M.std(axis=0)
    else:
        M = (M - M.mean(axis=1)[:, np.newaxis])
        M = M / M.std(axis=1)[:, np.newaxis]
    return M
def rk4(derivs, y0, t):
    """
    Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
    This is a toy implementation which may be useful if you find
    yourself stranded on a system w/o scipy.  Otherwise use
    :func:`scipy.integrate`.

    *y0*
        initial state vector

    *t*
        sample times

    *derivs*
        returns the derivative of the system and has the
        signature ``dy = derivs(yi, ti)``

    Example 1 ::

        ## 2D system

        def derivs6(x,t):
            d1 =  x[0] + 2*x[1]
            d2 =  -3*x[0] + 4*x[1]
            return (d1, d2)
        dt = 0.0005
        t = arange(0.0, 2.0, dt)
        y0 = (1,2)
        yout = rk4(derivs6, y0, t)

    Example 2::

        ## 1D system
        alpha = 2
        def derivs(x,t):
            return -alpha*x + exp(-t)

        y0 = 1
        yout = rk4(derivs, y0, t)

    If you have access to scipy, you should probably be using the
    scipy.integrate tools rather than this function.
    """
    # allocate a scalar or vector state history to match y0
    # (np.float_ was removed in NumPy 2.0; builtin float is equivalent)
    try:
        Ny = len(y0)
    except TypeError:
        yout = np.zeros((len(t),), float)
    else:
        yout = np.zeros((len(t), Ny), float)

    yout[0] = y0

    # classic RK4: four slope evaluations per step, weighted 1-2-2-1
    for i in np.arange(len(t) - 1):
        thist = t[i]
        dt = t[i + 1] - thist
        dt2 = dt / 2.0
        y0 = yout[i]
        k1 = np.asarray(derivs(y0, thist))
        k2 = np.asarray(derivs(y0 + dt2 * k1, thist + dt2))
        k3 = np.asarray(derivs(y0 + dt2 * k2, thist + dt2))
        k4 = np.asarray(derivs(y0 + dt * k3, thist + dt))
        yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
    return yout
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0,
                     mux=0.0, muy=0.0, sigmaxy=0.0):
    """
    Bivariate Gaussian distribution for equal shape *X*, *Y*.

    See `bivariate normal
    <http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
    at mathworld.
    """
    dx = X - mux
    dy = Y - muy
    rho = sigmaxy / (sigmax * sigmay)
    one_minus_rho2 = 1 - rho ** 2
    z = (dx ** 2 / sigmax ** 2
         + dy ** 2 / sigmay ** 2
         - 2 * rho * dx * dy / (sigmax * sigmay))
    norm = 2 * np.pi * sigmax * sigmay * np.sqrt(one_minus_rho2)
    return np.exp(-z / (2 * one_minus_rho2)) / norm
def get_xyz_where(Z, Cond):
    """
    *Z* and *Cond* are *M* x *N* matrices.  *Z* are data and *Cond* is
    a boolean matrix where some condition is satisfied.  Return value
    is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
    *z* are the values of *Z* at those indices.  *x*, *y*, and *z* are
    1D arrays.
    """
    rows, cols = np.indices(Z.shape)
    return rows[Cond], cols[Cond], Z[Cond]
def get_sparse_matrix(M, N, frac=0.1):
    """
    Return a *M* x *N* sparse matrix with *frac* elements randomly
    filled.
    """
    data = np.zeros((M, N))
    # BUGFIX: np.random.randint's upper bound is exclusive; the
    # original called randint(0, M-1), which could never place a value
    # in the last row or column.
    for _ in range(int(M * N * frac)):
        row = np.random.randint(0, M)
        col = np.random.randint(0, N)
        data[row, col] = np.random.rand()
    return data
def dist(x, y):
    """
    Return the distance between two points.
    """
    delta = x - y
    return np.sqrt(np.dot(delta, delta))
def dist_point_to_segment(p, s0, s1):
    """
    Get the distance of a point to a segment.

      *p*, *s0*, *s1* are *xy* sequences

    This algorithm from
    http://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment
    """
    # np.float_ was removed in NumPy 2.0; builtin float is equivalent
    p = np.asarray(p, float)
    s0 = np.asarray(s0, float)
    s1 = np.asarray(s1, float)
    v = s1 - s0
    w = p - s0

    # projection of w onto v is before the start of the segment
    c1 = np.dot(w, v)
    if c1 <= 0:
        return dist(p, s0)

    # projection is beyond the end of the segment
    c2 = np.dot(v, v)
    if c2 <= c1:
        return dist(p, s1)

    # perpendicular distance to the interior of the segment
    b = c1 / c2
    pb = s0 + b * v
    return dist(p, pb)
def segments_intersect(s1, s2):
    """
    Return *True* if *s1* and *s2* intersect.
    *s1* and *s2* are defined as::

      s1: (x1, y1), (x2, y2)
      s2: (x3, y3), (x4, y4)
    """
    (x1, y1), (x2, y2) = s1
    (x3, y3), (x4, y4) = s2

    denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
    if denom == 0:
        # lines are parallel (or colinear): treated as non-intersecting
        return False

    num1 = (x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)
    num2 = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)
    # parametric positions of the crossing point along each segment
    t1 = num1 / denom
    t2 = num2 / denom
    return 0.0 <= t1 <= 1.0 and 0.0 <= t2 <= 1.0
def fftsurr(x, detrend=detrend_none, window=window_none):
    """
    Compute an FFT phase randomized surrogate of *x*.
    """
    if cbook.iterable(window):
        x = window * detrend(x)
    else:
        x = window(detrend(x))
    spectrum = np.fft.fft(x)
    # randomize the phase of every Fourier coefficient while keeping
    # the amplitudes (and hence the power spectrum) intact
    random_phase = 2. * np.pi * 1j * np.random.rand(len(x))
    spectrum = spectrum * np.exp(random_phase)
    return np.fft.ifft(spectrum).real
class FIFOBuffer:
    """
    A FIFO queue to hold incoming *x*, *y* data in a rotating buffer
    using numpy arrays under the hood.  It is assumed that you will
    call asarrays much less frequently than you add data to the queue
    -- otherwise another data structure will be faster.

    This can be used to support plots where data is added from a real
    time feed and the plot object wants to grab data from the buffer
    and plot it to screen less frequently than the incoming.

    If you set the *dataLim* attr to
    :class:`~matplotlib.transforms.BBox` (eg
    :attr:`matplotlib.Axes.dataLim`), the *dataLim* will be updated as
    new data come in.

    TODO: add a grow method that will extend nmax

    .. note::

      mlab seems like the wrong place for this class.
    """
    @cbook.deprecated('1.3', name='FIFOBuffer', obj_type='class')
    def __init__(self, nmax):
        """
        Buffer up to *nmax* points.
        """
        # _xs/_ys are the raw ring buffers; _xa/_ya are scratch arrays
        # reused by asarrays() to return data in chronological order
        self._xa = np.zeros((nmax,), np.float_)
        self._ya = np.zeros((nmax,), np.float_)
        self._xs = np.zeros((nmax,), np.float_)
        self._ys = np.zeros((nmax,), np.float_)
        self._ind = 0          # total number of points ever added
        self._nmax = nmax
        self.dataLim = None
        self.callbackd = {}    # maps N -> funcs to call every N adds

    def register(self, func, N):
        """
        Call *func* every time *N* events are passed; *func* signature
        is ``func(fifo)``.
        """
        self.callbackd.setdefault(N, []).append(func)

    def add(self, x, y):
        """
        Add scalar *x* and *y* to the queue.
        """
        if self.dataLim is not None:
            xy = np.asarray([(x, y), ])
            self.dataLim.update_from_data_xy(xy, None)

        slot = self._ind % self._nmax
        self._xs[slot] = x
        self._ys[slot] = y

        for N, funcs in self.callbackd.items():
            if (self._ind % N) == 0:
                for func in funcs:
                    func(self)

        self._ind += 1

    def last(self):
        """
        Get the last *x*, *y* or *None*.  *None* if no data set.
        """
        if self._ind == 0:
            return None, None
        slot = (self._ind - 1) % self._nmax
        return self._xs[slot], self._ys[slot]

    def asarrays(self):
        """
        Return *x* and *y* as arrays; their length will be the len of
        data added or *nmax*.
        """
        if self._ind < self._nmax:
            return self._xs[:self._ind], self._ys[:self._ind]

        # buffer has wrapped: unroll the ring so the oldest point comes
        # first, reusing the preallocated scratch arrays
        slot = self._ind % self._nmax
        tail = self._nmax - slot
        self._xa[:tail] = self._xs[slot:]
        self._xa[tail:] = self._xs[:slot]
        self._ya[:tail] = self._ys[slot:]
        self._ya[tail:] = self._ys[:slot]
        return self._xa, self._ya

    def update_datalim_to_current(self):
        """
        Update the *datalim* in the current data in the fifo.
        """
        if self.dataLim is None:
            raise ValueError('You must first set the dataLim attr')
        x, y = self.asarrays()
        self.dataLim.update_from_data(x, y, True)
def movavg(x, n):
    """
    Compute the len(*n*) moving average of *x*.
    """
    # np.float_ was removed in NumPy 2.0; builtin float is equivalent
    w = np.empty((n,), dtype=float)
    w.fill(1.0 / n)
    # 'valid' mode returns only the fully-overlapping windows
    return np.convolve(x, w, mode='valid')
### the following code was written and submitted by Fernando Perez
### from the ipython numutils package under a BSD license
# begin fperez functions
"""
A set of convenient utilities for numerical work.
Most of this module requires numpy or is meant to be used with it.
Copyright (c) 2001-2004, Fernando Perez. <Fernando.Perez@colorado.edu>
All rights reserved.
This license was generated from the BSD license template as found in:
http://www.opensource.org/licenses/bsd-license.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the IPython project nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import math
#*****************************************************************************
# Globals
#****************************************************************************
# function definitions
# Clipping bounds used by exp_safe(): the natural log of the smallest
# positive normalized IEEE-754 double (so its exp is still representable),
# and the largest finite double.
exp_safe_MIN = math.log(2.2250738585072014e-308)
exp_safe_MAX = 1.7976931348623157e+308
def exp_safe(x):
    """
    Compute exponentials which safely underflow to zero.

    Slow, but convenient to use. Note that numpy provides proper
    floating point exception handling with access to the underlying
    hardware.
    """
    # isinstance (rather than an exact type test) also accepts
    # ndarray subclasses such as masked arrays
    if isinstance(x, np.ndarray):
        return np.exp(np.clip(x, exp_safe_MIN, exp_safe_MAX))
    else:
        return math.exp(x)
def amap(fn, *args):
    """
    amap(function, sequence[, sequence, ...]) -> array.

    Works like :func:`map`, but it returns an array.  This is just a
    convenient shorthand for ``numpy.array(map(...))``.
    """
    return np.array([fn(*vals) for vals in zip(*args)])
def rms_flat(a):
    """
    Return the root mean square of all the elements of *a*, flattened out.
    """
    squared = np.absolute(a) ** 2
    return np.sqrt(squared.mean())
def l1norm(a):
    """
    Return the *l1* norm of *a*, flattened out.

    Implemented as a separate function (not a call to :func:`norm` for speed).
    """
    return np.absolute(a).sum()
def l2norm(a):
    """
    Return the *l2* norm of *a*, flattened out.

    Implemented as a separate function (not a call to :func:`norm` for speed).
    """
    return np.sqrt((np.absolute(a) ** 2).sum())
def norm_flat(a, p=2):
    """
    norm(a,p=2) -> l-p norm of a.flat

    Return the l-p norm of *a*, considered as a flat array.  This is NOT a true
    matrix norm, since arrays of arbitrary rank are always flattened.

    *p* can be a number or the string 'Infinity' to get the L-infinity norm.
    """
    # This function was being masked by a more general norm later in
    # the file.  We may want to simply delete it.
    if p == 'Infinity':
        return np.amax(np.absolute(a))
    return (np.absolute(a) ** p).sum() ** (1.0 / p)
def frange(xini, xfin=None, delta=None, **kw):
    """
    frange([start,] stop[, step, keywords]) -> array of floats

    Return a numpy ndarray containing a progression of floats. Similar to
    :func:`numpy.arange`, but defaults to a closed interval.

    ``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start*
    defaults to 0, and the endpoint *is included*.  This behavior is
    different from that of :func:`range` and
    :func:`numpy.arange`. This is deliberate, since :func:`frange`
    will probably be more useful for generating lists of points for
    function evaluation, and endpoints are often desired in this
    use. The usual behavior of :func:`range` can be obtained by
    setting the keyword *closed* = 0, in this case, :func:`frange`
    basically becomes :func:numpy.arange`.

    When *step* is given, it specifies the increment (or
    decrement). All arguments can be floating point numbers.

    ``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where
    *xfin* <= *x1*.

    :func:`frange` can also be called with the keyword *npts*. This
    sets the number of points the list should contain (and overrides
    the value *step* might have been given). :func:`numpy.arange`
    doesn't offer this option.

    Examples::

      >>> frange(3)
      array([ 0.,  1.,  2.,  3.])
      >>> frange(3,closed=0)
      array([ 0.,  1.,  2.])
      >>> frange(1,6,2)
      array([1, 3, 5])   or 1,3,5,7, depending on floating point vagueries
      >>> frange(1,6.5,npts=5)
      array([ 1.   ,  2.375,  3.75 ,  5.125,  6.5  ])
    """
    # defaults
    kw.setdefault('closed', 1)
    endpoint = kw['closed'] != 0

    # funny logic to allow the *first* argument to be optional (like range())
    # This was modified with a simpler version from a similar frange() found
    # at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
    # (use `is None`: identity test, not equality -- `== None` is both
    # un-idiomatic and unsafe for array-like arguments)
    if xfin is None:
        xfin = xini + 0.0
        xini = 0.0

    if delta is None:
        delta = 1.0

    # compute # of points, spacing and return final list
    try:
        npts = kw['npts']
        delta = (xfin - xini) / float(npts - endpoint)
    except KeyError:
        npts = int(round((xfin - xini) / delta)) + endpoint
        # round finds the nearest, so the endpoint can be up to
        # delta/2 larger than xfin.

    return np.arange(npts) * delta + xini
# end frange()
def identity(n, rank=2, dtype='l', typecode=None):
    """
    Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).

    For ranks higher than 2, this object is simply a multi-index Kronecker
    delta::

                            /  1  if i0=i1=...=iR,
        id[i0,i1,...,iR] = -|
                            \  0  otherwise.

    Optionally a *dtype* (or typecode) may be given (it defaults to 'l').

    Since rank defaults to 2, this function behaves in the default case (when
    only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is
    much faster.
    """
    if typecode is not None:
        # legacy alias for dtype
        dtype = typecode
    out = np.zeros((n,) * rank, dtype)
    for i in range(n):
        out[(i,) * rank] = 1
    return out
def base_repr(number, base=2, padding=0):
    """
    Return the representation of a *number* in any given *base*.
    """
    chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    # base case: a single digit, left-padded with zeros
    if number < base:
        return (padding - 1) * chars[0] + chars[int(number)]
    max_exponent = int(math.log(number) / math.log(base))
    max_power = int(base) ** max_exponent
    lead_digit = int(number / max_power)
    remainder = number - max_power * lead_digit
    # recurse on the remainder, carrying the padding budget down
    return chars[lead_digit] + base_repr(remainder, base,
                                         max(padding - 1, max_exponent))
def binary_repr(number, max_length=1025):
    """
    Return the binary representation of the input *number* as a
    string.

    This is more efficient than using :func:`base_repr` with base 2.

    Increase the value of max_length for very large numbers.  Note that
    on 32-bit machines, 2**1023 is the largest integer power of 2
    which can be converted to a Python float.
    """
    #assert number < 2L << max_length
    shifts = list(map(operator.rshift, max_length * [number],
                      list(range(max_length - 1, -1, -1))))
    digits = list(map(operator.mod, shifts, max_length * [2]))
    # NOTE(review): for number == 0 this returns the int 0, not the
    # string '0'.  Callers in this file (log2, ispower2) rely on the
    # TypeError raised when slicing that int, so the quirk is kept.
    if not digits.count(1):
        return 0
    digits = digits[digits.index(1):]
    return ''.join(map(repr, digits)).replace('L', '')
def log2(x, ln2=math.log(2.0)):
    """
    Return the log(*x*) in base 2.

    This is a _slow_ function but which is guaranteed to return the correct
    integer value if the input is an integer exact power of 2.
    """
    try:
        bin_n = binary_repr(x)[1:]
    except (AssertionError, TypeError):
        return math.log(x) / ln2
    # an exact power of two is '1' followed only by zeros, so the tail
    # length is then the exact integer logarithm
    if '1' in bin_n:
        return math.log(x) / ln2
    return len(bin_n)
def ispower2(n):
    """
    Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.

    Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.
    """
    # powers of two are '1' followed only by zeros in binary
    tail = binary_repr(n)[1:]
    return 0 if '1' in tail else len(tail)
def isvector(X):
    """
    Like the MATLAB function with the same name, returns *True*
    if the supplied numpy array or matrix *X* looks like a vector,
    meaning it has a one non-singleton axis (i.e., it can have
    multiple axes, but all must have length 1, except for one of
    them).

    If you just want to see if the array has 1 axis, use X.ndim == 1.
    """
    # a vector's total size equals the length of its longest axis
    shape = np.asarray(X.shape)
    return shape.prod() == shape.max()
### end fperez numutils code
#helpers for loading, saving, manipulating and viewing numpy record arrays
def safe_isnan(x):
    ':func:`numpy.isnan` for arbitrary types'
    if cbook.is_string_like(x):
        return False
    try:
        result = np.isnan(x)
    except (NotImplementedError, TypeError):
        # np.isnan does not support this type; treat as "not a NaN"
        return False
    return result
def safe_isinf(x):
    ':func:`numpy.isinf` for arbitrary types'
    if cbook.is_string_like(x):
        return False
    try:
        result = np.isinf(x)
    except (NotImplementedError, TypeError):
        # np.isinf does not support this type; treat as "not infinite"
        return False
    return result
def rec_append_fields(rec, names, arrs, dtypes=None):
    """
    Return a new record array with field names populated with data
    from arrays in *arrs*.  If appending a single field, then *names*,
    *arrs* and *dtypes* do not have to be lists. They can just be the
    values themselves.
    """
    if (not cbook.is_string_like(names) and cbook.iterable(names)
            and len(names) and cbook.is_string_like(names[0])):
        # a sequence of field names: lengths must agree
        if len(names) != len(arrs):
            raise ValueError("number of arrays do not match number of names")
    else:
        # a single name/array pair: wrap both in lists
        names = [names]
        arrs = [arrs]
    arrs = list(map(np.asarray, arrs))

    if dtypes is None:
        dtypes = [a.dtype for a in arrs]
    elif not cbook.iterable(dtypes):
        dtypes = [dtypes]
    if len(arrs) != len(dtypes):
        if len(dtypes) == 1:
            # broadcast a single dtype over all arrays
            dtypes = dtypes * len(arrs)
        else:
            raise ValueError("dtypes must be None, a single dtype or a list")

    merged_dtype = np.dtype(rec.dtype.descr + list(zip(names, dtypes)))
    out = np.recarray(rec.shape, dtype=merged_dtype)
    for field in rec.dtype.fields:
        out[field] = rec[field]
    for name, arr in zip(names, arrs):
        out[name] = arr
    return out
def rec_drop_fields(rec, names):
    """
    Return a new numpy record array with fields in *names* dropped.
    """
    drop = set(names)
    kept = [(name, rec.dtype[name]) for name in rec.dtype.names
            if name not in drop]
    out = np.recarray(rec.shape, dtype=np.dtype(kept))
    for name, _ in kept:
        out[name] = rec[name]
    return out
def rec_keep_fields(rec, names):
    """
    Return a new numpy record array with only fields listed in names
    """
    if cbook.is_string_like(names):
        # accept a comma-separated string of field names
        names = names.split(',')
    columns = [rec[name] for name in names]
    return np.rec.fromarrays(columns, names=names)
def rec_groupby(r, groupby, stats):
    """
    *r* is a numpy record array

    *groupby* is a sequence of record array attribute names that
    together form the grouping key.  eg ('date', 'productcode')

    *stats* is a sequence of (*attr*, *func*, *outname*) tuples which
    will call ``x = func(attr)`` and assign *x* to the record array
    output with attribute *outname*.  For example::

      stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )

    Return record array has *dtype* names for each attribute name in
    the *groupby* argument, with the associated group values, and
    for each outname name in the *stats* argument, with the associated
    stat summary output.
    """
    # map each groupby key tuple -> list of row indices with that key
    groups = dict()
    for i, row in enumerate(r):
        key = tuple([row[attr] for attr in groupby])
        groups.setdefault(key, []).append(i)

    # emit one output row per key, in sorted key order
    rows = []
    for key in sorted(groups):
        output_row = list(key)
        subset = r[groups[key]]
        # apply every stat function to this group's slice
        output_row.extend([func(subset[attr])
                           for attr, func, outname in stats])
        rows.append(output_row)

    # build the output record array with groupby and outname attributes
    attrs, funcs, outnames = list(zip(*stats))
    names = list(groupby)
    names.extend(outnames)
    return np.rec.fromrecords(rows, names=names)
def rec_summarize(r, summaryfuncs):
    """
    *r* is a numpy record array

    *summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
    which will apply *func* to the array *r*[attr] and assign the
    output to a new attribute name *outname*.  The returned record
    array is identical to *r*, with extra arrays for each element in
    *summaryfuncs*.
    """
    names = list(r.dtype.names)
    columns = [r[n] for n in names]
    # append one derived column per summary spec
    for attr, func, outname in summaryfuncs:
        names.append(outname)
        columns.append(np.asarray(func(r[attr])))
    return np.rec.fromarrays(columns, names=names)
def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1', r2postfix='2'):
    """
    Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
    field names -- if *key* is a string it is assumed to be a single
    attribute name.  If *r1* and *r2* have equal values on all the keys
    in the *key* tuple, then their fields will be merged into a new
    record array containing the intersection of the fields of *r1* and
    *r2*.

    *r1* (also *r2*) must not have any duplicate keys.

    The *jointype* keyword can be 'inner', 'outer', 'leftouter'.  To
    do a rightouter join just reverse *r1* and *r2*.

    The *defaults* keyword is a dictionary filled with
    ``{column_name:default_value}`` pairs.

    The keywords *r1postfix* and *r2postfix* are postfixed to column names
    (other than keys) that are both in *r1* and *r2*.
    """
    if cbook.is_string_like(key):
        key = (key, )

    for name in key:
        if name not in r1.dtype.names:
            raise ValueError('r1 does not have key field %s' % name)
        if name not in r2.dtype.names:
            raise ValueError('r2 does not have key field %s' % name)

    def makekey(row):
        return tuple([row[name] for name in key])

    # key tuple -> row index, for each input; assumes keys are unique.
    r1d = dict([(makekey(row), i) for i, row in enumerate(r1)])
    r2d = dict([(makekey(row), i) for i, row in enumerate(r2)])

    r1keys = set(r1d.keys())
    r2keys = set(r2d.keys())

    common_keys = r1keys & r2keys

    r1ind = np.array([r1d[k] for k in common_keys])
    r2ind = np.array([r2d[k] for k in common_keys])

    common_len = len(common_keys)
    left_len = right_len = 0
    if jointype == "outer" or jointype == "leftouter":
        left_keys = r1keys.difference(r2keys)
        left_ind = np.array([r1d[k] for k in left_keys])
        left_len = len(left_ind)
    if jointype == "outer":
        right_keys = r2keys.difference(r1keys)
        right_ind = np.array([r2d[k] for k in right_keys])
        right_len = len(right_ind)

    def key_desc(name):
        'if name is a string key, use the larger size of r1 or r2 before merging'
        dt1 = r1.dtype[name]
        if dt1.type != np.string_:
            return (name, dt1.descr[0][1])

        # BUGFIX: this previously re-read r1's dtype (so the comparison
        # was a no-op) and compared ``.num`` -- the type *number*, which
        # is identical for all string widths.  Use r2's dtype and compare
        # itemsize, which is what actually differs between string columns.
        dt2 = r2.dtype[name]
        assert dt2.type == dt1.type
        if dt1.itemsize >= dt2.itemsize:
            return (name, dt1.descr[0][1])
        else:
            return (name, dt2.descr[0][1])

    keydesc = [key_desc(name) for name in key]

    def mapped_r1field(name):
        """
        The column name in *newrec* that corresponds to the column in *r1*.
        """
        if name in key or name not in r2.dtype.names:
            return name
        else:
            return name + r1postfix

    def mapped_r2field(name):
        """
        The column name in *newrec* that corresponds to the column in *r2*.
        """
        if name in key or name not in r1.dtype.names:
            return name
        else:
            return name + r2postfix

    r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr if desc[0] not in key]
    r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr if desc[0] not in key]
    newdtype = np.dtype(keydesc + r1desc + r2desc)

    newrec = np.recarray((common_len + left_len + right_len,), dtype=newdtype)

    if defaults is not None:
        # warn about default keys that will never be used
        for thiskey in defaults:
            if thiskey not in newdtype.names:
                warnings.warn('rec_join defaults key="%s" not in new dtype names "%s"' % (
                    thiskey, newdtype.names))

    # zero-fill numeric columns so unmatched rows have sane values
    for name in newdtype.names:
        dt = newdtype[name]
        if dt.kind in ('f', 'i'):
            newrec[name] = 0

    if jointype != 'inner' and defaults is not None:  # fill in the defaults enmasse
        newrec_fields = list(newrec.dtype.fields.keys())
        for k, v in defaults.items():
            if k in newrec_fields:
                newrec[k] = v

    # layout: [common rows | r1-only rows | r2-only rows]
    for field in r1.dtype.names:
        newfield = mapped_r1field(field)
        if common_len:
            newrec[newfield][:common_len] = r1[field][r1ind]
        if (jointype == "outer" or jointype == "leftouter") and left_len:
            newrec[newfield][common_len:(common_len + left_len)] = r1[field][left_ind]

    for field in r2.dtype.names:
        newfield = mapped_r2field(field)
        if field not in key and common_len:
            newrec[newfield][:common_len] = r2[field][r2ind]
        if jointype == "outer" and right_len:
            newrec[newfield][-right_len:] = r2[field][right_ind]

    newrec.sort(order=key)

    return newrec
def recs_join(key, name, recs, jointype='outer', missing=0., postfixes=None):
    """
    Join a sequence of record arrays on a single column *key*.

    This function only joins a single column of the multiple record arrays.

    *key*
      is the column name that acts as a key

    *name*
      is the name of the column that we want to join

    *recs*
      is a list of record arrays to join

    *jointype*
      is a string 'inner' or 'outer'

    *missing*
      is what any missing field is replaced by

    *postfixes*
      if not None, a len recs sequence of postfixes

    returns a record array with columns [rowkey, name0, name1, ... namen-1].
    or if postfixes [PF0, PF1, ..., PFN-1] are supplied,
    [rowkey, namePF0, namePF1, ... namePFN-1].

    Example::

      r = recs_join("date", "close", recs=[r0, r1], missing=0.)
    """
    iters = [iter(r) for r in recs]
    aligned = cbook.align_iterators(operator.attrgetter(key), *iters)

    def pick(rec):
        # A record absent from one input contributes the *missing* filler.
        return missing if rec is None else rec[name]

    rows = []
    if jointype == "outer":
        for rowkey, group in aligned:
            rows.append([rowkey] + list(map(pick, group)))
    elif jointype == "inner":
        for rowkey, group in aligned:
            if None not in group:  # throw out any Nones
                rows.append([rowkey] + list(map(pick, group)))

    if postfixes is None:
        postfixes = ['%d' % i for i in range(len(recs))]
    colnames = ",".join([key] + ["%s%s" % (name, postfix) for postfix in postfixes])
    return np.rec.fromrecords(rows, names=colnames)
def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
            converterd=None, names=None, missing='', missingd=None,
            use_mrecords=False, dayfirst=False, yearfirst=False):
    """
    Load data from comma/space/tab delimited file in *fname* into a
    numpy record array and return the record array.

    If *names* is *None*, a header row is required to automatically
    assign the recarray names.  The headers will be lower cased,
    spaces will be converted to underscores, and illegal attribute
    name characters removed.  If *names* is not *None*, it is a
    sequence of names to use for the column names.  In this case, it
    is assumed there is no header row.

    - *fname*: can be a filename or a file handle.  Support for gzipped
      files is automatic, if the filename ends in '.gz'

    - *comments*: the character used to indicate the start of a comment
      in the file, or *None* to switch off the removal of comments

    - *skiprows*: is the number of rows from the top to skip

    - *checkrows*: is the number of rows to check to validate the column
      data type.  When set to zero all rows are validated.

    - *converterd*: if not *None*, is a dictionary mapping column number or
      munged column name to a converter function.

    - *names*: if not None, is a list of header names.  In this case, no
      header will be read from the file

    - *missingd* is a dictionary mapping munged column names to field values
      which signify that the field does not contain actual data and should
      be masked, e.g., '0000-00-00' or 'unused'

    - *missing*: a string whose value signals a missing field regardless of
      the column it appears in

    - *use_mrecords*: if True, return an mrecords.fromrecords record array if
      any of the data are missing

    - *dayfirst*: default is False so that MM-DD-YY has precedence over
      DD-MM-YY.  See http://labix.org/python-dateutil#head-b95ce2094d189a89f80f5ae52a05b4ab7b41af47
      for further information.

    - *yearfirst*: default is False so that MM-DD-YY has precedence over
      YY-MM-DD.  See http://labix.org/python-dateutil#head-b95ce2094d189a89f80f5ae52a05b4ab7b41af47
      for further information.

    If no rows are found, *None* is returned -- see :file:`examples/loadrec.py`
    """

    if converterd is None:
        converterd = dict()

    if missingd is None:
        missingd = {}

    import dateutil.parser
    import datetime

    fh = cbook.to_filehandle(fname)

    class FH:
        """
        For space-delimited files, we want different behavior than
        comma or tab.  Generally, we want multiple spaces to be
        treated as a single separator, whereas with comma and tab we
        want multiple commas to return multiple (empty) fields.  The
        join/strip trick below effects this.
        """
        def __init__(self, fh):
            self.fh = fh

        def close(self):
            self.fh.close()

        def seek(self, arg):
            self.fh.seek(arg)

        def fix(self, s):
            # collapse any run of whitespace into a single space
            return ' '.join(s.split())

        def __next__(self):
            return self.fix(next(self.fh))

        def __iter__(self):
            for line in self.fh:
                yield self.fix(line)

    if delimiter==' ':
        fh = FH(fh)

    reader = csv.reader(fh, delimiter=delimiter)

    def process_skiprows(reader):
        # consume (and discard) the first *skiprows* rows of *reader*
        if skiprows:
            for i, row in enumerate(reader):
                if i>=(skiprows-1): break

        return fh, reader

    process_skiprows(reader)

    def ismissing(name, val):
        "Should the value val in column name be masked?"
        if val == missing or val == missingd.get(name) or val == '':
            return True
        else:
            return False

    def with_default_value(func, default):
        # wrap converter *func* so that missing fields yield *default*
        def newfunc(name, val):
            if ismissing(name, val):
                return default
            else:
                return func(val)
        return newfunc

    def mybool(x):
        # strict bool parser: only the exact strings 'True'/'False'
        if x=='True': return True
        elif x=='False': return False
        else: raise ValueError('invalid bool')

    dateparser = dateutil.parser.parse
    # converters with per-type fallbacks for missing fields
    mydateparser = with_default_value(dateparser, datetime.date(1,1,1))
    myfloat = with_default_value(float, np.nan)
    myint = with_default_value(int, -1)
    mystr = with_default_value(str, '')
    mybool = with_default_value(mybool, None)

    def mydate(x):
        # try and return a date object
        d = dateparser(x, dayfirst=dayfirst, yearfirst=yearfirst)

        if d.hour>0 or d.minute>0 or d.second>0:
            raise ValueError('not a date')
        return d.date()
    mydate = with_default_value(mydate, datetime.date(1,1,1))

    def get_func(name, item, func):
        # promote functions in this order
        funcmap = {mybool:myint,myint:myfloat, myfloat:mydate, mydate:mydateparser, mydateparser:mystr}
        try: func(name, item)
        except:
            if func==mystr:
                raise ValueError('Could not find a working conversion function')
            else: return get_func(name, item, funcmap[func]) # recurse
        else: return func

    # map column names that clash with builtins -- TODO - extend this list
    itemd = {
        'return' : 'return_',
        'file' : 'file_',
        'print' : 'print_',
        }

    def get_converters(reader):
        # infer a converter per column by trial over the first
        # *checkrows* rows (or all rows when checkrows==0)
        converters = None
        for i, row in enumerate(reader):
            if i==0:
                converters = [mybool]*len(row)
            if checkrows and i>checkrows:
                break
            #print i, len(names), len(row)
            #print 'converters', zip(converters, row)
            for j, (name, item) in enumerate(zip(names, row)):
                func = converterd.get(j)
                if func is None:
                    func = converterd.get(name)
                if func is None:
                    #if not item.strip(): continue
                    func = converters[j]
                    if len(item.strip()):
                        func = get_func(name, item, func)
                else:
                    # how should we handle custom converters and defaults?
                    func = with_default_value(func, None)
                converters[j] = func
        return converters

    # Get header and remove invalid characters
    needheader = names is None

    if needheader:
        for row in reader:
            #print 'csv2rec', row
            if len(row) and comments is not None and row[0].startswith(comments):
                continue
            headers = row
            break

        # remove these chars
        delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
        delete.add('"')

        names = []
        seen = dict()
        for i, item in enumerate(headers):
            item = item.strip().lower().replace(' ', '_')
            item = ''.join([c for c in item if c not in delete])
            if not len(item):
                item = 'column%d'%i

            item = itemd.get(item, item)
            cnt = seen.get(item, 0)
            # de-duplicate repeated header names with a numeric suffix
            if cnt>0:
                names.append(item + '_%d'%cnt)
            else:
                names.append(item)
            seen[item] = cnt+1

    else:
        if cbook.is_string_like(names):
            names = [n.strip() for n in names.split(',')]

    # get the converter functions by inspecting checkrows
    converters = get_converters(reader)
    if converters is None:
        raise ValueError('Could not find any valid data in CSV file')

    # reset the reader and start over
    fh.seek(0)
    reader = csv.reader(fh, delimiter=delimiter)
    process_skiprows(reader)

    if needheader:
        while 1:
            # skip past any comments and consume one line of column header
            row = next(reader)
            if len(row) and comments is not None and row[0].startswith(comments):
                continue
            break

    # iterate over the remaining rows and convert the data to date
    # objects, ints, or floats as approriate
    rows = []
    rowmasks = []
    for i, row in enumerate(reader):
        if not len(row):
            continue
        if comments is not None and row[0].startswith(comments):
            continue
        # Ensure that the row returned always has the same nr of elements
        row.extend([''] * (len(converters) - len(row)))
        rows.append([func(name, val) for func, name, val in zip(converters, names, row)])
        rowmasks.append([ismissing(name, val) for name, val in zip(names, row)])
    fh.close()

    if not len(rows):
        return None

    if use_mrecords and np.any(rowmasks):
        try: from numpy.ma import mrecords
        except ImportError:
            raise RuntimeError('numpy 1.05 or later is required for masked array support')
        else:
            r = mrecords.fromrecords(rows, names=names, mask=rowmasks)
    else:
        r = np.rec.fromrecords(rows, names=names)
    return r
# a series of classes for describing the format intentions of various rec views
class FormatObj:
    """Base formatter: identity-ish conversions between values and strings."""

    def tostr(self, x):
        # The string written to output files is just the converted value.
        return self.toval(x)

    def toval(self, x):
        return str(x)

    def fromstr(self, s):
        return s

    def __hash__(self):
        # All instances of one formatter class hash alike, so duplicate
        # excel format styles are not created.
        return hash(self.__class__)
class FormatString(FormatObj):
    """Formatter emitting the repr of a string minus its surrounding quotes."""

    def tostr(self, x):
        # repr() handles quoting/escaping; drop the outer quote characters.
        return repr(x)[1:-1]
#class FormatString(FormatObj):
# def tostr(self, x):
# return '"%r"'%self.toval(x)
class FormatFormatStr(FormatObj):
    """Formatter driven by a %-style format string *fmt*."""

    def __init__(self, fmt):
        self.fmt = fmt

    def tostr(self, x):
        # None cannot be %-formatted meaningfully; spell it out.
        if x is None:
            return 'None'
        return self.fmt % self.toval(x)
class FormatFloat(FormatFormatStr):
    """Fixed-precision float formatter with an optional display scale."""

    def __init__(self, precision=4, scale=1.):
        super().__init__('%%1.%df' % precision)
        self.precision = precision
        self.scale = scale

    def __hash__(self):
        # Formats sharing precision and scale share one excel style.
        return hash((self.__class__, self.precision, self.scale))

    def toval(self, x):
        # Apply the display scaling (e.g. x100 for percentages).
        return x if x is None else x * self.scale

    def fromstr(self, s):
        # Undo the display scaling on the way back in.
        return float(s) / self.scale
class FormatInt(FormatObj):
    """Integer formatter: values round-trip through ``int``."""

    def tostr(self, x):
        return str(int(x))

    def toval(self, x):
        return int(x)

    def fromstr(self, s):
        return int(s)
class FormatBool(FormatObj):
    """Formatter for booleans stored as the strings 'True'/'False'."""

    def toval(self, x):
        return str(x)

    def fromstr(self, s):
        # BUGFIX: this used ``bool(s)``, which is True for ANY non-empty
        # string -- including 'False'.  Parse the canonical text written
        # by toval() instead.
        return s == 'True'
class FormatPercent(FormatFloat):
    """Float formatter that displays fractional values as percentages."""

    def __init__(self, precision=4):
        # scale=100 turns 0.25 into 25.0 for display.
        super().__init__(precision, scale=100.)
class FormatThousands(FormatFloat):
    """Float formatter that displays values in units of thousands."""

    def __init__(self, precision=4):
        super().__init__(precision, scale=1e-3)
class FormatMillions(FormatFloat):
    """Float formatter that displays values in units of millions."""

    def __init__(self, precision=4):
        super().__init__(precision, scale=1e-6)
class FormatDate(FormatObj):
    """Formatter for date values using a strftime format string."""

    def __init__(self, fmt):
        self.fmt = fmt

    def __hash__(self):
        # Styles are shared per (class, fmt) pair.
        return hash((self.__class__, self.fmt))

    def toval(self, x):
        if x is None:
            return 'None'
        return x.strftime(self.fmt)

    def fromstr(self, x):
        import dateutil.parser
        return dateutil.parser.parse(x).date()
class FormatDatetime(FormatDate):
    """Formatter for datetime values; keeps the time part on parsing."""

    def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
        super().__init__(fmt)

    def fromstr(self, x):
        import dateutil.parser
        return dateutil.parser.parse(x)
# Default mapping from numpy scalar types to formatter instances, used
# by get_formatd() when no explicit format is supplied for a column.
defaultformatd = {
    np.bool_ : FormatBool(),
    np.int16 : FormatInt(),
    np.int32 : FormatInt(),
    np.int64 : FormatInt(),
    np.float32 : FormatFloat(),
    np.float64 : FormatFloat(),
    np.object_ : FormatObj(),
    np.string_ : FormatString(),
    }
def get_formatd(r, formatd=None):
    """
    Build a formatd guaranteed to have a key for every dtype name of *r*.

    Missing entries are filled from ``defaultformatd`` keyed on the
    column's scalar type, falling back to a plain :class:`FormatObj`.
    """
    if formatd is None:
        formatd = dict()

    # note: the index from the old enumerate() was unused, so iterate names directly
    for name in r.dtype.names:
        dt = r.dtype[name]
        format = formatd.get(name)
        if format is None:
            format = defaultformatd.get(dt.type, FormatObj())
        formatd[name] = format
    return formatd
def csvformat_factory(format):
    """
    Return a deep copy of *format* adjusted for CSV storage: floats are
    written unscaled and at full precision so they round-trip exactly.
    """
    newformat = copy.deepcopy(format)
    if isinstance(newformat, FormatFloat):
        newformat.scale = 1.  # override scaling for storage
        newformat.fmt = '%r'
    return newformat
def rec2txt(r, header=None, padding=3, precision=3, fields=None):
    """
    Returns a textual representation of a record array.

    *r*: numpy recarray

    *header*: list of column headers

    *padding*: space between each column

    *precision*: number of decimal places to use for floats.
        Set to an integer to apply to all floats.  Set to a
        list of integers to apply precision individually.
        Precision for non-floats is simply ignored.

    *fields* : if not None, a list of field names to print.  fields
        can be a list of strings like ['field1', 'field2'] or a single
        comma separated string like 'field1,field2'

    Example::

      precision=[0,2,3]

    Output::

      ID    Price   Return
      ABC   12.54    0.234
      XYZ    6.32   -0.076
    """
    if fields is not None:
        r = rec_keep_fields(r, fields)

    if cbook.is_numlike(precision):
        precision = [precision]*len(r.dtype)

    def get_type(item, atype=int):
        # Promote int -> float -> str until str(item) parses as atype.
        tdict = {None: int, int: float, float: str}
        try:
            atype(str(item))
        except:
            return get_type(item, tdict[atype])
        return atype

    def get_justify(colname, column, precision):
        # Return (justification, width, format): 0 = left justify
        # (strings), 1 = right justify (numbers).
        ntype = type(column[0])

        # BUGFIX: np.str, np.int, np.float, np.string0 and np.float_ are
        # aliases removed from modern numpy; use the builtins plus the
        # surviving numpy scalar types (behavior is unchanged).
        if ntype == str or ntype == bytes or ntype == np.str_ or ntype == np.bytes_:
            length = max(len(colname), column.itemsize)
            return 0, length+padding, "%s" # left justify

        if ntype == int or ntype == np.int8 or ntype == np.int16 or ntype == np.int32 or ntype == np.int64 or ntype == np.int_:
            length = max(len(colname), np.max(list(map(len, list(map(str, column))))))
            return 1, length+padding, "%d" # right justify

        # np.float96/float128 only exist on some platforms, hence the guards.
        if ntype == float or ntype == np.float32 or ntype == np.float64 or (hasattr(np, 'float96') and (ntype == np.float96)) or (hasattr(np, 'float128') and (ntype == np.float128)):
            fmt = "%." + str(precision) + "f"
            length = max(len(colname), np.max(list(map(len, [fmt % x for x in column]))))
            return 1, length+padding, fmt # right justify

        return 0, max(len(colname), np.max(list(map(len, list(map(str, column)))))) + padding, "%s"

    if header is None:
        header = r.dtype.names

    justify_pad_prec = [get_justify(header[i], r.__getitem__(colname), precision[i]) for i, colname in enumerate(r.dtype.names)]

    # Fold in an extra spacer column where justification flips, so
    # adjacent left/right justified columns stay visually separated.
    justify_pad_prec_spacer = []
    for i in range(len(justify_pad_prec)):
        just, pad, prec = justify_pad_prec[i]
        if i == 0:
            justify_pad_prec_spacer.append((just, pad, prec, 0))
        else:
            pjust, ppad, pprec = justify_pad_prec[i-1]
            if pjust == 0 and just == 1:
                justify_pad_prec_spacer.append((just, pad-padding, prec, 0))
            elif pjust == 1 and just == 0:
                justify_pad_prec_spacer.append((just, pad, prec, padding))
            else:
                justify_pad_prec_spacer.append((just, pad, prec, 0))

    def format(item, just_pad_prec_spacer):
        just, pad, prec, spacer = just_pad_prec_spacer
        if just == 0:
            return spacer*' ' + str(item).ljust(pad)
        else:
            if get_type(item) == float:
                item = (prec % float(item))
            elif get_type(item) == int:
                item = (prec % int(item))
            return item.rjust(pad)

    textl = []
    textl.append(''.join([format(colitem, justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)]))
    for i, row in enumerate(r):
        textl.append(''.join([format(colitem, justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)]))
        if i == 0:
            textl[0] = textl[0].rstrip()
    text = os.linesep.join(textl)
    return text
def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
            missingd=None, withheader=True):
    """
    Save the data from numpy recarray *r* into a
    comma-/space-/tab-delimited file.  The record array dtype names
    will be used for column headers.

    *fname*: can be a filename or a file handle.  Support for gzipped
      files is automatic, if the filename ends in '.gz'

    *withheader*: if withheader is False, do not write the attribute
      names in the first row

    for formatd type FormatFloat, we override the precision to store
    full precision floats in the CSV file

    .. seealso::

        :func:`csv2rec`
            For information about *missing* and *missingd*, which can
            be used to fill in masked values into your CSV file.
    """
    if missingd is None:
        missingd = dict()

    def with_mask(func):
        # Wrap a formatter so masked cells emit their missing-value marker.
        def newfunc(val, mask, mval):
            if mask:
                return mval
            else:
                return func(val)
        return newfunc

    if r.ndim != 1:
        raise ValueError('rec2csv only operates on 1 dimensional recarrays')

    formatd = get_formatd(r, formatd)
    funcs = []
    # note: the index from the old enumerate() was unused
    for name in r.dtype.names:
        funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))

    fh, opened = cbook.to_filehandle(fname, 'wb', return_opened=True)
    writer = csv.writer(fh, delimiter=delimiter)
    header = r.dtype.names
    if withheader:
        writer.writerow(header)

    # Our list of specials for missing values
    mvals = []
    for name in header:
        mvals.append(missingd.get(name, missing))

    ismasked = False
    if len(r):
        row = r[0]
        # masked recarrays expose a per-row field mask
        ismasked = hasattr(row, '_fieldmask')

    for row in r:
        if ismasked:
            row, rowmask = row.item(), row._fieldmask.item()
        else:
            rowmask = [False] * len(row)
        writer.writerow([func(val, mask, mval) for func, val, mask, mval
                         in zip(funcs, row, rowmask, mvals)])
    if opened:
        fh.close()
def griddata(x, y, z, xi, yi, interp='nn'):
    """
    ``zi = griddata(x,y,z,xi,yi)`` fits a surface of the form *z* =
    *f*(*x*, *y*) to the data in the (usually) nonuniformly spaced
    vectors (*x*, *y*, *z*).  :func:`griddata` interpolates this
    surface at the points specified by (*xi*, *yi*) to produce
    *zi*. *xi* and *yi* must describe a regular grid, can be either 1D
    or 2D, but must be monotonically increasing.

    A masked array is returned if any grid points are outside convex
    hull defined by input data (no extrapolation is done).

    If interp keyword is set to '`nn`' (default),
    uses natural neighbor interpolation based on Delaunay
    triangulation.  By default, this algorithm is provided by the
    :mod:`matplotlib.delaunay` package, written by Robert Kern.  The
    triangulation algorithm in this package is known to fail on some
    nearly pathological cases. For this reason, a separate toolkit
    (:mod:`mpl_tookits.natgrid`) has been created that provides a more
    robust algorithm fof triangulation and interpolation.  This
    toolkit is based on the NCAR natgrid library, which contains code
    that is not redistributable under a BSD-compatible license.  When
    installed, this function will use the :mod:`mpl_toolkits.natgrid`
    algorithm, otherwise it will use the built-in
    :mod:`matplotlib.delaunay` package.

    If the interp keyword is set to '`linear`', then linear interpolation
    is used instead of natural neighbor. In this case, the output grid
    is assumed to be regular with a constant grid spacing in both the x and
    y directions. For regular grids with nonconstant grid spacing, you
    must use natural neighbor interpolation.  Linear interpolation is only valid if
    :mod:`matplotlib.delaunay` package is used - :mod:`mpl_tookits.natgrid`
    only provides natural neighbor interpolation.

    The natgrid matplotlib toolkit can be downloaded from
    http://sourceforge.net/project/showfiles.php?group_id=80706&package_id=142792
    """
    try:
        from mpl_toolkits.natgrid import _natgrid, __version__
        _use_natgrid = True
    except ImportError:
        import matplotlib.delaunay as delaunay
        from matplotlib.delaunay import __version__
        _use_natgrid = False

    # report which backend is in use, once per process
    if not griddata._reported:
        if _use_natgrid:
            verbose.report('using natgrid version %s' % __version__)
        else:
            verbose.report('using delaunay version %s' % __version__)
        griddata._reported = True

    if xi.ndim != yi.ndim:
        raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
    if xi.ndim != 1 and xi.ndim != 2:
        raise TypeError("inputs xi and yi must be 1D or 2D.")
    if not len(x) == len(y) == len(z):
        raise TypeError("inputs x,y,z must all be 1D arrays of the same length")

    # remove masked points.
    if hasattr(z, 'mask'):
        # make sure mask is not a scalar boolean array.
        if z.mask.ndim:
            x = x.compress(z.mask == False)
            y = y.compress(z.mask == False)
            z = z.compressed()

    if _use_natgrid:  # use natgrid toolkit if available.
        if interp != 'nn':
            raise ValueError("only natural neighor interpolation"
                             " allowed when using natgrid toolkit in griddata.")
        if xi.ndim == 2:
            xi = xi[0, :]
            yi = yi[:, 0]

        # override default natgrid internal parameters.
        _natgrid.seti('ext', 0)
        _natgrid.setr('nul', np.nan)

        # cast input arrays to doubles (this makes a copy)
        # BUGFIX: np.float was an alias for the builtin float, removed in
        # modern numpy; np.float64 is the identical double-precision type.
        x = x.astype(np.float64)
        y = y.astype(np.float64)
        z = z.astype(np.float64)
        xo = xi.astype(np.float64)
        yo = yi.astype(np.float64)
        if min(xo[1:] - xo[0:-1]) < 0 or min(yo[1:] - yo[0:-1]) < 0:
            raise ValueError('output grid defined by xi,yi must be monotone increasing')

        # allocate array for output (buffer will be overwritten by nagridd)
        zo = np.empty((yo.shape[0], xo.shape[0]), np.float64)
        _natgrid.natgridd(x, y, z, xo, yo, zo)
    else:  # use Robert Kern's delaunay package from scikits (default)
        if xi.ndim != yi.ndim:
            raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
        if xi.ndim != 1 and xi.ndim != 2:
            raise TypeError("inputs xi and yi must be 1D or 2D.")
        if xi.ndim == 1:
            xi, yi = np.meshgrid(xi, yi)

        # triangulate data
        tri = delaunay.Triangulation(x, y)

        # interpolate data
        if interp == 'nn':
            interp = tri.nn_interpolator(z)
            zo = interp(xi, yi)
        elif interp == 'linear':
            # make sure grid has constant dx, dy
            dx = xi[0, 1:] - xi[0, 0:-1]
            dy = yi[1:, 0] - yi[0:-1, 0]
            epsx = np.finfo(xi.dtype).resolution
            epsy = np.finfo(yi.dtype).resolution
            if dx.max() - dx.min() > epsx or dy.max() - dy.min() > epsy:
                raise ValueError("output grid must have constant spacing"
                                 " when using interp='linear'")

            interp = tri.linear_interpolator(z)
            zo = interp[yi.min():yi.max():complex(0, yi.shape[0]),
                        xi.min():xi.max():complex(0, xi.shape[1])]
        else:
            raise ValueError("interp keyword must be one of"
                             " 'linear' (for linear interpolation) or 'nn'"
                             " (for natural neighbor interpolation). Default is 'nn'.")

    # mask points on grid outside convex hull of input data.
    if np.any(np.isnan(zo)):
        zo = np.ma.masked_where(np.isnan(zo), zo)
    return zo
griddata._reported = False
##################################################
# Linear interpolation algorithms
##################################################
def less_simple_linear_interpolation(x, y, xi, extrap=False):
    """
    This function provides simple (but somewhat less so than
    :func:`cbook.simple_linear_interpolation`) linear interpolation.
    :func:`simple_linear_interpolation` will give a list of point
    between a start and an end, while this does true linear
    interpolation at an arbitrary set of points.

    This is very inefficient linear interpolation meant to be used
    only for a small number of points in relatively non-intensive use
    cases.  For real linear interpolation, use scipy.
    """
    if cbook.is_scalar(xi):
        xi = [xi]

    x = np.asarray(x)
    y = np.asarray(y)
    xi = np.asarray(xi)

    # output has one entry per requested abscissa, NaN where undefined
    out_shape = list(y.shape)
    out_shape[0] = len(xi)
    yi = np.tile(np.nan, out_shape)

    for k, xval in enumerate(xi):
        exact = (x == xval)
        if np.any(exact):
            # exact hit: copy the first matching sample
            hits, = np.nonzero(exact)
            yi[k] = y[hits[0]]
        elif xval < x[0]:
            if extrap:
                yi[k] = y[0]
        elif xval > x[-1]:
            if extrap:
                yi[k] = y[-1]
        else:
            # interpolate between the nearest bracketing samples
            below, = np.nonzero(x < xval)
            j = max(below)
            yi[k] = y[j] + (xval - x[j]) / (x[j+1] - x[j]) * (y[j+1] - y[j])

    return yi
def slopes(x, y):
    """
    :func:`slopes` calculates the slope *y*'(*x*)

    The slope is estimated using the slope obtained from that of a
    parabola through any three consecutive points.

    This method should be superior to that described in the appendix
    of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
    W. Stineman (Creative Computing July 1980) in at least one aspect:

      Circles for interpolation demand a known aspect ratio between
      *x*- and *y*-values.  For many functions, however, the abscissa
      are given in different dimensions, so an aspect ratio is
      completely arbitrary.

    The parabola method gives very similar results to the circle
    method for most regular cases but behaves much better in special
    cases.

    Norbert Nemec, Institute of Theoretical Physics, University or
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de

    (inspired by a original implementation by Halldor Bjornsson,
    Icelandic Meteorological Office, March 2006 halldor at vedur.is)
    """
    # Cast key variables as float.
    # BUGFIX: np.float_ was removed in numpy 2.0; np.float64 is the same type.
    x = np.asarray(x, np.float64)
    y = np.asarray(y, np.float64)

    yp = np.zeros(y.shape, np.float64)

    dx = x[1:] - x[:-1]
    dy = y[1:] - y[:-1]
    dydx = dy/dx

    # interior points: slope of the parabola through three consecutive points
    yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
    # endpoints: one-sided parabolic estimate
    yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
    yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
    return yp
def stineman_interp(xi, x, y, yp=None):
    """
    Given data vectors *x* and *y*, the slope vector *yp* and a new
    abscissa vector *xi*, the function :func:`stineman_interp` uses
    Stineman interpolation to calculate a vector *yi* corresponding to
    *xi*.

    Here's an example that generates a coarse sine curve, then
    interpolates over a finer abscissa::

      x = linspace(0,2*pi,20);  y = sin(x); yp = cos(x)
      xi = linspace(0,2*pi,40);
      yi = stineman_interp(xi,x,y,yp);
      plot(x,y,'o',xi,yi)

    The interpolation method is described in the article A
    CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
    W. Stineman (Creative Computing, July 1980).

    For *yp* = *None*, the routine automatically determines the slopes
    using the :func:`slopes` routine.

    *x* is assumed to be sorted in increasing order.

    For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
    tries an extrapolation.  The relevance of the data obtained from
    this, of course, is questionable...

    Original implementation by Halldor Bjornsson, Icelandic
    Meteorolocial Office, March 2006 halldor at vedur.is

    Completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University or Regensburg, April
    2006 Norbert.Nemec at physik.uni-regensburg.de
    """
    # Cast key variables as float.
    # BUGFIX: np.float_ was removed in numpy 2.0; np.float64 is the same type.
    x = np.asarray(x, np.float64)
    y = np.asarray(y, np.float64)
    assert x.shape == y.shape
    if yp is None:
        yp = slopes(x, y)
    else:
        yp = np.asarray(yp, np.float64)

    xi = np.asarray(xi, np.float64)
    yi = np.zeros(xi.shape, np.float64)

    # calculate linear slopes
    dx = x[1:] - x[:-1]
    dy = y[1:] - y[:-1]
    s = dy/dx  # note length of s is N-1 so last element is #N-2

    # find the segment each xi is in
    # this line actually is the key to the efficiency of this implementation
    idx = np.searchsorted(x[1:-1], xi)

    # now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
    # except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]

    # the y-values that would come out from a linear interpolation:
    sidx = s.take(idx)
    xidx = x.take(idx)
    yidx = y.take(idx)
    xidxp1 = x.take(idx + 1)
    yo = yidx + sidx * (xi - xidx)

    # the difference that comes when using the slopes given in yp
    dy1 = (yp.take(idx) - sidx) * (xi - xidx)      # using the yp slope of the left point
    dy2 = (yp.take(idx + 1) - sidx) * (xi - xidxp1)  # using the yp slope of the right point

    dy1dy2 = dy1*dy2
    # The following is optimized for Python. The solution actually
    # does more calculations than necessary but exploiting the power
    # of numpy, this is far more efficient than coding a loop by hand
    # in Python
    yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32) + 1,
                                 ((2*xi - xidx - xidxp1)/((dy1 - dy2)*(xidxp1 - xidx)),
                                  0.0,
                                  1/(dy1 + dy2),))
    return yi
##################################################
# Code related to things in and around polygons
##################################################
def inside_poly(points, verts):
    """
    *points* is a sequence of *x*, *y* points.
    *verts* is a sequence of *x*, *y* vertices of a polygon.

    Return value is a sequence of indices into points for the points
    that are inside the polygon.
    """
    # Build the polygon path once, then test each point against it.
    poly = Path(verts)
    return [i for i, pt in enumerate(points) if poly.contains_point(pt)]
def poly_below(xmin, xs, ys):
    """
    Given a sequence of *xs* and *ys*, return the vertices of a
    polygon that has a horizontal base at *xmin* and an upper bound at
    the *ys*.  *xmin* is a scalar.

    Intended for use with :meth:`matplotlib.axes.Axes.fill`, e.g.::

      xv, yv = poly_below(0, x, y)
      ax.fill(xv, yv)
    """
    # Use the masked-array module when either input is masked.
    if ma.isMaskedArray(xs) or ma.isMaskedArray(ys):
        mod = ma
    else:
        mod = np

    xs = mod.asarray(xs)
    ys = mod.asarray(ys)
    npts = len(xs)
    assert len(ys) == npts

    # First half traces (xs, ys); second half returns along x == xmin
    # with the y values reversed, closing the polygon.
    x = xmin * mod.ones(2 * npts)
    y = mod.ones(2 * npts)
    x[:npts] = xs
    y[:npts] = ys
    y[npts:] = ys[::-1]
    return x, y
def poly_between(x, ylower, yupper):
    """
    Given a sequence of *x*, *ylower* and *yupper*, return the polygon
    that fills the regions between them.  *ylower* or *yupper* can be
    scalar or iterable.  If they are iterable, they must be equal in
    length to *x*.

    Return value is *x*, *y* arrays for use with
    :meth:`matplotlib.axes.Axes.fill`.
    """
    # Use the masked-array module when any input is masked.
    if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x):
        mod = ma
    else:
        mod = np

    npts = len(x)
    # Broadcast scalar bounds to full-length arrays.
    if not cbook.iterable(ylower):
        ylower = ylower * mod.ones(npts)
    if not cbook.iterable(yupper):
        yupper = yupper * mod.ones(npts)

    # Forward along the upper bound, back along the lower bound.
    xs = mod.concatenate((x, x[::-1]))
    ys = mod.concatenate((yupper, ylower[::-1]))
    return xs, ys
def is_closed_polygon(X):
    """
    Tests whether first and last object in a sequence are the same. These are
    presumably coordinates on a polygonal curve, in which case this function
    tests if that curve is closed.
    """
    first, last = X[0], X[-1]
    return np.all(first == last)
def contiguous_regions(mask):
    """
    return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
    True and we cover all such regions
    TODO: this is a pure python implementation which probably has a much faster numpy impl
    """
    regions = []
    start = None    # index where the current run of True values began
    idx = -1        # guards the tail append when mask is empty
    for idx, flag in enumerate(mask):
        if flag and start is None:
            start = idx
        elif start is not None and not flag:
            regions.append((start, idx))
            start = None
    # Close a run that extends to the end of the mask.
    if start is not None:
        regions.append((start, idx + 1))
    return regions
def cross_from_below(x, threshold):
    """
    return the indices into *x* where *x* crosses some threshold from
    below, eg the i's where::
      x[i-1]<threshold and x[i]>=threshold
    Example code::
        import matplotlib.pyplot as plt
        t = np.arange(0.0, 2.0, 0.1)
        s = np.sin(2*np.pi*t)
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(t, s, '-o')
        ax.axhline(0.5)
        ax.axhline(-0.5)
        ind = cross_from_below(s, 0.5)
        ax.vlines(t[ind], -1, 1)
        ind = cross_from_above(s, -0.5)
        ax.vlines(t[ind], -1, 1)
        plt.show()
    .. seealso::
        :func:`cross_from_above` and :func:`contiguous_regions`
    """
    x = np.asarray(x)
    # Compare consecutive pairs: a crossing at position i+1 means x[i]
    # was below the threshold and x[i+1] reached or passed it.
    # (The pointless no-op ``threshold = threshold`` has been removed.)
    ind = np.nonzero((x[:-1] < threshold) & (x[1:] >= threshold))[0]
    if len(ind):
        return ind + 1
    else:
        return ind
def cross_from_above(x, threshold):
    """
    return the indices into *x* where *x* crosses some threshold from
    above, eg the i's where::
      x[i-1]>=threshold and x[i]<threshold
    (Docstring fixed: it previously said "from below" and stated a
    condition that contradicted the code.)
    .. seealso::
        :func:`cross_from_below` and :func:`contiguous_regions`
    """
    x = np.asarray(x)
    # A downward crossing at i+1: x[i] at or above threshold, x[i+1] below.
    ind = np.nonzero((x[:-1] >= threshold) & (x[1:] < threshold))[0]
    if len(ind):
        return ind + 1
    else:
        return ind
##################################################
# Vector and path length geometry calculations
##################################################
def vector_lengths(X, P=2., axis=None):
    """
    Finds the length of a set of vectors in *n* dimensions. This is
    like the :func:`numpy.norm` function for vectors, but has the ability to
    work over a particular axis of the supplied array or matrix.
    Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
    elements of *X* along the given axis. If *axis* is *None*,
    compute over all elements of *X*.
    """
    X = np.asarray(X)
    powered = X ** P
    return np.sum(powered, axis=axis) ** (1. / P)
def distances_along_curve(X):
    """
    Computes the distance between a set of successive points in *N* dimensions.
    Where *X* is an *M* x *N* array or matrix. The distances between
    successive rows is computed. Distance is the standard Euclidean
    distance.
    """
    # Row-to-row deltas, then the Euclidean norm of each delta row.
    deltas = np.diff(X, axis=0)
    return vector_lengths(deltas, axis=1)
def path_length(X):
    """
    Computes the distance travelled along a polygonal curve in *N* dimensions.
    Where *X* is an *M* x *N* array or matrix. Returns an array of
    length *M* consisting of the distance along the curve at each point
    (i.e., the rows of *X*).
    """
    # Cumulative sum of segment lengths, with a leading zero so the
    # result has one entry per input point.
    segments = distances_along_curve(X)
    return np.concatenate((np.zeros(1), np.cumsum(segments)))
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
    """
    Converts a quadratic Bezier curve to a cubic approximation.
    The inputs are the *x* and *y* coordinates of the three control
    points of a quadratic curve, and the output is a tuple of *x* and
    *y* coordinates of the four control points of the cubic curve.
    """
    # The cubic shares the quadratic's endpoints; the first inner control
    # point sits 2/3 of the way from the start toward the quadratic's
    # single control point, and the second is offset from it by a third
    # of the endpoint-to-endpoint span.
    c1x = q0x + 2. / 3. * (q1x - q0x)
    c1y = q0y + 2. / 3. * (q1y - q0y)
    c2x = c1x + 1. / 3. * (q2x - q0x)
    c2y = c1y + 1. / 3. * (q2y - q0y)
    return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
def offset_line(y, yerr):
    """
    Offsets an array *y* by +/- an error and returns a tuple (y - err, y + err).
    The error term can be:
    * A scalar. In this case, the returned tuple is obvious.
    * A vector of the same length as *y*. The quantities y +/- err are computed
    component-wise.
    * A tuple of length 2. In this case, yerr[0] is the error below *y* and
    yerr[1] is error above *y*. For example::
      from pylab import *
      x = linspace(0, 2*pi, num=100, endpoint=True)
      y = sin(x)
      y_minus, y_plus = mlab.offset_line(y, 0.1)
      plot(x, y)
      fill_between(x, y_minus, y2=y_plus)
      show()
    """
    # Scalar errors, or vectors matching y's length, apply symmetrically.
    if cbook.is_numlike(yerr) or (cbook.iterable(yerr) and len(yerr) == len(y)):
        ymin = y - yerr
        ymax = y + yerr
    # A 2-element yerr gives separate below/above offsets.
    elif len(yerr) == 2:
        ymin, ymax = y - yerr[0], y + yerr[1]
    else:
        raise ValueError("yerr must be scalar, 1xN or 2xN")
    return ymin, ymax
| alephu5/Soundbyte | environment/lib/python3.3/site-packages/matplotlib/mlab.py | Python | gpl-3.0 | 97,233 | [
"Gaussian"
] | 4fab652c19aeda588edd66fec56ce626f61e764ffcfa59e331a3d5a6d148d1d9 |
#!/usr/bin/env python
#
# $File: virtualSplitter.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
import random
# Demonstrate virtual subpopulations (VSPs): build a two-deme population
# (200 + 400 individuals) with one 30-locus chromosome and an information
# field 'x', then partition each deme first by sex and then by 'x'.
pop = sim.Population(size=[200, 400], loci=[30], infoFields='x')
# assign random information fields
sim.initSex(pop)
sim.initInfo(pop, lambda: random.randint(0, 3), infoFields='x')
# define a virtual splitter by sex
pop.setVirtualSplitter(sim.SexSplitter())
pop.numVirtualSubPop()        # Number of defined VSPs
pop.subPopName([0, 0])        # Each VSP has a name
pop.subPopSize([0, 1])        # Size of VSP 1 in subpopulation 0
pop.subPopSize([0, 'Female']) # Refer to vsp by its name
# define a virtual splitter by information field 'x'
# (replaces the sex splitter; a population holds one splitter at a time)
pop.setVirtualSplitter(sim.InfoSplitter(field='x', values=[0, 1, 2, 3]))
pop.numVirtualSubPop()        # Number of defined VSPs
pop.subPopName([0, 0])        # Each VSP has a name
pop.subPopSize([0, 0])        # Size of VSP 0 in subpopulation 0
pop.subPopSize([1, 0])        # Size of VSP 0 in subpopulation 1
| BoPeng/simuPOP | docs/virtualSplitter.py | Python | gpl-2.0 | 1,951 | [
"VisIt"
] | 3243279b90fccde99333fe6f49c181783c61e73ad0e5efee9779ec1a54251ed9 |
import os
import unittest
from __main__ import vtk, qt, ctk, slicer
#
# TortuosityLogicTests
#
class TortuosityLogicTests:
    """Slicer scripted-module shell that registers the Tortuosity logic
    self-tests with the application's SelfTests framework."""

    def __init__(self, parent):
        # Module metadata displayed in Slicer's module panel.
        parent.title = "TortuosityLogicTests" # TODO make this more human readable by adding spaces
        parent.categories = ["Testing.TestCases"]
        parent.dependencies = []
        parent.contributors = ["Johan Andruejol (Kitware)"] # replace with "Firstname Lastname (Org)"
        parent.helpText = """
    """
        parent.acknowledgementText = """TODO""" # replace with organization, grant and thanks.
        self.parent = parent
        # Add this test to the SelfTest module's list for discovery when the module
        # is created. Since this module may be discovered before SelfTests itself,
        # create the list if it doesn't already exist.
        try:
            slicer.selfTests
        except AttributeError:
            slicer.selfTests = {}
        slicer.selfTests['TortuosityLogicTests'] = self.runTest

    def runTest(self):
        # Entry point called by the SelfTests framework.
        tester = TortuosityLogicTestsTest()
        tester.runTests()
#
# qTortuosityLogicTestsTest
#
class TortuosityLogicTestsTest(unittest.TestCase):
def delayDisplay(self,message,msec=1000):
"""This utility method displays a small dialog and waits.
This does two things: 1) it lets the event loop catch up
to the state of the test so that rendering and widget updates
have all taken place before the test continues and 2) it
shows the user/developer/tester the state of the test
so that we'll know when it breaks.
"""
print(message)
self.info = qt.QDialog()
self.infoLayout = qt.QVBoxLayout()
self.info.setLayout(self.infoLayout)
self.label = qt.QLabel(message,self.info)
self.infoLayout.addWidget(self.label)
qt.QTimer.singleShot(msec, self.info.close)
self.info.exec_()
def getTestMethodNames(self):
methods = []
for method in dir(self):
if (callable(getattr(self, method)) and method.find('test_') != -1):
methods.append(method)
return methods
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
def tearDown(self):
pass
def findWidget(self, widget, objectName):
if widget.objectName == objectName:
return widget
else:
children = []
for w in widget.children():
resulting_widget = self.findWidget(w, objectName)
if resulting_widget:
return resulting_widget
return None
def runTests(self):
"""Run as few or as many tests as needed here.
"""
for methodName in self.getTestMethodNames():
self.runTest(methodName)
def runTest(self, method):
self.setUp()
getattr(self, method)()
self.tearDown()
def runAndCheckMetrics(self, nameTemplate, expectedValues):
for i in range(len(expectedValues)):
self.delayDisplay('testing %s ' %(nameTemplate %i))
node = slicer.util.getFirstNodeByClassByName('vtkMRMLSpatialObjectsNode', nameTemplate %i)
self.assertTrue(node, 'loading node failed')
logic = slicer.modules.tortuosity.logic()
self.assertTrue(logic.RunMetrics(node, logic.All), 'RunMetrics failed')
dm = logic.GetDistanceMetricArray(node)
self.assertTrue(dm, 'No distance metric array')
icm = logic.GetInflectionCountMetricArray(node)
self.assertTrue(icm, 'No inflection count array')
soam = logic.GetSumOfAnglesMetricArray(node)
self.assertTrue(soam, 'No sum of angles array')
for index in range(dm.GetNumberOfTuples()):
dmValue = dm.GetValue(index)
icmValue = icm.GetValue(index)
soamValue = soam.GetValue(index)
msg = '%s value look up failed. Expected: %s Got: %s (Case #%s)'
self.assertAlmostEqual(dmValue, expectedValues[i]['DM'], 4, msg %('DM', expectedValues[i]['DM'], dmValue, i))
self.assertAlmostEqual(icmValue, expectedValues[i]['ICM'], 4, msg %('ICM', expectedValues[i]['ICM'], icmValue, i) )
self.assertAlmostEqual(soamValue, expectedValues[i]['SOAM'], 4, msg %('SOAM', expectedValues[i]['SOAM'], soamValue, i) )
def test_TestStraightVessels(self):
self.delayDisplay('test_TestStraightVessels')
nameTemplate = 'StraightTube_test%s'
expectedValues = [
{
'DM': 1.0,
'ICM': 1.0,
'SOAM:': 0.0,
},
{
'DM': 1.0,
'ICM': 1.0,
'SOAM:': 0.0,
},
{
'DM': 1.0,
'ICM': 1.0,
'SOAM:': 0.0,
},
]
self.runAndCheckMetrics(nameTemplate, expectedValues)
self.delayDisplay('Test passed!')
def test_TestSinusVessels(self):
self.delayDisplay('test_TestSinusVessels')
nameTemplate = 'SinusTube_test%s'
expectedValues = [
{
'DM': 1.21581,
'ICM': 1.21581 * 2.0,
'SOAM:': 0.411187,
},
{
'DM': 1.21581,
'ICM': 1.21581 * 4.0,
'SOAM:': 0.411187,
},
{
'DM': 5.87042,
'ICM': 5.87042 * 2.0,
'SOAM:': 0.158497,
},
{
'DM': 3.40308,
'ICM': 3.40308 * 2.0,
'SOAM:': 1.28584,
},
]
self.runAndCheckMetrics(nameTemplate, expectedValues)
self.delayDisplay('Test passed!')
#
# qWelcomeModuleTestWidget
#
class TortuosityLogicTestsWidget():
    """Minimal module GUI offering Reload / Reload-and-Test / Run-Tests
    buttons for developing the TortuosityLogicTests module."""

    def __init__(self, parent = None):
        # When instantiated standalone, build our own MRML-aware container
        # and show it immediately; otherwise embed into the given parent.
        if not parent:
            self.parent = slicer.qMRMLWidget()
            self.parent.setLayout(qt.QVBoxLayout())
            self.parent.setMRMLScene(slicer.mrmlScene)
        else:
            self.parent = parent
        self.layout = self.parent.layout()
        if not parent:
            self.setup()
            self.parent.show()
        self.moduleName = 'TortuosityLogicTests'
        self.tester = TortuosityLogicTestsTest()

    def setup(self):
        # Instantiate and connect widgets ...
        # reload button
        # (use this during development, but remove it when delivering
        # your module to users)
        self.reloadButton = qt.QPushButton("Reload")
        self.reloadButton.toolTip = "Reload this module."
        self.reloadButton.name = "Tests Reload"
        self.layout.addWidget(self.reloadButton)
        self.reloadButton.connect('clicked()', self.onReload)
        # reload and test button
        # (use this during development, but remove it when delivering
        # your module to users)
        self.reloadAndTestButton = qt.QPushButton("Reload and Test")
        self.reloadAndTestButton.toolTip = "Reload this module and then run the self tests."
        self.layout.addWidget(self.reloadAndTestButton)
        self.reloadAndTestButton.connect('clicked()', self.onReloadAndTest)
        self.testButton = qt.QPushButton('Run Tests')
        self.layout.addWidget(self.testButton)
        self.testButton.connect('clicked(bool)', self.tester.runTests)
        # Add vertical spacer
        self.layout.addStretch(1)

    def onReload(self):
        """Generic reload method for any scripted module.
        ModuleWizard will substitute correct default.
        """
        globals()[self.moduleName] = slicer.util.reloadScriptedModule(self.moduleName)

    def onReloadAndTest(self):
        # Reload the module code, then immediately run the self tests.
        self.onReload()
        self.tester.runTests()
| KitwareMedical/VesselView | Applications/App/Testing/Python/TortuosityLogicTests.py | Python | apache-2.0 | 7,095 | [
"VTK"
] | 184e465533752826dd5f10b90726b1f948e7d9acd0704bea649471aeab134641 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Construct and visualize phylogenetic trees from:
1. MCSCAN output
2. CDS sequences in FASTA format
Options are provided for each step:
1. sequence alignment:
ClustalW2 or MUSCLE (wrapped on Biopython)
2. alignment editing:
GBlocks (optional)
3. build trees:
NJ: PHYLIP
ML: RAxML or PHYML
Optional steps:
- reroot tree
- alternative topology test (SH test)
- TreeFix
The external software needs be installed first.
"""
import sys
import os
import os.path as op
import logging
import re
import warnings
from math import ceil
from itertools import chain
from functools import partial
import numpy as np
from ete2 import Tree
from Bio import SeqIO, AlignIO
from Bio.Data import CodonTable
from Bio.Emboss.Applications import FSeqBootCommandline, FDNADistCommandline, \
FNeighborCommandline, FConsenseCommandline
from Bio.Phylo.Applications import PhymlCommandline, RaxmlCommandline
from jcvi.apps.ks import AbstractCommandline, find_first_isoform, \
run_mrtrans, clustal_align_protein, muscle_align_protein
from jcvi.formats.base import must_open, DictFile, LineFile
from jcvi.formats.fasta import Fasta
from jcvi.utils.orderedcollections import OrderedDict
from jcvi.graphics.base import plt, savefig
from jcvi.apps.base import OptionParser, ActionDispatcher, mkdir, sh, getpath
# Lazily-resolved locations of the external phylogenetics tools; each call
# asks jcvi's getpath for the named tool and warns if it cannot be found.
GBLOCKS_BIN = partial(getpath, name="GBLOCKS", warn="warn")
PHYML_BIN = partial(getpath, name="PHYML", warn="warn")
RAXML_BIN = partial(getpath, name="RAXML", warn="warn")
FPHYLIP_BIN = partial(getpath, name="FPHYLIP", warn="warn")
TREEFIX_BIN = partial(getpath, name="TREEFIX", warn="warn")
class GblocksCommandline(AbstractCommandline):
    """Little commandline for Gblocks
    (http://molevol.cmima.csic.es/castresana/Gblocks.html).
    Accepts alignment in FASTA or NBRF/PIR format.
    """

    def __init__(self, aln_file, aln_type="c",
                 command=GBLOCKS_BIN("Gblocks"), **kwargs):
        self.aln_file = aln_file
        self.aln_type = aln_type
        self.command = command
        # Built-in option defaults; any caller-supplied kwargs override them.
        options = {"b4": 5, "b5": "h", "p": "n"}
        options.update(kwargs)
        self.parameters = ["-{0}={1}".format(key, val)
                           for key, val in options.items()]

    def __str__(self):
        head = "%s %s -t=%s " % (self.command, self.aln_file, self.aln_type)
        return head + " ".join(self.parameters)
class FfitchCommandline(AbstractCommandline):
    """Little commandline for ffitch in EMBOSS
    (http://www.molgen.mpg.de/~beck/embassy/phylipnew/ffitch.html).
    Infer branch lengths of tree.
    """

    def __init__(self, datafile, outtreefile, command=FPHYLIP_BIN("ffitch"),
                 intreefile=None, **kwargs):
        self.datafile = datafile
        self.outtreefile = outtreefile
        # Report file is named after the data file.
        self.outfile = datafile.rsplit(".", 1)[0] + ".ffitch"
        self.command = command
        # ffitch always receives an intreefile argument; pass an empty
        # quoted string when no input tree is supplied.
        self.intreefile = intreefile if intreefile else '""'
        self.parameters = ["-{0} {1}".format(key, val)
                           for key, val in kwargs.items()]

    def __str__(self):
        head = ("%s -datafile %s -intreefile %s -outfile %s "
                "-outtreefile %s " % (self.command, self.datafile,
                                      self.intreefile, self.outfile,
                                      self.outtreefile))
        return head + " ".join(self.parameters)
class TreeFixCommandline(AbstractCommandline):
    """Little commandline for TreeFix
    (http://compbio.mit.edu/treefix/).
    """

    def __init__(self, input, stree_file, smap_file, a_ext,
                 command=TREEFIX_BIN("treefix"), r=False, **kwargs):
        self.input = input
        self.s = stree_file
        self.S = smap_file
        self.A = a_ext
        self.command = command
        # Verbose level 1; log file named after the input alignment.
        # Caller kwargs override these defaults.
        options = {"V": 1,
                   "l": input.rsplit(".", 1)[0] + ".treefix.log"}
        options.update(kwargs)
        self.parameters = ["-{0} {1}".format(key, val)
                           for key, val in options.items()]
        if r:
            # optional bare flag, passed straight through to TreeFix
            self.parameters.append("-r")

    def __str__(self):
        head = "%s -s %s -S %s -A %s " % (self.command, self.s, self.S, self.A)
        return head + " ".join(self.parameters) + " %s" % self.input
def run_treefix(input, stree_file, smap_file, a_ext=".fasta", \
        o_ext=".dnd", n_ext=".treefix.dnd", **kwargs):
    """
    get the ML tree closest to the species tree
    """
    # Name of the rearranged tree TreeFix will produce.
    outtreefile = input.rsplit(o_ext, 1)[0] + n_ext
    cmd = TreeFixCommandline(input=input, stree_file=stree_file,
                             smap_file=smap_file, a_ext=a_ext,
                             o=o_ext, n=n_ext, **kwargs)
    print >>sys.stderr, "TreeFix:", cmd
    out, err = cmd.run()
    if err:
        print >>sys.stderr, "***TreeFix could not run"
        return None
    logging.debug("new tree written to {0}".format(outtreefile))
    return outtreefile
def run_gblocks(align_fasta_file, **kwargs):
    """
    remove poorly aligned positions and divergent regions with Gblocks
    """
    cl = GblocksCommandline(aln_file=align_fasta_file, **kwargs)
    r, e = cl.run()
    print >>sys.stderr, "Gblocks:", cl
    if e:
        print >>sys.stderr, "***Gblocks could not run"
        return None
    else:
        print >>sys.stderr, r
        # Extract the "(NN %)" retained-positions figure that Gblocks
        # prints in its "Gblocks alignment:" summary line.
        alignp = re.sub(r'.*Gblocks alignment:.*\(([0-9]{1,3}) %\).*', \
                r'\1', r, flags=re.DOTALL)
        alignp = int(alignp)
        if alignp <= 10:
            # Too aggressive a trim: fall back to the original alignment.
            print >>sys.stderr, \
                    "** WARNING ** Only %s %% positions retained by Gblocks. " \
                    "Results aborted. Using original alignment instead.\n" % alignp
            return None
        else:
            # Gblocks writes its output next to the input with a "-gb" suffix.
            return align_fasta_file+"-gb"
def run_ffitch(distfile, outtreefile, intreefile=None, **kwargs):
    """
    Infer tree branch lengths using ffitch in EMBOSS PHYLIP
    """
    cmd = FfitchCommandline(datafile=distfile, outtreefile=outtreefile,
                            intreefile=intreefile, **kwargs)
    out, err = cmd.run()
    if err:
        print >>sys.stderr, "***ffitch could not run"
        return None
    print >>sys.stderr, "ffitch:", cmd
    return outtreefile
def smart_reroot(treefile, outgroupfile, outfile, format=0):
    """
    simple function to reroot Newick format tree using ete2
    Tree reading format options see here:
    http://packages.python.org/ete2/tutorial/tutorial_trees.html#reading-newick-trees
    """
    tree = Tree(treefile, format=format)
    leaves = [t.name for t in tree.get_leaves()][::-1]
    outgroup = []
    # Outgroup file rows may be truncated prefixes of real leaf names;
    # use the first row that matches at least one leaf.
    for o in must_open(outgroupfile):
        o = o.strip()
        for leaf in leaves:
            if leaf[:len(o)] == o:
                outgroup.append(leaf)
        if outgroup:
            break
    if not outgroup:
        print >>sys.stderr, \
            "Outgroup not found. Tree {0} cannot be rerooted.".format(treefile)
        return treefile
    try:
        # Root on the common ancestor of all matched outgroup leaves.
        tree.set_outgroup(tree.get_common_ancestor(*outgroup))
    except ValueError:
        # NOTE(review): presumably hit when only a single outgroup leaf
        # matched, in which case that leaf is used directly -- verify.
        assert type(outgroup) == list
        outgroup = outgroup[0]
        tree.set_outgroup(outgroup)
    tree.write(outfile=outfile, format=format)
    logging.debug("Rerooted tree printed to {0}".format(outfile))
    return outfile
def build_nj_phylip(alignment, outfile, outgroup, work_dir="."):
    """
    build neighbor joining tree of DNA seqs with PHYLIP in EMBOSS
    PHYLIP manual
    http://evolution.genetics.washington.edu/phylip/doc/
    """
    phy_file = op.join(work_dir, "work", "aln.phy")
    try:
        AlignIO.write(alignment, file(phy_file, "w"), "phylip")
    except ValueError:
        # Strict PHYLIP truncates names to 10 characters, which can collide.
        print >>sys.stderr, \
            "Repeated seq name, possibly due to truncation. NJ tree not built."
        return None
    # Step 1: bootstrap the alignment (100 resamples, fixed seed).
    seqboot_out = phy_file.rsplit(".",1)[0] + ".fseqboot"
    seqboot_cl = FSeqBootCommandline(FPHYLIP_BIN("fseqboot"), \
        sequence=phy_file, outfile=seqboot_out, \
        seqtype="d", reps=100, seed=12345)
    stdout, stderr = seqboot_cl()
    logging.debug("Resampling alignment: %s" % seqboot_cl)
    # Step 2: distance matrices for every bootstrapped alignment.
    dnadist_out = phy_file.rsplit(".",1)[0] + ".fdnadist"
    dnadist_cl = FDNADistCommandline(FPHYLIP_BIN("fdnadist"), \
        sequence=seqboot_out, outfile=dnadist_out, method="f")
    stdout, stderr = dnadist_cl()
    logging.debug\
        ("Calculating distance for bootstrapped alignments: %s" % dnadist_cl)
    # Step 3: a neighbor-joining tree per bootstrap distance matrix.
    neighbor_out = phy_file.rsplit(".",1)[0] + ".njtree"
    e = phy_file.rsplit(".",1)[0] + ".fneighbor"
    neighbor_cl = FNeighborCommandline(FPHYLIP_BIN("fneighbor"), \
        datafile=dnadist_out, outfile=e, outtreefile=neighbor_out)
    stdout, stderr = neighbor_cl()
    logging.debug("Building Neighbor Joining tree: %s" % neighbor_cl)
    # Step 4: consensus tree over the bootstrap trees (carries support).
    consense_out = phy_file.rsplit(".",1)[0] + ".consensustree.nodesupport"
    e = phy_file.rsplit(".",1)[0] + ".fconsense"
    consense_cl = FConsenseCommandline(FPHYLIP_BIN("fconsense"), \
        intreefile=neighbor_out, outfile=e, outtreefile=consense_out)
    stdout, stderr = consense_cl()
    logging.debug("Building consensus tree: %s" % consense_cl)
    # distance without bootstrapping
    dnadist_out0 = phy_file.rsplit(".",1)[0] + ".fdnadist0"
    dnadist_cl0 = FDNADistCommandline(FPHYLIP_BIN("fdnadist"), \
        sequence=phy_file, outfile=dnadist_out0, method="f")
    stdout, stderr = dnadist_cl0()
    logging.debug\
        ("Calculating distance for original alignment: %s" % dnadist_cl0)
    # infer branch length on consensus tree
    consensustree1 = phy_file.rsplit(".",1)[0] + ".consensustree.branchlength"
    run_ffitch(distfile=dnadist_out0, outtreefile=consensustree1, \
        intreefile=consense_out)
    # write final tree
    ct_s = Tree(consense_out)
    if outgroup:
        t1 = consensustree1 + ".rooted"
        t2 = smart_reroot(consensustree1, outgroup, t1)
        if t2 == t1:
            outfile = outfile.replace(".unrooted", "")
        ct_b = Tree(t2)
    else:
        ct_b = Tree(consensustree1)
    # Transfer support values from the consensus tree (where fconsense
    # stores them as branch lengths, scaled /100) onto the matching
    # internal nodes of the branch-length tree.
    nodesupport = {}
    for node in ct_s.traverse("postorder"):
        node_children = tuple(sorted([f.name for f in node]))
        if len(node_children) > 1:
            nodesupport[node_children] = node.dist/100.
    for k,v in nodesupport.items():
        ct_b.get_common_ancestor(*k).support = v
    print ct_b
    ct_b.write(format=0, outfile=outfile)
    # Treat an empty/missing output file as failure.
    try:
        s = op.getsize(outfile)
    except OSError:
        s = 0
    if s:
        logging.debug("NJ tree printed to %s" % outfile)
        return outfile, phy_file
    else:
        logging.debug("Something was wrong. NJ tree was not built.")
        return None
def build_ml_phyml(alignment, outfile, work_dir=".", **kwargs):
    """
    build maximum likelihood tree of DNA seqs with PhyML
    """
    phy_file = op.join(work_dir, "work", "aln.phy")
    # Relaxed PHYLIP permits sequence names longer than 10 characters.
    AlignIO.write(alignment, file(phy_file, "w"), "phylip-relaxed")
    phyml_cl = PhymlCommandline(cmd=PHYML_BIN("phyml"), input=phy_file, **kwargs)
    logging.debug("Building ML tree using PhyML: %s" % phyml_cl)
    stdout, stderr = phyml_cl()
    # PhyML writes its result next to the input with this fixed suffix.
    tree_file = phy_file + "_phyml_tree.txt"
    if not op.exists(tree_file):
        print >>sys.stderr, "***PhyML failed."
        return None
    sh("cp {0} {1}".format(tree_file, outfile), log=False)
    logging.debug("ML tree printed to %s" % outfile)
    return outfile, phy_file
def build_ml_raxml(alignment, outfile, work_dir=".", **kwargs):
    """
    build maximum likelihood tree of DNA seqs with RAxML
    """
    work_dir = op.join(work_dir, "work")
    mkdir(work_dir)
    phy_file = op.join(work_dir, "aln.phy")
    # Relaxed PHYLIP permits sequence names longer than 10 characters.
    AlignIO.write(alignment, file(phy_file, "w"), "phylip-relaxed")
    raxml_work = op.abspath(op.join(op.dirname(phy_file), "raxml_work"))
    mkdir(raxml_work)
    # Algorithm "a": rapid bootstrap + best-scoring ML search in one run;
    # fixed seeds keep runs reproducible.
    raxml_cl = RaxmlCommandline(cmd=RAXML_BIN("raxmlHPC"), \
        sequences=phy_file, algorithm="a", model="GTRGAMMA", \
        parsimony_seed=12345, rapid_bootstrap_seed=12345, \
        num_replicates=100, name="aln", \
        working_dir=raxml_work, **kwargs)
    logging.debug("Building ML tree using RAxML: %s" % raxml_cl)
    stdout, stderr = raxml_cl()
    # RAxML writes the support-annotated best tree under this fixed name.
    tree_file = "{0}/RAxML_bipartitions.aln".format(raxml_work)
    if not op.exists(tree_file):
        print >>sys.stderr, "***RAxML failed."
        sh("rm -rf %s" % raxml_work, log=False)
        return None
    sh("cp {0} {1}".format(tree_file, outfile), log=False)
    logging.debug("ML tree printed to %s" % outfile)
    sh("rm -rf %s" % raxml_work)
    return outfile, phy_file
def SH_raxml(reftree, querytree, phy_file, shout="SH_out.txt"):
    """
    SH test using RAxML
    querytree can be a single tree or a bunch of trees (eg. from bootstrapping)
    """
    assert op.isfile(reftree)
    # Append mode: results from multiple calls accumulate in one report.
    shout = must_open(shout, "a")
    raxml_work = op.abspath(op.join(op.dirname(phy_file), "raxml_work"))
    mkdir(raxml_work)
    # Algorithm "h" runs the Shimodaira-Hasegawa topology test.
    raxml_cl = RaxmlCommandline(cmd=RAXML_BIN("raxmlHPC"), \
        sequences=phy_file, algorithm="h", model="GTRGAMMA", \
        name="SH", starting_tree=reftree, bipartition_filename=querytree, \
        working_dir=raxml_work)
    logging.debug("Running SH test in RAxML: %s" % raxml_cl)
    o, stderr = raxml_cl()
    # hard coded: scrape the significance line from RAxML's stdout
    try:
        pval = re.search('(Significantly.*:.*)', o).group(0)
    except:
        print >>sys.stderr, "SH test failed."
    else:
        # Escape "%" for downstream LaTeX-style consumers.
        pval = pval.strip().replace("\t"," ").replace("%","\%")
        print >>shout, "{0}\t{1}".format(op.basename(querytree), pval)
        logging.debug("SH p-value appended to %s" % shout.name)
    shout.close()
    return shout.name
# Codon -> amino acid map taken from the standard genetic code.
CODON_TRANSLATION = CodonTable.standard_dna_table.forward_table
# Codons belonging to fourfold-degenerate families (third position free).
FOURFOLD = {"CTT": "L", "ACA": "T", "ACG": "T", "CCT": "P", "CTG": "L",
    "CTA": "L", "ACT": "T", "CCG": "P", "CCA": "P", "CCC": "P",
    "GGT": "G", "CGA": "R", "CGC": "R", "CGG": "R", "GGG": "G",
    "GGA": "G", "GGC": "G", "CGT": "R", "GTA": "V", "GTC": "V",
    "GTG": "V", "GTT": "V", "CTC": "L", "TCT": "S", "TCG": "S",
    "TCC": "S", "ACC": "T", "TCA": "S", "GCA": "A", "GCC": "A",
    "GCG": "A", "GCT": "A"}
def subalignment(alnfle, subtype, alntype="fasta"):
    """
    Subset synonymous or fourfold degenerate sites from an alignment
    input should be a codon alignment
    """
    aln = AlignIO.read(alnfle, alntype)
    alnlen = aln.get_alignment_length()
    nseq = len(aln)
    subaln = None
    subalnfile = alnfle.rsplit(".", 1)[0] + "_{0}.{1}".format(subtype, alntype)
    if subtype == "synonymous":
        # Keep a codon column only when every sequence has a translatable
        # codon for the SAME amino acid.  The inner for/else keeps the
        # column only if no `break` fired.
        for j in range( 0, alnlen, 3 ):
            aa = None
            for i in range(nseq):
                codon = str(aln[i, j: j + 3].seq)
                if codon not in CODON_TRANSLATION:
                    break
                if aa and CODON_TRANSLATION[codon] != aa:
                    break
                else:
                    aa = CODON_TRANSLATION[codon]
            else:
                if subaln is None:
                    subaln = aln[:, j: j + 3]
                else:
                    subaln += aln[:, j: j + 3]
    if subtype == "fourfold":
        # Keep a codon column only when every sequence's codon is in a
        # fourfold-degenerate family (for/else as above).
        for j in range( 0, alnlen, 3 ):
            for i in range(nseq):
                codon = str(aln[i, j: j + 3].seq)
                if codon not in FOURFOLD:
                    break
            else:
                if subaln is None:
                    subaln = aln[:, j: j + 3]
                else:
                    subaln += aln[:, j: j + 3]
    if subaln:
        AlignIO.write(subaln, subalnfile, alntype)
        return subalnfile
    else:
        print >>sys.stderr, "No sites {0} selected.".format(subtype)
        return None
def merge_rows_local(filename, ignore=".", colsep="\t", local=10, \
        fieldcheck=True, fsep=","):
    """
    merge overlapping rows within given row count distance
    """
    fw = must_open(filename+".merged", "w")
    rows = file(filename).readlines()
    rows = [row.strip().split(colsep) for row in rows]
    l = len(rows[0])
    for rowi, row in enumerate(rows):
        n = len(rows)
        i = rowi+1
        # Look ahead up to `local` rows for candidates to absorb.
        while i <= min(rowi+local, n-1):
            merge = 1
            row2 = rows[i]
            # Two rows are mergeable when every column pair is compatible:
            # one side is the ignore marker, or one value contains the other
            # (after optional normalization of comma-separated field sets).
            for j in range(l):
                a = row[j]
                b = row2[j]
                if fieldcheck:
                    a = set(a.split(fsep))
                    a = fsep.join(sorted(list(a)))
                    b = set(b.split(fsep))
                    b = fsep.join(sorted(list(b)))
                if all([a!=ignore, b!=ignore, a not in b, b not in a]):
                    merge = 0
                    i += 1
                    break
            if merge:
                # Absorb row2 column by column, preferring the richer value.
                for x in range(l):
                    if row[x] == ignore:
                        rows[rowi][x] = row2[x]
                    elif row[x] in row2[x]:
                        rows[rowi][x] = row2[x]
                    else:
                        rows[rowi][x] = row[x]
                row = rows[rowi]
                # NOTE(review): removing from `rows` while enumerate() is
                # iterating it shifts later rows into the removed slot, and
                # `n` is not refreshed after the removal -- verify that the
                # skip/short-list behavior here is intentional.
                rows.remove(row2)
        print >>fw, colsep.join(row)
    fw.close()
    return fw.name
def add_tandems(mcscanfile, tandemfile):
    """
    add tandem genes to anchor genes in mcscan file
    """
    # Each row of tandemfile is a comma-separated tandem gene group.
    tandems = [f.strip().split(",") for f in file(tandemfile)]
    fw = must_open(mcscanfile+".withtandems", "w")
    fp = must_open(mcscanfile)
    seen =set()
    for i, row in enumerate(fp):
        if row[0] == '#':
            continue
        anchorslist = row.strip().split("\t")
        # Only the first gene of each comma-joined cell is the anchor.
        anchors = set([a.split(",")[0] for a in anchorslist])
        anchors.remove(".")
        # Skip rows whose anchors were all emitted already via a tandem group.
        if anchors & seen == anchors:
            continue
        newanchors = []
        for a in anchorslist:
            if a == ".":
                newanchors.append(a)
                continue
            # Expand an anchor into its full tandem group when one exists.
            for t in tandems:
                if a in t:
                    newanchors.append(",".join(t))
                    seen.update(t)
                    break
            else:
                newanchors.append(a)
                seen.add(a)
        print >>fw, "\t".join(newanchors)
    fw.close()
    # Collapse rows that became overlapping after tandem expansion.
    newmcscanfile = merge_rows_local(fw.name)
    logging.debug("Tandems added to `{0}`. Results in `{1}`".\
        format(mcscanfile, newmcscanfile))
    fp.seek(0)
    logging.debug("{0} rows merged to {1} rows".\
        format(len(fp.readlines()), len(file(newmcscanfile).readlines())))
    sh("rm %s" % fw.name)
    return newmcscanfile
def main():
    """Dispatch the phylo subcommands to their handler functions."""
    actions = (
        ('prepare', 'prepare cds sequences from .mcscan'),
        ('build', 'build NJ and ML trees from cds'),
        ('draw', 'draw Newick formatted trees'),
    )
    dispatcher = ActionDispatcher(actions)
    dispatcher.dispatch(globals())
def prepare(args):
"""
%prog prepare mcscanfile cdsfile [options]
Pick sequences from cdsfile to form fasta files, according to multiple
alignment in the mcscanfile.
The fasta sequences can then be used to construct phylogenetic tree.
Use --addtandem=tandemfile to collapse tandems of anchors into single row.
The tandemfile must be provided with *ALL* genomes involved, otherwise
result will be incomplete and redundant.
"""
from jcvi.graphics.base import discrete_rainbow
p = OptionParser(prepare.__doc__)
p.add_option("--addtandem", help="path to tandemfile [default: %default]")
p.add_option("--writecolors", default=False, action="store_true", \
help="generate a gene_name to color mapping file which will be taken " \
"by jcvi.apps.phylo.draw [default: %default]")
p.add_option("--outdir", type="string", default="sequences", \
help="path to output dir. New dir is made if not existing [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
mcscanfile, cdsfile = args
if opts.addtandem:
tandemfile = opts.addtandem
mcscanfile_with_tandems = add_tandems(mcscanfile, tandemfile)
mcscanfile = mcscanfile_with_tandems
seqdir = opts.outdir
mkdir(seqdir)
f = Fasta(cdsfile)
fp = must_open(mcscanfile)
if opts.writecolors:
fc = must_open("leafcolors.txt", "w")
n = 0
for i, row in enumerate(fp):
row = row.strip().split("\t")
if i == 0:
l = len(row)
if l <= 20:
colors = discrete_rainbow(l, shuffle=False)[1]
else:
colors = discrete_rainbow(l, usepreset=False, shuffle=False)[1]
warnings.warn("*** WARNING ***\n" \
"Too many columns. Colors may not be all distinctive.")
assert len(row)==l, "All rows should have same number of fields."
anchors = set()
for j, atom in enumerate(row):
color = "%s,%s,%s" % colors[j]
if atom == ".":
continue
elif "," in atom:
atom = atom.split(",")
for a in atom:
fc.write("{0}\t{1}\n".format(a, color))
anchors.add(a)
else:
fc.write("{0}\t{1}\n".format(atom, color))
anchors.add(atom)
if len(anchors) <= 3:
print >>sys.stderr, \
"Not enough seqs to build trees for {0}".format(anchors)
continue
pivot = row[0]
fw = must_open("%s/%s.cds" % (seqdir, pivot), "w")
for a in anchors:
if a not in f:
print a
a = find_first_isoform(a, f)
assert a, a
arec = f[a]
SeqIO.write((arec), fw, "fasta")
fw.close()
n+=1
if opts.writecolors:
fc.close()
logging.debug("leaf colors written to `{0}`".format(fc.name))
logging.debug("cds of {0} syntelog groups written to {1}/".format(n, seqdir))
return seqdir
def build(args):
    """
    %prog build [prot.fasta] cds.fasta [options] --outdir=outdir

    This function wraps on the following steps:
    1. msa using ClustalW2 or MUSCLE(default)
    2. (optional) alignment editing using Gblocks
    3. build NJ tree using PHYLIP in EMBOSS package
       seq names should be unique by first 10 chars (restriction of PHYLIP)
    4. build ML tree using RAxML(default) or PHYML, use keywords raxml or phyml,
       *WARNING* maybe slow with large dataset

    If an outgroup file is provided, the result tree will be rooted on the
    outgroup according to order in the file, i.e. the name in row1 will be
    tried first. If not found, row2 will be used, etc.
    Tail truncated names can be provided so long as it is unique among the seqs.
    If not uniq, the first occurrence will be used. For example, if you have
    two moss sequences in your input, then the tree will be rooted on the
    first moss sequence encountered by the program, unless they are monophylic,
    in which case the root will be their common ancestor.

    --stree and --smap are required if --treefix is set.

    Trees can be edited again using an editor such as Dendroscope. This
    is the recommended way to get highly customized trees.

    Newick format trees will be deposited into outdir (. by default).
    """
    from jcvi.formats.fasta import translate

    p = OptionParser(build.__doc__)
    p.add_option("--longest", action="store_true",
                 help="Get longest ORF, only works if no pep file, "\
                      "e.g. ESTs [default: %default]")
    p.add_option("--nogblocks", action="store_true",
                 help="don't use Gblocks to edit alignment [default: %default]")
    p.add_option("--synonymous", action="store_true",
                 help="extract synonymous sites of the alignment [default: %default]")
    p.add_option("--fourfold", action="store_true",
                 help="extract fourfold degenerate sites of the alignment [default: %default]")
    p.add_option("--msa", default="muscle", choices=("clustalw", "muscle"),
                 help="software used to align the proteins [default: %default]")
    p.add_option("--noneighbor", action="store_true",
                 help="don't build NJ tree [default: %default]")
    p.add_option("--ml", default=None, choices=("raxml", "phyml"),
                 help="software used to build ML tree [default: %default]")
    p.add_option("--outgroup",
                 help="path to file containing outgroup orders [default: %default]")
    p.add_option("--SH", help="path to reference Newick tree [default: %default]")
    p.add_option("--shout", default="SH_out.txt", \
                 help="SH output file name [default: %default]")
    p.add_option("--treefix", action="store_true",
                 help="use TreeFix to rearrange ML tree [default: %default]")
    p.add_option("--stree", help="path to species Newick tree [default: %default]")
    p.add_option("--smap", help="path to smap file: " \
                 "gene_name_pattern<tab>species_name [default: %default]")
    p.add_option("--outdir", type="string", default=".", \
                 help="path to output dir. New dir is made if not existing [default: %default]")

    opts, args = p.parse_args(args)
    # Resolve negated flags into positive booleans once, up front
    gblocks = not opts.nogblocks
    synonymous = opts.synonymous
    fourfold = opts.fourfold
    neighbor = not opts.noneighbor
    outgroup = opts.outgroup
    outdir = opts.outdir

    # One arg: CDS only (protein will be derived by translation below);
    # two args: explicit protein + CDS files.
    if len(args) == 1:
        protein_file, dna_file = None, args[0]
    elif len(args) == 2:
        protein_file, dna_file = args
    else:
        print >>sys.stderr, "Incorrect arguments"
        sys.exit(not p.print_help())

    if opts.treefix:
        stree = opts.stree
        smap = opts.smap
        assert stree and smap, "TreeFix requires stree and smap files."
        # TreeFix operates on the RAxML output, so force the ML method
        opts.ml = "raxml"

    treedir = op.join(outdir, "tree")
    mkdir(treedir)

    if not protein_file:
        # No protein file given: translate the CDS file to get one
        protein_file = dna_file + ".pep"
        translate_args = [dna_file, "--outfile=" + protein_file]
        if opts.longest:
            translate_args += ["--longest"]
        dna_file, protein_file = translate(translate_args)

    # Step 1: multiple sequence alignment of the proteins
    work_dir = op.join(outdir, "alignment")
    mkdir(work_dir)
    p_recs = list(SeqIO.parse(open(protein_file), "fasta"))
    if opts.msa == "clustalw":
        align_fasta = clustal_align_protein(p_recs, work_dir)
    elif opts.msa == "muscle":
        align_fasta = muscle_align_protein(p_recs, work_dir)

    # Back-translate the protein alignment onto the CDS (pal2nal/mrtrans)
    n_recs = list(SeqIO.parse(open(dna_file), "fasta"))
    mrtrans_fasta = run_mrtrans(align_fasta, n_recs, work_dir, outfmt="fasta")
    if not mrtrans_fasta:
        logging.debug("pal2nal aborted. " \
                      "Cannot reliably build tree for {0}".format(dna_file))
        return

    # Step 2 (optional): alignment editing / site selection
    codon_aln_fasta = mrtrans_fasta
    if gblocks:
        gb_fasta = run_gblocks(mrtrans_fasta)
        # Fall back to the unedited alignment if Gblocks failed
        codon_aln_fasta = gb_fasta if gb_fasta else codon_aln_fasta
    else:
        if synonymous:
            codon_aln_fasta = subalignment(mrtrans_fasta, "synonymous")
        if fourfold:
            codon_aln_fasta = subalignment(mrtrans_fasta, "fourfold")

    # No tree requested: just return the (edited) codon alignment
    if not neighbor and not opts.ml:
        return codon_aln_fasta

    alignment = AlignIO.read(codon_aln_fasta, "fasta")
    if len(alignment) <= 3:
        raise ValueError("Too few seqs to build tree.")

    mkdir(op.join(treedir, "work"))
    # Step 3: NJ tree via PHYLIP
    if neighbor:
        out_file = op.join(treedir, op.basename(dna_file).rsplit(".", 1)[0] + \
                           ".NJ.unrooted.dnd")
        try:
            outfile, phy_file = build_nj_phylip(alignment, \
                outfile=out_file, outgroup=outgroup, work_dir=treedir)
        except:
            print "NJ tree cannot be built for {0}".format(dna_file)

        if opts.SH:
            reftree = opts.SH
            querytree = outfile
            SH_raxml(reftree, querytree, phy_file, shout=opts.shout)

    # Step 4: ML tree via RAxML or PHYML
    if opts.ml:
        out_file = op.join(treedir, op.basename(dna_file).rsplit(".", 1)[0] + \
                           ".ML.unrooted.dnd")
        if opts.ml == "phyml":
            try:
                outfile, phy_file = build_ml_phyml\
                    (alignment, outfile=out_file, work_dir=treedir)
            except:
                print "ML tree cannot be built for {0}".format(dna_file)
        elif opts.ml == "raxml":
            try:
                outfile, phy_file = build_ml_raxml\
                    (alignment, outfile=out_file, work_dir=treedir)
            except:
                print "ML tree cannot be built for {0}".format(dna_file)

        if outgroup:
            # Reroot on the requested outgroup and drop the unrooted copy
            new_out_file = out_file.replace(".unrooted", "")
            t = smart_reroot(treefile=out_file, outgroupfile=outgroup, \
                             outfile=new_out_file)
            if t == new_out_file:
                sh("rm %s" % out_file)
            outfile = new_out_file

        if opts.SH:
            reftree = opts.SH
            querytree = outfile
            SH_raxml(reftree, querytree, phy_file, shout=opts.shout)

        if opts.treefix:
            # Rearrange the ML tree with TreeFix using the species tree + smap
            treefix_dir = op.join(treedir, "treefix")
            assert mkdir(treefix_dir, overwrite=True)

            sh("cp {0} {1}/".format(outfile, treefix_dir))
            input = op.join(treefix_dir, op.basename(outfile))
            aln_file = input.rsplit(".", 1)[0] + ".fasta"
            SeqIO.write(alignment, aln_file, "fasta")

            outfile = run_treefix(input=input, stree_file=stree, smap_file=smap, \
                                  a_ext=".fasta", o_ext=".dnd", n_ext = ".treefix.dnd")

    return outfile
def _draw_trees(trees, nrow=1, ncol=1, rmargin=.3, iopts=None, outdir=".",
                shfile=None, **kwargs):
    """
    Draw one or multiple trees on one plot.

    trees is an ordered mapping of {tree_file_name: newick_string}; nrow x ncol
    trees are drawn per figure, producing as many figures as needed.  shfile,
    if given, maps tree file names to SH test p-values to annotate the plots.
    """
    from jcvi.graphics.tree import draw_tree

    if shfile:
        SHs = DictFile(shfile, delimiter="\t")

    ntrees = len(trees)
    n = nrow * ncol
    # One figure per page of n trees
    for x in xrange(int(ceil(float(ntrees)/n))):
        fig = plt.figure(1, (iopts.w, iopts.h)) if iopts \
            else plt.figure(1, (5, 5))
        root = fig.add_axes([0, 0, 1, 1])

        # Grid cell size and the lower-left corner of each cell, row-major
        # from top-left (y coordinates reversed so tree 0 is at the top).
        xiv = 1. / ncol
        yiv = 1. / nrow
        xstart = list(np.arange(0, 1, xiv)) * nrow
        ystart = list(chain(*zip(*[list(np.arange(0, 1, yiv))[::-1]] * ncol)))

        for i in xrange(n*x, n*(x+1)):
            if i == ntrees:
                break
            ax = fig.add_axes([xstart[i%n], ystart[i%n], xiv, yiv])
            f = trees.keys()[i]
            tree = trees[f]
            try:
                SH = SHs[f]
            except:
                # No SH p-value recorded for this tree
                SH = None
            draw_tree(ax, tree, rmargin=rmargin, reroot=False, \
                      supportcolor="r", SH=SH, **kwargs)

        root.set_xlim(0, 1)
        root.set_ylim(0, 1)
        root.set_axis_off()

        format = iopts.format if iopts else "pdf"
        dpi = iopts.dpi if iopts else 300
        if n == 1:
            # Single-tree plot: name the image after the tree file
            image_name = f.rsplit(".", 1)[0] + "." + format
        else:
            image_name = "trees{0}.{1}".format(x, format)
        image_name = op.join(outdir, image_name)

        savefig(image_name, dpi=dpi, iopts=iopts)
        plt.clf()
def draw(args):
    """
    %prog draw --input newicktrees [options]

    Draw phylogenetic trees into single or combined plots.
    Input trees should be one of the following:
    1. single Newick format tree file
    2. a dir containing *ONLY* the tree files to be drawn

    Newick format:
    http://evolution.genetics.washington.edu/phylip/newicktree.html

    This function wraps on jcvi.graphics.tree
    This function is better used for trees generated by jcvi.apps.phylo (rooted
    if possible). For drawing general Newick trees from external sources invoke
    jcvi.graphics.tree directly, which also gives more drawing options.
    """
    trunc_name_options = ['headn', 'oheadn', 'tailn', 'otailn']
    p = OptionParser(draw.__doc__)
    p.add_option("--input", help="path to single input tree file or a dir "\
                 "containing ONLY the input tree files")
    p.add_option("--combine", type="string", default="1x1", \
                 help="combine multiple trees into one plot in nrowxncol")
    p.add_option("--trunc_name", default=None, help="Options are: {0}. " \
                 "truncate first n chars, retains only first n chars, " \
                 "truncate last n chars, retain only last chars. " \
                 "n=1~99. [default: %default]".format(trunc_name_options))
    p.add_option("--SH", default=None,
                 help="path to a file containing SH test p-values in format:" \
                 "tree_file_name<tab>p-values " \
                 "This file can be generated with jcvi.apps.phylo build [default: %default]")
    p.add_option("--scutoff", default=50, type="int",
                 help="cutoff for displaying node support, 0-100 [default: %default]")
    p.add_option("--barcode", default=None,
                 help="path to seq/taxon name barcode mapping file: " \
                 "barcode<tab>new_name " \
                 "This option is downstream of `--trunc_name` [default: %default]")
    p.add_option("--leafcolorfile", default=None,
                 help="path to a mapping file containing font colors " \
                 "for the OTUs: leafname<tab>color [default: %default]")
    p.add_option("--outdir", type="string", default=".", \
                 help="path to output dir. New dir is made if not existed [default: %default]")
    opts, args, iopts = p.set_image_options(figsize="8x6")
    input = opts.input
    outdir = opts.outdir
    combine = opts.combine.split("x")
    trunc_name = opts.trunc_name
    SH = opts.SH

    mkdir(outdir)
    if not input:
        sys.exit(not p.print_help())
    elif op.isfile(input):
        trees_file = input
        treenames = [op.basename(input)]
    elif op.isdir(input):
        # Concatenate every file in the dir into one combined tree file
        trees_file = op.join(outdir, "alltrees.dnd")
        treenames = []
        for f in sorted(os.listdir(input)):
            sh("cat {0}/{1} >> {2}".format(input, f, trees_file), log=False)
            treenames.append(f)
    else:
        sys.exit(not p.print_help())

    # Accumulate Newick strings: a tree may span several lines, and one line
    # may hold several trees (";" terminates a tree).
    trees = OrderedDict()
    tree = ""
    i = 0
    for row in LineFile(trees_file, comment="#", load=True).lines:
        if i == len(treenames):
            break
        if not len(row):
            continue

        if ";" in row:
            # sanity check
            if row.index(";") != len(row)-1:
                # More than one tree on this line: split and re-append the
                # terminating ";" to every piece but the trailing remainder
                ts = row.split(";")
                for ii in xrange(len(ts)-1):
                    ts[ii] += ";"
            else:
                ts = [row]

            for t in ts:
                if ";" in t:
                    # Piece completes the current tree
                    tree += t
                    if tree:
                        trees[treenames[i]] = tree
                        tree = ""
                        i+=1
                else:
                    tree += t
        else:
            # Continuation line of a multi-line tree
            tree += row

    logging.debug("A total of {0} trees imported.".format(len(trees)))
    sh("rm {0}".format(op.join(outdir, "alltrees.dnd")))

    _draw_trees(trees, nrow=int(combine[0]), ncol=int(combine[1]), rmargin=.3,\
                iopts=iopts, outdir=outdir, shfile=SH, trunc_name=trunc_name, \
                scutoff=opts.scutoff, barcodefile = opts.barcode,
                leafcolorfile=opts.leafcolorfile)
# Script entry point: dispatch to the module-level main()
if __name__ == '__main__':
    main()
| sgordon007/jcvi_062915 | apps/phylo.py | Python | bsd-2-clause | 34,972 | [
"Biopython"
] | 5dd59e9b6e8ddd09733b1e0bcf5d05c0377cd0b93eb64752f418843193dacf62 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libcint(CMakePackage):
    """Library for analytical Gaussian integrals for quantum chemistry."""

    homepage = "https://github.com/sunqm/libcint"
    url = "https://github.com/sunqm/libcint/archive/v3.0.4.tar.gz"
    maintainers = ['mfherbst']

    #
    # Versions
    #
    version('3.0.13', sha256='ee64f0bc7fb6073063ac3c9bbef8951feada141e197b1a5cc389c8cccf8dc360')
    version('3.0.12', sha256='7409ef41f1465cf4c1ae9834dfc0b0585c0fdc63b55d8ee8b8a7a6d5e31f309d')
    version('3.0.11', sha256='4c9c24d4bd4791391848f19a4be5177137aca27a8e0375574101a7a1261157cf')
    version('3.0.10', sha256='aac6d9630dc4c62840f03262166e877d3aeaf27b6b33498fb490fa3428f12fe4')
    version('3.0.8', sha256='ca94772f74aaf7b8ad4d7c1b09578c9115ec909c3d8b82dacc908c351c631c35')
    version('3.0.7', sha256='e603cd90567c6116d4f704ea66a010b447c11052e90db1d91488adc187142ead')
    version('3.0.6', sha256='a7d6d46de9be044409270b27727a1d620d21b5fda6aa7291548938e1ced25404')
    version('3.0.5', sha256='7bde241ce83c00b89c80459e3af5734d40925d8fd9fcaaa7245f61b08192c722')
    version('3.0.4', sha256='0f25ef7ad282dd7a20e4decf283558e4f949243a5423ff4c0cd875276c310c47')

    #
    # Variants
    #
    variant('f12', default=True,
            description="Enable explicitly correlated f12 integrals.")
    variant('coulomb_erf', default=True,
            description="Enable attenuated coulomb operator integrals.")
    variant('test', default=False, description="Build test programs")
    variant('shared', default=True,
            description="Build the shared library")

    #
    # Dependencies and conflicts
    #
    depends_on('cmake@2.6:', type="build")
    depends_on('blas')
    # Tests are driven by a python/numpy harness
    depends_on('python', type=("build", "test"), when="+test")
    depends_on('py-numpy', type=("build", "test"), when="+test")

    # Libcint tests only work with a shared libcint library
    conflicts('+test~shared')

    #
    # Settings and cmake cache
    #
    def cmake_args(self):
        """Map the spack variants onto libcint's CMake cache options.

        str(bool) deliberately yields the strings "True"/"False", which
        CMake accepts as boolean values.
        """
        spec = self.spec
        args = [
            "-DWITH_COULOMB_ERF=" + str("+coulomb_erf" in spec),
            "-DWITH_F12=" + str("+f12" in spec),
            "-DBUILD_SHARED_LIBS=" + str("+shared" in spec),
            "-DENABLE_TEST=" + str("+test" in spec),
            "-DENABLE_EXAMPLE=OFF",  # Requires fortran compiler
        ]
        return args
| iulian787/spack | var/spack/repos/builtin/packages/libcint/package.py | Python | lgpl-2.1 | 2,555 | [
"Gaussian"
] | c06993f26e6cccc1ee61fd06894755b2407bf37d3eeaa45d5dd2a10600426b60 |
from hidparser.Item import ItemType, Item
from hidparser.enums import CollectionType, ReportFlags, ReportType
from hidparser.DeviceBuilder import DeviceBuilder
class InputItem(Item):
    """HID main item describing an Input report (tag 0x80)."""

    # Report flags decoded from the item payload (set in __init__).
    flags = None  # type: ReportFlags

    @classmethod
    def _get_tag(cls):
        # Main-item tag value for Input items.
        return 0x80

    @classmethod
    def _get_type(cls):
        return ItemType.MAIN

    def __init__(self, **kwargs):
        super(InputItem, self).__init__(**kwargs)
        # Parse the raw data bytes into structured report flags.
        self.flags = ReportFlags.from_bytes(self.data)

    def visit(self, descriptor: DeviceBuilder):
        """Register this item on the builder as an INPUT report."""
        descriptor.add_report(ReportType.INPUT, self.flags)

    def __repr__(self):
        return "<{0}: {1}>".format(self.__class__.__name__, self.flags)
class OutputItem(Item):
    """HID main item describing an Output report (tag 0x90)."""

    # Report flags decoded from the item payload (set in __init__).
    flags = None

    @classmethod
    def _get_tag(cls):
        # Main-item tag value for Output items.
        return 0x90

    @classmethod
    def _get_type(cls):
        return ItemType.MAIN

    def __init__(self, **kwargs):
        super(OutputItem, self).__init__(**kwargs)
        # Parse the raw data bytes into structured report flags.
        self.flags = ReportFlags.from_bytes(self.data)

    def visit(self, descriptor: DeviceBuilder):
        """Register this item on the builder as an OUTPUT report."""
        descriptor.add_report(ReportType.OUTPUT, self.flags)

    def __repr__(self):
        return "<{0}: {1}>".format(self.__class__.__name__, self.flags)
class FeatureItem(Item):
    """HID main item describing a Feature report (tag 0xB0)."""

    # Report flags decoded from the item payload (set in __init__).
    flags = None

    @classmethod
    def _get_tag(cls):
        # Main-item tag value for Feature items.
        return 0xB0

    @classmethod
    def _get_type(cls):
        return ItemType.MAIN

    def __init__(self, **kwargs):
        super(FeatureItem, self).__init__(**kwargs)
        # Parse the raw data bytes into structured report flags.
        self.flags = ReportFlags.from_bytes(self.data)

    def visit(self, descriptor: DeviceBuilder):
        """Register this item on the builder as a FEATURE report."""
        descriptor.add_report(ReportType.FEATURE, self.flags)

    def __repr__(self):
        return "<{0}: {1}>".format(self.__class__.__name__, self.flags)
class CollectionItem(Item):
    """HID main item that opens a new collection (tag 0xA0)."""

    # CollectionType decoded from the single data byte (set in __init__).
    collection = None

    @classmethod
    def _get_tag(cls):
        return 0xA0

    @classmethod
    def _get_type(cls):
        return ItemType.MAIN

    def __init__(self, **kwargs):
        super(CollectionItem, self).__init__(**kwargs)
        # A collection item must carry exactly one data byte naming its type.
        # Bug fix: the original compared `len(self.data) is not 1`, which
        # tests object identity and only happens to work because CPython
        # caches small ints (it is a SyntaxWarning since Python 3.8).
        # Value comparison is the correct check.
        if self.data is None or len(self.data) != 1:
            raise ValueError("Collection must contain one byte of data")
        self.collection = CollectionType(self.data[0])

    def visit(self, descriptor: DeviceBuilder):
        """Push this collection onto the builder's collection stack."""
        if not isinstance(self.collection, CollectionType):
            raise ValueError("CollectionItem does not have a valid collection set")
        descriptor.push_collection(self.collection)

    def __repr__(self):
        return "<{}: {}>".format(self.__class__.__name__, self.collection)
class EndCollectionItem(Item):
    """HID main item that closes the current collection (tag 0xC0)."""

    @classmethod
    def _get_tag(cls):
        # Main-item tag value for End Collection items.
        return 0xC0

    @classmethod
    def _get_type(cls):
        return ItemType.MAIN

    def visit(self, descriptor: DeviceBuilder):
        """Pop the most recently opened collection from the builder."""
        descriptor.pop_collection()
| NZSmartie/PyHIDParser | hidparser/ItemMain.py | Python | mit | 2,806 | [
"VisIt"
] | 12ae0e8605cdb66a7919e68e2c43fb4afa38f9ed129a28ebdd6f9323768fa5c3 |
#!/usr/bin/env python
"""
After a library is mapped to the genome (using map_single_fragments.py or any
other mapper), the bam file is screened for reads that weren't mapped to the
genome or weren't concise and try to map wach of the ends to a different
location. This script report the reads that are chimeric in a table of the
format:
chr1 position1 strand1 chr2 position2 strand2 read_name read_type
where the position1 is the first position of the first read and position2 is
the last position of read2.
The input is a list of bam files, the output is always one list. The list can
be separated afterwards according to read names.
"""
import sys
import argparse
import pysam
import os
import errno
import pkg_resources
import RILseq
def process_command_line(argv):
    """
    Parse the command-line arguments.

    `argv` is a list of arguments, or `None` for ``sys.argv[1:]``.

    Returns the populated ``argparse.Namespace`` of settings.
    (Doc fix: the previous docstring claimed a 2-tuple was returned,
    but only the settings object is.)
    """
    if argv is None:
        argv = sys.argv[1:]

    # initialize the parser object, replace the description
    parser = argparse.ArgumentParser(
        description='Map unmapped reads as chimeric fragments',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Positional arguments: the (bwa-indexed) genome and one or more bam files
    parser.add_argument(
        'genome_fasta',
        help='Name of genome fasta file. The file must be indexed using'
        'bwa index command prior to this run.')
    parser.add_argument(
        'bamfiles', nargs='+', action='append',
        help='One or more bam files.')
    parser.add_argument(
        '-r', '--reverse_complement', default=False,
        action='store_true',
        help='Treat the reads as reverse complement. This means that the first'
        " read is actually the 3' end of the fragment. Use this when using "
        "Jonathan Livny's protocol for library construction")
    parser.add_argument(
        '-t', '--transcripts',
        help='A gff file of transcripts. If given, screen reads that might'
        ' reside from the same transcript. Very useful for screening ribosomal'
        ' RNAs. Otherwise use only the size limit.')
    parser.add_argument(
        '-s', '--distance', type=int, default=1000,
        help='Maximal distance between concordant reads. If they are generated'
        ' from the same strand but larger than this distance they will be'
        ' considered as chimeric.')
    parser.add_argument(
        '--dust_thr', type=float, default=10,
        help='Threshold for dust filter. If 0 skip.')
    parser.add_argument(
        '-d', '--dirout', default='./remapped-data/',
        help='Output directory, default is this directory.')
    parser.add_argument(
        '-a', '--all_reads',
        help='Map all reads in the BAM file, write all the fragments that are'
        ' not chimeric to the file specified here e.g. '
        '-a single_fragments_mapping.txt. By default these reads will be '
        'written to the standard output.')
    parser.add_argument(
        '-A', '--add_all_reads', default=True, action='store_false',
        help='By default map all reads in the BAM file, write all the fragments'
        ', either chimeric ro single to the output file (stdout). '
        "If this option is selected don't wirte the single reads.")
    parser.add_argument(
        '--keep_circular', default=False, action='store_true',
        help='Remove reads that are probably a result of circular RNAs by'
        ' default. If the reads are close but in opposite order they will be'
        ' removed unless this argument is set.')
    parser.add_argument(
        '-l', '--length', type=int, default=25,
        help='Length of sequence to map. Take the ends of the fragment and map'
        ' each to the genome. The length of the region will be this length.')
    parser.add_argument(
        '--max_mismatches', type=int, default=3,
        help='Find alignment allowing this number of mismatches. If there are '
        'more than one match with this number of mismatches the read will be'
        ' treated as if it might match all of them and if there is one '
        'scenario in which the two ends are concordant it will be removed.')
    parser.add_argument(
        '--allowed_mismatches', type=int, default=1,
        help='This number of mismatches is allowed between the a match and '
        'the genome. If there are mapped reads with less than --max_mismatches'
        ' mismatches but more than this number the read will be ignored.')
    parser.add_argument(
        '--skip_mapping', action='store_true', default=False,
        help='Skip the mapping step, use previously mapped files.')
    parser.add_argument(
        '--maxG', type=float, default=0.8,
        help='If a read has more than this fraction of Gs remove this read'
        'from the screen. This is due to nextseq technology which puts G '
        'where there is no signal, the poly G might just be noise.'
        ' When using other sequencing technologies set to 1.')
    parser.add_argument(
        '-f', '--feature', default='exon',
        help='Name of features to count on the GTF file (column 2).')
    parser.add_argument(
        '-i', '--identifier', default='gene_id',
        help='Name of identifier to print (in column 8 of the GTF file).')
    # External tool configuration (bwa / samtools executables and flags)
    parser.add_argument(
        '--bwa_exec', default='bwa',
        help='bwa command')
    parser.add_argument(
        '-S', '--samtools_cmd', default='samtools',
        help='Samtools executable.')
    parser.add_argument(
        '--params_aln', default='-t 8 -N -M 0',
        help='Additional parameters for aln function of bwa.')
    parser.add_argument(
        '--samse_params', default='-n 1000',
        help='Additional parameters for samse function of bwa.')

    settings = parser.parse_args(argv)

    return settings
def main(argv=None):
    """Drive the chimeric-fragment mapping pipeline.

    For each input bam file: extract unmapped/inconsistent read ends to
    fastq, remap each end independently with bwa, and report chimeric
    (and optionally single) fragments as a table on stdout.
    Returns 0 on success.
    """
    sys.stderr.write("RILseq version: {}\n".format(pkg_resources.get_distribution("RILseq").version))
    settings = process_command_line(argv)
    # Create the output directory; tolerate it already existing
    try:
        os.makedirs(settings.dirout)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    # Read the transcripts if given
    if settings.transcripts:
        trans_dict = RILseq.read_transcripts(settings.transcripts, settings.feature, settings.identifier)
    else:
        trans_dict = None
    # Decide where single (non-chimeric) fragments go: a file, stdout, or
    # nowhere at all
    if settings.all_reads:
        try:
            outall = open(settings.all_reads, 'w')
        except IOError:
            outall = None
    elif settings.add_all_reads:
        outall = sys.stdout
    else:
        outall = None
    # Get the ends of the reads from the bam files
    for bf in RILseq.flat_list(settings.bamfiles):
        bfin = pysam.AlignmentFile(bf,'rb')
        outhead = bf.rsplit('.', 1)[0]
        libname = outhead.rsplit('/',1)[-1]
        # Per-library fastq files holding the two ends of each fragment
        fsq1name = "%s/%s_ends_1.fastq"%(settings.dirout, libname)
        fsq2name = "%s/%s_ends_2.fastq"%(settings.dirout, libname)
        if settings.skip_mapping:
            # Reusing previous mappings: discard the extracted sequences
            fsq1 = open(os.devnull, 'w')
            fsq2 = fsq1
        else:
            fsq1 = open(fsq1name, 'w')
            fsq2 = open(fsq2name, 'w')
        single_mapped = RILseq.get_unmapped_reads(
            bfin, fsq1, fsq2, settings.length, settings.maxG,
            rev=settings.reverse_complement, all_reads=True,
            dust_thr=settings.dust_thr)
        reads_in = []
        # Map the fastq files to the genome
        for fqname in (fsq1name, fsq2name):
            bamheadname = fqname.rsplit('.',1)[0].rsplit('/',1)[-1]
            if settings.skip_mapping:
                # Expect the bam file from a previous run
                bamname = "%s/%s.bam"%(settings.dirout, bamheadname)
            else:
                bamname = RILseq.run_bwa(
                    settings.bwa_exec, fqname, None,
                    os.path.abspath(settings.dirout), bamheadname, settings.max_mismatches,
                    os.path.abspath(settings.genome_fasta), settings.params_aln,
                    '', settings.samse_params,
                    settings.samtools_cmd)
            bamin = pysam.AlignmentFile(bamname,'rb')
            reads_in.append(RILseq.read_bam_file(
                bamin, bamin.references, settings.allowed_mismatches))
        # Combine the two independently mapped ends into the fragments table
        RILseq.write_reads_table(
            sys.stdout, reads_in[0], reads_in[1], bfin.references,
            settings.distance, not settings.keep_circular,
            trans_dict, write_single=outall, single_mapped=single_mapped,
            max_NM=settings.allowed_mismatches)
    return 0 # success
# Script entry point: run the pipeline and propagate its exit status
if __name__ == '__main__':
    status = main()
    sys.exit(status)
| asafpr/RILseq | bin/map_chimeric_fragments.py | Python | mit | 8,553 | [
"BWA",
"pysam"
] | 052fed3449bdb279b365f27f9ac5ccf2193dbf28e00ea41ee98a9e0d7c41d0d3 |
# -*- coding: utf-8 -*-
"""
:author: Rinze de Laat <laat@delmic.com>
:copyright: © 2012 Rinze de Laat, Delmic
This file is part of Odemis.
.. license::
Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU
General Public License version 2 as published by the Free Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along with Odemis. If not,
see http://www.gnu.org/licenses/.
This module contains classes needed to construct stream panels.
Stream panels are custom, specialized controls that allow the user to view and manipulate various
data streams coming from the microscope.
"""
from __future__ import division
from decorator import decorator
import logging
from odemis import acq
from odemis.gui import FG_COLOUR_EDIT, FG_COLOUR_MAIN, BG_COLOUR_MAIN, BG_COLOUR_STREAM, \
FG_COLOUR_DIS
from odemis.gui import img
from odemis.gui.comp.combo import ComboBox
from odemis.gui.comp.foldpanelbar import FoldPanelItem, FoldPanelBar
from odemis.gui.comp.radio import GraphicalRadioButtonControl
from odemis.gui.comp.slider import UnitFloatSlider, VisualRangeSlider, UnitIntegerSlider, Slider
from odemis.gui.comp.text import SuggestTextCtrl, UnitFloatCtrl, FloatTextCtrl, UnitIntegerCtrl
from odemis.gui.util import call_in_wx_main
from odemis.gui.util.widgets import VigilantAttributeConnector
import wx
import wx.lib.newevent
from wx.lib.pubsub import pub
import odemis.gui as gui
import odemis.gui.comp.buttons as buttons
# Custom wx events emitted by stream panels so containing windows can react
# to user actions on a stream (removal, visibility toggle, peak fitting).
stream_remove_event, EVT_STREAM_REMOVE = wx.lib.newevent.NewEvent()
stream_visible_event, EVT_STREAM_VISIBLE = wx.lib.newevent.NewEvent()
stream_peak_event, EVT_STREAM_PEAK = wx.lib.newevent.NewEvent()

# Values to control which option is available (bit flags combined into the
# panel's `options` attribute)
OPT_NAME_EDIT = 1 # allow the renaming of the stream (for one time only)
OPT_BTN_REMOVE = 2 # remove the stream entry
OPT_BTN_SHOW = 4 # show/hide the stream image
OPT_BTN_UPDATE = 8 # update/stop the stream acquisition
OPT_BTN_TINT = 16 # tint of the stream (if the VA exists)
OPT_BTN_PEAK = 32 # show/hide the peak fitting data

# Layout constants for the stream panel header
CAPTION_PADDING_RIGHT = 5
ICON_WIDTH, ICON_HEIGHT = 16, 16
@decorator
def control_bookkeeper(f, self, *args, **kwargs):
    """ Clear the default message, if needed, and advance the row count

    Decorator for the add_* control methods of a stream panel: after the
    wrapped method has added its control, it makes the second sizer column
    growable, re-lays out the enclosing FoldPanelBar and bumps num_rows.
    """
    result = f(self, *args, **kwargs)

    # This makes the 2nd column's width variable
    if not self.gb_sizer.IsColGrowable(1):
        self.gb_sizer.AddGrowableCol(1)

    # Redo FoldPanelBar layout
    # Walk up the window hierarchy to the containing FoldPanelBar so the
    # whole bar is re-laid out, not just this panel.
    win = self
    while not isinstance(win, FoldPanelBar):
        win = win.Parent
    win.Layout()
    self.num_rows += 1
    return result
class StreamPanelHeader(wx.Control):
""" This class describes a clickable control responsible for expanding and collapsing the
StreamPanel to which it belongs.
It can also contain various sub buttons that allow for stream manipulation.
"""
BUTTON_SIZE = (18, 18) # The pixel size of the button
BUTTON_BORDER_SIZE = 9 # Border space around the buttons
def __init__(self, parent, wid=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
             style=wx.NO_BORDER):
    """ Build the header row of a StreamPanel.

    Which sub-buttons are created is driven by the bit flags in
    parent.options (OPT_* constants).
    """
    assert(isinstance(parent, StreamPanel))
    super(StreamPanelHeader, self).__init__(parent, wid, pos, size, style)

    self.SetBackgroundColour(self.Parent.BackgroundColour)
    # This style enables us to draw the background with our own paint event handler
    self.SetBackgroundStyle(wx.BG_STYLE_PAINT)

    # Callback when the label changes: (string (text) -> None)
    self.label_change_callback = None

    # Create and add sizer and populate with controls
    self._sz = wx.BoxSizer(wx.HORIZONTAL)

    # Fold indicator icon, drawn directly in the background in a fixed position
    self._foldIcons = wx.ImageList(16, 16)
    self._foldIcons.Add(img.getBitmap("icon/arr_down_s.png"))
    self._foldIcons.Add(img.getBitmap("icon/arr_right_s.png"))

    # Add the needed controls to the sizer, left to right; controls whose
    # option flag is not set are left as None
    self.btn_remove = self._add_remove_btn() if self.Parent.options & OPT_BTN_REMOVE else None
    if self.Parent.options & OPT_NAME_EDIT:
        self.ctrl_label = self._add_suggest_ctrl()
    else:
        self.ctrl_label = self._add_label_ctrl()
    self.btn_peak = self._add_peak_btn() if self.Parent.options & OPT_BTN_PEAK else None
    self.btn_tint = self._add_tint_btn() if self.Parent.options & OPT_BTN_TINT else None
    self.btn_show = self._add_visibility_btn() if self.Parent.options & OPT_BTN_SHOW else None
    self.btn_update = self._add_update_btn() if self.Parent.options & OPT_BTN_UPDATE else None

    # The spacer is responsible for creating padding on the right side of the header panel
    self._sz.AddSpacer((64, 16))

    # Set the sizer of the Control
    self.SetSizerAndFit(self._sz)

    self.Bind(wx.EVT_SIZE, self.on_size)
    self.Layout()
# Control creation methods
def _add_remove_btn(self):
    """ Create the stream-removal button, append it to the header and return it """
    btn = buttons.ImageButton(self.Parent,
                              bitmap=img.getBitmap("icon/ico_rem_str.png"),
                              size=self.BUTTON_SIZE)
    btn.bmpHover = img.getBitmap("icon/ico_rem_str_h.png")
    btn.SetToolTipString("Remove stream")
    self._add_ctrl(btn)
    return btn
def _add_suggest_ctrl(self):
    """ Create the editable (auto-suggest) stream-name control and add it """
    ctrl = SuggestTextCtrl(self, id=-1, value=self.Parent.stream.name.value)
    # Blend in with the header background; highlight text as editable
    ctrl.SetBackgroundColour(self.Parent.GetBackgroundColour())
    ctrl.SetForegroundColour(FG_COLOUR_EDIT)
    # Forward confirmed edits to the label-change callback
    ctrl.Bind(wx.EVT_COMMAND_ENTER, self._on_label_change)

    self._add_ctrl(ctrl, stretch=True)
    return ctrl
def _add_label_ctrl(self):
    """ Create the static (read-only) stream-name label and add it """
    ctrl = wx.StaticText(self, -1, self.Parent.stream.name.value)
    ctrl.SetBackgroundColour(self.Parent.GetBackgroundColour())
    ctrl.SetForegroundColour(FG_COLOUR_MAIN)

    self._add_ctrl(ctrl, stretch=True)
    return ctrl
def _add_tint_btn(self):
    """ Create the colour-picker (tint) button and add it to the header """
    btn = buttons.ColourButton(
        self, -1,
        size=self.BUTTON_SIZE,
        colour=self.Parent.stream.tint.value,
        use_hover=True
    )
    btn.SetToolTipString("Select colour")

    # Clicking opens the colour dialog; the stream's tint VA pushes value
    # changes back onto the button
    btn.Bind(wx.EVT_BUTTON, self._on_tint_click)
    self.Parent.stream.tint.subscribe(self._on_tint_value)

    self._add_ctrl(btn)
    return btn
def _add_peak_btn(self):
    """ Create the tri-state peak-fitting button (none/gaussian/lorentzian) """
    btn = buttons.ImageStateButton(self, bitmap=img.getBitmap("icon/ico_peak_none.png"))
    btn.bmpHover = img.getBitmap("icon/ico_peak_none_h.png")
    # One selected/selected-hover bitmap per fitting model, in cycle order
    models = ("gaussian", "lorentzian")
    btn.bmpSelected = [img.getBitmap("icon/ico_peak_%s.png" % (m,)) for m in models]
    btn.bmpSelectedHover = [img.getBitmap("icon/ico_peak_%s_h.png" % (m,)) for m in models]
    btn.SetToolTipString("Select peak fitting (Gaussian, Lorentzian, or none)")

    self._add_ctrl(btn)
    return btn
def _add_visibility_btn(self):
    """ Create the show/hide ("eye") toggle button and add it to the header """
    btn = buttons.ImageToggleButtonImageButton(self,
                                               bitmap=img.getBitmap("icon/ico_eye_closed.png"))
    btn.bmpHover = img.getBitmap("icon/ico_eye_closed_h.png")
    btn.bmpSelected = img.getBitmap("icon/ico_eye_open.png")
    btn.bmpSelectedHover = img.getBitmap("icon/ico_eye_open_h.png")
    btn.SetToolTipString("Show stream")

    self._add_ctrl(btn)
    return btn
def _add_update_btn(self):
    """ Add a button for (de)activation of the stream """
    update_btn = buttons.ImageToggleButtonImageButton(self,
                                         bitmap=img.getBitmap("icon/ico_pause.png"))
    update_btn.bmpHover = img.getBitmap("icon/ico_pause_h.png")
    update_btn.bmpSelected = img.getBitmap("icon/ico_play.png")
    update_btn.bmpSelectedHover = img.getBitmap("icon/ico_play_h.png")
    update_btn.SetToolTipString("Update stream")

    # Keep the toggle state synchronised with the stream's should_update
    # VigilantAttribute in both directions (VA -> button, button -> VA).
    # The connector is kept as an attribute so it is not garbage collected.
    self._vac_updated = VigilantAttributeConnector(
        self.Parent.stream.should_update,
        update_btn,
        update_btn.SetToggle,
        update_btn.GetToggle,
        events=wx.EVT_BUTTON
    )

    self._add_ctrl(update_btn)
    return update_btn
def _add_ctrl(self, ctrl, stretch=False):
    """ Append the given control to the header sizer

    :param ctrl: (wx.Control) Control to add to the header panel
    :param stretch: True if the control should expand to fill space

    """
    # Only the first element has a left border
    if self._sz.IsEmpty():
        border = wx.ALL
    else:
        border = wx.RIGHT
    proportion = 1 if stretch else 0

    self._sz.Add(ctrl,
                 proportion=proportion,
                 flag=(border | wx.ALIGN_CENTRE_VERTICAL |
                       wx.RESERVE_SPACE_EVEN_IF_HIDDEN),
                 border=self.BUTTON_BORDER_SIZE)
# END Control creation methods
# Layout and painting
def on_size(self, event):
    """ Handle the wx.EVT_SIZE event for the Expander class

    Keeps the header as wide as its parent panel, then re-lays-out and
    repaints.
    """
    self.SetSize((self.Parent.GetSize().x, -1))
    self.Layout()
    self.Refresh()
    event.Skip()
def on_draw_expander(self, dc):
    """ Draw the expand/collapse arrow icon

    It needs to be called from the parent's paint event handler.

    :param dc: (wx.DC) Device context of the parent's paint event
    """
    win_rect = self.GetRect()
    # Right-align the icon, with fixed padding, vertically centred
    x_pos = win_rect.GetRight() - ICON_WIDTH - CAPTION_PADDING_RIGHT

    # Index 0 = down arrow (expanded), index 1 = right arrow (collapsed)
    self._foldIcons.Draw(
        1 if self.Parent.collapsed else 0,
        dc,
        x_pos,
        (win_rect.GetHeight() - ICON_HEIGHT) // 2,
        wx.IMAGELIST_DRAW_TRANSPARENT
    )
# Show/hide/disable controls
def _show_ctrl(self, ctrl, show):
    """ Show or hide the given control (no-op when the control is absent) """
    if not ctrl:
        return
    self._sz.Show(ctrl, show)
    self._sz.Layout()
def show_remove_btn(self, show):
    """ Show or hide the remove button

    :param show: (bool) True to show the button, False to hide it
    """
    self._show_ctrl(self.btn_remove, show)
def show_updated_btn(self, show):
    """ Show or hide the update button

    :param show: (bool) True to show the button, False to hide it
    """
    self._show_ctrl(self.btn_update, show)
def show_peak_btn(self, show):
    """ Show or hide the peak button

    :param show: (bool) True to show the button, False to hide it
    """
    self._show_ctrl(self.btn_peak, show)
def show_show_btn(self, show):
    """ Show or hide the show (visibility) button

    :param show: (bool) True to show the button, False to hide it
    """
    self._show_ctrl(self.btn_show, show)
def show_tint_btn(self, show):
    """ Show or hide the tint button

    :param show: (bool) True to show the button, False to hide it
    """
    self._show_ctrl(self.btn_tint, show)
def enable_remove_btn(self, enabled):
    """ Enable or disable the remove button

    :param enabled: (bool) True to enable, False to disable
    """
    self.btn_remove.Enable(enabled)
def enable_updated_btn(self, enabled):
    """ Enable or disable the update button

    :param enabled: (bool) True to enable, False to disable
    """
    self.btn_update.Enable(enabled)
def enable_show_btn(self, enabled):
    """ Enable or disable the show (visibility) button

    :param enabled: (bool) True to enable, False to disable
    """
    self.btn_show.Enable(enabled)
def enable_peak_btn(self, enabled):
    """ Enable or disable the peak button

    :param enabled: (bool) True to enable, False to disable
    """
    self.btn_peak.Enable(enabled)
def enable_tint_btn(self, enabled):
    """ Enable or disable the tint button

    :param enabled: (bool) True to enable, False to disable
    """
    self.btn_tint.Enable(enabled)
def enable(self, enabled):
    """ Enable or disable every button that is present on this header

    Buttons that were never created (attribute is None) are skipped.
    :param enabled: (bool) True to enable, False to disable
    """
    button_setters = (
        (self.btn_remove, self.enable_remove_btn),
        (self.btn_update, self.enable_updated_btn),
        (self.btn_show, self.enable_show_btn),
        (self.btn_peak, self.enable_peak_btn),
        (self.btn_tint, self.enable_tint_btn),
    )
    for btn, setter in button_setters:
        if btn:
            setter(enabled)
def to_static_mode(self):
    """ Remove or disable the controls not needed for a static view of the stream """
    self.show_remove_btn(False)
    self.show_updated_btn(False)
    # Only an editable (suggest) label needs disabling; a plain label is left alone
    if isinstance(self.ctrl_label, SuggestTextCtrl):
        self.ctrl_label.Disable()
def to_locked_mode(self):
    """ Remove or disable all controls

    Extends static mode by also hiding the visibility and peak buttons.
    """
    self.to_static_mode()
    self.show_show_btn(False)
    self.show_peak_btn(False)
# END Show/hide/disable controls
# GUI event handlers
def _on_label_change(self, evt):
    """ Forward the new label text to the registered callback, if any """
    callback = self.label_change_callback
    if callable(callback):
        callback(self.ctrl_label.GetValue())
@call_in_wx_main
def _on_tint_value(self, colour):
    """ Update the colour button to reflect the provided colour

    The call_in_wx_main decorator presumably marshals the call onto the wx
    main thread, so this can be used as a VA subscriber — TODO confirm.
    :param colour: passed straight through to the button's set_colour()
    """
    self.btn_tint.set_colour(colour)
def _on_tint_click(self, evt):
    """ Handle the mouse click event on the tint button

    Opens a colour picker dialog pre-set to the stream's current tint, and
    on confirmation writes the chosen colour back to the stream's tint VA
    (which in turn updates the button via the VA subscription).
    """
    # Remove the hover effect
    self.btn_tint.OnLeave(evt)
    # Set default colour to the current value
    cldata = wx.ColourData()
    cldata.SetColour(wx.Colour(*self.Parent.stream.tint.value))
    dlg = wx.ColourDialog(self, cldata)
    try:
        if dlg.ShowModal() == wx.ID_OK:
            # Use the explicit getter: the bare ``dlg.ColourData`` attribute is
            # an auto-generated property that only exists in wxPython Phoenix,
            # not in classic wxPython (which this file targets — cf.
            # SetToolTipString elsewhere), where it raises AttributeError.
            colour = dlg.GetColourData().GetColour().Get()  # convert to a 3-tuple
            logging.debug("Colour %r selected", colour)
            # Setting the VA will automatically update the button's colour
            self.Parent.stream.tint.value = colour
    finally:
        # Modal dialogs are not destroyed automatically; avoid leaking it
        dlg.Destroy()
# END GUI event handlers
def set_label_choices(self, choices):
    """ Assign a list of predefined labels to the suggest control from which the user may choose

    :param choices: [str] suggestion strings
    :raises TypeError: if the label control is not a SuggestTextCtrl
        (i.e. has no SetChoices method)
    """
    try:
        self.ctrl_label.SetChoices(choices)
    except AttributeError:
        raise TypeError("SuggestTextCtrl required, %s found!!" % type(self.ctrl_label))
def set_focus_on_label(self):
    """ Set the focus on the label (and select the text if it's editable) """
    self.ctrl_label.SetFocus()
    # OPT_NAME_EDIT is set when the user is allowed to rename the stream
    if self.Parent.options & OPT_NAME_EDIT:
        self.ctrl_label.SelectAll()
class StreamPanel(wx.Panel):
    """ The StreamPanel class, a special case collapsible panel.

    The StreamPanel consists of the following widgets:

        StreamPanel
            BoxSizer
                StreamPanelHeader
                Panel
                    BoxSizer
                        GridBagSizer

    Additional controls can be added to the GridBagSizer in the 'finalize' method.
    The controls contained within a StreamPanel are typically connected to the VigilantAttribute
    properties of the Stream it's representing.

    NOTE(review): this code targets Python 2 / classic wxPython (``unicode``,
    ``SetToolTipString``) — confirm before running on Python 3 / Phoenix.
    """

    def __init__(self, parent, stream, options=(OPT_BTN_REMOVE | OPT_BTN_SHOW | OPT_BTN_UPDATE),
                 wid=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=wx.CP_DEFAULT_STYLE, name="StreamPanel", collapsed=False):
        """
        :param parent: (StreamBar) The parent widget.
        :param stream: (Stream) The stream data model to be displayed to and
            modified by the user.
        :param options: (int) bitmask of OPT_* flags selecting which header
            buttons/behaviours are available
        :param collapsed: (bool) initial folded state
        """
        assert(isinstance(parent, StreamBar))
        wx.Panel.__init__(self, parent, wid, pos, size, style, name)
        self.options = options
        self.stream = stream  # TODO: Should this also be moved to the StreamController? YES!
        # Dye attributes
        self._btn_excitation = None
        self._btn_emission = None
        # Appearance
        # self._agwStyle = agwStyle | wx.CP_NO_TLW_RESIZE  # |wx.CP_GTK_EXPANDER
        self.SetBackgroundColour(BG_COLOUR_STREAM)
        self.SetForegroundColour(FG_COLOUR_MAIN)
        # State
        self._collapsed = collapsed
        # Child widgets
        self.main_sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(self.main_sizer)
        self._header = None
        self._panel = None
        self._prev_drange = None
        # Grid that receives the setting controls added by the add_* methods
        self.gb_sizer = wx.GridBagSizer()
        # Counter that keeps track of the number of rows containing controls inside this panel
        self.num_rows = 0
        self._create_controls()

    def _create_controls(self):
        """ Set up the basic structure for the controls that are going to be used """
        # Create stream header
        self._header = StreamPanelHeader(self)
        self._header.Bind(wx.EVT_LEFT_UP, self.on_toggle)
        self._header.Bind(wx.EVT_PAINT, self.on_draw_expander)
        self.Bind(wx.EVT_BUTTON, self.on_button, self._header)
        self._header.btn_remove.Bind(wx.EVT_BUTTON, self.on_remove_btn)
        self._header.btn_show.Bind(wx.EVT_BUTTON, self.on_visibility_btn)
        if self._header.btn_peak is not None:
            self._header.btn_peak.Bind(wx.EVT_BUTTON, self.on_peak_btn)
        if wx.Platform == "__WXMSW__":
            # On Windows, double-clicking the header also toggles the panel
            self._header.Bind(wx.EVT_LEFT_DCLICK, self.on_button)
        self.main_sizer.Add(self._header, 0, wx.EXPAND)
        # Create the control panel
        self._panel = wx.Panel(self, style=wx.TAB_TRAVERSAL | wx.NO_BORDER)
        # Add a simple sizer so we can create padding for the panel
        border_sizer = wx.BoxSizer(wx.HORIZONTAL)
        border_sizer.Add(self.gb_sizer, border=5, flag=wx.ALL | wx.EXPAND, proportion=1)
        self._panel.SetSizer(border_sizer)
        self._panel.SetBackgroundColour(BG_COLOUR_MAIN)
        self._panel.SetForegroundColour(FG_COLOUR_MAIN)
        self._panel.SetFont(self.GetFont())
        # NOTE(review): collapse() is called without argument, which sets
        # _collapsed to None and shows the panel — the ``collapsed``
        # constructor argument appears to be ignored here; confirm intended.
        self.collapse()
        self.main_sizer.Add(self._panel, 0, wx.EXPAND)

    @property
    def collapsed(self):
        # Truthy when the control panel is folded away and only the header shows
        return self._collapsed

    @property
    def header_change_callback(self):
        # Callable invoked by the header when the user edits the stream label
        return self._header.label_change_callback

    @header_change_callback.setter
    def header_change_callback(self, f):
        self._header.label_change_callback = f

    def set_header_choices(self, choices):
        # Forward the list of suggested stream names to the header control
        self._header.set_label_choices(choices)

    def flatten(self):
        """ Unfold the stream panel and hide the header """
        self.collapse(False)
        self._header.Show(False)

    def set_focus_on_label(self):
        """ Focus the text label in the header """
        self._header.set_focus_on_label()

    def Layout(self, *args, **kwargs):
        """ Layout the StreamPanel. """
        if not self._header or not self._panel or not self.main_sizer:
            return False  # we need to complete the creation first!
        oursz = self.GetSize()
        # move & resize the button and the static line
        self.main_sizer.SetDimension(0, 0, oursz.GetWidth(),
                                     self.main_sizer.GetMinSize().GetHeight())
        self.main_sizer.Layout()
        if not self._collapsed:
            # move & resize the container window
            yoffset = self.main_sizer.GetSize().GetHeight()
            if oursz.y - yoffset > 0:
                self._panel.SetDimensions(0, yoffset, oursz.x, oursz.y - yoffset)
                # this is very important to make the pane window layout show
                # correctly
                self._panel.Show()
                self._panel.Layout()
        return True

    def DoGetBestSize(self, *args, **kwargs):
        """ Gets the size which best suits the window

        For a control, it would be the minimal size which doesn't truncate the control, for a panel
        the same size as it would have after a call to `Fit()`.
        TODO: This method seems deprecated. Test if it's really so.
        """
        # do not use GetSize() but rather GetMinSize() since it calculates
        # the required space of the sizer
        sz = self.main_sizer.GetMinSize()
        # when expanded, we need more space
        if not self._collapsed:
            pbs = self._panel.GetBestSize()
            sz.width = max(sz.GetWidth(), pbs.x)
            sz.height = sz.y + pbs.y
        return sz

    def Destroy(self, *args, **kwargs):
        """ Delete the widget from the GUI
        """
        # Avoid receiving data after the object is deleted: the _sld_hist /
        # _sld_spec attributes only exist if the corresponding subscriptions
        # were made by the add_* control methods.
        if hasattr(self, "_sld_hist"):
            self.stream.histogram.unsubscribe(self.on_histogram)
        if hasattr(self, "_sld_spec"):
            self.stream.image.unsubscribe(self.on_new_spec_data)
        super(StreamPanel, self).Destroy(*args, **kwargs)

    def set_visible(self, visible):
        """ Set the "visible" toggle button of the stream panel """
        self._header.btn_show.SetToggle(visible)

    def set_peak(self, state):
        """ Set the "peak" toggle button of the stream panel

        state (None or 0<=int): None for no peak, 0 for gaussian, 1 for lorentzian
        """
        self._header.btn_peak.SetState(state)

    def collapse(self, collapse=None):
        """ Collapses or expands the pane window

        :param collapse: (bool or None) True to fold the panel, False to
            unfold it. None (the default) falls through the early-return
            check and shows the panel.
        """
        if collapse is not None and self._collapsed == collapse:
            return
        self.Freeze()
        # update our state
        self._panel.Show(not collapse)
        self._collapsed = collapse
        # Call after is used, so the fit will occur after everything has been hidden or shown
        wx.CallAfter(self.Parent.fit_streams)
        self.Thaw()

    # GUI events: update the stream when the user changes the values

    def on_remove_btn(self, evt):
        # Ask for the stream (and this panel) to be removed
        logging.debug("Remove button clicked for '%s'", self.stream.name.value)
        # generate EVT_STREAM_REMOVE
        event = stream_remove_event(spanel=self)
        wx.PostEvent(self, event)

    def on_visibility_btn(self, evt):
        # generate EVT_STREAM_VISIBLE
        event = stream_visible_event(visible=self._header.btn_show.GetToggle())
        wx.PostEvent(self, event)

    def on_peak_btn(self, evt):
        # generate EVT_STREAM_PEAK
        event = stream_peak_event(state=self._header.btn_peak.GetState())
        wx.PostEvent(self, event)

    # Manipulate expander buttons

    def show_updated_btn(self, show):
        self._header.show_updated_btn(show)

    def enable_updated_btn(self, enabled):
        self._header.enable_updated_btn(enabled)

    def show_remove_btn(self, show):
        self._header.show_remove_btn(show)

    def show_visible_btn(self, show):
        self._header.show_show_btn(show)

    def show_peak_btn(self, show):
        self._header.show_peak_btn(show)

    def enable(self, enabled):
        self._header.enable(enabled)

    def OnSize(self, event):
        """ Handles the wx.EVT_SIZE event for StreamPanel
        """
        self.Layout()
        event.Skip()

    def on_toggle(self, evt):
        """ Detect click on the collapse button of the StreamPanel """
        w = evt.GetEventObject().GetSize().GetWidth()
        # Only the right-most 15% of the header (where the arrow is drawn)
        # acts as the collapse hot-zone
        if evt.GetX() > w * 0.85:
            self.collapse(not self._collapsed)
        else:
            evt.Skip()

    def on_button(self, event):
        """ Handles the wx.EVT_BUTTON event for StreamPanel """
        if event.GetEventObject() != self._header:
            event.Skip()
            return
        self.collapse(not self._collapsed)

    def on_draw_expander(self, event):
        """ Handle the ``wx.EVT_PAINT`` event for the stream panel

        :note: This is a drawing routine to paint the GTK-style expander.
        """
        dc = wx.AutoBufferedPaintDC(self._header)
        dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
        dc.Clear()
        self._header.on_draw_expander(dc)

    def to_static_mode(self):
        """ Hide or make read-only any button or data that should not change during acquisition """
        self._header.to_static_mode()

    def to_locked_mode(self):
        """ Hide or make read-only all buttons and data controls"""
        # NOTE(review): the header's to_locked_mode() already calls
        # to_static_mode() internally, so the first call looks redundant —
        # harmless, but confirm before simplifying.
        self._header.to_static_mode()
        self._header.to_locked_mode()

    # Setting Control Addition Methods
    #
    # The add_* methods below are decorated with @control_bookkeeper, which
    # presumably takes care of advancing num_rows after the control row has
    # been added — TODO confirm against its definition.

    def _add_side_label(self, label_text, tooltip=None):
        """ Add a text label to the control grid

        This method should only be called from other methods that add control to the control grid
        :param label_text: (str)
        :param tooltip: (None or str) optional tooltip for the label
        :return: (wx.StaticText)
        """
        lbl_ctrl = wx.StaticText(self._panel, -1, label_text)
        if tooltip:
            lbl_ctrl.SetToolTipString(tooltip)
        self.gb_sizer.Add(lbl_ctrl, (self.num_rows, 0),
                          flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL, border=5)
        return lbl_ctrl

    @control_bookkeeper
    def add_autobc_ctrls(self):
        """ Create and return controls needed for (auto) brightness and contrast manipulation """
        btn_autobc = buttons.ImageTextToggleButton(self._panel, height=24,
                                                   icon=img.getBitmap("icon/ico_contrast.png"),
                                                   label="Auto")
        btn_autobc.SetToolTipString("Toggle auto brightness and contrast")
        lbl_bc_outliers = wx.StaticText(self._panel, -1, "Outliers")
        sld_bc_outliers = UnitFloatSlider(
            self._panel,
            value=self.stream.auto_bc_outliers.value,
            min_val=self.stream.auto_bc_outliers.range[0],
            max_val=self.stream.auto_bc_outliers.range[1],
            unit="%",
            scale="cubic",
            accuracy=2
        )
        sld_bc_outliers.SetToolTipString("Percentage of values to ignore "
                                         "in auto brightness and contrast")
        autobc_sz = wx.BoxSizer(wx.HORIZONTAL)
        autobc_sz.Add(btn_autobc, 0, flag=wx.ALIGN_CENTRE_VERTICAL | wx.RIGHT, border=5)
        autobc_sz.Add(lbl_bc_outliers, 0, flag=wx.ALIGN_CENTRE_VERTICAL | wx.LEFT, border=5)
        autobc_sz.Add(sld_bc_outliers, 1,
                      flag=wx.ALIGN_CENTRE_VERTICAL | wx.LEFT | wx.EXPAND, border=5)
        self.gb_sizer.Add(autobc_sz, (self.num_rows, 0), span=(1, 3),
                          flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.ALL, border=5)
        return btn_autobc, lbl_bc_outliers, sld_bc_outliers

    @control_bookkeeper
    def add_outliers_ctrls(self):
        """ Add controls for the manipulation of the outlier values

        Adds two grid rows: a histogram range slider, then Low/High text
        entries (num_rows is advanced manually between the two).
        :return: (VisualRangeSlider, FloatTextCtrl, FloatTextCtrl)
        """
        # TODO: Move min/max to controller too?
        hist_min = self.stream.intensityRange.range[0][0]
        hist_max = self.stream.intensityRange.range[1][1]
        sld_hist = VisualRangeSlider(self._panel, size=(-1, 40),
                                     value=self.stream.intensityRange.value,
                                     min_val=hist_min, max_val=hist_max)
        sld_hist.SetBackgroundColour("#000000")
        self.gb_sizer.Add(sld_hist, pos=(self.num_rows, 0), span=(1, 3), border=5,
                          flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT)
        self.num_rows += 1
        # Low/ High values are in raw data. So it's typically uint, but could
        # be float for some weird cases. So we make them float, with high
        # accuracy to avoid rounding.
        lbl_lowi = wx.StaticText(self._panel, -1, "Low")
        tooltip_txt = "Value mapped to black"
        lbl_lowi.SetToolTipString(tooltip_txt)
        txt_lowi = FloatTextCtrl(self._panel, -1,
                                 self.stream.intensityRange.value[0],
                                 style=wx.NO_BORDER, size=(-1, 14),
                                 min_val=hist_min, max_val=hist_max,
                                 key_step=1, accuracy=6)
        txt_lowi.SetForegroundColour(FG_COLOUR_EDIT)
        txt_lowi.SetOwnBackgroundColour(BG_COLOUR_MAIN)
        txt_lowi.SetToolTipString(tooltip_txt)
        lbl_highi = wx.StaticText(self._panel, -1, "High")
        tooltip_txt = "Value mapped to white"
        lbl_highi.SetToolTipString(tooltip_txt)
        txt_highi = FloatTextCtrl(self._panel, -1,
                                  self.stream.intensityRange.value[1],
                                  style=wx.NO_BORDER, size=(-1, 14),
                                  min_val=hist_min, max_val=hist_max,
                                  key_step=1, accuracy=6)
        txt_highi.SetBackgroundColour(BG_COLOUR_MAIN)
        txt_highi.SetForegroundColour(FG_COLOUR_EDIT)
        txt_highi.SetToolTipString(tooltip_txt)
        # Add controls to sizer for spacing
        lh_sz = wx.BoxSizer(wx.HORIZONTAL)
        lh_sz.Add(lbl_lowi, 0, border=5, flag=wx.ALIGN_CENTRE_VERTICAL | wx.LEFT)
        lh_sz.Add(txt_lowi, 1, border=5,
                  flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.RIGHT | wx.LEFT)
        lh_sz.Add(lbl_highi, 0, border=5, flag=wx.ALIGN_CENTRE_VERTICAL | wx.LEFT)
        lh_sz.Add(txt_highi, 1, border=5,
                  flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.RIGHT | wx.LEFT)
        # Add spacing sizer to grid sizer
        self.gb_sizer.Add(lh_sz, (self.num_rows, 0), span=(1, 3), border=5,
                          flag=wx.BOTTOM | wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND)
        return sld_hist, txt_lowi, txt_highi

    @control_bookkeeper
    def add_hw_setting_ctrl(self, name, value=None):
        """ Add a generic number control to manipulate a hardware setting """
        lbl_ctrl = self._add_side_label(name)
        value_ctrl = FloatTextCtrl(self._panel, -1, value or 0.0, style=wx.NO_BORDER)
        value_ctrl.SetForegroundColour(gui.FG_COLOUR_EDIT)
        value_ctrl.SetBackgroundColour(gui.BG_COLOUR_MAIN)
        self.gb_sizer.Add(value_ctrl, (self.num_rows, 1), span=(1, 3),
                          flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.ALL, border=5)
        return lbl_ctrl, value_ctrl

    def _add_slider(self, klass, label_text, value, conf):
        """ Add a slider of type 'klass' to the settings panel """
        lbl_ctrl = self._add_side_label(label_text)
        value_ctrl = klass(self._panel, value=value, **conf)
        self.gb_sizer.Add(value_ctrl, (self.num_rows, 1), span=(1, 3),
                          flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.ALL, border=5)
        return lbl_ctrl, value_ctrl

    @control_bookkeeper
    def add_slider(self, label_text, value=None, conf=None):
        """ Add an integer value slider to the settings panel

        :param label_text: (str) Label text to display
        :param value: (None or int) Value to display
        :param conf: (None or dict) Dictionary containing parameters for the control
        """
        return self._add_slider(Slider, label_text, value, conf)

    @control_bookkeeper
    def add_integer_slider(self, label_text, value=None, conf=None):
        """ Add an integer value slider to the settings panel

        :param label_text: (str) Label text to display
        :param value: (None or int) Value to display
        :param conf: (None or dict) Dictionary containing parameters for the control
        """
        return self._add_slider(UnitIntegerSlider, label_text, value, conf)

    @control_bookkeeper
    def add_float_slider(self, label_text, value=None, conf=None):
        """ Add a float value slider to the settings panel

        :param label_text: (str) Label text to display
        :param value: (None or float) Value to display
        :param conf: (None or dict) Dictionary containing parameters for the control
        """
        return self._add_slider(UnitFloatSlider, label_text, value, conf)

    @control_bookkeeper
    def add_int_field(self, label_text, value=None, conf=None):
        """ Add an integer value field to the settings panel

        :param label_text: (str) Label text to display
        :param value: (None or int) Value to display
        :param conf: (None or dict) Dictionary containing parameters for the control
        """
        return self._add_num_field(UnitIntegerCtrl, label_text, value, conf)

    @control_bookkeeper
    def add_float_field(self, label_text, value=None, conf=None):
        """ Add a float value field to the settings panel

        :param label_text: (str) Label text to display
        :param value: (None or float) Value to display
        :param conf: (None or dict) Dictionary containing parameters for the control
        """
        return self._add_num_field(UnitFloatCtrl, label_text, value, conf)

    def _add_num_field(self, klass, label_text, value, conf):
        # Shared implementation for add_int_field/add_float_field
        lbl_ctrl = self._add_side_label(label_text)
        value_ctrl = klass(self._panel, value=value, style=wx.NO_BORDER, **conf)
        self.gb_sizer.Add(value_ctrl, (self.num_rows, 1),
                          flag=wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL, border=5)
        value_ctrl.SetForegroundColour(gui.FG_COLOUR_EDIT)
        value_ctrl.SetBackgroundColour(gui.BG_COLOUR_MAIN)
        return lbl_ctrl, value_ctrl

    @control_bookkeeper
    def add_combobox_control(self, label_text, value=None, conf=None):
        """ Add a combobox control to manipulate a hardware setting """
        lbl_ctrl = self._add_side_label(label_text)
        value_ctrl = ComboBox(self._panel, wx.ID_ANY, pos=(0, 0), size=(-1, 16),
                              style=wx.NO_BORDER | wx.TE_PROCESS_ENTER, **conf if conf else {})
        self.gb_sizer.Add(value_ctrl, (self.num_rows, 1), span=(1, 3),
                          flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.ALL, border=5)
        if value is not None:
            value_ctrl.SetValue(unicode(value))
        return lbl_ctrl, value_ctrl

    @control_bookkeeper
    def add_readonly_field(self, label_text, value=None, selectable=True):
        """ Adds a value to the control panel that cannot directly be changed by the user

        :param label_text: (str) Label text to display
        :param value: (None or object) Value to display next to the label
        :param selectable: (boolean) whether the value can be selected for copying by the user
        :return: (Ctrl, Ctrl or None) Label and value control
        """
        lbl_ctrl = self._add_side_label(label_text)
        if value:
            if selectable:
                # Read-only text control so the user can still select/copy the text
                value_ctrl = wx.TextCtrl(self._panel, value=unicode(value),
                                         style=wx.BORDER_NONE | wx.TE_READONLY)
                value_ctrl.SetForegroundColour(gui.FG_COLOUR_DIS)
                value_ctrl.SetBackgroundColour(gui.BG_COLOUR_MAIN)
                self.gb_sizer.Add(value_ctrl, (self.num_rows, 1),
                                  flag=wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL, border=5)
            else:
                value_ctrl = wx.StaticText(self._panel, label=unicode(value))
                value_ctrl.SetForegroundColour(gui.FG_COLOUR_DIS)
                self.gb_sizer.Add(value_ctrl, (self.num_rows, 1), flag=wx.ALL, border=5)
        else:
            value_ctrl = None
        return lbl_ctrl, value_ctrl

    @control_bookkeeper
    def add_checkbox_control(self, label_text, value=True, conf=None):
        """ Add a checkbox to the settings panel

        :param label_text: (str) Label text to display
        :param value: (bool) Value to display (True == checked)
        :param conf: (None or dict) Dictionary containing parameters for the control
        """
        if conf is None:
            conf = {}
        lbl_ctrl = self._add_side_label(label_text)
        # wx.ALIGN_RIGHT has the effect of only highlighting the box on hover,
        # which makes it less ugly with Ubuntu
        value_ctrl = wx.CheckBox(self._panel, wx.ID_ANY,
                                 style=wx.ALIGN_RIGHT | wx.NO_BORDER,
                                 **conf)
        self.gb_sizer.Add(value_ctrl, (self.num_rows, 1), span=(1, 3),
                          flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.TOP | wx.BOTTOM, border=5)
        value_ctrl.SetValue(value)
        return lbl_ctrl, value_ctrl

    @control_bookkeeper
    def add_radio_control(self, label_text, value=None, conf=None):
        """ Add a series of radio buttons to the settings panel

        :param label_text: (str) Label text to display
        :param value: (None or float) Value to display
        :param conf: (None or dict) Dictionary containing parameters for the control
        """
        lbl_ctrl = self._add_side_label(label_text)
        value_ctrl = GraphicalRadioButtonControl(self._panel, -1, style=wx.NO_BORDER,
                                                 **conf if conf else {})
        self.gb_sizer.Add(value_ctrl, (self.num_rows, 1),
                          flag=wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL, border=5)
        if value is not None:
            value_ctrl.SetValue(value)
        return lbl_ctrl, value_ctrl

    @control_bookkeeper
    def add_text_field(self, label_text, value=None, readonly=False):
        """ Add a label and text control to the settings panel

        :param label_text: (str) Label text to display
        :param value: (None or str) Value to display
        :param readonly: (boolean) Whether the value can be changed by the user
        :return: (Ctrl, Ctrl) Label and text control
        """
        lbl_ctrl = self._add_side_label(label_text)
        value_ctrl = wx.TextCtrl(self._panel, value=unicode(value or ""),
                                 style=wx.TE_PROCESS_ENTER | wx.BORDER_NONE | (wx.TE_READONLY if readonly else 0))
        if readonly:
            value_ctrl.SetForegroundColour(gui.FG_COLOUR_DIS)
        else:
            value_ctrl.SetForegroundColour(gui.FG_COLOUR_EDIT)
        value_ctrl.SetBackgroundColour(gui.BG_COLOUR_MAIN)
        self.gb_sizer.Add(value_ctrl, (self.num_rows, 1),
                          flag=wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL, border=5)
        return lbl_ctrl, value_ctrl

    @control_bookkeeper
    def add_divider(self):
        """ Add a dividing line to the stream panel (returns nothing) """
        line_ctrl = wx.StaticLine(self._panel, size=(-1, 1))
        self.gb_sizer.Add(line_ctrl, (self.num_rows, 0), span=(1, 3),
                          flag=wx.ALL | wx.EXPAND, border=5)

    @control_bookkeeper
    def add_dye_excitation_ctrl(self, band, readonly, center_wl_color):
        # Thin wrapper around _add_filter_line for the excitation filter
        lbl_ctrl, value_ctrl, lbl_exc_peak, btn_excitation = self._add_filter_line("Excitation",
                                                                                  band,
                                                                                  readonly,
                                                                                  center_wl_color)
        return lbl_ctrl, value_ctrl, lbl_exc_peak, btn_excitation

    @control_bookkeeper
    def add_dye_emission_ctrl(self, band, readonly, center_wl_color):
        # Thin wrapper around _add_filter_line for the emission filter
        lbl_ctrl, value_ctrl, lbl_em_peak, btn_emission = self._add_filter_line("Emission",
                                                                               band,
                                                                               readonly,
                                                                               center_wl_color)
        return lbl_ctrl, value_ctrl, lbl_em_peak, btn_emission

    def _add_filter_line(self, name, band, readonly, center_wl_color):
        """ Create the controls for dye emission/excitation colour filter setting

        :param name: (str): the label name
        :param band (str): the current wavelength band to display
        :param readonly (bool) read-only when there's no or just one band value
        :param center_wl_color: None or (r, g, b) center wavelength color of the
            current band of the VA. If None, no button is shown.
        :return: (4 wx.Controls) the respective controls created
        """
        # Note: va.value is in m, but we present everything in nm
        lbl_ctrl = self._add_side_label(name)
        # will contain both the combo box and the peak label
        exc_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.gb_sizer.Add(exc_sizer, (self.num_rows, 1), flag=wx.EXPAND)
        if readonly:
            hw_set = wx.TextCtrl(self._panel, value=band, size=(-1, 16),
                                 style=wx.BORDER_NONE | wx.TE_READONLY)
            hw_set.SetBackgroundColour(self._panel.BackgroundColour)
            hw_set.SetForegroundColour(FG_COLOUR_DIS)
            exc_sizer.Add(hw_set, 1, flag=wx.LEFT | wx.RIGHT | wx.ALIGN_CENTRE_VERTICAL, border=5)
        else:
            hw_set = ComboBox(self._panel, value=band, size=(-1, 16),
                              style=wx.CB_READONLY | wx.BORDER_NONE)
            # To avoid catching mouse wheels events when scrolling the panel
            hw_set.Bind(wx.EVT_MOUSEWHEEL, lambda e: None)
            exc_sizer.Add(hw_set, 1, border=5, flag=wx.ALL | wx.ALIGN_CENTRE_VERTICAL)
        # Label for peak information
        lbl_peak = wx.StaticText(self._panel)
        exc_sizer.Add(lbl_peak, 1, border=5, flag=wx.ALL | wx.ALIGN_CENTRE_VERTICAL | wx.ALIGN_LEFT)
        if center_wl_color:
            # A button, but not clickable, just to show the wavelength
            # If a dye is selected, the colour of the peak is used, otherwise we
            # use the hardware setting
            btn_color = buttons.ColourButton(self._panel, -1, colour=center_wl_color,
                                             size=(18, 18))
            self.gb_sizer.Add(btn_color,
                              (self.num_rows, 2),
                              flag=wx.RIGHT | wx.ALIGN_CENTRE_VERTICAL | wx.ALIGN_RIGHT,
                              border=5)
        else:
            btn_color = None
        return lbl_ctrl, hw_set, lbl_peak, btn_color

    # END Setting Control Addition Methods

    @control_bookkeeper
    def add_rgbfit_ctrl(self):
        """ Add an 'rgb fit' button to the stream panel

        :return: (ImageTextToggleButton)
        """
        btn_fit_rgb = buttons.ImageTextToggleButton(self._panel, height=24,
                                                    icon=img.getBitmap("icon/ico_bgr.png"),
                                                    label="RGB")
        btn_fit_rgb.SetToolTipString("Toggle sub-bandwidths to Blue/Green/Red display")
        self.gb_sizer.Add(btn_fit_rgb, (self.num_rows, 0), flag=wx.LEFT | wx.TOP | wx.BOTTOM,
                          border=5)
        return btn_fit_rgb

    @control_bookkeeper
    def add_specbw_ctrls(self):
        """ Add controls to manipulate the spectrum data bandwidth

        Adds two grid rows: a range slider, then Center/Bandwidth text
        entries (num_rows is advanced manually between the two).
        Returns:
            (VisualRangeSlider, wx.StaticText, wx.StaticText)
        """
        # 1st row, center label, slider and value
        wl = self.stream.spectrumBandwidth.value
        # TODO: Move min/max to controller too?
        wl_rng = (self.stream.spectrumBandwidth.range[0][0],
                  self.stream.spectrumBandwidth.range[1][1])
        sld_spec = VisualRangeSlider(self._panel, size=(-1, 40),
                                     value=wl, min_val=wl_rng[0], max_val=wl_rng[1])
        sld_spec.SetBackgroundColour("#000000")
        self.gb_sizer.Add(sld_spec, pos=(self.num_rows, 0), span=(1, 3), border=5,
                          flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT)
        self.num_rows += 1
        # 2nd row, text fields for intensity (ratios)
        tooltip_txt = "Center wavelength of the spectrum"
        lbl_scenter = wx.StaticText(self._panel, -1, "Center")
        lbl_scenter.SetToolTipString(tooltip_txt)
        txt_scenter = UnitFloatCtrl(self._panel, -1, (wl[0] + wl[1]) / 2,
                                    style=wx.NO_BORDER, size=(-1, 14),
                                    min_val=wl_rng[0], max_val=wl_rng[1],
                                    unit=self.stream.spectrumBandwidth.unit,  # m or px
                                    accuracy=3)
        txt_scenter.SetBackgroundColour(BG_COLOUR_MAIN)
        txt_scenter.SetForegroundColour(FG_COLOUR_EDIT)
        txt_scenter.SetToolTipString(tooltip_txt)
        tooltip_txt = "Bandwidth of the spectrum"
        lbl_sbw = wx.StaticText(self._panel, -1, "Bandwidth")
        lbl_sbw.SetToolTipString(tooltip_txt)
        txt_sbw = UnitFloatCtrl(self._panel, -1, (wl[1] - wl[0]),
                                style=wx.NO_BORDER, size=(-1, 14),
                                min_val=0, max_val=(wl_rng[1] - wl_rng[0]),
                                unit=self.stream.spectrumBandwidth.unit,
                                accuracy=3)
        txt_sbw.SetBackgroundColour(BG_COLOUR_MAIN)
        txt_sbw.SetForegroundColour(FG_COLOUR_EDIT)
        txt_sbw.SetToolTipString(tooltip_txt)
        cb_wl_sz = wx.BoxSizer(wx.HORIZONTAL)
        cb_wl_sz.Add(lbl_scenter, 0,
                     flag=wx.ALIGN_CENTRE_VERTICAL | wx.LEFT,
                     border=5)
        cb_wl_sz.Add(txt_scenter, 1,
                     flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.RIGHT | wx.LEFT,
                     border=5)
        cb_wl_sz.Add(lbl_sbw, 0,
                     flag=wx.ALIGN_CENTRE_VERTICAL | wx.LEFT,
                     border=5)
        cb_wl_sz.Add(txt_sbw, 1,
                     flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.RIGHT | wx.LEFT,
                     border=5)
        self.gb_sizer.Add(cb_wl_sz, (self.num_rows, 0), span=(1, 3), border=5,
                          flag=wx.BOTTOM | wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND)
        return sld_spec, txt_scenter, txt_sbw

    @control_bookkeeper
    def add_specselwidth_ctrl(self):
        """ Add a control to manipulate the spectrum selection width

        :return: wx.StaticText, UnitIntegerSlider
        """
        # Add the selectionWidth VA
        tooltip_txt = "Width of the point or line selected"
        lbl_selection_width = self._add_side_label("Width", tooltip_txt)
        sld_selection_width = UnitIntegerSlider(
            self._panel,
            value=self.stream.selectionWidth.value,
            min_val=self.stream.selectionWidth.range[0],
            max_val=self.stream.selectionWidth.range[1],
            unit="px",
        )
        sld_selection_width.SetToolTipString(tooltip_txt)
        self.gb_sizer.Add(sld_selection_width, (self.num_rows, 1), span=(1, 2), border=5,
                          flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.ALL)
        return lbl_selection_width, sld_selection_width
class StreamBar(wx.Panel):
"""
The whole panel containing stream panels and a button to add more streams
There are multiple levels of visibility of a stream panel:
* the stream panel is shown in the panel and has the visible icon on:
The current view is compatible with the stream and has it in its list
of streams.
* the stream panel is shown in the panel and has the visible icon off:
The current view is compatible with the stream, but the stream is not
in its list of streams
* the stream panel is not present in the panel (hidden):
The current view is not compatible with the stream
"""
DEFAULT_BORDER = 2
DEFAULT_STYLE = wx.BOTTOM | wx.EXPAND
# the order in which the streams are displayed
STREAM_ORDER = (
acq.stream.SEMStream,
acq.stream.StaticSEMStream,
acq.stream.BrightfieldStream,
acq.stream.StaticStream,
acq.stream.FluoStream,
acq.stream.CLStream,
acq.stream.CameraStream,
acq.stream.ARSettingsStream,
acq.stream.SpectrumSettingsStream,
acq.stream.MonochromatorSettingsStream,
acq.stream.MomentOfInertiaLiveStream,
acq.stream.CameraCountStream,
)
def __init__(self, *args, **kwargs):
add_btn = kwargs.pop('add_button', False)
wx.Panel.__init__(self, *args, **kwargs)
self.stream_panels = []
self._sz = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self._sz)
msg = "No streams available."
# logging.debug("Point size %s" % self.GetFont().GetPointSize())
self.txt_no_stream = wx.StaticText(self, -1, msg)
self._sz.Add(self.txt_no_stream, 0, wx.ALL | wx.ALIGN_CENTER, 10)
self.btn_add_stream = None
if add_btn:
self.btn_add_stream = buttons.PopupImageButton(
self, -1,
label="ADD STREAM",
style=wx.ALIGN_CENTER
)
self.btn_add_stream.SetForegroundColour("#999999")
self._sz.Add(self.btn_add_stream, flag=wx.ALL, border=10)
# self.btn_add_stream.Bind(wx.EVT_BUTTON, self.on_add_stream)
self.fit_streams()
def fit_streams(self):
logging.debug("Refitting stream panels")
self._set_warning()
h = self._sz.GetMinSize().GetHeight()
self.SetSize((-1, h))
# The panel size is cached in the _PanelSize attribute.
# Make sure it's updated by calling ResizePanel
p = self.Parent
while not isinstance(p, FoldPanelItem):
p = p.Parent
p.Refresh()
# TODO: maybe should be provided after init by the controller (like key of
# sorted()), to separate the GUI from the model ?
def _get_stream_order(self, stream):
""" Gives the "order" of the given stream, as defined in STREAM_ORDER.
Args:
stream (Stream): a stream
Returns:
(int >= 0): the order
"""
for i, c in enumerate(self.STREAM_ORDER):
if isinstance(stream, c):
return i
msg = "Stream %s of unknown order type %s"
logging.warning(msg, stream.name.value, stream.__class__.__name__)
return len(self.STREAM_ORDER)
# === VA handlers
# Moved to stream controller
# === Event Handlers
# def on_add_stream(self, evt):
# evt.Skip()
def on_stream_remove(self, evt):
"""
Called when user request to remove a stream via the stream panel
"""
logging.debug("StreamBar received remove event %r", evt)
# delete stream panel
self.remove_stream_panel(evt.spanel)
# Publish removal notification
logging.debug("Sending stream.remove message")
pub.sendMessage("stream.remove", stream=evt.spanel.stream)
def on_streamp_destroy(self, evt):
"""
Called when a stream panel is completely removed
"""
wx.CallAfter(self.fit_streams)
# === API of the stream panel
def show_add_button(self):
if self.btn_add_stream:
self.btn_add_stream.Show()
self.fit_streams()
def hide_add_button(self):
if self.btn_add_stream:
self.btn_add_stream.Hide()
self.fit_streams()
def is_empty(self):
return len(self.stream_panels) == 0
def get_size(self):
""" Return the number of streams contained within the StreamBar """
return len(self.stream_panels)
def add_stream_panel(self, spanel, show=True):
"""
This method adds a stream panel to the stream bar. The appropriate
position is automatically determined.
spanel (StreamPanel): a stream panel
"""
# Insert the spanel in the order of STREAM_ORDER. If there are already
# streams with the same type, insert after them.
ins_pos = 0
order_s = self._get_stream_order(spanel.stream)
for e in self.stream_panels:
order_e = self._get_stream_order(e.stream)
if order_s < order_e:
break
ins_pos += 1
logging.debug("Inserting %s at position %s", spanel.stream.__class__.__name__, ins_pos)
self.stream_panels.insert(ins_pos, spanel)
if self._sz is None:
self._sz = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self._sz)
self._sz.InsertWindow(ins_pos, spanel,
flag=self.DEFAULT_STYLE,
border=self.DEFAULT_BORDER)
spanel.Bind(EVT_STREAM_REMOVE, self.on_stream_remove)
spanel.Bind(wx.EVT_WINDOW_DESTROY, self.on_streamp_destroy, source=spanel)
spanel.Layout()
# hide the stream if the current view is not compatible
spanel.Show(show)
self.fit_streams()
def remove_stream_panel(self, spanel):
    """
    Removes a stream panel
    Deletion of the actual stream must be done separately.

    :param spanel: (StreamPanel) the panel to remove; must currently be
        present in ``self.stream_panels``.
    """
    self.stream_panels.remove(spanel)
    # CallAfter is used to make sure all GUI updates occur in the main
    # thread. (Note: this was causing issues with the garbage collection of Streams, because
    # StreamPanel have a direct reference to Streams, which should be moved to the controller)
    #
    # Interesting side note: with CallAfter every time the same image was loaded, Odemis would
    # leak 11 MB, when Destroyed is called directly, it would leak 9 MB each time
    wx.CallAfter(spanel.Destroy)
def clear(self):
    """ Remove all stream panels """
    # Iterate over a copy, as remove_stream_panel() mutates the list
    for p in list(self.stream_panels):
        # Only refit the (empty) bar after all streams are gone
        # (the destroy handler would otherwise trigger one refit per panel)
        p.Unbind(wx.EVT_WINDOW_DESTROY, source=p, handler=self.on_streamp_destroy)
        self.remove_stream_panel(p)
    wx.CallAfter(self.fit_streams)
def _set_warning(self):
    """ Show the warning text when no streams are present, and hide it
    otherwise.
    """
    if self.txt_no_stream is not None:
        self.txt_no_stream.Show(self.is_empty())
| ktsitsikas/odemis | src/odemis/gui/comp/stream.py | Python | gpl-2.0 | 53,833 | [
"Gaussian"
] | de88761a61d57fec918717e5a326b802b610bca9cbb981b6bbcc0a5bce92b20e |
import matplotlib.mlab as mlab
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as offl
def dist_plot(rating_df):
    """Return a plotly div of each player's Gaussian skill distribution.

    Args:
        rating_df (pd.DataFrame): Must contain 'first_name', 'last_name',
            'rating' (mean, mu) and 'sigma' (standard deviation) columns,
            one row per player.

    Returns:
        str: HTML <div> produced by plotly's offline plot.
    """
    x = np.linspace(0, 50, 500)

    data_dict = {}
    for _, row in rating_df.iterrows():
        label_name = (row['first_name'] + ' ' + row['last_name'][0] + '.')
        # Evaluate the normal pdf directly with numpy:
        # matplotlib.mlab.normpdf was deprecated and removed (matplotlib 3.1+)
        pdf = (np.exp(-0.5 * ((x - row['rating']) / row['sigma']) ** 2)
               / (row['sigma'] * np.sqrt(2 * np.pi)))
        data_dict[label_name] = (x, pdf)

    final_df = pd.DataFrame()
    # dict.iteritems() was Python 2 only; items() works on Python 2 and 3
    for name, values in data_dict.items():
        final_df[name] = values[1]
    final_df['index'] = x
    final_df.set_index('index', inplace=True)

    trace_dict = dict()
    for n, col in enumerate(final_df.columns):
        trace_dict[n] = go.Scatter(
            x=final_df.index,
            y=final_df[col],
            name=col
        )
    # Materialise to a list: plotly expects a sequence, and dict views
    # are not subscriptable on Python 3
    data = list(trace_dict.values())

    # Edit the layout
    layout = dict(title='Individual Gaussian Skill Distribution',
                  xaxis=dict(title='Mu'),
                  yaxis=dict(title='Value'),
                  height=750
                  )

    return offl.plot(dict(data=data, layout=layout), output_type='div')
def win_probability_matrix(matrix_df):
    """Return the win probability matrix rendered as a plotly heatmap div."""
    # Labels run reversed along both axes so the diagonal reads naturally
    reversed_labels = matrix_df.columns[::-1]
    heatmap = go.Heatmap(
        z=matrix_df.transpose().values.tolist(),
        x=reversed_labels,
        y=reversed_labels,
        colorscale='Viridis'
    )
    layout = go.Layout(
        title='Win Probability Matrix',
        xaxis=dict(title='Loser', ticks=''),
        yaxis=dict(title='Winner', ticks=''),
        height=750
    )
    return offl.plot(dict(data=[heatmap], layout=layout), output_type='div')
| wseaton/pongr | app/plots.py | Python | mit | 1,677 | [
"Gaussian"
] | 26ff9b1d5cb27ab507054410fb01fb15ebdbb51d2a70c25c9888266d61cd4c59 |
###############################################################################
# #
# GALORE: Gaussian and Lorentzian broadening for simulated spectra #
# #
# Developed by Adam J. Jackson (2016) at University College London #
# #
###############################################################################
# #
# This file is part of Galore. Galore is free software: you can redistribute #
# it and/or modify it under the terms of the GNU General Public License as #
# published by the Free Software Foundation, either version 3 of the License, #
# or (at your option) any later version. This program is distributed in the #
# hope that it will be useful, but WITHOUT ANY WARRANTY; without even the #
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. #
# See the GNU General Public License for more details. You should have #
# received a copy of the GNU General Public License along with this program. #
# If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import os
import csv
import re
import sys
from collections import OrderedDict
import numpy as np
def is_gpw(filename):
    """Determine whether file is GPAW calculation by checking extension"""
    # rpartition('.')[-1] is the text after the last dot, or the whole name
    # when there is no dot -- equivalent to split('.')[-1]
    return filename.rpartition('.')[-1] == 'gpw'
def is_doscar(filename):
    """Determine whether file is a DOSCAR by checking fourth line"""
    # Files with fewer than four lines are handled for free: readline()
    # returns '' past the end, which fails the comparison below.
    with open(filename, 'r') as f:
        for _ in range(3):
            f.readline()
        return f.readline().strip() == 'CAR'
def is_vasp_raman(filename):
    """Determine if file is raman-sc/vasp_raman.py data by checking header"""
    expected_header = '# mode    freq(cm-1)    alpha    beta2    activity'
    with open(filename, 'r') as f:
        return f.readline().strip() == expected_header
def is_csv(filename):
    """Determine whether file is CSV by checking extension"""
    # Equivalent to split('.')[-1]: yields the whole name when dot-free
    return filename.rpartition('.')[-1] == 'csv'
def is_xml(filename):
    """Determine whether file is XML by checking extension

    A trailing ``.gz`` is transparent, so ``file.xml.gz`` also counts as XML.

    Args:
        filename (str): Filename to inspect (only the name is examined, the
            file itself is never opened).

    Returns:
        bool: True if the (possibly gzipped) extension is ``xml``
    """
    parts = filename.split('.')
    if parts[-1] == 'gz':
        # Look underneath the gzip suffix; guard against a bare "gz" name,
        # which previously raised IndexError here
        return len(parts) > 1 and parts[-2] == 'xml'
    else:
        return parts[-1] == 'xml'
def is_complete_dos(pdos):
    """Determine whether the object is a pymatgen CompleteDos object"""
    # Duck typing: anything exposing a callable get_densities is accepted
    return callable(getattr(pdos, "get_densities", None))
def write_txt(x_values, y_values, filename="galore_output.txt", header=None):
    """Write output to a simple space-delimited file

    Args:
        x_values (iterable): Values to print in first column
        y_values (iterable): Values to print in second column
        filename (str): Path to output file, including extension. If None,
            write to standard output instead.
        header (str): Additional line to prepend to file. If None, no header
            is used.
    """
    _write_txt_rows(zip(x_values, y_values), filename=filename, header=header)
def _write_txt_rows(rows, filename=None, header=None):
    """Write rows of data to space-separated text output

    Args:
        rows (iterable): Rows to write; each row is an iterable of numbers.
        filename (str or None): Output path. If None, write to standard
            output instead.
        header (str or None): Optional extra line placed before the data.
    """
    # Lazily format each row in scientific notation, newline-terminated
    formatted = (' '.join('{0:10.6e}'.format(value) for value in row) + '\n'
                 for row in rows)

    if filename is None:
        if header is not None:
            print(header)
        for text in formatted:
            print(text, end='')
    else:
        with open(filename, 'w') as handle:
            if header is not None:
                handle.write(header + '\n')
            handle.writelines(formatted)
def _write_csv_rows(rows, filename=None, header=None):
    """Write rows of data to output in CSV format

    Args:
        rows (iterable): Rows to write; each row is a list of values.
        filename (str or None): Output path. If None, write to standard
            output instead.
        header (iterable or None): Optional extra row placed at the top of
            the output.
    """
    def _dump(stream):
        # lineterminator avoids the csv module's default \r\n on all OSes
        writer = csv.writer(stream, lineterminator=os.linesep)
        if header is not None:
            writer.writerow(header)
        writer.writerows(rows)

    if filename is None:
        _dump(sys.stdout)
    else:
        with open(filename, 'w') as stream:
            _dump(stream)
def write_csv(x_values, y_values, filename="galore_output.csv", header=None):
    """Write output to a simple two-column CSV file

    Args:
        x_values (iterable): Values to print in first column
        y_values (iterable): Values to print in second column
        filename (str): Path to output file, including extension. If None,
            write to standard output instead.
        header (iterable): Additional row to prepend to file. If None,
            no header is used.
    """
    _write_csv_rows(zip(x_values, y_values), filename=filename, header=header)
def write_pdos(pdos_data, filename=None, filetype="txt", flipx=False):
    """Write PDOS or XPS data to CSV or space-delimited file

    Args:
        pdos_data (dict): Data for pdos plot in format::

            {'el1': {'energy': values, 's': values, 'p': values ...},
             'el2': {'energy': values, 's': values, ...}, ...}

            where DOS values are 1D numpy arrays. For deterministic output,
            use ordered dictionaries!
        filename (str or None): Filename for output. If None, write to stdout
        filetype (str): Format for output; "csv" or "txt".
        flipx (bool): Negate the x-axis (i.e. energy) values to make binding
            energies

    Raises:
        ValueError: If `filetype` is not "csv" or "txt".
    """
    # All elements are assumed to share one energy axis: take the first
    header = ['energy']
    cols = [list(pdos_data.values())[0]['energy']]
    if flipx:
        cols[0] = -cols[0]

    for el, orbitals in pdos_data.items():
        for orbital, values in orbitals.items():
            if orbital.lower() != 'energy':
                header += ['_'.join((el, orbital))]
                cols.append(values)
    data = np.array(cols).T

    # Insert a 'total' column (sum of all orbital columns) after the energy
    total = data[:, 1:].sum(axis=1)
    data = np.insert(data, 1, total, axis=1)
    header.insert(1, 'total')

    if filetype == 'csv':
        _write_csv_rows(data, filename=filename, header=header)
    elif filetype == 'txt':
        header = ' ' + ' '.join(('{0:12s}'.format(x) for x in header))
        _write_txt_rows(data, filename=filename, header=header)
    else:
        # Bug fix: the "{0}" placeholder was never substituted before
        raise ValueError('filetype "{0}" not recognised. '
                         'Use "txt" or "csv".'.format(filetype))
def read_csv(filename):
    """Read a CSV file containing frequencies and intensities

    If input file contains three columns, the first column is ignored. (It is
    presumed to be a vibrational mode index.)

    Args:
        filename (str): Path to data file

    Returns:
        n x 2 Numpy array of frequencies and intensities
    """
    # Thin wrapper: identical to read_txt apart from the comma delimiter
    return read_txt(filename, delimiter=',')
def read_txt(filename, delimiter=None):
    """Read a txt file containing frequencies and intensities

    If input file contains three columns, the first column is ignored. (It is
    presumed to be a vibrational mode index.)

    Args:
        filename (str): Path to data file
        delimiter (str or None): Column separator passed to numpy; None
            means any whitespace.

    Returns:
        n x 2 Numpy array of frequencies and intensities

    Raises:
        ValueError: If the file does not have two or three columns.
    """
    # atleast_2d fixes a crash on single-data-row files, where genfromtxt
    # returns a 1-D array and shape[1] used to raise IndexError
    xy_data = np.atleast_2d(np.genfromtxt(filename, comments='#',
                                          delimiter=delimiter))

    columns = xy_data.shape[1]
    if columns == 2:
        return xy_data
    elif columns == 3:
        return xy_data[:, 1:]
    elif columns < 2:
        # ValueError subclasses Exception, so existing broad handlers
        # still catch this
        raise ValueError("Not sure how to interpret {0}: "
                         "not enough columns.".format(filename))
    else:
        raise ValueError("Not sure how to interpret {0}: "
                         "too many columns.".format(filename))
def read_pdos_txt(filename, abs_values=True):
    """Read a text file containing projected density-of-states (PDOS) data

    The first row should be a header identifying the orbitals, e.g.
    "# Energy s p d f". The following rows contain the corresponding energy and
    DOS values. Spin channels indicated by (up) or (down) suffixes will be
    combined.

    Args:
        filename (str): Path to file for import
        abs_values (bool, optional):
            Convert intensity values to absolute numbers. This is primarily for
            compatibility with spin-polarised .dat files from Sumo. Set to
            False if negative values in spectrum are resonable.

    Returns:
        data (np.ndarray): Numpy structured array with named columns
            corresponding to input data format.
    """
    # names=True turns the header row into structured-array field names;
    # note numpy strips punctuation, so "s(up)" becomes "sup" etc.
    data = np.genfromtxt(filename, names=True)

    if abs_values:
        # Skip the first field (energy); only intensities are rectified
        for col in data.dtype.names[1:]:
            data[col] = np.abs(data[col])

    # Get a list of orbitals that have 'up' and 'down' variants
    spin_pairs = []
    for col in data.dtype.names:
        if re.match('.+up', col):
            orbital = re.match('(.+)up', col).groups()[0]
            if orbital + 'down' in data.dtype.names:
                spin_pairs.append(orbital)

    if len(spin_pairs) == 0:
        return data
    else:
        # Sum up/down channel pairs into their respective up channels
        for orbital in spin_pairs:
            data[orbital + 'up'] += data[orbital + 'down']
        # Rename spin-up channels
        # (dtype.names must be replaced wholesale; fields cannot be renamed
        # individually on a structured array)
        column_names = list(data.dtype.names)
        for orbital in spin_pairs:
            column_names[column_names.index(orbital + 'up')] = orbital
        data.dtype.names = tuple(column_names)
        # Exclude spin-down channels from returned data
        spin_down_orbs = [orb + 'down' for orb in spin_pairs]
        return data[[col for col in data.dtype.names
                     if col not in spin_down_orbs]]
def read_doscar(filename="DOSCAR"):
    """Read an x, y series of energies and total DOS from a VASP DOSCAR file

    Args:
        filename (str): Path to DOSCAR file

    Returns:
        data (np.ndarray): N x 2 array of (energy, total DOS) rows; for
            spin-polarised files the two spin channels are summed.
    """
    with open(filename, 'r') as f:
        # Skip the five header lines; the sixth holds NEDOS as field 3
        for _ in range(5):
            f.readline()
        nedos = int(f.readline().split()[2])

        # Columns are: energy, then DOS (+ integrated DOS) per spin channel,
        # so 3 columns means one spin channel and 5 means two.
        first_fields = f.readline().split()
        n_cols = len(first_fields)
        if n_cols == 3:
            def _energy_tdos(fields):
                return (float(fields[0]), float(fields[1]))
        elif n_cols == 5:
            def _energy_tdos(fields):
                values = [float(v) for v in fields]
                return (values[0], values[1] + values[2])
        else:
            raise Exception("Too many columns in DOSCAR")

        rows = [_energy_tdos(first_fields)]
        for _ in range(nedos - 1):
            rows.append(_energy_tdos(f.readline().split()))

    return np.array(rows)
def read_vasprun(filename='vasprun.xml'):
    """Read a VASP vasprun.xml file to obtain the density of states

    Pymatgen must be present on the system to use this method

    Args:
        filename (str): Path to vasprun.xml file

    Returns:
        data (pymatgen.electronic_structure.dos.Dos): A pymatgen Dos object
            with energies shifted so the reference (VBM, or Fermi level for
            metals) sits at 0 eV.
    """
    try:
        from pymatgen.io.vasp.outputs import Vasprun
    except ImportError as e:
        e.msg = "pymatgen package neccessary to load vasprun files"
        raise

    vr = Vasprun(filename)
    band = vr.get_band_structure()
    dos = vr.complete_dos

    # Metals have no gap, so the Fermi level is the only sensible reference
    if band.is_metal():
        zero_point = vr.efermi
    else:
        zero_point = band.get_vbm()['energy']

    # Shift the energies so that the vbm is at 0 eV, also taking into account
    # any gaussian broadening
    dos.energies -= zero_point
    # ISMEAR 0/-1 are Gaussian/Fermi smearing; compensate by SIGMA
    if vr.parameters['ISMEAR'] == 0 or vr.parameters['ISMEAR'] == -1:
        dos.energies -= vr.parameters['SIGMA']

    return dos
def read_gpaw_totaldos(filename, npts=50001, width=1e-3, ref='vbm'):
    """Read total DOS from GPAW with minimal broadening

    This requires GPAW to be installed and on your PYTHONPATH!

    Args:
        filename (str): Path to GPAW calculation file. This should be a .gpw
            file generated with ``calc.write('myfilename.gpw')``.
        npts (int): Number of DOS samples
        width (float): Gaussian broadening parameter applied by GPAW. Default
            is minimal so that broadening is dominated by choices in Galore.
            Beware that there is a strong interaction between this parameter
            and npts; with a small npts and small width, many energy levels
            will be missed from the DOS!
        ref (str): Reference energy for DOS. 'vbm' or 'efermi' are accepted for
            the valence-band maximum or the Fermi energy, respectively. VBM is
            determined from calculation eigenvalues and not DOS values. If set
            to None, raw values are used.

    Returns:
        data (np.ndarray): 2D array of energy and DOS values
    """
    from gpaw import GPAW
    calc = GPAW(filename)

    if ref is None:
        ref_energy = 0
    elif ref.lower() == 'vbm':
        ref_energy, _ = calc.get_homo_lumo()
    elif ref.lower() == 'efermi':
        ref_energy = calc.get_fermi_level()
    # NOTE(review): any other `ref` string leaves ref_energy unbound and the
    # subtraction below raises NameError -- consider validating `ref`

    energies, dos = calc.get_dos(npts=npts, width=width)
    return np.array(list(zip(energies - ref_energy, dos)))
def read_gpaw_pdos(filename, npts=50001, width=1e-3, ref='vbm'):
    """Read orbital-projected DOS from GPAW with minimal broadening.

    This requires GPAW to be installed and on your PYTHONPATH!

    Args:
        filename (str): Path to GPAW calculation file. This should be a .gpw
            file generated with ``calc.write('myfilename.gpw')``.
        npts (int): Number of DOS samples
        width (float): Gaussian broadening parameter applied by GPAW. Default
            is minimal so that broadening is dominated by choices in Galore.
            Beware that there is a strong interaction between this parameter
            and npts; with a small npts and small width, many energy levels
            will be missed from the DOS!
        ref (str): Reference energy for DOS. 'vbm' or 'efermi' are accepted for
            the valence-band maximum or the Fermi energy, respectively. VBM is
            determined from calculation eigenvalues and not DOS values. If set
            to None, raw values are used.

    Returns:
        pdos_data (OrderedDict): PDOS data formatted as nestled OrderedDict of:

            {element: {'energy': energies, 's': densities, 'p' ... }
    """
    from gpaw import GPAW
    calc = GPAW(filename)

    if ref is None:
        ref_energy = 0
    elif ref.lower() == 'vbm':
        ref_energy, _ = calc.get_homo_lumo()
    elif ref.lower() == 'efermi':
        ref_energy = calc.get_fermi_level()

    # Set up the structure of elements and orbitals.
    # Each element must get its OWN zero arrays: the previous shallow
    # OrderedDict.copy() of a shared prototype made every element reference
    # the same numpy arrays, so the in-place "+=" below mixed densities
    # between different elements. Repeated symbols still share one entry.
    orbital_keys = ('energy', 's', 'p', 'd', 'f')
    pdos_data = OrderedDict()
    for atom in calc.atoms:
        if atom.symbol not in pdos_data:
            pdos_data[atom.symbol] = OrderedDict(
                (key, np.zeros(npts)) for key in orbital_keys)

    # Read orbital DOS, adding to collected PDOS for that element/orbital
    for atom in calc.atoms:
        for orbital in 'spdf':
            energies, dos = calc.get_orbital_ldos(atom.index, angular=orbital,
                                                  npts=npts, width=width)
            pdos_data[atom.symbol][orbital] += dos
        pdos_data[atom.symbol]['energy'] = energies - ref_energy

    # Set any zero arrays to None so they can be easily skipped over
    # This should get rid of unused orbitals; if GPAW put some density in those
    # orbitals then we should keep that evidence rather than discard it.
    for element, orbitals in pdos_data.items():
        for orbital, dos in orbitals.items():
            if orbital != 'energy' and max(dos) == 0.:
                pdos_data[element][orbital] = None

    return pdos_data
def read_vasprun_totaldos(filename='vasprun.xml'):
    """Read an x, y series of energies and DOS from a VASP vasprun.xml file

    Pymatgen must be present on the system to use this method

    Args:
        filename (str): Path to vasprun.xml file

    Returns:
        data (np.ndarray): 2D array of energy and DOS values
    """
    dos = read_vasprun(filename)

    from pymatgen.electronic_structure.core import Spin

    # Sum spin up and spin down channels. Work on a copy: the previous
    # in-place "+=" silently mutated dos.densities[Spin.up], corrupting the
    # Dos object for any subsequent use of the same data.
    densities = np.array(dos.densities[Spin.up])
    if len(dos.densities) > 1:
        densities = densities + dos.densities[Spin.down]

    return np.array(list(zip(dos.energies, densities)))
def read_vasprun_pdos(filename='vasprun.xml'):
    """Read a vasprun.xml containing projected density-of-states (PDOS) data

    Pymatgen must be present on the system to use this method

    Args:
        filename (str or CompleteDos):
            Path to vasprun.xml file or pymatgen CompleteDos object.

    Returns:
        pdos_data (OrderedDict): PDOS data formatted as nestled OrderedDict of:

            {element: {'energy': energies, 's': densities, 'p' ... }
    """
    if isinstance(filename, str):
        dos = read_vasprun(filename)
    else:
        # filename is actually a pre-loaded CompleteDos
        dos = filename

    from pymatgen.electronic_structure.core import Spin, OrbitalType

    pdos_data = OrderedDict()
    for element in dos.structure.symbol_set:
        pdos_data[element] = OrderedDict([('energy', dos.energies)])
        pdos = dos.get_element_spd_dos(element)
        # Iterate orbitals sorted by enum value so s, p, d, f keep a
        # deterministic order
        for orb in sorted(orb.value for orb in pdos.keys()):
            orbital = OrbitalType(orb)
            # Sum spin channels on a copy: the previous in-place "+="
            # mutated the array held by the CompleteDos object itself.
            densities = np.array(pdos[orbital].densities[Spin.up])
            if len(dos.densities) > 1:
                densities = densities + pdos[orbital].densities[Spin.down]
            pdos_data[element][orbital.name] = densities

    return pdos_data
def read_vasp_raman(filename="vasp_raman.dat"):
    """Read output file from Vasp raman simulation

    Args:
        filename (str): Path to formatted data file generated by
            https://github.com/raman-sc/VASP - Raman intensities are computed
            by following vibrational modes and calculating polarisability. The
            generated output file is named *vasp_raman.dat* but can be renamed
            if desired. The format is five space-separated columns, headed by
            ``# mode freq(cm-1) alpha beta2 activity``.

    Returns:
        2-D np.array:
            Only the columns corresponding to frequency and activity are
            retained.
    """
    raw = np.genfromtxt(filename)
    # Column 1 is the frequency, the final column the Raman activity;
    # mode index, alpha and beta2 are dropped.
    return raw[:, [1, -1]]
| SMTG-UCL/galore | galore/formats.py | Python | gpl-3.0 | 19,840 | [
"GPAW",
"Gaussian",
"VASP",
"pymatgen"
] | 112b470b7905d8a97cee904f471be39a68bf80c97a0ae60e036e7c534253002e |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
# Based on Copyright (C) 2016 Radim Rehurek <radimrehurek@seznam.cz>
"""Lda Sequence model, inspired by `David M. Blei, John D. Lafferty: "Dynamic Topic Models"
<https://mimno.infosci.cornell.edu/info6150/readings/dynamic_topic_models.pdf>`_ .
The original C/C++ implementation can be found on `blei-lab/dtm <https://github.com/blei-lab/dtm>`.
TODO: The next steps to take this forward would be:
#. Include DIM mode. Most of the infrastructure for this is in place.
#. See if LdaPost can be replaced by LdaModel completely without breaking anything.
#. Heavy lifting going on in the Sslm class - efforts can be made to cythonise mathematical methods, in particular,
update_obs and the optimization takes a lot time.
#. Try and make it distributed, especially around the E and M step.
#. Remove all C/C++ coding style/syntax.
Examples
--------
Set up a model using have 30 documents, with 5 in the first time-slice, 10 in the second, and 15 in the third
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus
>>> from gensim.models import LdaSeqModel
>>>
>>> ldaseq = LdaSeqModel(corpus=common_corpus, time_slice=[2, 4, 3], num_topics=2, chunksize=1)
Persist a model to disk and reload it later
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> temp_file = datapath("model")
>>> ldaseq.save(temp_file)
>>>
>>> # Load a potentially pre-trained model from disk.
>>> ldaseq = LdaSeqModel.load(temp_file)
Access the document embeddings generated from the DTM
.. sourcecode:: pycon
>>> doc = common_corpus[1]
>>>
>>> embedding = ldaseq[doc]
"""
import logging
import numpy as np
from scipy.special import digamma, gammaln
from scipy import optimize
from gensim import utils, matutils
from gensim.models import ldamodel
logger = logging.getLogger(__name__)
class LdaSeqModel(utils.SaveLoad):
"""Estimate Dynamic Topic Model parameters based on a training corpus."""
def __init__(
        self, corpus=None, time_slice=None, id2word=None, alphas=0.01, num_topics=10,
        initialize='gensim', sstats=None, lda_model=None, obs_variance=0.5, chain_variance=0.005, passes=10,
        random_state=None, lda_inference_max_iter=25, em_min_iter=6, em_max_iter=20, chunksize=100,
):
    """
    Parameters
    ----------
    corpus : {iterable of list of (int, float), scipy.sparse.csc}, optional
        Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`).
        If not given, the model is left untrained (presumably because you want to call
        :meth:`~gensim.models.ldamodel.LdaSeqModel.update` manually).
    time_slice : list of int, optional
        Number of documents in each time-slice. Each time slice could for example represent a year's published
        papers, in case the corpus comes from a journal publishing over multiple years.
        It is assumed that `sum(time_slice) == num_documents`.
    id2word : dict of (int, str), optional
        Mapping from word IDs to words. It is used to determine the vocabulary size, as well as for
        debugging and topic printing.
    alphas : float, optional
        The prior probability for the model.
    num_topics : int, optional
        The number of requested latent topics to be extracted from the training corpus.
    initialize : {'gensim', 'own', 'ldamodel'}, optional
        Controls the initialization of the DTM model. Supports three different modes:
            * 'gensim': Uses gensim's LDA initialization.
            * 'own': Uses your own initialization matrix of an LDA model that has been previously trained.
            * 'lda_model': Use a previously used LDA model, passing it through the `lda_model` argument.
    sstats : numpy.ndarray , optional
        Sufficient statistics used for initializing the model if `initialize == 'own'`. Corresponds to matrix
        beta in the linked paper for time slice 0, expected shape (`self.vocab_len`, `num_topics`).
    lda_model : :class:`~gensim.models.ldamodel.LdaModel`
        Model whose sufficient statistics will be used to initialize the current object if `initialize == 'gensim'`.
    obs_variance : float, optional
        Observed variance used to approximate the true and forward variance as shown in
        `David M. Blei, John D. Lafferty: "Dynamic Topic Models"
        <https://mimno.infosci.cornell.edu/info6150/readings/dynamic_topic_models.pdf>`_.
    chain_variance : float, optional
        Gaussian parameter defined in the beta distribution to dictate how the beta values evolve over time.
    passes : int, optional
        Number of passes over the corpus for the initial :class:`~gensim.models.ldamodel.LdaModel`
    random_state : {numpy.random.RandomState, int}, optional
        Can be a np.random.RandomState object, or the seed to generate one. Used for reproducibility of results.
    lda_inference_max_iter : int, optional
        Maximum number of iterations in the inference step of the LDA training.
    em_min_iter : int, optional
        Minimum number of iterations until converge of the Expectation-Maximization algorithm
    em_max_iter : int, optional
        Maximum number of iterations until converge of the Expectation-Maximization algorithm.
    chunksize : int, optional
        Number of documents in the corpus do be processed in in a chunk.
    """
    self.id2word = id2word
    if corpus is None and self.id2word is None:
        raise ValueError(
            'at least one of corpus/id2word must be specified, to establish input space dimensionality'
        )

    if self.id2word is None:
        logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
        self.id2word = utils.dict_from_corpus(corpus)
        self.vocab_len = len(self.id2word)
    elif self.id2word:
        self.vocab_len = len(self.id2word)
    else:
        self.vocab_len = 0

    if corpus is not None:
        try:
            self.corpus_len = len(corpus)
        except TypeError:
            # corpus is a stream without __len__ (e.g. a generator)
            logger.warning("input corpus stream has no len(); counting documents")
            self.corpus_len = sum(1 for _ in corpus)

    self.time_slice = time_slice
    if self.time_slice is not None:
        self.num_time_slices = len(time_slice)

    self.num_topics = num_topics
    # NOTE(review): redundant with the guarded assignment above, and raises
    # TypeError when time_slice is None even though the docstring marks
    # time_slice optional -- confirm intended behaviour
    self.num_time_slices = len(time_slice)
    # Symmetric Dirichlet prior: the same alpha for every topic
    self.alphas = np.full(num_topics, alphas)

    # topic_chains contains for each topic a 'state space language model' object
    # which in turn has information about each topic
    # the sslm class is described below and contains information
    # on topic-word probabilities and doc-topic probabilities.
    self.topic_chains = []
    for topic in range(num_topics):
        sslm_ = sslm(
            num_time_slices=self.num_time_slices, vocab_len=self.vocab_len, num_topics=self.num_topics,
            chain_variance=chain_variance, obs_variance=obs_variance
        )
        self.topic_chains.append(sslm_)

    # the following are class variables which are to be integrated during Document Influence Model
    self.top_doc_phis = None
    self.influence = None
    self.renormalized_influence = None
    self.influence_sum_lgl = None

    # if a corpus and time_slice is provided, depending on the user choice of initializing LDA, we start DTM.
    if corpus is not None and time_slice is not None:
        self.max_doc_len = max(len(line) for line in corpus)

        if initialize == 'gensim':
            lda_model = ldamodel.LdaModel(
                corpus, id2word=self.id2word, num_topics=self.num_topics,
                passes=passes, alpha=self.alphas, random_state=random_state,
                dtype=np.float64
            )
            # sstats transposed to shape (vocab_len, num_topics)
            self.sstats = np.transpose(lda_model.state.sstats)
        if initialize == 'ldamodel':
            self.sstats = np.transpose(lda_model.state.sstats)
        if initialize == 'own':
            self.sstats = sstats

        # initialize model from sstats
        self.init_ldaseq_ss(chain_variance, obs_variance, self.alphas, self.sstats)

        # fit DTM
        self.fit_lda_seq(corpus, lda_inference_max_iter, em_min_iter, em_max_iter, chunksize)
def init_ldaseq_ss(self, topic_chain_variance, topic_obs_variance, alpha, init_suffstats):
    """Initialize State Space Language Model, topic-wise.

    Parameters
    ----------
    topic_chain_variance : float
        Gaussian parameter defined in the beta distribution to dictate how the beta values evolve.
    topic_obs_variance : float
        Observed variance used to approximate the true and forward variance as shown in
        `David M. Blei, John D. Lafferty: "Dynamic Topic Models"
        <https://mimno.infosci.cornell.edu/info6150/readings/dynamic_topic_models.pdf>`_.
    alpha : float
        The prior probability for the model.
    init_suffstats : numpy.ndarray
        Sufficient statistics used for initializing the model, expected shape (`self.vocab_len`, `num_topics`).
    """
    self.alphas = alpha
    for k, chain in enumerate(self.topic_chains):
        # Column k of the sufficient statistics initialises topic k's chain
        sstats = init_suffstats[:, k]
        # Unbound-style call on the sslm class, passing the chain as `self`
        sslm.sslm_counts_init(chain, topic_obs_variance, topic_chain_variance, sstats)

        # initialize the below matrices only if running DIM
        # ldaseq.topic_chains[k].w_phi_l = np.zeros((ldaseq.vocab_len, ldaseq.num_time_slices))
        # ldaseq.topic_chains[k].w_phi_sum = np.zeros((ldaseq.vocab_len, ldaseq.num_time_slices))
        # ldaseq.topic_chains[k].w_phi_sq = np.zeros((ldaseq.vocab_len, ldaseq.num_time_slices))
def fit_lda_seq(self, corpus, lda_inference_max_iter, em_min_iter, em_max_iter, chunksize):
    """Fit a LDA Sequence model (DTM).

    This method will iteratively setup LDA models and perform EM steps until the sufficient statistics convergence,
    or until the maximum number of iterations is reached. Because the true posterior is intractable, an
    appropriately tight lower bound must be used instead. This function will optimize this bound, by minimizing
    its true Kullback-Liebler Divergence with the true posterior.

    Parameters
    ----------
    corpus : {iterable of list of (int, float), scipy.sparse.csc}
        Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`).
    lda_inference_max_iter : int
        Maximum number of iterations for the inference step of LDA.
    em_min_iter : int
        Minimum number of time slices to be inspected.
    em_max_iter : int
        Maximum number of time slices to be inspected.
    chunksize : int
        Number of documents to be processed in each chunk.

    Returns
    -------
    float
        The highest lower bound for the true posterior produced after all iterations.
    """
    # Relative bound change below which EM is considered converged
    LDASQE_EM_THRESHOLD = 1e-4
    # if bound is low, then we increase iterations.
    LOWER_ITER = 10
    ITER_MULT_LOW = 2
    MAX_ITER = 500

    num_topics = self.num_topics
    vocab_len = self.vocab_len
    data_len = self.num_time_slices
    corpus_len = self.corpus_len

    bound = 0
    # Seed convergence above the threshold so the loop always starts
    convergence = LDASQE_EM_THRESHOLD + 1

    iter_ = 0

    while iter_ < em_min_iter or ((convergence > LDASQE_EM_THRESHOLD) and iter_ <= em_max_iter):

        logger.info(" EM iter %i", iter_)
        logger.info("E Step")
        # TODO: bound is initialized to 0
        old_bound = bound

        # initiate sufficient statistics
        topic_suffstats = []
        for topic in range(num_topics):
            topic_suffstats.append(np.zeros((vocab_len, data_len)))

        # set up variables
        gammas = np.zeros((corpus_len, num_topics))
        lhoods = np.zeros((corpus_len, num_topics + 1))
        # compute the likelihood of a sequential corpus under an LDA
        # seq model and find the evidence lower bound. This is the E - Step
        bound, gammas = \
            self.lda_seq_infer(corpus, topic_suffstats, gammas, lhoods, iter_, lda_inference_max_iter, chunksize)
        self.gammas = gammas

        logger.info("M Step")

        # fit the variational distribution. This is the M - Step
        topic_bound = self.fit_lda_seq_topics(topic_suffstats)
        bound += topic_bound

        if (bound - old_bound) < 0:
            # if max_iter is too low, increase iterations.
            if lda_inference_max_iter < LOWER_ITER:
                lda_inference_max_iter *= ITER_MULT_LOW
            logger.info("Bound went down, increasing iterations to %i", lda_inference_max_iter)

        # check for convergence
        convergence = np.fabs((bound - old_bound) / old_bound)

        if convergence < LDASQE_EM_THRESHOLD:

            lda_inference_max_iter = MAX_ITER
            logger.info("Starting final iterations, max iter is %i", lda_inference_max_iter)
            # Force one more full-precision pass before stopping
            convergence = 1.0

        logger.info("iteration %i iteration lda seq bound is %f convergence is %f", iter_, bound, convergence)

        iter_ += 1

    return bound
def lda_seq_infer(self, corpus, topic_suffstats, gammas, lhoods,
                  iter_, lda_inference_max_iter, chunksize):
    """Inference (or E-step) for the lower bound EM optimization.

    This is used to set up the gensim :class:`~gensim.models.ldamodel.LdaModel` to be used for each time-slice.
    It also allows for Document Influence Model code to be written in.

    Parameters
    ----------
    corpus : {iterable of list of (int, float), scipy.sparse.csc}
        Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`).
    topic_suffstats : numpy.ndarray
        Sufficient statistics for time slice 0, used for initializing the model if `initialize == 'own'`,
        expected shape (`self.vocab_len`, `num_topics`).
    gammas : numpy.ndarray
        Topic weight variational parameters for each document. If not supplied, it will be inferred from the model.
    lhoods : list of float
        The total log probability lower bound for each topic. Corresponds to the phi variational parameters in the
        linked paper.
    iter_ : int
        Current iteration.
    lda_inference_max_iter : int
        Maximum number of iterations for the inference step of LDA.
    chunksize : int
        Number of documents to be processed in each chunk.

    Returns
    -------
    (float, list of float)
        The first value is the highest lower bound for the true posterior.
        The second value is the list of optimized dirichlet variational parameters for the approximation of
        the posterior.
    """
    num_topics = self.num_topics
    vocab_len = self.vocab_len
    bound = 0.0

    # Scratch LdaModel reused for each time slice; topics filled in later
    lda = ldamodel.LdaModel(num_topics=num_topics, alpha=self.alphas, id2word=self.id2word, dtype=np.float64)
    lda.topics = np.zeros((vocab_len, num_topics))
    ldapost = LdaPost(max_doc_len=self.max_doc_len, num_topics=num_topics, lda=lda)

    # Hard-coded mode switch: the DIM branch below is currently unreachable
    model = "DTM"
    if model == "DTM":
        bound, gammas = self.inferDTMseq(
            corpus, topic_suffstats, gammas, lhoods, lda,
            ldapost, iter_, bound, lda_inference_max_iter, chunksize
        )
    elif model == "DIM":
        self.InfluenceTotalFixed(corpus)
        bound, gammas = self.inferDIMseq(
            corpus, topic_suffstats, gammas, lhoods, lda,
            ldapost, iter_, bound, lda_inference_max_iter, chunksize
        )

    return bound, gammas
def inferDTMseq(self, corpus, topic_suffstats, gammas, lhoods, lda,
                ldapost, iter_, bound, lda_inference_max_iter, chunksize):
    """Compute the likelihood of a sequential corpus under an LDA seq model, and reports the likelihood bound.

    Parameters
    ----------
    corpus : {iterable of list of (int, float), scipy.sparse.csc}
        Stream of document vectors or sparse matrix of shape (`num_documents`, `num_terms`).
    topic_suffstats : numpy.ndarray
        Sufficient statistics of the current model, expected shape (`self.vocab_len`, `num_topics`).
    gammas : numpy.ndarray
        Topic weight variational parameters for each document. If not supplied, it will be inferred from the model.
    lhoods : list of float of length `self.num_topics`
        The total log probability bound for each topic. Corresponds to phi from the linked paper.
    lda : :class:`~gensim.models.ldamodel.LdaModel`
        The trained LDA model of the previous iteration.
    ldapost : :class:`~gensim.models.ldaseqmodel.LdaPost`
        Posterior probability variables for the given LDA model. This will be used as the true (but intractable)
        posterior.
    iter_ : int
        The current iteration.
    bound : float
        The LDA bound produced after all iterations.
    lda_inference_max_iter : int
        Maximum number of iterations for the inference step of LDA.
    chunksize : int
        Number of documents to be processed in each chunk.

    Returns
    -------
    (float, list of float)
        The first value is the highest lower bound for the true posterior.
        The second value is the list of optimized dirichlet variational parameters for the approximation of
        the posterior.

    """
    doc_index = 0  # overall doc_index in corpus
    time = 0  # current time-slice
    doc_num = 0  # doc-index in current time-slice
    lda = self.make_lda_seq_slice(lda, time)  # create lda_seq slice
    # cumulative document counts mark the boundary of each time slice
    time_slice = np.cumsum(np.array(self.time_slice))
    for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
        # iterates chunk size for constant memory footprint
        for doc in chunk:
            # this is used to update the time_slice and create a new lda_seq slice every new time_slice
            # NOTE(review): the comparison uses '>', so the document at the exact
            # cumulative boundary is still processed in the earlier slice — confirm intended.
            if doc_index > time_slice[time]:
                time += 1
                lda = self.make_lda_seq_slice(lda, time)  # create lda_seq slice
                doc_num = 0
            gam = gammas[doc_index]
            lhood = lhoods[doc_index]
            # point the shared posterior object at this document's state
            ldapost.gamma = gam
            ldapost.lhood = lhood
            ldapost.doc = doc
            # TODO: replace fit_lda_post with appropriate ldamodel functions, if possible.
            # On the very first EM iteration no sequence model is passed (None),
            # so the posterior is fit against the plain LDA slice only.
            if iter_ == 0:
                doc_lhood = LdaPost.fit_lda_post(
                    ldapost, doc_num, time, None, lda_inference_max_iter=lda_inference_max_iter
                )
            else:
                doc_lhood = LdaPost.fit_lda_post(
                    ldapost, doc_num, time, self, lda_inference_max_iter=lda_inference_max_iter
                )
            if topic_suffstats is not None:
                topic_suffstats = LdaPost.update_lda_seq_ss(ldapost, time, doc, topic_suffstats)
            gammas[doc_index] = ldapost.gamma
            bound += doc_lhood
            doc_index += 1
            doc_num += 1
    return bound, gammas
def make_lda_seq_slice(self, lda, time):
    """Update the LDA model topic-word values using time slices.

    Copies the expected log probabilities of every topic chain at the given
    time slice into the stationary model's topic matrix, and refreshes its
    alpha priors from this sequence model.

    Parameters
    ----------
    lda : :class:`~gensim.models.ldamodel.LdaModel`
        The stationary model to be updated.
    time : int
        The time slice assigned to the stationary model.

    Returns
    -------
    lda : :class:`~gensim.models.ldamodel.LdaModel`
        The stationary model updated to reflect the passed time slice.

    """
    num_topics = self.num_topics
    for topic_idx in range(num_topics):
        chain = self.topic_chains[topic_idx]
        lda.topics[:, topic_idx] = chain.e_log_prob[:, time]
    lda.alpha = np.copy(self.alphas)
    return lda
def fit_lda_seq_topics(self, topic_suffstats):
    """Fit the sequential model topic-wise.

    Runs the sslm M-step for every topic chain, accumulating the optimized
    lower bound each chain achieves.

    Parameters
    ----------
    topic_suffstats : numpy.ndarray
        Sufficient statistics of the current model, expected shape (`self.vocab_len`, `num_topics`).

    Returns
    -------
    float
        The sum of the optimized lower bounds for all topics.

    """
    total_lhood = 0
    for topic_idx, chain in enumerate(self.topic_chains):
        logger.info("Fitting topic number %i", topic_idx)
        total_lhood += sslm.fit_sslm(chain, topic_suffstats[topic_idx])
    return total_lhood
def print_topic_times(self, topic, top_terms=20):
    """Get the most relevant words for a topic, for each timeslice. This can be used to inspect the evolution of a
    topic through time.

    Parameters
    ----------
    topic : int
        The index of the topic.
    top_terms : int, optional
        Number of most relevant words associated with the topic to be returned.

    Returns
    -------
    list of list of str
        Top `top_terms` relevant terms for the topic for each time slice.

    """
    # one representation per time slice, in chronological order
    return [
        self.print_topic(topic, time_slice, top_terms)
        for time_slice in range(self.num_time_slices)
    ]
def print_topics(self, time=0, top_terms=20):
    """Get the most relevant words for every topic.

    Parameters
    ----------
    time : int, optional
        The time slice in which we are interested in (since topics evolve over time, it is expected that the most
        relevant words will also gradually change).
    top_terms : int, optional
        Number of most relevant words to be returned for each topic.

    Returns
    -------
    list of list of (str, float)
        Representation of all topics. Each of them is represented by a list of pairs of words and their assigned
        probability.

    """
    all_topics = []
    for topic_id in range(self.num_topics):
        all_topics.append(self.print_topic(topic_id, time, top_terms))
    return all_topics
def print_topic(self, topic, time=0, top_terms=20):
    """Get the list of words most relevant to the given topic.

    Parameters
    ----------
    topic : int
        The index of the topic to be inspected.
    time : int, optional
        The time slice in which we are interested in (since topics evolve over time, it is expected that the most
        relevant words will also gradually change).
    top_terms : int, optional
        Number of words associated with the topic to be returned.

    Returns
    -------
    list of (str, float)
        The representation of this topic. Each element in the list includes the word itself, along with the
        probability assigned to it by the topic.

    """
    # expected log probabilities of this topic's chain at the requested time slice
    log_probs = self.topic_chains[topic].e_log_prob.T[time]
    # exponentiate and normalize into a proper word distribution
    distribution = np.exp(log_probs)
    distribution = distribution / distribution.sum()
    top_ids = matutils.argsort(distribution, top_terms, reverse=True)
    return [(self.id2word[word_id], distribution[word_id]) for word_id in top_ids]
def doc_topics(self, doc_number):
    """Get the topic mixture for a document.

    Uses the priors for the dirichlet distribution that approximates the true posterior with the optimal
    lower bound, and therefore requires the model to be already trained.

    Parameters
    ----------
    doc_number : int
        Index of the document for which the mixture is returned.

    Returns
    -------
    list of length `self.num_topics`
        Probability for each topic in the mixture (essentially a point in the `self.num_topics - 1` simplex.

    """
    # normalize each document's gamma row into a probability distribution
    row_totals = self.gammas.sum(axis=1, keepdims=True)
    return (self.gammas / row_totals)[doc_number]
def dtm_vis(self, time, corpus):
    """Get the information needed to visualize the corpus model at a given time slice, using the pyLDAvis format.

    Parameters
    ----------
    time : int
        The time slice we are interested in.
    corpus : {iterable of list of (int, float), scipy.sparse.csc}, optional
        The corpus we want to visualize at the given time slice.

    Returns
    -------
    doc_topics : list of length `self.num_topics`
        Probability for each topic in the mixture (essentially a point in the `self.num_topics - 1` simplex.
    topic_term : numpy.ndarray
        The representation of each topic as a multinomial over words in the vocabulary,
        expected shape (`num_topics`, vocabulary length).
    doc_lengths : list of int
        The number of words in each document. These could be fixed, or drawn from a Poisson distribution.
    term_frequency : numpy.ndarray
        The term frequency matrix (denoted as beta in the original Blei paper). This could also be the TF-IDF
        representation of the corpus, expected shape (number of documents, length of vocabulary).
    vocab : list of str
        The set of unique terms existing in the corpus's vocabulary.

    """
    # per-document topic mixtures: normalize each gamma row
    doc_topic = self.gammas / self.gammas.sum(axis=1)[:, np.newaxis]

    # per-topic word distributions at the requested time slice
    topic_term = []
    for chain in self.topic_chains:
        weights = np.exp(chain.e_log_prob.T[time])
        topic_term.append(weights / weights.sum())

    # document lengths and corpus-wide term frequencies
    doc_lengths = []
    term_frequency = np.zeros(self.vocab_len)
    for doc in corpus:
        doc_lengths.append(len(doc))
        for term_id, freq in doc:
            term_frequency[term_id] += freq

    vocab = [self.id2word[i] for i in range(len(self.id2word))]
    return doc_topic, np.array(topic_term), doc_lengths, term_frequency, vocab
def dtm_coherence(self, time):
    """Get the coherence for each topic.

    Can be used to measure the quality of the model, or to inspect the convergence through training via a callback.

    Parameters
    ----------
    time : int
        The time slice.

    Returns
    -------
    list of list of str
        The word representation for each topic, for each time slice. This can be used to check the time coherence
        of topics as time evolves: If the most relevant words remain the same then the topic has somehow
        converged or is relatively static, if they change rapidly the topic is evolving.

    """
    # strip the probabilities, keeping only the word of each (word, prob) pair
    return [
        [word for word, dist in topic_repr]
        for topic_repr in self.print_topics(time)
    ]
def __getitem__(self, doc):
    """Get the topic mixture for the given document, using the inferred approximation of the true posterior.

    Parameters
    ----------
    doc : list of (int, float)
        The doc in BOW format. Can be an unseen document.

    Returns
    -------
    list of float
        Probabilities for each topic in the mixture. This is essentially a point in the `num_topics - 1` simplex.

    """
    # build a fresh stationary LDA model to hold each time slice's topics
    model = ldamodel.LdaModel(
        num_topics=self.num_topics, alpha=self.alphas, id2word=self.id2word, dtype=np.float64)
    model.topics = np.zeros((self.vocab_len, self.num_topics))
    posterior = LdaPost(num_topics=self.num_topics, max_doc_len=len(doc), lda=model, doc=doc)

    time_lhoods = []
    for time_slice in range(self.num_time_slices):
        model = self.make_lda_seq_slice(model, time_slice)  # create lda_seq slice
        lhood = LdaPost.fit_lda_post(posterior, 0, time_slice, self)
        time_lhoods.append(lhood)

    # should even the likelihoods be returned?
    return posterior.gamma / posterior.gamma.sum()
class sslm(utils.SaveLoad):
    """Encapsulate the inner State Space Language Model for DTM.

    Some important attributes of this class:

        * `obs` is a matrix containing the document to topic ratios.
        * `e_log_prob` is a matrix containing the topic to word ratios.
        * `mean` contains the mean values to be used for inference for each word for a time slice.
        * `variance` contains the variance values to be used for inference of word in a time slice.
        * `fwd_mean` and `fwd_variance` are the forward posterior values for the mean and the variance.
        * `zeta` is an extra variational parameter with a value for each time slice.

    """
    def __init__(self, vocab_len=None, num_time_slices=None, num_topics=None, obs_variance=0.5, chain_variance=0.005):
        """Initialize the state space language model.

        Parameters
        ----------
        vocab_len : int, optional
            Length of the model's vocabulary.
        num_time_slices : int, optional
            Number of time slices in the corpus.
        num_topics : int, optional
            Number of topics discovered by the LDA model.
        obs_variance : float, optional
            Observed variance used to approximate the true and forward variance.
        chain_variance : float, optional
            Gaussian parameter defined in the beta distribution to dictate how the beta values evolve over time.

        """
        self.vocab_len = vocab_len
        self.num_time_slices = num_time_slices
        self.obs_variance = obs_variance
        self.chain_variance = chain_variance
        self.num_topics = num_topics

        # setting up matrices
        # `mean`, `variance` and their forward counterparts carry one extra
        # column (num_time_slices + 1) for the initial state of the chain.
        self.obs = np.zeros((vocab_len, num_time_slices))
        self.e_log_prob = np.zeros((vocab_len, num_time_slices))
        self.mean = np.zeros((vocab_len, num_time_slices + 1))
        self.fwd_mean = np.zeros((vocab_len, num_time_slices + 1))
        self.fwd_variance = np.zeros((vocab_len, num_time_slices + 1))
        self.variance = np.zeros((vocab_len, num_time_slices + 1))
        self.zeta = np.zeros(num_time_slices)

        # the following are class variables which are to be integrated during Document Influence Model
        self.m_update_coeff = None
        self.mean_t = None
        self.variance_t = None
        self.influence_sum_lgl = None
        self.w_phi_l = None
        self.w_phi_sum = None
        self.w_phi_l_sq = None
        self.m_update_coeff_g = None

    def update_zeta(self):
        """Update the Zeta variational parameter.

        Zeta is described in the appendix and is equal to sum (exp(mean[word] + Variance[word] / 2)),
        over every time-slice. It is the value of variational parameter zeta which maximizes the lower bound.

        Returns
        -------
        list of float
            The updated zeta values for each time slice.

        """
        # column j + 1 of mean/variance corresponds to time slice j (column 0 is the initial state)
        for j, val in enumerate(self.zeta):
            self.zeta[j] = np.sum(np.exp(self.mean[:, j + 1] + self.variance[:, j + 1] / 2))
        return self.zeta

    def compute_post_variance(self, word, chain_variance):
        r"""Get the variance, based on the `Variational Kalman Filtering approach for Approximate Inference (section 3.1)
        <https://mimno.infosci.cornell.edu/info6150/readings/dynamic_topic_models.pdf>`_.

        This function accepts the word to compute variance for, along with the associated sslm class object,
        and returns the `variance` and the posterior approximation `fwd_variance`.

        Notes
        -----
        This function essentially computes Var[\beta_{t,w}] for t = 1:T

        .. :math::

            fwd\_variance[t] \equiv E((beta_{t,w}-mean_{t,w})^2 |beta_{t}\ for\ 1:t) =
            (obs\_variance / fwd\_variance[t - 1] + chain\_variance + obs\_variance ) *
            (fwd\_variance[t - 1] + obs\_variance)

        .. :math::

            variance[t] \equiv E((beta_{t,w}-mean\_cap_{t,w})^2 |beta\_cap_{t}\ for\ 1:t) =
            fwd\_variance[t - 1] + (fwd\_variance[t - 1] / fwd\_variance[t - 1] + obs\_variance)^2 *
            (variance[t - 1] - (fwd\_variance[t-1] + obs\_variance))

        Parameters
        ----------
        word: int
            The word's ID.
        chain_variance : float
            Gaussian parameter defined in the beta distribution to dictate how the beta values evolve over time.

        Returns
        -------
        (numpy.ndarray, numpy.ndarray)
            The first returned value is the variance of each word in each time slice, the second value is the
            inferred posterior variance for the same pairs.

        """
        INIT_VARIANCE_CONST = 1000

        T = self.num_time_slices
        # note: these are views into self.variance / self.fwd_variance,
        # so the updates below mutate the model state in place
        variance = self.variance[word]
        fwd_variance = self.fwd_variance[word]
        # forward pass. Set initial variance very high
        fwd_variance[0] = chain_variance * INIT_VARIANCE_CONST
        for t in range(1, T + 1):
            if self.obs_variance:
                c = self.obs_variance / (fwd_variance[t - 1] + chain_variance + self.obs_variance)
            else:
                c = 0
            fwd_variance[t] = c * (fwd_variance[t - 1] + chain_variance)

        # backward pass
        variance[T] = fwd_variance[T]
        for t in range(T - 1, -1, -1):
            if fwd_variance[t] > 0.0:
                c = np.power((fwd_variance[t] / (fwd_variance[t] + chain_variance)), 2)
            else:
                c = 0
            variance[t] = (c * (variance[t + 1] - chain_variance)) + ((1 - c) * fwd_variance[t])
        return variance, fwd_variance

    def compute_post_mean(self, word, chain_variance):
        r"""Get the mean, based on the `Variational Kalman Filtering approach for Approximate Inference (section 3.1)
        <https://mimno.infosci.cornell.edu/info6150/readings/dynamic_topic_models.pdf>`_.

        Notes
        -----
        This function essentially computes E[\beta_{t,w}] for t = 1:T.

        .. :math::

            Fwd_Mean(t) ≡ E(beta_{t,w} | beta_ˆ 1:t )
            = (obs_variance / fwd_variance[t - 1] + chain_variance + obs_variance ) * fwd_mean[t - 1] +
            (1 - (obs_variance / fwd_variance[t - 1] + chain_variance + obs_variance)) * beta

        .. :math::

            Mean(t) ≡ E(beta_{t,w} | beta_ˆ 1:T )
            = fwd_mean[t - 1] + (obs_variance / fwd_variance[t - 1] + obs_variance) +
            (1 - obs_variance / fwd_variance[t - 1] + obs_variance)) * mean[t]

        Parameters
        ----------
        word: int
            The word's ID.
        chain_variance : float
            Gaussian parameter defined in the beta distribution to dictate how the beta values evolve over time.

        Returns
        -------
        (numpy.ndarray, numpy.ndarray)
            The first returned value is the mean of each word in each time slice, the second value is the
            inferred posterior mean for the same pairs.

        """
        T = self.num_time_slices
        obs = self.obs[word]
        fwd_variance = self.fwd_variance[word]
        # views into self.mean / self.fwd_mean — updated in place below
        mean = self.mean[word]
        fwd_mean = self.fwd_mean[word]

        # forward
        fwd_mean[0] = 0
        for t in range(1, T + 1):
            c = self.obs_variance / (fwd_variance[t - 1] + chain_variance + self.obs_variance)
            fwd_mean[t] = c * fwd_mean[t - 1] + (1 - c) * obs[t - 1]

        # backward pass
        mean[T] = fwd_mean[T]
        for t in range(T - 1, -1, -1):
            if chain_variance == 0.0:
                c = 0.0
            else:
                c = chain_variance / (fwd_variance[t] + chain_variance)
            mean[t] = c * fwd_mean[t] + (1 - c) * mean[t + 1]
        return mean, fwd_mean

    def compute_expected_log_prob(self):
        """Compute the expected log probability given values of m.

        The appendix describes the Expectation of log-probabilities in equation 5 of the DTM paper;
        The below implementation is the result of solving the equation and is implemented as in the original
        Blei DTM code.

        Returns
        -------
        numpy.ndarray of float
            The expected value for the log probabilities for each word and time slice.

        """
        # mean column t + 1 corresponds to e_log_prob column t (column 0 is the initial state)
        for (w, t), val in np.ndenumerate(self.e_log_prob):
            self.e_log_prob[w][t] = self.mean[w][t + 1] - np.log(self.zeta[t])
        return self.e_log_prob

    def sslm_counts_init(self, obs_variance, chain_variance, sstats):
        """Initialize the State Space Language Model with LDA sufficient statistics.

        Called for each topic-chain and initializes initial mean, variance and Topic-Word probabilities
        for the first time-slice.

        Parameters
        ----------
        obs_variance : float, optional
            Observed variance used to approximate the true and forward variance.
        chain_variance : float
            Gaussian parameter defined in the beta distribution to dictate how the beta values evolve over time.
        sstats : numpy.ndarray
            Sufficient statistics of the LDA model. Corresponds to matrix beta in the linked paper for time slice 0,
            expected shape (`self.vocab_len`, `num_topics`).

        """
        W = self.vocab_len
        T = self.num_time_slices

        # normalize, smooth by 1/W, renormalize, then take logs
        log_norm_counts = np.copy(sstats)
        log_norm_counts /= sum(log_norm_counts)
        log_norm_counts += 1.0 / W
        log_norm_counts /= sum(log_norm_counts)
        log_norm_counts = np.log(log_norm_counts)

        # setting variational observations to transformed counts
        # the same initial counts are replicated across all T time slices
        self.obs = (np.repeat(log_norm_counts, T, axis=0)).reshape(W, T)

        # set variational parameters
        self.obs_variance = obs_variance
        self.chain_variance = chain_variance

        # compute post variance, mean
        for w in range(W):
            self.variance[w], self.fwd_variance[w] = self.compute_post_variance(w, self.chain_variance)
            self.mean[w], self.fwd_mean[w] = self.compute_post_mean(w, self.chain_variance)

        self.zeta = self.update_zeta()
        self.e_log_prob = self.compute_expected_log_prob()

    def fit_sslm(self, sstats):
        """Fits variational distribution.

        This is essentially the m-step.
        Maximizes the approximation of the true posterior for a particular topic using the provided sufficient
        statistics. Updates the values using :meth:`~gensim.models.ldaseqmodel.sslm.update_obs` and
        :meth:`~gensim.models.ldaseqmodel.sslm.compute_expected_log_prob`.

        Parameters
        ----------
        sstats : numpy.ndarray
            Sufficient statistics for a particular topic. Corresponds to matrix beta in the linked paper for the
            current time slice, expected shape (`self.vocab_len`, `num_topics`).

        Returns
        -------
        float
            The lower bound for the true posterior achieved using the fitted approximate distribution.

        """
        W = self.vocab_len
        bound = 0
        old_bound = 0
        sslm_fit_threshold = 1e-6
        sslm_max_iter = 2
        converged = sslm_fit_threshold + 1

        # computing variance, fwd_variance
        self.variance, self.fwd_variance = \
            (np.array(x) for x in zip(*(self.compute_post_variance(w, self.chain_variance) for w in range(W))))

        # column sum of sstats
        totals = sstats.sum(axis=0)
        iter_ = 0

        # only the "DTM" branch is live; "DIM" is a placeholder for the Document Influence Model
        model = "DTM"
        if model == "DTM":
            bound = self.compute_bound(sstats, totals)
        if model == "DIM":
            bound = self.compute_bound_fixed(sstats, totals)

        logger.info("initial sslm bound is %f", bound)

        while converged > sslm_fit_threshold and iter_ < sslm_max_iter:
            iter_ += 1
            old_bound = bound
            self.obs, self.zeta = self.update_obs(sstats, totals)

            if model == "DTM":
                bound = self.compute_bound(sstats, totals)
            if model == "DIM":
                bound = self.compute_bound_fixed(sstats, totals)

            # relative change of the bound drives convergence
            converged = np.fabs((bound - old_bound) / old_bound)
            logger.info("iteration %i iteration lda seq bound is %f convergence is %f", iter_, bound, converged)

        self.e_log_prob = self.compute_expected_log_prob()
        return bound

    def compute_bound(self, sstats, totals):
        """Compute the maximized lower bound achieved for the log probability of the true posterior.

        Uses the formula presented in the appendix of the DTM paper (formula no. 5).

        Parameters
        ----------
        sstats : numpy.ndarray
            Sufficient statistics for a particular topic. Corresponds to matrix beta in the linked paper for the first
            time slice, expected shape (`self.vocab_len`, `num_topics`).
        totals : list of int of length `len(self.time_slice)`
            The totals for each time slice.

        Returns
        -------
        float
            The maximized lower bound.

        """
        # NOTE(review): `w` and `t` are deliberately reused as loop variables below,
        # shadowing the vocab length and time-slice count read here. The generator
        # expressions bind the counts before the shadowing takes effect, but this
        # is fragile — confirm before refactoring.
        w = self.vocab_len
        t = self.num_time_slices

        term_1 = 0
        term_2 = 0
        term_3 = 0

        val = 0
        ent = 0

        chain_variance = self.chain_variance
        # computing mean, fwd_mean
        self.mean, self.fwd_mean = \
            (np.array(x) for x in zip(*(self.compute_post_mean(w, self.chain_variance) for w in range(w))))
        self.zeta = self.update_zeta()

        val = sum(self.variance[w][0] - self.variance[w][t] for w in range(w)) / 2 * chain_variance

        logger.info("Computing bound, all times")

        for t in range(1, t + 1):
            term_1 = 0.0
            term_2 = 0.0
            ent = 0.0
            for w in range(w):

                m = self.mean[w][t]
                prev_m = self.mean[w][t - 1]

                v = self.variance[w][t]

                # w_phi_l is only used in Document Influence Model; the values are always zero in this case
                # w_phi_l = sslm.w_phi_l[w][t - 1]
                # exp_i = np.exp(-prev_m)
                # term_1 += (np.power(m - prev_m - (w_phi_l * exp_i), 2) / (2 * chain_variance)) -
                # (v / chain_variance) - np.log(chain_variance)

                term_1 += \
                    (np.power(m - prev_m, 2) / (2 * chain_variance)) - (v / chain_variance) - np.log(chain_variance)
                term_2 += sstats[w][t - 1] * m
                ent += np.log(v) / 2  # note the 2pi's cancel with term1 (see doc)

            term_3 = -totals[t - 1] * np.log(self.zeta[t - 1])
            val += term_2 + term_3 + ent - term_1

        return val

    def update_obs(self, sstats, totals):
        """Optimize the bound with respect to the observed variables.

        TODO:
        This is by far the slowest function in the whole algorithm.
        Replacing or improving the performance of this would greatly speed things up.

        Parameters
        ----------
        sstats : numpy.ndarray
            Sufficient statistics for a particular topic. Corresponds to matrix beta in the linked paper for the first
            time slice, expected shape (`self.vocab_len`, `num_topics`).
        totals : list of int of length `len(self.time_slice)`
            The totals for each time slice.

        Returns
        -------
        (numpy.ndarray of float, numpy.ndarray of float)
            The updated optimized values for obs and the zeta variational parameter.

        """
        OBS_NORM_CUTOFF = 2
        STEP_SIZE = 0.01
        TOL = 1e-3

        W = self.vocab_len
        T = self.num_time_slices

        runs = 0
        mean_deriv_mtx = np.zeros((T, T + 1))

        norm_cutoff_obs = None
        for w in range(W):
            w_counts = sstats[w]
            counts_norm = 0
            # now we find L2 norm of w_counts
            for i in range(len(w_counts)):
                counts_norm += w_counts[i] * w_counts[i]

            counts_norm = np.sqrt(counts_norm)

            # rare words below the cutoff reuse the last optimized obs vector
            # instead of paying for another conjugate-gradient run
            if counts_norm < OBS_NORM_CUTOFF and norm_cutoff_obs is not None:
                obs = self.obs[w]
                norm_cutoff_obs = np.copy(obs)
            else:
                if counts_norm < OBS_NORM_CUTOFF:
                    w_counts = np.zeros(len(w_counts))

                # TODO: apply lambda function
                for t in range(T):
                    mean_deriv_mtx[t] = self.compute_mean_deriv(w, t, mean_deriv_mtx[t])

                deriv = np.zeros(T)
                args = self, w_counts, totals, mean_deriv_mtx, w, deriv
                obs = self.obs[w]
                model = "DTM"

                if model == "DTM":
                    # slowest part of method
                    obs = optimize.fmin_cg(
                        f=f_obs, fprime=df_obs, x0=obs, gtol=TOL, args=args, epsilon=STEP_SIZE, disp=0
                    )
                if model == "DIM":
                    pass
                runs += 1

                if counts_norm < OBS_NORM_CUTOFF:
                    norm_cutoff_obs = obs

                self.obs[w] = obs

        self.zeta = self.update_zeta()

        return self.obs, self.zeta

    def compute_mean_deriv(self, word, time, deriv):
        r"""Helper functions for optimizing a function.

        Compute the derivative of:

        .. :math::

            E[\beta_{t,w}]/d obs_{s,w} for t = 1:T.

        Parameters
        ----------
        word : int
            The word's ID.
        time : int
            The time slice.
        deriv : list of float
            Derivative for each time slice.

        Returns
        -------
        list of float
            Mean derivative for each time slice.

        """
        T = self.num_time_slices
        # NOTE(review): this reads self.variance, not self.fwd_variance, despite
        # the local name — confirm this mirrors the original Blei DTM code.
        fwd_variance = self.variance[word]

        deriv[0] = 0

        # forward pass
        for t in range(1, T + 1):
            if self.obs_variance > 0.0:
                w = self.obs_variance / (fwd_variance[t - 1] + self.chain_variance + self.obs_variance)
            else:
                w = 0.0
            val = w * deriv[t - 1]
            if time == t - 1:
                val += (1 - w)
            deriv[t] = val

        for t in range(T - 1, -1, -1):
            if self.chain_variance == 0.0:
                w = 0.0
            else:
                w = self.chain_variance / (fwd_variance[t] + self.chain_variance)
            deriv[t] = w * deriv[t] + (1 - w) * deriv[t + 1]

        return deriv

    def compute_obs_deriv(self, word, word_counts, totals, mean_deriv_mtx, deriv):
        """Derivation of obs which is used in derivative function `df_obs` while optimizing.

        Parameters
        ----------
        word : int
            The word's ID.
        word_counts : list of int
            Total word counts for each time slice.
        totals : list of int of length `len(self.time_slice)`
            The totals for each time slice.
        mean_deriv_mtx : list of float
            Mean derivative for each time slice.
        deriv : list of float
            Mean derivative for each time slice.

        Returns
        -------
        list of float
            Mean derivative for each time slice.

        """
        # flag
        init_mult = 1000

        T = self.num_time_slices

        mean = self.mean[word]
        variance = self.variance[word]

        # only used for DIM mode
        # w_phi_l = self.w_phi_l[word]
        # m_update_coeff = self.m_update_coeff[word]

        # temp_vector holds temporary zeta values
        self.temp_vect = np.zeros(T)

        for u in range(T):
            self.temp_vect[u] = np.exp(mean[u + 1] + variance[u + 1] / 2)

        for t in range(T):
            mean_deriv = mean_deriv_mtx[t]
            term1 = 0
            term2 = 0
            term3 = 0
            term4 = 0

            for u in range(1, T + 1):
                mean_u = mean[u]
                mean_u_prev = mean[u - 1]
                dmean_u = mean_deriv[u]
                dmean_u_prev = mean_deriv[u - 1]

                term1 += (mean_u - mean_u_prev) * (dmean_u - dmean_u_prev)
                term2 += (word_counts[u - 1] - (totals[u - 1] * self.temp_vect[u - 1] / self.zeta[u - 1])) * dmean_u

                model = "DTM"
                if model == "DIM":
                    # do some stuff
                    pass

            if self.chain_variance:
                term1 = - (term1 / self.chain_variance)
                term1 = term1 - (mean[0] * mean_deriv[0]) / (init_mult * self.chain_variance)
            else:
                term1 = 0.0

            deriv[t] = term1 + term2 + term3 + term4

        return deriv
class LdaPost(utils.SaveLoad):
"""Posterior values associated with each set of documents.
TODO: use **Hoffman, Blei, Bach: Online Learning for Latent Dirichlet Allocation, NIPS 2010.**
to update phi, gamma. End game would be to somehow replace LdaPost entirely with LdaModel.
"""
def __init__(self, doc=None, lda=None, max_doc_len=None, num_topics=None, gamma=None, lhood=None):
"""Initialize the posterior value structure for the given LDA model.
Parameters
----------
doc : list of (int, int)
A BOW representation of the document. Each element in the list is a pair of a word's ID and its number
of occurences in the document.
lda : :class:`~gensim.models.ldamodel.LdaModel`, optional
The underlying LDA model.
max_doc_len : int, optional
The maximum number of words in a document.
num_topics : int, optional
Number of topics discovered by the LDA model.
gamma : numpy.ndarray, optional
Topic weight variational parameters for each document. If not supplied, it will be inferred from the model.
lhood : float, optional
The log likelihood lower bound.
"""
self.doc = doc
self.lda = lda
self.gamma = gamma
self.lhood = lhood
if self.gamma is None:
self.gamma = np.zeros(num_topics)
if self.lhood is None:
self.lhood = np.zeros(num_topics + 1)
if max_doc_len is not None and num_topics is not None:
self.phi = np.zeros((max_doc_len, num_topics))
self.log_phi = np.zeros((max_doc_len, num_topics))
# the following are class variables which are to be integrated during Document Influence Model
self.doc_weight = None
self.renormalized_doc_weight = None
def update_phi(self, doc_number, time):
"""Update variational multinomial parameters, based on a document and a time-slice.
This is done based on the original Blei-LDA paper, where:
log_phi := beta * exp(Ψ(gamma)), over every topic for every word.
TODO: incorporate lee-sueng trick used in
**Lee, Seung: Algorithms for non-negative matrix factorization, NIPS 2001**.
Parameters
----------
doc_number : int
Document number. Unused.
time : int
Time slice. Unused.
Returns
-------
(list of float, list of float)
Multinomial parameters, and their logarithm, for each word in the document.
"""
num_topics = self.lda.num_topics
# digamma values
dig = np.zeros(num_topics)
for k in range(num_topics):
dig[k] = digamma(self.gamma[k])
n = 0 # keep track of iterations for phi, log_phi
for word_id, count in self.doc:
for k in range(num_topics):
self.log_phi[n][k] = dig[k] + self.lda.topics[word_id][k]
log_phi_row = self.log_phi[n]
phi_row = self.phi[n]
# log normalize
v = log_phi_row[0]
for i in range(1, len(log_phi_row)):
v = np.logaddexp(v, log_phi_row[i])
# subtract every element by v
log_phi_row = log_phi_row - v
phi_row = np.exp(log_phi_row)
self.log_phi[n] = log_phi_row
self.phi[n] = phi_row
n += 1 # increase iteration
return self.phi, self.log_phi
def update_gamma(self):
"""Update variational dirichlet parameters.
This operations is described in the original Blei LDA paper:
gamma = alpha + sum(phi), over every topic for every word.
Returns
-------
list of float
The updated gamma parameters for each word in the document.
"""
self.gamma = np.copy(self.lda.alpha)
n = 0 # keep track of number of iterations for phi, log_phi
for word_id, count in self.doc:
phi_row = self.phi[n]
for k in range(self.lda.num_topics):
self.gamma[k] += phi_row[k] * count
n += 1
return self.gamma
def init_lda_post(self):
"""Initialize variational posterior. """
total = sum(count for word_id, count in self.doc)
self.gamma.fill(self.lda.alpha[0] + float(total) / self.lda.num_topics)
self.phi[:len(self.doc), :] = 1.0 / self.lda.num_topics
# doc_weight used during DIM
# ldapost.doc_weight = None
def compute_lda_lhood(self):
"""Compute the log likelihood bound.
Returns
-------
float
The optimal lower bound for the true posterior using the approximate distribution.
"""
num_topics = self.lda.num_topics
gamma_sum = np.sum(self.gamma)
# to be used in DIM
# sigma_l = 0
# sigma_d = 0
lhood = gammaln(np.sum(self.lda.alpha)) - gammaln(gamma_sum)
self.lhood[num_topics] = lhood
# influence_term = 0
digsum = digamma(gamma_sum)
model = "DTM" # noqa:F841
for k in range(num_topics):
# below code only to be used in DIM mode
# if ldapost.doc_weight is not None and (model == "DIM" or model == "fixed"):
# influence_topic = ldapost.doc_weight[k]
# influence_term = \
# - ((influence_topic * influence_topic + sigma_l * sigma_l) / 2.0 / (sigma_d * sigma_d))
e_log_theta_k = digamma(self.gamma[k]) - digsum
lhood_term = \
(self.lda.alpha[k] - self.gamma[k]) * e_log_theta_k + \
gammaln(self.gamma[k]) - gammaln(self.lda.alpha[k])
# TODO: check why there's an IF
n = 0
for word_id, count in self.doc:
if self.phi[n][k] > 0:
lhood_term += \
count * self.phi[n][k] * (e_log_theta_k + self.lda.topics[word_id][k] - self.log_phi[n][k])
n += 1
self.lhood[k] = lhood_term
lhood += lhood_term
# in case of DIM add influence term
# lhood += influence_term
return lhood
def fit_lda_post(self, doc_number, time, ldaseq, LDA_INFERENCE_CONVERGED=1e-8,
lda_inference_max_iter=25, g=None, g3_matrix=None, g4_matrix=None, g5_matrix=None):
"""Posterior inference for lda.
Parameters
----------
doc_number : int
The documents number.
time : int
Time slice.
ldaseq : object
Unused.
LDA_INFERENCE_CONVERGED : float
Epsilon value used to check whether the inference step has sufficiently converged.
lda_inference_max_iter : int
Maximum number of iterations in the inference step.
g : object
Unused. Will be useful when the DIM model is implemented.
g3_matrix: object
Unused. Will be useful when the DIM model is implemented.
g4_matrix: object
Unused. Will be useful when the DIM model is implemented.
g5_matrix: object
Unused. Will be useful when the DIM model is implemented.
Returns
-------
float
The optimal lower bound for the true posterior using the approximate distribution.
"""
self.init_lda_post()
# sum of counts in a doc
total = sum(count for word_id, count in self.doc)
model = "DTM"
if model == "DIM":
# if in DIM then we initialise some variables here
pass
lhood = self.compute_lda_lhood()
lhood_old = 0
converged = 0
iter_ = 0
# first iteration starts here
iter_ += 1
lhood_old = lhood
self.gamma = self.update_gamma()
model = "DTM"
if model == "DTM" or sslm is None:
self.phi, self.log_phi = self.update_phi(doc_number, time)
elif model == "DIM" and sslm is not None:
self.phi, self.log_phi = self.update_phi_fixed(doc_number, time, sslm, g3_matrix, g4_matrix, g5_matrix)
lhood = self.compute_lda_lhood()
converged = np.fabs((lhood_old - lhood) / (lhood_old * total))
while converged > LDA_INFERENCE_CONVERGED and iter_ <= lda_inference_max_iter:
iter_ += 1
lhood_old = lhood
self.gamma = self.update_gamma()
model = "DTM"
if model == "DTM" or sslm is None:
self.phi, self.log_phi = self.update_phi(doc_number, time)
elif model == "DIM" and sslm is not None:
self.phi, self.log_phi = self.update_phi_fixed(doc_number, time, sslm, g3_matrix, g4_matrix, g5_matrix)
lhood = self.compute_lda_lhood()
converged = np.fabs((lhood_old - lhood) / (lhood_old * total))
return lhood
def update_lda_seq_ss(self, time, doc, topic_suffstats):
    """Fold this document's posterior into the per-topic sufficient statistics.

    Mirrors :meth:`~gensim.models.ldaseqmodel.LdaPost.update_gamma` and uses
    the same formula: each word contributes its count weighted by the word's
    posterior topic-assignment probability (phi).

    Parameters
    ----------
    time : int
        The time slice.
    doc : list of (int, float)
        Unused but kept here for backwards compatibility. The document set in
        the constructor (`self.doc`) is used instead.
    topic_suffstats : list of float
        Sufficient statistics for each topic; updated in place and returned.

    Returns
    -------
    list of float
        The updated sufficient statistics for each topic.
    """
    for k in range(self.lda.num_topics):
        topic_ss = topic_suffstats[k]
        # `n` is the word's position within the document and indexes the
        # rows of the phi matrix.
        for n, (word_id, count) in enumerate(self.doc):
            topic_ss[word_id][time] += count * self.phi[n][k]
        topic_suffstats[k] = topic_ss
    return topic_suffstats
# the following functions are used in update_obs as the objective function.
def f_obs(x, *args):
    """Objective function minimised when optimising a word's `obs` values.

    Parameters
    ----------
    x : list of float
        The obs values for this word.
    *args
        Tuple of ``(sslm, word_counts, totals, mean_deriv_mtx, word, deriv)``:
        the State Space Language Model, per-slice word counts and totals, the
        mean-derivative matrix, the word's ID, and a derivative vector (the
        last two are only used by the DIM variant).

    Returns
    -------
    float
        The (negated) value of the objective function evaluated at point `x`.
    """
    sslm, word_counts, totals, mean_deriv_mtx, word, deriv = args
    init_mult = 1000  # flag

    # Write the candidate obs back into the chain and refresh posterior means.
    sslm.obs[word] = x
    sslm.mean[word], sslm.fwd_mean[word] = sslm.compute_post_mean(word, sslm.chain_variance)
    mean = sslm.mean[word]
    variance = sslm.variance[word]

    # term1: chain smoothness penalty; term2: data log-likelihood.
    # term3/term4 are reserved for the (unimplemented) DIM model and stay 0.
    term1 = 0
    term2 = 0
    term3 = 0
    term4 = 0

    # only used for DIM mode
    # w_phi_l = sslm.w_phi_l[word]
    # m_update_coeff = sslm.m_update_coeff[word]

    model = "DTM"
    for t in range(1, len(x) + 1):
        step = mean[t] - mean[t - 1]
        term1 += step * step
        term2 += word_counts[t - 1] * mean[t] \
            - totals[t - 1] * np.exp(mean[t] + variance[t] / 2) / sslm.zeta[t - 1]
        if model == "DIM":
            # stuff happens
            pass

    if sslm.chain_variance > 0.0:
        term1 = - (term1 / (2 * sslm.chain_variance))
        term1 = term1 - mean[0] * mean[0] / (2 * init_mult * sslm.chain_variance)
    else:
        term1 = 0.0

    return -(term1 + term2 + term3 + term4)
def df_obs(x, *args):
    """Derivative of the objective function optimised by :func:`f_obs`.

    Parameters
    ----------
    x : list of float
        The obs values for this word.
    *args
        Tuple of ``(sslm, word_counts, totals, mean_deriv_mtx, word, deriv)``
        (see :func:`f_obs` for the meaning of each element).

    Returns
    -------
    list of float
        The negated gradient of the objective function evaluated at point `x`.
    """
    sslm, word_counts, totals, mean_deriv_mtx, word, deriv = args

    # Write the candidate obs back and refresh posterior means before
    # differentiating.
    sslm.obs[word] = x
    sslm.mean[word], sslm.fwd_mean[word] = sslm.compute_post_mean(word, sslm.chain_variance)

    model = "DTM"
    if model == "DIM":
        # NOTE(review): unreachable while `model` is hard-coded to "DTM"; the
        # `p.*` names below are undefined placeholders kept for DIM parity.
        deriv = sslm.compute_obs_deriv_fixed(
            p.word, p.word_counts, p.totals, p.sslm, p.mean_deriv_mtx, deriv)  # noqa:F821
    else:
        deriv = sslm.compute_obs_deriv(word, word_counts, totals, mean_deriv_mtx, deriv)
    return np.negative(deriv)
| midnightradio/gensim | gensim/models/ldaseqmodel.py | Python | gpl-3.0 | 62,153 | [
"Gaussian"
] | ec8b293a7e94a75d1f0230841367ec509f9fe323097c26b43aad5baa59b3cc32 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Ruggero Marchei <ruggero.marchei@daemonzone.net>
# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
# Copyright: (c) 2016-2017, Konstantin Shalygin <k0ste@k0ste.ru>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: find
author: Brian Coca (based on Ruggero Marchei's Tidy)
version_added: "2.0"
short_description: Return a list of files based on specific criteria
description:
- Return a list of files based on specific criteria. Multiple criteria are AND'd together.
- For Windows targets, use the M(win_find) module instead.
options:
age:
description:
- Select files whose age is equal to or greater than the specified time.
Use a negative age to find files equal to or less than the specified time.
You can choose seconds, minutes, hours, days, or weeks by specifying the
first letter of any of those words (e.g., "1w").
patterns:
default: '*'
description:
- One or more (shell or regex) patterns, which type is controlled by C(use_regex) option.
- The patterns restrict the list of files to be returned to those whose basenames match at
least one of the patterns specified. Multiple patterns can be specified using a list.
- This parameter expects a list, which can be either comma separated or YAML. If any of the
patterns contain a comma, make sure to put them in a list to avoid splitting the patterns
in undesirable ways.
type: list
aliases: ['pattern']
excludes:
description:
- One or more (shell or regex) patterns, which type is controlled by C(use_regex) option.
- Items matching an C(excludes) pattern are culled from C(patterns) matches.
Multiple patterns can be specified using a list.
type: list
aliases: ['exclude']
version_added: "2.5"
contains:
description:
- One or more regex patterns which should be matched against the file content.
paths:
required: true
aliases: [ name, path ]
description:
- List of paths of directories to search. All paths must be fully qualified.
type: list
file_type:
description:
- Type of file to select.
- The 'link' and 'any' choices were added in version 2.3.
choices: [ any, directory, file, link ]
default: file
recurse:
description:
- If target is a directory, recursively descend into the directory looking for files.
type: bool
default: 'no'
size:
description:
- Select files whose size is equal to or greater than the specified size.
Use a negative size to find files equal to or less than the specified size.
Unqualified values are in bytes but b, k, m, g, and t can be appended to specify
bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
Size is not evaluated for directories.
age_stamp:
default: mtime
choices: [ atime, ctime, mtime ]
description:
- Choose the file property against which we compare age.
hidden:
description:
- Set this to true to include hidden files, otherwise they'll be ignored.
type: bool
default: 'no'
follow:
description:
- Set this to true to follow symlinks in path for systems with python 2.6+.
type: bool
default: 'no'
get_checksum:
description:
- Set this to true to retrieve a file's sha1 checksum.
type: bool
default: 'no'
use_regex:
description:
- If false, the patterns are file globs (shell). If true, they are python regexes.
type: bool
default: 'no'
depth:
description:
- Set the maximum number of levels to descend into. Setting recurse
to false will override this value, which is effectively depth 1.
Default is unlimited depth.
version_added: "2.6"
notes:
- For Windows targets, use the M(win_find) module instead.
'''
EXAMPLES = r'''
- name: Recursively find /tmp files older than 2 days
find:
paths: /tmp
age: 2d
recurse: yes
- name: Recursively find /tmp files older than 4 weeks and equal or greater than 1 megabyte
find:
paths: /tmp
age: 4w
size: 1m
recurse: yes
- name: Recursively find /var/tmp files with last access time greater than 3600 seconds
find:
paths: /var/tmp
age: 3600
age_stamp: atime
recurse: yes
- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz
find:
paths: /var/log
patterns: '*.old,*.log.gz'
size: 10m
# Note that YAML double quotes require escaping backslashes but yaml single quotes do not.
- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex
find:
paths: /var/log
patterns: "^.*?\\.(?:old|log\\.gz)$"
size: 10m
use_regex: yes
- name: Find /var/log all directories, exclude nginx and mysql
find:
paths: /var/log
recurse: no
file_type: directory
excludes: 'nginx,mysql'
# When using patterns that contain a comma, make sure they are formatted as lists to avoid splitting the pattern
- name: Use a single pattern that contains a comma formatted as a list
find:
paths: /var/log
file_type: file
use_regex: yes
patterns: ['^_[0-9]{2,4}_.*.log$']
- name: Use multiple patterns that contain a comma formatted as a YAML list
find:
paths: /var/log
file_type: file
use_regex: yes
patterns:
- '^_[0-9]{2,4}_.*.log$'
- '^[a-z]{1,5}_.*log$'
'''
RETURN = r'''
files:
description: all matches found with the specified criteria (see stat module for full output of each dictionary)
returned: success
type: list
sample: [
{ path: "/var/tmp/test1",
mode: "0644",
"...": "...",
checksum: 16fac7be61a6e4591a33ef4b729c5c3302307523
},
{ path: "/var/tmp/test2",
"...": "..."
},
]
matched:
description: number of matches
returned: success
type: string
sample: 14
examined:
description: number of filesystem objects looked at
returned: success
type: string
sample: 34
'''
import fnmatch
import grp
import os
import pwd
import re
import stat
import sys
import time
from ansible.module_utils.basic import AnsibleModule
def pfilter(f, patterns=None, excludes=None, use_regex=False):
    '''Filter a basename against include/exclude patterns.

    :arg f: basename to test
    :arg patterns: list of include patterns; ``None`` means "match everything"
    :arg excludes: list of exclude patterns; any match here rejects `f`
    :arg use_regex: if True patterns are python regexes, otherwise shell globs
    :returns: True when `f` matches at least one include pattern (or
        ``patterns`` is None) and matches no exclude pattern.

    Fix over the previous version: excludes are now honoured even when
    ``patterns`` is None (the old code fell through to False and silently
    ignored ``excludes``). Callers in this module always pass ``patterns``
    (default ``['*']``), so their behavior is unchanged.
    '''
    if use_regex:
        def match(name, pat):
            # re.match anchors at the start of the name, as before.
            return re.compile(pat).match(name) is not None
    else:
        match = fnmatch.fnmatch

    # An exclude match vetoes the file regardless of the include patterns.
    if excludes and any(match(f, e) for e in excludes):
        return False
    if patterns is None:
        return True
    return any(match(f, p) for p in patterns)
def agefilter(st, now, age, timestamp):
    '''Filter based on file age.

    :arg st: stat result for the file
    :arg now: current time, seconds since the epoch
    :arg age: threshold in seconds; a positive value keeps files at least
        that old, a negative value keeps files at most that old, and None
        disables the filter
    :arg timestamp: stat field to compare against: 'atime', 'ctime' or 'mtime'
    :returns: True when the file passes the age filter
    '''
    if age is None:
        return True
    # getattr is the idiomatic spelling of st.__getattribute__("st_%s" % ...)
    file_age = now - getattr(st, "st_%s" % timestamp)
    if age >= 0:
        return file_age >= abs(age)
    return file_age <= abs(age)
def sizefilter(st, size):
    '''Filter based on file size.

    A positive `size` keeps files at least that large, a negative `size`
    keeps files at most that large, and None disables the filter.
    '''
    if size is None:
        return True
    threshold = abs(size)
    if size >= 0 and st.st_size >= threshold:
        return True
    if size < 0 and st.st_size <= threshold:
        return True
    return False
def contentfilter(fsname, pattern):
    """
    Filter files which contain the given expression
    :arg fsname: Filename to scan for lines matching a pattern
    :arg pattern: Pattern to look for inside of line
    :rtype: bool
    :returns: True if one of the lines in fsname matches the pattern. Otherwise False
    """
    if pattern is None:
        return True
    matcher = re.compile(pattern)
    try:
        with open(fsname) as handle:
            # Short-circuits on the first line whose start matches the pattern.
            return any(matcher.match(line) for line in handle)
    except Exception:
        # Unreadable/undecodable files are deliberately treated as non-matching.
        pass
    return False
def statinfo(st):
    '''Flatten a stat result into the dict of fields reported per matched file.

    User and group names are resolved best-effort: lookup failures (e.g. a uid
    with no passwd entry) yield empty strings instead of raising.
    '''
    try:  # user data
        owner = pwd.getpwuid(st.st_uid).pw_name
    except Exception:
        owner = ""
    try:  # group data
        group = grp.getgrgid(st.st_gid).gr_name
    except Exception:
        group = ""

    mode = st.st_mode
    return {
        'mode': "%04o" % stat.S_IMODE(mode),
        'isdir': stat.S_ISDIR(mode),
        'ischr': stat.S_ISCHR(mode),
        'isblk': stat.S_ISBLK(mode),
        'isreg': stat.S_ISREG(mode),
        'isfifo': stat.S_ISFIFO(mode),
        'islnk': stat.S_ISLNK(mode),
        'issock': stat.S_ISSOCK(mode),
        'uid': st.st_uid,
        'gid': st.st_gid,
        'size': st.st_size,
        'inode': st.st_ino,
        'dev': st.st_dev,
        'nlink': st.st_nlink,
        'atime': st.st_atime,
        'mtime': st.st_mtime,
        'ctime': st.st_ctime,
        'gr_name': group,
        'pw_name': owner,
        'wusr': bool(mode & stat.S_IWUSR),
        'rusr': bool(mode & stat.S_IRUSR),
        'xusr': bool(mode & stat.S_IXUSR),
        'wgrp': bool(mode & stat.S_IWGRP),
        'rgrp': bool(mode & stat.S_IRGRP),
        'xgrp': bool(mode & stat.S_IXGRP),
        'woth': bool(mode & stat.S_IWOTH),
        'roth': bool(mode & stat.S_IROTH),
        'xoth': bool(mode & stat.S_IXOTH),
        'isuid': bool(mode & stat.S_ISUID),
        'isgid': bool(mode & stat.S_ISGID),
    }
def main():
    """Entry point: walk the requested paths and report matching filesystem objects.

    Parses the module arguments, converts the human-friendly `age` and `size`
    strings into seconds/bytes, then walks each path applying the name, age,
    size and content filters. Exits via module.exit_json with `files`,
    `matched` and `examined` counts, never returns.
    """
    module = AnsibleModule(
        argument_spec=dict(
            paths=dict(type='list', required=True, aliases=['name', 'path']),
            patterns=dict(type='list', default=['*'], aliases=['pattern']),
            excludes=dict(type='list', aliases=['exclude']),
            contains=dict(type='str'),
            file_type=dict(type='str', default="file", choices=['any', 'directory', 'file', 'link']),
            age=dict(type='str'),
            age_stamp=dict(type='str', default="mtime", choices=['atime', 'mtime', 'ctime']),
            size=dict(type='str'),
            recurse=dict(type='bool', default='no'),
            hidden=dict(type='bool', default='no'),
            follow=dict(type='bool', default='no'),
            get_checksum=dict(type='bool', default='no'),
            use_regex=dict(type='bool', default='no'),
            depth=dict(type='int', default=None),
        ),
        supports_check_mode=True,
    )
    params = module.params
    filelist = []
    if params['age'] is None:
        age = None
    else:
        # convert age to seconds:
        # e.g. "2d" -> 2 * 86400; a bare number means seconds (unit group
        # is optional, so .get(..., 1) falls back to the identity factor).
        m = re.match(r"^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
        seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
        if m:
            age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(age=params['age'], msg="failed to process age")
    if params['size'] is None:
        size = None
    else:
        # convert size to bytes:
        # e.g. "10m" -> 10 * 1024**2; unqualified values are plain bytes.
        m = re.match(r"^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
        bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
        if m:
            size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(size=params['size'], msg="failed to process size")
    now = time.time()
    msg = ''
    looked = 0  # number of filesystem objects examined; reported as `examined`
    for npath in params['paths']:
        npath = os.path.expanduser(os.path.expandvars(npath))
        if os.path.isdir(npath):
            ''' ignore followlinks for python version < 2.6 '''
            for root, dirs, files in (sys.version_info < (2, 6, 0) and os.walk(npath)) or os.walk(npath, followlinks=params['follow']):
                if params['depth']:
                    # Depth of `root` below the start path, counted by path
                    # separators after stripping a trailing separator.
                    # NOTE(review): presumably behaves oddly for npath == "/"
                    # (rstrip leaves an empty prefix) -- verify on root paths.
                    depth = root.replace(npath.rstrip(os.path.sep), '').count(os.path.sep)
                    if files or dirs:
                        # Entries inside `root` sit one level deeper than root itself.
                        depth += 1
                    if depth > params['depth']:
                        # Prune the walk: emptying `dirs` in place stops
                        # os.walk from descending further here.
                        del(dirs[:])
                        continue
                looked = looked + len(files) + len(dirs)
                for fsobj in (files + dirs):
                    fsname = os.path.normpath(os.path.join(root, fsobj))
                    # Dotfiles are skipped unless `hidden` was requested.
                    if os.path.basename(fsname).startswith('.') and not params['hidden']:
                        continue
                    try:
                        # lstat so symlinks are classified as links, not targets.
                        st = os.lstat(fsname)
                    except Exception:
                        msg += "%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
                        continue
                    r = {'path': fsname}
                    if params['file_type'] == 'any':
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            # Checksums only make sense for regular files.
                            if stat.S_ISREG(st.st_mode) and params['get_checksum']:
                                r['checksum'] = module.sha1(fsname)
                            filelist.append(r)
                    elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)
                    elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
                        # Regular files additionally honour size and content filters.
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and \
                                agefilter(st, now, age, params['age_stamp']) and \
                                sizefilter(st, size) and contentfilter(fsname, params['contains']):
                            r.update(statinfo(st))
                            if params['get_checksum']:
                                r['checksum'] = module.sha1(fsname)
                            filelist.append(r)
                    elif stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link':
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)
                # Without `recurse`, only the first os.walk yield (the top
                # directory) is processed.
                if not params['recurse']:
                    break
        else:
            msg += "%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath
    matched = len(filelist)
    module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)


if __name__ == '__main__':
    main()
| alexlo03/ansible | lib/ansible/modules/files/find.py | Python | gpl-3.0 | 16,079 | [
"Brian"
] | 2d245e2cd78672773af2347adb7ad3b20a43af6a3631162f38fce2979d07cd15 |
from decimal import Decimal
from urllib import parse
import ddt
import httpretty
from django.conf import settings
from django.urls import reverse
from mock import patch
from oscar.core.loading import get_model
from oscar.test import factories
from ecommerce.core.url_utils import get_lms_courseware_url, get_lms_program_dashboard_url
from ecommerce.coupons.tests.mixins import DiscoveryMockMixin
from ecommerce.enterprise.tests.mixins import EnterpriseServiceMockMixin
from ecommerce.extensions.basket.tests.test_utils import TEST_BUNDLE_ID
from ecommerce.extensions.checkout.exceptions import BasketNotFreeError
from ecommerce.extensions.checkout.utils import get_receipt_page_url
from ecommerce.extensions.checkout.views import ReceiptResponseView
from ecommerce.extensions.refund.tests.mixins import RefundTestMixin
from ecommerce.tests.mixins import LmsApiMockMixin
from ecommerce.tests.testcases import TestCase
Basket = get_model('basket', 'Basket')
BasketAttribute = get_model('basket', 'BasketAttribute')
BasketAttributeType = get_model('basket', 'BasketAttributeType')
Order = get_model('order', 'Order')
class FreeCheckoutViewTests(EnterpriseServiceMockMixin, TestCase):
    """ FreeCheckoutView view tests. """
    path = reverse('checkout:free-checkout')

    def setUp(self):
        super(FreeCheckoutViewTests, self).setUp()
        self.user = self.create_user()
        self.bundle_attribute_value = TEST_BUNDLE_ID
        self.client.login(username=self.user.username, password=self.password)

    def prepare_basket(self, price, bundle=False):
        """ Build a basket for self.user holding one verified seat at `price`.

        When `bundle` is set, the basket is additionally tagged with the test
        bundle identifier so it is treated as a program purchase.
        """
        amount = Decimal(price)
        basket = factories.BasketFactory(owner=self.user, site=self.site)
        self.course_run.create_or_update_seat('verified', True, amount)
        basket.add_product(self.course_run.seat_products[0])
        self.assertEqual(basket.lines.count(), 1)
        self.assertEqual(basket.total_incl_tax, amount)
        if bundle:
            BasketAttribute.objects.update_or_create(
                basket=basket,
                attribute_type=BasketAttributeType.objects.get(name='bundle_identifier'),
                value_text=self.bundle_attribute_value,
            )

    def test_empty_basket(self):
        """ Verify redirect to basket summary in case of empty basket. """
        self.assertRedirects(self.client.get(self.path), reverse('basket:summary'))

    def test_non_free_basket(self):
        """ Verify an exception is raised when the URL is being accessed to with a non-free basket. """
        self.prepare_basket(10)
        with self.assertRaises(BasketNotFreeError):
            self.client.get(self.path)

    @httpretty.activate
    def test_enterprise_offer_program_redirect(self):
        """ Verify redirect to the program dashboard page. """
        self.prepare_basket(10, bundle=True)
        self.prepare_enterprise_offer()
        self.assertEqual(Order.objects.count(), 0)

        response = self.client.get(self.path)

        self.assertEqual(Order.objects.count(), 1)
        self.assertRedirects(
            response,
            get_lms_program_dashboard_url(self.bundle_attribute_value),
            fetch_redirect_response=False,
        )

    @httpretty.activate
    def test_enterprise_offer_course_redirect(self):
        """ Verify redirect to the courseware info page. """
        self.prepare_basket(10)
        self.prepare_enterprise_offer()
        self.assertEqual(Order.objects.count(), 0)

        response = self.client.get(self.path)

        self.assertEqual(Order.objects.count(), 1)
        self.assertRedirects(
            response,
            get_lms_courseware_url(self.course_run.id),
            fetch_redirect_response=False,
        )

    @httpretty.activate
    def test_successful_redirect(self):
        """ Verify redirect to the receipt page. """
        self.prepare_basket(0)
        self.assertEqual(Order.objects.count(), 0)

        response = self.client.get(self.path)

        self.assertEqual(Order.objects.count(), 1)
        order = Order.objects.first()
        self.assertRedirects(
            response,
            get_receipt_page_url(
                order_number=order.number,
                site_configuration=order.site.siteconfiguration,
                disable_back_button=True,
            ),
            fetch_redirect_response=False,
        )
class CancelCheckoutViewTests(TestCase):
    """ CancelCheckoutView view tests. """
    path = reverse('checkout:cancel-checkout')

    def setUp(self):
        super(CancelCheckoutViewTests, self).setUp()
        self.user = self.create_user()
        self.client.login(username=self.user.username, password=self.password)

    def _assert_payment_support_email(self, response):
        """ Shared check: the request succeeded and the support email is in context. """
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.context['payment_support_email'],
            self.request.site.siteconfiguration.payment_support_email,
        )

    @httpretty.activate
    def test_get_returns_payment_support_email_in_context(self):
        """
        Verify that after receiving a GET response, the view returns a payment support email in its context.
        """
        self._assert_payment_support_email(self.client.get(self.path))

    @httpretty.activate
    def test_post_returns_payment_support_email_in_context(self):
        """
        Verify that after receiving a POST response, the view returns a payment support email in its context.
        """
        payload = {'decision': 'CANCEL', 'reason_code': '200', 'signed_field_names': 'dummy'}
        self._assert_payment_support_email(self.client.post(self.path, data=payload))
class CheckoutErrorViewTests(TestCase):
    """ CheckoutErrorView view tests. """
    path = reverse('checkout:error')

    def setUp(self):
        super(CheckoutErrorViewTests, self).setUp()
        self.user = self.create_user()
        self.client.login(username=self.user.username, password=self.password)

    def _assert_payment_support_email(self, response):
        """ Shared check: the request succeeded and the support email is in context. """
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.context['payment_support_email'],
            self.request.site.siteconfiguration.payment_support_email,
        )

    @httpretty.activate
    def test_get_returns_payment_support_email_in_context(self):
        """
        Verify that after receiving a GET response, the view returns a payment support email in its context.
        """
        self._assert_payment_support_email(self.client.get(self.path))

    @httpretty.activate
    def test_post_returns_payment_support_email_in_context(self):
        """
        Verify that after receiving a POST response, the view returns a payment support email in its context.
        """
        payload = {'decision': 'CANCEL', 'reason_code': '200', 'signed_field_names': 'dummy'}
        self._assert_payment_support_email(self.client.post(self.path, data=payload))
@ddt.ddt
class ReceiptResponseViewTests(DiscoveryMockMixin, LmsApiMockMixin, RefundTestMixin, TestCase):
"""
Tests for the receipt view.
"""
path = reverse('checkout:receipt')
def setUp(self):
    """Log in a fresh user and stage canned enterprise-learner API payloads."""
    super(ReceiptResponseViewTests, self).setUp()
    self.user = self.create_user()
    self.client.login(username=self.user.username, password=self.password)
    # Note: actual response is far more rich. Just including the bits relevant to us
    # Learner belongs to an enterprise whose learner portal is disabled.
    self.enterprise_learner_data_no_portal = {
        'results': [{
            'enterprise_customer': {
                'name': 'Test Company',
                'slug': 'test-company',
                'enable_learner_portal': False,
            }
        }]
    }
    # Same enterprise shape, but with the learner portal enabled.
    self.enterprise_learner_data_with_portal = {
        'results': [{
            'enterprise_customer': {
                'name': 'Test Company',
                'slug': 'test-company',
                'enable_learner_portal': True,
            }
        }]
    }
    # Empty payload models a learner with no enterprise affiliation.
    self.non_enterprise_learner_data = {}
def _get_receipt_response(self, order_number):
    """GET the receipt page for `order_number` and return the response.

    Arguments:
        order_number (str): Number of Order for which the Receipt Page should be opened.

    Returns:
        response (Response): Response object that's returned by a ReceiptResponseView
    """
    return self.client.get('{path}?order_number={order_number}'.format(
        path=self.path, order_number=order_number))
def _visit_receipt_page_with_another_user(self, order, user):
    """Re-authenticate as `user` and request the receipt page for `order`.

    Arguments:
        order (Order): Order for which the Receipt Page should be opened.
        user (User): User that's logging in.

    Returns:
        response (Response): Response object that's returned by a ReceiptResponseView
    """
    self.client.logout()
    self.client.login(username=user.username, password=self.password)
    return self._get_receipt_response(order.number)
def _create_order_for_receipt(self, user, credit=False, entitlement=False, id_verification_required=False):
    """Create an order whose owner has a mocked, unverified ID status.

    Arguments:
        user (User): User that's trying to visit the Receipt page.
        credit (bool): Indicates whether or not the product is a Credit Course Seat.

    Returns:
        order (Order): Order for which the Receipt is requested.
    """
    self.mock_verification_status_api(self.site, user, status=200, is_verified=False)
    return self.create_order(
        credit=credit, entitlement=entitlement, id_verification_required=id_verification_required)
def test_login_required_get_request(self):
    """ The view should redirect to the login page if the user is not logged in. """
    self.client.logout()
    login_url = '{path}?next={next}'.format(path=reverse(settings.LOGIN_URL),
                                            next=parse.quote(self.path))
    self.assertRedirects(self.client.get(self.path), login_url, target_status_code=302)
@patch('ecommerce.extensions.checkout.views.fetch_enterprise_learner_data')
def test_get_receipt_for_nonexisting_order(self, mock_learner_data):
    """ The view should return 404 status if the Order is not found. """
    mock_learner_data.return_value = self.non_enterprise_learner_data
    self.assertEqual(self._get_receipt_response('ABC123').status_code, 404)
def test_get_payment_method_no_source(self):
    """ Payment method should be None when an Order has no Payment source. """
    self.assertIsNone(ReceiptResponseView().get_payment_method(self.create_order()))
def test_get_payment_method_source_type(self):
    """
    The source type's name is reported as the payment method when no
    credit card was used to purchase a product.
    """
    order = self.create_order()
    source = factories.SourceFactory(order=order)
    self.assertEqual(ReceiptResponseView().get_payment_method(order), source.source_type.name)
def test_get_payment_method_credit_card_purchase(self):
    """
    The card type plus the source label is reported as the payment method
    when a credit card was used to purchase a product.
    """
    order = self.create_order()
    source = factories.SourceFactory(order=order, card_type='Dummy Card', label='Test')
    expected = '{} {}'.format(source.card_type, source.label)
    self.assertEqual(ReceiptResponseView().get_payment_method(order), expected)
@patch('ecommerce.extensions.checkout.views.fetch_enterprise_learner_data')
@httpretty.activate
def test_get_receipt_for_existing_order(self, mock_learner_data):
    """ Order owner should be able to see the Receipt Page."""
    mock_learner_data.return_value = self.non_enterprise_learner_data
    order = self._create_order_for_receipt(self.user)

    response = self._get_receipt_response(order.number)

    self.assertEqual(response.status_code, 200)
    self.assertDictContainsSubset(
        {
            'payment_method': None,
            'display_credit_messaging': False,
            'verification_url': self.site.siteconfiguration.IDVerification_workflow_url(self.course.id),
        },
        response.context_data,
    )
@patch('ecommerce.extensions.checkout.views.fetch_enterprise_learner_data')
@httpretty.activate
def test_awin_product_tracking_for_order(self, mock_learner_data):
    """ Receipt Page should have context for awin product tracking"""
    mock_learner_data.return_value = self.non_enterprise_learner_data
    order = self._create_order_for_receipt(self.user)
    response = self._get_receipt_response(order.number)
    # Rebuild the expected Awin "AW:P|..." pipe-delimited product feed entry
    # for every order line; the view must emit exactly this concatenation
    # (including the trailing CRLF per entry).
    products = []
    for line in order.lines.all():
        products.append("AW:P|{id}|{order_number}|{course_id}|{title}|{price}|{quantity}|{partner_sku}|DEFAULT\r\n".
                        format(id=settings.AWIN_ADVERTISER_ID, order_number=order.number,
                               course_id=line.product.course.id, title=line.title, price=line.unit_price_incl_tax,
                               quantity=line.quantity, partner_sku=line.partner_sku))
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.context_data['product_tracking'], "".join(products))
@patch('ecommerce.extensions.checkout.views.fetch_enterprise_learner_data')
@httpretty.activate
def test_get_receipt_for_existing_entitlement_order(self, mock_learner_data):
    """ Order owner should be able to see the Receipt Page."""
    mock_learner_data.return_value = self.non_enterprise_learner_data
    order = self._create_order_for_receipt(self.user, entitlement=True, id_verification_required=True)

    response = self._get_receipt_response(order.number)

    self.assertEqual(response.status_code, 200)
    self.assertDictContainsSubset(
        {
            'payment_method': None,
            'display_credit_messaging': False,
            'verification_url': self.site.siteconfiguration.IDVerification_workflow_url(self.course.id),
        },
        response.context_data,
    )
@patch('ecommerce.extensions.checkout.views.fetch_enterprise_learner_data')
@httpretty.activate
def test_get_receipt_for_entitlement_order_no_id_required(self, mock_learner_data):
    """ Order owner should be able to see the Receipt Page with no ID verification in context."""
    mock_learner_data.return_value = self.non_enterprise_learner_data
    order = self._create_order_for_receipt(self.user, entitlement=True, id_verification_required=False)

    response = self._get_receipt_response(order.number)

    self.assertEqual(response.status_code, 200)
    self.assertDictContainsSubset(
        {'payment_method': None, 'display_credit_messaging': False},
        response.context_data,
    )
@patch('ecommerce.extensions.checkout.views.fetch_enterprise_learner_data')
@httpretty.activate
def test_get_receipt_for_existing_order_as_staff_user(self, mock_learner_data):
    """ Staff users can preview Receipts for all Orders."""
    mock_learner_data.return_value = self.non_enterprise_learner_data
    staff_user = self.create_user(is_staff=True)
    response = self._visit_receipt_page_with_another_user(
        self._create_order_for_receipt(staff_user), staff_user)

    self.assertEqual(response.status_code, 200)
    self.assertDictContainsSubset(
        {'payment_method': None, 'display_credit_messaging': False},
        response.context_data,
    )
@patch('ecommerce.extensions.checkout.views.fetch_enterprise_learner_data')
@httpretty.activate
def test_get_receipt_for_existing_order_user_not_owner(self, mock_learner_data):
    """ Users that don't own the Order shouldn't be able to see the Receipt. """
    mock_learner_data.return_value = self.non_enterprise_learner_data
    other_user = self.create_user()
    response = self._visit_receipt_page_with_another_user(
        self._create_order_for_receipt(other_user), other_user)

    self.assertEqual(response.status_code, 404)
    self.assertDictContainsSubset(
        {'order_history_url': self.site.siteconfiguration.build_lms_url('account/settings')},
        response.context_data,
    )
@patch('ecommerce.extensions.checkout.views.fetch_enterprise_learner_data')
@httpretty.activate
def test_order_data_for_credit_seat(self, mock_learner_data):
    """ Ensure that the context is updated with Order data.

    A verified learner buying a credit seat must see the credit messaging
    on the receipt page.
    """
    mock_learner_data.return_value = self.non_enterprise_learner_data
    order = self.create_order(credit=True)
    self.mock_verification_status_api(
        self.site,
        self.user,
        status=200,
        is_verified=True
    )
    # The previous version also built a `body` dict and read the seat's
    # course_key; neither fed any assertion, so that dead code is removed.
    response = self._get_receipt_response(order.number)

    self.assertEqual(response.status_code, 200)
    self.assertTrue(response.context_data['display_credit_messaging'])
@patch('ecommerce.extensions.checkout.views.fetch_enterprise_learner_data')
@httpretty.activate
def test_order_value_unlocalized_for_tracking(self, mock_learner_data):
    """ The tracking attribute must carry the unlocalized order total, even under a non-English locale. """
    mock_learner_data.return_value = self.non_enterprise_learner_data
    order = self._create_order_for_receipt(self.user)
    self.client.cookies.load({settings.LANGUAGE_COOKIE_NAME: 'fr'})

    response = self._get_receipt_response(order.number)

    self.assertEqual(response.status_code, 200)
    self.assertContains(response, 'data-total-amount="{}"'.format(order.total_incl_tax))
@patch('ecommerce.extensions.checkout.views.fetch_enterprise_learner_data')
@httpretty.activate
def test_dashboard_link_for_course_purchase(self, mock_learner_data):
    """
    The dashboard link at the bottom of the receipt for a course purchase
    should point to the user dashboard.
    """
    mock_learner_data.return_value = self.non_enterprise_learner_data
    order = self._create_order_for_receipt(self.user)
    response = self._get_receipt_response(order.number)
    self.assertEqual(response.status_code, 200)
    # assertDictContainsSubset is deprecated (removed in Python 3.12);
    # assert the single expected context key directly.
    self.assertEqual(
        response.context_data['order_dashboard_url'],
        self.site.siteconfiguration.build_lms_url('dashboard')
    )
@patch('ecommerce.extensions.checkout.views.fetch_enterprise_learner_data')
@httpretty.activate
def test_dashboard_link_for_bundle_purchase(self, mock_learner_data):
    """
    The dashboard link at the bottom of the receipt for a bundle purchase
    should point to the program dashboard.
    """
    mock_learner_data.return_value = self.non_enterprise_learner_data
    order = self._create_order_for_receipt(self.user)
    bundle_id = TEST_BUNDLE_ID
    # Tag the basket as a bundle so the receipt treats it as a program purchase.
    BasketAttribute.objects.update_or_create(
        basket=order.basket,
        attribute_type=BasketAttributeType.objects.get(name='bundle_identifier'),
        value_text=bundle_id
    )
    response = self._get_receipt_response(order.number)
    self.assertEqual(response.status_code, 200)
    # assertDictContainsSubset is deprecated (removed in Python 3.12);
    # assert the single expected context key directly.
    self.assertEqual(
        response.context_data['order_dashboard_url'],
        self.site.siteconfiguration.build_lms_url(
            'dashboard/programs/{}'.format(bundle_id)
        )
    )
@patch('ecommerce.extensions.checkout.views.fetch_enterprise_learner_data')
@httpretty.activate
def test_order_without_basket(self, mock_learner_data):
    """The receipt page should still render when the order's basket is gone."""
    mock_learner_data.return_value = self.non_enterprise_learner_data
    order = self.create_order()
    # Delete the backing basket to simulate an orphaned order.
    Basket.objects.filter(id=order.basket.id).delete()
    response = self._get_receipt_response(order.number)
    self.assertEqual(response.status_code, 200)
@patch('ecommerce.extensions.checkout.views.fetch_enterprise_learner_data')
@httpretty.activate
def test_enterprise_learner_dashboard_link_in_messages(self, mock_learner_data):
    """
    The receipt page should include a message with a link to the enterprise
    learner portal for a learner if response from enterprise shows the portal
    is configured.
    """
    mock_learner_data.return_value = self.enterprise_learner_data_with_portal
    order = self._create_order_for_receipt(self.user)
    # Tag the basket as a bundle so the enterprise messaging path runs.
    BasketAttribute.objects.update_or_create(
        basket=order.basket,
        attribute_type=BasketAttributeType.objects.get(name='bundle_identifier'),
        value_text=TEST_BUNDLE_ID
    )
    response = self._get_receipt_response(order.number)
    queued_messages = list(response.context['messages'])
    expected_message = (
        'Your company, Test Company, has a dedicated page where you can see all of '
        'your sponsored courses. Go to <a href="http://{}/test-company">'
        'your learner portal</a>.'
    ).format(settings.ENTERPRISE_LEARNER_PORTAL_HOSTNAME)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(len(queued_messages), 1)
    self.assertEqual(expected_message, str(queued_messages[0]))
@patch('ecommerce.extensions.checkout.views.fetch_enterprise_learner_data')
@httpretty.activate
@ddt.data(
    ({'results': []}, None),
    (None, [KeyError])
)
@ddt.unpack
def test_enterprise_not_enabled_for_learner_dashboard_link_in_messages(self, learner_data,
                                                                       exception, mock_learner_data):
    """
    The receipt page should not include a message with a link to the enterprise
    learner portal for a learner if response from enterprise is empty results or error.
    """
    # When ``exception`` is None the mock falls back to ``return_value``
    # (the empty-results case); when it is ``[KeyError]`` the first call
    # raises instead. Both paths must suppress the portal message.
    mock_learner_data.side_effect = exception
    mock_learner_data.return_value = learner_data
    order = self._create_order_for_receipt(self.user)
    BasketAttribute.objects.update_or_create(
        basket=order.basket,
        attribute_type=BasketAttributeType.objects.get(name='bundle_identifier'),
        value_text='test_bundle'
    )
    response = self._get_receipt_response(order.number)
    response_messages = list(response.context['messages'])
    # Page still renders; no enterprise-portal message is queued.
    self.assertEqual(response.status_code, 200)
    self.assertEqual(len(response_messages), 0)
@patch('ecommerce.extensions.checkout.views.fetch_enterprise_learner_data')
@httpretty.activate
def test_no_enterprise_learner_dashboard_link_in_messages(self, mock_learner_data):
    """
    The receipt page should NOT include a message with a link to the enterprise
    learner portal for a learner if response from enterprise shows the portal
    is not configured.
    """
    mock_learner_data.return_value = self.enterprise_learner_data_no_portal
    order = self._create_order_for_receipt(self.user)
    # Tag the basket as a bundle so the enterprise messaging path runs.
    BasketAttribute.objects.update_or_create(
        basket=order.basket,
        attribute_type=BasketAttributeType.objects.get(name='bundle_identifier'),
        value_text=TEST_BUNDLE_ID
    )
    response = self._get_receipt_response(order.number)
    queued_messages = list(response.context['messages'])
    self.assertEqual(response.status_code, 200)
    self.assertEqual(len(queued_messages), 0)
@patch('ecommerce.extensions.checkout.views.fetch_enterprise_learner_data')
@httpretty.activate
def test_order_dashboard_url_points_to_enterprise_learner_portal(self, mock_learner_data):
    """
    The "Go to dashboard" link at the bottom of the receipt page should
    point to the enterprise learner portal if the response from enterprise
    shows the portal is configured
    """
    mock_learner_data.return_value = self.enterprise_learner_data_with_portal
    order = self._create_order_for_receipt(self.user)
    BasketAttribute.objects.update_or_create(
        basket=order.basket,
        attribute_type=BasketAttributeType.objects.get(name='bundle_identifier'),
        value_text='test_bundle'
    )
    response = self._get_receipt_response(order.number)
    self.assertEqual(response.status_code, 200)
    # The portal URL is derived from the configured hostname and the
    # enterprise slug returned by the learner-data fixture.
    portal_url = "http://" + settings.ENTERPRISE_LEARNER_PORTAL_HOSTNAME + "/test-company"
    self.assertEqual(response.context_data['order_dashboard_url'], portal_url)
@patch('ecommerce.extensions.checkout.views.fetch_enterprise_learner_data')
@httpretty.activate
def test_go_to_dashboard_points_to_lms_dashboard(self, mock_learner_data):
    """
    The "Go to dashboard" link at the bottom of the receipt page should
    point to the lms dashboard if the response from enterprise
    shows the portal is not configured
    """
    mock_learner_data.return_value = self.enterprise_learner_data_no_portal
    order = self._create_order_for_receipt(self.user)
    BasketAttribute.objects.update_or_create(
        basket=order.basket,
        attribute_type=BasketAttributeType.objects.get(name='bundle_identifier'),
        value_text='test_bundle'
    )
    response = self._get_receipt_response(order.number)
    self.assertEqual(response.status_code, 200)
    # Without a learner portal the link falls back to the LMS program dashboard.
    lms_dashboard = self.site.siteconfiguration.build_lms_url('dashboard/programs/test_bundle')
    self.assertEqual(response.context_data['order_dashboard_url'], lms_dashboard)
| eduNEXT/edunext-ecommerce | ecommerce/extensions/checkout/tests/test_views.py | Python | agpl-3.0 | 26,972 | [
"VisIt"
] | 4274a962f721e917d58da308e19dc93e97c67d2a2f3fd972f2c18ad6ce9f334c |
#!/usr/bin/env python
import argparse
import logging
import csv
import collections
import numpy as np
import pysam
from argparse import RawDescriptionHelpFormatter
csv.field_size_limit(1000000000)
def getOptions():
    """ Parse command-line options and return the populated namespace. """
    description="""This script can be used to calculates coverage (RPKM and APN) in two different settings:\n
(1) Coverage can be calculated across an entire genomic region. To do
this a 3-column bed file must be provided (Try fasta2bed.py).
col[0] = chromosome/fusion name (eg., chr2L or S7_SI)
col[1] = start position (i.e., '0')
col[2] = end position (i.e., length)
(2) Coverage can also be calculated by excising specific exons/fusions
from a genome. For example if you have aligned to the genome, but want
coverage at the exon level. For this a 4-column bed must be provided.
col[0] = chromosome name (eg., chr2L)
col[1] = exon/fusion start position (eg., 2929)
col[2] = exon/fusion end position (eg., 3090)
col[3] = exon/fusion name (eg., S7_SI)
IMPORTANT: Setting 2 requires a lot of RAM ~10-12GB for calculating
coverage using fly fusions """
    arg_parser = argparse.ArgumentParser(description=description, formatter_class=RawDescriptionHelpFormatter)
    # Required inputs.
    arg_parser.add_argument("-m", "--mpileup", dest="mname", action='store', required=True, help="mpileup file [Required]", metavar="MPILEUP_FILE")
    arg_parser.add_argument("-n", "--name", dest="name", action='store', required=True, help="Name of file to be printed in output")
    arg_parser.add_argument("-s", "--sam", dest="sname", action='store', required=True, help="BAM alignment file [Required]", metavar="BAM_FILE")
    arg_parser.add_argument("-b", "--bed", dest="bname", action='store', required=True, help="bed file (3 or 4 columns) [Required]", metavar="BED_FILE")
    # Optional switches.
    arg_parser.add_argument("-c", "--cv", dest="cv", action='store_true', required=False, help="A flag to indicate if you want output for the coefficient of variation [OPTIONAL]")
    arg_parser.add_argument("-g", "--log", dest="log", action='store', required=False, help="Log File", metavar="LOG_FILE")
    arg_parser.add_argument("-o", "--out", dest="out", action='store', required=True, help="Out File", metavar="OUT_FILE")
    return arg_parser.parse_args()
def setLogger(fname, loglevel):
    """Configure root logging to write timestamped records to *fname*."""
    log_settings = {
        'filename': fname,
        'filemode': 'w',  # truncate any previous log
        'level': loglevel,
        'format': '%(asctime)s - %(levelname)s - %(message)s',
    }
    logging.basicConfig(**log_settings)
# SAM Functions
def read_sam(args):
    """ Read BAM file to get read length and number of mapped reads.

    Returns (num_mapped_reads, read_length) where read_length is the
    maximum sequence length seen. Note: if you have allowed ambiguous
    mapping then reads are counted multiple times. """
    logging.info("Reading the BAM File '%s'." % args.sname)
    # SAM FLAG values treated as "not aligned", including PE records whose
    # read (and/or mate) is unmapped.
    unaligned_flags = frozenset([4, 77, 141, 181, 121, 133, 117, 69])
    num_mapped_reads = 0
    read_length = 0
    bamfile = pysam.AlignmentFile(args.sname, 'rb')
    for read in bamfile:
        record = str(read).strip().split('\t')
        # BUG FIX: the original test was
        #   record[1] != 4 or record[1] != 77 or ...
        # which (a) compared a string field against ints and (b) chained
        # '!=' with 'or', so it was always true and EVERY record was
        # counted as mapped. Compare the integer flag against the set.
        if int(record[1]) not in unaligned_flags:
            num_mapped_reads += 1
            read_length = max(read_length, len(record[9]))  # find the maximum read length
    return (num_mapped_reads, read_length)
# BED Functions
def read_bed(args):
    """ Read BED file and create a dictionary containing all information.

    Returns a pair (bdict, cdict):
      * bdict: region name -> {chrom, start, end, region_length, count},
        where 'count' is a numpy vector of zeros (one slot per base) that
        read_mpileup() later fills with per-base depths.
      * cdict: chrom -> {position -> region name}, a reverse lookup that
        lets read_mpileup() map each mpileup row to its region quickly.
    """
    logging.info("Reading the BED File '%s'." % args.bname)
    bdict = collections.defaultdict(dict)
    with open(args.bname,'r') as BED:
        reader = csv.reader(BED,delimiter='\t')
        for row in reader:
            if len(row) == 4: # If BED file has 4 columns
                chrom = row[0]
                start = int(row[1])
                end = int(row[2])
                length = end - start
                fusion = row[3]
            elif len(row) == 3: # If BED file has 3 columns
                chrom = row[0]
                start = int(row[1])
                end = int(row[2])
                length = end
                fusion = row[0]
            else:
                logging.error("I can only handle 3 or 4 column bed files. See Help for descriptions")
                exit(-1)
            bdict[fusion]['chrom'] = chrom
            bdict[fusion]['start'] = start
            bdict[fusion]['end'] = end
            # NOTE(review): region_length is one larger than the zero vector
            # allocated below (BED intervals are half-open, so end - start is
            # already the interval length). The +1 looks like a deliberate
            # 1-based convention, but confirm against downstream consumers of
            # 'region_length' (apn/rpkm denominators in calc_coverage).
            bdict[fusion]['region_length'] = length + 1 # convert back to 1 based scale
            bdict[fusion]['count'] = np.zeros(length) # create a holding vector of 0's as long as the region, I will replace the 0's with counts from the mpileup
    cdict = collections.defaultdict(dict)
    for fusion in bdict:
        chrom = bdict[fusion]['chrom']
        start = bdict[fusion]['start']
        end = bdict[fusion]['end']
        # xrange is Python 2 only; this script targets Python 2.
        cdict[chrom].update(dict((x,fusion) for x in xrange(start,end+1))) # create a look up dictionary by chromosome. This will make parsing the mpileup faster.
    return(bdict,cdict)
# MPILEUP Functions
def read_mpileup(args, bdict, cdict):
    """ Read mpileup and store depth and length into dictionary.

    For every mpileup row whose (chromosome, position) falls inside a BED
    region (per *cdict*), the depth (column 4) is written into that region's
    'count' vector in *bdict*. Positions outside every region are skipped.
    """
    logging.info("Reading the Mpileup File '%s'." % args.mname)
    with open(args.mname, 'r') as MPILEUP:
        reader = csv.reader(MPILEUP, delimiter='\t', quoting=csv.QUOTE_NONE)
        for row in reader:
            mchrom = row[0]
            mpos = int(row[1]) - 1  # mpileups are 1-based
            mdepth = int(row[3])
            try:
                fusion = cdict[mchrom][mpos]
                loc = mpos - bdict[fusion]['start']
                bdict[fusion]['count'][loc] = mdepth
            except (KeyError, IndexError):
                # BUG FIX: this was a bare 'except', which silently hid ANY
                # error (including real bugs). Catch only the lookup errors
                # that mean "position not covered by a BED region".
                continue
# Coverage Functions
def calc_coverage(bdict, num_mapped_reads, read_length):
    """ Calculate coverage metrics for every region in *bdict*.

    Fills in, per region: estimated number of reads in the region, average
    per-nucleotide coverage (APN), reads per kilobase per million mapped
    reads (RPKM), mean and standard deviation of per-base depth, and the
    coefficient of variation (CV). """
    logging.info("Calculating Coverage Counts")
    for info in bdict.values():
        counts = info['count']
        depth = np.sum(counts)
        info['depth'] = int(depth)
        info['mean'] = np.mean(counts)
        info['std'] = np.std(counts)
        if depth == 0:
            # No coverage at all: define every derived metric as 0.
            info['reads_in_region'] = 0
            info['apn'] = 0
            info['rpkm'] = 0
            info['cv'] = 0
        else:
            # Estimate read count from total depth / read length.
            info['reads_in_region'] = depth / float(read_length)
            # Average per-nucleotide coverage: depth / region length.
            info['apn'] = depth / float(info['region_length'])
            # RPKM as in Mortazavi et al.
            info['rpkm'] = (1000000000.0 * info['reads_in_region']) / float(num_mapped_reads * info['region_length'])
            # Coefficient of variation of the per-base depths.
            info['cv'] = info['std'] / info['mean']
# Output Functions
def writeOutput(args, bdict, num_mapped_reads, read_length):
    """ Write one CSV row of coverage results per region.

    Output is assembled by hand rather than via the csv module because the
    csv module's output did not behave well with SAS downstream. """
    logging.info("Writing Output")
    # Columns pulled from each region's result dict, in output order.
    base_cols = ['region_length', 'depth', 'reads_in_region', 'apn', 'rpkm']
    if args.cv:
        header = ['sample_id','event_id','mapped_reads','read_length','region_length','region_depth','reads_in_region','apn','rpkm','mean','std','cv']
        columns = base_cols + ['mean', 'std', 'cv']
    else:
        header = ['sample_id','fusion_id','mapped_reads','read_length','region_length','region_depth','reads_in_region','apn','rpkm']
        columns = base_cols
    with open(args.out, 'wb') as OUT:
        OUT.write(','.join(header) + '\n')
        for key in bdict:
            values = [args.name, key, num_mapped_reads, read_length]
            values.extend(bdict[key][col] for col in columns)
            OUT.write(','.join(str(x) for x in values) + '\n')
def main():
    """ MAIN Function: parse options, gather alignment/region data, then
    compute and write coverage metrics. """
    options = getOptions()
    # Logging is only enabled when -g/--log was supplied.
    if options.log:
        setLogger(options.log, logging.INFO)
    # Mapped-read count and max read length come from the BAM file.
    num_mapped_reads, read_length = read_sam(options)
    # BED regions define where mpileup depths are tallied.
    bdict, cdict = read_bed(options)
    read_mpileup(options, bdict, cdict)
    # Derive APN/RPKM and related metrics, then emit the CSV.
    calc_coverage(bdict, num_mapped_reads, read_length)
    writeOutput(options, bdict, num_mapped_reads, read_length)
if __name__=='__main__':
    main()
    # NOTE(review): this is logged after main() returns and is a no-op
    # unless logging was configured (i.e. the -g/--log option was given).
    logging.info("Script Complete")
| McIntyre-Lab/papers | newman_events_2017/python_workflow/programs/rpkm_calculate.py | Python | lgpl-3.0 | 9,755 | [
"pysam"
] | 5b3a9d08f2ce20eedfc1f01680f8e99e454ce2e72a96a1b056a4bd8297759622 |
# Copyright (c) 2003-2016 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""classes checker for Python code
"""
from __future__ import generators
import sys
from collections import defaultdict
import six
import astroid
from astroid.bases import Generator, BUILTINS
from astroid.exceptions import InconsistentMroError, DuplicateBasesError
from astroid import objects
from astroid.scoped_nodes import function_to_method
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import (
PYMETHODS, SPECIAL_METHODS_PARAMS,
overrides_a_method, check_messages, is_attr_private,
is_attr_protected, node_frame_class, is_builtin_object,
decorated_with_property, unimplemented_abstract_methods,
decorated_with, class_is_abstract,
safe_infer, has_known_bases)
from pylint.utils import deprecated_option, get_global_option
# Name of the iterator-advance method differs between Python 2 and 3.
if sys.version_info >= (3, 0):
    NEXT_METHOD = '__next__'
else:
    NEXT_METHOD = 'next'

# Methods whose presence makes an object iterable (iterator protocol or
# the legacy sequence protocol); used when validating __slots__.
ITER_METHODS = ('__iter__', '__getitem__')

# Builtins this checker reports as invalid base classes.
INVALID_BASE_CLASSES = {'bool', 'range', 'slice', 'memoryview'}
def _get_method_args(method):
args = method.args.args
if method.type in ('classmethod', 'method'):
return len(args) - 1
return len(args)
def _is_invalid_base_class(cls):
    """True if *cls* is one of the builtins this checker rejects as a base."""
    if cls.name not in INVALID_BASE_CLASSES:
        return False
    return is_builtin_object(cls)
def _has_data_descriptor(cls, attr):
    """Return True if *attr* on *cls* can be inferred to be a data
    descriptor, i.e. an instance exposing both __get__ and __set__.

    Inference failures are treated as True so the caller does not emit a
    false positive based on incomplete information.
    """
    attributes = cls.getattr(attr)
    for attribute in attributes:
        try:
            for inferred in attribute.infer():
                if isinstance(inferred, astroid.Instance):
                    try:
                        # A data descriptor must define both accessors.
                        inferred.getattr('__get__')
                        inferred.getattr('__set__')
                    except astroid.NotFoundError:
                        continue
                    else:
                        return True
        except astroid.InferenceError:
            # Can't infer, avoid emitting a false positive in this case.
            return True
    return False
def _called_in_methods(func, klass, methods):
    """ Check if the func was called in any of the given methods,
    belonging to the *klass*. Returns True if so, False otherwise.
    """
    if not isinstance(func, astroid.FunctionDef):
        return False
    for method in methods:
        try:
            infered = klass.getattr(method)
        except astroid.NotFoundError:
            continue
        for infer_method in infered:
            # Walk every call expression in the candidate method's body.
            for callfunc in infer_method.nodes_of_class(astroid.Call):
                try:
                    bound = next(callfunc.func.infer())
                except (astroid.InferenceError, StopIteration):
                    continue
                # Only calls that resolve to bound methods can be a call
                # to *func* through self/an instance.
                if not isinstance(bound, astroid.BoundMethod):
                    continue
                func_obj = bound._proxied
                if isinstance(func_obj, astroid.UnboundMethod):
                    # Unwrap to the underlying function definition.
                    func_obj = func_obj._proxied
                # NOTE(review): the match is by name only; two distinct
                # functions sharing a name would compare equal here.
                if func_obj.name == func.name:
                    return True
    return False
def _is_attribute_property(name, klass):
    """ Check if the given attribute *name* is a property
    in the given *klass*.

    It will look for `property` calls or for functions
    with the given name, decorated by `property` or `property`
    subclasses.

    Returns ``True`` if the name is a property in the given klass,
    ``False`` otherwise.
    """
    try:
        attributes = klass.getattr(name)
    except astroid.NotFoundError:
        return False
    property_name = "{0}.property".format(BUILTINS)
    for attr in attributes:
        try:
            infered = next(attr.infer())
        except astroid.InferenceError:
            continue
        # Case 1: a function decorated with @property (or a subclass of it).
        if (isinstance(infered, astroid.FunctionDef) and
                decorated_with_property(infered)):
            return True
        # Case 2: a direct assignment such as ``x = property(...)``.
        if infered.pytype() == property_name:
            return True
    return False
def _has_bare_super_call(fundef_node):
    """Return True if *fundef_node* contains a zero-argument ``super()`` call."""
    return any(
        isinstance(call.func, astroid.Name)
        and call.func.name == 'super'
        and not call.args
        for call in fundef_node.nodes_of_class(astroid.Call)
    )
def _safe_infer_call_result(node, caller, context=None):
    """
    Safely infer the return value of a function.

    Returns None if inference failed or if there is some ambiguity (more than
    one node has been inferred). Otherwise returns infered value.
    """
    try:
        inferit = node.infer_call_result(caller, context=context)
        value = next(inferit)
    except astroid.InferenceError:
        return  # inference failed
    except StopIteration:
        return  # no values infered
    try:
        # Pull a second result: if one exists the call is ambiguous, so we
        # deliberately return None instead of guessing between candidates.
        next(inferit)
        return  # there is ambiguity on the inferred node
    except astroid.InferenceError:
        return  # there is some kind of ambiguity
    except StopIteration:
        # Exactly one result was inferred; that is the value we trust.
        return value
# Message catalog for this checker:
#   message id -> (format string, symbolic name, description used in docs).
MSGS = {
    'F0202': ('Unable to check methods signature (%s / %s)',
              'method-check-failed',
              'Used when Pylint has been unable to check methods signature '
              'compatibility for an unexpected reason. Please report this kind '
              'if you don\'t make sense of it.'),

    'E0202': ('An attribute defined in %s line %s hides this method',
              'method-hidden',
              'Used when a class defines a method which is hidden by an '
              'instance attribute from an ancestor class or set by some '
              'client code.'),
    'E0203': ('Access to member %r before its definition line %s',
              'access-member-before-definition',
              'Used when an instance member is accessed before it\'s actually '
              'assigned.'),
    'W0201': ('Attribute %r defined outside __init__',
              'attribute-defined-outside-init',
              'Used when an instance attribute is defined outside the __init__ '
              'method.'),

    'W0212': ('Access to a protected member %s of a client class', # E0214
              'protected-access',
              'Used when a protected member (i.e. class member with a name '
              'beginning with an underscore) is access outside the class or a '
              'descendant of the class where it\'s defined.'),

    'E0211': ('Method has no argument',
              'no-method-argument',
              'Used when a method which should have the bound instance as '
              'first argument has no argument defined.'),
    'E0213': ('Method should have "self" as first argument',
              'no-self-argument',
              'Used when a method has an attribute different the "self" as '
              'first argument. This is considered as an error since this is '
              'a so common convention that you shouldn\'t break it!'),

    'C0202': ('Class method %s should have %s as first argument',
              'bad-classmethod-argument',
              'Used when a class method has a first argument named differently '
              'than the value specified in valid-classmethod-first-arg option '
              '(default to "cls"), recommended to easily differentiate them '
              'from regular instance methods.'),
    'C0203': ('Metaclass method %s should have %s as first argument',
              'bad-mcs-method-argument',
              'Used when a metaclass method has a first agument named '
              'differently than the value specified in valid-classmethod-first'
              '-arg option (default to "cls"), recommended to easily '
              'differentiate them from regular instance methods.'),
    'C0204': ('Metaclass class method %s should have %s as first argument',
              'bad-mcs-classmethod-argument',
              'Used when a metaclass class method has a first argument named '
              'differently than the value specified in valid-metaclass-'
              'classmethod-first-arg option (default to "mcs"), recommended to '
              'easily differentiate them from regular instance methods.'),
    'W0211': ('Static method with %r as first argument',
              'bad-staticmethod-argument',
              'Used when a static method has "self" or a value specified in '
              'valid-classmethod-first-arg option or '
              'valid-metaclass-classmethod-first-arg option as first argument.'
             ),
    'R0201': ('Method could be a function',
              'no-self-use',
              'Used when a method doesn\'t use its bound instance, and so could '
              'be written as a function.'
             ),

    'W0221': ('Arguments number differs from %s %r method',
              'arguments-differ',
              'Used when a method has a different number of arguments than in '
              'the implemented interface or in an overridden method.'),
    'W0222': ('Signature differs from %s %r method',
              'signature-differs',
              'Used when a method signature is different than in the '
              'implemented interface or in an overridden method.'),
    'W0223': ('Method %r is abstract in class %r but is not overridden',
              'abstract-method',
              'Used when an abstract method (i.e. raise NotImplementedError) is '
              'not overridden in concrete class.'
             ),
    'W0231': ('__init__ method from base class %r is not called',
              'super-init-not-called',
              'Used when an ancestor class method has an __init__ method '
              'which is not called by a derived class.'),
    'W0232': ('Class has no __init__ method',
              'no-init',
              'Used when a class has no __init__ method, neither its parent '
              'classes.'),
    'W0233': ('__init__ method from a non direct base class %r is called',
              'non-parent-init-called',
              'Used when an __init__ method is called on a class which is not '
              'in the direct ancestors for the analysed class.'),
    'E0236': ('Invalid object %r in __slots__, must contain '
              'only non empty strings',
              'invalid-slots-object',
              'Used when an invalid (non-string) object occurs in __slots__.'),
    'E0237': ('Assigning to attribute %r not defined in class slots',
              'assigning-non-slot',
              'Used when assigning to an attribute not defined '
              'in the class slots.'),
    'E0238': ('Invalid __slots__ object',
              'invalid-slots',
              'Used when an invalid __slots__ is found in class. '
              'Only a string, an iterable or a sequence is permitted.'),
    'E0239': ('Inheriting %r, which is not a class.',
              'inherit-non-class',
              'Used when a class inherits from something which is not a '
              'class.'),
    'E0240': ('Inconsistent method resolution order for class %r',
              'inconsistent-mro',
              'Used when a class has an inconsistent method resolutin order.'),
    'E0241': ('Duplicate bases for class %r',
              'duplicate-bases',
              'Used when a class has duplicate bases.'),
    'R0202': ('Consider using a decorator instead of calling classmethod',
              'no-classmethod-decorator',
              'Used when a class method is defined without using the decorator '
              'syntax.'),
    'R0203': ('Consider using a decorator instead of calling staticmethod',
              'no-staticmethod-decorator',
              'Used when a static method is defined without using the decorator '
              'syntax.'),
    }
class ClassChecker(BaseChecker):
    """checks for :
    * methods without self as first argument
    * overridden methods signature
    * access only to existent members via self
    * attributes not defined in the __init__ method
    * unreachable code
    """

    __implements__ = (IAstroidChecker,)

    # configuration section name
    name = 'classes'
    # messages
    msgs = MSGS
    priority = -2
    # configuration options
    options = (('ignore-iface-methods',
                # Kept only for backward compatibility; the option is unused.
                deprecated_option(opt_type="csv",
                                  help_msg="This is deprecated, because "
                                           "it is not used anymore.",
                                  deprecation_msg="This option %r will be "
                                                  "removed in Pylint 2.0")),

               ('defining-attr-methods',
                {'default' : ('__init__', '__new__', 'setUp'),
                 'type' : 'csv',
                 'metavar' : '<method names>',
                 'help' : 'List of method names used to declare (i.e. assign) \
instance attributes.'}
               ),
               ('valid-classmethod-first-arg',
                {'default' : ('cls',),
                 'type' : 'csv',
                 'metavar' : '<argument names>',
                 'help' : 'List of valid names for the first argument in \
a class method.'}
               ),
               ('valid-metaclass-classmethod-first-arg',
                {'default' : ('mcs',),
                 'type' : 'csv',
                 'metavar' : '<argument names>',
                 'help' : 'List of valid names for the first argument in \
a metaclass class method.'}
               ),
               ('exclude-protected',
                {
                    'default': (
                        # namedtuple public API.
                        '_asdict', '_fields', '_replace', '_source', '_make'),
                    'type': 'csv',
                    'metavar': '<protected access exclusions>',
                    'help': ('List of member names, which should be excluded '
                             'from the protected access warning.')}
               ))
def __init__(self, linter=None):
    """Initialize per-run state used while walking class bodies."""
    BaseChecker.__init__(self, linter)
    # Stack of {attribute name -> access nodes} maps, one per nested class
    # (pushed in visit_classdef, popped in leave_classdef).
    self._accessed = []
    # First-argument names of methods currently being visited.
    # NOTE(review): popped in leave_functiondef; presumably pushed by
    # _check_first_arg_for_type (not visible here) — confirm.
    self._first_attrs = []
    self._meth_could_be_func = None
def visit_classdef(self, node):
    """init visit variable _accessed
    """
    # New scope: push a fresh {attr name -> access nodes} map for this class.
    self._accessed.append(defaultdict(list))
    self._check_bases_classes(node)
    # if not an exception or a metaclass
    if node.type == 'class' and has_known_bases(node):
        try:
            node.local_attr('__init__')
        except astroid.NotFoundError:
            # Neither this class nor its ancestors define __init__.
            self.add_message('no-init', args=node, node=node)
        self._check_slots(node)
        self._check_proper_bases(node)
        self._check_consistent_mro(node)
def _check_consistent_mro(self, node):
    """Detect that a class has a consistent mro or duplicate bases."""
    try:
        # mro() raises if the linearization cannot be computed.
        node.mro()
    except InconsistentMroError:
        self.add_message('inconsistent-mro', args=node.name, node=node)
    except DuplicateBasesError:
        self.add_message('duplicate-bases', args=node.name, node=node)
    except NotImplementedError:
        # Old style class, there's no mro so don't do anything.
        pass
def _check_proper_bases(self, node):
    """
    Detect that a class inherits something which is not
    a class or a type.
    """
    for base in node.bases:
        ancestor = safe_infer(base)
        if ancestor in (astroid.YES, None):
            # Could not infer the base; give it the benefit of the doubt.
            continue
        if (isinstance(ancestor, astroid.Instance) and
                ancestor.is_subtype_of('%s.type' % (BUILTINS,))):
            # Instances of (subclasses of) type are acceptable bases.
            continue
        if (not isinstance(ancestor, astroid.ClassDef) or
                _is_invalid_base_class(ancestor)):
            self.add_message('inherit-non-class',
                             args=base.as_string(), node=node)
def leave_classdef(self, cnode):
    """close a class node:
    check that instance attributes are defined in __init__ and check
    access to existent members
    """
    # check access to existent members on non metaclass classes
    ignore_mixins = get_global_option(self, 'ignore-mixin-members',
                                      default=True)
    if ignore_mixins and cnode.name[-5:].lower() == 'mixin':
        # We are in a mixin class. No need to try to figure out if
        # something is missing, since it is most likely that it will
        # miss.
        return

    accessed = self._accessed.pop()
    if cnode.type != 'metaclass':
        self._check_accessed_members(cnode, accessed)
    # checks attributes are defined in an allowed method such as __init__
    if not self.linter.is_message_enabled('attribute-defined-outside-init'):
        return
    defining_methods = self.config.defining_attr_methods
    current_module = cnode.root()
    for attr, nodes in six.iteritems(cnode.instance_attrs):
        # skip nodes which are not in the current module and it may screw up
        # the output, while it's not worth it
        nodes = [n for n in nodes if not
                 isinstance(n.statement(), (astroid.Delete, astroid.AugAssign))
                 and n.root() is current_module]
        if not nodes:
            continue # error detected by typechecking
        # check if any method attr is defined in is a defining method
        if any(node.frame().name in defining_methods
               for node in nodes):
            continue

        # check attribute is defined in a parent's __init__
        for parent in cnode.instance_attr_ancestors(attr):
            attr_defined = False
            # check if any parent method attr is defined in is a defining method
            for node in parent.instance_attrs[attr]:
                if node.frame().name in defining_methods:
                    attr_defined = True
            if attr_defined:
                # we're done :)
                break
        # for/else: runs only when NO ancestor defined the attribute in a
        # defining method.
        else:
            # check attribute is defined as a class attribute
            try:
                cnode.local_attr(attr)
            except astroid.NotFoundError:
                for node in nodes:
                    if node.frame().name not in defining_methods:
                        # If the attribute was set by a callfunc in any
                        # of the defining methods, then don't emit
                        # the warning.
                        if _called_in_methods(node.frame(), cnode,
                                              defining_methods):
                            continue
                        self.add_message('attribute-defined-outside-init',
                                         args=attr, node=node)
def visit_functiondef(self, node):
    """check method arguments, overriding"""
    # ignore actual functions
    if not node.is_method():
        return

    klass = node.parent.frame()
    # Assume the method could be a plain function until an access to
    # instance state proves otherwise (checked in leave_functiondef).
    self._meth_could_be_func = True
    # check first argument is self if this is actually a method
    self._check_first_arg_for_type(node, klass.type == 'metaclass')
    if node.name == '__init__':
        self._check_init(node)
        return
    # check signature if the method overloads inherited method
    for overridden in klass.local_attr_ancestors(node.name):
        # get astroid for the searched method
        try:
            meth_node = overridden[node.name]
        except KeyError:
            # we have found the method but it's not in the local
            # dictionary.
            # This may happen with astroid build from living objects
            continue
        if not isinstance(meth_node, astroid.FunctionDef):
            continue
        self._check_signature(node, meth_node, 'overridden', klass)
        # Only the closest ancestor definition is compared.
        break
    if node.decorators:
        for decorator in node.decorators.nodes:
            if isinstance(decorator, astroid.Attribute) and \
                    decorator.attrname in ('getter', 'setter', 'deleter'):
                # attribute affectation will call this method, not hiding it
                return
            if isinstance(decorator, astroid.Name) and decorator.name == 'property':
                # attribute affectation will either call a setter or raise
                # an attribute error, anyway not hiding the function
                return

    # check if the method is hidden by an attribute
    try:
        overridden = klass.instance_attr(node.name)[0] # XXX
        overridden_frame = overridden.frame()
        if (isinstance(overridden_frame, astroid.FunctionDef)
                and overridden_frame.type == 'method'):
            overridden_frame = overridden_frame.parent.frame()
        if (isinstance(overridden_frame, astroid.ClassDef)
                and klass.is_subtype_of(overridden_frame.qname())):
            args = (overridden.root().name, overridden.fromlineno)
            self.add_message('method-hidden', args=args, node=node)
    except astroid.NotFoundError:
        pass

# Async functions receive exactly the same method checks.
visit_asyncfunctiondef = visit_functiondef
    def _check_slots(self, node):
        """Validate the ``__slots__`` declaration of class *node*.

        Emits 'invalid-slots' when an inferred __slots__ value is not
        iterable, and delegates per-element validation (non-empty string
        constants) to _check_slots_elt.
        """
        if '__slots__' not in node.locals:
            return
        for slots in node.igetattr('__slots__'):
            # check if __slots__ is a valid type
            for meth in ITER_METHODS:
                try:
                    slots.getattr(meth)
                    break
                except astroid.NotFoundError:
                    continue
            else:
                # No iteration-protocol method found: __slots__ value is
                # not iterable at all.
                self.add_message('invalid-slots', node=node)
                continue
            if isinstance(slots, astroid.Const):
                # a string, ignore the following checks
                continue
            if not hasattr(slots, 'itered'):
                # we can't obtain the values, maybe a .deque?
                continue
            if isinstance(slots, astroid.Dict):
                # For a dict __slots__, the slot names are the keys.
                values = [item[0] for item in slots.items]
            else:
                values = slots.itered()
            if values is astroid.YES:
                return
            for elt in values:
                try:
                    self._check_slots_elt(elt)
                except astroid.InferenceError:
                    continue
def _check_slots_elt(self, elt):
for infered in elt.infer():
if infered is astroid.YES:
continue
if (not isinstance(infered, astroid.Const) or
not isinstance(infered.value, six.string_types)):
self.add_message('invalid-slots-object',
args=infered.as_string(),
node=elt)
continue
if not infered.value:
self.add_message('invalid-slots-object',
args=infered.as_string(),
node=elt)
def leave_functiondef(self, node):
"""on method node, check if this method couldn't be a function
ignore class, static and abstract methods, initializer,
methods overridden from a parent class.
"""
if node.is_method():
if node.args.args is not None:
self._first_attrs.pop()
if not self.linter.is_message_enabled('no-self-use'):
return
class_node = node.parent.frame()
if (self._meth_could_be_func and node.type == 'method'
and node.name not in PYMETHODS
and not (node.is_abstract() or
overrides_a_method(class_node, node.name) or
decorated_with_property(node) or
(six.PY3 and _has_bare_super_call(node)))):
self.add_message('no-self-use', node=node)
def visit_attribute(self, node):
"""check if the getattr is an access to a class member
if so, register it. Also check for access to protected
class member from outside its class (but ignore __special__
methods)
"""
attrname = node.attrname
# Check self
if self.is_first_attr(node):
self._accessed[-1][attrname].append(node)
return
if not self.linter.is_message_enabled('protected-access'):
return
self._check_protected_attribute_access(node)
def visit_assignattr(self, node):
if isinstance(node.assign_type(), astroid.AugAssign) and self.is_first_attr(node):
self._accessed[-1][node.attrname].append(node)
self._check_in_slots(node)
    def _check_in_slots(self, node):
        """ Check that the given assattr node
        is defined in the class slots.

        Emits 'assigning-non-slot' when the target class (and all its
        ancestors except object) declare __slots__ but the assigned name
        is neither listed there nor covered by '__dict__', a property or
        a data descriptor.
        """
        infered = safe_infer(node.expr)
        if infered and isinstance(infered, astroid.Instance):
            # _proxied is the class node the instance was built from.
            klass = infered._proxied
            if '__slots__' not in klass.locals or not klass.newstyle:
                return
            slots = klass.slots()
            if slots is None:
                return
            # If any ancestor doesn't use slots, the slots
            # defined for this class are superfluous.
            if any('__slots__' not in ancestor.locals and
                   ancestor.name != 'object'
                   for ancestor in klass.ancestors()):
                return
            if not any(slot.value == node.attrname for slot in slots):
                # If we have a '__dict__' in slots, then
                # assigning any name is valid.
                if not any(slot.value == '__dict__' for slot in slots):
                    if _is_attribute_property(node.attrname, klass):
                        # Properties circumvent the slots mechanism,
                        # so we should not emit a warning for them.
                        return
                    if (node.attrname in klass.locals
                            and _has_data_descriptor(klass, node.attrname)):
                        # Descriptors circumvent the slots mechanism as well.
                        return
                    self.add_message('assigning-non-slot',
                                     args=(node.attrname, ), node=node)
@check_messages('protected-access', 'no-classmethod-decorator',
'no-staticmethod-decorator')
def visit_assign(self, assign_node):
self._check_classmethod_declaration(assign_node)
node = assign_node.targets[0]
if not isinstance(node, astroid.AssignAttr):
return
if self.is_first_attr(node):
return
self._check_protected_attribute_access(node)
def _check_classmethod_declaration(self, node):
"""Checks for uses of classmethod() or staticmethod()
When a @classmethod or @staticmethod decorator should be used instead.
A message will be emitted only if the assignment is at a class scope
and only if the classmethod's argument belongs to the class where it
is defined.
`node` is an assign node.
"""
if not isinstance(node.value, astroid.Call):
return
# check the function called is "classmethod" or "staticmethod"
func = node.value.func
if (not isinstance(func, astroid.Name) or
func.name not in ('classmethod', 'staticmethod')):
return
msg = ('no-classmethod-decorator' if func.name == 'classmethod' else
'no-staticmethod-decorator')
# assignment must be at a class scope
parent_class = node.scope()
if not isinstance(parent_class, astroid.ClassDef):
return
# Check if the arg passed to classmethod is a class member
classmeth_arg = node.value.args[0]
if not isinstance(classmeth_arg, astroid.Name):
return
method_name = classmeth_arg.name
if any(method_name == member.name
for member in parent_class.mymethods()):
self.add_message(msg, node=node.targets[0])
    def _check_protected_attribute_access(self, node):
        '''Given an attribute access node (set or get), check if attribute
        access is legitimate. Call _check_first_attr with node before calling
        this method. Valid cases are:
        * self._attr in a method or cls._attr in a classmethod. Checked by
        _check_first_attr.
        * Klass._attr inside "Klass" class.
        * Klass2._attr inside "Klass" class when Klass2 is a base class of
        Klass.
        '''
        attrname = node.attrname
        # is_attr_protected() decides what counts as protected (presumably
        # a leading underscore, __special__ names excluded -- see utils);
        # names listed in config.exclude_protected are whitelisted.
        if (is_attr_protected(attrname) and
                attrname not in self.config.exclude_protected):
            klass = node_frame_class(node)
            # XXX infer to be more safe and less dirty ??
            # in classes, check we are not getting a parent method
            # through the class object or through super
            callee = node.expr.as_string()
            # We are not in a class, no remaining valid case
            if klass is None:
                self.add_message('protected-access', node=node, args=attrname)
                return
            # If the expression begins with a call to super, that's ok.
            if isinstance(node.expr, astroid.Call) and \
               isinstance(node.expr.func, astroid.Name) and \
               node.expr.func.name == 'super':
                return
            # We are in a class, one remaining valid cases, Klass._attr inside
            # Klass
            if not (callee == klass.name or callee in klass.basenames):
                # Detect property assignments in the body of the class.
                # This is acceptable:
                #
                # class A:
                #     b = property(lambda: self._b)
                stmt = node.parent.statement()
                if (isinstance(stmt, astroid.Assign)
                        and len(stmt.targets) == 1
                        and isinstance(stmt.targets[0], astroid.AssignName)):
                    name = stmt.targets[0].name
                    if _is_attribute_property(name, klass):
                        return
                self.add_message('protected-access', node=node, args=attrname)
def visit_name(self, node):
"""check if the name handle an access to a class member
if so, register it
"""
if self._first_attrs and (node.name == self._first_attrs[-1] or
not self._first_attrs[-1]):
self._meth_could_be_func = False
    def _check_accessed_members(self, node, accessed):
        """check that accessed members are defined

        *node* is the class node; *accessed* maps attribute names to the
        list of access nodes (self.<attr>) collected while visiting the
        class body (see visit_attribute).
        """
        # XXX refactor, probably much simpler now that E0201 is in type checker
        # Accesses mutually exclusive with the definition via a handler for
        # one of these exceptions are presumably deliberate and not reported.
        excs = ('AttributeError', 'Exception', 'BaseException')
        for attr, nodes in six.iteritems(accessed):
            try:
                # is it a class attribute ?
                node.local_attr(attr)
                # yes, stop here
                continue
            except astroid.NotFoundError:
                pass
            # is it an instance attribute of a parent class ?
            try:
                next(node.instance_attr_ancestors(attr))
                # yes, stop here
                continue
            except StopIteration:
                pass
            # is it an instance attribute ?
            try:
                defstmts = node.instance_attr(attr)
            except astroid.NotFoundError:
                pass
            else:
                # filter out augment assignment nodes
                defstmts = [stmt for stmt in defstmts if stmt not in nodes]
                if not defstmts:
                    # only augment assignment for this node, no-member should be
                    # triggered by the typecheck checker
                    continue
                # filter defstmts to only pick the first one when there are
                # several assignments in the same scope
                scope = defstmts[0].scope()
                defstmts = [stmt for i, stmt in enumerate(defstmts)
                            if i == 0 or stmt.scope() is not scope]
                # if there are still more than one, don't attempt to be smarter
                # than we can be
                if len(defstmts) == 1:
                    defstmt = defstmts[0]
                    # check that if the node is accessed in the same method as
                    # it's defined, it's accessed after the initial assignment
                    frame = defstmt.frame()
                    lno = defstmt.fromlineno
                    for _node in nodes:
                        if _node.frame() is frame and _node.fromlineno < lno \
                           and not astroid.are_exclusive(_node.statement(), defstmt, excs):
                            self.add_message('access-member-before-definition',
                                             node=_node, args=(attr, lno))
    def _check_first_arg_for_type(self, node, metaclass=0):
        """check the name of first argument, expect:
        * 'self' for a regular method
        * 'cls' for a class method or a metaclass regular method (actually
          valid-classmethod-first-arg value)
        * 'mcs' for a metaclass class method (actually
          valid-metaclass-classmethod-first-arg)
        * not one of the above for a static method
        """
        # don't care about functions with unknown argument (builtins)
        if node.args.args is None:
            return
        # NOTE: first_arg is the empty list (falsy) when the method takes
        # no arguments, because of the short-circuiting ``and``.
        first_arg = node.args.args and node.argnames()[0]
        # Pushed here; popped again in leave_functiondef.
        self._first_attrs.append(first_arg)
        first = self._first_attrs[-1]
        # static method
        if node.type == 'staticmethod':
            if (first_arg == 'self' or
                    first_arg in self.config.valid_classmethod_first_arg or
                    first_arg in self.config.valid_metaclass_classmethod_first_arg):
                self.add_message('bad-staticmethod-argument', args=first, node=node)
                return
            # A static method has no implicit first argument.
            self._first_attrs[-1] = None
        # class / regular method with no args
        elif not node.args.args:
            self.add_message('no-method-argument', node=node)
        # metaclass
        elif metaclass:
            # metaclass __new__ or classmethod
            if node.type == 'classmethod':
                self._check_first_arg_config(
                    first,
                    self.config.valid_metaclass_classmethod_first_arg, node,
                    'bad-mcs-classmethod-argument', node.name)
            # metaclass regular method
            else:
                self._check_first_arg_config(
                    first,
                    self.config.valid_classmethod_first_arg, node,
                    'bad-mcs-method-argument',
                    node.name)
        # regular class
        else:
            # class method
            if node.type == 'classmethod':
                self._check_first_arg_config(
                    first,
                    self.config.valid_classmethod_first_arg, node,
                    'bad-classmethod-argument',
                    node.name)
            # regular method without self as argument
            elif first != 'self':
                self.add_message('no-self-argument', node=node)
def _check_first_arg_config(self, first, config, node, message,
method_name):
if first not in config:
if len(config) == 1:
valid = repr(config[0])
else:
valid = ', '.join(repr(v) for v in config[:-1])
valid = '%s or %r' % (valid, config[-1])
self.add_message(message, args=(method_name, valid), node=node)
def _check_bases_classes(self, node):
"""check that the given class node implements abstract methods from
base classes
"""
def is_abstract(method):
return method.is_abstract(pass_is_abstract=False)
# check if this class abstract
if class_is_abstract(node):
return
methods = sorted(
unimplemented_abstract_methods(node, is_abstract).items(),
key=lambda item: item[0],
)
for name, method in methods:
owner = method.parent.frame()
if owner is node:
continue
# owner is not this class, it must be a parent class
# check that the ancestor's method is not abstract
if name in node.locals:
# it is redefined as an attribute or with a descriptor
continue
self.add_message('abstract-method', node=node,
args=(name, owner.name))
    def _check_init(self, node):
        """check that the __init__ method call super or ancestors'__init__
        method

        Emits 'super-init-not-called' for every direct base class whose
        __init__ is never invoked, and 'non-parent-init-called' when the
        __init__ of a class that is not an ancestor is invoked.
        """
        if (not self.linter.is_message_enabled('super-init-not-called') and
                not self.linter.is_message_enabled('non-parent-init-called')):
            return
        klass_node = node.parent.frame()
        # to_call maps each direct base class to its __init__ method.
        to_call = _ancestors_to_call(klass_node)
        not_called_yet = dict(to_call)
        for stmt in node.nodes_of_class(astroid.Call):
            expr = stmt.func
            if not isinstance(expr, astroid.Attribute) \
               or expr.attrname != '__init__':
                continue
            # skip the test if using super
            if isinstance(expr.expr, astroid.Call) and \
               isinstance(expr.expr.func, astroid.Name) and \
               expr.expr.func.name == 'super':
                return
            try:
                for klass in expr.expr.infer():
                    if klass is astroid.YES:
                        continue
                    # The infered klass can be super(), which was
                    # assigned to a variable and the `__init__`
                    # was called later.
                    #
                    # base = super()
                    # base.__init__(...)
                    if (isinstance(klass, astroid.Instance) and
                            isinstance(klass._proxied, astroid.ClassDef) and
                            is_builtin_object(klass._proxied) and
                            klass._proxied.name == 'super'):
                        return
                    elif isinstance(klass, objects.Super):
                        return
                    try:
                        del not_called_yet[klass]
                    except KeyError:
                        if klass not in to_call:
                            self.add_message('non-parent-init-called',
                                             node=expr, args=klass.name)
            except astroid.InferenceError:
                continue
        # Whatever is left was never initialized; object.__init__ is exempt.
        for klass, method in six.iteritems(not_called_yet):
            cls = node_frame_class(method)
            if klass.name == 'object' or (cls and cls.name == 'object'):
                continue
            self.add_message('super-init-not-called', args=klass.name, node=node)
    def _check_signature(self, method1, refmethod, class_type, cls):
        """check that the signature of the two given methods match

        `method1` is the overriding method and `refmethod` the ancestor's
        version (see visit_functiondef); `class_type` (e.g. 'overridden')
        is interpolated into the message, `cls` is the class owning
        `method1`. Emits 'arguments-differ' or 'signature-differs'.
        """
        if not (isinstance(method1, astroid.FunctionDef)
                and isinstance(refmethod, astroid.FunctionDef)):
            self.add_message('method-check-failed',
                             args=(method1, refmethod), node=method1)
            return
        # Bind both functions as methods of an instance so their
        # signatures are comparable.
        instance = cls.instanciate_class()
        method1 = function_to_method(method1, instance)
        refmethod = function_to_method(refmethod, instance)
        # Don't care about functions with unknown argument (builtins).
        if method1.args.args is None or refmethod.args.args is None:
            return
        # If we use *args, **kwargs, skip the below checks.
        if method1.args.vararg or method1.args.kwarg:
            return
        # Ignore private to class methods.
        if is_attr_private(method1.name):
            return
        # Ignore setters, they have an implicit extra argument,
        # which shouldn't be taken in consideration.
        if method1.decorators:
            for decorator in method1.decorators.nodes:
                if (isinstance(decorator, astroid.Attribute) and
                        decorator.attrname == 'setter'):
                    return
        method1_args = _get_method_args(method1)
        refmethod_args = _get_method_args(refmethod)
        if method1_args != refmethod_args:
            self.add_message('arguments-differ',
                             args=(class_type, method1.name),
                             node=method1)
        elif len(method1.args.defaults) < len(refmethod.args.defaults):
            # Same argument names, but fewer of them have default values.
            self.add_message('signature-differs',
                             args=(class_type, method1.name),
                             node=method1)
def is_first_attr(self, node):
"""Check that attribute lookup name use first attribute variable name
(self for method, cls for classmethod and mcs for metaclass).
"""
return self._first_attrs and isinstance(node.expr, astroid.Name) and \
node.expr.name == self._first_attrs[-1]
class SpecialMethodsChecker(BaseChecker):
    """Checker which verifies that special methods
    are implemented correctly.
    """
    __implements__ = (IAstroidChecker, )
    name = 'classes'
    msgs = {
        'E0301': ('__iter__ returns non-iterator',
                  'non-iterator-returned',
                  'Used when an __iter__ method returns something which is not an '
                  'iterable (i.e. has no `%s` method)' % NEXT_METHOD,
                  {'old_names': [('W0234', 'non-iterator-returned'),
                                 ('E0234', 'non-iterator-returned')]}),
        'E0302': ('The special method %r expects %s param(s), %d %s given',
                  'unexpected-special-method-signature',
                  'Emitted when a special method was defined with an '
                  'invalid number of parameters. If it has too few or '
                  'too many, it might not work at all.',
                  {'old_names': [('E0235', 'bad-context-manager')]}),
        'E0303': ('__len__ does not return non-negative integer',
                  'invalid-length-returned',
                  'Used when an __len__ method returns something which is not a '
                  'non-negative integer', {}),
    }
    priority = -2

    @check_messages('unexpected-special-method-signature',
                    'non-iterator-returned', 'invalid-length-returned')
    def visit_functiondef(self, node):
        """Dispatch the special-method checks for the visited method."""
        if not node.is_method():
            return
        if node.name == '__iter__':
            self._check_iter(node)
        if node.name == '__len__':
            self._check_len(node)
        if node.name in PYMETHODS:
            self._check_unexpected_method_signature(node)

    # Async special methods get the same treatment.
    visit_asyncfunctiondef = visit_functiondef

    def _check_unexpected_method_signature(self, node):
        """Emit 'unexpected-special-method-signature' when the parameter
        count of *node* disagrees with SPECIAL_METHODS_PARAMS."""
        expected_params = SPECIAL_METHODS_PARAMS[node.name]
        if expected_params is None:
            # This can support a variable number of parameters.
            return
        # Idiom fix: ``not len(x)`` -> ``not x`` (same truthiness).
        if not node.args.args and not node.args.vararg:
            # Method has no parameter, will be caught
            # by no-method-argument.
            return
        if decorated_with(node, [BUILTINS + ".staticmethod"]):
            # We expect to not take in consideration self.
            all_args = node.args.args
        else:
            all_args = node.args.args[1:]
        mandatory = len(all_args) - len(node.args.defaults)
        optional = len(node.args.defaults)
        current_params = mandatory + optional
        if isinstance(expected_params, tuple):
            # The expected number of parameters can be any value from this
            # tuple, although the user should implement the method
            # to take all of them in consideration.
            emit = mandatory not in expected_params
            expected_params = "between %d or %d" % expected_params
        else:
            # If the number of mandatory parameters doesn't
            # suffice, the expected parameters for this
            # function will be deduced from the optional
            # parameters.
            rest = expected_params - mandatory
            if rest == 0:
                emit = False
            elif rest < 0:
                emit = True
            elif rest > 0:
                # rest == 0, < 0 and > 0 are exhaustive, so ``emit``
                # is always bound before the check below.
                emit = not ((optional - rest) >= 0 or node.args.vararg)
        if emit:
            verb = "was" if current_params <= 1 else "were"
            self.add_message('unexpected-special-method-signature',
                             args=(node.name, expected_params, current_params, verb),
                             node=node)

    @staticmethod
    def _is_iterator(node):
        """Return True if *node* looks like an iterator: a generator, or
        an object providing the next method (possibly via its metaclass)."""
        if node is astroid.YES:
            # Just ignore YES objects.
            return True
        if isinstance(node, Generator):
            # Generators can be itered.
            return True
        if isinstance(node, astroid.Instance):
            try:
                node.local_attr(NEXT_METHOD)
                return True
            except astroid.NotFoundError:
                pass
        elif isinstance(node, astroid.ClassDef):
            metaclass = node.metaclass()
            if metaclass and isinstance(metaclass, astroid.ClassDef):
                try:
                    metaclass.local_attr(NEXT_METHOD)
                    return True
                except astroid.NotFoundError:
                    pass
        return False

    def _check_iter(self, node):
        """Emit 'non-iterator-returned' when __iter__ is inferred to
        return something that is not an iterator."""
        infered = _safe_infer_call_result(node, node)
        if infered is not None:
            if not self._is_iterator(infered):
                self.add_message('non-iterator-returned', node=node)

    def _check_len(self, node):
        """Emit 'invalid-length-returned' when __len__ is inferred to
        return anything but a non-negative integer constant."""
        inferred = _safe_infer_call_result(node, node)
        if inferred is None or inferred is astroid.YES:
            return
        if not isinstance(inferred, astroid.Const):
            self.add_message('invalid-length-returned', node=node)
            return
        value = inferred.value
        if not isinstance(value, six.integer_types) or value < 0:
            self.add_message('invalid-length-returned', node=node)
def _ancestors_to_call(klass_node, method='__init__'):
    """Return a dict mapping each direct base class of *klass_node* that
    provides *method* to the inferred method node -- i.e. the ancestors
    whose *method* should (or may) be called from the method node."""
    result = {}
    for base in klass_node.ancestors(recurs=False):
        try:
            result[base] = next(base.igetattr(method))
        except astroid.InferenceError:
            # Base class does not (inferably) provide the method.
            pass
    return result
def node_method(node, method_name):
    """get astroid for <method_name> on the given class node, ensuring it
    is a Function node

    :raises astroid.NotFoundError: when no function named *method_name*
        is defined locally on *node*.
    """
    for node_attr in node.local_attr(method_name):
        # Consistency/bug fix: the rest of this module uses FunctionDef
        # (astroid 2.x name); `astroid.Function` is the removed pre-2.0
        # alias and would raise AttributeError here.
        if isinstance(node_attr, astroid.FunctionDef):
            return node_attr
    raise astroid.NotFoundError(method_name)
def register(linter):
    """required method to auto register this checker """
    # Register both checkers provided by this module, in order.
    for checker_class in (ClassChecker, SpecialMethodsChecker):
        linter.register_checker(checker_class(linter))
| bgris/ODL_bgris | lib/python3.5/site-packages/pylint/checkers/classes.py | Python | gpl-3.0 | 47,742 | [
"VisIt"
] | 2e4817c848e8be07a71a4a2763463b0c569ed97ef006ff313c229650c8f57c92 |
"""NEOs orbit from NEOWS and JPL SBDB
"""
import re
from bs4 import BeautifulSoup
import requests
import astropy.units as u
from astropy.time import Time
from poliastro.twobody.orbit import Orbit
from poliastro.bodies import Sun
from poliastro.frames import Planes
from poliastro.twobody.angles import M_to_nu
# Base URLs
NEOWS_URL = 'https://api.nasa.gov/neo/rest/v1/neo/'
SBDB_URL = 'https://ssd.jpl.nasa.gov/sbdb.cgi'
DEFAULT_API_KEY = 'DEMO_KEY'
def orbit_from_spk_id(spk_id, api_key=None):
    """Return :py:class:`~poliastro.twobody.orbit.Orbit` given a SPK-ID.
    Retrieve info from NASA NeoWS API, and therefore
    it only works with NEAs (Near Earth Asteroids).
    Parameters
    ----------
    spk_id : str
        SPK-ID number, which is given to each body by JPL.
    api_key : str
        NASA OPEN APIs key (default: `DEMO_KEY`)
    Returns
    -------
    orbit : ~poliastro.twobody.orbit.Orbit
        NEA orbit.
    """
    response = requests.get(NEOWS_URL + spk_id,
                            params={'api_key': api_key or DEFAULT_API_KEY})
    response.raise_for_status()
    data = response.json()['orbital_data']
    # Classical orbital elements around the Sun, ecliptic plane.
    a = float(data['semi_major_axis']) * u.AU
    ecc = float(data['eccentricity']) * u.one
    inc = float(data['inclination']) * u.deg
    raan = float(data['ascending_node_longitude']) * u.deg
    argp = float(data['perihelion_argument']) * u.deg
    mean_anomaly = float(data['mean_anomaly']) * u.deg
    # Convert mean anomaly to true anomaly for from_classical().
    nu = M_to_nu(mean_anomaly.to(u.rad), ecc)
    epoch = Time(float(data['epoch_osculation']), format='jd', scale='tdb')
    return Orbit.from_classical(Sun, a, ecc, inc, raan, argp, nu,
                                epoch, plane=Planes.EARTH_ECLIPTIC)
def spk_id_from_name(name):
    """Return SPK-ID number given a small-body name.
    Retrieve and parse HTML from JPL Small Body Database
    to get SPK-ID.
    Parameters
    ----------
    name : str
        Small-body object name. Wildcards "*" and/or "?" can be used.
    Returns
    -------
    spk_id : str
        SPK-ID number.
    Raises
    ------
    ValueError
        When the query matches several bodies, or no body at all.
    """
    payload = {'sstr': name, 'orb': '0', 'log': '0', 'old': '0', 'cov': '0', 'cad': '0'}
    response = requests.get(SBDB_URL, params=payload)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "html.parser")
    # page_identifier is used to check what type of response page we are working with.
    page_identifier = soup.find(attrs={"name": "top"})
    # If there is a 'table' sibling, the object was found.
    if page_identifier.find_next_sibling('table') is not None:
        data = page_identifier.find_next_sibling('table').table.find_all('td')
        # Join all text fragments of the second cell into one string,
        # e.g. "Classification: ... SPK-ID: 2000001".
        complete_string = ''
        for string in data[1].stripped_strings:
            complete_string += string + ' '
        match = re.compile(r'Classification: ([\S\s]+) SPK-ID: (\d+)').match(complete_string)
        if match:
            return match.group(2)
    # If there is a 'center' sibling, it is a page with a list of possible objects
    elif page_identifier.find_next_sibling('center') is not None:
        object_list = page_identifier.find_next_sibling('center').table.find_all('td')
        # Show at most the first three candidate bodies in the error.
        bodies = ''
        obj_num = min(len(object_list), 3)
        for body in object_list[:obj_num]:
            bodies += body.string + '\n'
        raise ValueError(str(len(object_list)) + ' different bodies found:\n' + bodies)
    # If everything else failed
    raise ValueError('Object could not be found. You can visit: ' +
                     SBDB_URL + '?sstr=' + name + ' for more information.')
def orbit_from_name(name, api_key=None):
    """Return :py:class:`~poliastro.twobody.orbit.Orbit` given a name.
    Retrieve info from NASA NeoWS API, and therefore
    it only works with NEAs (Near Earth Asteroids).
    Parameters
    ----------
    name : str
        NEA name.
    api_key : str
        NASA OPEN APIs key (default: `DEMO_KEY`)
    Returns
    -------
    orbit : ~poliastro.twobody.orbit.Orbit
        NEA orbit.
    Raises
    ------
    ValueError
        When *name* cannot be resolved to a unique SPK-ID
        (propagated from :func:`spk_id_from_name`).
    """
    # spk_id_from_name either returns a SPK-ID string or raises ValueError,
    # so the previous ``if spk_id is not None`` guard was dead code whose
    # untaken branch would have silently returned None.
    spk_id = spk_id_from_name(name)
    return orbit_from_spk_id(spk_id, api_key)
| newlawrence/poliastro | src/poliastro/neos/neows.py | Python | mit | 4,190 | [
"VisIt"
] | 58fa4b4f11fa666b3ca432b6990f68d774c7ffe8fa65bf4461096433f8d9d492 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyBrian(PythonPackage):
    """A clock-driven simulator for spiking neural networks"""
    homepage = "http://www.briansimulator.org"
    url = "https://pypi.io/packages/source/b/brian/brian-1.4.3.tar.gz"
    # Release tarball with its digest (md5-length -- confirm against PyPI).
    version('1.4.3', '0570099bcce4d7afde73ff4126e6c30f')
    # Python dependencies required both at build and at run time.
    depends_on('py-matplotlib@0.90.1:', type=('build', 'run'))
    depends_on('py-numpy@1.4.1:', type=('build', 'run'))
    depends_on('py-scipy@0.7.0:', type=('build', 'run'))
| skosukhin/spack | var/spack/repos/builtin/packages/py-brian/package.py | Python | lgpl-2.1 | 1,722 | [
"Brian"
] | 2413912fbfd44ec1a2bde3274a456a400b2bd94a764530bbe1472e8ae1fb004f |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Deposition data model classes.
Classes for wrapping BibWorkflowObject and friends to make it easier to
work with the data attributes.
"""
from uuid import uuid4
import json
import os
from datetime import datetime
from dateutil.tz import tzutc
from sqlalchemy.orm.exc import NoResultFound
from werkzeug.datastructures import MultiDict
from werkzeug.utils import secure_filename
from flask import redirect, render_template, flash, url_for, request, \
session, current_app
from flask_login import current_user
from flask_restful import fields, marshal
from invenio.ext.restful import UTCISODateTime
from invenio.base.helpers import unicodifier
from invenio.ext.sqlalchemy import db
from invenio.modules.workflows.models import BibWorkflowObject, Workflow, \
ObjectVersion
from invenio.modules.workflows.engine import WorkflowStatus
from .form import CFG_FIELD_FLAGS, DataExporter
from .signals import file_uploaded
from .storage import Storage, DepositionStorage
#
# Exceptions
#
class DepositionError(Exception):
    """Base class for deposition errors."""
    # Removed the redundant ``pass``: the docstring is a sufficient body.
class InvalidDepositionType(DepositionError):
    """Raise when a deposition type cannot be found."""
    # Removed the redundant ``pass``: the docstring is a sufficient body.
class InvalidDepositionAction(DepositionError):
    """Raise when deposition is in an invalid state for action."""
    # Removed the redundant ``pass``: the docstring is a sufficient body.
class DepositionDoesNotExists(DepositionError):
    """Raise when a deposition does not exist."""
    # Removed the redundant ``pass``: the docstring is a sufficient body.
class DraftDoesNotExists(DepositionError):
    """Raise when a draft does not exist."""
    # Removed the redundant ``pass``: the docstring is a sufficient body.
class FormDoesNotExists(DepositionError):
    """Raise when a form does not exist."""
    # Docstring fixed: it was copy-pasted from DraftDoesNotExists and
    # wrongly said "draft". Redundant ``pass`` removed.
class FileDoesNotExists(DepositionError):
    """Raise when a file does not exist."""
    # Docstring fixed: it was copy-pasted from DraftDoesNotExists and
    # wrongly said "draft". Redundant ``pass`` removed.
class DepositionNotDeletable(DepositionError):
    """Raise when a deposition cannot be deleted."""
    # Removed the redundant ``pass``: the docstring is a sufficient body.
class FilenameAlreadyExists(DepositionError):
    """Raise when an identical filename is already present in a deposition."""
    # Removed the redundant ``pass``: the docstring is a sufficient body.
class ForbiddenAction(DepositionError):
    """Raise when action on a deposition, draft or file is not authorized."""
    # Removed the redundant ``pass``: the docstring is a sufficient body.
class InvalidApiAction(DepositionError):
    """Raise when an invalid API action is requested."""
    # Removed the redundant ``pass``: the docstring is a sufficient body.
#
# Helpers
#
class FactoryMixin(object):
    """Mix-in providing a ``factory`` alternate constructor that rebuilds
    an object from previously persisted state."""
    @classmethod
    def factory(cls, state, *args, **kwargs):
        """Instantiate ``cls(*args, **kwargs)`` and restore *state* on the
        new object via its ``__setstate__`` hook."""
        instance = cls(*args, **kwargs)
        instance.__setstate__(state)
        return instance
#
# Primary classes
#
class DepositionType(object):
"""
A base class for the deposition types to ensure certain
properties are defined on each type.
A deposition type is just a BibWorkflow with a couple of extra methods.
To customize rendering behavior of the workflow for a given deposition type
you can override the render_error(), render_step() and render_completed()
methods.
"""
workflow = []
""" Workflow definition """
name = ""
""" Display name for this deposition type """
name_plural = ""
""" Plural version of display name for this deposition type """
enabled = False
""" Determines if type is enabled - TODO: REMOVE"""
default = False
"""
Determines if type is the default - warnings are issed if conflicts exsists
TODO: remove
"""
deletable = False
"""
Determine if a deposition is deletable after submission.
"""
editable = False
"""
Determine if a deposition is editable after submission.
"""
stopable = False
"""
Determine if a deposition workflow can be stopped (i.e. discard changes).
"""
group = None
""" Name of group to include this type in. """
api = False
"""
Determines if API is enabled for this type (requires workflow to be
compatible with the API).
"""
draft_definitions = {'_default': None}
"""
Dictionary of all drafts for this deposition type
"""
marshal_file_fields = dict(
checksum=fields.String,
filename=fields.String(attribute='name'),
id=fields.String(attribute='uuid'),
filesize=fields.String(attribute='size'),
)
""" REST API structure of a file """
marshal_draft_fields = dict(
metadata=fields.Raw(attribute='values'),
completed=fields.Boolean,
id=fields.String,
)
""" REST API structure of a draft """
marshal_deposition_fields = dict(
id=fields.Integer,
title=fields.String,
created=UTCISODateTime,
modified=UTCISODateTime,
owner=fields.Integer(attribute='user_id'),
state=fields.String,
submitted=fields.Boolean,
files=fields.Nested(marshal_file_fields),
drafts=fields.Nested(marshal_draft_fields, attribute='drafts_list'),
)
""" REST API structure of a deposition """
@classmethod
def default_draft_id(cls, deposition):
return '_default'
@classmethod
def render_error(cls, dummy_deposition):
"""
Render a page when deposition had an workflow error.
Method can be overwritten by subclasses to provide custom
user interface.
"""
flash('%(name)s deposition has returned error.' %
{'name': cls.name}, 'error')
return redirect(url_for('.index'))
@classmethod
def render_step(self, deposition):
"""
Render a page for a given deposition step.
Method can be overwritten by subclasses to provide custom
user interface.
"""
ctx = deposition.get_render_context()
if ctx:
return render_template(**ctx)
else:
return render_template('deposit/error.html', **dict(
depostion=deposition,
deposition_type=(
None if deposition.type.is_default()
else deposition.type.get_identifier()
),
uuid=deposition.id,
my_depositions=list(Deposition.get_depositions(
current_user, type=deposition.type
)),
))
@classmethod
def render_completed(cls, dummy_deposition):
"""
Render page when deposition was successfully completed (i.e workflow
just finished successfully).
Method can be overwritten by subclasses to provide custom
user interface.
"""
flash('%(name)s was successfully finished.' %
{'name': cls.name}, 'success')
return redirect(url_for('.index'))
@classmethod
def render_final(cls, deposition):
"""
Render page when deposition was *already* successfully completed (i.e
a finished workflow is being executed a second time).
This allows you render e.g. a preview of the record. The distinction
between render_completed and render_final is primarily useful for the
REST API (see api_final and api_completed)
Method can be overwritten by subclasses to provide custom
user interface.
"""
return cls.render_completed(deposition)
@classmethod
def api_completed(cls, deposition):
"""
Workflow just finished processing so return an 202 Accepted, since
usually further background processing may happen.
"""
return deposition.marshal(), 202
@classmethod
def api_final(cls, deposition):
"""
Workflow already finished, and the user tries to re-execute the
workflow, so send a 400 Bad Request back.
"""
return dict(
message="Deposition workflow already completed",
status=400,
), 400
@classmethod
def api_step(cls, deposition):
"""
Workflow was halted during processing. The workflow task that halted
processing is expected to provide a response to send back to the
client.
The default response code is 500 Internal Server Error. A workflow task
is expected to use Deposition.set_render_context() with a dictionary
which is returned to the client. Set the key 'status', to change the
status code, e.g.::
d.set_render_context(dict(status=400, message="Bad request"))
If no response is provided by the workflow task, it is regarded as
an internal server error.
"""
ctx = deposition.get_render_context()
if ctx:
return ctx.get('response', {}), ctx.get('status', 500)
return cls.api_error(deposition)
    @classmethod
    def api_error(cls, deposition):
        """REST API response for an unexpected workflow failure (500)."""
        return dict(message='Internal Server Error', status=500), 500
@classmethod
def api_action(cls, deposition, action_id):
if action_id == 'run':
return deposition.run_workflow(headless=True)
elif action_id == 'reinitialize':
deposition.reinitialize_workflow()
return deposition.run_workflow(headless=True)
elif action_id == 'stop':
deposition.stop_workflow()
return deposition.run_workflow(headless=True)
raise InvalidApiAction(action_id)
@classmethod
def api_metadata_schema(cls, draft_id):
"""
Get the input validation schema for this draft_id
Allows you to override API defaults.
"""
from wtforms.fields.core import FieldList, FormField
if draft_id in cls.draft_definitions:
schema = dict()
formclass = cls.draft_definitions[draft_id]
for fname, fclass in formclass()._fields.items():
if isinstance(fclass, FieldList):
schema[fname] = dict(type='list')
elif isinstance(fclass, FormField):
schema[fname] = dict(type='dict')
else:
schema[fname] = dict(type='any')
return dict(type='dict', schema=schema)
return None
    @classmethod
    def marshal_deposition(cls, obj):
        """
        Generate a JSON representation for REST API of a Deposition

        Included fields are defined by ``cls.marshal_deposition_fields``.
        """
        return marshal(obj, cls.marshal_deposition_fields)
    @classmethod
    def marshal_draft(cls, obj):
        """
        Generate a JSON representation for REST API of a DepositionDraft

        Included fields are defined by ``cls.marshal_draft_fields``.
        """
        return marshal(obj, cls.marshal_draft_fields)
    @classmethod
    def marshal_file(cls, obj):
        """
        Generate a JSON representation for REST API of a DepositionFile

        Included fields are defined by ``cls.marshal_file_fields``.
        """
        return marshal(obj, cls.marshal_file_fields)
@classmethod
def authorize(cls, deposition, action):
if action == 'create':
return True # Any authenticated user
elif action == 'delete':
if deposition.has_sip():
return deposition.type.deletable
return True
elif action == 'reinitialize':
return deposition.type.editable
elif action == 'stop':
return deposition.type.stopable
elif action in ['add_file', 'remove_file', 'sort_files']:
# Don't allow to add/remove/sort files after first submission
return not deposition.has_sip()
elif action in ['add_draft', ]:
# Allow adding drafts when inprogress (independent of SIP exists
# or not).
return deposition.state == 'inprogress'
else:
return not deposition.has_sip()
@classmethod
def authorize_draft(cls, deposition, draft, action):
if action == 'update':
# If deposition allows adding a draft, then allow editing the
# draft.
return cls.authorize(deposition, 'add_draft')
return cls.authorize(deposition, 'add_draft')
    @classmethod
    def authorize_file(cls, deposition, deposition_file, action):
        """
        Determine whether ``action`` may be performed on a deposition file.

        All file-level actions are governed by the deposition-level
        'add_file' permission.
        """
        return cls.authorize(deposition, 'add_file')
    @classmethod
    def get_identifier(cls):
        """ Get type identifier (identical to workflow name) """
        # The class name doubles as the registered workflow name.
        return cls.__name__
    @classmethod
    def is_enabled(cls):
        """ Check if workflow is enabled """
        # Wrapping in a method to eventually allow enabling/disabling
        # via configuration.
        return cls.enabled
    @classmethod
    def is_default(cls):
        """ Check if workflow is the default """
        # Wrapping in a method to eventually allow selecting the default
        # via configuration.
        return cls.default
    @classmethod
    def run_workflow(cls, deposition):
        """
        Run workflow for the given BibWorkflowObject.

        Usually not invoked directly, but instead indirectly through
        Deposition.run_workflow().

        Starts the workflow from scratch when no workflow is attached yet,
        or when the object is still at its initial version and the workflow
        has never run; otherwise the halted workflow is continued from the
        task that stopped it.
        """
        if deposition.workflow_object.workflow is None or (
                deposition.workflow_object.version == ObjectVersion.INITIAL
                and
                deposition.workflow_object.workflow.status ==
                WorkflowStatus.NEW):
            return deposition.workflow_object.start_workflow(
                workflow_name=cls.get_identifier(),
                id_user=deposition.workflow_object.id_user,
                module_name="webdeposit"
            )
        else:
            # Re-run the task that halted the workflow.
            return deposition.workflow_object.continue_workflow(
                start_point="restart_task",
            )
    @classmethod
    def reinitialize_workflow(cls, deposition):
        """
        Reset a fully completed workflow so it can be executed again.

        Only has an effect when both the object version and the workflow
        status indicate completion; drafts are cleared as a side effect.
        """
        # Only reinitialize if really needed (i.e. you can only
        # reinitialize a fully completed workflow).
        wo = deposition.workflow_object
        if wo.version == ObjectVersion.COMPLETED and \
           wo.workflow.status == WorkflowStatus.COMPLETED:
            wo.version = ObjectVersion.INITIAL
            wo.workflow.status = WorkflowStatus.NEW
            # Clear deposition drafts
            deposition.drafts = {}
    @classmethod
    def stop_workflow(cls, deposition):
        """
        Mark a running workflow as completed (e.g. discard changes while
        editing), restoring the title from the latest sealed SIP.
        """
        # Only stop workflow if really needed
        wo = deposition.workflow_object
        if wo.version != ObjectVersion.COMPLETED and \
           wo.workflow.status != WorkflowStatus.COMPLETED:
            # Only workflows which has been fully completed once before
            # can be stopped
            if deposition.has_sip():
                wo.version = ObjectVersion.COMPLETED
                wo.workflow.status = WorkflowStatus.COMPLETED
                # Clear all drafts
                deposition.drafts = {}
                # Set title - FIXME: find better way to set title
                sip = deposition.get_latest_sip(sealed=True)
                title = sip.metadata.get('title', 'Untitled')
                deposition.title = title
    @classmethod
    def all(cls):
        """ Get a dictionary of deposition types """
        # Imported here to avoid a circular import at module load time.
        from .registry import deposit_types
        return deposit_types.mapping()
    @classmethod
    def get(cls, identifier):
        """
        Get a deposition type by its identifier.

        :raises InvalidDepositionType: if no type with that identifier is
            registered.
        """
        try:
            return cls.all()[identifier]
        except KeyError:
            raise InvalidDepositionType(identifier)
    @classmethod
    def keys(cls):
        """ Get a list of deposition type names """
        return cls.all().keys()
    @classmethod
    def values(cls):
        """ Get a list of deposition types (classes, not names) """
        return cls.all().values()
    @classmethod
    def get_default(cls):
        """ Get the default deposition type """
        # Imported here to avoid a circular import at module load time.
        from .registry import deposit_default_type
        return deposit_default_type.get()
    def __unicode__(self):
        """ Return a name for this class """
        return self.get_identifier()
class DepositionFile(FactoryMixin):
    """
    Represents an uploaded file

    Creating a normal deposition file::

        uploaded_file = request.files['file']
        filename = secure_filename(uploaded_file.filename)
        backend = DepositionStorage(deposition_id)

        d = DepositionFile(backend=backend)
        d.save(uploaded_file, filename)

    Creating a chunked deposition file::

        uploaded_file = request.files['file']
        filename = secure_filename(uploaded_file.filename)
        chunk = request.files['chunk']
        chunks = request.files['chunks']
        backend = ChunkedDepositionStorage(deposition_id)

        d = DepositionFile(id=file_id, backend=backend)
        d.save(uploaded_file, filename, chunk, chunks)
        if chunk == chunks:
            d.save(finish=True, filename=filename)

    Reading a file::

        d = DepositionFile.from_json(data)
        if d.is_local():
            send_file(d.get_syspath())
        else:
            redirect(d.get_url())
        d.delete()

    Deleting a file::

        d = DepositionFile.from_json(data)
        d.delete()
    """
    def __init__(self, uuid=None, backend=None):
        self.uuid = uuid or str(uuid4())
        self._backend = backend
        self.name = ''
        # Fix: initialize _path so that accessing ``path`` before save() or
        # __setstate__() raises the intended "No path set" exception instead
        # of an AttributeError.
        self._path = None

    def __getstate__(self):
        """Serialize file state to a JSON-compatible dict."""
        # TODO: Add content_type attributes
        return dict(
            id=self.uuid,
            path=self.path,
            name=self.name,
            size=self.size,
            checksum=self.checksum,
            #bibdoc=self.bibdoc
        )

    def __setstate__(self, state):
        """Restore file state from a dict produced by __getstate__()."""
        self.uuid = state['id']
        self._path = state['path']
        self.name = state['name']
        self.size = state['size']
        self.checksum = state['checksum']

    def __repr__(self):
        # The storage path is stripped so repr() can be shown to users
        # without leaking server paths.
        data = self.__getstate__()
        del data['path']
        return json.dumps(data)

    @property
    def backend(self):
        # Lazily fall back to a null storage backend.
        if not self._backend:
            self._backend = Storage(None)
        return self._backend

    @property
    def path(self):
        if self._path is None:
            raise Exception("No path set")
        return self._path

    def save(self, incoming_file, filename=None, *args, **kwargs):
        """
        Store the incoming file via the backend.

        Sets name, path, size and checksum; returns the backend's result
        (e.g. chunk-completion status for chunked uploads).
        """
        self.name = secure_filename(filename or incoming_file.filename)
        (self._path, self.size, self.checksum, result) = self.backend.save(
            incoming_file, filename, *args, **kwargs
        )
        return result

    def delete(self):
        """ Delete the file on storage """
        return self.backend.delete(self.path)

    def is_local(self):
        """ Determine if file is a local file """
        return self.backend.is_local(self.path)

    def get_url(self):
        """ Get a URL for the file """
        return self.backend.get_url(self.path)

    def get_syspath(self):
        """ Get a local system path to the file """
        return self.backend.get_syspath(self.path)
class DepositionDraftCacheManager(object):
    """
    Draft cache manager takes care of storing draft values in the cache prior
    to a workflow being run. The data can be loaded by the prefill_draft()
    workflow task.
    """
    def __init__(self, user_id):
        # Id of the user owning the cached draft data.
        self.user_id = user_id
        self.data = {}

    @classmethod
    def from_request(cls):
        """
        Create a new draft cache from the current request.
        """
        obj = cls(current_user.get_id())
        # First check if we can get it via a json
        data = request.get_json(silent=True)
        if not data:
            # If not, simply merge both query parameters and request body
            # parameters.
            data = request.values.to_dict()
        obj.data = data
        return obj

    @classmethod
    def get(cls):
        """Create a cache manager and load any data stored in the session."""
        obj = cls(current_user.get_id())
        obj.load()
        return obj

    def save(self):
        """ Save data to session """
        if self.has_data():
            session['deposit_prefill'] = self.data
            # Flask only persists the session when marked as modified.
            session.modified = True
        else:
            self.delete()

    def load(self):
        """ Load data from session """
        self.data = session.get('deposit_prefill', {})

    def delete(self):
        """ Delete data in session """
        if 'deposit_prefill' in session:
            del session['deposit_prefill']
            session.modified = True

    def has_data(self):
        """
        Determine if the cache has data.
        """
        return bool(self.data)

    def fill_draft(self, deposition, draft_id, clear=True):
        """
        Fill a draft with cached draft values

        :param deposition: Deposition whose draft should be filled.
        :param draft_id: Id of the draft to create or update.
        :param clear: If True (default), clear the cached data afterwards.
        """
        draft = deposition.get_or_create_draft(draft_id)
        draft.process(self.data)
        if clear:
            self.data = {}
            self.delete()
        return draft
class DepositionDraft(FactoryMixin):
    """
    Represents the state of a form
    """
    def __init__(self, draft_id, form_class=None, deposition_ref=None):
        self.id = draft_id
        # True once complete() has been called on this draft.
        self.completed = False
        self.form_class = form_class
        # Last stored form values (field name -> value).
        self.values = {}
        # Field flags (e.g. hidden/disabled) from form processing.
        self.flags = {}
        self._form = None
        # Back reference to the depositions
        self._deposition_ref = deposition_ref
        self.validate = False

    def __getstate__(self):
        """Serialize draft state (form_class is restored via the type)."""
        return dict(
            completed=self.completed,
            values=self.values,
            flags=self.flags,
            validate=self.validate,
        )

    def __setstate__(self, state):
        """Restore draft state; form_class is resolved from the deposition
        type's draft_definitions when a deposition reference is present."""
        self.completed = state['completed']
        self.form_class = None
        if self._deposition_ref:
            self.form_class = self._deposition_ref.type.draft_definitions.get(
                self.id
            )
        self.values = state['values']
        self.flags = state['flags']
        self.validate = state.get('validate', True)

    def is_completed(self):
        """Return True once the draft has been marked completed."""
        return self.completed

    def has_form(self):
        """Return True when a form class is associated with this draft."""
        return self.form_class is not None

    def authorize(self, action):
        """Delegate authorization to the owning deposition's type."""
        if not self._deposition_ref:
            return True  # Not connected to deposition so authorize anything.
        return self._deposition_ref.type.authorize_draft(
            self._deposition_ref, self, action
        )

    def complete(self):
        """
        Set state of draft to completed.
        """
        self.completed = True

    def update(self, form):
        """
        Update draft values and flags with data from form.
        """
        data = dict((key, value) for key, value in form.data.items()
                    if value is not None)

        self.values = data
        self.flags = form.get_flags()

    def process(self, data, complete_form=False):
        """
        Process, validate and store incoming form data and return response.

        :param data: Incoming (possibly partial) form data.
        :param complete_form: True to post-process all fields instead of only
            the submitted ones.
        :returns: Tuple ``(form, validated, result)`` where result describes
            changed values/flags/messages produced by post-processing.
        """
        if not self.authorize('update'):
            raise ForbiddenAction('update', self)

        if not self.has_form():
            raise FormDoesNotExists(self.id)

        # The form is initialized with form and draft data. The original
        # draft_data is accessible in Field.object_data, Field.raw_data is the
        # new form data and Field.data is the processed form data or the
        # original draft data.
        #
        # Behind the scences, Form.process() is called, which in turns call
        # Field.process_data(), Field.process_formdata() and any filters
        # defined.
        #
        # Field.object_data contains the value of process_data(), while
        # Field.data contains the value of process_formdata() and any filters
        # applied.
        form = self.get_form(formdata=data)

        # Run form validation which will call Field.pre_valiate(),
        # Field.validators, Form.validate_<field>() and Field.post_validate().
        # Afterwards Field.data has been validated and any errors will be
        # present in Field.errors.
        validated = form.validate()

        # Call Form.run_processors() which in turn will call
        # Field.run_processors() that allow fields to set flags (hide/show)
        # and values of other fields after the entire formdata has been
        # processed and validated.
        validated_flags, validated_data, validated_msgs = (
            form.get_flags(), form.data, form.messages
        )
        form.post_process(formfields=[] if complete_form else data.keys())
        post_processed_flags, post_processed_data, post_processed_msgs = (
            form.get_flags(), form.data, form.messages
        )

        # Save form values
        self.update(form)

        # Build result dictionary
        process_field_names = None if complete_form else data.keys()
        # Determine if some fields where changed during post-processing.
        changed_values = dict(
            (name, value) for name, value in post_processed_data.items()
            if validated_data[name] != value
        )
        # Determine changed flags
        changed_flags = dict(
            (name, flags) for name, flags in post_processed_flags.items()
            if validated_flags.get(name, []) != flags
        )
        # Determine changed messages
        changed_msgs = dict(
            (name, messages) for name, messages in post_processed_msgs.items()
            if validated_msgs.get(name, []) != messages
            or process_field_names is None or name in process_field_names
        )

        result = {}

        if changed_msgs:
            result['messages'] = changed_msgs
        if changed_values:
            result['values'] = changed_values
        if changed_flags:
            for flag in CFG_FIELD_FLAGS:
                # Partition fields into those that turned the flag on and
                # those that turned it off.
                fields = [
                    (name, flag in field_flags)
                    for name, field_flags in changed_flags.items()
                ]
                result[flag + '_on'] = map(
                    lambda x: x[0], filter(lambda x: x[1], fields)
                )
                result[flag + '_off'] = map(
                    lambda x: x[0], filter(lambda x: not x[1], fields)
                )

        return form, validated, result

    def get_form(self, formdata=None, load_draft=True,
                 validate_draft=False):
        """
        Create form instance with draft data and form data if provided.

        :param formdata: Incoming form data.
        :param files: Files to ingest into form
        :param load_draft: True to initialize form with draft data.
        :param validate_draft: Set to true to validate draft data, when no form
            data is provided.
        """
        if not self.has_form():
            raise FormDoesNotExists(self.id)
        # If a field is not present in formdata, Form.process() will assume it
        # is blank instead of using the draft_data value. Most of the time we
        # are only submitting a single field in JSON via AJAX requests. We
        # therefore reset non-submitted fields to the draft_data value with
        # form.reset_field_data().
        # WTForms deal with unicode - we deal with UTF8 so convert all
        draft_data = unicodifier(self.values) if load_draft else {}
        formdata = MultiDict(formdata or {})

        form = self.form_class(
            formdata=formdata, **draft_data
        )
        if formdata:
            form.reset_field_data(exclude=formdata.keys())

        # Set field flags
        if load_draft and self.flags:
            form.set_flags(self.flags)

        # Ingest files in form
        if self._deposition_ref:
            form.files = self._deposition_ref.files
        else:
            form.files = []

        if validate_draft and draft_data and formdata is None:
            form.validate()

        return form

    @classmethod
    def merge_data(cls, drafts):
        """
        Merge data of multiple drafts

        Duplicate keys will be overwritten without warning.
        """
        data = {}
        # Don't include *) disabled fields, and *) empty optional fields
        func = lambda f: not f.flags.disabled and (f.flags.required or f.data)

        for d in drafts:
            if d.has_form():
                visitor = DataExporter(
                    filter_func=func
                )
                visitor.visit(d.get_form())
                data.update(visitor.data)
            else:
                data.update(d.values)

        return data
class Deposition(object):
    """
    Wraps a BibWorkflowObject

    Basically an interface to work with BibWorkflowObject data attribute in an
    easy manner.
    """
    def __init__(self, workflow_object, type=None, user_id=None):
        self.workflow_object = workflow_object
        if not workflow_object:
            # Brand new deposition: initialize empty state and create the
            # backing workflow object.
            self.files = []
            self.drafts = {}
            self.type = self.get_type(type)
            self.title = ''
            self.sips = []

            self.workflow_object = BibWorkflowObject.create_object(
                id_user=user_id,
            )
            # Ensure default data is set for all objects.
            self.update()
        else:
            # Existing deposition: restore state from the workflow object.
            self.__setstate__(workflow_object.get_data())
        self.engine = None

    #
    # Properties proxies to BibWorkflowObject
    #
    @property
    def id(self):
        # The BibWorkflowObject id doubles as the deposition id.
        return self.workflow_object.id

    @property
    def user_id(self):
        return self.workflow_object.id_user

    @user_id.setter
    def user_id(self, value):
        self.workflow_object.id_user = value
        self.workflow_object.workflow.id_user = value

    @property
    def created(self):
        return self.workflow_object.created

    @property
    def modified(self):
        return self.workflow_object.modified

    @property
    def drafts_list(self):
        # Needed for easy marshaling by API
        return self.drafts.values()

    #
    # Proxy methods
    #
    def authorize(self, action):
        """
        Determine if certain action is authorized

        Delegated to deposition type to allow overwriting default behavior.
        """
        return self.type.authorize(self, action)

    #
    # Serialization related methods
    #
    def marshal(self):
        """
        API representation of an object.

        Delegated to the DepositionType, to allow overwriting default
        behaviour.
        """
        return self.type.marshal_deposition(self)

    def __getstate__(self):
        """
        Serialize deposition state for storing in the BibWorkflowObject
        """
        # The bibworkflow object id and owner is implicit, as the Deposition
        # object only wraps the data attribute of a BibWorkflowObject.
        # FIXME: Find better solution for setting the title.
        for d in self.drafts.values():
            if 'title' in d.values:
                self.title = d.values['title']
                break
        return dict(
            type=self.type.get_identifier(),
            title=self.title,
            files=[f.__getstate__() for f in self.files],
            drafts=dict(
                [(d_id, d.__getstate__()) for d_id, d in self.drafts.items()]
            ),
            sips=[f.__getstate__() for f in self.sips],
        )

    def __setstate__(self, state):
        """
        Deserialize deposition from state stored in BibWorkflowObject
        """
        self.type = DepositionType.get(state['type'])
        self.title = state['title']
        self.files = [
            DepositionFile.factory(
                f_state,
                uuid=f_state['id'],
                backend=DepositionStorage(self.id),
            )
            for f_state in state['files']
        ]
        self.drafts = dict(
            [(d_id, DepositionDraft.factory(d_state, d_id,
                                            deposition_ref=self))
             for d_id, d_state in state['drafts'].items()]
        )
        self.sips = [
            SubmissionInformationPackage.factory(s_state, uuid=s_state['id'])
            for s_state in state.get('sips', [])
        ]

    #
    # Persistence related methods
    #
    def update(self):
        """
        Update workflow object with latest data.
        """
        data = self.__getstate__()
        # BibWorkflow calls get_data() before executing any workflow task,
        # and calls set_data() after. Hence, unless we update the data
        # attribute it will be overwritten.
        try:
            self.workflow_object.data = data
        except AttributeError:
            pass
        self.workflow_object.set_data(data)

    def reload(self):
        """
        Get latest data from workflow object
        """
        self.__setstate__(self.workflow_object.get_data())

    def save(self):
        """
        Save the state of the deposition.

        Uses the __getstate__ method to make a JSON serializable
        representation which, sets this as data on the workflow object
        and saves it.
        """
        self.update()
        self.workflow_object.save()

    def delete(self):
        """
        Delete the current deposition
        """
        if not self.authorize('delete'):
            raise DepositionNotDeletable(self)
        # Remove stored files first, then the workflow and its objects.
        for f in self.files:
            f.delete()
        if self.workflow_object.id_workflow:
            Workflow.delete(uuid=self.workflow_object.id_workflow)
            BibWorkflowObject.query.filter_by(
                id_workflow=self.workflow_object.id_workflow
            ).delete()
        else:
            db.session.delete(self.workflow_object)
        db.session.commit()

    #
    # Workflow execution
    #
    def run_workflow(self, headless=False):
        """
        Execute the underlying workflow

        If you made modifications to the deposition you must save it before
        running the workflow, using the save() method.

        :param headless: True for a REST API response, False for a rendered
            user-interface response.
        """
        if self.workflow_object.workflow is not None:
            current_status = self.workflow_object.workflow.status
            if current_status == WorkflowStatus.COMPLETED:
                return self.type.api_final(self) if headless \
                    else self.type.render_final(self)

        self.update()
        self.engine = self.type.run_workflow(self)
        self.reload()

        status = self.engine.status
        if status == WorkflowStatus.ERROR:
            return self.type.api_error(self) if headless else \
                self.type.render_error(self)
        elif status != WorkflowStatus.COMPLETED:
            return self.type.api_step(self) if headless else \
                self.type.render_step(self)
        elif status == WorkflowStatus.COMPLETED:
            return self.type.api_completed(self) if headless else \
                self.type.render_completed(self)

    def reinitialize_workflow(self):
        """
        Reinitialize a workflow object (i.e. prepare it for editing)
        """
        if self.state != 'done':
            raise InvalidDepositionAction("Action only allowed for "
                                          "depositions in state 'done'.")

        if not self.authorize('reinitialize'):
            raise ForbiddenAction('reinitialize', self)

        self.type.reinitialize_workflow(self)

    def stop_workflow(self):
        """
        Stop a running workflow object (e.g. discard changes while editing).
        """
        if self.state != 'inprogress' or not self.submitted:
            raise InvalidDepositionAction("Action only allowed for "
                                          "depositions in state 'inprogress'.")

        if not self.authorize('stop'):
            raise ForbiddenAction('stop', self)

        self.type.stop_workflow(self)

    def set_render_context(self, ctx):
        """
        Set rendering context - used in workflow tasks to set what is to be
        rendered (either by API or UI)
        """
        self.workflow_object.deposition_context = ctx

    def get_render_context(self):
        """
        Get rendering context - used by DepositionType.render_step/api_step
        """
        return getattr(self.workflow_object, 'deposition_context', {})

    @property
    def state(self):
        """
        Return simplified workflow state - inprogress, done or error
        """
        try:
            status = self.workflow_object.workflow.status
            if status == WorkflowStatus.ERROR:
                return "error"
            elif status == WorkflowStatus.COMPLETED:
                return "done"
        except AttributeError:
            # No workflow attached yet.
            pass
        return "inprogress"

    #
    # Draft related methods
    #
    def get_draft(self, draft_id):
        """
        Get draft

        :raises DraftDoesNotExists: if no such draft exists.
        """
        if draft_id not in self.drafts:
            raise DraftDoesNotExists(draft_id)
        return self.drafts[draft_id]

    def get_or_create_draft(self, draft_id):
        """
        Get or create a draft for given draft_id

        :raises DraftDoesNotExists: if the type defines no such draft.
        :raises ForbiddenAction: if adding drafts is not allowed.
        """
        if draft_id not in self.drafts:
            if draft_id not in self.type.draft_definitions:
                raise DraftDoesNotExists(draft_id)

            if not self.authorize('add_draft'):
                raise ForbiddenAction('add_draft', self)

            self.drafts[draft_id] = DepositionDraft(
                draft_id,
                form_class=self.type.draft_definitions[draft_id],
                deposition_ref=self,
            )
        return self.drafts[draft_id]

    def get_default_draft_id(self):
        """
        Get the default draft id for this deposition.
        """
        return self.type.default_draft_id(self)

    #
    # Submission information package related methods
    #
    def get_latest_sip(self, sealed=None):
        """
        Get the latest submission information package

        :param sealed: Set to true to only returned latest sealed SIP. Set to
            False to only return latest unsealed SIP.
        """
        if len(self.sips) > 0:
            # Iterate newest-first.
            for sip in reversed(self.sips):
                if sealed is None:
                    return sip
                elif sealed and sip.is_sealed():
                    return sip
                elif not sealed and not sip.is_sealed():
                    return sip
        return None

    def create_sip(self):
        """
        Create a new submission information package (SIP) with metadata from
        the drafts.
        """
        metadata = DepositionDraft.merge_data(self.drafts.values())
        metadata['files'] = map(
            lambda x: dict(path=x.path, name=os.path.splitext(x.name)[0]),
            self.files
        )

        sip = SubmissionInformationPackage(metadata=metadata)
        self.sips.append(sip)
        return sip

    def has_sip(self, sealed=True):
        """
        Determine if deposition has a sealed submission information package.

        :param sealed: False to look for an unsealed SIP instead.
        """
        for sip in self.sips:
            if (sip.is_sealed() and sealed) or \
               (not sealed and not sip.is_sealed()):
                return True
        return False

    @property
    def submitted(self):
        # A deposition counts as submitted once a sealed SIP exists.
        return self.has_sip()

    #
    # File related methods
    #
    def get_file(self, file_id):
        """Return the file with the given uuid, or None."""
        for f in self.files:
            if f.uuid == file_id:
                return f
        return None

    def add_file(self, deposition_file):
        """Add a file, rejecting duplicates by filename."""
        if not self.authorize('add_file'):
            raise ForbiddenAction('add_file', self)
        for f in self.files:
            if f.name == deposition_file.name:
                raise FilenameAlreadyExists(deposition_file.name)
        self.files.append(deposition_file)
        file_uploaded.send(
            self.type.get_identifier(),
            deposition=self,
            deposition_file=deposition_file,
        )

    def remove_file(self, file_id):
        """Remove and return the file with the given uuid, or None."""
        if not self.authorize('remove_file'):
            raise ForbiddenAction('remove_file', self)
        idx = None
        for i, f in enumerate(self.files):
            if f.uuid == file_id:
                idx = i

        if idx is not None:
            return self.files.pop(idx)
        return None

    def sort_files(self, file_id_list):
        """
        Order the files according the list of ids provided to this function.
        """
        if not self.authorize('sort_files'):
            raise ForbiddenAction('sort_files', self)

        search_dict = dict(
            [(f, i) for i, f in enumerate(file_id_list)]
        )

        def _sort_files_cmp(f_x, f_y):
            i_x = search_dict.get(f_x.uuid, None)
            i_y = search_dict.get(f_y.uuid, None)
            if i_x == i_y:
                return 0
            elif i_x is None or i_x > i_y:
                return 1
            elif i_y is None or i_x < i_y:
                return -1

        # NOTE(review): sorted() is called with a cmp function as the second
        # positional argument, which only works on Python 2; Python 3 would
        # need key=functools.cmp_to_key(_sort_files_cmp).
        self.files = sorted(self.files, _sort_files_cmp)

    #
    # Class methods
    #
    @classmethod
    def get_type(self, type_or_id):
        """Resolve a DepositionType from a class, an identifier, or None
        (default type)."""
        # NOTE(review): first parameter of a classmethod is conventionally
        # named ``cls``, not ``self``.
        if type_or_id and isinstance(type_or_id, type) and \
           issubclass(type_or_id, DepositionType):
            return type_or_id
        else:
            return DepositionType.get(type_or_id) if type_or_id else \
                DepositionType.get_default()

    @classmethod
    def create(cls, user, type=None):
        """
        Create a new deposition object.

        To persist the deposition, you must call save() on the created object.
        If no type is defined, the default deposition type will be assigned.

        @param user: The owner of the deposition
        @param type: Deposition type identifier.
        """
        t = cls.get_type(type)

        if not t.authorize(None, 'create'):
            raise ForbiddenAction('create')

        # Note: it is correct to pass 'type' and not 't' below to constructor.
        obj = cls(None, type=type, user_id=user.get_id())
        return obj

    @classmethod
    def get(cls, object_id, user=None, type=None):
        """
        Get the deposition with specified object id.

        @param object_id: The BibWorkflowObject id.
        @param user: Owner of the BibWorkflowObject
        @param type: Deposition type identifier.
        """
        if type:
            type = DepositionType.get(type)

        try:
            workflow_object = BibWorkflowObject.query.filter(
                BibWorkflowObject.id == object_id,
                # id_user!=0 means current version, as opposed to some snapshot
                # version.
                BibWorkflowObject.id_user != 0,
            ).one()
        except NoResultFound:
            raise DepositionDoesNotExists(object_id)

        if user and workflow_object.id_user != user.get_id():
            raise DepositionDoesNotExists(object_id)

        obj = cls(workflow_object)
        if type and obj.type != type:
            raise DepositionDoesNotExists(object_id, type)
        return obj

    @classmethod
    def get_depositions(cls, user=None, type=None):
        """Get list of depositions (as iterator)."""
        params = [
            Workflow.module_name == 'webdeposit',
        ]

        if user:
            params.append(BibWorkflowObject.id_user == user.get_id())
        else:
            params.append(BibWorkflowObject.id_user != 0)

        if type:
            params.append(Workflow.name == type.get_identifier())

        objects = BibWorkflowObject.query.join("workflow").options(
            db.contains_eager('workflow')).filter(*params).order_by(
                BibWorkflowObject.modified.desc())

        def _create_obj(o):
            # Wrap a workflow object; skip (return None) if the stored type
            # is no longer registered or does not match the filter.
            try:
                obj = cls(o)
            except InvalidDepositionType as err:
                current_app.logger.exception(err)
                return None
            if type is None or obj.type == type:
                return obj
            return None

        def mapper_filter(objs):
            # Lazily yield only valid, type-matching depositions.
            for o in objs:
                o = _create_obj(o)
                if o is not None:
                    yield o

        return mapper_filter(objects)
class SubmissionInformationPackage(FactoryMixin):
    """Submission information package (SIP).

    :param uuid: Unique identifier for this SIP
    :param metadata: Metadata in JSON for this submission information package
    :param package: Full generated metadata for this package (i.e. normally
        MARC for records, but could anything).
    :param timestamp: UTC timestamp in ISO8601 format of when package was
        sealed.
    :param agents: List of agents for this package (e.g. creator, ...)
    :param task_ids: List of task ids submitted to ingest this package (may be
        appended to after SIP has been sealed).
    """
    def __init__(self, uuid=None, metadata=None):
        self.uuid = uuid or str(uuid4())
        # Fix: replaced the mutable default argument ``metadata={}`` with
        # None (the property setter deep-copies via JSON, so behaviour is
        # unchanged, but a shared mutable default is an accident waiting to
        # happen).
        self.metadata = metadata if metadata is not None else {}
        self.package = ""
        self.timestamp = None
        self.agents = []
        self.task_ids = []

    def __getstate__(self):
        """Serialize SIP state to a JSON-compatible dict."""
        return dict(
            id=self.uuid,
            metadata=self.metadata,
            package=self.package,
            timestamp=self.timestamp,
            task_ids=self.task_ids,
            agents=[a.__getstate__() for a in self.agents],
        )

    def __setstate__(self, state):
        """Restore SIP state from a dict produced by __getstate__()."""
        self.uuid = state['id']
        # Assign the private attribute directly to skip the JSON round-trip
        # in the metadata setter (stored state is already JSON-clean).
        self._metadata = state.get('metadata', {})
        self.package = state.get('package', None)
        self.timestamp = state.get('timestamp', None)
        self.agents = [Agent.factory(a_state)
                       for a_state in state.get('agents', [])]
        self.task_ids = state.get('task_ids', [])

    def seal(self):
        """Mark the SIP as sealed by stamping it with the current UTC time."""
        self.timestamp = datetime.now(tzutc()).isoformat()

    def is_sealed(self):
        """Return True once seal() has been called."""
        return self.timestamp is not None

    @property
    def metadata(self):
        return self._metadata

    @metadata.setter
    def metadata(self, value):
        import datetime
        import json

        class DateTimeEncoder(json.JSONEncoder):
            """JSON encoder that serializes date/datetime as ISO8601."""
            def default(self, obj):
                if isinstance(obj, (datetime.datetime, datetime.date)):
                    encoded_object = obj.isoformat()
                else:
                    encoded_object = json.JSONEncoder.default(self, obj)
                return encoded_object

        # Round-trip through JSON to both deep-copy the value and ensure it
        # is JSON-serializable (dates become ISO8601 strings).
        data = json.dumps(value, cls=DateTimeEncoder)
        self._metadata = json.loads(data)
class Agent(FactoryMixin):
    """Agent (e.g. the creator) attached to a submission information
    package."""

    def __init__(self, role=None, from_request_context=False):
        """Create an agent, optionally populated from the Flask request."""
        self.role = role
        self.user_id = None
        self.ip_address = None
        self.email_address = None
        if from_request_context:
            self.from_request_context()

    def __getstate__(self):
        """Serialize agent state to a JSON-compatible dict."""
        return {
            'role': self.role,
            'user_id': self.user_id,
            'ip_address': self.ip_address,
            'email_address': self.email_address,
        }

    def __setstate__(self, state):
        """Restore agent state from a dict produced by __getstate__()."""
        for attr in ('role', 'user_id', 'ip_address', 'email_address'):
            setattr(self, attr, state[attr])

    def from_request_context(self):
        """Fill in IP address, user id and email from the current request."""
        from flask import request
        from invenio.ext.login import current_user
        self.ip_address = request.remote_addr
        self.user_id = current_user.get_id()
        self.email_address = current_user.info.get('email', '')
| zenodo/invenio | invenio/modules/deposit/models.py | Python | gpl-2.0 | 47,526 | [
"VisIt"
] | 607f0b8faa478e3b200ff06a2f7b44c11296d196f0b391503fa8c65528f58325 |
# -*- coding: utf-8 -*-
"""
templatetk.jscompiler
~~~~~~~~~~~~~~~~~~~~~
This module can compile a node tree to JavaScript. Not all that
can be compiled to Python bytecode can also be compiled to JavaScript
though.
:copyright: (c) Copyright 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
from StringIO import StringIO
from . import nodes
from .nodeutils import NodeVisitor
from .idtracking import IdentManager
from .fstate import FrameState
from .utils import json
class StopFrameCompilation(Exception):
    """Internal signal used to stop compilation of the current frame."""
    # NOTE(review): no raiser/handler is visible in this chunk -- confirm
    # usage elsewhere in the module.
    pass
class JavaScriptWriter(object):
    """Writes indented JavaScript source to a stream.

    Supports nested buffering (start_buffering/end_buffering) so generated
    code can be captured and replayed, and compact output when
    ``indentation`` is negative.
    """

    def __init__(self, stream, indentation=2):
        self.stream_stack = [stream]
        self.indentation = indentation
        self._new_lines = 0
        self._first_write = True
        self._indentation = 0
        # Fix: these attributes were read by write_newline()/write() before
        # ever being assigned, so the first call with a node raised an
        # AttributeError.
        self._last_line = None
        self._write_debug_info = None

    def indent(self):
        """Increase the indentation level by one."""
        self._indentation += 1

    def outdent(self, step=1):
        """Decrease the indentation level by *step* (default 1)."""
        self._indentation -= step

    def write(self, x):
        """Write a string into the output stream."""
        stream = self.stream_stack[-1]
        if self._new_lines:
            # Negative indentation width means compact mode: suppress
            # newlines and indentation entirely.
            if self.indentation >= 0:
                if not self._first_write:
                    stream.write('\n' * self._new_lines)
                self._first_write = False
                stream.write(' ' * (self.indentation * self._indentation))
            self._new_lines = 0
        if isinstance(x, unicode):
            # Python 2: encode unicode to UTF-8 before writing.
            x = x.encode('utf-8')
        stream.write(x)

    def write_newline(self, node=None, extra=0):
        """Request a newline (plus *extra* blank lines) before the next
        write, recording debug line info from *node* if given."""
        self._new_lines = max(self._new_lines, 1 + extra)
        if node is not None and node.lineno != self._last_line:
            self._write_debug_info = node.lineno
            self._last_line = node.lineno

    def write_line(self, x, node=None, extra=0):
        """Write *x* on a fresh line."""
        self.write_newline(node, extra)
        self.write(x)

    def dump_object(self, obj):
        """Serialize *obj* to JSON (compact separators in compact mode)."""
        separators = None
        if self.indentation < 0:
            separators = (',', ':')
        return json.dumps(obj, separators=separators)

    def write_repr(self, obj):
        """Write the JSON representation of *obj*."""
        return self.write(self.dump_object(obj))

    def write_from_buffer(self, buffer):
        """Copy the contents of *buffer* to the current output stream."""
        buffer.seek(0)
        while 1:
            chunk = buffer.read(4096)
            if not chunk:
                break
            self.stream_stack[-1].write(chunk)

    def start_buffering(self):
        """Redirect subsequent writes into a fresh in-memory buffer."""
        new_stream = StringIO()
        self.stream_stack.append(new_stream)
        return new_stream

    def end_buffering(self):
        """Stop buffering and return to the previous output stream."""
        self.stream_stack.pop()
def to_javascript(node, stream=None, short_ids=False, indentation=2):
    """Compile a template node tree to JavaScript.

    When *stream* is omitted the generated source is returned as a
    string; otherwise it is written to the given stream and ``None``
    is returned.
    """
    created_buffer = stream is None
    if created_buffer:
        stream = StringIO()
    generator = JavaScriptGenerator(stream, node.config, short_ids, indentation)
    generator.visit(node, None)
    if created_buffer:
        return stream.getvalue()
class JavaScriptGenerator(NodeVisitor):
    """Walks a template node tree and emits equivalent JavaScript.

    The emitted code is a function expression ``(function(rt) { ... })``
    that, when invoked with the JavaScript runtime object ``rt``,
    registers the template's blocks and returns the result of
    ``rt.makeTemplate``.  Template features without a JavaScript
    equivalent raise :exc:`NotImplementedError`.
    """

    # Maps operand ops to JavaScript comparison operators.  Hoisted to a
    # class attribute so the dict is not rebuilt on every visit_Operand call.
    _cmp_ops = {
        'gt': '>',
        'gteq': '>=',
        'eq': '==',
        'ne': '!=',
        'lteq': '<=',
        'lt': '<'
    }

    def __init__(self, stream, config, short_ids=False, indentation=2):
        NodeVisitor.__init__(self)
        self.config = config
        self.writer = JavaScriptWriter(stream, indentation)
        self.ident_manager = IdentManager(short_ids=short_ids)

    def begin_rtstate_func(self, name, with_writer=True):
        # Open a ``function name(rts) { ... }`` body; most of them bind the
        # runtime's write function to the local shorthand ``w``.
        self.writer.write_line('function %s(rts) {' % name)
        self.writer.indent()
        if with_writer:
            self.writer.write_line('var w = rts.writeFunc;')

    def end_rtstate_func(self):
        self.writer.outdent()
        self.writer.write_line('}')

    def compile(self, node):
        """Entry point: compile a :class:`nodes.Template` node."""
        assert isinstance(node, nodes.Template), 'can only transform ' \
            'templates, got %r' % node.__class__.__name__
        return self.visit(node, None)

    def write_scope_code(self, fstate):
        """Emit the ``var`` declarations for all names a frame needs."""
        declarations = []
        already_handled = set()
        for alias, old_name in fstate.required_aliases.iteritems():
            already_handled.add(alias)
            declarations.append('%s = %s' % (alias, old_name))

        # at that point we know about the inner states and can see if any
        # of them need variables we do not have yet assigned and we have to
        # resolve for them.
        for target, sourcename in fstate.iter_required_lookups():
            already_handled.add(target)
            declarations.append('%s = rts.lookupVar("%s")' % (
                target,
                sourcename
            ))

        # handle explicit vars not covered above
        for name, local_id in fstate.local_identifiers.iteritems():
            if local_id not in already_handled:
                declarations.append(local_id)

        if declarations:
            self.writer.write_line('var %s;' % ', '.join(declarations))

    def write_assign(self, target, expr, fstate):
        """Emit an assignment of *expr* to the name node *target*; root
        frame assignments are additionally exported on the runtime state."""
        assert isinstance(target, nodes.Name), 'can only assign to names'
        name = fstate.lookup_name(target.name, 'store')
        self.writer.write_line('%s = ' % name)
        self.visit(expr, fstate)
        self.writer.write(';')
        if fstate.root:
            self.writer.write_line('rts.exportVar("%s", %s);' % (
                target.name,
                name
            ))

    def make_target_name_tuple(self, target):
        """Return the (possibly nested) list of names an assignment
        target unpacks into, mirroring the tuple structure."""
        assert target.ctx in ('store', 'param')
        assert isinstance(target, (nodes.Name, nodes.Tuple))

        if isinstance(target, nodes.Name):
            return [target.name]

        def walk(obj):
            rv = []
            for child in obj.items:
                if isinstance(child, nodes.Name):
                    rv.append(child.name)
                elif isinstance(child, nodes.Tuple):
                    rv.append(walk(child))
                else:
                    assert 0, 'unsupported assignment to %r' % child
            return rv
        return walk(target)

    def write_assignment(self, node, fstate):
        """Write the flat comma-separated local names of a target."""
        rv = []
        def walk(obj):
            if isinstance(obj, nodes.Name):
                rv.append(fstate.lookup_name(obj.name, node.ctx))
                return
            for child in obj.items:
                walk(child)
        walk(node)
        self.writer.write(', '.join(rv))

    def write_context_as_object(self, fstate, reference_node):
        """Emit either the plain runtime context or an overlay context
        containing the frame's local variables."""
        d = dict(fstate.iter_vars(reference_node))
        if not d:
            self.writer.write('rts.context')
            return
        self.writer.write('rts.makeOverlayContext({')
        for idx, (name, local_id) in enumerate(d.iteritems()):
            if idx:
                self.writer.write(', ')
            self.writer.write('%s: %s' % (self.writer.dump_object(name), local_id))
        self.writer.write('})')

    def start_buffering(self, fstate):
        # Redirect the generated code's writes into a runtime-side buffer.
        self.writer.write_line('w = rts.startBuffering()')

    def return_buffer_contents(self, fstate, write_to_var=False):
        """Stop runtime-side buffering; either return the buffered string
        or store it in a temporary whose name is returned."""
        tmp = self.ident_manager.temporary()
        self.writer.write_line('var %s = rts.endBuffering();' % tmp)
        self.writer.write_line('w = %s[0];' % tmp)
        if write_to_var:
            self.writer.write_line('%s = %s[1];' % (tmp, tmp))
            return tmp
        else:
            self.writer.write_line('return %s[1];' % tmp)

    def visit_block(self, nodes, fstate):
        # note: `nodes` here is a list of child nodes, not the module
        self.writer.write_newline()
        try:
            for node in nodes:
                self.visit(node, fstate)
        except StopFrameCompilation:
            pass

    def visit_Template(self, node, fstate):
        assert fstate is None, 'framestate passed to template visitor'
        fstate = FrameState(self.config, ident_manager=self.ident_manager,
                            root=True)
        fstate.analyze_identfiers(node.body)

        self.writer.write_line('(function(rt) {')
        self.writer.indent()

        # root function: renders the template body
        self.begin_rtstate_func('root')
        buffer = self.writer.start_buffering()
        self.visit_block(node.body, fstate)
        self.writer.end_buffering()
        self.write_scope_code(fstate)
        self.writer.write_from_buffer(buffer)
        self.end_rtstate_func()

        # setup function: registers the block mapping with the runtime
        self.begin_rtstate_func('setup', with_writer=False)
        self.writer.write_line('rt.registerBlockMapping(rts.info, blocks);')
        self.end_rtstate_func()

        # one function per {% block %}, each in a hard-scoped frame
        for block_node in node.find_all(nodes.Block):
            block_fstate = fstate.derive(scope='hard')
            block_fstate.analyze_identfiers(block_node.body)
            self.begin_rtstate_func('block_' + block_node.name)
            buffer = self.writer.start_buffering()
            self.visit_block(block_node.body, block_fstate)
            self.writer.end_buffering()
            self.write_scope_code(block_fstate)
            self.writer.write_from_buffer(buffer)
            self.end_rtstate_func()

        self.writer.write_line('var blocks = {')
        for idx, block_node in enumerate(node.find_all(nodes.Block)):
            if idx:
                self.writer.write(', ')
            self.writer.write('"%s": block_%s' % (block_node.name,
                                                  block_node.name))
        self.writer.write('};')

        self.writer.write_line('return rt.makeTemplate(root, setup, blocks);')
        self.writer.outdent()
        self.writer.write_line('})')

    def visit_For(self, node, fstate):
        loop_fstate = fstate.derive()
        loop_fstate.analyze_identfiers([node.target], preassign=True)
        loop_fstate.add_special_identifier(self.config.forloop_accessor,
                                          preassign=True)
        if self.config.forloop_parent_access:
            fstate.add_implicit_lookup(self.config.forloop_accessor)
        loop_fstate.analyze_identfiers(node.body)

        loop_else_fstate = fstate.derive()
        if node.else_:
            loop_else_fstate.analyze_identfiers(node.else_)

        self.writer.write_line('rt.iterate(')
        self.visit(node.iter, loop_fstate)
        nt = self.make_target_name_tuple(node.target)
        self.writer.write(', ')
        # parent loop object (or null) for {{ loop.parent }} style access
        if self.config.forloop_parent_access:
            self.visit(nodes.Name(self.config.forloop_accessor, 'load'), fstate)
        else:
            self.writer.write('null')
        self.writer.write(', %s, function(%s, ' % (
            self.writer.dump_object(nt),
            loop_fstate.lookup_name(self.config.forloop_accessor, 'store')
        ))
        self.write_assignment(node.target, loop_fstate)
        self.writer.write(') {')
        self.writer.indent()
        buffer = self.writer.start_buffering()
        self.visit_block(node.body, loop_fstate)
        self.writer.end_buffering()
        self.write_scope_code(loop_fstate)
        self.writer.write_from_buffer(buffer)
        self.writer.outdent()
        self.writer.write_line('}, ')

        # optional {% else %} branch, rendered when the iterable is empty
        if node.else_:
            self.writer.write('function() {')
            self.writer.indent()
            buffer = self.writer.start_buffering()
            self.visit_block(node.else_, loop_else_fstate)
            self.writer.end_buffering()
            self.write_scope_code(loop_else_fstate)
            self.writer.write_from_buffer(buffer)
            self.writer.outdent()
            self.writer.write('}')
        else:
            self.writer.write('null')
        self.writer.write(');')

    def visit_If(self, node, fstate):
        self.writer.write_line('if (')
        self.visit(node.test, fstate)
        self.writer.write(') { ')
        condition_fstate = fstate.derive()
        condition_fstate.analyze_identfiers(node.body)
        self.writer.indent()
        buffer = self.writer.start_buffering()
        self.visit_block(node.body, condition_fstate)
        self.writer.end_buffering()
        self.write_scope_code(condition_fstate)
        self.writer.write_from_buffer(buffer)
        self.writer.outdent()
        if node.else_:
            self.writer.write_line('} else {')
            self.writer.indent()
            condition_fstate_else = fstate.derive()
            condition_fstate_else.analyze_identfiers(node.else_)
            buffer = self.writer.start_buffering()
            self.visit_block(node.else_, condition_fstate_else)
            self.writer.end_buffering()
            # Bugfix: the scope code must come from the else-branch frame;
            # previously this re-emitted the true-branch frame's scope.
            self.write_scope_code(condition_fstate_else)
            self.writer.write_from_buffer(buffer)
            self.writer.outdent()
        self.writer.write_line('}')

    def visit_Output(self, node, fstate):
        # constant template data is written verbatim; expressions are run
        # through the runtime's finalize function first
        for child in node.nodes:
            self.writer.write_line('w(')
            if isinstance(child, nodes.TemplateData):
                self.writer.write_repr(child.data)
            else:
                self.writer.write('rts.info.finalize(')
                self.visit(child, fstate)
                self.writer.write(')')
            self.writer.write(');')

    def visit_Extends(self, node, fstate):
        self.writer.write_line('return rts.extendTemplate(')
        self.visit(node.template, fstate)
        self.writer.write(', ')
        self.write_context_as_object(fstate, node)
        self.writer.write(', w);')
        if fstate.root:
            # nothing after an extends in a root frame can produce output
            raise StopFrameCompilation()

    def visit_Block(self, node, fstate):
        self.writer.write_line('rts.evaluateBlock("%s", ' % node.name)
        self.write_context_as_object(fstate, node)
        self.writer.write(');')

    def visit_Function(self, node, fstate):
        func_fstate = fstate.derive()
        func_fstate.analyze_identfiers(node.args)
        func_fstate.analyze_identfiers(node.body)

        argnames = [x.name for x in node.args]
        self.writer.write('rt.wrapFunction(')
        self.visit(node.name, fstate)
        self.writer.write(', %s, [' % self.writer.dump_object(argnames))
        for idx, arg in enumerate(node.defaults or ()):
            if idx:
                self.writer.write(', ')
            self.visit(arg, func_fstate)
        self.writer.write('], function(')
        for idx, arg in enumerate(node.args):
            if idx:
                self.writer.write(', ')
            self.visit(arg, func_fstate)
        self.writer.write(') {')
        self.writer.write_newline()
        self.writer.indent()
        buffer = self.writer.start_buffering()
        self.start_buffering(func_fstate)
        self.visit_block(node.body, func_fstate)
        self.writer.end_buffering()
        self.write_scope_code(func_fstate)
        self.writer.write_from_buffer(buffer)
        self.return_buffer_contents(func_fstate)
        self.writer.outdent()
        self.writer.write_line('})')

    def visit_Assign(self, node, fstate):
        self.writer.write_newline()
        self.write_assign(node.target, node.node, fstate)

    def visit_Name(self, node, fstate):
        name = fstate.lookup_name(node.name, node.ctx)
        self.writer.write(name)

    def visit_Const(self, node, fstate):
        self.writer.write_repr(node.value)

    def visit_Getattr(self, node, fstate):
        # attribute and item access are the same thing in JavaScript
        self.visit(node.node, fstate)
        self.writer.write('[')
        self.visit(node.attr, fstate)
        self.writer.write(']')

    def visit_Getitem(self, node, fstate):
        self.visit(node.node, fstate)
        self.writer.write('[')
        self.visit(node.arg, fstate)
        self.writer.write(']')

    def visit_Call(self, node, fstate):
        # XXX: For intercepting this it would be necessary to extract the
        # rightmost part of the dotted expression in node.node so that the
        # owner can be preserved for JavaScript (this)
        self.visit(node.node, fstate)
        self.writer.write('(')
        for idx, arg in enumerate(node.args):
            if idx:
                self.writer.write(', ')
            self.visit(arg, fstate)
        self.writer.write(')')
        if node.kwargs or node.dyn_args or node.dyn_kwargs:
            raise NotImplementedError('Dynamic calls or keyword arguments '
                                      'not available with javascript')

    def visit_TemplateData(self, node, fstate):
        self.writer.write('rt.markSafe(')
        self.writer.write_repr(node.data)
        self.writer.write(')')

    def visit_Tuple(self, node, fstate):
        raise NotImplementedError('Tuples not possible in JavaScript')

    def visit_List(self, node, fstate):
        self.writer.write('[')
        for idx, child in enumerate(node.items):
            if idx:
                self.writer.write(', ')
            self.visit(child, fstate)
        self.writer.write(']')

    def visit_Dict(self, node, fstate):
        self.writer.write('({')
        for idx, pair in enumerate(node.items):
            if idx:
                self.writer.write(', ')
            if not isinstance(pair.key, nodes.Const):
                raise NotImplementedError('Constant dict key required with javascript')
            # hack to have the same logic as json.dumps for keys
            self.writer.write(json.dumps({pair.key.value: 0})[1:-4] + ': ')
            self.visit(pair.value, fstate)
        self.writer.write('})')

    def visit_Filter(self, node, fstate):
        self.writer.write('rts.info.callFilter(')
        # Bugfix: emit the filter name first, then the filtered value;
        # previously a stray leading ", " and a missing separator between
        # name and value produced invalid JavaScript.
        self.writer.write_repr(node.name)
        self.writer.write(', ')
        self.visit(node.node, fstate)
        self.writer.write(', [')
        for idx, arg in enumerate(node.args):
            if idx:
                self.writer.write(', ')
            self.visit(arg, fstate)
        self.writer.write('])')
        if node.kwargs or node.dyn_args or node.dyn_kwargs:
            raise NotImplementedError('Dynamic calls or keyword arguments '
                                      'not available with javascript')

    def visit_CondExpr(self, node, fstate):
        self.writer.write('(')
        self.visit(node.test, fstate)
        self.writer.write(' ? ')
        self.visit(node.true, fstate)
        self.writer.write(' : ')
        self.visit(node.false, fstate)
        self.writer.write(')')

    def visit_Slice(self, node, fstate):
        raise NotImplementedError('Slicing not possible with JavaScript')

    def binexpr(operator):
        # Factory evaluated at class-creation time to stamp out visitors
        # for simple binary operators; deleted from the namespace below.
        def visitor(self, node, fstate):
            self.writer.write('(')
            self.visit(node.left, fstate)
            self.writer.write(' %s ' % operator)
            self.visit(node.right, fstate)
            self.writer.write(')')
        return visitor

    def visit_Concat(self, node, fstate):
        self.writer.write('rt.concat(rts.info, [')
        for idx, child in enumerate(node.nodes):
            if idx:
                self.writer.write(', ')
            self.visit(child, fstate)
        self.writer.write('])')

    visit_Add = binexpr('+')
    visit_Sub = binexpr('-')
    visit_Mul = binexpr('*')
    visit_Div = binexpr('/')
    visit_Mod = binexpr('%')
    del binexpr

    def visit_FloorDiv(self, node, fstate):
        # JavaScript has no integer division; truncate via parseInt
        self.writer.write('parseInt(')
        self.visit(node.left, fstate)
        self.writer.write(' / ')
        self.visit(node.right, fstate)
        self.writer.write(')')

    def visit_Pow(self, node, fstate):
        self.writer.write('Math.pow(')
        self.visit(node.left, fstate)
        self.writer.write(', ')
        self.visit(node.right, fstate)
        self.writer.write(')')

    def visit_And(self, node, fstate):
        self.writer.write('(')
        self.visit(node.left, fstate)
        self.writer.write(' && ')
        self.visit(node.right, fstate)
        self.writer.write(')')

    def visit_Or(self, node, fstate):
        self.writer.write('(')
        self.visit(node.left, fstate)
        self.writer.write(' || ')
        self.visit(node.right, fstate)
        self.writer.write(')')

    def visit_Not(self, node, fstate):
        self.writer.write('!(')
        self.visit(node.node, fstate)
        self.writer.write(')')

    def visit_Compare(self, node, fstate):
        self.writer.write('(')
        self.visit(node.expr, fstate)
        assert len(node.ops) == 1, \
            'only comparison of two expressions is supported'
        self.visit(node.ops[0], fstate)
        self.writer.write(')')

    def visit_Operand(self, node, fstate):
        self.writer.write(' ')
        self.writer.write(self._cmp_ops.get(node.op, ''))
        self.writer.write(' ')
        self.visit(node.expr, fstate)
| mitsuhiko/templatetk | templatetk/jscompiler.py | Python | bsd-3-clause | 20,349 | [
"VisIt"
] | 4b3f48f4db2dee02d292546ebb9fdead2b5f07b13600c4136b9abec67490df23 |
'''
SASSIE: Copyright (C) 2011 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import sassie.simulate.complex_monte_carlo.gui_mimic_complex_monte_carlo as gui_mimic_complex_monte_carlo
#import gui_mimic_complex_monte_carlo as gui_mimic_complex_monte_carlo
import filecmp
from unittest import main
from nose.tools import assert_equals
from mocker import Mocker, MockerTestCase
# Common test-data locations, all relative to this file and terminated
# with the platform path separator (the modules under test expect
# directory strings that end in a separator).
_data_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'data')

pdb_data_path = os.path.join(_data_root, 'pdb_common') + os.path.sep
dcd_data_path = os.path.join(_data_root, 'dcd_common') + os.path.sep
other_data_path = os.path.join(_data_root, 'other_common') + os.path.sep
module_data_path = os.path.join(_data_root, 'interface', 'complex_monte_carlo') + os.path.sep

paths = {'pdb_data_path': pdb_data_path,
         'dcd_data_path': dcd_data_path,
         'other_data_path': other_data_path,
         'module_data_path': module_data_path}
class Test_Complex_Monte_Carlo_Filter(MockerTestCase):
'''
System integration test for complex_filter.py / sassie 1.0
Test to see whether complex_filter catches improper input.
Inputs tested:
runname: string project name
path: string input file path
dcdfile: string name of output dcd file containing accepted structures
pdbfile: string name of input pdb file containing intial structure
trials: integer number of Monte Carlo move attempts
goback: integer number of failed Monte Carlo attempts before returning to previously accepted structure
temp: float run temperature (K)
nsegments: integer total number of segments
npsegments: integer number of segments containing flexible regions
flpsegname: string names of segments with flexible regions (separated by commas if more than one)
segbasis: string type of basis for overlap check ("all", "heavy", "backbone" or specific atom name, i.e., "CA")
seglow: integer low residue for (non-flexible) structure alignment region (not entered directly; parsed from entered alignment range in GenApp)
    seghigh: integer high residue for (non-flexible) structure alignment region (not entered directly; parsed from entered alignment range in GenApp)
lowrg: float low Rg cutoff value if Advanced Input is chosen
highrg: float high Rg cutoff value if Advanced Input is chosen
zflag: integer enable zcutoff flag (0=no, 1=yes)
zcutoff: float zcutoff value (discard structures with any z-axis coordinates less than this value)
cflag: integer enable atomic constraint flag (0=no, 1=yes)
confile: string name of file describing additional constraints to check before accepting a structure
directedmc: float non-zero Rg value to guide Monte Carlo run; 0=no directed Monte Carlo (used if Advanced Input is chosen)
psegvariables:
integer number of flexible regions
float_array maximum angle that torsion can sample (in each flexible region)
int_array low residue number for each flexible region
    int_array number of contiguous residues per flexible region (not entered directly; parsed from entered residue range in GenApp)
string molecule type ('protein' or 'rna')
Inputs not tested (options not currently implemented):
psffilepath string path to psf file
psffilename string psf file name
parmfilepath string path to CHARMM parameter file
parmfilename string name of CHARMM parameter file
plotflag integer option to plot structure number vs Rg
Use cases tested:
1. check if runname has incorrect character
2. check input file path permissions
a. no permission error
b. permission error
    i. path does not exist
ii. read permission not allowed
iii. write permission not allowed
3. check pdbfile
a. PDB file doesn't exist
b. PDB file exists
i. PDB file is valid
ii. PDB file isn't valid
4. check if trials is > 0
5. check if goback is > 0
6. check if temperature is >= 0
7. check if zflag is 0 or 1 #NOTE: zcutoff test is commented out in complex_filter.py
8. check if clflag is 0 or 1
9. check constraint file
a. file doesn't exist
b. file exists
10. check constraint file parameters
a. bad segment name in file
b. bad atom name in file
c. bad distance value in file
d. no distance value in file
e. COM or ATM type1 and type 2 in file
    f. two type definitions in file
g. second resid1/resid2 value > first resid1/resid2 value
h. first resid1 value is in pdb file
i. second resid1 value is in pdb file
j. first resid2 value is in pdb file
k. second resid2 value is in pdb file
11. check if directed Monte Carlo value is 0 or 1
12. check if low Rg cutoff is higher than high Rg cutoff
13. check if Rg cutoffs are > 0
a. low Rg cutoff is > 0
b. high Rg cutoff is > 0
14. check if number of segments is >= 1
15. check if number of flexible segments is >= 1
16. check if the number of flexible segments is <= the number of segments
17. check that number of basis names is the same as the number of segments (for basis != 'all', 'backbone' or 'heavy')
18. check if number of flexible segment names matches the number of flexible segments
19. check if number of alignment low residues matches the number of flexible segments
20. check if the number of alignment high residues matches the number of flexible segments
21. check if each (flexible?) segment in the pdb file contains the correct moltype #NOT TESTED need to loop over segment names
22. check overlap basis atoms
a. check that atom name is in PDB file #NOT TESTED error handling is commented out in complex_filter.py
b. check that atom has VDW paramters #NOT TESTED there are no atoms in vdw list that don't have vdw parameters
23. check overlap in initial structure #NOT TESTED generates a warning only; program does not exit due to overlap in initial structure
24. check if flexible segments are in PDB file
25. check if total number of segments matches the number of segments in PDB file
26. check if pdbfile has missing residue (numbers) #NOT TESTED need to loop over segment names
27. check flexible segment variables
a. angle values are float types
b. angle values are in the range 0.0 to 180.0
c. number of ranges for each segment is an integer
d. number of ranges for each segment is >=1
e. low resid is an integer array
f. number of contiguous residues is an integer array
g. moltype for each segment matches the PDB file
h. PDB file contains low and high alignment residues listed for each flexible region
i. low alignment resid < high alignment resid
j. alignment range for each segment isn't too small (less than 3 points)
k. number of angle values matches the number of ranges
l. number of low residue values matches the number of ranges
m. number of contiguous residues matches the number of ranges
n. PDB file contains the low and high flexible residues listed for each flexible region
o. number of contiguous residues is >= 0
p. alignment and flexible regions don't overlap
q. low residue can't include n-terminus (for numranges > 1)
r. low residue values increase from low to high (for numranges > 1)
s. residue ranges don't overlap (for numranges > 1)
t. low residue + number of contiguous doesn't exceed number of amino acids-1 (for numranges > 1)
u. low residue + number of contiguous doesn't exceed number of amino acids-1 (for numranges = 1)
'''
    def setUp(self):
        # Populate self with the module's default input variables so each
        # test only needs to override the single value under scrutiny.
        gui_mimic_complex_monte_carlo.test_variables(self, paths)
def test_1(self):
'''
test if runname has incorrect character
'''
self.runname = 'run_&'
return_error = gui_mimic_complex_monte_carlo.run_module(
self, test_filter=True)
''' check for value error '''
expected_error = ['file or path : run_& has incorrect character : &']
assert_equals(return_error, expected_error)
def test_2(self):
'''
test if path exists
'''
self.path = os.path.join(module_data_path, 'non_existent_path')
return_error = gui_mimic_complex_monte_carlo.run_module(
self, file_check=True)
''' check for path error '''
expected_error = ['permission error in input file path ' + self.path + ' [code = FalseFalseFalse]',
'path does not exist']
assert_equals(return_error, expected_error)
def test_3(self):
'''
test if directory has read permission
'''
''' make a directory '''
os.system('mkdir empty_folder')
''' see if you can read the directory '''
# print os.access('empty_folder', os.R_OK)
''' make the directory un-readable'''
os.system('chmod a-r empty_folder')
''' see if you can read the directory '''
# print os.access('empty_folder', os.R_OK)
self.path = os.path.join('./', 'empty_folder')
return_error = gui_mimic_complex_monte_carlo.run_module(
self, file_check=True)
''' check for path error '''
expected_error = ['permission error in input file path ' +
self.path + ' [code = TrueFalseTrue]', 'read permission not allowed']
assert_equals(return_error, expected_error)
''' make the directory readable'''
os.system('chmod a+r empty_folder')
''' remove the directory '''
os.system('rm -Rf empty_folder')
def test_4(self):
'''
test if directory has write permission
'''
''' make a directory '''
os.system('mkdir empty_folder1')
''' see if you can write to the directory '''
# print os.access('empty_folder1', os.W_OK)
''' make the directory un-writeable'''
os.system('chmod a-w empty_folder1')
''' see if you can write to the directory '''
# print os.access('empty_folder', os.W_OK)
self.path = os.path.join('./', 'empty_folder1')
return_error = gui_mimic_complex_monte_carlo.run_module(
self, file_check=True)
# print 'return_error: ', return_error
''' check for path error '''
expected_error = ['permission error in input file path ' +
self.path + ' [code = TrueTrueFalse]', 'write permission not allowed']
# print 'expected_error: ', expected_error
assert_equals(return_error, expected_error)
''' make the directory writeable'''
os.system('chmod a+w empty_folder1')
''' remove the directory '''
os.system('rm -Rf empty_folder1')
def test_5(self):
'''
test if pdbfile exists
'''
self.pdbfile = os.path.join(
module_data_path, 'does_not_exist!&@#X.pdb')
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['input pdb file, ' +
self.pdbfile + ', does not exist']
assert_equals(return_error, expected_error)
def test_6(self):
'''
test if pdbfile is a valid pdb file
'''
self.pdbfile = os.path.join(module_data_path, 'not_valid.pdb')
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['input pdb file, ' +
self.pdbfile + ', is not a valid pdb file']
assert_equals(return_error, expected_error)
def test_7(self):
'''
test if trials is > 0
'''
self.trials = '0'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['trials = 0?']
assert_equals(return_error, expected_error)
def test_8(self):
'''
test if goback is > 0
'''
self.goback = '0'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['goback = 0?']
assert_equals(return_error, expected_error)
def test_9(self):
'''
test if temperature >=0
'''
self.temp = '-1.0'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['use a positive temperature, temperature = -1.0']
assert_equals(return_error, expected_error)
def test_10(self):
'''
test if Z coordinate filter selection is 0 or 1
'''
self.zflag = '2'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['zflag == 0 for "no" and 1 for "yes", zflag = 2']
assert_equals(return_error, expected_error)
def test_11(self):
'''
test if atomic constraints selection is 0 or 1
'''
self.cflag = '2'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['cflag == 0 for "no" and 1 for "yes", cflag = 2']
assert_equals(return_error, expected_error)
def test_12(self):
'''
test if constraint file exists
'''
self.cflag = '1'
self.confile = './does_not_exist.txt'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['file : ./does_not_exist.txt does not exist']]
assert_equals(return_error, expected_error)
def test_13(self):
'''
test for bad seg1 in constraint file
'''
self.cflag = '1'
self.confile = os.path.join(module_data_path,'bad_seg1.txt')
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [" : LINE 1 segment PAI listed in constraint file is not in your PDB file"]
assert_equals(return_error, expected_error)
def test_14(self):
'''
test for bad seg2 in constraint file
'''
self.cflag = '1'
self.confile = os.path.join(module_data_path,'bad_seg2.txt')
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [" : LINE 1 segment VN2 listed in constraint file is not in your PDB file"]
assert_equals(return_error, expected_error)
def test_15(self):
'''
test for bad atom1 in constraint file
'''
self.cflag = '1'
self.confile = os.path.join(module_data_path,'bad_atom1.txt')
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [" : LINE 1 atom name XA listed in constraint file is not in your PDB file"]
assert_equals(return_error, expected_error)
def test_16(self):
'''
test for bad atom2 in constraint file
'''
self.cflag = '1'
self.confile = os.path.join(module_data_path,'bad_atom2.txt')
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [" : LINE 1 atom name ZA listed in constraint file is not in your PDB file"]
assert_equals(return_error, expected_error)
def test_17(self):
'''
test for bad distance in constraint file
'''
self.cflag = '1'
self.confile = os.path.join(module_data_path,'bad_distance.txt')
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [" : LINE 1 distance value is not appropriate: -100.0"]
assert_equals(return_error, expected_error)
def test_18(self):
'''
test for no distance in constraint file
'''
self.cflag = '1'
self.confile = os.path.join(module_data_path,'no_distance.txt')
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [" : LINE 2 no distance specified or error in line: COM"]
assert_equals(return_error, expected_error)
def test_19(self):
'''
test for COM or ATM type1 in constraint file
'''
self.cflag = '1'
self.confile = os.path.join(module_data_path,'bad_type1.txt')
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [" : LINE 2 TYPE1 is not valid (ATM OR COM): CON"]
assert_equals(return_error, expected_error)
def test_20(self):
'''
test for COM or ATM type2 in constraint file
'''
self.cflag = '1'
self.confile = os.path.join(module_data_path,'bad_type2.txt')
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [" : LINE 2 TYPE2 is not valid (ATM OR COM): ATN"]
assert_equals(return_error, expected_error)
def test_21(self):
'''
test for two types COM and/or ATM in constraint file
'''
self.cflag = '1'
self.confile = os.path.join(module_data_path,'no_type2.txt')
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [" : LINE 1 Two type definitions are required for each constraint (ATM OR COM)"]
assert_equals(return_error, expected_error)
def test_22(self):
'''
test for second resid1 value equal or less than first
'''
self.cflag = '1'
self.confile = os.path.join(module_data_path,'bad_resid1.txt')
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [" : resid values in constraint file for constraint 1 are incorrect: second value is equal or less than first"]
assert_equals(return_error, expected_error)
def test_23(self):
'''
test for second resid2 value equal or less than first
'''
self.cflag = '1'
self.confile = os.path.join(module_data_path,'bad_resid2.txt')
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [" : resid values in constraint file for constraint 0 are incorrect: second value is equal or less than first"]
assert_equals(return_error, expected_error)
def test_24(self):
'''
test if first value in first resid range is in pdb file
'''
self.cflag = '1'
self.confile = os.path.join(module_data_path,'missing_resid1_first.txt')
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [" : resid 0 is not in segment PAI1"]
assert_equals(return_error, expected_error)
def test_25(self):
'''
test if second value in first resid range is in pdb file
'''
self.cflag = '1'
self.confile = os.path.join(module_data_path,'missing_resid1_second.txt')
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [" : resid 433 is not in segment PAI1"]
assert_equals(return_error, expected_error)
def test_26(self):
'''
test if first value in second resid range is in pdb file
'''
self.cflag = '1'
self.confile = os.path.join(module_data_path,'missing_resid2_first.txt')
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [" : resid 0 is not in segment VN1"]
assert_equals(return_error, expected_error)
def test_27(self):
'''
test if second value in second resid range is in pdb file
'''
self.cflag = '1'
self.confile = os.path.join(module_data_path,'missing_resid2_second.txt')
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [" : resid 135 is not in segment VN1"]
assert_equals(return_error, expected_error)
def test_28(self):
'''
test for low Rg cutoff higher than high Rg cutoff
'''
self.lowrg = '51.0'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['low Rg cutoff is larger than high Rg cutoff, lowrg = 51.0 highrg = 50.0']
assert_equals(return_error, expected_error)
def test_29(self):
'''
test if low Rg cutoff > 0
'''
self.lowrg = '-1.0'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['Rg cutoffs need to be >= zero, lowrg = -1.0 highrg = 50.0']
assert_equals(return_error, expected_error)
def test_30(self):
'''
test if high Rg cutoff > 0
'''
self.lowrg = '-5.0'
self.highrg = '-1.0'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['Rg cutoffs need to be >= zero, lowrg = -5.0 highrg = -1.0']
assert_equals(return_error, expected_error)
def test_31(self):
'''
test if number of segments >= 1
'''
self.nsegments = '0'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['the number of segments 0 should be equal/greater than 1!']
assert_equals(return_error, expected_error)
def test_32(self):
'''
test if number of flexible segments >= 1
'''
self.npsegments = '0'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['the number of flexible segments 0 should be equal/greater than 1!']
assert_equals(return_error, expected_error)
def test_33(self):
'''
test if the number of flexible segments <= the number of segments
'''
self.npsegments = '3'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['the number of flexible segments 3 should be equal/less than the number of total segments 2!']
assert_equals(return_error, expected_error)
def test_34(self):
'''
test if the number of segment basis matches the number of segments (for basis != 'all', 'backbone' or 'heavy')
'''
self.segbasis = 'CA'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['the number of segment basis does not match the number of segments: number of segbasis = 1 number of segments = 2',
'segment overlap basis entries can be "heavy", "backbone", "all", or a comma delimited list of atom names ... one for each segment']
assert_equals(return_error, expected_error)
def test_35(self):
'''
test if the number of flexible segment names matches the number of flexible segments
'''
self.flpsegname = 'VN1,VN2'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['the number of flexible segment names does not match the number of flexible segments: number of flexible segment names = 2 number of flexible segments = 1']
assert_equals(return_error, expected_error)
def test_36(self):
'''
test if the number of alignment low residues matches the number of flexible segments
'''
self.seglow = '1,1'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['the number of alignment low residues does not match the number of flexible segments: number of alignment low residues = 2 number of flexible segments = 1']
assert_equals(return_error, expected_error)
def test_37(self):
'''
test if the number of alignment high residues matches the number of flexible segments
'''
self.seghigh = '30,30'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['the number of alignment high residues does not match the number of flexible segments: number of alignment high residues = 2 number of flexible segments = 1']
assert_equals(return_error, expected_error)
def test_38(self):
'''
test if single flexible segment is in PDB file
'''
self.flpsegname = 'VN2'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['The flexible segment name "VN2" is not found in the pdb file!']
assert_equals(return_error, expected_error)
def test_39(self):
'''
test if flexible segments are in PDB file (first segment name is in file; second segment name isn't in file)
'''
self.npsegments = '2'
self.seglow = '1,1'
self.seghigh = '30,30'
self.flpsegname = 'VN1,VN2'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['The flexible segment name "VN2" is not found in the pdb file!']
assert_equals(return_error, expected_error)
def test_40(self):
'''
test if total number of segments is equal to number of segments in PDB file
'''
self.nsegments = '3'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['the total number of segments entered does NOT match the number of segments in the pdb file']
assert_equals(return_error, expected_error)
def test_41(self):
'''
test if directedmc >=0
'''
self.directedmc = '-1.0'
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = ['directed Monte Carlo needs to be 0 or a float > 0 (the "goal Rg") ... you entered: -1.0']
assert_equals(return_error, expected_error)
def test_42(self):
'''
test if number of ranges is an integer type
'''
self.psegvariables= [['1.0', '30', '40', '89', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['The number of ranges "1.0" for flexible segment number 1 in the flexible segment input fields should be an integer type!']]
assert_equals(return_error, expected_error)
def test_43(self):
'''
test if number of ranges is >= 1
'''
self.psegvariables= [['0', '30', '40', '89', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['The number of ranges "0" for flexible segment number 1 in the flexible segment input fields should be equal/greater than 1!']]
assert_equals(return_error, expected_error)
def test_44(self):
'''
test if the angle value is a float type
'''
self.psegvariables= [['1', '3o', '40', '89', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['The angle value "3o" should be a float type!']]
assert_equals(return_error, expected_error)
def test_45(self):
'''
test if the angle value is between 0 and 180
'''
self.psegvariables= [['1', '190', '40', '89', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['The angle value "190" should be in the range of (0.0,180.0)!']]
assert_equals(return_error, expected_error)
def test_46(self):
'''
test if the low resid is an integer array
'''
self.psegvariables= [['1', '30', '40.0', '89', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['The low resid "40.0" for flexible segment number 1 in the flexible segment input fields should be an integer array!']]
assert_equals(return_error, expected_error)
def test_47(self):
'''
test if the number of contiguous residues is an integer array
'''
self.psegvariables= [['1', '30', '40', '89.0', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['The number of contiguous residues "89.0" for flexible segment number 1 in the flexible segment input fields should be an integer array!']]
assert_equals(return_error, expected_error)
def test_48(self):
'''
test if the molecule type provided for the flexible segment matches that in the PDB file
'''
self.psegvariables= [['1', '30', '40', '89', 'rna']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['The molecule type "rna" provided for flexible segment number 1 in the flexible segment input fields does not match the pdb file!']]
assert_equals(return_error, expected_error)
def test_49(self):
'''
test if the flexible residue in the input PDB file has low alignment residue
'''
self.pdbfile = os.path.join(module_data_path,'missing_resid.pdb')
self.psegvariables= [['1', '30', '40', '89', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['input pdb file, ' + str(self.pdbfile) + ' does not have low alignment amino acid residue, 1, range = 2 : 130']]
assert_equals(return_error, expected_error)
def test_50(self):
'''
test if the flexible residue in the input PDB file has high alignment residue
'''
self.pdbfile = os.path.join(module_data_path,'missing_resid1.pdb')
self.psegvariables= [['1', '30', '40', '89', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['input pdb file, ' + str(self.pdbfile) + ' does not have high alignment amino acid residue, 39, range = 1 : 130']]
assert_equals(return_error, expected_error)
def test_51(self):
'''
test if low alignment residue < high alignment residue
'''
self.seglow = '20'
self.seghigh = '1'
self.psegvariables= [['1', '30', '40', '89', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['alignment basis is too small (less than 3 points) or low residue > high residue']]
assert_equals(return_error, expected_error)
def test_52(self):
'''
test if alignment range > 3
'''
self.seglow = '1'
self.seghigh = '3'
self.psegvariables= [['1', '30', '40', '89', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['alignment basis is too small (less than 3 points) or low residue > high residue']]
assert_equals(return_error, expected_error)
def test_53(self):
'''
test number of angle values matches the number of ranges
'''
self.psegvariables= [['1', '30,30', '40', '89', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['the number of dtheta values does not match the number of ranges, dtheta = [30.0, 30.0] numranges = 1']]
assert_equals(return_error, expected_error)
def test_54(self):
'''
test number of low residue values matches the number of ranges
'''
self.psegvariables= [['1', '30', '40,130', '89', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['the number of low residue values does not match the number of ranges, lowres = [40, 130] numranges = 1']]
assert_equals(return_error, expected_error)
def test_55(self):
'''
test number of contiguous residues matches the number of ranges
'''
self.psegvariables= [['1', '30', '40', '89,2', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['the number of contiguous residues does not match the number of ranges, contiguous = [89, 2] numranges = 1']]
assert_equals(return_error, expected_error)
def test_56(self):
'''
test if low flexible residue is in PDB file
'''
self.psegvariables= [['1', '30', '131', '2', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['Input pdb file, ' + str(self.pdbfile) + ' does not have low residue amino acid, "131" for segment number 1, range = 1 : 130']]
assert_equals(return_error, expected_error)
def test_57(self):
'''
test if low+contiguous flexible residue is in PDB file
'''
self.pdbfile = os.path.join(module_data_path,'missing_resid2.pdb')
self.psegvariables= [['1', '30', '40', '89', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['Input pdb file, ' + str(self.pdbfile) + ' does not have low+contiguous residue amino acid, "129" for segment number 1, range = 1 : 130']]
assert_equals(return_error, expected_error)
def test_58(self):
'''
test if number of contiguous residues is >=0
'''
self.psegvariables= [['1', '30', '40', '-1', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['The number of contiguous residues "-1" should be greater than 0!']]
assert_equals(return_error, expected_error)
def test_59(self):
'''
test if alignment and flexible ranges overlap
'''
self.psegvariables= [['1', '30', '39', '90', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['alignment and flexible ranges should not overlap!']]
assert_equals(return_error, expected_error)
def test_60(self):
'''
test if low residue includes the n-terminus
'''
self.seglow = '80'
self.seghigh = '90'
self.psegvariables= [['2', '30,30', '1,10', '10,10', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['low residue can not include the n-terminus, reslow = [1, 10]']]
assert_equals(return_error, expected_error)
def test_61(self):
'''
test if low residue values increase from low to high
'''
self.seglow = '80'
self.seghigh = '90'
self.psegvariables= [['2', '30,30', '15,10', '10,10', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['low residue values must increase from low to high, reslow = 15']]
assert_equals(return_error, expected_error)
def test_62(self):
'''
test if residue ranges overlap
'''
self.seglow = '80'
self.seghigh = '90'
self.psegvariables= [['2', '30,30', '2,10', '10,10', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['low residue values plus number contiguous overlap, reslow = 2 numcont = 10']]
assert_equals(return_error, expected_error)
def test_63(self):
'''
test if low residue plus number contiguous exceeds the number of amino acids-1 (numranges > 1)
'''
self.psegvariables= [['2', '30,30', '40,60', '18,70', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['your low residue plus number contiguous exceeds the number of amino acids-1 (129), reslow = 60 numcont = 70']]
assert_equals(return_error, expected_error)
def test_64(self):
'''
test if low residue plus number contiguous exceeds the number of amino acids-1 (numranges = 1)
'''
self.psegvariables= [['1', '30', '40', '90', 'protein']]
return_error = gui_mimic_complex_monte_carlo.run_module(self, test_filter=True)
''' check for file error '''
expected_error = [['your low residue plus number contiguous exceeds the number of amino acids-1 (129), reslow = 40 numcont = 90']]
assert_equals(return_error, expected_error)
def tearDown(self):
if os.path.exists(self.runname):
shutil.rmtree(self.runname)
if __name__ == '__main__':
main()
| madscatt/zazzie | src_2.7/sassie/test_sassie/interface/complex_monte_carlo/test_complex_filter.py | Python | gpl-3.0 | 40,753 | [
"CHARMM"
] | 472a242ea7057a5555c77adc73ac81ceeaf3b1a2137d5313c0d764adfa692656 |
"""Comparators originally meant to be used with particles"""
import numpy as np
from ase.ga.utilities import get_nnmat
class NNMatComparator(object):
"""Use the nearest neighbor matrix to determine differences
in the distribution (and to a slighter degree structure)
of atoms. As specified in
S. Lysgaard et al., Top. Catal., 57 (1-4), pp 33-39, (2014)"""
def __init__(self, d=0.2, elements=[]):
self.d = d
self.elements = elements
def looks_like(self, a1, a2):
""" Return if structure a1 or a2 are similar or not. """
elements = self.elements
if elements == []:
elements = sorted(set(a1.get_chemical_symbols()))
a1, a2 = a1.copy(), a2.copy()
del a1[[a.index for a in a1 if a.symbol not in elements]]
del a2[[a.index for a in a2 if a.symbol not in elements]]
nnmat_a1 = get_nnmat(a1)
nnmat_a2 = get_nnmat(a2)
diff = np.linalg.norm(nnmat_a1 - nnmat_a2)
if diff < self.d:
return True
else:
return False
| suttond/MODOI | ase/ga/particle_comparator.py | Python | lgpl-3.0 | 1,071 | [
"ASE"
] | bd68cacbe422185ed59b3412467a6ad071a33eed50a4b9d59399474d4d34027d |
"""
This script demonstrates how to use moogli to carry out a simulation and
simultaneously update the visualizer.
The visualizer remains active while the simulation is running.
"""
try:
import moogli
except ImportError as e:
print( "[INFO ] Could not import moogli. Quitting..." )
quit()
import moose
from moose import neuroml
from PyQt4 import Qt, QtCore, QtGui
import sys
import os
import random
import numpy as np
import math
# The QApplication class manages the GUI application's
# control flow and main settings
app = QtGui.QApplication(sys.argv)
# Load model from the neuroml file into moose
filename = os.path.join( os.path.split(os.path.realpath(__file__))[0]
, "../neuroml/PurkinjeCellPassivePulseInput/PurkinjePassive.net.xml"
)
popdict, projdict = moose.neuroml.loadNeuroML_L123(filename)
# setting up hsolve object for each neuron
for popinfo in list(popdict.values()):
for cell in list(popinfo[1].values()):
solver = moose.HSolve(cell.path + "/hsolve")
solver.target = cell.path
# reinit moose to bring to a reliable initial state.
moose.reinit()
SIMULATION_DELTA = 0.001
SIMULATION_TIME = 0.03
ALL_COMPARTMENTS = [x.path for x in moose.wildcardFind("/cells[0]/##[ISA=CompartmentBase]")]
BASE_VM_VALUE = -0.065
PEAK_VM_VALUE = -0.060
BASE_VM_COLOR = [1.0, 0.0, 0.0, 0.1]
PEAK_VM_COLOR = [0.0, 0.0, 1.0, 1.0]
# Moogli requires a morphology object. Create a morphology object
# by reading the geometry details from all objects of type CompartmentBase
# inside /cells[0]
morphology = moogli.read_morphology_from_moose(name = "", path = "/cells[0]")
# Create a named group of compartments called 'group-all'
# which will contain all the compartments of the model.
# Each group has a strict upper and lower limit for the
# variable which is being visualized.
# Both limits map to colors provided to the api.
# The value of the variable is linearly mapped to a color value
# lying between the upper and lower color values.
morphology.create_group( "group-all" # group name
, ALL_COMPARTMENTS # sequence of compartments belonging to this group
, BASE_VM_VALUE # base value of variable
, PEAK_VM_VALUE # peak value of variable
, BASE_VM_COLOR # color corresponding to base value
, PEAK_VM_COLOR # color corresponding to peak value
)
# set initial color of all compartments in accordance with their vm
morphology.set_color( "group-all"
, [moose.element(x).Vm for x in ALL_COMPARTMENTS]
)
# instantiate the visualizer with the morphology object created earlier
viewer = moogli.DynamicMorphologyViewerWidget(morphology)
# by default the visualizer is shown maximized.
viewer.showMaximized()
# Callback function will be called by the visualizer at regular intervals.
# The callback can modify both the morphology and viewer object's properties
# since they are passed as arguments.
def callback(morphology, viewer):
# run simulation for 1 ms
moose.start(SIMULATION_DELTA)
# change color of all the compartments according to their vm values.
# a value higher than peak value will be clamped to peak value
# a value lower than base value will be clamped to base value.
morphology.set_color( "group-all"
, [x.Vm for x in moose.wildcardFind("/cells[0]/##[ISA=CompartmentBase]")]
)
# if the callback returns true, it will be called again.
# if it returns false it will not be called ever again.
# the condition below ensures that simulation runs for 1 sec
if moose.element("/clock").currentTime < SIMULATION_TIME : return True
else : return False
# set the callback function to be called after every idletime milliseconds
viewer.set_callback(callback, idletime = 0)
# make sure that entire model is visible
viewer.pitch(math.pi / 2)
viewer.zoom(0.25)
# Enter the main event loop and wait until exit() is called.
# It is necessary to call this function to start event handling.
# The main event loop receives events from the window system and
# dispatches these to the application widgets.
app.exec_()
| BhallaLab/moose | moose-examples/moogli/purkinje_simulation.py | Python | gpl-3.0 | 4,328 | [
"MOOSE",
"NEURON"
] | 67dd10bb6ffaf27aa012aa05be8b5b91a661ef055983de0d8b2f736da32ceb04 |
import os
from director.componentgraph import ComponentFactory
from director import consoleapp
import director.objectmodel as om
import director.visualization as vis
from director.fieldcontainer import FieldContainer
from director import applogic
from director import appsettings
from director import drcargs
import functools
import PythonQt
from PythonQt import QtCore, QtGui
class MainWindowApp(object):
def __init__(self):
self.mainWindow = QtGui.QMainWindow()
self.mainWindow.resize(768 * (16/9.0), 768)
self.settings = QtCore.QSettings()
self.fileMenu = self.mainWindow.menuBar().addMenu('&File')
self.editMenu = self.mainWindow.menuBar().addMenu('&Edit')
self.viewMenu = self.mainWindow.menuBar().addMenu('&View')
self.toolbarMenu = self.viewMenu.addMenu('&Toolbars')
self.toolsMenu = self.mainWindow.menuBar().addMenu('&Tools')
self.helpMenu = self.mainWindow.menuBar().addMenu('&Help')
self.viewMenuManager = PythonQt.dd.ddViewMenu(self.viewMenu)
self.toolbarMenuManager = PythonQt.dd.ddViewMenu(self.toolbarMenu)
self.quitAction = self.fileMenu.addAction('&Quit')
self.quitAction.setShortcut(QtGui.QKeySequence('Ctrl+Q'))
self.quitAction.connect('triggered()', self.quit)
self.fileMenu.addSeparator()
self.pythonConsoleAction = self.toolsMenu.addAction('&Python Console')
self.pythonConsoleAction.setShortcut(QtGui.QKeySequence('F8'))
self.pythonConsoleAction.connect('triggered()', self.showPythonConsole)
self.toolsMenu.addSeparator()
helpAction = self.helpMenu.addAction('Online Documentation')
helpAction.connect('triggered()', self.showOnlineDocumentation)
self.helpMenu.addSeparator()
helpKeyboardShortcutsAction = self.helpMenu.addAction('Keyboard Shortcuts')
helpKeyboardShortcutsAction.connect('triggered()', self.showOnlineKeyboardShortcuts)
self.helpMenu.addSeparator()
def quit(self):
MainWindowApp.applicationInstance().quit()
def exit(self, exitCode=0):
MainWindowApp.applicationInstance().exit(exitCode)
def start(self, enableAutomaticQuit=True, restoreWindow=True):
if not consoleapp.ConsoleApp.getTestingEnabled() and restoreWindow:
self.initWindowSettings()
self.mainWindow.show()
self.mainWindow.raise_()
return consoleapp.ConsoleApp.start(enableAutomaticQuit)
@staticmethod
def applicationInstance():
return QtCore.QCoreApplication.instance()
def showPythonConsole(self):
applogic.showPythonConsole()
def showOnlineDocumentation(self):
QtGui.QDesktopServices.openUrl(QtCore.QUrl('https://openhumanoids.github.io/director/'))
def showOnlineKeyboardShortcuts(self):
QtGui.QDesktopServices.openUrl(QtCore.QUrl('https://openhumanoids.github.io/director/user_guide/keyboard_shortcuts.html#director'))
def showErrorMessage(self, message, title='Error'):
QtGui.QMessageBox.warning(self.mainWindow, title, message)
def showInfoMessage(self, message, title='Info'):
QtGui.QMessageBox.information(self.mainWindow, title, message)
def wrapScrollArea(self, widget):
w = QtGui.QScrollArea()
w.setWidget(widget)
w.setWidgetResizable(True)
w.setWindowTitle(widget.windowTitle)
#w.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
return w
def addWidgetToViewMenu(self, widget):
self.viewMenuManager.addWidget(widget, widget.windowTitle)
def addViewMenuSeparator(self):
self.viewMenuManager.addSeparator()
def addWidgetToDock(self, widget, dockArea, visible=True):
dock = QtGui.QDockWidget()
dock.setWidget(widget)
dock.setWindowTitle(widget.windowTitle)
dock.setObjectName(widget.windowTitle + ' Dock')
dock.setVisible(visible)
self.mainWindow.addDockWidget(dockArea, dock)
self.addWidgetToViewMenu(dock)
return dock
def addToolBar(self, title, area=QtCore.Qt.TopToolBarArea):
toolBar = QtGui.QToolBar(title)
toolBar.objectName = toolBar.windowTitle
self.mainWindow.addToolBar(area, toolBar)
self.toolbarMenuManager.addWidget(toolBar, toolBar.windowTitle)
return toolBar
def addToolBarAction(self, toolBar, text, icon=None, callback=None):
if isinstance(icon, str):
icon = QtGui.QIcon(icon)
action = toolBar.addAction(icon, text)
if callback:
action.connect('triggered()', callback)
return action
def registerStartupCallback(self, func, priority=1):
consoleapp.ConsoleApp._startupCallbacks.setdefault(priority, []).append(func)
def _restoreWindowState(self, key):
appsettings.restoreState(self.settings, self.mainWindow, key)
def _saveWindowState(self, key):
appsettings.saveState(self.settings, self.mainWindow, key)
self.settings.sync()
def _saveCustomWindowState(self):
self._saveWindowState('MainWindowCustom')
def restoreDefaultWindowState(self):
self._restoreWindowState('MainWindowDefault')
def initWindowSettings(self):
self._saveWindowState('MainWindowDefault')
self._restoreWindowState('MainWindowCustom')
self.applicationInstance().connect('aboutToQuit()', self._saveCustomWindowState)
class MainWindowAppFactory(object):
def getComponents(self):
components = {
'View' : [],
'Globals' : [],
'GlobalModules' : ['Globals'],
'ObjectModel' : [],
'ViewOptions' : ['View', 'ObjectModel'],
'MainToolBar' : ['View', 'Grid', 'ViewOptions', 'MainWindow'],
'ViewBehaviors' : ['View'],
'Grid': ['View', 'ObjectModel'],
'MainWindow' : ['View', 'ObjectModel'],
'AdjustedClippingRange' : ['View'],
'ScriptLoader' : ['MainWindow', 'Globals']}
disabledComponents = []
return components, disabledComponents
def initView(self, fields):
view = PythonQt.dd.ddQVTKWidgetView()
applogic._defaultRenderView = view
applogic.setCameraTerrainModeEnabled(view, True)
applogic.resetCamera(viewDirection=[-1, -1, -0.3], view=view)
return FieldContainer(view=view)
def initObjectModel(self, fields):
om.init()
objectModel = om.getDefaultObjectModel()
objectModel.getTreeWidget().setWindowTitle('Scene Browser')
objectModel.getPropertiesPanel().setWindowTitle('Properties Panel')
return FieldContainer(objectModel=objectModel)
def initGrid(self, fields):
gridObj = vis.showGrid(fields.view, parent='scene')
gridObj.setProperty('Surface Mode', 'Surface with edges')
gridObj.setProperty('Color', [0,0,0])
gridObj.setProperty('Alpha', 0.1)
applogic.resetCamera(viewDirection=[-1, -1, -0.3], view=fields.view)
return FieldContainer(gridObj=gridObj)
def initViewBehaviors(self, fields):
from director import viewbehaviors
viewBehaviors = viewbehaviors.ViewBehaviors(fields.view)
return FieldContainer(viewBehaviors=viewBehaviors)
def initViewOptions(self, fields):
viewOptions = vis.ViewOptionsItem(fields.view)
fields.objectModel.addToObjectModel(viewOptions, parentObj=fields.objectModel.findObjectByName('scene'))
viewOptions.setProperty('Background color', [0.3, 0.3, 0.35])
viewOptions.setProperty('Background color 2', [0.95,0.95,1])
return FieldContainer(viewOptions=viewOptions)
def initAdjustedClippingRange(self, fields):
'''This setting improves the near plane clipping resolution.
Drake often draws a very large ground plane which is detrimental to
the near clipping for up close objects. The trade-off is Z buffer
resolution but in practice things look good with this setting.'''
fields.view.renderer().SetNearClippingPlaneTolerance(0.0005)
def initMainWindow(self, fields):
organizationName = 'RobotLocomotion'
applicationName = 'DirectorMainWindow'
windowTitle = 'Director App'
if hasattr(fields, 'organizationName'):
organizationName = fields.organizationName
if hasattr(fields, 'applicationName'):
applicationName = fields.applicationName
if hasattr(fields, 'windowTitle'):
windowTitle = fields.windowTitle
MainWindowApp.applicationInstance().setOrganizationName(organizationName)
MainWindowApp.applicationInstance().setApplicationName(applicationName)
app = MainWindowApp()
app.mainWindow.setCentralWidget(fields.view)
app.mainWindow.setWindowTitle(windowTitle)
app.mainWindow.setWindowIcon(QtGui.QIcon(':/images/drake_logo.png'))
sceneBrowserDock = app.addWidgetToDock(fields.objectModel.getTreeWidget(),
QtCore.Qt.LeftDockWidgetArea, visible=True)
propertiesDock = app.addWidgetToDock(app.wrapScrollArea(fields.objectModel.getPropertiesPanel()),
QtCore.Qt.LeftDockWidgetArea, visible=True)
app.addViewMenuSeparator()
def toggleObjectModelDock():
newState = not sceneBrowserDock.visible
sceneBrowserDock.setVisible(newState)
propertiesDock.setVisible(newState)
applogic.addShortcut(app.mainWindow, 'F1', toggleObjectModelDock)
#applogic.addShortcut(app.mainWindow, 'F8', app.showPythonConsole)
return FieldContainer(
app=app,
mainWindow=app.mainWindow,
sceneBrowserDock=sceneBrowserDock,
propertiesDock=propertiesDock,
toggleObjectModelDock=toggleObjectModelDock,
commandLineArgs=drcargs.args()
)
def initMainToolBar(self, fields):
from director import viewcolors
app = fields.app
toolBar = app.addToolBar('Main Toolbar')
app.addToolBarAction(toolBar, 'Python Console', ':/images/python_logo.png', callback=app.showPythonConsole)
toolBar.addSeparator()
terrainModeAction = fields.app.addToolBarAction(toolBar, 'Camera Free Rotate', ':/images/camera_mode.png')
lightAction = fields.app.addToolBarAction(toolBar, 'Background Light', ':/images/light_bulb_icon.png')
app.addToolBarAction(toolBar, 'Reset Camera', ':/images/reset_camera.png', callback=applogic.resetCamera)
def getFreeCameraMode():
return not applogic.getCameraTerrainModeEnabled(fields.view)
def setFreeCameraMode(enabled):
applogic.setCameraTerrainModeEnabled(fields.view, not enabled)
terrainToggle = applogic.ActionToggleHelper(terrainModeAction, getFreeCameraMode, setFreeCameraMode)
viewBackgroundLightHandler = viewcolors.ViewBackgroundLightHandler(fields.viewOptions, fields.gridObj,
lightAction)
return FieldContainer(viewBackgroundLightHandler=viewBackgroundLightHandler, terrainToggle=terrainToggle)
def initGlobalModules(self, fields):
from PythonQt import QtCore, QtGui
from director import objectmodel as om
from director import visualization as vis
from director import applogic
from director import transformUtils
from director import filterUtils
from director import ioUtils
from director import vtkAll as vtk
from director import vtkNumpy as vnp
from director.debugVis import DebugData
from director.timercallback import TimerCallback
from director.fieldcontainer import FieldContainer
import numpy as np
modules = dict(locals())
del modules['fields']
del modules['self']
fields.globalsDict.update(modules)
def initGlobals(self, fields):
try:
globalsDict = fields.globalsDict
except AttributeError:
globalsDict = dict()
if globalsDict is None:
globalsDict = dict()
return FieldContainer(globalsDict=globalsDict)
def initScriptLoader(self, fields):
def loadScripts():
for scriptArgs in fields.commandLineArgs.scripts:
filename = scriptArgs[0]
globalsDict = fields.globalsDict
args = dict(__file__=filename,
_argv=scriptArgs,
_fields=fields)
prev_args = {}
for k, v in args.items():
if k in globalsDict:
prev_args[k] = globalsDict[k]
globalsDict[k] = v
try:
execfile(filename, globalsDict)
finally:
for k in args.keys():
del globalsDict[k]
for k, v in prev_args.items():
globalsDict[k] = v
fields.app.registerStartupCallback(loadScripts)
class MainWindowPanelFactory(object):
    """Component factory contributing the standard dock panels and tool
    integrations to the main window application."""

    def getComponents(self):
        """Return (components, disabledComponents) for this factory.

        Every component depends only on 'MainWindow'.
        """
        panelNames = [
            'OpenDataHandler',
            'ScreenGrabberPanel',
            'CameraBookmarksPanel',
            'CameraControlPanel',
            'MeasurementPanel',
            'OutputConsole',
            'UndoRedo',
            'DrakeVisualizer',
            'TreeViewer',
            'LCMGLRenderer',
            ]
        components = dict((name, ['MainWindow']) for name in panelNames)

        # these components depend on lcm and lcmgl
        # so they are disabled by default
        disabledComponents = ['DrakeVisualizer', 'TreeViewer', 'LCMGLRenderer']
        return components, disabledComponents

    def initOpenDataHandler(self, fields):
        """Create the open-data handler and queue loading of the data files
        given on the command line at startup."""
        from director import opendatahandler

        handler = opendatahandler.OpenDataHandler(fields.app)

        def loadData():
            for dataFile in drcargs.args().data_files:
                handler.openGeometry(dataFile)

        fields.app.registerStartupCallback(loadData)
        return FieldContainer(openDataHandler=handler)

    def initOutputConsole(self, fields):
        """Create the output console and attach it (hidden) to the app window."""
        from director import outputconsole

        console = outputconsole.OutputConsole()
        console.addToAppWindow(fields.app, visible=False)
        return FieldContainer(outputConsole=console)

    def initMeasurementPanel(self, fields):
        """Create the measurement panel, docked (hidden) on the right."""
        from director import measurementpanel

        panel = measurementpanel.MeasurementPanel(fields.app, fields.view)
        dock = fields.app.addWidgetToDock(
            panel.widget, QtCore.Qt.RightDockWidgetArea, visible=False)
        return FieldContainer(
            measurementPanel=panel,
            measurementDock=dock)

    def initScreenGrabberPanel(self, fields):
        """Create the screen grabber panel, docked (hidden) on the right."""
        from director.screengrabberpanel import ScreenGrabberPanel

        panel = ScreenGrabberPanel(fields.view)
        dock = fields.app.addWidgetToDock(
            panel.widget, QtCore.Qt.RightDockWidgetArea, visible=False)
        return FieldContainer(
            screenGrabberPanel=panel,
            screenGrabberDock=dock)

    def initCameraBookmarksPanel(self, fields):
        """Create the camera bookmarks panel, docked (hidden) on the right."""
        from director import camerabookmarks

        panel = camerabookmarks.CameraBookmarkWidget(fields.view)
        dock = fields.app.addWidgetToDock(
            panel.widget, QtCore.Qt.RightDockWidgetArea, visible=False)
        return FieldContainer(
            cameraBookmarksPanel=panel,
            cameraBookmarksDock=dock)

    def initCameraControlPanel(self, fields):
        """Create the camera control panel, docked (hidden) on the right."""
        from director import cameracontrolpanel

        panel = cameracontrolpanel.CameraControlPanel(fields.view)
        dock = fields.app.addWidgetToDock(
            panel.widget, QtCore.Qt.RightDockWidgetArea, visible=False)
        return FieldContainer(
            cameraControlPanel=panel,
            cameraControlDock=dock)

    def initUndoRedo(self, fields):
        """Set up a Qt undo stack with a history dock and Edit-menu actions
        bound to the usual Ctrl+Z / Ctrl+Shift+Z shortcuts."""
        stack = QtGui.QUndoStack()
        historyView = QtGui.QUndoView(stack)
        historyView.setEmptyLabel('Start')
        historyView.setWindowTitle('History')
        dock = fields.app.addWidgetToDock(
            historyView, QtCore.Qt.LeftDockWidgetArea, visible=False)

        undoAction = stack.createUndoAction(stack)
        redoAction = stack.createRedoAction(stack)
        undoAction.setShortcut(QtGui.QKeySequence('Ctrl+Z'))
        redoAction.setShortcut(QtGui.QKeySequence('Ctrl+Shift+Z'))
        fields.app.editMenu.addAction(undoAction)
        fields.app.editMenu.addAction(redoAction)

        return FieldContainer(
            undoDock=dock,
            undoStack=stack,
            undoView=historyView,
            undoAction=undoAction,
            redoAction=redoAction)

    def initDrakeVisualizer(self, fields):
        """Create the Drake visualizer and expose an enable toggle in the
        Tools menu."""
        from director import drakevisualizer

        visualizer = drakevisualizer.DrakeVisualizer(fields.view)
        applogic.MenuActionToggleHelper(
            'Tools', visualizer.name, visualizer.isEnabled, visualizer.setEnabled)
        return FieldContainer(drakeVisualizer=visualizer)

    def initTreeViewer(self, fields):
        """Create the tree viewer and expose an enable toggle in the Tools menu."""
        from director import treeviewer

        viewer = treeviewer.TreeViewer(fields.view)
        applogic.MenuActionToggleHelper(
            'Tools', viewer.name, viewer.isEnabled, viewer.setEnabled)
        return FieldContainer(treeViewer=viewer)

    def initLCMGLRenderer(self, fields):
        """Create the LCMGL renderer when lcmgl support is available;
        otherwise return a None manager."""
        from director import lcmgl

        if lcmgl.LCMGL_AVAILABLE:
            manager = lcmgl.LCMGLManager(fields.view)
            applogic.MenuActionToggleHelper(
                'Tools', 'LCMGL Renderer', manager.isEnabled, manager.setEnabled)
        else:
            manager = None
        return FieldContainer(lcmglManager=manager)
def construct(globalsDict=None):
    """Build and return the main window app fields from the registered
    component factories, optionally publishing into globalsDict."""
    factory = ComponentFactory()
    factory.register(MainWindowAppFactory)
    factory.register(MainWindowPanelFactory)
    return factory.construct(globalsDict=globalsDict)
def main(globalsDict=None):
    """Construct the application, mirror its fields into globalsDict when
    one is given, and enter the event loop."""
    appFields = construct(globalsDict)
    if globalsDict is not None:
        globalsDict.update(**dict(appFields))
    appFields.app.start()
if __name__ == '__main__':
    # Running as a script: publish the constructed app fields into this
    # module's namespace and start the application event loop.
    main(globals())
| patmarion/director | src/python/director/mainwindowapp.py | Python | bsd-3-clause | 18,755 | [
"VTK"
] | a75ffef769e36181c34a9d0b4a725ec0ac42b3e051e7b0e6c41685e02965c51e |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.