text (string, 12 to 1.05M chars) | repo_name (string, 5 to 86 chars) | path (string, 4 to 191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12 to 1.05M) | keyword (list, 1 to 23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
# coding: utf-8
"""Floating bodies to be used in radiation-diffraction problems."""
# Copyright (C) 2017-2019 Matthieu Ancellin
# See LICENSE file at <https://github.com/mancellin/capytaine>
import logging
import copy
from itertools import chain, accumulate, product, zip_longest
import datetime
import numpy as np
import xarray as xr
from capytaine.meshes.geometry import Abstract3DObject, Plane, inplace_transformation
from capytaine.meshes.meshes import Mesh
from capytaine.meshes.symmetric import build_regular_array_of_meshes
from capytaine.meshes.collections import CollectionOfMeshes
LOG = logging.getLogger(__name__)
TRANSLATION_DOFS_DIRECTIONS = {"surge": (1, 0, 0), "sway": (0, 1, 0), "heave": (0, 0, 1)}
ROTATION_DOFS_AXIS = {"roll": (1, 0, 0), "pitch": (0, 1, 0), "yaw": (0, 0, 1)}
class FloatingBody(Abstract3DObject):
"""A floating body described as a mesh and some degrees of freedom.
The mesh structure is stored as a Mesh from capytaine.mesh.mesh or a
CollectionOfMeshes from capytaine.mesh.meshes_collection.
The degrees of freedom (dofs) are stored as a dict associating a name to
a complex-valued array of shape (nb_faces, 3). To each face of the body
(as indexed in the mesh) corresponds a complex-valued 3d vector, which
defines the displacement of the center of the face in frequency domain.
Parameters
----------
mesh : Mesh or CollectionOfMeshes, optional
the mesh describing the geometry of the floating body.
If none is given, an empty one is created.
dofs : dict, optional
the degrees of freedom of the body.
If none is given, an empty dictionary is initialized.
name : str, optional
a name for the body.
If none is given, the one of the mesh is used.
"""
def __init__(self, mesh=None, dofs=None, name=None):
if mesh is None:
mesh = Mesh(name="dummy_mesh")
if dofs is None:
dofs = {}
if name is None:
name = mesh.name
assert isinstance(mesh, Mesh) or isinstance(mesh, CollectionOfMeshes)
self.mesh = mesh
self.full_body = None
self.dofs = dofs
self.name = name
if self.mesh.nb_vertices == 0 or self.mesh.nb_faces == 0:
LOG.warning(f"New floating body (with empty mesh!): {self.name}.")
else:
self.mesh.heal_mesh()
LOG.info(f"New floating body: {self.name}.")
@staticmethod
def from_meshio(mesh, name=None) -> 'FloatingBody':
"""Create a FloatingBody from a meshio mesh object."""
import meshio
if not isinstance(mesh, meshio._mesh.Mesh):
raise TypeError('mesh must be of type meshio._mesh.Mesh, received {:}'.format(type(mesh)))
if name is None:
date_str = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
name = 'fb_{:}'.format(date_str)
def all_faces_as_quads(cells):
all_faces = []
if 'quad' in cells:
all_faces.append(cells['quad'])
if 'triangle' in cells:
num_triangles = len(mesh.cells_dict['triangle'])
LOG.info("Stored {:} triangle faces as quadrilaterals".format(num_triangles))
triangles_as_quads = np.empty((cells['triangle'].shape[0], 4), dtype=int)
triangles_as_quads[:, :3] = cells['triangle'][:, :]
triangles_as_quads[:, 3] = cells['triangle'][:, 2] # Repeat one node to make a quad
all_faces.append(triangles_as_quads)
return np.concatenate(all_faces)
cpt_mesh = Mesh(vertices=mesh.points,
faces=all_faces_as_quads(mesh.cells_dict),
name=name+"_mesh")
fb = FloatingBody(mesh=cpt_mesh, name=name)
return fb
@staticmethod
def from_file(filename: str, file_format=None, name=None) -> 'FloatingBody':
"""Create a FloatingBody from a mesh file using meshmagick."""
from capytaine.io.mesh_loaders import load_mesh
if name is None:
name = filename
mesh = load_mesh(filename, file_format, name=f"{name}_mesh")
return FloatingBody(mesh, name=name)
def __lt__(self, other: 'FloatingBody') -> bool:
"""Arbitrary order. The point is to sort together the problems involving the same body."""
return self.name < other.name
##########
# Dofs #
##########
@property
def nb_dofs(self) -> int:
"""Number of degrees of freedom."""
return len(self.dofs)
def add_translation_dof(self, direction=None, name=None, amplitude=1.0) -> None:
"""Add a new translation dof (in place).
If no direction is given, the code tries to infer it from the name.
Parameters
----------
direction : array of shape (3,), optional
the direction of the translation
name : str, optional
a name for the degree of freedom
amplitude : float, optional
amplitude of the dof (default: 1.0 m/s)
"""
if direction is None:
if name is not None and name.lower() in TRANSLATION_DOFS_DIRECTIONS:
direction = TRANSLATION_DOFS_DIRECTIONS[name.lower()]
else:
raise ValueError("A direction needs to be specified for the dof.")
if name is None:
name = f"dof_{self.nb_dofs}_translation"
direction = np.asarray(direction)
assert direction.shape == (3,)
motion = np.empty((self.mesh.nb_faces, 3))
motion[:, :] = direction
self.dofs[name] = amplitude * motion
def add_rotation_dof(self, axis=None, name=None, amplitude=1.0) -> None:
"""Add a new rotation dof (in place).
If no axis is given, the code tries to infer it from the name.
Parameters
----------
axis: Axis, optional
the axis of the rotation
name : str, optional
a name for the degree of freedom
amplitude : float, optional
amplitude of the dof (default: 1.0)
"""
if axis is None:
if name is not None and name.lower() in ROTATION_DOFS_AXIS:
axis_direction = ROTATION_DOFS_AXIS[name.lower()]
for point_attr in ('rotation_center', 'center_of_mass', 'geometric_center'):
if hasattr(self, point_attr):
axis_point = getattr(self, point_attr)
LOG.info(f"The rotation dof {name} has been initialized around the point: "
f"{self.name}.{point_attr} = {getattr(self, point_attr)}")
break
else:
axis_point = np.array([0, 0, 0])
LOG.warning(f"The rotation dof {name} has been initialized "
f"around the origin of the domain (0, 0, 0).")
else:
raise ValueError("A direction needs to be specified for the dof.")
else:
axis_point = axis.point
axis_direction = axis.vector
if name is None:
name = f"dof_{self.nb_dofs}_rotation"
if self.mesh.nb_faces == 0:
self.dofs[name] = np.empty((self.mesh.nb_faces, 3))
else:
motion = np.cross(axis_point - self.mesh.faces_centers, axis_direction)
self.dofs[name] = amplitude * motion
def add_all_rigid_body_dofs(self) -> None:
"""Add the six degrees of freedom of rigid bodies (in place)."""
self.add_translation_dof(name="Surge")
self.add_translation_dof(name="Sway")
self.add_translation_dof(name="Heave")
self.add_rotation_dof(name="Roll")
self.add_rotation_dof(name="Pitch")
self.add_rotation_dof(name="Yaw")
@inplace_transformation
def keep_only_dofs(self, dofs):
for dof in list(self.dofs.keys()):
if dof not in dofs:
del self.dofs[dof]
if hasattr(self, 'mass'):
self.mass = self.mass.sel(radiating_dof=dofs, influenced_dof=dofs)
if hasattr(self, 'hydrostatic_stiffness'):
self.hydrostatic_stiffness = self.hydrostatic_stiffness.sel(radiating_dof=dofs, influenced_dof=dofs)
return self
def add_dofs_labels_to_vector(self, vector):
"""Helper function turning a bare vector into a vector labelled by the name of the dofs of the body,
to be used for instance for the computation of RAO."""
return xr.DataArray(data=np.asarray(vector), dims=['influenced_dof'],
coords={'influenced_dof': list(self.dofs)},
)
def add_dofs_labels_to_matrix(self, matrix):
"""Helper function turning a bare matrix into a matrix labelled by the name of the dofs of the body,
to be used for instance for the computation of RAO."""
return xr.DataArray(data=np.asarray(matrix), dims=['influenced_dof', 'radiating_dof'],
coords={'influenced_dof': list(self.dofs), 'radiating_dof': list(self.dofs)},
)
###################
# Transformations #
###################
def __add__(self, body_to_add: 'FloatingBody') -> 'FloatingBody':
return self.join_bodies(body_to_add)
def join_bodies(*bodies, name=None) -> 'FloatingBody':
if name is None:
name = "+".join(body.name for body in bodies)
meshes = CollectionOfMeshes([body.mesh for body in bodies], name=f"{name}_mesh")
dofs = FloatingBody.combine_dofs(bodies)
return FloatingBody(mesh=meshes, dofs=dofs, name=name)
@staticmethod
def combine_dofs(bodies) -> dict:
"""Combine the degrees of freedom of several bodies."""
dofs = {}
cum_nb_faces = accumulate(chain([0], (body.mesh.nb_faces for body in bodies)))
total_nb_faces = sum(body.mesh.nb_faces for body in bodies)
for body, nbf in zip(bodies, cum_nb_faces):
# nbf is the cumulative number of faces of the previous subbodies,
# that is the offset of the indices of the faces of the current body.
for name, dof in body.dofs.items():
new_dof = np.zeros((total_nb_faces, 3))
new_dof[nbf:nbf+len(dof), :] = dof
if '__' not in name:
new_dof_name = '__'.join([body.name, name])
else:
# The body is probably a combination of bodies already.
# So for the associativity of the + operation,
# it is better to keep the same name.
new_dof_name = name
dofs[new_dof_name] = new_dof
return dofs
def copy(self, name=None) -> 'FloatingBody':
"""Return a deep copy of the body.
Parameters
----------
name : str, optional
a name for the new copy
"""
new_body = copy.deepcopy(self)
if name is None:
new_body.name = f"copy_of_{self.name}"
LOG.debug(f"Copy {self.name}.")
else:
new_body.name = name
LOG.debug(f"Copy {self.name} under the name {name}.")
return new_body
def assemble_regular_array(self, distance, nb_bodies):
"""Create an regular array of identical bodies.
Parameters
----------
distance : float
Center-to-center distance between objects in the array
nb_bodies : couple of ints
Number of objects in the x and y directions.
Returns
-------
FloatingBody
"""
array_mesh = build_regular_array_of_meshes(self.mesh, distance, nb_bodies)
total_nb_faces = array_mesh.nb_faces
array_dofs = {}
for dof_name, dof in self.dofs.items():
for i, j in product(range(nb_bodies[0]), range(nb_bodies[1])):
shift_nb_faces = (j*nb_bodies[0] + i) * self.mesh.nb_faces
new_dof = np.zeros((total_nb_faces, 3))
new_dof[shift_nb_faces:shift_nb_faces+len(dof), :] = dof
array_dofs[f'{i}_{j}__{dof_name}'] = new_dof
return FloatingBody(mesh=array_mesh, dofs=array_dofs, name=f"array_of_{self.name}")
def assemble_arbitrary_array(self, locations:np.ndarray):
if not isinstance(locations, np.ndarray):
raise TypeError('locations must be of type np.ndarray')
assert locations.shape[1] == 2, 'locations must be of shape nx2, received {:}'.format(locations.shape)
n = locations.shape[0]
fb_list = []
for idx, li in enumerate(locations):
fb1 = self.copy()
fb1.translate(np.append(li,0))
fb1.name = 'arbitrary_array_body{:02d}'.format(idx)
fb_list.append(fb1)
arbitrary_array = fb_list[0].join_bodies(*fb_list[1:])
return arbitrary_array
def extract_faces(self, id_faces_to_extract, return_index=False):
"""Create a new FloatingBody by extracting some faces from the mesh.
The dofs evolve accordingly.
"""
if isinstance(self.mesh, CollectionOfMeshes):
raise NotImplementedError # TODO
if return_index:
new_mesh, id_v = Mesh.extract_faces(self.mesh, id_faces_to_extract, return_index)
else:
new_mesh = Mesh.extract_faces(self.mesh, id_faces_to_extract, return_index)
new_body = FloatingBody(new_mesh)
LOG.info(f"Extract floating body from {self.name}.")
new_body.dofs = {}
for name, dof in self.dofs.items():
new_body.dofs[name] = dof[id_faces_to_extract, :]
if return_index:
return new_body, id_v
else:
return new_body
def sliced_by_plane(self, plane):
return FloatingBody(mesh=self.mesh.sliced_by_plane(plane), dofs=self.dofs, name=self.name)
def minced(self, nb_slices=(8, 8, 4)):
"""Experimental method decomposing the mesh as a hierarchical structure.
Parameters
----------
nb_slices: Tuple[int, int, int]
The number of slices in each of the x, y and z directions.
Only powers of 2 are supported at the moment.
Returns
-------
FloatingBody
"""
minced_body = self.copy()
# Extreme points of the mesh in each direction.
x_min, x_max, y_min, y_max, z_min, z_max = self.mesh.axis_aligned_bbox
sizes = [(x_min, x_max), (y_min, y_max), (z_min, z_max)]
directions = [np.array(d) for d in [(1, 0, 0), (0, 1, 0), (0, 0, 1)]]
def _slice_positions_at_depth(i):
"""Helper function.
Returns a list of floats as follows:
i=1 -> [1/2]
i=2 -> [1/4, 3/4]
i=3 -> [1/8, 3/8, 5/8, 7/8]
...
"""
denominator = 2**i
return [numerator/denominator for numerator in range(1, denominator, 2)]
# GENERATE ALL THE PLANES THAT WILL BE USED TO MINCE THE MESH
planes = []
for direction, nb_slices_in_dir, (min_coord, max_coord) in zip(directions, nb_slices, sizes):
planes_in_dir = []
depth_of_treelike_structure = int(np.log2(nb_slices_in_dir))
for i_depth in range(1, depth_of_treelike_structure+1):
planes_in_dir_at_depth = []
for relative_position in _slice_positions_at_depth(i_depth):
slice_position = (min_coord + relative_position*(max_coord-min_coord))*direction
plane = Plane(normal=direction, point=slice_position)
planes_in_dir_at_depth.append(plane)
planes_in_dir.append(planes_in_dir_at_depth)
planes.append(planes_in_dir)
# SLICE THE MESH
intermingled_x_y_z = chain.from_iterable(zip_longest(*planes))
for planes in intermingled_x_y_z:
if planes is not None:
for plane in planes:
minced_body = minced_body.sliced_by_plane(plane)
return minced_body
@inplace_transformation
def mirror(self, plane):
self.mesh.mirror(plane)
for dof in self.dofs:
self.dofs[dof] -= 2 * np.outer(np.dot(self.dofs[dof], plane.normal), plane.normal)
for point_attr in ('geometric_center', 'rotation_center', 'center_of_mass'):
if point_attr in self.__dict__:
self.__dict__[point_attr] -= 2 * (np.dot(self.__dict__[point_attr], plane.normal) - plane.c) * plane.normal
return self
@inplace_transformation
def translate(self, *args):
self.mesh.translate(*args)
for point_attr in ('geometric_center', 'rotation_center', 'center_of_mass'):
if point_attr in self.__dict__:
self.__dict__[point_attr] += args[0]
return self
@inplace_transformation
def rotate(self, axis, angle):
matrix = axis.rotation_matrix(angle)
self.mesh.rotate(axis, angle)
for point_attr in ('geometric_center', 'rotation_center', 'center_of_mass'):
if point_attr in self.__dict__:
self.__dict__[point_attr] = matrix @ self.__dict__[point_attr]
for dof in self.dofs:
self.dofs[dof] = (matrix @ self.dofs[dof].T).T
return self
@inplace_transformation
def clip(self, plane):
# Keep a copy of the full mesh
if self.full_body is None:
self.full_body = self.copy()
# Clip mesh
LOG.info(f"Clipping {self.name} with respect to {plane}")
self.mesh.clip(plane)
# Clip dofs
ids = self.mesh._clipping_data['faces_ids']
for dof in self.dofs:
if len(ids) > 0:
self.dofs[dof] = self.dofs[dof][ids]
else:
self.dofs[dof] = np.empty((0, 3))
return self
def clipped(self, plane, **kwargs):
# Same API as for the other transformations
return self.clip(plane, inplace=False, **kwargs)
@inplace_transformation
def keep_immersed_part(self, free_surface=0.0, sea_bottom=-np.infty):
"""Remove the parts of the mesh above the sea bottom and below the free surface."""
self.clip(Plane(normal=(0, 0, 1), point=(0, 0, free_surface)))
if sea_bottom > -np.infty:
self.clip(Plane(normal=(0, 0, -1), point=(0, 0, sea_bottom)))
return self
#############
# Display #
#############
def __str__(self):
return self.name
def __repr__(self):
return (f"{self.__class__.__name__}(mesh={self.mesh.name}, "
f"dofs={{{', '.join(self.dofs.keys())}}}, name={self.name})")
def show(self, **kwargs):
from capytaine.ui.vtk.body_viewer import FloatingBodyViewer
viewer = FloatingBodyViewer()
viewer.add_body(self, **kwargs)
viewer.show()
viewer.finalize()
def show_matplotlib(self, *args, **kwargs):
return self.mesh.show_matplotlib(*args, **kwargs)
def animate(self, motion, *args, **kwargs):
"""Display a motion as a 3D animation.
Parameters
==========
motion: dict or pd.Series or str
A dict or series mapping the name of the dofs to its amplitude.
If a single string is passed, it is assumed to be the name of a dof
and this dof with a unit amplitude will be displayed.
"""
from capytaine.ui.vtk.animation import Animation
if isinstance(motion, str):
motion = {motion: 1.0}
elif isinstance(motion, xr.DataArray):
motion = {k: motion.sel(radiating_dof=k).data for k in motion.coords["radiating_dof"].data}
animation = Animation(*args, **kwargs)
animation._add_actor(self.mesh.merged(), faces_motion=sum(motion[dof_name] * dof for dof_name, dof in self.dofs.items() if dof_name in motion))
return animation
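# A minimal usage sketch of the FloatingBody API defined above, assuming the
# capytaine package is installed; the mesh file name "boat.dat" is a
# hypothetical placeholder, not a file shipped with the library.
if __name__ == "__main__":
    body = FloatingBody.from_file("boat.dat", name="boat")  # hypothetical mesh file
    body.add_all_rigid_body_dofs()       # Surge, Sway, Heave, Roll, Pitch, Yaw
    body.keep_immersed_part(free_surface=0.0)
    print(f"{body.name}: {body.nb_dofs} dofs on {body.mesh.nb_faces} faces")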
| mancellin/capytaine | capytaine/bodies/bodies.py | Python | gpl-3.0 | 20,164 | ["VTK"] | f2e660a9b11391e331404fcc7a2c632788b86b482a00af654689192b355a1c70 |
#!/usr/bin/python2
from os import system
from hashlib import sha256
from base import *
from bedrock import *
from globs import *
from types_builtin import *
from vat import *
import types
types_by_name['set'] = lambda: t_DT(Set)
# Shouldn't this be an env or something?
BUILTINS = {}
RUNTIME = {}
Builtin = DT('Builtin')
Env = DT('Env', ('type', Type))
Extrinsic = DT('Extrinsic', ('type', Type))
Vector = DT('Vector') # hack for array IDatas
Var = DT('Var')
GlobalVar = DT('GlobalVar')
Pat, PatCtor, PatCapture, PatInt, PatStr, PatTuple, PatVar, PatWild = \
ADT('Pat',
'PatCtor', ('ctor', '*Ctor'), ('args', '[Pat]'),
'PatCapture', ('var', Var), ('pattern', 'Pat'),
'PatInt', ('val', int),
'PatStr', ('val', str),
'PatTuple', ('vals', '[Pat]'),
'PatVar', ('var', Var),
'PatWild')
MatchCase = DT('MatchCase', ('pat', Pat), ('result', 'e'))
CoreLiteral, IntLit, FloatLit, StrLit = ADT('CoreLiteral',
'IntLit', ('val', int),
'FloatLit', ('val', float),
'StrLit', ('val', str))
CoreExpr, Attr, Bind, Call, Lit, TupleLit = \
ADT('CoreExpr',
'Attr', ('expr', 'CoreExpr'), ('field', '*Field'),
'Bind', ('target', '*a'), # Binder a => a
'Call', ('func', 'CoreExpr'), ('args', '[CoreExpr]'),
'Lit', ('literal', CoreLiteral),
'TupleLit', ('vals', '[CoreExpr]'))
Expr, E, And, DictLit, FuncExpr, GenExpr, \
GetEnv, HaveEnv, InEnv, CreateCtx, DestroyCtx, \
GetExtrinsic, HasExtrinsic, ScopeExtrinsic, \
ListLit, Match, Or, Ternary = \
ADT(('Expr', CoreExpr),
'And', ('left', 'Expr'), ('right', 'Expr'),
'DictLit', ('vals', '[(Expr, Expr)]'),
'FuncExpr', ('func', 'Func(Expr)'),
'GenExpr', ('expr', 'Expr'), ('pattern', 'Pat'),
('listExpr', 'Expr'), ('preds', '[Expr]'),
'GetEnv', ('env', '*Env'),
'HaveEnv', ('env', '*Env'),
'InEnv', ('env', '*Env'), ('init', 'Expr'), ('expr', 'Expr'),
'CreateCtx', ('env', '*Env'), ('init', 'Expr'),
'DestroyCtx', ('env', '*Env'), ('ctx', 'Expr'),
'GetExtrinsic', ('extrinsic', '*Extrinsic'), ('node', 'Expr'),
'HasExtrinsic', ('extrinsic', '*Extrinsic'), ('node', 'Expr'),
'ScopeExtrinsic', ('extrinsic', '*Extrinsic'), ('expr', 'Expr'),
'ListLit', ('vals', '[Expr]'),
'Match', ('expr', 'Expr'), ('cases', ['MatchCase(Expr)']),
'Or', ('left', 'Expr'), ('right', 'Expr'),
'Ternary', ('test', 'Expr'), ('then', 'Expr'),
('else_', 'Expr'))
AugOp, AugAdd, AugSubtract, AugMultiply, AugDivide, AugModulo = ADT('AugOp',
'AugAdd', 'AugSubtract', 'AugMultiply', 'AugDivide', 'AugModulo')
Body = DT('Body', ('stmts', '[Stmt(e)]'))
CondCase = DT('CondCase', ('test', 'e'), ('body', 'Body(e)'))
BlockCondCase = DT('BlockCondCase', ('test', 'Body(e)'), ('body', 'Body(e)'))
Func = DT('Func', ('params', [Var]), ('body', 'Body(e)'))
Lhs, LhsVar, LhsAttr, LhsSlot = ADT('Lhs',
'LhsVar', ('var', '*Var'),
'LhsAttr', ('sub', 'e'), ('attr', '*Field'),
# TODO move to quilt
'LhsSlot', ('sub', 'e'), ('index', int))
VoidExpr, VoidCall, VoidInEnv = ADT('VoidExpr',
'VoidCall', ('func', 'e'), ('args', ['e']),
'VoidInEnv', ('env', '*Env'), ('init', 'e'), ('expr', 'VoidExpr(e)'))
CoreStmt, Assign, AugAssign, Break, Cond, Continue, Defn, \
Return, ReturnNothing, While, VoidStmt = \
ADT('CoreStmt',
'Assign', ('lhs', 'Lhs(e)'), ('expr', 'e'),
'AugAssign', ('op', AugOp), ('lhs', 'Lhs(e)'), ('expr', 'e'),
'Break',
'Cond', ('cases', ['CondCase(e)']),
'Continue',
'Defn', ('pat', Pat), ('expr', 'e'),
'Return', ('expr', 'e'),
'ReturnNothing',
'While', ('test', 'e'), ('body', 'Body(e)'),
'VoidStmt', ('voidExpr', 'VoidExpr(e)'))
Stmt, S, Assert, BlockCond, BlockMatch, BreakUnless, NextCase, Nop, \
PushEnv, PopEnv, WriteExtrinsic = \
ADT(('Stmt', CoreStmt, {CoreExpr: Expr}),
'Assert', ('test', 'e'), ('message', 'e'),
'BlockCond', ('cases', '[BlockCondCase(e)]'),
'BlockMatch', ('expr', 'e'), ('cases', '[MatchCase(Body(e))]'),
'BreakUnless', ('test', 'e'),
'NextCase', ('test', 'e'),
'Nop',
'PushEnv', ('env', '*Env'), ('init', 'e'),
'PopEnv', ('env', '*Env'),
'WriteExtrinsic', ('extrinsic', '*Extrinsic'), ('node', 'e'),
('val', 'e'), ('isNew', bool))
LitDecl = DT('LitDecl', ('var', GlobalVar), ('literal', CoreLiteral))
ModuleDecls = DT('ModuleDecls',
('cdecls', [GlobalVar]),
('dts', [DataType]),
('envs', [Env]),
('extrinsics', [Extrinsic]),
('lits', [LitDecl]),
('funcDecls', [GlobalVar]),
# XXX hack; should store in serialized extrs
('grabBag', [GlobalVar]))
def blank_module_decls():
return ModuleDecls([], [], [], [], [], [], [])
TopFunc = DT('TopFunc', ('var', '*GlobalVar'), ('func', 'Func(e)'))
CompilationUnit = DT('CompilationUnit', ('funcs', ['TopFunc(Expr)']))
Bundle = DT('Bundle', ('decls', ['*Module']),
('units', ['*Module']),
('overlays', ['*Module']))
STMTCTXT = new_env('STMTCTXT', '*Stmt')
EXPRCTXT = new_env('EXPRCTXT', '*Expr')
UNIFYCTXT = new_env('UNIFYCTXT', '(*Type, *Type)')
def with_context(desc, msg):
if have_env(UNIFYCTXT):
src, dest = env(UNIFYCTXT)
desc = fmtcol("^DG^Types:^N {0} ^DG\n=====>^N {1}\n{2}",src,dest,desc)
if have_env(EXPRCTXT):
desc = fmtcol("^DG^Expr:^N {0}\n{1}", env(EXPRCTXT), desc)
desc = fmtcol("\n^DG^At:^N {0}\n{1}", env(STMTCTXT), desc)
return fmtcol("^DG{0}^N\n^Red{1}^N", desc, msg)
def lit_type(lit):
return match(lit, ("IntLit(_)", TInt),
("FloatLit(_)", TFloat),
("StrLit(_)", TStr))
Bindable = new_typeclass('Bindable',
('isLocalVar', 'a -> bool', lambda v: False),
('asLocalVar', 'a -> Maybe(Var)', lambda v: Nothing()))
# This is silly
@impl(Bindable, Var)
def isLocalVar_Var(var):
return True
@impl(Bindable, Var)
def asLocalVar_Var(var):
return Just(var)
default_impl(Bindable, GlobalVar)
default_impl(Bindable, Builtin)
default_impl(Bindable, Ctor)
# XXX only become bindable after expansion (ought to be a different typeclass)
default_impl(Bindable, Extrinsic)
default_impl(Bindable, Env)
# XXX maybe codegen
Nullable = new_typeclass('Nullable', ('isMaybe', 'a -> bool', lambda v: False))
@impl(Nullable, Ctor)
def isMaybe_Ctor(ctor):
name = extrinsic(Name, ctor)
return name == 'Just' or name == 'Nothing'
default_impl(Nullable, Builtin)
default_impl(Nullable, Var)
default_impl(Nullable, GlobalVar)
@matcher('key')
def _match_key(atom, args):
assert len(args) == 1
name = args[0]['val']
target = BUILTINS.get(name)
return [] if atom is target else None
@matcher('sym')
def _match_sym(atom, args):
assert 2 <= len(args) <= 3
mod_name = args[0].val
assert mod_name in WRITTEN_MODULES, "%s not written yet" % mod_name
mod = WRITTEN_MODULES[mod_name]
if isinstance(atom, Ref):
r = atom.refAtom
if isinstance(r, Ref) and r.refAtom is _b_symbol:
for sub in r.subs:
if getattr(sub, 'refAtom', None) is _b_name:
nm = sub.subs[0].strVal
break
else:
return None
m = match_try(nm, args[1])
if m is None or len(args) == 2:
return m
msubs = match_try(atom.subs, args[2])
if msubs is not None:
return m + msubs
return None
@matcher('named')
def _match_named(atom, args):
assert len(args) == 1
if has_extrinsic(Name, atom):
return match_try(extrinsic(Name, atom), args[0])
return None
def walk_deps(func, mod, seen):
def walk(deps):
for dep in deps:
if dep in seen:
continue
seen.add(dep)
walk(extrinsic(ModDeps, dep))
func(dep)
walk(extrinsic(ModDeps, mod))
return seen
ModRepr = DT('ModRepr', ('write', 'str -> void'),
('indent', int),
('exts', [object]),
('seen', set([object])),
('weakIndices', {object: int}),
('weakCtr', int))
MODREPR = new_env('MODREPR', ModRepr)
def write_mod_repr(filename, m, exts=[]):
if not env(GENOPTS).dumpViews:
return
with file(filename, 'w') as f:
def write(x):
f.write('%s%s\n' % (' ' * env(MODREPR).indent, x))
init = ModRepr(write, 0, exts, set(), {}, 0)
in_env(MODREPR, init, lambda: _do_repr(m.root))
if has_extrinsic(ModDeps, m):
f.write('\nMODULE DEPS:\n')
for dep in extrinsic(ModDeps, m):
f.write(' %s\n' % (extrinsic(Name, dep),))
def tree(atom, exts=[]):
def write(x):
print '%s%s' % (' ' * env(MODREPR).indent, x)
init = ModRepr(write, 0, exts, set(), {}, 10000)
in_env(MODREPR, init, lambda: _do_repr(atom))
def _do_repr(s):
c = env(MODREPR)
if isinstance(s, Structured):
dt = type(s)
if s in c.seen:
if s in c.weakIndices:
c.write('<cyclic #%d>' % c.weakIndices[s])
else:
c.write('<cyclic %s %s>' % (dt.__name__, short_id(s)))
return
c.seen.add(s)
name = [dt.__name__]
if s in c.weakIndices:
name.append('#%d' % (c.weakIndices[s],))
if has_extrinsic(Name, s):
name.append(fmtcol('^Red{0!r}^N', extrinsic(Name, s)))
name.append(short_id(s))
for ext in c.exts:
if has_extrinsic(ext, s):
name.append(repr(extrinsic(ext, s)))
c.write(' '.join(name))
c.indent += 1
form = extrinsic(FormSpec, dt)
assert not isinstance(form, DataType)
for field in form.fields:
f = getattr(s, extrinsic(Name, field))
if matches(field.type, "TWeak(_)"):
if isinstance(f, Structured):
if has_extrinsic(Name, f):
c.write('->%s %s' % (extrinsic(Name, f), short_id(f)))
else:
if f not in c.weakIndices:
c.weakCtr += 1
c.weakIndices[f] = c.weakCtr
c.write('->#%d %r' % (c.weakIndices[f], f))
else:
c.write('->?? %r' % (f,))
else:
_do_repr(f)
c.indent -= 1
elif isinstance(s, (tuple, list)):
l, r = '()' if isinstance(s, tuple) else '[]'
if not s:
c.write(fmtcol('^Blue{0}{1}^N', l, r))
else:
c.write(fmtcol('^Blue{0}^N', l))
for o in s:
_do_repr(o)
c.write(fmtcol('^Blue{0}^N', r))
elif isinstance(s, value_types):
c.write(repr(s))
else:
assert False, "Can't deal with %r" % (s,)
STRINGIFY = new_env('STRINGIFY', [str])
def stringify(ast, t):
t = parse_type(t)
frags = []
in_env(STRINGIFY, frags, lambda: visit(ExprStringifier, ast, t))
return ''.join(frags)
def frag(s):
env(STRINGIFY).append(s)
def frag_comma():
frag(', ')
class ExprStringifier(Visitor):
def Attr(self, a):
self.visit('expr')
frag('.%s' % (extrinsic(FieldSymbol, a.field),))
def AttrIx(self, a):
self.visit('expr')
frag('._ix')
def Bind(self, bind):
t = bind.target
if has_extrinsic(Original, t):
orig = extrinsic(Original, t)
if matches(orig, "Lit(_)"):
frag(repr(orig.literal.val))
return
if Bindable.isLocalVar(t):
frag(var_name(t))
elif has_extrinsic(GlobalSymbol, t):
frag(global_name(t))
else:
frag(extrinsic(Name, t))
def Call(self, call):
if matches(call.func, 'Bind(Builtin())'):
if len(call.args) == 2:
op = extrinsic(Name, call.func.target)
if op in ('subscript', 'intsubscript'):
self.visit('args', 0)
frag('[')
self.visit('args', 1)
frag(']')
return
self.visit('args', 0)
frag(' %s ' % (op,))
self.visit('args', 1)
return
elif len(call.args) == 1:
m = match(call.func.target)
if m('key("not")'):
frag('!')
self.visit('args', 0)
return
elif m('key("negate")'):
frag('-')
self.visit('args', 0)
return
self.visit('func')
frag('(')
for i in xrange(len(call.args)):
if i > 0:
frag_comma()
self.visit('args', i)
frag(')')
def VoidCall(self, call):
self.Call(call)
def CallIndirect(self, call):
self.Call(call)
def Lit(self, lit):
frag(repr(lit.literal.val))
def Ternary(self, lit):
self.visit('test')
frag(' ? ')
self.visit('then')
frag(' : ')
self.visit('else_')
def And(self, e):
self.visit('left')
frag(' and ')
self.visit('right')
def Or(self, e):
self.visit('left')
frag(' or ')
self.visit('right')
def TupleLit(self, lit):
frag('[')
for i in xrange(len(lit.vals)):
if i > 0:
frag_comma()
self.visit('vals', i)
frag(']')
def ListLit(self, lit):
frag('[')
for i in xrange(len(lit.vals)):
if i > 0:
frag_comma()
self.visit('vals', i)
frag(']')
def DictLit(self, lit):
assert False
def NullPtr(self, null):
frag('null')
def SizeOf(self, sizeof):
m = match(sizeof)
if m("SizeOf(IPtr(IData(dt) or IDataCtor(dt)))"):
frag('sizeof %s' % (extrinsic(Name, m.dt),))
else:
frag('sizeof ...')
def Undefined(self, undef):
frag('undef')
def FuncVal(self, e):
ctx = match(e.ctx, ("Just(v)", var_name),
("Nothing()", lambda: "null"))
frag('{&%s, %s}' % (global_name(e.funcVar), ctx))
def FuncExpr(self, fe):
frag('<function %s>' % (extrinsic(Name, fe.func),))
def InEnv(self, e):
frag('in_env(%s' % (global_name(e.env),))
frag_comma()
self.visit('init')
frag_comma()
self.visit('expr')
frag(')')
def VoidInEnv(self, e):
self.InEnv(e)
def ScopeExtrinsic(self, e):
frag('scope_extrinsic(%s' % (global_name(e.extrinsic),))
frag_comma()
self.visit('expr')
frag(')')
def Match(self, m):
frag('match(')
self.visit('expr')
frag_comma()
frag('...)')
# PATTERNS
def PatVar(self, pat):
frag(var_name(pat.var))
def PatWild(self, pat):
frag('_')
def LhsVar(self, lhs):
frag(var_name(lhs.var))
def LhsAttr(self, lhs):
self.visit('sub')
frag('.%s' % (extrinsic(FieldSymbol, lhs.attr),))
def LhsSlot(self, lhs):
self.visit('sub')
frag('.slot[%d]' % (lhs.index,))
# STMTS
def Assign(self, a):
self.visit('lhs')
frag(' = ')
self.visit('expr')
def AugAssign(self, a):
self.visit('lhs')
self.visit('op')
self.visit('expr')
def AugAdd(self, a): frag(' += ')
def AugSubtract(self, a): frag(' -= ')
def AugMultiply(self, a): frag(' *= ')
def AugDivide(self, a): frag(' //= ')
def AugModulo(self, a): frag(' %= ')
def Break(self, b): frag('break')
def Continue(self, c): frag('continue')
def Cond(self, cond):
pass # first if case done manually in llvm
def CondCase(self, case):
frag('elif ')
self.visit('test')
frag(':')
def Defn(self, defn):
self.visit('pat')
frag(' := ')
self.visit('expr')
def Nop(self, nop):
frag('nop')
def Return(self, ret):
frag('return ')
self.visit('expr')
def ReturnNothing(self, ret):
frag('return')
def While(self, w):
frag('while ')
self.visit('test')
frag(':')
def Assert(self, a):
frag('assert ')
self.visit('test')
frag_comma()
self.visit('message')
def WriteExtrinsic(self, a):
frag('add_extrinsic(' if a.isNew else 'update_extrinsic(')
frag(global_name(a.extrinsic))
frag_comma()
self.visit('node')
frag_comma()
self.visit('val')
frag(')')
def var_name(var):
return extrinsic(LocalSymbol, var)
def global_name(target):
return extrinsic(GlobalSymbol, target).symbol
# vi: set sw=4 ts=4 sts=4 tw=79 ai et nocindent:
| pshc/archipelago | atom.py | Python | mit | 17,314 | ["VisIt"] | 60d0565ba111c657d569c6a9849b0f493c6c6a5fce896bbf6e09b1772c336bb0 |
from __future__ import print_function
class RBM:
def __init__(self, rng, configFile, cloneFrom=None):
"""
initializing the RBM from the specified configFile
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type configFile: string
:param configFile: the configuration file that specifies the details of the RBM net
:type cloneFrom: RBM object
:param cloneFrom: RBM from which the weights of this network will be cloned
"""
#read the layers sizes, and neuron types
pass
def CD(self, n, v):
"""
contrastive divergence for n steps. This function performs Gibbs sampling on the network
:type n: int
:param n: specifies the number of steps in the CD(n)
:type v: numpy matrix, 2D
:param v: specifies the value of visible units at the start of sampling
"""
#perform sampling, return v_n, h_n
pass
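# A minimal numpy sketch of the CD(n) Gibbs sampling described in the docstring
# above, assuming a Bernoulli-Bernoulli RBM. The weight matrix W and the biases
# b_v, b_h are illustrative names for this sketch, not attributes of the RBM
# class above.
import numpy as np

def _sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def cd_n_sketch(W, b_v, b_h, v0, n, rng=np.random):
    """Run n Gibbs steps starting from the visible state v0; return (v_n, h_n)."""
    v = v0
    for _ in range(n):
        h_prob = _sigmoid(v.dot(W) + b_h)                      # P(h = 1 | v)
        h = (rng.rand(*h_prob.shape) < h_prob).astype(float)   # sample hidden units
        v_prob = _sigmoid(h.dot(W.T) + b_v)                    # P(v = 1 | h)
        v = (rng.rand(*v_prob.shape) < v_prob).astype(float)   # sample visible units
    return v, h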
| mohsenmalmir/DeepLearningStack | DeepLearningStack/RBM/RBM.py | Python | mit | 1,034 | ["NEURON"] | 14403d2d65f6eed6c09577152aa1edd8115704c1325589fa873524f04845fcaa |
import numpy as np
from pandas import DataFrame, Series
def generate_qmatrix(concepts, problems):
""" Generates a Q-matrix of size "concepts x problems" with values
sampled from a continuous uniform distribution between 0 and 1."""
return np.random.rand(concepts, problems)
def generate_skills(concepts, students, sd=1):
""" Generates a skill matrix of size "students x concepts" with values
sampled from a normal distribution. """
return np.random.normal(scale=sd, size=(students, concepts))
def generate_zscores(qmatrix, skills, noise=0.2, missing=0.0):
""" Generates a Z-score matrix of shape "students x problems" by
multiplying a skill matrix and a Q-matrix and adding Gaussian noise. """
results = np.dot(skills, qmatrix)
if noise > 0:
results += np.random.normal(scale=noise, size=results.shape)
for i in range(results.shape[0]):
for j in range(results.shape[1]):
if np.random.rand() < missing:
results[i, j] = np.NAN
return results
############################## BASIC MODEL ###################################
def basic_model_generate_parameters(students, problems):
S, P = students, problems
a = Series(np.random.randn(P) * 0.4 - 1)
b = Series(np.random.randn(P) * 2 + 7)
c = Series(np.random.rand(P) + 0.1)
theta = Series(np.random.randn(S) * 0.7)
sigma = Series(np.random.rand(S) * 1)
return dict(a=a, b=b, c=c, theta=theta, sigma=sigma)
def basic_model_generate_times(a, b, c, theta, sigma):
S, P = len(theta), len(a)
times = DataFrame(columns=a.index, index=theta.index)
local_skill = DataFrame(np.random.randn(S, P),
columns=a.index, index=theta.index)
local_skill = local_skill.apply(lambda problem: theta + problem * sigma)
local_variance = DataFrame(np.random.randn(S, P),
columns=a.index, index=theta.index) * c
times = b + local_skill * a + local_variance
return times
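# A minimal usage sketch of the generators above; the sizes (3 concepts,
# 20 problems, 50 students) are arbitrary example values.
if __name__ == "__main__":
    qmatrix = generate_qmatrix(concepts=3, problems=20)
    skills = generate_skills(concepts=3, students=50)
    zscores = generate_zscores(qmatrix, skills, noise=0.2, missing=0.05)
    params = basic_model_generate_parameters(students=50, problems=20)
    times = basic_model_generate_times(**params)
    print(zscores.shape, times.shape)   # (50, 20) (50, 20)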
| jniznan/edami | edami/synthetic.py | Python | mit | 2,007 | ["Gaussian"] | 149cc9c24c9e26d63e2b5c09acdb5f89f322d57feb734ac56d0d103fee9559c3 |
from gaussian import noise_from_psd, noise_from_string, frequency_noise_from_psd # noqa
| hagabbar/pycbc_copy | pycbc/noise/__init__.py | Python | gpl-3.0 | 88 | ["Gaussian"] | 0c728b8b501b93e90387656c531bf82518967ace3351b418964d690fe9cfb8f6 |
from StoryModels import StoryModels
from AllDBFields import ZoneDBFields
from ZoneDefinitions import ZoneDefinition
from ZoneDefinitions import AllZones
from bson.objectid import ObjectId
from OrphanedModelException import OrphanedModelException
import DatabaseLayer
import random
class Zone(StoryModels):
"""
This is a wrapper for the zone data from the database. This is different
from the other models in that Zone is used as a part of the hero model.
"""
def __init__(self, definitionKey):
return super().__init__(definitionKey)
@classmethod
def construct_model_from_pk(cls,pk):
"""
args:
id:
uses the id to load this model from the database.
return: an instance of the model on which this is called
"""
collection = DatabaseLayer.get_table(cls.get_dbFields().COLLECTION_NAME)
obj = cls(None)
obj.dict = collection.find_one({cls.get_dbFields().PK_KEY:pk})
return obj
def save_changes(self,heroId):
"""
args:
heroId:
this needs to be a pymongo ObjectId. It is used as an owner
relationship to a hero model
"""
from AllDBFields import HeroDbFields
ownerCollection = DatabaseLayer.get_table(self.get_dbFields().OWNER_COLLECTION)
if self.get_pk():
if self._changes:
ownerCollection.update_one({self.get_dbFields().PK_KEY:heroId},{'$set':self._changes})
else:
collection = DatabaseLayer.get_table(self.get_dbFields().COLLECTION_NAME)
pk = collection.insert_one(self.dict).inserted_id
self.dict[self.get_dbFields().PK_KEY] = pk
nestedZone = {HeroDbFields.ZONE:self.dict}
ownerCollection.update_one({self.get_dbFields().PK_KEY:heroId},{'$set':nestedZone})
self._changes = {}
@classmethod
def get_dbFields(cls):
return ZoneDBFields
def get_zoneName(self):
if not self._definition:
self._definition = ZoneDefinition(self.definitionKey)
return self._definition.get_name()
def get_fullName(self):
return "{0} {1}".format(self.get_zoneName(),self.suffix).rstrip()
@property
def suffix(self):
if self.get_dbFields().SUFFIX in self.dict:
return self.dict[self.get_dbFields().SUFFIX]
else:
return ""
@suffix.setter
def suffix(self,value):
self.set_common_story_property(self.get_dbFields().SUFFIX,value)
@property
def monstersKilled(self):
if self.dict[self.get_dbFields().MONSTERS_KILLED]:
return self.dict[self.get_dbFields().MONSTERS_KILLED]
else:
return 0
@monstersKilled.setter
def monstersKilled(self,value):
self.set_common_story_property(self.get_dbFields().MONSTERS_KILLED,value)
@property
def maxMonsters(self):
return self.dict[self.get_dbFields().MAX_MONSTERS]
@maxMonsters.setter
def maxMonsters(self,value):
self.set_common_story_property(self.get_dbFields().MAX_MONSTERS,value)
@property
def lvl(self):
return self.dict[self.get_dbFields().LVL]
@lvl.setter
def lvl(self,value):
self.set_common_story_property(self.get_dbFields().LVL,value)
def get_description(self):
if not self._definition:
self._definition = ZoneDefinition(self.definitionKey)
return self._definition.get_description()
@property
def previousZoneReferencePK(self):
if self.get_dbFields().PREVIOUS_ZONE_REFERENCE_PK in self.dict:
return self.dict[self.get_dbFields().PREVIOUS_ZONE_REFERENCE_PK]
return None
@previousZoneReferencePK.setter
def previousZoneReferencePK(self,value):
self.set_common_story_property(self.get_dbFields().PREVIOUS_ZONE_REFERENCE_PK,value)
@property
def nextZoneReferenceList(self):
return self.dict[self.get_dbFields().NEXT_ZONE_REFERENCE_LIST]
@nextZoneReferenceList.setter
def nextZoneReferenceList(self,value):
self.set_common_story_property(self.get_dbFields().NEXT_ZONE_REFERENCE_LIST,value)
@property
def alias(self):
raise NotImplementedError()
return self.dict[self.get_dbFields().ALIAS]
@alias.setter
def alias(self,value):
raise NotImplementedError()
self.dict[self.get_dbFields().ALIAS] = value
self._changes[self.get_dbFields().ALIAS] = value
@property
def definitionKey(self):
return self.dict[self.get_dbFields().DEFINITION_KEY]
@definitionKey.setter
def definitionKey(self,value):
self.set_common_story_property(self.get_dbFields().DEFINITION_KEY,value)
@classmethod
def get_home_zone(cls):
"""
this probably only needs to be called when a new hero is being created for
a user
returns:
a model of type zone with starting details
"""
from AllDBFields import ZoneDefinitionFields
zone = Zone(ZoneDefinitionFields.HOME)
zone.maxMonsters = 0
zone.skillLvl = 0
return zone
@classmethod
def construct_next_zone_choice(cls,heroLvl,visitedZones,matchHeroLvl = False):
"""
generates a zone with unique name and randomlvl
args:
heroLvl:
this should be a positive integer greater than 1
visitedZones:
this should be a dict. The dict is used to keep track of which
name suffix combinations have popped up already.
matchHeroLvl:
Set this to true for the first level. If this is true then the zone
difficulty level will perfectly match the hero's level rather than
approximate it.
returns:
a model of type zone
also adds to the value for a key in the visitedZones dict
"""
import GeneralUtilities as gu
selectedZoneKey = Zone.get_random_zone_definitionKey(heroLvl)
definition = ZoneDefinition(selectedZoneKey)
zone = {ZoneDBFields.DEFINITION_KEY:selectedZoneKey,ZoneDBFields.LVL: heroLvl,
ZoneDBFields.MAX_MONSTERS: random.randint(5,15),ZoneDBFields.NAME: definition.get_name(),
ZoneDBFields.DESCRIPTION: definition.get_description()}
if selectedZoneKey in visitedZones: #if we've visited it before
zone[ZoneDBFields.SUFFIX] = Zone.generate_full_zone_name_suffix(visitedZones[selectedZoneKey])
zone[ZoneDBFields.FULL_NAME] = \
"{0} {1}".format(zone[ZoneDBFields.NAME],zone[ZoneDBFields.SUFFIX]).rstrip()
visitedZones[selectedZoneKey] += 1
else:
zone[ZoneDBFields.FULL_NAME] = zone[ZoneDBFields.NAME]
visitedZones[selectedZoneKey] = 1
if not matchHeroLvl:
zone[ZoneDBFields.LVL] = gu.calculate_lvl(heroLvl,10)
return zone
@classmethod
def get_random_zone_definitionKey(cls,heroLvl):
"""
selects a random dictionary key to be used with ZoneDefinitions
args:
heroLvl:
this should be a positive integer greater than 1
returns:
a string which is a dict key
"""
zoneGroupKeys = Zone.get_unlocked_zone_groupKeys(heroLvl)
selectedZoneGroupKey = random.choice(zoneGroupKeys)
zoneList = list(AllZones[selectedZoneGroupKey].keys())
return random.choice(zoneList)
@classmethod
def generate_full_zone_name_suffix(cls,visitCount):
"""
each time we visit a particular zone type, we don't want it to have
the exact same name as last time. To do this, we will add a suffix to
the name. This generates a suffix based on the number of times
that zone has been hit.
args:
visitCount:
the number of times the hero character has visited a zone
returns:
a suffix which will be a string. We will take this string and append
it to stuff.
"""
if visitCount < 1:
return ""
symbols = Zone.get_symbols()
hugeVisitCountResult = Zone.special_action_for_extremely_huge_visitCounts(visitCount,symbols)
numericSuffix = hugeVisitCountResult['numericSuffix']
visitCount = hugeVisitCountResult['visitCount']
adjustedVisitCount = Zone.skip_powers_of_base_in_number(visitCount,len(symbols))
suffix = Zone.get_symbol_suffix(adjustedVisitCount,symbols)
if numericSuffix > 0:
suffix += str(numericSuffix)
return suffix.strip()
@classmethod
def special_action_for_extremely_huge_visitCounts(cls,visitCount,symbols):
"""
this gets a special suffix for extremely huge visit counts, i.e., higher
than 10100. Also shrinks the number to play nicely with the normal suffix
generating process
args:
visitCount:
the number of times the hero character has visited a zone
symbols:
the list of symbols. We're changing the first element to something
magic
return:
a dict with the numericSuffix value and the updated visitCount
"""
numericSuffix = 0
if visitCount > (len(symbols)-1) * len(symbols):
symbols[0] = "?4815162342"
numericSuffix = Zone.get_numeric_suffix(visitCount,len(symbols))
visitCount = Zone.adjust_visitCount_for_extremely_huge_counts(visitCount,len(symbols))
return {'numericSuffix':numericSuffix,'visitCount':visitCount}
@classmethod
def get_symbol_suffix(cls,visitCount,symbols):
"""
converts a number to a suffix.
Think of it as converting a number to a base 100 system of sorts
args:
visitCount:
the number of times the hero character has visited a zone
symbols:
the list of symbols.
return:
a string to be zone suffix
"""
suffix = ""
while visitCount > 0:
r = visitCount % len(symbols)
visitCount //= len(symbols)
suffix = (symbols[r] + " " + suffix)
return suffix
@classmethod
def adjust_visitCount_for_extremely_huge_counts(cls,visitCount,symbolsLen):
"""
args:
visitCount:
the number of times the hero character has visited a zone
symbolsLen:
the count of all the available symbols to be made into a suffix
"""
return visitCount % ((symbolsLen-1) * symbolsLen)
@classmethod
def get_numeric_suffix(cls,visitCount,symbolsLen):
"""
args:
visitCount:
the number of times the hero character has visited a zone
symbolsLen:
the count of all the available symbols to be made into a suffix
"""
#the -1 on the first array length is to account for the single symbol range of items
return visitCount // ((symbolsLen-1) * symbolsLen) + 1 #+1 because the 1 suffix would be redundant
@classmethod
def get_symbols(cls):
"""
if you add any items to symbols, please adjust the unit test
to account for that
"""
symbols =["","Alpha", "Beta","Cain","Delta", #4
"Epsilon","Foxtrot","September","October", #8
"November","Kilo","Juliett","Romeo","Silver","Deckard", #14
"Sierra","Tango","Zeta","Theta","July","Ludwig","Tyrell", #21
"Lambda","Mu","London","Victor","Quintin","Gold", #27
"Whiskey","Xray","Zulu","Pi","Rho","Antilles","Blanca", #34
"Sigma","Tau","India","Hector","Quebec","Waltz","Sapphire", #41
"Tokyo","Ramesses","Washington","Darius","Emerald","Midgard", #47
"Futura","Charlotte","Flanders","Berlin","Onion","Ruby", #53
"David","Pizza","Lazlo","Kong","Jerico","Diamond", #59
"Black","White","Olaf","Biggs","Wedge","Tyrannus", #65
"Richter","Medusa","Swan","Gemini","Noir","Xerxes",#71
"TNT","Plutonia","Cerberus","Tiberius", #75
"Arcturus","Prime","Tarsonis","Babylon","Sparta",#80
"Atlanta","Yutani","Python","Ridley","Midway", #85
"Bismark","Dextera","Dominus","Jejunum", #89
"Superior","Distal","Eurebus","Indigo", #93
"Xs","Rex","Titan","Zen","Apex","Omega","Zed"] #100
return symbols
@classmethod
def skip_powers_of_base_in_number(cls,num,base):
"""
Numbers naturally want to follow this pattern:
0,A,B,C,...,Y,Z,A0,AA,AB,AC,...,AY,AZ,B0,BA,BB,BC
But I want zone suffix naming system to follow this pattern:
0,A,B,C,...,Y,Z,AA,AB,AC,...,AY,AZ,BA,BB,BC,...
This function adjusts numbers to fit the wanted pattern,
i.e. without the proverbial multiples of 10.
The accuracy of this function becomes unreliable after base^2
args:
num:
this is the number that we're offsetting.
base:
an integer. multiples of this number will be skipped
returns:
a number that's been offset for the base occurrences skipped over
"""
if base < 1 or not float.is_integer(float(base)):
raise ValueError("Base needs to be a positive non-zero integer")
if not float.is_integer(float(num)):
raise ValueError("num needs to be an integer and not a floating number")
isNegative = False
if num < 0:
num *= -1
isNegative = True
adjusterNum = num + (num // base)
return num + (adjusterNum // base)
@classmethod
def get_unlocked_zone_groupKeys(cls,heroLvl):
""""
gets the list of availible zones groups that can be selected depeding on the
hero's level
args:
heroLvl:
this should be an integer
returns:
a list of dict keys to the AllZones dict.
"""
if heroLvl < 1:
return []
availableZonesGroups = []
availableZonesGroups.append("lvl1Zones")
if heroLvl >= 5:
availableZonesGroups.append("lvl5Zones")
if heroLvl >= 10:
availableZonesGroups.append("lvl10Zones")
if heroLvl >= 15:
availableZonesGroups.append("lvl15Zones")
if heroLvl >= 20:
availableZonesGroups.append("lvl20Zones")
if heroLvl >= 25:
availableZonesGroups.append("lvl25Zones")
if heroLvl >= 30:
availableZonesGroups.append("lvl30Zones")
return availableZonesGroups
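# A standalone sketch of the suffix-numbering scheme documented in
# skip_powers_of_base_in_number and get_symbol_suffix above, so the pattern
# "Alpha, Beta, ..., Alpha Alpha, Alpha Beta, ..." can be tried in isolation.
# The shortened symbol list below is only for this demonstration.
def _skip_powers_of_base(num, base):
    adjusted = num + (num // base)
    return num + (adjusted // base)

def _suffix_for_visit(visit_count, symbols):
    n = _skip_powers_of_base(visit_count, len(symbols))
    suffix = ""
    while n > 0:
        n, r = divmod(n, len(symbols))
        suffix = symbols[r] + " " + suffix
    return suffix.strip()

if __name__ == "__main__":
    demo_symbols = ["", "Alpha", "Beta", "Gamma"]
    for count in range(1, 8):
        print(count, "->", _suffix_for_visit(count, demo_symbols))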
| joelliusp/SpaceHabit | SpaceHabitRPG/Models/Zone.py | Python | mit | 14,252 | ["VisIt"] | 566c80a333d824aa3212c889a7b7a3c6240b35e8ab21b8a226e08c3353dfc3a7 |
#!/usr/bin/env python
################################################################################
#
# A bunch of regression type tests for the builder and parser.
#
################################################################################
ident = '$Id: SOAPtest.py,v 1.19 2004/04/01 13:25:46 warnes Exp $'
import urllib
import sys
import unittest
import re
import string
sys.path.insert(1, "..")
from SOAPpy import *
config=Config
config.strict_range=1
# run these tests with this variable set both to 1 and 0
config.simplify_objects=0
# as borrowed from jake.soapware.org for float compares.
def nearlyeq(a, b, prec = 1e-7):
return abs(a - b) <= abs(a) * prec
# helper
def negfloat(x):
return float(x) * -1.0
class Book(structType):
def __init__(self):
self.title = "Title of a book"
structType.__init__(self)
class Person(structType):
def __init__(self):
self.age = "49"
self.height = "5.5"
structType.__init__(self)
class Result(structType):
def __init__(self):
structType.__init__(self, name = 'Result')
self.Book = Book()
self.Person = Person()
class one:
def __init__(self):
self.str = "one"
class two:
def __init__(self):
self.str = "two"
class three:
def __init__(self):
self.str = "three"
ws = ' \t\r\n'
N = None
class SOAPTestCase(unittest.TestCase):
# big message
def notestBigMessage(self):
x=[]
for y in string.lowercase:
x.append(y*999999)
buildSOAP(x)
# test arrayType
def testArrayType(self):
x = structType( {"name":"widg1","quantity":200,
"price":decimalType(45.99),
"_typename":"LineItem"})
y = buildSOAP([x, x])
# could be parsed using an XML parser?
self.failUnless(string.find(y, "LineItem")>-1)
# test arguments ordering
def testOrdering(self):
x = buildSOAP(method="newCustomer", namespace="urn:customer", \
kw={"name":"foo1", "address":"bar"}, \
config=SOAPConfig(argsOrdering={"newCustomer":("address", "name")}))
# could be parsed using an XML parser?
self.failUnless(string.find(x, "<address ")<string.find(x, "<name "))
x = buildSOAP(method="newCustomer", namespace="urn:customer", \
kw={"name":"foo1", "address":"bar"}, \
config=SOAPConfig(argsOrdering={"newCustomer":("name", "address")}))
# could be parsed using an XML parser?
self.failUnless(string.find(x, "<address ")>string.find(x, "<name "))
# test struct
def testStructIn(self):
x = '''<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/" xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<soap:Body soap:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<SomeMethod>
<Result>
<Book>
<title>My Life and Work</title>
</Book>
<Person>
<name>Henry Ford</name>
<age> 49 </age>
<height> 5.5 </height>
</Person>
</Result>
</SomeMethod>
</soap:Body>
</soap:Envelope>
'''
# parse rules
pr = {'SomeMethod':
{'Result':
{'Book': {'title':(NS.XSD, "string")},
'Person': {'age':(NS.XSD, "int"),
'height':negfloat}
}
}
}
y = parseSOAPRPC(x, rules=pr)
if config.simplify_objects:
self.assertEquals(y['Result']['Person']['age'], 49);
self.assertEquals(y['Result']['Person']['height'], -5.5);
else:
self.assertEquals(y.Result.Person.age, 49);
self.assertEquals(y.Result.Person.height, -5.5);
# Try the reverse
def testStructOut(self):
x = buildSOAP(Result())
def testIntFloat(self):
x='''<SOAP-ENV:Envelope
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
SOAP-ENV:encodingStyle="http://schemas.microsoft.com/soap/encoding/clr/1.0
http://schemas.xmlsoap.org/soap/encoding/"
xmlns:i3="http://soapinterop.org/xsd" xmlns:i2="http://soapinterop.org/">
<SOAP-ENV:Body>
<i2:echoStructArray id="ref-1">
<return href="#ref-4"/>
</i2:echoStructArray>
<SOAP-ENC:Array id="ref-4" SOAP-ENC:arrayType="i3:SOAPStruct[3]">
<item href="#ref-5"/>
<item href="#ref-6"/>
<item href="#ref-7"/>
</SOAP-ENC:Array>
<i3:SOAPStruct id="ref-5">
<varString xsi:type="xsd:string">West Virginia</varString>
<varInt xsi:type="xsd:int">-546</varInt>
<varFloat xsi:type="xsd:float">-5.398</varFloat>
</i3:SOAPStruct>
<i3:SOAPStruct id="ref-6">
<varString xsi:type="xsd:string">New Mexico</varString>
<varInt xsi:type="xsd:int">-641</varInt>
<varFloat xsi:type="xsd:float">-9.351</varFloat>
</i3:SOAPStruct>
<i3:SOAPStruct id="ref-7">
<varString xsi:type="xsd:string">Missouri</varString>
<varInt xsi:type="xsd:int">-819</varInt>
<varFloat xsi:type="xsd:float">1.375</varFloat>
</i3:SOAPStruct>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>'''
y = parseSOAPRPC(x)
if(config.simplify_objects):
self.assertEquals(y['return'][0]['varString'], "West Virginia")
self.assertEquals(y['return'][1]['varInt'], -641)
self.assertEquals(y['return'][2]['varFloat'], 1.375)
else:
self.assertEquals(getattr(y,"return")[0].varString, "West Virginia")
self.assertEquals(getattr(y,"return")[1].varInt, -641)
self.assertEquals(getattr(y,"return")[2].varFloat, 1.375)
def testArray1(self):
x='''<SOAP-ENV:Envelope
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
SOAP-ENV:encodingStyle="http://schemas.microsoft.com/soap/encoding/clr/1.0
http://schemas.xmlsoap.org/soap/encoding/"
xmlns:i3="http://soapinterop.org/xsd" xmlns:i2="http://soapinterop.org/">
<SOAP-ENV:Body>
<i2:echoStructArray id="ref-1">
<return href="#ref-4"/>
</i2:echoStructArray>
<SOAP-ENC:Array id="ref-4" SOAP-ENC:arrayType="i3:SOAPStruct[3]">
<item href="#ref-5"/>
<item href="#ref-6"/>
<item href="#ref-7"/>
</SOAP-ENC:Array>
<i3:SOAPStruct id="ref-5">
<xsd:string>West Virginia</xsd:string>
<xsd:int>-546</xsd:int>
<xsd:float>-5.398</xsd:float>
</i3:SOAPStruct>
<i3:SOAPStruct id="ref-6">
<xsd:string>New Mexico</xsd:string>
<xsd:int>-641</xsd:int>
<xsd:float>-9.351</xsd:float>
</i3:SOAPStruct>
<i3:SOAPStruct id="ref-7">
<xsd:string>Missouri</xsd:string>
<xsd:int>-819</xsd:int>
<xsd:float>1.375</xsd:float>
</i3:SOAPStruct>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>'''
y = parseSOAPRPC(x)
if(config.simplify_objects):
self.assertEquals(y["return"][0]['string'], "West Virginia")
self.assertEquals(y["return"][1]['int'], -641)
self.assertEquals(y["return"][2]['float'], 1.375)
else:
self.assertEquals(getattr(y,"return")[0].string, "West Virginia")
self.assertEquals(getattr(y,"return")[1].int, -641)
self.assertEquals(getattr(y,"return")[2].float, 1.375)
def testUTF8Encoding1(self):
x = '''<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
<SOAP-ENV:Body SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsd2="http://www.w3.org/2000/10/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance" xmlns:xsi2="http://www.w3.org/2000/10/XMLSchema-instance">
<ns0:echoStringArrayResponse xmlns:ns0="http://soapinterop.org/">
<return2 href="#id3"/>
</ns0:echoStringArrayResponse>
<a id="id0" xmlns:ns0="http://soapinterop.org/" xsi2:type="xsd:string" xsi:type="xsd:string"></a>
<a id="id1" xmlns:ns0="http://soapinterop.org/" xsi2:type="xsd:string" xsi:type="xsd:string">Hello</a>
<a id="id2" xmlns:ns0="http://soapinterop.org/" xsi2:type="xsd:string" xsi:type="xsd:string">\'<&>"</a>
<return2 SOAP-ENC:arrayType="xsd:string[3]" id="id3" xmlns:ns0="http://soapinterop.org/">
<a href="#id0"/>
<a href="#id1"/>
<a href="#id2"/>
</return2>
</SOAP-ENV:Body></SOAP-ENV:Envelope>'''
y = parseSOAPRPC(x)
if config.simplify_objects:
self.assertEquals(y['return2'][1], "Hello")
else:
self.assertEquals(y.return2[1], "Hello")
def testUTF8Encoding2(self):
x = '''<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
<SOAP-ENV:Body SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<ns0:echoStringArrayResponse xmlns:ns0="http://soapinterop.org/">
<a xsi:type="xsd:string"></a>
<a xsi:type="xsd:string">Hello</a>
<a xsi:type="xsd:string">\'<&>"</a>
<b xsi:type="xsd:string">Goodbye</b>
</ns0:echoStringArrayResponse>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>'''
y = parseSOAPRPC(x)
self.assertEquals(type(y.a), type([]))
self.assertEquals(type(y.b), type(''))
self.assertEquals(type(y._getItemAsList('a')), type([]))
self.assertEquals(type(y._getItemAsList('b')), type([]))
self.assertEquals(y.b, 'Goodbye')
self.assertEquals(y.a, ['', 'Hello', '\'<&>"'])
self.assertEquals(y._getItemAsList('b'), ['Goodbye'])
self.assertEquals(y._getItemAsList('c'), [])
self.assertEquals(y._getItemAsList('c', 'hello'), 'hello')
def testUTF8Encoding2a(self):
x = '''<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
<SOAP-ENV:Body
SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:xsd="http://www.w3.org/1999/XMLSchema"
xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<a1 SOAP-ENC:root="1">Hello</a1>
<a2 SOAP-ENC:root="0" id="id">\'<&>"</a2>
<a3>Goodbye</a3>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>'''
y = parseSOAP(x)
self.assertEquals(y.a1, 'Hello')
self.assertEquals(y.a3, 'Goodbye')
self.failIf(hasattr(y, 'a2'))
def testUTF8Encoding3(self):
x = '''<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/" xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<soap:Body soap:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<SomeMethod>
<Result>
<Book>
<title>My Life and Work</title>
<author href="#Person-1"/>
</Book>
<Person id="Person-1">
<name>Henry Ford</name>
<address href="#Address-2"/>
</Person>
<Address id="Address-2">
<email>mailto:henryford@hotmail.com</email>
<web>http://www.henryford.com</web>
<pers href="#Person-1"/>
</Address>
</Result>
</SomeMethod>
</soap:Body>
</soap:Envelope>
'''
y = parseSOAPRPC(x)
if config.simplify_objects:
self.assertEquals(y['Result']['Book']['author']['name'], "Henry Ford")
self.assertEquals(y['Result']['Book']['author']['address']['web'], "http://www.henryford.com")
self.assertEquals(y['Result']['Book']['author']['address']['pers']['name'], "Henry Ford")
else:
self.assertEquals(y.Result.Book.author.name, "Henry Ford")
self.assertEquals(y.Result.Book.author.address.web, "http://www.henryford.com")
self.assertEquals(y.Result.Book.author.address.pers.name, "Henry Ford")
# ref example
def testRef(self):
x = '''<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/" xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<soap:Body soap:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<echoFloatArrayResponse xmlns="http://soapinterop.org/">
<Return href="#i1" xmlns="" />
</echoFloatArrayResponse>
<soapenc:Array id="i1" soapenc:arrayType="xsd:float[4]">
<Item>0</Item>
<Item>1</Item>
<Item>-1</Item>
<Item>3853.33325</Item>
</soapenc:Array>
</soap:Body>
</soap:Envelope>
'''
y = parseSOAPRPC(x)
if config.simplify_objects:
self.assertEquals(y['Return'][0], 0)
self.assertEquals(y['Return'][1], 1)
self.assertEquals(y['Return'][2], -1)
self.failUnless(nearlyeq(y['Return'][3], 3853.33325))
else:
self.assertEquals(y.Return[0], 0)
self.assertEquals(y.Return[1], 1)
self.assertEquals(y.Return[2], -1)
self.failUnless(nearlyeq(y.Return[3], 3853.33325))
# Make sure passing in our own bodyType works.
def testBodyType(self):
a = [23, 42]
b = bodyType()
b.a = b.b = a
x = buildSOAP(b)
y = parseSOAP(x)
self.assertEquals(id(y.a), id(y.b))
self.assertEquals(y.a, a)
self.assertEquals(y.b, a)
# Test Envelope versioning (see section 4.1.2 of http://www.w3.org/TR/SOAP).
def testEnvelopeVersioning(self):
xml = '''<SOAP-ENV:Envelope
SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:xsd="http://www.w3.org/1999/XMLSchema"
xmlns:SOAP-ENV="http://new/envelope/version/"
xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance"
xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/">
<SOAP-ENV:Body>
<_1 xsi:type="xsd:int" SOAP-ENC:root="1">1</_1>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>'''
try:
parseSOAP(xml)
except Exception, e:
self.failUnless(isinstance(e, faultType))
self.assertEquals(e.faultcode, '%s:VersionMismatch' % NS.ENV_T)
self.failIf(hasattr(e, 'detail'))
# Big terrible ordered data with attributes test.
def testBigOrderedData(self):
data = '''<?xml version="1.0" encoding="UTF-8" ?>
<Envelope xmlns="http://schemas.xmlsoap.org/soap/envelope/">
<Body>
<replyBlock generic="1.0" attrib1="false" attrib2='hello'>
<itemList>
<mainItem mainattrib1='uno'>
<name>first_main_item</name>
<description>whatever etc.</description>
<infoList>
<itemInfo a1='123' a2='abc'>
<name>unoItem1</name>
</itemInfo>
<itemInfo a1='456' a2='def'>
<name>unoItem2</name>
</itemInfo>
<itemInfo a1='789' a2='ghi'>
<name>unoItem3</name>
</itemInfo>
</infoList>
</mainItem>
<mainItem mainattrib1='dos'>
<name>second_main_item</name>
<description>whatever etc.</description>
<infoList>
<itemInfo a1='3123' a2='3abc'>
<name>dosItem1</name>
</itemInfo>
<itemInfo a1='3456' a2='3def'>
<name>dosItem2</name>
</itemInfo>
<itemInfo a1='3789' a2='3ghi'>
<name>dosItem3</name>
</itemInfo>
</infoList>
</mainItem>
</itemList>
<itemList>
<mainItem mainattrib1='single'>
<name>single_main_item</name>
<description>whatever etc.</description>
<infoList>
<itemInfo a1='666' a2='xxx'>
<name>singleItem1</name>
</itemInfo>
</infoList>
</mainItem>
</itemList>
</replyBlock>
</Body>
</Envelope>'''
x = parseSOAP(data)
# print ".>",x.replyBlock.itemList._ns
y = buildSOAP(x)
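    # Parsing an envelope with a header; header=1 makes parseSOAPRPC return (body, header).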
def testEnvelope1(self):
my_xml2 = '''
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<SOAP-ENV:Header>
<t:Transaction xmlns:t="some-URI" SOAP-ENV:mustUnderstand="1">
5
</t:Transaction>
</SOAP-ENV:Header>
<SOAP-ENV:Body>
<m:GetLastTradePriceResponse xmlns:m="Some-URI">
<PriceAndVolume>
<LastTradePrice>
34.5
</LastTradePrice>
<DayVolume>
10000
</DayVolume>
</PriceAndVolume>
</m:GetLastTradePriceResponse>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
(x,h) = parseSOAPRPC(my_xml2,header=1)
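    # An empty typed array (SOAPStruct[0]) should parse without error.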
def testEnvelope2(self):
x ='''
<V:Envelope
xmlns:V="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:C="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:i="http://www.w3.org/1999/XMLSchema-instance"
xmlns:d="http://www.w3.org/1999/XMLSchema"
V:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<V:Body>
<m:echoStructArray
xmlns:m="urn:xmethodsInterop">
<inputStructArray
i:type="C:Array"
C:arrayType="ns3:SOAPStruct[0]"
xmlns:ns3="http://soapinterop.org/xsd"/>
</m:echoStructArray>
</V:Body>
</V:Envelope>'''
x = parseSOAPRPC(x)
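    # attrs=1 makes parseSOAPRPC also return element attributes, keyed by id() of the parsed value.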
def testEnvelope3(self):
x = '''<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
<SOAP-ENV:Body>
<m:echoStringResponse xmlns:m="http://soapinterop.org/">
<Result name="fred">hello</Result>
</m:echoStringResponse>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
x, a = parseSOAPRPC(x, attrs = 1)
if config.simplify_objects:
self.assertEquals(a[id(x['Result'])][(None, 'name')], 'fred')
else:
self.assertEquals(a[id(x.Result)][(None, 'name')], 'fred')
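    # A Fault with a typed detail element should still parse into a faultType.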
def testParseException(self):
x='''<SOAP-ENV:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" SOAP-ENV:encodingStyle="http://schemas.microsoft.com/soap/encoding/clr/1.0 http://schemas.xmlsoap.org/soap/encoding/" xmlns:a1="http://schemas.microsoft.com/clr/ns/System.Runtime.Serialization.Formatters">
<SOAP-ENV:Body>
<SOAP-ENV:Fault id="ref-1">
<faultcode id="ref-2">SOAP-ENV:Server</faultcode>
<faultstring id="ref-3">Exception thrown on Server</faultstring>
<detail xsi:type="a1:ServerFault">
<exceptionType id="ref-4">System.Runtime.Serialization.SerializationException, mscorlib, Version=1.0.2411.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</exceptionType>
<message id="ref-5">Soap Parser Error System.Runtime.Serialization.SerializationException: Parse Error, xsd type not valid: Array
at System.Runtime.Serialization.Formatters.Soap.SoapHandler.ProcessGetType(String value, String xmlKey)
at System.Runtime.Serialization.Formatters.Soap.SoapHandler.ProcessType(ParseRecord pr, ParseRecord objectPr)
at System.Runtime.Serialization.Formatters.Soap.SoapHandler.ProcessAttributes(ParseRecord pr, ParseRecord objectPr)
at System.Runtime.Serialization.Formatters.Soap.SoapHandler.StartElement(String prefix, String name, String urn)
at System.XML.XmlParser.ParseElement()
at System.XML.XmlParser.ParseTag()
at System.XML.XmlParser.Parse()
at System.XML.XmlParser.Parse0()
at System.XML.XmlParser.Run()</message>
<stackTrace id="ref-6"> at System.Runtime.Serialization.Formatters.Soap.SoapHandler.Error(IXmlProcessor p, Exception ex)
at System.XML.XmlParser.Run()
at System.Runtime.Serialization.Formatters.Soap.SoapParser.Run()
at System.Runtime.Serialization.Formatters.Soap.ObjectReader.Deserialize(HeaderHandler handler, ISerParser serParser)
at System.Runtime.Serialization.Formatters.Soap.SoapFormatter.Deserialize(Stream serializationStream, HeaderHandler handler)
at System.Runtime.Remoting.Channels.CoreChannel.DeserializeMessage(String mimeType, Stream xstm, Boolean methodRequest, IMessage msg, Header[] h)
at System.Runtime.Remoting.Channels.SoapServerFormatterSink.ProcessMessage(IServerChannelSinkStack sinkStack, ITransportHeaders requestHeaders, Stream requestStream, IMessage& msg, ITransportHeaders& responseHeaders, Stream& responseStream)</stackTrace>
</detail>
</SOAP-ENV:Fault>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
z = parseSOAPRPC(x)
self.assertEquals(z.__class__,faultType)
self.assertEquals(z.faultstring, "Exception thrown on Server")
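    # A whole envelope on one line; an empty Result should parse as an empty string.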
def testFlatEnvelope(self):
x = '''<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"><SOAP-ENV:Body><m:echoStringResponse xmlns:m="http://soapinterop.org/"><Result></Result></m:echoStringResponse></SOAP-ENV:Body></SOAP-ENV:Envelope>
'''
z = parseSOAPRPC(x)
if config.simplify_objects:
self.assertEquals(type(z['Result']), type(''))
else:
self.assertEquals(type(z.Result), type(''))
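    # Round-trip plain Python arrays through buildSOAP and parseSOAPRPC.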
def testNumericArray(self):
x = [1,2,3,4,5]
y = buildSOAP(x)
z = parseSOAPRPC(y)
self.assertEquals(x, z)
def testStringArray(self):
x = ["cayce", "asd", "buy"]
y = buildSOAP(x)
z = parseSOAPRPC(y)
self.assertEquals(x, z)
def testStringArray1(self):
x = arrayType(['a', 'b', 'c'])
y = buildSOAP(x)
z = parseSOAP(y)
        if config.simplify_objects:
            self.assertEquals(z['v1']['_elemsname'], 'item')
            self.assertEquals(z['v1'], x)
        else:
            self.assertEquals(z.v1._elemsname, 'item')
            self.assertEquals(z.v1, x)
def testStringArray2(self):
x = arrayType(['d', 'e', 'f'], elemsname = 'elementals')
y = buildSOAP(x)
z = parseSOAP(y)
        if config.simplify_objects:
            self.assertEquals(z['v1']['_elemsname'], 'elementals')
            self.assertEquals(z['v1'], x)
        else:
            self.assertEquals(z.v1._elemsname, 'elementals')
            self.assertEquals(z.v1, x)
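    # xsd:int values should parse to Python ints.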
def testInt1(self):
my_xml = '''
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
<m:getStateName xmlns:m="http://www.soapware.org/">
<statenum xsi:type="xsd:int">41</statenum>
</m:getStateName>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
s = parseSOAPRPC(my_xml)
if config.simplify_objects:
self.assertEquals(s['statenum'], 41)
self.assertEquals(type(s['statenum']), type(0))
else:
self.assertEquals(s.statenum, 41)
self.assertEquals(type(s.statenum), type(0))
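    # Same check as testInt1, but with nonstandard namespace prefixes.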
def testInt2(self):
my_xml_ns = '''
<XSOAP-ENV:Envelope XSOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:XSOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:XSOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:Xxsd="http://www.w3.org/1999/XMLSchema" xmlns:Xxsi="http://www.w3.org/1999/XMLSchema-instance">
<XSOAP-ENV:Body>
<m:getStateName xmlns:m="http://www.soapware.org/">
<statenum Xxsi:type="Xxsd:int">41</statenum>
</m:getStateName>
</XSOAP-ENV:Body>
</XSOAP-ENV:Envelope>
'''
s = parseSOAPRPC(my_xml_ns)
if config.simplify_objects:
self.assertEquals(s['statenum'], 41, "NS one failed")
self.assertEquals(type(s['statenum']), type(0))
else:
self.assertEquals(s.statenum, 41, "NS one failed")
self.assertEquals(type(s.statenum), type(0))
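    # Untyped element content comes back as strings, whitespace included.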
def testPriceAndVolume(self):
my_xml2 = '''
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<SOAP-ENV:Header>
<t:Transaction xmlns:t="some-URI" SOAP-ENV:mustUnderstand="1">
5
</t:Transaction>
</SOAP-ENV:Header>
<SOAP-ENV:Body>
<m:GetLastTradePriceResponse xmlns:m="Some-URI">
<PriceAndVolume>
<LastTradePrice>
34.5
</LastTradePrice>
<DayVolume>
10000
</DayVolume>
</PriceAndVolume>
</m:GetLastTradePriceResponse>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
s = parseSOAPRPC(my_xml2)
if config.simplify_objects:
self.assertEquals(s['PriceAndVolume']['LastTradePrice'].strip(), "34.5")
self.assertEquals(s['PriceAndVolume']['DayVolume'].strip(), "10000")
else:
self.assertEquals(s.PriceAndVolume.LastTradePrice.strip(), "34.5")
self.assertEquals(s.PriceAndVolume.DayVolume.strip(), "10000")
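    # Whitespace around xsd:int values should be ignored.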
def testInt3(self):
my_xml3 = '''
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
<Bounds>
<param>
<lowerBound xsi:type="xsd:int"> 18 </lowerBound>
<upperBound xsi:type="xsd:int"> 139</upperBound>
</param>
</Bounds>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
s = parseSOAPRPC(my_xml3)
if config.simplify_objects:
self.assertEquals(s['param']['lowerBound'], 18)
self.assertEquals(s['param']['upperBound'], 139)
else:
self.assertEquals(s.param.lowerBound, 18)
self.assertEquals(s.param.upperBound, 139)
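    # Typed array items, xsd:boolean, and xsi:null handling.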
def testBoolean(self):
my_xml4 = '''
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
<Bounds>
<param SOAP-ENC:arrayType="xsd:ur-type[4]" xsi:type="SOAP-ENC:Array"><item xsi:type="xsd:int">12</item>
<item xsi:type="xsd:string">Egypt</item>
<item xsi:type="xsd:boolean">0</item>
<item xsi:type="xsd:int">-31</item>
</param>
<param1 xsi:null="1"></param1>
<param2 xsi:null="true"></param2>
<param3 xsi:type="xsd:int" xsi:null="false">7</param3>
</Bounds>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
s = parseSOAPRPC(my_xml4)
if config.simplify_objects:
self.assertEquals(s['param'][0], 12)
self.assertEquals(s['param'][1], "Egypt")
self.assertEquals(s['param'][2], 0)
self.assertEquals(s['param'][3], -31)
self.assertEquals(s['param1'], None)
self.assertEquals(s['param2'], None)
self.assertEquals(s['param3'], 7)
else:
self.assertEquals(s.param[0], 12)
self.assertEquals(s.param[1], "Egypt")
self.assertEquals(s.param[2], 0)
self.assertEquals(s.param[3], -31)
self.assertEquals(s.param1, None)
self.assertEquals(s.param2, None)
self.assertEquals(s.param3, 7)
def testFault(self):
my_xml5 = '''
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
<SOAP-ENV:Fault>
<faultcode>SOAP-ENV:Client</faultcode>
<faultstring>Can't call getStateName because there are too many parameters.</faultstring>
</SOAP-ENV:Fault>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
s = parseSOAPRPC(my_xml5)
self.assertEquals(s.__class__, faultType)
self.assertEquals(s.faultcode, "SOAP-ENV:Client")
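    # A heterogeneous array whose last item is a struct containing another array.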
def testArray2(self):
my_xml6 = '''
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
<h SOAP-ENC:arrayType="xsd:ur-type[6]" xsi:type="SOAP-ENC:Array">
<item xsi:type="xsd:int">5</item>
<item xsi:type="xsd:int">3</item>
<item xsi:type="xsd:int">2</item>
<item xsi:type="xsd:string">monkey</item>
<item xsi:type="xsd:string">cay</item>
<item>
<cat xsi:type="xsd:string">hello</cat>
<ferret SOAP-ENC:arrayType="xsd:ur-type[6]" xsi:type="SOAP-ENC:Array">
<item xsi:type="xsd:int">5</item>
<item xsi:type="xsd:int">4</item>
<item xsi:type="xsd:int">3</item>
<item xsi:type="xsd:int">2</item>
<item xsi:type="xsd:int">1</item>
<item>
<cow xsi:type="xsd:string">moose</cow>
</item>
</ferret>
<monkey xsi:type="xsd:int">5</monkey>
</item>
</h>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
q = parseSOAPRPC(my_xml6)
self.assertEquals(q[0], 5)
self.assertEquals(q[1], 3)
self.assertEquals(q[2], 2)
self.assertEquals(q[3], 'monkey')
self.assertEquals(q[4], 'cay')
x = q[5]
if config.simplify_objects:
self.assertEquals(x['monkey'], 5)
self.assertEquals(x['cat'], "hello")
self.assertEquals(x['ferret'][0], 5)
self.assertEquals(x['ferret'][3], 2)
self.assertEquals(x['ferret'][5]['cow'], "moose")
else:
self.assertEquals(x.monkey, 5)
self.assertEquals(x.cat, "hello")
self.assertEquals(x.ferret[0], 5)
self.assertEquals(x.ferret[3], 2)
self.assertEquals(x.ferret[5].cow, "moose")
def testArray3(self):
x = arrayType([5,4,3,21], "spam")
y = buildSOAP(x)
z = parseSOAPRPC(y)
self.assertEquals(x, z)
# test struct
def testStruct(self):
x = structType(name = "eggs")
x.test = 5
y = buildSOAP(x)
z = parseSOAPRPC(y)
if config.simplify_objects:
self.assertEquals( x['test'], z['test'] )
else:
self.assertEquals( x.test, z.test )
# test faults
def testFault1(self):
x = faultType("ServerError","Howdy",[5,4,3,2,1])
y = buildSOAP(x)
z = parseSOAPRPC(y)
self.assertEquals( x.faultcode , z.faultcode)
self.assertEquals( x.faultstring , z.faultstring)
self.assertEquals( x.detail , z.detail)
# Test the recursion
def testRecursion(self):
o = one()
t = two()
o.t = t
t.o = o
tre = three()
tre.o = o
tre.t = t
x = buildSOAP(tre)
y = parseSOAPRPC(x)
if config.simplify_objects:
self.assertEquals( y['t']['o']['t']['o']['t']['o']['t']['str'] , "two")
else:
self.assertEquals( y.t.o.t.o.t.o.t.str , "two")
# Test the recursion with structs
def testRecursionWithStructs(self):
o = structType("one")
t = structType("two")
o.t = t
o.str = "one"
t.o = o
t.str = "two"
tre = structType("three")
tre.o = o
tre.t = t
tre.str = "three"
x = buildSOAP(tre)
y = parseSOAPRPC(x)
if config.simplify_objects:
self.assertEquals( y['t']['o']['t']['o']['t']['o']['t']['str'] , "two")
else:
self.assertEquals( y.t.o.t.o.t.o.t.str , "two")
def testAmp(self):
m = "Test Message <tag> & </tag>"
x = structType("test")
x.msg = m
y = buildSOAP(x)
z = parseSOAPRPC(y)
if config.simplify_objects:
self.assertEquals( m , z['msg'])
else:
self.assertEquals( m , z.msg)
def testInt4(self):
my_xml7 = '''
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
<Bounds>
<param>
<lowerBound xsi:type="xsd:int"> 18 </lowerBound>
<upperBound xsi:type="xsd:int"> 139</upperBound>
</param>
</Bounds>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
x = parseSOAPRPC(my_xml7)
y = buildSOAP(x)
# Does buildSOAP require a valid encoding?
def testBuildSOAPEncoding(self):
try:
x = buildSOAP('hello', encoding = 'gleck')
except LookupError, e:
if str (e)[0:16] != 'unknown encoding': raise
x = None
except:
print "Got unexpected exception: %s %s" % tuple (sys.exc_info ()[0:2])
x = ''
self.assertEquals( x , None)
# Does SOAPProxy require a valid encoding?
def testSOAPProxyEncoding(self):
try:
x = SOAPProxy('', encoding = 'gleck')
except LookupError, e:
if str (e)[0:16] != 'unknown encoding': raise
x = None
except:
print "Got unexpected exception: %s %s" % tuple (sys.exc_info ()[0:2])
x = ''
self.assertEquals( x , None)
# Does SOAPServer require a valid encoding?
def testSOAPServerEncoding(self):
try:
x = SOAPServer(('localhost', 0), encoding = 'gleck')
except LookupError, e:
if str (e)[0:16] != 'unknown encoding': raise
x = None
except:
print "Got unexpected exception: %s %s" % tuple (sys.exc_info ()[0:2])
x = ''
self.assertEquals( x , None)
def testEncodings(self):
encodings = ('US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16')
tests = ('A', u'\u0041')
for t in tests:
for i in range (len (encodings)):
x = buildSOAP (t, encoding = encodings[i])
y = parseSOAPRPC (x)
self.assertEquals( y , t)
tests = (u'\u00a1',)
for t in tests:
for i in range (len (encodings)):
try:
x = buildSOAP (t, encoding = encodings[i])
except:
if i > 0: raise
continue
y = parseSOAPRPC (x)
self.assertEquals( y , t)
tests = (u'\u01a1', u'\u2342')
for t in tests:
for i in range (len (encodings)):
try:
x = buildSOAP (t, encoding = encodings[i])
except:
if i > 1: raise
continue
y = parseSOAPRPC (x)
self.assertEquals( y , t)
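    # Helper to build a one-element envelope for the typed parsing tests below.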
def build_xml(self, schema, type, value, attrs = ''):
return '''<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope
SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:xsd="%(schema)s"
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
<_1 xsi:type="xsd:%(type)s"%(attrs)s>%(value)s</_1>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>''' % {'schema': schema, 'type': type, 'value': value,
'attrs': attrs}
# Make sure the various limits are checked when parsing
def testIntegerLimits(self):
for t, l in SOAPParser.intlimits.items():
try:
                parseSOAP(self.build_xml(NS.XSD, t, 'hello'))
raise AssertionError, "parsed %s of 'hello' without error" % t
except AssertionError:
raise
except:
pass
if l[1] != None:
try:
parseSOAP(self.build_xml(NS.XSD, t, l[1] - 1))
raise AssertionError, "parsed %s of %s without error" % \
(t, l[1] - 1)
except AssertionError:
raise
except UnderflowError:
pass
if l[2] != None:
try:
parseSOAP(self.build_xml(NS.XSD, t, l[2] + 1))
raise AssertionError, "parsed %s of %s without error" % \
(t, l[2] + 1)
except AssertionError:
raise
except OverflowError:
pass
    # Make sure the float limits are checked when parsing. Note that chances
    # are good this won't work in any non-Unix Pythons.
def testFloatLimits(self):
for i in \
(
('float', '-3.402823466391E+38'),
('float', '3.402823466391E+38'),
('float', '3.5e+38'),
('float', '6.9e-46'),
('double', '-1.7976931348623159E+308'),
('double', '1.7976931348623159E+308'),
('double', '1.8e308'),
('double', '2.4e-324'),
):
try:
parseSOAP(self.build_xml(NS.XSD, i[0], i[1]))
# Hide this error for now, cause it is a bug in python 2.0 and 2.1
#if not (sys.version_info[0] == 2 and sys.version_info[1] <= 2) \
# and i[1]=='1.7976931348623159E+308':
raise AssertionError, "parsed %s of %s without error" % i
except AssertionError:
raise
except (UnderflowError, OverflowError):
pass
# Make sure we can't instantiate the base classes
def testCannotInstantiateBaseClasses(self):
for t in (anyType, NOTATIONType):
try:
x = t()
raise AssertionError, "instantiated %s directly" % repr(t)
except:
pass
# Try everything that requires initial data without any.
def testMustBeInitialized(self):
for t in (CDATAType, ENTITIESType, ENTITYType, IDType, IDREFType,
IDREFSType, NCNameType, NMTOKENType, NMTOKENSType, NOTATIONType,
NameType, QNameType, anyURIType, base64Type, base64BinaryType,
binaryType, booleanType, byteType, decimalType, doubleType,
durationType, floatType, hexBinaryType, intType, integerType,
languageType, longType, negative_IntegerType, negativeIntegerType,
non_Negative_IntegerType, non_Positive_IntegerType,
nonNegativeIntegerType, nonPositiveIntegerType, normalizedStringType,
positive_IntegerType, positiveIntegerType, shortType, stringType,
timeDurationType, tokenType, unsignedByteType, unsignedIntType,
unsignedLongType, unsignedShortType, untypedType, uriType,
uriReferenceType):
try:
t()
raise AssertionError, "instantiated a %s with no value" % t.__name__
except AssertionError:
raise
except:
pass
def testInstantiations(self):
# string, ENTITY, ID, IDREF, language, Name, NCName,
# NMTOKEN, QName, untypedType
for t in (stringType, ENTITYType, IDType, IDREFType,
languageType, NameType, NCNameType, NMTOKENType,
QNameType, untypedType):
# First some things that shouldn't be taken as the current type
test = (10, (), [], {})
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad type (%s)" % \
(repr(t), repr(type(i)))
except AssertionError:
raise
except:
pass
# Now some things that should
for i in ('hello', u'goodbye'):
x = t(i)
d = x._marshalData()
if d != i:
raise AssertionError, "expected %s, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (i, z)
# ENTITIES, IDREFS, NMTOKENS
for t in (ENTITIESType, IDREFSType, NMTOKENSType):
# First some things that shouldn't be taken as the current type
test = ({}, lambda x: x, ((),), ([],), [{}], [()])
for i in test:
try:
t(i)
                    raise AssertionError, \
                          "instantiated a %s with a bad type (%s)" % \
                          (repr(t), repr(type(i)))
except AssertionError:
raise
except:
pass
# Now some things that should
for i in ('hello', (), [], ('hello', 'goodbye'), ['aloha', 'guten_tag']):
x = t(i)
d = x._marshalData()
if type(i) in (type(()), type([])):
j = list(i)
else:
j = [i]
k = ' '.join(j)
if d != k:
raise AssertionError, "expected %s, got %s" % (k, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != j:
raise AssertionError, "expected %s, got %s" % (repr(j), repr(z))
# uri, uriReference, anyURI
for t in (uriType, uriReferenceType, anyURIType):
# First some things that shouldn't be taken as the current type
test = (10, (), [], {})
for i in test:
try:
t(i)
                    raise AssertionError, \
                          "instantiated a %s with a bad type (%s)" % \
                          (t.__name__, repr(type(i)))
except AssertionError:
raise
except:
pass
# Now some things that should
for i in ('hello', u'goodbye', '!@#$%^&*()-_=+[{]}\|;:\'",<.>/?`~'):
x = t(i)
d = x._marshalData()
j = urllib.quote(i)
if d != j:
raise AssertionError, "expected %s, got %s" % (j, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
        #### token
        # First some things that shouldn't be valid because of type
test = (42, 3.14, (), [], {})
t = tokenType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad type (%s)" % (t.__name__, repr(i))
except AssertionError:
raise
except AttributeError:
pass
# Now some things that shouldn't be valid because of content
test = (' hello', 'hello ', 'hel\nlo', 'hel\tlo', 'hel lo', ' \n \t ')
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % (t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should be valid
for i in ('', 'hello', u'hello'):
x = t(i)
d = x._marshalData()
if d != i:
raise AssertionError, "expected %s, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i and i != '':
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
#### CDATA, normalizedString
for t in (CDATAType, normalizedStringType):
# First some things that shouldn't be valid because of type
test = (42, 3.14, (), [], {})
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad type (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except AttributeError:
pass
# Now some things that shouldn't be valid because of content
test = ('hel\nlo', 'hel\rlo', 'hel\tlo', '\n\r\t')
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should be valid
for i in ('', 'hello', u'hello', 'hel lo'):
x = t(i)
d = x._marshalData()
if d != i:
raise AssertionError, "expected %s, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i and i != '':
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
#### boolean
# First some things that shouldn't be valid
test = (10, 'hello', (), [], {})
t = booleanType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % (t.__name__, repr(i))
except AssertionError:
raise
except:
pass
# Now some things that should
for i in ((0, 'false'), ('false', 'false'), (1, 'true'),
('true', 'true'), (0.0, 'false'), (1.0, 'true')):
x = t(i[0])
d = x._marshalData()
if d != i[1]:
raise AssertionError, "%s: expected %s, got %s" % (i[0], i[1], d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
j = ('false', 'true')[z]
if j != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], repr(i[1]), repr(j))
# Now test parsing, both valid and invalid
test = (('10', None), ('hello', None), ('false', 0), ('FALSE', 0),
(ws + 'false' + ws, 0), (ws + '0' + ws, 0),
('0', 0), ('true', 1), ('TRUE', 1), ('1', 1),
(ws + 'true' + ws, 1), (ws + '1' + ws, 1))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != None:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
# Can we give it a name and no type?
#print
x = t(1, name = 'George', typed = 0)
#print "x=",x
y = buildSOAP(x)
#print "y=",y
z = parseSOAP(y)
#print "z=",z
test = 'true'
if z.George != test:
raise AssertionError, "expected %s, got %s" % (repr(test), repr(z))
# How about some attributes, set in various and sundry manners?
x = t(1, attrs = {'nonamespaceURI': 1})
x._setAttrs({(None, 'NonenamespaceURI'): 2,
('http://some/namespace', 'namespaceURIattr1'): 3})
x._setAttr(('http://some/other/namespace', 'namespaceURIattr2'), 4)
self.assertEquals( x._getAttr('nonamespaceURI') , 1)
self.assertEquals( x._getAttr('NonenamespaceURI') , 2)
self.assertEquals( x._getAttr(('http://some/namespace',
'namespaceURIattr1')) , 3)
self.assertEquals( x._getAttr(('http://some/other/namespace',
'namespaceURIattr2')) , 4)
self.assertEquals( x._getAttr('non-extant attr') , None)
y = buildSOAP(x)
z = parseSOAPRPC(y)
self.assertEquals( z , 1)
#### decimal
# First some things that shouldn't be valid
test = ('hello', (), [], {})
t = decimalType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad type (%s)" % \
(t.__name__, repr(type(i)))
except AssertionError:
raise
except:
pass
# Now some things that should
for i in (10, 3.14, 23L):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %f, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', None), ('1.2.3', None), ('10', 10), ('10.', 10),
('.1', .1), ('.1000000', .1), (ws + '10.4' + ws, 10.4))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != None:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
#### float
# First some things that shouldn't be valid
test = ('hello', (), [], {}, -3.402823466391E+38, 3.402823466391E+38)
t = floatType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (10, 3.14, 23L, -3.4028234663852886E+38, 3.4028234663852886E+38):
x = t(i)
d = x._marshalData()
if not nearlyeq(float(d), i):
raise AssertionError, "expected %f, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if not nearlyeq(z, i):
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', None), ('1.2.3', None), ('10', 10), ('10.', 10),
('.1', .1), ('.1000000', .1), (ws + '10.4' + ws, 10.4),
('-3.402823466391E+38', None), ('3.402823466391E+38', None),
('-3.4028234663852886E+38', -3.4028234663852886E+38),
('3.4028234663852886E+38', 3.4028234663852886E+38))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if abs(z - i[1]) > 1e-6:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != None:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
#### double
# First some things that shouldn't be valid
test = ('hello', (), [], {},
-1.7976931348623159E+308, 1.7976931348623159E+308)
t = doubleType
for i in test:
try:
t(i)
# Hide this error for now, cause it is a bug in python 2.0 and 2.1
if not (sys.version_info[0] == 2 and sys.version_info[1] <= 2
and i==1.7976931348623159E+308):
raise AssertionError, \
"instantiated a double with a bad value (%s)" % repr(i)
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (10, 3.14, 23L, -1.79769313486E+308, 1.79769313486E+308):
x = t(i)
d = x._marshalData()
if not nearlyeq(float(d), i):
raise AssertionError, "expected %s, got %s" % (i, str(x))
y = buildSOAP(x)
z = parseSOAPRPC(y)
if not nearlyeq(z, i):
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', None), ('1.2.3', None), ('10', 10), ('10.', 10),
('.1', .1), ('.1000000', .1), (ws + '10.4' + ws, 10.4),
('-1.7976931348623159E+308', None), ('1.7976931348623158E+308', None),
('-1.79769313486E+308', -1.79769313486E+308),
('1.79769313486E+308', 1.79769313486E+308))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if abs(z - i[1]) > 1e-6:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != None:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
#### hexBinary
x = ''
for i in range(256):
x += chr(i)
test = ('', x, 'hello')
t = hexBinaryType
l = []
for i in test:
l.append(hexBinaryType(i))
x = buildSOAP(l)
y = parseSOAPRPC(x)
for i in range(len(test)):
if test[i] != y[i]:
raise AssertionError, "@ %d expected '%s', got '%s'" % \
(i, test[i], y[i])
# Now test parsing, both valid and invalid
test = (('hello', None), ('6163 747A65726F', None), ('6163747A65726', None),
('6163747A65726F', 'actzero'), (ws + '6163747A65726F' + ws, 'actzero'))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != None:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
#### base64Binary and base64
s = ''
for i in range(256):
s += chr(i)
for t in (base64BinaryType, base64Type):
# First some things that shouldn't be valid
test = ((), [], {}, lambda x: x)
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except AttributeError:
pass
# Now some things that should
test = ('', s, u'hello')
l = []
for i in test:
l.append(t(i))
x = buildSOAP(l)
y = parseSOAPRPC(x)
for i in range(len(test)):
if test[i] != y[i]:
raise AssertionError, "@ %d expected '%s', got '%s'" % \
(i, test[i], y[i])
# Now test parsing, both valid and invalid
test = (('hello', None), ('YWN0emVybw=', None),
('YWN 0emVybw==', 'actzero'), ('YWN0emVybw==', 'actzero'),
(ws + 'YWN0emVybw==' + ws, 'actzero'))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != None:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
#### binary (uses s from above)
# First some check invalid encodings
try:
x = binaryType('hello', encoding = 'yellow')
raise AssertionError, "created binary with invalid encoding"
except AssertionError:
raise
except:
pass
for t in ('hex', 'base64'):
# First some things that shouldn't be valid
test = ((), [], {}, lambda x: x)
for i in test:
try:
binaryType(i, encoding = t)
                    raise AssertionError, \
                          "instantiated a %s binary with a bad value (%s)" % \
                          (t, repr(i))
except AssertionError:
raise
except AttributeError:
pass
# Now some things that should
test = ('', s, u'hello')
l = []
for i in test:
l.append(binaryType(i, encoding = t))
x = buildSOAP(l)
y = parseSOAPRPC(x)
for i in range(len(test)):
if test[i] != y[i]:
raise AssertionError, "@ %d expected '%s', got '%s'" % \
(i, test[i], y[i])
# Now test parsing, both valid and invalid
if t == 'hex':
test = (('hello', None), ('6163 747A65726F', None),
('6163747A65726', None), ('6163747A65726F', 'actzero'),
(ws + '6163747A65726F' + ws, 'actzero'))
else:
test = (('hello', None), ('YWN0emVybw=', None),
('YWN 0emVybw==', 'actzero'), ('YWN0emVybw==', 'actzero'),
(ws + 'YWN0emVybw==' + ws, 'actzero'))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(NS.XSD, 'binary', i[0],
' encoding="%s"' % t))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != None:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t, sys.exc_info()[0], sys.exc_info()[1])
# Finally try an Array of binaries (with references!)
test = ('', s, u'hello')
l = []
for i in test:
l.append(binaryType(i, encoding = t))
l.append(l[1])
x = buildSOAP(l)
y = parseSOAPRPC(x)
for i in range(len(test)):
if test[i] != y[i]:
raise AssertionError, "@ %d expected '%s', got '%s'" % \
(i, test[i], y[i])
# Make sure the references worked
self.assertEquals( id(y[1]) , id(y[3]))
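    # Generic drivers for the date/time tests below: badTest feeds values that
    # must be rejected, goodTest round-trips values through buildSOAP/parseSOAPRPC,
    # and parseTest checks parsing of raw XML; allTests runs all three.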
def badTest(self, t, data):
for i in data:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except:
pass
def goodTest(self, t, data):
for i in data:
x = t(i[0])
d = x._marshalData()
if d != i[1]:
raise AssertionError, "%s(%s): expected %s, got %s" % \
(t.__name__, repr(i[0]), i[1], d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i[2]:
raise AssertionError, "%s(%s): expected %s, got %s" % \
(t.__name__, repr(i[0]), repr(i[2]), repr(z))
def parseTest(self, t, data):
for i in data:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4],
i[0]))
if z != i[1]:
raise AssertionError, "%s(%s): expected %s, got %s" % \
(t.__name__, repr(i[0]), i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def allTests(self, t, baddata, gooddata, parsedata):
self.badTest(t, baddata)
self.goodTest(t, gooddata)
self.parseTest(t, parsedata)
# duration and timeDuration
def testTimeDuration(self):
baddata = \
(
'hello',
('hello',),
(-10, -10),
(-10, 0, -10),
(10.5, 10.5),
(0, 10.5, 0, 10.5, 0),
(1, 2, 3, 4, 5, 6, 7),
(1, 2, 'hello', 4, 5, 6),
(1, 2, 3.5, 4, 5, 6),
)
gooddata = \
(
(0, 'PT0S', (N, N, N, N, N, 0.0,)),
((), 'PT0S', (N, N, N, N, N, 0.0,)),
([], 'PT0S', (N, N, N, N, N, 0.0,)),
((0.5,), 'PT0.5S', (N, N, N, N, N, 0.5,)),
(10L, 'PT10S', (N, N, N, N, N, 10.0,)),
(-10, '-PT10S', (N, N, N, N, N, -10.0,)),
(10.5, 'PT10.5S', (N, N, N, N, N, 10.5,)),
((10L, 20), 'PT10M20S', (N, N, N, N, 10, 20.0)),
((-10, 20), '-PT10M20S', (N, N, N, N, -10, 20.0)),
((10, 0), 'PT10M', (N, N, N, N, 10, N)),
((10, 0, 0), 'PT10H', (N, N, N, 10, N, N)),
((10, 0L, 0, 0), 'P10D', (N, N, 10, N, N, N)),
((10, 0, 0, 0, 0), 'P10M', (N, 10, N, N, N, N)),
((10, 0, 0, 0L, 0, 0), 'P10Y', (10, N, N, N, N, N)),
((-10, 0, 0, 0, 0, 0), '-P10Y', (-10, N, N, N, N, N)),
((10, 0, 0, 0, 0, 20L), 'P10YT20S', (10, N, N, N, N, 20.0,)),
((1, 2, 3, 4, 5, 6.75), 'P1Y2M3DT4H5M6.75S',
(1, 2, 3, 4, 5, 6.75)),
((-1, 2, 3, 4, 5, 6.75), '-P1Y2M3DT4H5M6.75S',
(-1, 2, 3, 4, 5, 6.75)),
((1, 2, 3, 10, 30, 0), 'P1Y2M3DT10H30M',
(1, 2, 3, 10, 30, N)),
((1e6, 2e6, 3e6, 4e6, 5e6, 6.7e6),
'P1000000Y2000000M3000000DT4000000H5000000M6700000S',
(1e6, 2e6, 3e6, 4e6, 5e6, 6.7e6)),
((1347, 0, N, 0, 0), 'P1347M', (N, 1347, N, N, N, N)),
((-1347, 0, 0, 0, N), '-P1347M', (N, -1347, N, N, N, N)),
((1e15, 0, 0, 0, 0), 'P1000000000000000M',
(N, 1000000000000000L, N, N, N, N)),
((-1e15, 0, 0, 0, 0), '-P1000000000000000M',
(N, -1000000000000000L, N, N, N, N)),
((1000000000000000L, 0, 0, 0, 0), 'P1000000000000000M',
(N, 1000000000000000L, N, N, N, N)),
((-1000000000000000L, 0, 0, 0, 0), '-P1000000000000000M',
(N, -1000000000000000L, N, N, N, N)),
)
parsedata = (
('hello', N),
('P T0S', N),
('P10.5Y10.5M', N),
('P1Y2MT', N),
('PT0S', (N, N, N, N, N, 0,)),
('P10Y', (10, N, N, N, N, N)),
(ws + 'P10M' + ws, (N, 10, N, N, N, N)),
('P0Y1347M', (0, 1347, N, N, N, N)),
('P0Y1347M0D', (0, 1347, 0, N, N, N)),
('P0MT0M', (N, 0, N, N, 0, N)),
)
for t in (durationType, timeDurationType):
self.allTests(t, baddata, gooddata, parsedata)
# dateTime, timeInstant, and timePeriod
def testTimePeriod(self):
baddata = \
(
'hello',
('hello',),
(1, 2, 3, 4, 5),
(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
(1, 2, 3, 4, 5, 'hello'),
(1, 2.5, 3, 4, 5, 6),
(1, 0, 3, 4, 5, 6),
(1, 13, 3, 4, 5, 6),
(1, 1, 0, 4, 5, 6),
(1, 1, 32, 4, 5, 6),
(1, 2, 29, 4, 5, 6),
(0, 2, 30, 4, 5, 6),
(100, 2, 29, 4, 5, 6),
(1, 2, 3, -1, 5, 6),
(1, 2, 3, 24, 5, 6),
(1, 2, 3, 4, -1, 6),
(1, 2, 3, 4, 60, 6),
(1, 2, 3, 4, 5, -1),
(1, 2, 3, 4, 5, 61),
(1, 3, 32, 4, 5, 6),
(1, 4, 31, 4, 5, 6),
(1, 5, 32, 4, 5, 6),
(1, 6, 31, 4, 5, 6),
(1, 7, 32, 4, 5, 6),
(1, 8, 32, 4, 5, 6),
(1, 9, 31, 4, 5, 6),
(1, 10, 32, 4, 5, 6),
(1, 11, 31, 4, 5, 6),
(1, 12, 32, 4, 5, 6),
)
gooddata = \
(
(1L, '1970-01-01T00:00:01Z', (1970, 1, 1, 0, 0, 1.0)),
(1.5, '1970-01-01T00:00:01.5Z', (1970, 1, 1, 0, 0, 1.5)),
((-1, 2, 3, 4, 5, 6), '-0001-02-03T04:05:06Z',
(-1, 2, 3, 4, 5, 6.0)),
((1, 2, 3, 4, 5, 6), '0001-02-03T04:05:06Z',
(1, 2, 3, 4, 5, 6.0)),
((10, 2, 3, 4, 5, 6), '0010-02-03T04:05:06Z',
(10, 2, 3, 4, 5, 6.0)),
((100, 2, 3, 4, 5, 6), '0100-02-03T04:05:06Z',
(100, 2, 3, 4, 5, 6.0)),
((1970, 2, 3, 4, 5, 6), '1970-02-03T04:05:06Z',
(1970, 2, 3, 4, 5, 6.0)),
((-1970, 2, 3, 4, 5, 6), '-1970-02-03T04:05:06Z',
(-1970, 2, 3, 4, 5, 6.0)),
((1970L, 2.0, 3.0, 4L, 5L, 6.875), '1970-02-03T04:05:06.875Z',
(1970, 2, 3, 4, 5, 6.875)),
((11990, 1, 2, 3, 4L, 5.25, 0, 0, 0),
'11990-01-02T03:04:05.25Z',
(11990, 1, 2, 3, 4, 5.25)),
((1e15, 1, 2, 3, 4L, 5.25, 0, 0, 0),
'1000000000000000-01-02T03:04:05.25Z',
(1e15, 1, 2, 3, 4, 5.25)),
((-1e15, 1, 2, 3, 4L, 5.25, 0, 0, 0),
'-1000000000000000-01-02T03:04:05.25Z',
(-1e15, 1, 2, 3, 4, 5.25)),
((1000000000000000L, 1, 2, 3, 4L, 5.25, 0, 0, 0),
'1000000000000000-01-02T03:04:05.25Z',
(1e15, 1, 2, 3, 4, 5.25)),
((-1000000000000000L, 1, 2, 3, 4L, 5.25, 0, 0, 0),
'-1000000000000000-01-02T03:04:05.25Z',
(-1e15, 1, 2, 3, 4, 5.25)),
)
parsedata = \
(
# Some strings that won't match the r.e.
('hello', N),
('1970 -01 -01T00:00:01Z', N),
('0001-02-03t07:08:23Z', N),
# Invalid ranges
('2001-00-03T07:08:23Z', N),
('2001-13-03T07:08:23Z', N),
('2001-02-00T07:08:23Z', N),
('2001-02-29T07:08:23Z', N),
('2000-02-30T07:08:23Z', N),
('1900-02-29T07:08:23Z', N),
('2001-02-03T24:08:23Z', N),
('2001-02-03T04:60:23Z', N),
('2001-02-03T04:05:61Z', N),
('2001-01-32T04:05:06Z', N),
('2001-03-32T04:05:06Z', N),
('2001-04-31T04:05:06Z', N),
('2001-05-32T04:05:06Z', N),
('2001-06-31T04:05:06Z', N),
('2001-07-32T04:05:06Z', N),
('2001-08-32T04:05:06Z', N),
('2001-09-31T04:05:06Z', N),
('2001-10-32T04:05:06Z', N),
('2001-11-31T04:05:06Z', N),
('2001-12-32T04:05:06Z', N),
# Whitespace
(ws + '1970-01-01T00:00:00Z' + ws, (1970, 1, 1, 0, 0, 0)),
# No timezones
('11971-02-03T04:05:06.125', (11971, 2, 3, 4, 5, 6.125)),
('1971-02-03T04:05:06.125', (1971, 2, 3, 4, 5, 6.125)),
('-1971-02-03T04:05:06.125', (-1971, 2, 3, 4, 5, 6.125)),
# Non-zulu
('11971-02-03T04:05:06.125-07:08', (11971, 2, 3, 11, 13, 6.125)),
('11971-02-03T04:05:06.125+07:08', (11971, 2, 2, 20, 57, 6.125)),
('-11971-02-03T04:05:06.125-07:08', (-11971, 2, 3, 11, 13, 6.125)),
('-11971-02-03T04:05:06.125+07:08', (-11971, 2, 2, 20, 57, 6.125)),
('1971-02-03T04:05:06.125-07:08', (1971, 2, 3, 11, 13, 6.125)),
('1971-02-03T04:05:06.125+07:08', (1971, 2, 2, 20, 57, 6.125)),
('-1971-02-03T04:05:06.125-07:08', (-1971, 2, 3, 11, 13, 6.125)),
('-1971-02-03T04:05:06.125+07:08', (-1971, 2, 2, 20, 57, 6.125)),
# Edgepoints (ranges)
('2001-01-03T07:08:09Z', (2001, 1, 3, 7, 8, 9)),
('2001-12-03T07:08:09Z', (2001, 12, 3, 7, 8, 9)),
('2001-02-01T07:08:09Z', (2001, 2, 1, 7, 8, 9)),
('2001-02-28T07:08:09Z', (2001, 2, 28, 7, 8, 9)),
('2000-02-29T07:08:09Z', (2000, 2, 29, 7, 8, 9)),
('1900-02-28T07:08:09Z', (1900, 2, 28, 7, 8, 9)),
('2001-02-03T00:08:09Z', (2001, 2, 3, 0, 8, 9)),
('2001-02-03T23:08:09Z', (2001, 2, 3, 23, 8, 9)),
('2001-02-03T04:00:09Z', (2001, 2, 3, 4, 0, 9)),
('2001-02-03T04:59:09Z', (2001, 2, 3, 4, 59, 9)),
('2001-02-03T04:05:00Z', (2001, 2, 3, 4, 5, 0)),
('2001-02-03T04:05:60.9Z', (2001, 2, 3, 4, 5, 60.9)),
('2001-01-31T04:05:06Z', (2001, 1, 31, 4, 5, 6)),
('2001-03-31T04:05:06Z', (2001, 3, 31, 4, 5, 6)),
('2001-04-30T04:05:06Z', (2001, 4, 30, 4, 5, 6)),
('2001-05-31T04:05:06Z', (2001, 5, 31, 4, 5, 6)),
('2001-06-30T04:05:06Z', (2001, 6, 30, 4, 5, 6)),
('2001-07-31T04:05:06Z', (2001, 7, 31, 4, 5, 6)),
('2001-08-31T04:05:06Z', (2001, 8, 31, 4, 5, 6)),
('2001-09-30T04:05:06Z', (2001, 9, 30, 4, 5, 6)),
('2001-10-31T04:05:06Z', (2001, 10, 31, 4, 5, 6)),
('2001-11-30T04:05:06Z', (2001, 11, 30, 4, 5, 6)),
('2001-12-31T04:05:06Z', (2001, 12, 31, 4, 5, 6)),
# Edgepoints (crossing boundaries)
('0001-01-01T07:08:23+07:08', (1, 1, 1, 0, 0, 23)),
('0001-01-01T07:07:42+07:08', (0, 12, 31, 23, 59, 42)),
('-0004-01-01T07:07:42+07:08', (-5, 12, 31, 23, 59, 42)),
('2001-03-01T07:07:42+07:08', (2001, 2, 28, 23, 59, 42)),
('2000-03-01T07:07:42+07:08', (2000, 2, 29, 23, 59, 42)),
('1900-03-01T07:07:42+07:08', (1900, 2, 28, 23, 59, 42)),
)
for t in (dateTimeType, timeInstantType, timePeriodType):
self.allTests(t, baddata, gooddata, parsedata)
# recurringInstant
def testRecurringInstant(self):
baddata = \
(
'hello',
('hello',),
(1, 2, N, 3, 4, 5),
(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
(1, 2, 3, 4, 5, 'hello'),
(1, 2, 3.5, 4, 5, 6),
)
gooddata = \
(
(1L, '1970-01-01T00:00:01Z', (1970, 1, 1, 0, 0, 1.0)),
(1.5, '1970-01-01T00:00:01.5Z', (1970, 1, 1, 0, 0, 1.5)),
(1e9, '2001-09-09T01:46:40Z', (2001, 9, 9, 1, 46, 40.0)),
((1, 1, 2, 3, 4, 5), '-01-01-02T03:04:05Z',
(1, 1, 2, 3, 4, 5)),
((-1, 1, 2, 3, 4, 5), '--01-01-02T03:04:05Z',
(-1, 1, 2, 3, 4, 5)),
((10, 1, 2, 3, 4, 5), '-10-01-02T03:04:05Z',
(10, 1, 2, 3, 4, 5)),
((-10, 1, 2, 3, 4, 5), '--10-01-02T03:04:05Z',
(-10, 1, 2, 3, 4, 5)),
((100, 1, 2, 3, 4, 5), '0100-01-02T03:04:05Z',
(100, 1, 2, 3, 4, 5)),
((-100, 1, 2, 3, 4, 5), '-0100-01-02T03:04:05Z',
(-100, 1, 2, 3, 4, 5)),
((1970L, 1, 2, 3, 4, 5), '1970-01-02T03:04:05Z',
(1970, 1, 2, 3, 4, 5)),
((1970L, 1, 2L, 3, 4.0, 5.25), '1970-01-02T03:04:05.25Z',
(1970, 1, 2, 3, 4, 5.25)),
((11990, 1, 2, 3L, 4, 5.25), '11990-01-02T03:04:05.25Z',
(11990, 1, 2, 3, 4, 5.25)),
((1e15, 1, 2, 3L, 4, 5.25),
'1000000000000000-01-02T03:04:05.25Z',
(1e15, 1, 2, 3, 4, 5.25)),
((-1e15, 1, 2, 3L, 4, 5.25),
'-1000000000000000-01-02T03:04:05.25Z',
(-1e15, 1, 2, 3, 4, 5.25)),
((N, 1, 2, 3, 4L, 5.25), '---01-02T03:04:05.25Z',
(N, 1, 2, 3, 4, 5.25)),
((N, N, 2, 3, 4, 5.25, 0, 0, 0), '-----02T03:04:05.25Z',
(N, N, 2, 3, 4, 5.25)),
((N, N, -2, 3, 4, 5.25, 0, 0, 0), '------02T03:04:05.25Z',
(N, N, -2, 3, 4, 5.25)),
((N, N, N, 3, 4, 5.25), '------T03:04:05.25Z',
(N, N, N, 3, 4, 5.25)),
((N, N, N, N, 4, 5.25, 0, 0, 0), '------T-:04:05.25Z',
(N, N, N, N, 4, 5.25)),
((N, N, N, N, N, 5.25), '------T-:-:05.25Z',
(N, N, N, N, N, 5.25)),
((N, N, N, N, N, -5.25), '-------T-:-:05.25Z',
(N, N, N, N, N, -5.25)),
((N, N, N, N, N, N, 0, 0, 0), '------T-:-:-Z',
(N, N, N, N, N, N)),
((N, N, N, N, N, N, N), '------T-:-:-Z',
(N, N, N, N, N, N)),
((N, N, N, N, N, N, N, N),
'------T-:-:-Z', (N, N, N, N, N, N)),
((N, N, N, N, N, N, N, N, N),
'------T-:-:-Z', (N, N, N, N, N, N)),
)
parsedata = \
(
# Some strings that won't match the r.e.
('hello', N),
('1970 -01 -01T00:00:01Z', N),
('0001-01-01t07:08:23+07:08', N),
# Invalid ranges
('2001-00-03T07:08:23Z', N),
('2001-13-03T07:08:23Z', N),
('2001-02-00T07:08:23Z', N),
('2001-02-29T07:08:23Z', N),
('2000-02-30T07:08:23Z', N),
('1900-02-29T07:08:23Z', N),
('2001-02-03T24:08:23Z', N),
('2001-02-03T04:60:23Z', N),
('2001-02-03T04:05:61Z', N),
('2001-01-32T04:05:06Z', N),
('2001-03-32T04:05:06Z', N),
('2001-04-31T04:05:06Z', N),
('2001-05-32T04:05:06Z', N),
('2001-06-31T04:05:06Z', N),
('2001-07-32T04:05:06Z', N),
('2001-08-32T04:05:06Z', N),
('2001-09-31T04:05:06Z', N),
('2001-10-32T04:05:06Z', N),
('2001-11-31T04:05:06Z', N),
('2001-12-32T04:05:06Z', N),
# Whitespace
(ws + '1970-01-01T00:00:01Z' + ws, (1970, 1, 1, 0, 0, 1)),
# No timezones
('11971-02-03T04:05:06.125', (11971, 2, 3, 4, 5, 6.125)),
('-11971-02-03T04:05:06.125', (-11971, 2, 3, 4, 5, 6.125)),
('1971-02-03T04:05:06.125', (1971, 2, 3, 4, 5, 6.125)),
('-1971-02-03T04:05:06.125', (-1971, 2, 3, 4, 5, 6.125)),
('-71-02-03T04:05:06.125', (71, 2, 3, 4, 5, 6.125)),
('--71-02-03T04:05:06.125', (-71, 2, 3, 4, 5, 6.125)),
('---02-03T04:05:06.125', (N, 2, 3, 4, 5, 6.125)),
('----02-03T04:05:06.125', (N, -2, 3, 4, 5, 6.125)),
('-----03T04:05:06.125', (N, N, 3, 4, 5, 6.125)),
('------03T04:05:06.125', (N, N, -3, 4, 5, 6.125)),
('------T04:05:06.125', (N, N, N, 4, 5, 6.125)),
('-------T04:05:06.125', (N, N, N, -4, 5, 6.125)),
('------T-:05:06.125', (N, N, N, N, 5, 6.125)),
('-------T-:05:06.125', (N, N, N, N, -5, 6.125)),
('------T-:-:06.125', (N, N, N, N, N, 6.125)),
('-------T-:-:06.125', (N, N, N, N, N, -6.125)),
('------T-:-:-', (N, N, N, N, N, N)),
('-------T-:-:-', (N, N, N, N, N, N)),
# Non-zulu
('11971-02-03T04:05:06.125-07:08', (11971, 2, 3, 11, 13, 6.125)),
('11971-02-03T04:05:06.125+07:08', (11971, 2, 2, 20, 57, 6.125)),
('-11971-02-03T04:05:06.125-07:08', (-11971, 2, 3, 11, 13, 6.125)),
('-11971-02-03T04:05:06.125+07:08', (-11971, 2, 2, 20, 57, 6.125)),
('1971-02-03T04:05:06.125-07:08', (1971, 2, 3, 11, 13, 6.125)),
('1971-02-03T04:05:06.125+07:08', (1971, 2, 2, 20, 57, 6.125)),
('-1971-02-03T04:05:06.125-07:08', (-1971, 2, 3, 11, 13, 6.125)),
('-1971-02-03T04:05:06.125+07:08', (-1971, 2, 2, 20, 57, 6.125)),
('-71-02-03T04:05:06.125-07:08', (71, 2, 3, 11, 13, 6.125)),
('-71-02-03T04:05:06.125+07:08', (71, 2, 2, 20, 57, 6.125)),
('--71-02-03T04:05:06.125-07:08', (-71, 2, 3, 11, 13, 6.125)),
('--71-02-03T04:05:06.125+07:08', (-71, 2, 2, 20, 57, 6.125)),
('---02-03T04:05:06.125-07:08', (N, 2, 3, 11, 13, 6.125)),
('---02-03T04:05:06.125+07:08', (N, 2, 2, 20, 57, 6.125)),
('----02-03T04:05:06.125-07:08', (N, -2, 3, 11, 13, 6.125)),
('----02-03T04:05:06.125+07:08', (N, -2, 2, 20, 57, 6.125)),
('-----03T04:05:06.125-07:08', (N, N, 3, 11, 13, 6.125)),
('-----03T04:05:06.125+07:08', (N, N, 2, 20, 57, 6.125)),
('------03T04:05:06.125-07:08', (N, N, -3, 11, 13, 6.125)),
('------03T04:05:06.125+07:08', (N, N, -4, 20, 57, 6.125)),
('------T04:05:06.125-07:08', (N, N, N, 11, 13, 6.125)),
('------T04:05:06.125+07:08', (N, N, N, -4, 57, 6.125)),
('-------T04:05:06.125-07:08', (N, N, N, 3, 13, 6.125)),
('-------T04:05:06.125+07:08', (N, N, N, -12, 57, 6.125)),
('------T-:05:06.125-07:08', (N, N, N, N, 433, 6.125)),
('------T-:05:06.125+07:08', (N, N, N, N, -423, 6.125)),
('-------T-:05:06.125-07:08', (N, N, N, N, 423, 6.125)),
('-------T-:05:06.125+07:08', (N, N, N, N, -433, 6.125)),
('------T-:-:06.125-07:08', (N, N, N, N, 428, 6.125)),
('------T-:-:06.125+07:08', (N, N, N, N, -428, 6.125)),
('-------T-:-:06.125-07:08', (N, N, N, N, 427, 53.875)),
('-------T-:-:06.125+07:08', (N, N, N, N, -429, 53.875)),
('------T-:-:--07:08', (N, N, N, N, 428, 0)),
('------T-:-:-+07:08', (N, N, N, N, -428, 0)),
('-------T-:-:--07:08', (N, N, N, N, 428, 0)),
('-------T-:-:-+07:08', (N, N, N, N, -428, 0)),
# Edgepoints (ranges)
('2001-01-03T07:08:09Z', (2001, 1, 3, 7, 8, 9)),
('2001-12-03T07:08:09Z', (2001, 12, 3, 7, 8, 9)),
('2001-02-01T07:08:09Z', (2001, 2, 1, 7, 8, 9)),
('2001-02-28T07:08:09Z', (2001, 2, 28, 7, 8, 9)),
('2000-02-29T07:08:09Z', (2000, 2, 29, 7, 8, 9)),
('1900-02-28T07:08:09Z', (1900, 2, 28, 7, 8, 9)),
('2001-02-03T00:08:09Z', (2001, 2, 3, 0, 8, 9)),
('2001-02-03T23:08:09Z', (2001, 2, 3, 23, 8, 9)),
('2001-02-03T04:00:09Z', (2001, 2, 3, 4, 0, 9)),
('2001-02-03T04:59:09Z', (2001, 2, 3, 4, 59, 9)),
('2001-02-03T04:05:00Z', (2001, 2, 3, 4, 5, 0)),
('2001-02-03T04:05:60.9Z', (2001, 2, 3, 4, 5, 60.9)),
('2001-01-31T04:05:06Z', (2001, 1, 31, 4, 5, 6)),
('2001-03-31T04:05:06Z', (2001, 3, 31, 4, 5, 6)),
('2001-04-30T04:05:06Z', (2001, 4, 30, 4, 5, 6)),
('2001-05-31T04:05:06Z', (2001, 5, 31, 4, 5, 6)),
('2001-06-30T04:05:06Z', (2001, 6, 30, 4, 5, 6)),
('2001-07-31T04:05:06Z', (2001, 7, 31, 4, 5, 6)),
('2001-08-31T04:05:06Z', (2001, 8, 31, 4, 5, 6)),
('2001-09-30T04:05:06Z', (2001, 9, 30, 4, 5, 6)),
('2001-10-31T04:05:06Z', (2001, 10, 31, 4, 5, 6)),
('2001-11-30T04:05:06Z', (2001, 11, 30, 4, 5, 6)),
('2001-12-31T04:05:06Z', (2001, 12, 31, 4, 5, 6)),
# Edgepoints (crossing boundaries)
('0001-01-01T07:08:23+07:08', (1, 1, 1, 0, 0, 23)),
('0001-01-01T07:07:42+07:08', (0, 12, 31, 23, 59, 42)),
('-0004-01-01T07:07:42+07:08', (-5, 12, 31, 23, 59, 42)),
('2001-03-01T07:07:42+07:08', (2001, 2, 28, 23, 59, 42)),
('2000-03-01T07:07:42+07:08', (2000, 2, 29, 23, 59, 42)),
('1900-03-01T07:07:42+07:08', (1900, 2, 28, 23, 59, 42)),
('---03-01T07:07:42+07:08', (N, 2, 28, 23, 59, 42)),
)
for t in (recurringInstantType,):
self.allTests(t, baddata, gooddata, parsedata)
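    # time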
def testTime(self):
baddata = \
(
'hello',
('hello',),
(1, 2, 3, 4, 5),
(1, 2, 3, 4, 5, 6, 7, 8),
(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
(1, 2, 'hello'),
(1, 2.5, 3),
(25, 0, 0),
(1, 60, 0),
(1, 0, 61),
)
gooddata = \
(
(1L, '00:00:01Z', (0, 0, 1.0)),
(1.5, '00:00:01.5Z', (0, 0, 1.5)),
(3661.5, '01:01:01.5Z', (1, 1, 1.5)),
(86399.75, '23:59:59.75Z', (23, 59, 59.75)),
((1,), '01:00:00Z', (1, 0, 0)),
((1, 2), '01:02:00Z', (1, 2, 0)),
((10L, 20.0, 30), '10:20:30Z', (10, 20, 30.0)),
)
parsedata = \
(
# Some strings that won't match the r.e.
('hello', N),
('00 00:01Z', N),
('07:O8:23Z', N),
# Invalid ranges
('24:08:23Z', N),
('04:60:23Z', N),
('04:05:61Z', N),
# Whitespace
(ws + '00:00:01Z' + ws, (0, 0, 1)),
# No timezones
('04:05:06.125', (4, 5, 6.125)),
# Non-zulu
('04:05:06.125-07:08', (11, 13, 6.125)),
('04:05:06.125+07:08', (-4, 57, 6.125)),
# Edgepoints (ranges)
('00:08:09Z', (0, 8, 9)),
('23:08:09Z', (23, 8, 9)),
('04:00:09Z', (4, 0, 9)),
('04:59:09Z', (4, 59, 9)),
('04:05:00Z', (4, 5, 0)),
('04:05:60.9Z', (4, 5, 60.9)),
# Edgepoints (crossing boundaries)
('07:08:23+07:08', (0, 0, 23)),
('07:07:42+07:08', (-1, 59, 42)),
)
for t in (timeType,):
self.allTests(t, baddata, gooddata, parsedata)
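    # date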
def testDate(self):
baddata = \
(
'hello',
('hello',),
(1, 2, 3, 4, 5),
(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
(1, 2, 3, 4, 5, 'hello'),
(1, 2.5, 3, 4, 5, 6),
(1, 2, 3.5),
(1, 0, 3),
(1, 13, 3),
(1, 1, 0),
(1, 1, 32),
(1, 2, 29),
(0, 2, 30),
(100, 2, 29),
(1, 3, 32),
(1, 4, 31),
(1, 5, 32),
(1, 6, 31),
(1, 7, 32),
(1, 8, 32),
(1, 9, 31),
(1, 10, 32),
(1, 11, 31),
(1, 12, 32),
)
gooddata = \
(
(1L, '1970-01-01Z', (1970, 1, 1)),
(1.5, '1970-01-01Z', (1970, 1, 1)),
((2,), '0002-01-01Z', (2, 1, 1)),
((2, 3), '0002-03-01Z', (2, 3, 1)),
((-2, 3, 4), '-0002-03-04Z', (-2, 3, 4)),
((2, 3, 4), '0002-03-04Z', (2, 3, 4)),
((10, 2, 3), '0010-02-03Z', (10, 2, 3)),
((100, 2, 3), '0100-02-03Z', (100, 2, 3)),
((1970, 2, 3), '1970-02-03Z', (1970, 2, 3)),
((-1970, 2, 3), '-1970-02-03Z', (-1970, 2, 3)),
((1970L, 2.0, 3.0), '1970-02-03Z', (1970, 2, 3)),
((11990, 1L, 2), '11990-01-02Z', (11990, 1, 2)),
((1e15, 1, 2), '1000000000000000-01-02Z', (1e15, 1, 2)),
((-1e15, 1, 2), '-1000000000000000-01-02Z', (-1e15, 1, 2)),
((1000000000000000L, 1, 2), '1000000000000000-01-02Z',
(1e15, 1, 2)),
((-1000000000000000L, 1, 2), '-1000000000000000-01-02Z',
(-1e15, 1, 2)),
)
parsedata = \
(
# Some strings that won't match the r.e.
('hello', N),
('1970 -01 -01Z', N),
('0001-02-03z', N),
# Invalid ranges
('2001-00-03Z', N),
('2001-13-03Z', N),
('2001-02-00Z', N),
('2001-02-29Z', N),
('2000-02-30Z', N),
('1900-02-29Z', N),
('2001-01-32Z', N),
('2001-03-32Z', N),
('2001-04-31Z', N),
('2001-05-32Z', N),
('2001-06-31Z', N),
('2001-07-32Z', N),
('2001-08-32Z', N),
('2001-09-31Z', N),
('2001-10-32Z', N),
('2001-11-31Z', N),
('2001-12-32Z', N),
# Whitespace
(ws + '1970-01-01Z' + ws, (1970, 1, 1)),
# No timezones
('11971-02-03', (11971, 2, 3)),
('1971-02-03', (1971, 2, 3)),
('-1971-02-03', (-1971, 2, 3)),
# Non-zulu
('11971-02-03-07:08', (11971, 2, 3)),
('11971-02-03+07:08', (11971, 2, 2)),
('-11971-02-03-07:08', (-11971, 2, 3)),
('-11971-02-03+07:08', (-11971, 2, 2)),
('1971-02-03-07:08', (1971, 2, 3)),
('1971-02-03+07:08', (1971, 2, 2)),
('-1971-02-03-07:08', (-1971, 2, 3)),
('-1971-02-03+07:08', (-1971, 2, 2)),
# Edgepoints (ranges)
('2001-01-03Z', (2001, 1, 3)),
('2001-12-03Z', (2001, 12, 3)),
('2001-02-01Z', (2001, 2, 1)),
('2001-02-28Z', (2001, 2, 28)),
('2000-02-29Z', (2000, 2, 29)),
('1900-02-28Z', (1900, 2, 28)),
('2001-01-31Z', (2001, 1, 31)),
('2001-03-31Z', (2001, 3, 31)),
('2001-04-30Z', (2001, 4, 30)),
('2001-05-31Z', (2001, 5, 31)),
('2001-06-30Z', (2001, 6, 30)),
('2001-07-31Z', (2001, 7, 31)),
('2001-08-31Z', (2001, 8, 31)),
('2001-09-30Z', (2001, 9, 30)),
('2001-10-31Z', (2001, 10, 31)),
('2001-11-30Z', (2001, 11, 30)),
('2001-12-31Z', (2001, 12, 31)),
# Edgepoints (crossing boundaries)
('0001-01-01+07:08', (0, 12, 31)),
('-0004-01-01+07:08', (-5, 12, 31)),
('2001-03-01+07:08', (2001, 2, 28)),
('2000-03-01+07:08', (2000, 2, 29)),
('1900-03-01+07:08', (1900, 2, 28)),
)
for t in (dateType,):
self.allTests(t, baddata, gooddata, parsedata)
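    # gYearMonth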
def testGYearMonth(self):
baddata = \
(
'hello',
('hello',),
(1, 2, 3),
(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
(1, 2, 3.5),
(1, 'hello'),
(1, 2.5),
(1, 0),
(1, 13),
)
gooddata = \
(
(1L, '1970-01Z', (1970, 1)),
(1.5, '1970-01Z', (1970, 1)),
((2,), '0002-01Z', (2, 1)),
((2, 3), '0002-03Z', (2, 3)),
((-2, 3), '-0002-03Z', (-2, 3)),
((10, 2), '0010-02Z', (10, 2)),
((100, 2), '0100-02Z', (100, 2)),
((1970, 2), '1970-02Z', (1970, 2)),
((-1970, 2), '-1970-02Z', (-1970, 2)),
((1970L, 2.0), '1970-02Z', (1970, 2)),
((11990, 1L), '11990-01Z', (11990, 1)),
((1e15, 1), '1000000000000000-01Z', (1e15, 1)),
((-1e15, 1), '-1000000000000000-01Z', (-1e15, 1)),
((1000000000000000L, 1), '1000000000000000-01Z', (1e15, 1)),
((-1000000000000000L, 1), '-1000000000000000-01Z', (-1e15, 1)),
)
parsedata = \
(
# Some strings that won't match the r.e.
('hello', N),
('1970 -01Z', N),
('0001-02z', N),
# Invalid ranges
('2001-00Z', N),
('2001-13Z', N),
# Whitespace
(ws + '1970-01Z' + ws, (1970, 1)),
# No timezones
('11971-02', (11971, 2)),
('1971-02', (1971, 2)),
('-1971-02', (-1971, 2)),
# Non-zulu
('11971-02-07:08', (11971, 2)),
('11971-02+07:08', (11971, 1)),
('-11971-02-07:08', (-11971, 2)),
('-11971-02+07:08', (-11971, 1)),
('1971-02-07:08', (1971, 2)),
('1971-02+07:08', (1971, 1)),
('-1971-02-07:08', (-1971, 2)),
('-1971-02+07:08', (-1971, 1)),
# Edgepoints (ranges)
('2001-01Z', (2001, 1)),
('2001-12Z', (2001, 12)),
# Edgepoints (crossing boundaries)
('0001-01+07:08', (0, 12)),
('-0004-01+07:08', (-5, 12)),
('2001-03+07:08', (2001, 2)),
('2000-03+07:08', (2000, 2)),
('1900-03+07:08', (1900, 2)),
)
for t in (gYearMonthType,):
self.allTests(t, baddata, gooddata, parsedata)
def testGYearAndYear(self):
baddata = \
(
'hello',
('hello',),
(1, 2),
(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
(2.5,),
)
gooddata = \
(
(1L, '0001Z', 1),
(10, '0010Z', 10),
(100, '0100Z', 100),
(1970, '1970Z', 1970),
(-1970, '-1970Z', -1970),
(1970L, '1970Z', 1970),
(11990.0, '11990Z', 11990),
(1e15, '1000000000000000Z', 1e15),
(-1e15, '-1000000000000000Z', -1e15),
(1000000000000000L, '1000000000000000Z', 1e15),
(-1000000000000000L, '-1000000000000000Z', -1e15),
)
parsedata = \
(
# Some strings that won't match the r.e.
('hello', N),
('197OZ', N),
('0001z', N),
# Whitespace
(ws + '1970Z' + ws, 1970),
# No timezones
('11971', 11971),
('1971', 1971),
('-1971', -1971),
# Non-zulu
('11971-07:08', 11971),
('11971+07:08', 11970),
('-11971-07:08', -11971),
('-11971+07:08', -11972),
('1971-07:08', 1971),
('1971+07:08', 1970),
('-1971-07:08', -1971),
('-1971+07:08', -1972),
# Edgepoints (crossing boundaries)
('0001+07:08', 0),
('-0004+07:08', -5),
)
for t in (gYearType, yearType):
self.allTests(t, baddata, gooddata, parsedata)
def testCentury(self):
baddata = \
(
'hello',
('hello',),
(1, 2),
(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
(2.5,),
)
gooddata = \
(
(1L, '01Z', 1),
(10, '10Z', 10),
(100, '100Z', 100),
(19, '19Z', 19),
(-19, '-19Z', -19),
(19L, '19Z', 19),
(119.0, '119Z', 119),
(1e15, '1000000000000000Z', 1e15),
(-1e15, '-1000000000000000Z', -1e15),
(1000000000000000L, '1000000000000000Z', 1e15),
(-1000000000000000L, '-1000000000000000Z', -1e15),
)
parsedata = \
(
# Some strings that won't match the r.e.
('hello', N),
('197OZ', N),
('0001z', N),
# Whitespace
(ws + '1970Z' + ws, 1970),
# No timezones
('11971', 11971),
('1971', 1971),
('-1971', -1971),
# Non-zulu
('11971-07:08', 11971),
('11971+07:08', 11970),
('-11971-07:08', -11971),
('-11971+07:08', -11972),
('1971-07:08', 1971),
('1971+07:08', 1970),
('-1971-07:08', -1971),
('-1971+07:08', -1972),
# Edgepoints (crossing boundaries)
('0001+07:08', 0),
('-0004+07:08', -5),
)
for t in (centuryType,):
self.allTests(t, baddata, gooddata, parsedata)
def testGMonthDayAndRecurringDate(self):
baddata = \
(
'hello',
('hello',),
(3, 4, 5),
(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
(4, 5, 'hello'),
(2.5, 3),
(0, 3),
(13, 3),
(1, 0),
(1, 32),
(2, 29),
(3, 32),
(4, 31),
(5, 32),
(6, 31),
(7, 32),
(8, 32),
(9, 31),
(10, 32),
(11, 31),
(12, 32),
)
gooddata = \
(
(1L, '--01-01Z', (1, 1)),
(1.5, '--01-01Z', (1, 1)),
((2,), '--02-01Z', (2, 1)),
((2, 3), '--02-03Z', (2, 3)),
((10, 2), '--10-02Z', (10, 2)),
)
parsedata = \
(
# Some strings that won't match the r.e.
('hello', N),
('--01 -01Z', N),
('--02-03z', N),
# Invalid ranges
('--00-03Z', N),
('--13-03Z', N),
('--01-32Z', N),
('--02-00Z', N),
('--02-29Z', N),
('--03-32Z', N),
('--04-31Z', N),
('--05-32Z', N),
('--06-31Z', N),
('--07-32Z', N),
('--08-32Z', N),
('--09-31Z', N),
('--10-32Z', N),
('--11-31Z', N),
('--12-32Z', N),
# Whitespace
(ws + '--01-01Z' + ws, (1, 1)),
# No timezones
('--02-03', (2, 3)),
# Non-zulu
('--02-03-07:08', (2, 3)),
('--02-03+07:08', (2, 2)),
# Edgepoints (ranges)
('--01-03Z', (1, 3)),
('--12-03Z', (12, 3)),
('--01-31Z', (1, 31)),
('--02-01Z', (2, 1)),
('--02-28Z', (2, 28)),
('--03-31Z', (3, 31)),
('--04-30Z', (4, 30)),
('--05-31Z', (5, 31)),
('--06-30Z', (6, 30)),
('--07-31Z', (7, 31)),
('--08-31Z', (8, 31)),
('--09-30Z', (9, 30)),
('--10-31Z', (10, 31)),
('--11-30Z', (11, 30)),
('--12-31Z', (12, 31)),
# Edgepoints (crossing boundaries)
('--01-01+07:08', (12, 31)),
('--03-01+07:08', (2, 28)),
)
for t in (gMonthDayType, recurringDateType):
self.allTests(t, baddata, gooddata, parsedata)
def testGMonthAndMonth(self):
baddata = \
(
'hello',
('hello',),
(3, 4,),
(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
(2.5,),
(0,),
(13,),
)
gooddata = \
(
(1L, '--01--Z', 1),
((2,), '--02--Z', 2),
((10,), '--10--Z', 10),
)
parsedata = \
(
# Some strings that won't match the r.e.
('hello', N),
('--01 --Z', N),
('--03--z', N),
# Invalid ranges
('--00--Z', N),
('--13--Z', N),
# Whitespace
(ws + '--01--Z' + ws, 1),
# No timezones
('--03--', 3),
# Non-zulu
('--03---07:08', 3),
('--03--+07:08', 2),
# Edgepoints (ranges)
('--01--Z', 1),
('--12--Z', 12),
# Edgepoints (crossing boundaries)
('--01--+07:08', 12),
('--12---07:08', 12),
)
for t in (gMonthType, monthType):
self.allTests(t, baddata, gooddata, parsedata)
def testGDayAndRecurringDay(self):
baddata = \
(
'hello',
('hello',),
(3, 4,),
(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
(2.5,),
(0,),
(32,),
)
gooddata = \
(
(1L, '---01Z', 1),
((2,), '---02Z', 2),
((10,), '---10Z', 10),
)
parsedata = \
(
# Some strings that won't match the r.e.
('hello', N),
('---01 Z', N),
('---03z', N),
# Invalid ranges
('---00Z', N),
('---32Z', N),
# Whitespace
(ws + '---01Z' + ws, 1),
# No timezones
('---03', 3),
# Non-zulu
('---03-07:08', 3),
('---03+07:08', 2),
# Edgepoints (ranges)
('---01Z', 1),
('---31Z', 31),
# Edgepoints (crossing boundaries)
('---01+07:08', 31),
('---31-07:08', 31),
)
for t in (gDayType, recurringDayType):
self.allTests(t, baddata, gooddata, parsedata)
def testInteger(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {})
t = integerType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (10, 23L, 1111111111111111111111111111111111111111111111111111L):
x = integerType(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('10 000', N),
('1', 1),
('123456789012345678901234567890', 123456789012345678901234567890L),
(ws + '12' + ws, 12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4],
i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testNonPositiveInteger(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, 1, 23)
for t in (nonPositiveIntegerType, non_Positive_IntegerType):
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a t with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (0, -23L, -1111111111111111111111111111111111111111111111111L):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N), ('1', N),
('0', 0),
('-1', -1),
('-123456789012345678901234567890', -123456789012345678901234567890L),
(ws + '-12' + ws, -12))
for i in test:
try:
if t == nonPositiveIntegerType:
n = t.__name__[:-4]
else:
n = 'non-positive-integer'
z = parseSOAPRPC(self.build_xml(t._validURIs[0], n, i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testNegativeInteger(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, 0, 23)
for t in (negativeIntegerType, negative_IntegerType):
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (-1, -23L, -111111111111111111111111111111111111111111111111L):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N), ('1', N),
('0', N),
('-1', -1),
('-123456789012345678901234567890', -123456789012345678901234567890L),
(ws + '-12' + ws, -12))
for i in test:
try:
if t == negativeIntegerType:
n = t.__name__[:-4]
else:
n = 'negative-integer'
z = parseSOAPRPC(self.build_xml(t._validURIs[0], n, i[0]))
if z != i[1]:
raise AssertionError, "expected %s, got %s" % (i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testLong(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {},
-9223372036854775809L, 9223372036854775808L)
t = longType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (-1, -23L, -9223372036854775808L, 9223372036854775807L):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N),
('-9223372036854775809', N), ('9223372036854775808', N),
('-1', -1), ('0', 0), ('1', 1),
('-9223372036854775808', -9223372036854775808L),
('9223372036854775807', 9223372036854775807L),
(ws + '-12' + ws, -12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testInt(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -2147483649L, 2147483648L)
t = intType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (-1, -23L, -2147483648L, 2147483647):
x = intType(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N),
('-2147483649', N), ('2147483648', N),
('-1', -1), ('0', 0), ('1', 1),
('-2147483648', -2147483648L),
('2147483647', 2147483647),
(ws + '-12' + ws, -12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testShort(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -32769, 32768)
t = shortType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (-1, -23L, -32768, 32767):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N),
('-32769', N), ('32768', N),
('-1', -1), ('0', 0), ('1', 1),
('-32768', -32768),
('32767', 32767),
(ws + '-12' + ws, -12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testByte(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -129, 128)
t = byteType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (-1, -23L, -128, 127):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N),
('-129', N), ('128', N),
('-1', -1), ('0', 0), ('1', 1),
('-128', -128),
('127', 127),
(ws + '-12' + ws, -12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testNonNegativeInteger(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -42, -1)
for t in (nonNegativeIntegerType, non_Negative_IntegerType):
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (0, 1, 23L, 111111111111111111111111111111111111111111111111L):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N), ('-1', N),
('0', 0),
('1', 1),
('123456789012345678901234567890', 123456789012345678901234567890L),
(ws + '12' + ws, 12))
for i in test:
try:
if t == nonNegativeIntegerType:
n = t.__name__[:-4]
else:
n = 'non-negative-integer'
z = parseSOAPRPC(self.build_xml(t._validURIs[0], n, i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testUnsignedLong(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -42, -1, 18446744073709551616L)
t = unsignedLongType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (0, 23L, 18446744073709551615L):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N), ('-1', N),
('18446744073709551616', N),
('0', 0), ('1', 1),
('18446744073709551615', 18446744073709551615L),
(ws + '12' + ws, 12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testUnsignedInt(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -42, -1, 4294967296L)
t = unsignedIntType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (0, 23L, 4294967295L):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N), ('-1', N),
('4294967296', N),
('0', 0), ('1', 1),
('4294967295', 4294967295L),
(ws + '12' + ws, 12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testUnsignedShort(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -42, -1, 65536)
t = unsignedShortType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (0, 23L, 65535):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N), ('-1', N),
('65536', N),
('0', 0), ('1', 1),
('65535', 65535),
(ws + '12' + ws, 12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testUnsignedByte(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -42, -1, 256)
t = unsignedByteType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (0, 23L, 255):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N), ('-1', N),
('256', N),
('0', 0), ('1', 1),
('255', 255),
(ws + '12' + ws, 12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testPositiveInteger(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -42, -1, 0)
for t in (positiveIntegerType, positive_IntegerType):
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a t with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (1, 23L, 1111111111111111111111111111111111111111111111111111L):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N), ('-1', N),
('0', N), ('1', 1),
('123456789012345678901234567890', 123456789012345678901234567890L),
(ws + '12' + ws, 12))
for i in test:
try:
if t == positiveIntegerType:
n = t.__name__[:-4]
else:
n = 'positive-integer'
z = parseSOAPRPC(self.build_xml(t._validURIs[0], n, i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testUntyped(self):
# Make sure untypedType really isn't typed
a = stringType('hello', name = 'a')
b = untypedType('earth', name = 'b')
x = buildSOAP((a, b))
#print "x=",x
self.failUnless(x.find('<a xsi:type="xsd:string" SOAP-ENC:root="1">hello</a>') != -1)
self.failUnless(x.find('<b SOAP-ENC:root="1">earth</b>') != -1)
# Now some Array tests
def testArray(self):
env = '''<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsd2="http://www.w3.org/2000/10/XMLSchema" xmlns:xsd3="http://www.w3.org/2001/XMLSchema" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/">
%s
</SOAP-ENV:Envelope>'''
xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[4]" SOAP-ENC:offset="[2]" xsi:type="SOAP-ENC:Array">
<_2 SOAP-ENC:arrayType="xsd:int[2]" xsi:type="SOAP-ENC:Array">
<item>1</item>
<item>2</item>
</_2>
<_3 SOAP-ENC:arrayType="xsd:int[2]" xsi:type="SOAP-ENC:Array">
<item>3</item>
<item>4</item>
</_3>
</_1>
</SOAP-ENV:Body>'''
x = parseSOAPRPC(xml)
self.assertEquals( x , [None, None, [1, 2], [3, 4]])
xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[3,4,2]" SOAP-ENC:offset="[17]" xsi:type="SOAP-ENC:Array">
<item>1</item>
<item>2</item>
<item>3</item>
<item>4</item>
<item>5</item>
<item>6</item>
<item>7</item>
</_1>
</SOAP-ENV:Body>'''
x = parseSOAPRPC(xml)
self.assertEquals( x , [
[[None, None], [None, None], [None, None], [None, None]],
[[None, None], [None, None], [None, None], [None, None]],
[[None, 1], [2, 3], [4, 5], [6, 7]]
])
xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[3,4,2]" xsi:type="SOAP-ENC:Array">
<item SOAP-ENC:position="[17]">-17</item>
<item SOAP-ENC:position="[13]">13</item>
<item SOAP-ENC:position="[22]">-22</item>
<item SOAP-ENC:position="[1]">1</item>
<item SOAP-ENC:position="[17]">17</item>
<item SOAP-ENC:position="[23]">23</item>
<item SOAP-ENC:position="[6]">6</item>
</_1>
</SOAP-ENV:Body>'''
x = parseSOAPRPC(xml)
self.assertEquals( x , [
[[None, 1L], [None, None], [None, None], [6L, None]],
[[None, None], [None, None], [None, 13L], [None, None]],
[[None, 17L], [None, None], [None, None], [-22L, 23L]]
])
xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[4]" SOAP-ENC:offset="[3]" xsi:type="SOAP-ENC:Array">
<item SOAP-ENC:position="[2]">2</item>
<item SOAP-ENC:position="[0]">0</item>
<item SOAP-ENC:position="[1]">1</item>
<item SOAP-ENC:position="[3]">3</item>
</_1>
</SOAP-ENV:Body>'''
x = parseSOAPRPC(xml)
self.assertEquals( x , [0, 1, 2, 3])
xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[2,3,4]" SOAP-ENC:offset="[23]" xsi:type="SOAP-ENC:Array">
</_1>
</SOAP-ENV:Body>'''
x = parseSOAPRPC(xml)
self.assertEquals( x , [
[
[None, None, None, None],
[None, None, None, None],
[None, None, None, None],
],
[
[None, None, None, None],
[None, None, None, None],
[None, None, None, None],
]
])
xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[4]" SOAP-ENC:offset="[3]" xsi:type="SOAP-ENC:Array">
<item>2</item>
<item>3</item>
</_1>
</SOAP-ENV:Body>'''
try:
x = parseSOAPRPC(xml)
raise AssertionError, "full array parsed"
except AssertionError:
raise
except:
pass
xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[2,0,4]" xsi:type="SOAP-ENC:Array">
</_1>
</SOAP-ENV:Body>'''
try:
x = parseSOAPRPC(xml)
raise AssertionError, "array with bad dimension (0) parsed"
except AssertionError:
raise
except:
pass
xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[2,3,-4]" xsi:type="SOAP-ENC:Array">
</_1>
</SOAP-ENV:Body>'''
try:
x = parseSOAPRPC(xml)
raise AssertionError, "array with bad dimension (negative) parsed"
except AssertionError:
raise
except:
pass
xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[2,3,4.4]" xsi:type="SOAP-ENC:Array">
</_1>
</SOAP-ENV:Body>'''
try:
x = parseSOAPRPC(xml)
raise AssertionError, "array with bad dimension (non-integral) parsed"
except AssertionError:
raise
except:
pass
xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[2,hello,4]" xsi:type="SOAP-ENC:Array">
</_1>
</SOAP-ENV:Body>'''
try:
x = parseSOAPRPC(xml)
raise AssertionError, "array with bad dimension (non-numeric) parsed"
except AssertionError:
raise
except:
pass
xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[2,3,4]" SOAP-ENC:offset="[-4]" xsi:type="SOAP-ENC:Array">
</_1>
</SOAP-ENV:Body>'''
try:
x = parseSOAPRPC(xml)
raise AssertionError, "array with too large offset parsed"
except AssertionError:
raise
except:
pass
xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[2,3,4]" SOAP-ENC:offset="[24]" xsi:type="SOAP-ENC:Array">
</_1>
</SOAP-ENV:Body>'''
try:
x = parseSOAPRPC(xml)
raise AssertionError, "array with too large offset parsed"
except AssertionError:
raise
except:
pass
xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[2,3,4]" xsi:type="SOAP-ENC:Array">
<item SOAP-ENC:position="0">2</item>
<item>3</item>
</_1>
</SOAP-ENV:Body>'''
try:
x = parseSOAPRPC(xml)
raise AssertionError, "full array parsed"
except AssertionError:
raise
except:
pass
xml = env % '''<SOAP-ENV:Body>
<myFavoriteNumbers type="SOAP-ENC:Array" SOAP-ENC:arrayType="xsd:int[2]">
<number>3</number>
<number>4</number>
</myFavoriteNumbers>
</SOAP-ENV:Body>'''
x = parseSOAPRPC(xml)
self.assertEquals( x , [3, 4])
xml = env % '''<SOAP-ENV:Body>
<SOAP-ENC:Array SOAP-ENC:arrayType="xsd:ur-type[4]">
<thing xsi:type="xsd:int">12345</thing>
<thing xsi:type="xsd:decimal">6.789</thing>
<thing xsi:type="xsd:string">Of Mans First Disobedience, and the Fruit
Of that Forbidden Tree, whose mortal tast
Brought Death into the World, and all our woe,</thing>
<thing xsi:type="xsd2:uriReference">
http://www.dartmouth.edu/~milton/reading_room/
</thing>
</SOAP-ENC:Array>
</SOAP-ENV:Body>'''
x = parseSOAPRPC(xml)
self.assertEquals( x , [12345, 6.789, '''Of Mans First Disobedience, and the Fruit
Of that Forbidden Tree, whose mortal tast
Brought Death into the World, and all our woe,''',
'http://www.dartmouth.edu/~milton/reading_room/'])
xml = env % '''<SOAP-ENV:Body>
<SOAP-ENC:Array SOAP-ENC:arrayType="xyz:Order[2]">
<Order>
<Product>Apple</Product>
<Price>1.56</Price>
</Order>
<Order>
<Product>Peach</Product>
<Price>1.48</Price>
</Order>
</SOAP-ENC:Array>
</SOAP-ENV:Body>'''
#x = parseSOAPRPC(xml)
#print "x=",x
xml = env % '''<SOAP-ENV:Body>
<SOAP-ENC:Array SOAP-ENC:arrayType="xsd:string[3]">
<item href="#array-1"/>
<item href="#array-2"/>
<item href="#array-2"/>
</SOAP-ENC:Array>
<SOAP-ENC:Array id="array-1" SOAP-ENC:arrayType="xsd:string[3]">
<item>r1c1</item>
<item>r1c2</item>
<item>r1c3</item>
</SOAP-ENC:Array>
<SOAP-ENC:Array id="array-2" SOAP-ENC:arrayType="xsd:string[2]">
<item>r2c1</item>
<item>r2c2</item>
</SOAP-ENC:Array>
</SOAP-ENV:Body>'''
x = parseSOAPRPC(xml)
self.assertEquals( x , [['r1c1', 'r1c2', 'r1c3'],
['r2c1', 'r2c2'], ['r2c1', 'r2c2']])
xml = env % '''<SOAP-ENV:Body>
<SOAP-ENC:Array SOAP-ENC:arrayType="xsd:string[2,3]">
<item>r1c1</item>
<item>r1c2</item>
<item>r1c3</item>
<item>r2c1</item>
<item>r2c2</item>
<item>r2c3</item>
</SOAP-ENC:Array>
</SOAP-ENV:Body>'''
x = parseSOAPRPC(xml)
self.assertEquals( x , [['r1c1', 'r1c2', 'r1c3'], ['r2c1', 'r2c2', 'r2c3']])
xml = env % '''<SOAP-ENV:Body>
<SOAP-ENC:Array SOAP-ENC:arrayType="xsd:string[5]" SOAP-ENC:offset="[2]">
<item>The third element</item>
<item>The fourth element</item>
</SOAP-ENC:Array>
</SOAP-ENV:Body>'''
x = parseSOAPRPC(xml)
self.assertEquals( x , [None, None, 'The third element', 'The fourth element', None])
xml = env % '''<SOAP-ENV:Body>
<SOAP-ENC:Array SOAP-ENC:arrayType="xsd:string[,][4]">
<SOAP-ENC:Array href="#array-1" SOAP-ENC:position="[2]"/>
</SOAP-ENC:Array>
<SOAP-ENC:Array id="array-1" SOAP-ENC:arrayType="xsd:string[10,10]">
<item SOAP-ENC:position="[2,2]">Third row, third col</item>
<item SOAP-ENC:position="[7,2]">Eighth row, third col</item>
</SOAP-ENC:Array>
</SOAP-ENV:Body>'''
x = parseSOAPRPC(xml)
# Example using key data
def testKeyData(self):
xml = '''<?xml version="1.0" encoding="UTF-8"?>
<soap:Envelope xmlns:dsig="http://www.w3.org/2000/09/xmldsig#" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/" xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<soap:Body>
<xkms:RegisterResult xmlns:xkms="http://www.xkms.org/schema/xkms-2001-01-20">
<xkms:Result>Success</xkms:Result>
<xkms:Answer soapenc:arrayType="KeyBinding[1]">
<xkms:KeyBinding>
<xkms:Status>Valid</xkms:Status>
<xkms:KeyID>mailto:actzerotestkeyname</xkms:KeyID>
<dsig:KeyInfo>
<dsig:X509Data>
<dsig:X509Certificate>MIIDPjCCAqegAwIBAgIEOroMvDANBgkqhkiG9w0BAQUFADAxMQswCQYDVQQGEwJVI3nlMkH84ZdPKIyz60sNcVEwJ8kF+B6ZVNimCF+r7BWgLi/Dolce5CpbfMMyexZ+UQEMADrc7331eYS891KXSDQx</dsig:X509Certificate>
</dsig:X509Data>
<dsig:KeyName>mailto:actzerotestkeyname</dsig:KeyName>
<dsig:KeyValue>
<dsig:RSAKeyValue>
<dsig:Modulus>wgmV2FY6MBKvtaMmCvCoNi/0hycZkiPKC2PXjRLJKFJ5wjNfF+vWsQQUXxOKUQnu
HjJqRkx90jJvnEzW3j9FlZFQcZTfJbE0v6BXhhSre2kZvkgcOERmDMeMs//oEA4u
epnedUwrkPzedWU9AL7c/oN7rk65UuPWf7V8c/4E9bc=</dsig:Modulus>
<dsig:Exponent>AQAB</dsig:Exponent>
</dsig:RSAKeyValue>
</dsig:KeyValue>
</dsig:KeyInfo>
</xkms:KeyBinding>
</xkms:Answer>
<xkms:Private>9GKuRC3ISwE9aEatzDKW0WIp+P/ufOvCxy9d5jVglLaRiTTIelHoGKCE6cDG62HYOu/3ebce6M7Z6LX6l1J9pB5PUx+f2DaMYYEGuOtNA7/ei5Ga/mibRBCehQIcN6FF6ESFOwAJBRLajj+orgYSy0u1sTCla0V4nSBrYA2H6lx8mD3qfDJ4hie7nU0YqZxy50F9f9UxXKIVSeutyIIBjWDDKv0kVpKy7OUerOaZXOW6HBohXuV74kXMUZu+MpLIkMHOrhJeo+edfhmeFuw4kCo5it6GkrOKrGs6zo1hSxWp7uuvKAPbvUrumC6sTsTxAUg4KTGq85IUnBTYI40Q9TZtzMcONtrWfIIF23/7NJyOmygBaFa4wFqHxe7j2gSWCQRv2fPwXo/AAJTeKwsUIY8OgmANHHbFVqJEeg27jbCuSaQFxWD7ms240YurTb55HBLk6JSufDl0CUbxoUgjrDB++gUb8oalroWDIb5NcZ94QER+HiTQfB11HcPDHvONnzk/n+iF+Mcri53ZbAButnfp2x87sh6RedeiUUWruYA4eonRq5+aj2I9cIrGLQaLemna1AQ+PyD2SMelBLukfR7GUc7zaSPjPJh2W/aYAJSyjM98g6ABNntdfhuf+6jRYnYFqSXZL1W1JPf92OMOfwfuXTE2K68sNwCRhcbHDLM=</xkms:Private>
</xkms:RegisterResult>
</soap:Body>
</soap:Envelope>'''
x = parseSOAPRPC(xml)
def testZeroLengthTypedArray(self):
"""
Test that zero length typed arrays maintain their type information when
converted to a SOAP message.
"""
empty_int = typedArrayType(typed="int")
empty_int_message = buildSOAP( empty_int )
self.assertNotEquals( re.search("xsd:int\[0\]", empty_int_message),
None )
if __name__ == '__main__':
print """
NOTE: The 'testArray' test will fail because 'referenced' elements are
included in the return object. This is a known shortcoming of
the current version of SOAPpy.
All other tests should succeed.
"""
unittest.main()
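# ---------------------------------------------------------------------------
# Hedged illustration (not part of SOAPpy): a minimal, standalone sketch of
# the SOAP-ENC partial-array semantics that testArray above exercises.  Items
# are laid out in row-major order starting at the flat index given by
# SOAP-ENC:offset (or SOAP-ENC:position), and every unfilled slot stays None.
# The helper name below is hypothetical and exists only for this illustration.
def _fill_partial_array(dims, offset, items):
    """Build a nested list of shape `dims`, placing `items` consecutively
    from flat position `offset`; every other cell is None."""
    total = 1
    for d in dims:
        total *= d
    flat = [None] * total
    flat[offset:offset + len(items)] = list(items)
    def _reshape(seq, shape):
        # Recursively split the flat sequence into nested row-major lists.
        if len(shape) == 1:
            return list(seq)
        step = len(seq) // shape[0]
        return [_reshape(seq[i * step:(i + 1) * step], shape[1:])
                for i in range(shape[0])]
    return _reshape(flat, list(dims))
# For example, _fill_partial_array((3, 4, 2), 17, [1, 2, 3, 4, 5, 6, 7])
# reproduces the nested value asserted above for xsd:int[3,4,2] with
# SOAP-ENC:offset="[17]".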
| nagyistoce/devide.johannes | extra/soappy-cvp/tests/SOAPtest.py | Python | bsd-3-clause | 140,087 | ["MOOSE"] | 19c98cc2b664f04b66eb68fce29992b47e9d2f36de739905322389dfad275ba6 |
########################################################################
# File : IdProviderFactory.py
# Author : A.T.
########################################################################
""" The Identity Provider Factory instantiates IdProvider objects
according to their configuration
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import jwt
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities import ObjectLoader, ThreadSafe
from DIRAC.Core.Utilities.DictCache import DictCache
from DIRAC.Resources.IdProvider.Utilities import getProviderInfo, getSettingsNamesForIdPIssuer
from DIRAC.FrameworkSystem.private.authorization.utils.Clients import getDIRACClients
from DIRAC.FrameworkSystem.private.authorization.utils.Utilities import collectMetadata
__RCSID__ = "$Id$"
gCacheMetadata = ThreadSafe.Synchronizer()
class IdProviderFactory(object):
def __init__(self):
"""Standard constructor"""
self.log = gLogger.getSubLogger("IdProviderFactory")
self.cacheMetadata = DictCache()
@gCacheMetadata
def getMetadata(self, idP):
return self.cacheMetadata.get(idP) or {}
@gCacheMetadata
def addMetadata(self, idP, data, time=24 * 3600):
if data:
self.cacheMetadata.add(idP, time, data)
def getIdProviderForToken(self, token):
"""This method returns a IdProvider instance corresponding to the supplied
issuer in a token.
:param token: access token or dict with access_token key
:return: S_OK(IdProvider)/S_ERROR()
"""
if isinstance(token, dict):
token = token["access_token"]
data = {}
# Read token without verification to get issuer
issuer = jwt.decode(token, leeway=300, options=dict(verify_signature=False, verify_aud=False))["iss"].strip("/")
result = getSettingsNamesForIdPIssuer(issuer)
if not result["OK"]:
return result
return self.getIdProvider(result["Value"])
def getIdProvider(self, name, **kwargs):
"""This method returns a IdProvider instance corresponding to the supplied
name.
:param str name: the name of the Identity Provider client
:return: S_OK(IdProvider)/S_ERROR()
"""
if not name:
return S_ERROR("Identity Provider client name must be not None.")
# Get Authorization Server metadata
try:
asMetaDict = collectMetadata(kwargs.get("issuer"), ignoreErrors=True)
except Exception as e:
return S_ERROR(str(e))
self.log.debug("Search configuration for", name)
clients = getDIRACClients()
if name in clients:
# If it is a DIRAC default pre-registered client
pDict = asMetaDict
pDict.update(clients[name])
else:
# if it is external identity provider client
result = getProviderInfo(name)
if not result["OK"]:
self.log.error("Failed to read configuration", "%s: %s" % (name, result["Message"]))
return result
pDict = result["Value"]
# Set default redirect_uri
pDict["redirect_uri"] = pDict.get("redirect_uri", asMetaDict["redirect_uri"])
pDict.update(kwargs)
pDict["ProviderName"] = name
self.log.verbose("Creating IdProvider of %s type with the name %s" % (pDict["ProviderType"], name))
subClassName = "%sIdProvider" % pDict["ProviderType"]
objectLoader = ObjectLoader.ObjectLoader()
result = objectLoader.loadObject("Resources.IdProvider.%s" % subClassName, subClassName)
if not result["OK"]:
self.log.error("Failed to load object", "%s: %s" % (subClassName, result["Message"]))
return result
pClass = result["Value"]
try:
provider = pClass(**pDict)
except Exception as x:
msg = "IdProviderFactory could not instantiate %s object: %s" % (subClassName, str(x))
self.log.exception()
self.log.warn(msg)
return S_ERROR(msg)
return S_OK(provider)
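# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module).  The provider name "SomeIdP" is purely hypothetical; the
# S_OK/S_ERROR result dictionaries are the DIRAC convention already used above.
def _example_get_provider(name="SomeIdP"):
    """Illustrate how a caller might drive IdProviderFactory."""
    factory = IdProviderFactory()
    result = factory.getIdProvider(name)
    if not result["OK"]:
        gLogger.error("Could not create IdProvider", result["Message"])
        return None
    return result["Value"]  # an instance of "<ProviderType>IdProvider"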
| ic-hep/DIRAC | src/DIRAC/Resources/IdProvider/IdProviderFactory.py | Python | gpl-3.0 | 4,216 | ["DIRAC"] | 12b5759bcf783660c5810a9caea12ad50f0c9954bbb476b3ce32cad32bfee0af |
#import matplotlib as plt
#from numpy import *
## Force matplotlib to not use any Xwindows backend.
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
#def plot_cov_contour(S, m):
## plots equidensity contor of a gaussian
## S: covariance matrix
# phi=arange(0,2*pi+.01,.01)
# points=zeros((size(phi), 2)
#
# [l, v] = linalg.eig(S)
# A = diag(l)
#
# for i in range(0,size(phi)):
# x[i] = dot(d, ) # (diag(d)'.*[cos(tRange(i)) sin(tRange(i))])*v';
#
# # plot x
#
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Arc
def plot_point_cov(points, nstd=2, ax=None, **kwargs):
"""
Plots an `nstd` sigma ellipse based on the mean and covariance of a point
"cloud" (points, an Nx2 array).
Parameters
----------
points : An Nx2 array of the data points.
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
Additional keyword arguments are passed on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""
pos = points.mean(axis=0)
cov = np.cov(points, rowvar=False)
return plot_cov_ellipse(cov, pos, nstd, ax, **kwargs)
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
"""
Plots an `nstd` sigma error ellipse based on the specified covariance
matrix (`cov`). Additional keyword arguments are passed on to the
ellipse patch artist.
Parameters
----------
cov : The 2x2 covariance matrix to base the ellipse on
pos : The location of the center of the ellipse. Expects a 2-element
sequence of [x0, y0].
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
Additional keyword arguments are passed on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
# Width and height are "full" widths, not radius
width, height = 2 * nstd * np.sqrt(vals)
ellip = Arc(xy=pos, width=width, height=height, angle=theta, **kwargs)
ax.add_artist(ellip)
return ellip
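# ---------------------------------------------------------------------------
# Hedged companion sketch (not part of the original module): the ellipse drawn
# above is the level set where the Mahalanobis distance equals `nstd`, i.e.
# (x - pos)^T cov^{-1} (x - pos) = nstd**2.  The helper below (a hypothetical
# name) makes that relationship explicit and can be used to check which points
# fall inside the plotted ellipse.
def mahalanobis_inside(points, pos, cov, nstd=2):
    """Return a boolean array, True where a point lies inside the nstd ellipse."""
    diff = np.asarray(points) - np.asarray(pos)
    inv_cov = np.linalg.inv(cov)
    # Squared Mahalanobis distance of each point to `pos`.
    d2 = np.einsum('ij,jk,ik->i', diff, inv_cov, diff)
    return d2 <= nstd ** 2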
if __name__ == '__main__':
#-- Example usage -----------------------
# Generate some random, correlated data
points = np.random.multivariate_normal(
mean=(1,1), cov=[[0.4, 9],[9, 10]], size=1000
)
# Plot the raw points...
x, y = points.T
plt.plot(x, y, 'ro')
# Plot a transparent 3 standard deviation covariance ellipse
plot_point_cov(points, nstd=3, alpha=0.5, color='green')
plt.show()
| zsomko/visualcortex | python/plotting/covariance_ellipse.py | Python | gpl-2.0 | 3,091 | ["Gaussian"] | f0dc3e971f856fd9583e6cc84b035d22a78f1a8d0e255edd0cfe706cde1e2f4b |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import (
FeaturestoreOnlineServingServiceAsyncClient,
)
from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import (
FeaturestoreOnlineServingServiceClient,
)
from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import (
transports,
)
from google.cloud.aiplatform_v1beta1.types import feature_selector
from google.cloud.aiplatform_v1beta1.types import featurestore_online_service
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert (
FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(None) is None
)
assert (
FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(
api_mtls_endpoint
)
== api_mtls_endpoint
)
assert (
FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(
sandbox_endpoint
)
== sandbox_mtls_endpoint
)
assert (
FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(
sandbox_mtls_endpoint
)
== sandbox_mtls_endpoint
)
assert (
FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
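# Hedged sketch (an assumption, not the client library's actual code): the
# behaviour asserted above can be reproduced by inserting ".mtls" in front of
# a ".sandbox.googleapis.com" / ".googleapis.com" suffix while leaving every
# other host, and already-mTLS hosts, untouched.  The helper name is
# hypothetical.
def _example_mtls_endpoint(api_endpoint):
    if not api_endpoint or ".mtls." in api_endpoint:
        return api_endpoint
    for suffix in (".sandbox.googleapis.com", ".googleapis.com"):
        if api_endpoint.endswith(suffix):
            return api_endpoint[: -len(suffix)] + ".mtls" + suffix
    return api_endpoint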
@pytest.mark.parametrize(
"client_class",
[
FeaturestoreOnlineServingServiceClient,
FeaturestoreOnlineServingServiceAsyncClient,
],
)
def test_featurestore_online_serving_service_client_from_service_account_info(
client_class,
):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"),
(
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_featurestore_online_serving_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class",
[
FeaturestoreOnlineServingServiceClient,
FeaturestoreOnlineServingServiceAsyncClient,
],
)
def test_featurestore_online_serving_service_client_from_service_account_file(
client_class,
):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_featurestore_online_serving_service_client_get_transport_class():
transport = FeaturestoreOnlineServingServiceClient.get_transport_class()
available_transports = [
transports.FeaturestoreOnlineServingServiceGrpcTransport,
]
assert transport in available_transports
transport = FeaturestoreOnlineServingServiceClient.get_transport_class("grpc")
assert transport == transports.FeaturestoreOnlineServingServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
FeaturestoreOnlineServingServiceClient,
transports.FeaturestoreOnlineServingServiceGrpcTransport,
"grpc",
),
(
FeaturestoreOnlineServingServiceAsyncClient,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
FeaturestoreOnlineServingServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FeaturestoreOnlineServingServiceClient),
)
@mock.patch.object(
FeaturestoreOnlineServingServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient),
)
def test_featurestore_online_serving_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(
FeaturestoreOnlineServingServiceClient, "get_transport_class"
) as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(
FeaturestoreOnlineServingServiceClient, "get_transport_class"
) as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
FeaturestoreOnlineServingServiceClient,
transports.FeaturestoreOnlineServingServiceGrpcTransport,
"grpc",
"true",
),
(
FeaturestoreOnlineServingServiceAsyncClient,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
FeaturestoreOnlineServingServiceClient,
transports.FeaturestoreOnlineServingServiceGrpcTransport,
"grpc",
"false",
),
(
FeaturestoreOnlineServingServiceAsyncClient,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
FeaturestoreOnlineServingServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FeaturestoreOnlineServingServiceClient),
)
@mock.patch.object(
FeaturestoreOnlineServingServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_featurestore_online_serving_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
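# Hedged sketch (an assumption about the autoswitch rule described in the
# comments above, not the library's implementation): under
# GOOGLE_API_USE_MTLS_ENDPOINT="auto", the mTLS endpoint is used only when
# client certificates are allowed and one is actually available.
def _example_auto_endpoint(use_client_cert_env, cert_source, default_endpoint, mtls_endpoint):
    if use_client_cert_env == "true" and cert_source is not None:
        return mtls_endpoint, cert_source
    return default_endpoint, None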
@pytest.mark.parametrize(
"client_class",
[
FeaturestoreOnlineServingServiceClient,
FeaturestoreOnlineServingServiceAsyncClient,
],
)
@mock.patch.object(
FeaturestoreOnlineServingServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FeaturestoreOnlineServingServiceClient),
)
@mock.patch.object(
FeaturestoreOnlineServingServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient),
)
def test_featurestore_online_serving_service_client_get_mtls_endpoint_and_cert_source(
client_class,
):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
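# Hedged summary sketch (an assumption about the selection rules the test
# above asserts, not the library's actual code): endpoint and certificate
# source as a function of the two environment switches.  Explicit client
# options (api_endpoint, client_cert_source) take precedence and are not
# modelled here; the helper name is hypothetical.
def _example_mtls_endpoint_and_cert(use_mtls_env, use_cert_env, cert_source,
                                    default_endpoint, mtls_endpoint):
    cert = cert_source if use_cert_env == "true" else None
    if use_mtls_env == "never":
        return default_endpoint, cert
    if use_mtls_env == "always":
        return mtls_endpoint, cert
    # "auto": use the mTLS endpoint only when a certificate is available.
    return (mtls_endpoint, cert) if cert else (default_endpoint, None)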
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
FeaturestoreOnlineServingServiceClient,
transports.FeaturestoreOnlineServingServiceGrpcTransport,
"grpc",
),
(
FeaturestoreOnlineServingServiceAsyncClient,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_featurestore_online_serving_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
FeaturestoreOnlineServingServiceClient,
transports.FeaturestoreOnlineServingServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
FeaturestoreOnlineServingServiceAsyncClient,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_featurestore_online_serving_service_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_featurestore_online_serving_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = FeaturestoreOnlineServingServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
FeaturestoreOnlineServingServiceClient,
transports.FeaturestoreOnlineServingServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
FeaturestoreOnlineServingServiceAsyncClient,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_featurestore_online_serving_service_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type", [featurestore_online_service.ReadFeatureValuesRequest, dict,]
)
def test_read_feature_values(request_type, transport: str = "grpc"):
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
response = client.read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse)
def test_read_feature_values_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values), "__call__"
) as call:
client.read_feature_values()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()
@pytest.mark.asyncio
async def test_read_feature_values_async(
transport: str = "grpc_asyncio",
request_type=featurestore_online_service.ReadFeatureValuesRequest,
):
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
featurestore_online_service.ReadFeatureValuesResponse()
)
response = await client.read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse)
@pytest.mark.asyncio
async def test_read_feature_values_async_from_dict():
await test_read_feature_values_async(request_type=dict)
def test_read_feature_values_field_headers():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = featurestore_online_service.ReadFeatureValuesRequest()
request.entity_type = "entity_type/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values), "__call__"
) as call:
call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
client.read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_read_feature_values_field_headers_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = featurestore_online_service.ReadFeatureValuesRequest()
request.entity_type = "entity_type/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
featurestore_online_service.ReadFeatureValuesResponse()
)
await client.read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"]
def test_read_feature_values_flattened():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.read_feature_values(entity_type="entity_type_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].entity_type
mock_val = "entity_type_value"
assert arg == mock_val
def test_read_feature_values_flattened_error():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.read_feature_values(
featurestore_online_service.ReadFeatureValuesRequest(),
entity_type="entity_type_value",
)
@pytest.mark.asyncio
async def test_read_feature_values_flattened_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
featurestore_online_service.ReadFeatureValuesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.read_feature_values(entity_type="entity_type_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].entity_type
mock_val = "entity_type_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_read_feature_values_flattened_error_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.read_feature_values(
featurestore_online_service.ReadFeatureValuesRequest(),
entity_type="entity_type_value",
)
@pytest.mark.parametrize(
"request_type",
[featurestore_online_service.StreamingReadFeatureValuesRequest, dict,],
)
def test_streaming_read_feature_values(request_type, transport: str = "grpc"):
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iter(
[featurestore_online_service.ReadFeatureValuesResponse()]
)
response = client.streaming_read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert (
args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest()
)
# Establish that the response is the type that we expect.
for message in response:
assert isinstance(
message, featurestore_online_service.ReadFeatureValuesResponse
)
def test_streaming_read_feature_values_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values), "__call__"
) as call:
client.streaming_read_feature_values()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert (
args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest()
)
@pytest.mark.asyncio
async def test_streaming_read_feature_values_async(
transport: str = "grpc_asyncio",
request_type=featurestore_online_service.StreamingReadFeatureValuesRequest,
):
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
call.return_value.read = mock.AsyncMock(
side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]
)
response = await client.streaming_read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert (
args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest()
)
# Establish that the response is the type that we expect.
message = await response.read()
assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse)
@pytest.mark.asyncio
async def test_streaming_read_feature_values_async_from_dict():
await test_streaming_read_feature_values_async(request_type=dict)
def test_streaming_read_feature_values_field_headers():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = featurestore_online_service.StreamingReadFeatureValuesRequest()
request.entity_type = "entity_type/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values), "__call__"
) as call:
call.return_value = iter(
[featurestore_online_service.ReadFeatureValuesResponse()]
)
client.streaming_read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_streaming_read_feature_values_field_headers_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = featurestore_online_service.StreamingReadFeatureValuesRequest()
request.entity_type = "entity_type/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values), "__call__"
) as call:
call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
call.return_value.read = mock.AsyncMock(
side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]
)
await client.streaming_read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "entity_type=entity_type/value",) in kw["metadata"]
def test_streaming_read_feature_values_flattened():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iter(
[featurestore_online_service.ReadFeatureValuesResponse()]
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.streaming_read_feature_values(entity_type="entity_type_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].entity_type
mock_val = "entity_type_value"
assert arg == mock_val
def test_streaming_read_feature_values_flattened_error():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.streaming_read_feature_values(
featurestore_online_service.StreamingReadFeatureValuesRequest(),
entity_type="entity_type_value",
)
@pytest.mark.asyncio
async def test_streaming_read_feature_values_flattened_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.streaming_read_feature_values(
entity_type="entity_type_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].entity_type
mock_val = "entity_type_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_streaming_read_feature_values_flattened_error_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.streaming_read_feature_values(
featurestore_online_service.StreamingReadFeatureValuesRequest(),
entity_type="entity_type_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FeaturestoreOnlineServingServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = FeaturestoreOnlineServingServiceClient(
client_options=options, transport=transport,
)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = FeaturestoreOnlineServingServiceClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FeaturestoreOnlineServingServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = FeaturestoreOnlineServingServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.FeaturestoreOnlineServingServiceGrpcTransport,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport, transports.FeaturestoreOnlineServingServiceGrpcTransport,
)
def test_featurestore_online_serving_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.FeaturestoreOnlineServingServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_featurestore_online_serving_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.FeaturestoreOnlineServingServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"read_feature_values",
"streaming_read_feature_values",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_featurestore_online_serving_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FeaturestoreOnlineServingServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_featurestore_online_serving_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FeaturestoreOnlineServingServiceTransport()
adc.assert_called_once()
def test_featurestore_online_serving_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
FeaturestoreOnlineServingServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.FeaturestoreOnlineServingServiceGrpcTransport,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
],
)
def test_featurestore_online_serving_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.FeaturestoreOnlineServingServiceGrpcTransport, grpc_helpers),
(
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
grpc_helpers_async,
),
],
)
def test_featurestore_online_serving_service_transport_create_channel(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.FeaturestoreOnlineServingServiceGrpcTransport,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
],
)
def test_featurestore_online_serving_service_grpc_transport_client_cert_source_for_mtls(
transport_class,
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_featurestore_online_serving_service_host_no_port():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com"
),
)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_featurestore_online_serving_service_host_with_port():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com:8000"
),
)
assert client.transport._host == "aiplatform.googleapis.com:8000"
def test_featurestore_online_serving_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_featurestore_online_serving_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.FeaturestoreOnlineServingServiceGrpcTransport,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
],
)
def test_featurestore_online_serving_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.FeaturestoreOnlineServingServiceGrpcTransport,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
],
)
def test_featurestore_online_serving_service_transport_channel_mtls_with_adc(
transport_class,
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_entity_type_path():
project = "squid"
location = "clam"
featurestore = "whelk"
entity_type = "octopus"
expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(
project=project,
location=location,
featurestore=featurestore,
entity_type=entity_type,
)
actual = FeaturestoreOnlineServingServiceClient.entity_type_path(
project, location, featurestore, entity_type
)
assert expected == actual
def test_parse_entity_type_path():
expected = {
"project": "oyster",
"location": "nudibranch",
"featurestore": "cuttlefish",
"entity_type": "mussel",
}
path = FeaturestoreOnlineServingServiceClient.entity_type_path(**expected)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_entity_type_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = FeaturestoreOnlineServingServiceClient.common_billing_account_path(
billing_account
)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nautilus",
}
path = FeaturestoreOnlineServingServiceClient.common_billing_account_path(
**expected
)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path(
path
)
assert expected == actual
def test_common_folder_path():
folder = "scallop"
expected = "folders/{folder}".format(folder=folder,)
actual = FeaturestoreOnlineServingServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "abalone",
}
path = FeaturestoreOnlineServingServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "squid"
expected = "organizations/{organization}".format(organization=organization,)
actual = FeaturestoreOnlineServingServiceClient.common_organization_path(
organization
)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "clam",
}
path = FeaturestoreOnlineServingServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "whelk"
expected = "projects/{project}".format(project=project,)
actual = FeaturestoreOnlineServingServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "octopus",
}
path = FeaturestoreOnlineServingServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "oyster"
location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = FeaturestoreOnlineServingServiceClient.common_location_path(
project, location
)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
}
path = FeaturestoreOnlineServingServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.FeaturestoreOnlineServingServiceTransport, "_prep_wrapped_messages"
) as prep:
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.FeaturestoreOnlineServingServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = FeaturestoreOnlineServingServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(
FeaturestoreOnlineServingServiceClient,
transports.FeaturestoreOnlineServingServiceGrpcTransport,
),
(
FeaturestoreOnlineServingServiceAsyncClient,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-aiplatform
|
tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py
|
Python
|
apache-2.0
| 69,458
|
[
"Octopus"
] |
331f9cd511d5e2606c93a537fa72a9c796172dcd84fe4d919dd8f964d9ec97a6
|
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is a simple HTTP server used for testing Chrome.
It supports several test URLs, as specified by the handlers in TestPageHandler.
It defaults to living on localhost:8888.
It can use https if you specify the flag --https=CERT where CERT is the path
to a pem file containing the certificate and private key that should be used.
To shut it down properly, visit localhost:8888/kill.
"""
import base64
import BaseHTTPServer
import cgi
import optparse
import os
import re
import shutil
import SocketServer
import sys
import time
import tlslite
import tlslite.api
import pyftpdlib.ftpserver
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
SERVER_HTTP = 0
SERVER_FTP = 1
debug_output = sys.stderr
def debug(str):
debug_output.write(str + "\n")
debug_output.flush()
class StoppableHTTPServer(BaseHTTPServer.HTTPServer):
"""This is a specialization of of BaseHTTPServer to allow it
to be exited cleanly (by setting its "stop" member to True)."""
def serve_forever(self):
self.stop = False
self.nonce = None
while not self.stop:
self.handle_request()
self.socket.close()
class HTTPSServer(tlslite.api.TLSSocketServerMixIn, StoppableHTTPServer):
"""This is a specialization of StoppableHTTPerver that add https support."""
  def __init__(self, server_address, request_handler_class, cert_path):
s = open(cert_path).read()
x509 = tlslite.api.X509()
x509.parse(s)
self.cert_chain = tlslite.api.X509CertChain([x509])
s = open(cert_path).read()
self.private_key = tlslite.api.parsePEMKey(s, private=True)
self.session_cache = tlslite.api.SessionCache()
    StoppableHTTPServer.__init__(self, server_address, request_handler_class)
def handshake(self, tlsConnection):
"""Creates the SSL connection."""
try:
tlsConnection.handshakeServer(certChain=self.cert_chain,
privateKey=self.private_key,
sessionCache=self.session_cache)
tlsConnection.ignoreAbruptClose = True
return True
except tlslite.api.TLSError, error:
print "Handshake failure:", str(error)
return False
class TestPageHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def __init__(self, request, client_address, socket_server):
self._connect_handlers = [
self.RedirectConnectHandler,
self.ServerAuthConnectHandler,
self.DefaultConnectResponseHandler]
self._get_handlers = [
self.KillHandler,
self.NoCacheMaxAgeTimeHandler,
self.NoCacheTimeHandler,
self.CacheTimeHandler,
self.CacheExpiresHandler,
self.CacheProxyRevalidateHandler,
self.CachePrivateHandler,
self.CachePublicHandler,
self.CacheSMaxAgeHandler,
self.CacheMustRevalidateHandler,
self.CacheMustRevalidateMaxAgeHandler,
self.CacheNoStoreHandler,
self.CacheNoStoreMaxAgeHandler,
self.CacheNoTransformHandler,
self.DownloadHandler,
self.DownloadFinishHandler,
self.EchoHeader,
self.EchoAllHandler,
self.FileHandler,
self.RealFileWithCommonHeaderHandler,
self.RealBZ2FileWithCommonHeaderHandler,
self.AuthBasicHandler,
self.AuthDigestHandler,
self.SlowServerHandler,
self.ContentTypeHandler,
self.ServerRedirectHandler,
self.ClientRedirectHandler,
self.DefaultResponseHandler]
self._post_handlers = [
self.WriteFile,
self.EchoTitleHandler,
self.EchoAllHandler,
self.EchoHandler] + self._get_handlers
self._mime_types = {
'gif': 'image/gif',
'jpeg' : 'image/jpeg',
'jpg' : 'image/jpeg'
}
self._default_mime_type = 'text/html'
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request,
client_address,
socket_server)
def _ShouldHandleRequest(self, handler_name):
"""Determines if the path can be handled by the handler.
We consider a handler valid if the path begins with the
handler name. It can optionally be followed by "?*", "/*".
"""
pattern = re.compile('%s($|\?|/).*' % handler_name)
return pattern.match(self.path)
def GetMIMETypeFromName(self, file_name):
"""Returns the mime type for the specified file_name. So far it only looks
at the file extension."""
(shortname, extension) = os.path.splitext(file_name)
if len(extension) == 0:
# no extension.
return self._default_mime_type
# extension starts with a dot, so we need to remove it
return self._mime_types.get(extension[1:], self._default_mime_type)
def KillHandler(self):
"""This request handler kills the server, for use when we're done"
with the a particular test."""
if (self.path.find("kill") < 0):
return False
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Cache-Control', 'max-age=0')
self.end_headers()
self.wfile.write("Time to die")
self.server.stop = True
return True
def NoCacheMaxAgeTimeHandler(self):
"""This request handler yields a page with the title set to the current
system time, and no caching requested."""
if not self._ShouldHandleRequest("/nocachetime/maxage"):
return False
self.send_response(200)
self.send_header('Cache-Control', 'max-age=0')
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def NoCacheTimeHandler(self):
"""This request handler yields a page with the title set to the current
system time, and no caching requested."""
if not self._ShouldHandleRequest("/nocachetime"):
return False
self.send_response(200)
self.send_header('Cache-Control', 'no-cache')
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheTimeHandler(self):
"""This request handler yields a page with the title set to the current
system time, and allows caching for one minute."""
if not self._ShouldHandleRequest("/cachetime"):
return False
self.send_response(200)
self.send_header('Cache-Control', 'max-age=60')
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheExpiresHandler(self):
"""This request handler yields a page with the title set to the current
system time, and set the page to expire on 1 Jan 2099."""
if not self._ShouldHandleRequest("/cache/expires"):
return False
self.send_response(200)
self.send_header('Expires', 'Thu, 1 Jan 2099 00:00:00 GMT')
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheProxyRevalidateHandler(self):
"""This request handler yields a page with the title set to the current
system time, and allows caching for 60 seconds"""
if not self._ShouldHandleRequest("/cache/proxy-revalidate"):
return False
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Cache-Control', 'max-age=60, proxy-revalidate')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CachePrivateHandler(self):
"""This request handler yields a page with the title set to the current
    system time, and allows caching for 3 seconds."""
if not self._ShouldHandleRequest("/cache/private"):
return False
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Cache-Control', 'max-age=3, private')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CachePublicHandler(self):
"""This request handler yields a page with the title set to the current
    system time, and allows caching for 3 seconds."""
if not self._ShouldHandleRequest("/cache/public"):
return False
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Cache-Control', 'max-age=3, public')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheSMaxAgeHandler(self):
"""This request handler yields a page with the title set to the current
    system time, and disallows browser caching (max-age=0) while still
    allowing shared caches to store it for 60 seconds via s-maxage."""
if not self._ShouldHandleRequest("/cache/s-maxage"):
return False
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Cache-Control', 'public, s-maxage = 60, max-age = 0')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheMustRevalidateHandler(self):
"""This request handler yields a page with the title set to the current
system time, and does not allow caching."""
if not self._ShouldHandleRequest("/cache/must-revalidate"):
return False
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Cache-Control', 'must-revalidate')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheMustRevalidateMaxAgeHandler(self):
"""This request handler yields a page with the title set to the current
    system time, and does not allow caching even though a max-age of 60
seconds is specified."""
if not self._ShouldHandleRequest("/cache/must-revalidate/max-age"):
return False
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Cache-Control', 'max-age=60, must-revalidate')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheNoStoreHandler(self):
"""This request handler yields a page with the title set to the current
system time, and does not allow the page to be stored."""
if not self._ShouldHandleRequest("/cache/no-store"):
return False
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Cache-Control', 'no-store')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheNoStoreMaxAgeHandler(self):
"""This request handler yields a page with the title set to the current
system time, and does not allow the page to be stored even though max-age
of 60 seconds is specified."""
if not self._ShouldHandleRequest("/cache/no-store/max-age"):
return False
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Cache-Control', 'max-age=60, no-store')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def CacheNoTransformHandler(self):
"""This request handler yields a page with the title set to the current
    system time, and does not allow the content to be transformed during
    user-agent caching."""
if not self._ShouldHandleRequest("/cache/no-transform"):
return False
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Cache-Control', 'no-transform')
self.end_headers()
self.wfile.write('<html><head><title>%s</title></head></html>' %
time.time())
return True
def EchoHeader(self):
"""This handler echoes back the value of a specific request header."""
if not self._ShouldHandleRequest("/echoheader"):
return False
query_char = self.path.find('?')
if query_char != -1:
header_name = self.path[query_char+1:]
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Cache-control', 'max-age=60000')
    # Insert a Vary header to properly indicate that the cacheability of this
    # response is subject to the value of the request header being echoed.
if len(header_name) > 0:
self.send_header('Vary', header_name)
self.end_headers()
if len(header_name) > 0:
self.wfile.write(self.headers.getheader(header_name))
return True
def EchoHandler(self):
"""This handler just echoes back the payload of the request, for testing
form submission."""
if not self._ShouldHandleRequest("/echo"):
return False
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
length = int(self.headers.getheader('content-length'))
request = self.rfile.read(length)
self.wfile.write(request)
return True
def WriteFile(self):
"""This is handler dumps the content of POST request to a disk file into
the data_dir/dump. Sub-directories are not supported."""
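    # Illustrative request: POST /writefile/foo.txt with a body writes that
    # body to data_dir/dump/foo.txt and echoes the file name back.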
prefix='/writefile/'
if not self.path.startswith(prefix):
return False
file_name = self.path[len(prefix):]
    # strip any disallowed characters from the file name
    file_name = re.sub('[^a-zA-Z0-9_.-]+', '', file_name)
if len(file_name) and file_name[0] != '.':
      path = os.path.join(self.server.data_dir, 'dump', file_name)
length = int(self.headers.getheader('content-length'))
request = self.rfile.read(length)
f = open(path, "wb")
      f.write(request)
f.close()
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html>%s</html>' % file_name)
return True
def EchoTitleHandler(self):
"""This handler is like Echo, but sets the page title to the request."""
if not self._ShouldHandleRequest("/echotitle"):
return False
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
length = int(self.headers.getheader('content-length'))
request = self.rfile.read(length)
self.wfile.write('<html><head><title>')
self.wfile.write(request)
self.wfile.write('</title></head></html>')
return True
def EchoAllHandler(self):
"""This handler yields a (more) human-readable page listing information
about the request header & contents."""
if not self._ShouldHandleRequest("/echoall"):
return False
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><style>'
'pre { border: 1px solid black; margin: 5px; padding: 5px }'
'</style></head><body>'
'<div style="float: right">'
'<a href="http://localhost:8888/echo">back to referring page</a></div>'
'<h1>Request Body:</h1><pre>')
if self.command == 'POST':
length = int(self.headers.getheader('content-length'))
qs = self.rfile.read(length)
params = cgi.parse_qs(qs, keep_blank_values=1)
for param in params:
self.wfile.write('%s=%s\n' % (param, params[param][0]))
self.wfile.write('</pre>')
self.wfile.write('<h1>Request Headers:</h1><pre>%s</pre>' % self.headers)
self.wfile.write('</body></html>')
return True
def DownloadHandler(self):
"""This handler sends a downloadable file with or without reporting
the size (6K)."""
if self.path.startswith("/download-unknown-size"):
send_length = False
elif self.path.startswith("/download-known-size"):
send_length = True
else:
return False
#
# The test which uses this functionality is attempting to send
# small chunks of data to the client. Use a fairly large buffer
# so that we'll fill chrome's IO buffer enough to force it to
# actually write the data.
# See also the comments in the client-side of this test in
# download_uitest.cc
#
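    # Sketch of the flow below: write the first chunk, keep serving other
    # requests (e.g. /download-finish) until one of them clears
    # waitForDownload, then write the second chunk and return.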
size_chunk1 = 35*1024
size_chunk2 = 10*1024
self.send_response(200)
self.send_header('Content-type', 'application/octet-stream')
self.send_header('Cache-Control', 'max-age=0')
if send_length:
self.send_header('Content-Length', size_chunk1 + size_chunk2)
self.end_headers()
# First chunk of data:
self.wfile.write("*" * size_chunk1)
self.wfile.flush()
# handle requests until one of them clears this flag.
self.server.waitForDownload = True
while self.server.waitForDownload:
self.server.handle_request()
# Second chunk of data:
self.wfile.write("*" * size_chunk2)
return True
def DownloadFinishHandler(self):
"""This handler just tells the server to finish the current download."""
if not self._ShouldHandleRequest("/download-finish"):
return False
self.server.waitForDownload = False
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Cache-Control', 'max-age=0')
self.end_headers()
return True
def FileHandler(self):
"""This handler sends the contents of the requested file. Wow, it's like
a real webserver!"""
prefix = self.server.file_root_url
if not self.path.startswith(prefix):
return False
file = self.path[len(prefix):]
    entries = file.split('/')
path = os.path.join(self.server.data_dir, *entries)
if os.path.isdir(path):
path = os.path.join(path, 'index.html')
if not os.path.isfile(path):
print "File not found " + file + " full path:" + path
self.send_error(404)
return True
f = open(path, "rb")
data = f.read()
f.close()
# If file.mock-http-headers exists, it contains the headers we
# should send. Read them in and parse them.
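    # A hypothetical foo.html.mock-http-headers file, in the format parsed
    # below, would look like:
    #   HTTP/1.1 200 OK
    #   Content-Type: text/html
    #   Cache-Control: max-age=60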
headers_path = path + '.mock-http-headers'
if os.path.isfile(headers_path):
f = open(headers_path, "r")
# "HTTP/1.1 200 OK"
response = f.readline()
status_code = re.findall('HTTP/\d+.\d+ (\d+)', response)[0]
self.send_response(int(status_code))
for line in f:
# "name: value"
name, value = re.findall('(\S+):\s*(.*)', line)[0]
self.send_header(name, value)
f.close()
else:
# Could be more generic once we support mime-type sniffing, but for
# now we need to set it explicitly.
self.send_response(200)
self.send_header('Content-type', self.GetMIMETypeFromName(file))
self.send_header('Content-Length', len(data))
self.end_headers()
self.wfile.write(data)
return True
def RealFileWithCommonHeaderHandler(self):
"""This handler sends the contents of the requested file without the pseudo
http head!"""
prefix='/realfiles/'
if not self.path.startswith(prefix):
return False
file = self.path[len(prefix):]
path = os.path.join(self.server.data_dir, file)
try:
f = open(path, "rb")
data = f.read()
f.close()
# simply set the MIME type to octet-stream
self.send_response(200)
self.send_header('Content-type', 'application/octet-stream')
self.end_headers()
self.wfile.write(data)
except:
self.send_error(404)
return True
def RealBZ2FileWithCommonHeaderHandler(self):
"""This handler sends the bzip2 contents of the requested file with
corresponding Content-Encoding field in http head!"""
prefix='/realbz2files/'
if not self.path.startswith(prefix):
return False
parts = self.path.split('?')
file = parts[0][len(prefix):]
path = os.path.join(self.server.data_dir, file) + '.bz2'
if len(parts) > 1:
options = parts[1]
else:
options = ''
try:
self.send_response(200)
accept_encoding = self.headers.get("Accept-Encoding")
if accept_encoding.find("bzip2") != -1:
f = open(path, "rb")
data = f.read()
f.close()
self.send_header('Content-Encoding', 'bzip2')
self.send_header('Content-type', 'application/x-bzip2')
self.end_headers()
if options == 'incremental-header':
self.wfile.write(data[:1])
self.wfile.flush()
time.sleep(1.0)
self.wfile.write(data[1:])
else:
self.wfile.write(data)
else:
"""client do not support bzip2 format, send pseudo content
"""
self.send_header('Content-type', 'text/html; charset=ISO-8859-1')
self.end_headers()
self.wfile.write("you do not support bzip2 encoding")
except:
self.send_error(404)
return True
def AuthBasicHandler(self):
"""This handler tests 'Basic' authentication. It just sends a page with
title 'user/pass' if you succeed."""
if not self._ShouldHandleRequest("/auth-basic"):
return False
username = userpass = password = b64str = ""
set_cookie_if_challenged = self.path.find('?set-cookie-if-challenged') > 0
auth = self.headers.getheader('authorization')
try:
if not auth:
raise Exception('no auth')
b64str = re.findall(r'Basic (\S+)', auth)[0]
userpass = base64.b64decode(b64str)
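      # e.g. an "Authorization: Basic dXNlcjpzZWNyZXQ=" header decodes to
      # "user:secret" here.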
username, password = re.findall(r'([^:]+):(\S+)', userpass)[0]
if password != 'secret':
raise Exception('wrong password')
except Exception, e:
# Authentication failed.
self.send_response(401)
self.send_header('WWW-Authenticate', 'Basic realm="testrealm"')
self.send_header('Content-type', 'text/html')
if set_cookie_if_challenged:
self.send_header('Set-Cookie', 'got_challenged=true')
self.end_headers()
self.wfile.write('<html><head>')
self.wfile.write('<title>Denied: %s</title>' % e)
self.wfile.write('</head><body>')
self.wfile.write('auth=%s<p>' % auth)
self.wfile.write('b64str=%s<p>' % b64str)
self.wfile.write('username: %s<p>' % username)
self.wfile.write('userpass: %s<p>' % userpass)
self.wfile.write('password: %s<p>' % password)
self.wfile.write('You sent:<br>%s<p>' % self.headers)
self.wfile.write('</body></html>')
return True
    # Authentication successful. (Return a cacheable response to allow for
# testing cached pages that require authentication.)
if_none_match = self.headers.getheader('if-none-match')
if if_none_match == "abc":
self.send_response(304)
self.end_headers()
else:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Cache-control', 'max-age=60000')
self.send_header('Etag', 'abc')
self.end_headers()
self.wfile.write('<html><head>')
self.wfile.write('<title>%s/%s</title>' % (username, password))
self.wfile.write('</head><body>')
self.wfile.write('auth=%s<p>' % auth)
self.wfile.write('You sent:<br>%s<p>' % self.headers)
self.wfile.write('</body></html>')
return True
def AuthDigestHandler(self):
"""This handler tests 'Digest' authentication. It just sends a page with
title 'user/pass' if you succeed."""
if not self._ShouldHandleRequest("/auth-digest"):
return False
# Periodically generate a new nonce. Technically we should incorporate
# the request URL into this, but we don't care for testing.
nonce_life = 10
stale = False
if (not self.server.nonce or
(time.time() - self.server.nonce_time > nonce_life)):
if self.server.nonce:
stale = True
self.server.nonce_time = time.time()
self.server.nonce = \
_new_md5(time.ctime(self.server.nonce_time) +
'privatekey').hexdigest()
nonce = self.server.nonce
opaque = _new_md5('opaque').hexdigest()
password = 'secret'
realm = 'testrealm'
auth = self.headers.getheader('authorization')
pairs = {}
try:
if not auth:
raise Exception('no auth')
if not auth.startswith('Digest'):
raise Exception('not digest')
# Pull out all the name="value" pairs as a dictionary.
pairs = dict(re.findall(r'(\b[^ ,=]+)="?([^",]+)"?', auth))
# Make sure it's all valid.
if pairs['nonce'] != nonce:
raise Exception('wrong nonce')
if pairs['opaque'] != opaque:
raise Exception('wrong opaque')
# Check the 'response' value and make sure it matches our magic hash.
# See http://www.ietf.org/rfc/rfc2617.txt
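      # For reference (per RFC 2617), with qop present the expected value is:
      #   HA1      = MD5(username ':' realm ':' password)
      #   HA2      = MD5(method ':' uri)
      #   response = MD5(HA1 ':' nonce ':' nc ':' cnonce ':' qop ':' HA2)
      # and without qop: response = MD5(HA1 ':' nonce ':' HA2).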
hash_a1 = _new_md5(
':'.join([pairs['username'], realm, password])).hexdigest()
hash_a2 = _new_md5(':'.join([self.command, pairs['uri']])).hexdigest()
if 'qop' in pairs and 'nc' in pairs and 'cnonce' in pairs:
response = _new_md5(':'.join([hash_a1, nonce, pairs['nc'],
pairs['cnonce'], pairs['qop'], hash_a2])).hexdigest()
else:
response = _new_md5(':'.join([hash_a1, nonce, hash_a2])).hexdigest()
if pairs['response'] != response:
raise Exception('wrong password')
except Exception, e:
# Authentication failed.
self.send_response(401)
hdr = ('Digest '
'realm="%s", '
'domain="/", '
'qop="auth", '
'algorithm=MD5, '
'nonce="%s", '
'opaque="%s"') % (realm, nonce, opaque)
if stale:
hdr += ', stale="TRUE"'
self.send_header('WWW-Authenticate', hdr)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head>')
self.wfile.write('<title>Denied: %s</title>' % e)
self.wfile.write('</head><body>')
self.wfile.write('auth=%s<p>' % auth)
self.wfile.write('pairs=%s<p>' % pairs)
self.wfile.write('You sent:<br>%s<p>' % self.headers)
self.wfile.write('We are replying:<br>%s<p>' % hdr)
self.wfile.write('</body></html>')
return True
# Authentication successful.
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head>')
self.wfile.write('<title>%s/%s</title>' % (pairs['username'], password))
self.wfile.write('</head><body>')
self.wfile.write('auth=%s<p>' % auth)
self.wfile.write('pairs=%s<p>' % pairs)
self.wfile.write('</body></html>')
return True
def SlowServerHandler(self):
"""Wait for the user suggested time before responding. The syntax is
/slow?0.5 to wait for half a second."""
if not self._ShouldHandleRequest("/slow"):
return False
query_char = self.path.find('?')
wait_sec = 1.0
if query_char >= 0:
try:
      wait_sec = float(self.path[query_char + 1:])
except ValueError:
pass
time.sleep(wait_sec)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write("waited %d seconds" % wait_sec)
return True
def ContentTypeHandler(self):
"""Returns a string of html with the given content type. E.g.,
/contenttype?text/css returns an html file with the Content-Type
header set to text/css."""
if not self._ShouldHandleRequest("/contenttype"):
return False
query_char = self.path.find('?')
content_type = self.path[query_char + 1:].strip()
if not content_type:
content_type = 'text/html'
self.send_response(200)
self.send_header('Content-Type', content_type)
self.end_headers()
self.wfile.write("<html>\n<body>\n<p>HTML text</p>\n</body>\n</html>\n");
return True
def ServerRedirectHandler(self):
"""Sends a server redirect to the given URL. The syntax is
'/server-redirect?http://foo.bar/asdf' to redirect to
'http://foo.bar/asdf'"""
test_name = "/server-redirect"
if not self._ShouldHandleRequest(test_name):
return False
query_char = self.path.find('?')
if query_char < 0 or len(self.path) <= query_char + 1:
self.sendRedirectHelp(test_name)
return True
dest = self.path[query_char + 1:]
self.send_response(301) # moved permanently
self.send_header('Location', dest)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head>')
self.wfile.write('</head><body>Redirecting to %s</body></html>' % dest)
return True
def ClientRedirectHandler(self):
"""Sends a client redirect to the given URL. The syntax is
'/client-redirect?http://foo.bar/asdf' to redirect to
'http://foo.bar/asdf'"""
test_name = "/client-redirect"
if not self._ShouldHandleRequest(test_name):
return False
    query_char = self.path.find('?')
if query_char < 0 or len(self.path) <= query_char + 1:
self.sendRedirectHelp(test_name)
return True
dest = self.path[query_char + 1:]
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head>')
self.wfile.write('<meta http-equiv="refresh" content="0;url=%s">' % dest)
self.wfile.write('</head><body>Redirecting to %s</body></html>' % dest)
return True
def DefaultResponseHandler(self):
"""This is the catch-all response handler for requests that aren't handled
by one of the special handlers above.
    Note that we specify the Content-Length, since without it the HTTPS connection
is not closed properly (and the browser keeps expecting data)."""
contents = "Default response given for path: " + self.path
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header("Content-Length", len(contents))
self.end_headers()
self.wfile.write(contents)
return True
def RedirectConnectHandler(self):
"""Sends a redirect to the CONNECT request for www.redirect.com. This
response is not specified by the RFC, so the browser should not follow
the redirect."""
if (self.path.find("www.redirect.com") < 0):
return False
dest = "http://www.destination.com/foo.js"
self.send_response(302) # moved temporarily
self.send_header('Location', dest)
self.send_header('Connection', 'close')
self.end_headers()
return True
def ServerAuthConnectHandler(self):
"""Sends a 401 to the CONNECT request for www.server-auth.com. This
response doesn't make sense because the proxy server cannot request
server authentication."""
if (self.path.find("www.server-auth.com") < 0):
return False
challenge = 'Basic realm="WallyWorld"'
self.send_response(401) # unauthorized
self.send_header('WWW-Authenticate', challenge)
self.send_header('Connection', 'close')
self.end_headers()
return True
def DefaultConnectResponseHandler(self):
"""This is the catch-all response handler for CONNECT requests that aren't
handled by one of the special handlers above. Real Web servers respond
with 400 to CONNECT requests."""
contents = "Your client has issued a malformed or illegal request."
self.send_response(400) # bad request
self.send_header('Content-type', 'text/html')
self.send_header("Content-Length", len(contents))
self.end_headers()
self.wfile.write(contents)
return True
def do_CONNECT(self):
for handler in self._connect_handlers:
if handler():
return
def do_GET(self):
for handler in self._get_handlers:
if handler():
return
def do_POST(self):
for handler in self._post_handlers:
if handler():
return
# called by the redirect handling function when there is no parameter
def sendRedirectHelp(self, redirect_name):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><body><h1>Error: no redirect destination</h1>')
self.wfile.write('Use <pre>%s?http://dest...</pre>' % redirect_name)
self.wfile.write('</body></html>')
def MakeDumpDir(data_dir):
"""Create directory named 'dump' where uploaded data via HTTP POST request
will be stored. If the directory already exists all files and subdirectories
will be deleted."""
  dump_dir = os.path.join(data_dir, 'dump')
if os.path.isdir(dump_dir):
shutil.rmtree(dump_dir)
os.mkdir(dump_dir)
def MakeDataDir():
if options.data_dir:
if not os.path.isdir(options.data_dir):
print 'specified data dir not found: ' + options.data_dir + ' exiting...'
return None
my_data_dir = options.data_dir
else:
# Create the default path to our data dir, relative to the exe dir.
my_data_dir = os.path.dirname(sys.argv[0])
my_data_dir = os.path.join(my_data_dir, "..", "..", "..", "..",
"test", "data")
    #TODO(ibrar): Must use Find* function defined in google\tools
#i.e my_data_dir = FindUpward(my_data_dir, "test", "data")
return my_data_dir
def main(options, args):
# redirect output to a log file so it doesn't spam the unit test output
logfile = open('testserver.log', 'w')
sys.stderr = sys.stdout = logfile
port = options.port
if options.server_type == SERVER_HTTP:
if options.cert:
# let's make sure the cert file exists.
if not os.path.isfile(options.cert):
print 'specified cert file not found: ' + options.cert + ' exiting...'
return
server = HTTPSServer(('127.0.0.1', port), TestPageHandler, options.cert)
print 'HTTPS server started on port %d...' % port
else:
server = StoppableHTTPServer(('127.0.0.1', port), TestPageHandler)
print 'HTTP server started on port %d...' % port
server.data_dir = MakeDataDir()
server.file_root_url = options.file_root_url
MakeDumpDir(server.data_dir)
  else:
    # FTP server
my_data_dir = MakeDataDir()
def line_logger(msg):
if (msg.find("kill") >= 0):
server.stop = True
print 'shutting down server'
sys.exit(0)
# Instantiate a dummy authorizer for managing 'virtual' users
authorizer = pyftpdlib.ftpserver.DummyAuthorizer()
# Define a new user having full r/w permissions and a read-only
# anonymous user
authorizer.add_user('chrome', 'chrome', my_data_dir, perm='elradfmw')
authorizer.add_anonymous(my_data_dir)
# Instantiate FTP handler class
ftp_handler = pyftpdlib.ftpserver.FTPHandler
ftp_handler.authorizer = authorizer
pyftpdlib.ftpserver.logline = line_logger
# Define a customized banner (string returned when client connects)
ftp_handler.banner = ("pyftpdlib %s based ftpd ready." %
pyftpdlib.ftpserver.__ver__)
# Instantiate FTP server class and listen to 127.0.0.1:port
address = ('127.0.0.1', port)
server = pyftpdlib.ftpserver.FTPServer(address, ftp_handler)
print 'FTP server started on port %d...' % port
try:
server.serve_forever()
except KeyboardInterrupt:
print 'shutting down server'
server.stop = True
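# A minimal, hypothetical client-side sketch (not part of the original
# server): it assumes a server started as above is already listening on
# 127.0.0.1:8888 and exercises two of the handlers via urllib2.
def _demo_client():
  import urllib2
  # /slow?2 should block for two seconds before answering.
  print urllib2.urlopen('http://127.0.0.1:8888/slow?2').read()
  # /contenttype echoes back the requested Content-Type header.
  resp = urllib2.urlopen('http://127.0.0.1:8888/contenttype?text/css')
  print resp.info().getheader('Content-Type')  # expected: text/css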
if __name__ == '__main__':
option_parser = optparse.OptionParser()
option_parser.add_option("-f", '--ftp', action='store_const',
const=SERVER_FTP, default=SERVER_HTTP,
dest='server_type',
                           help='FTP or HTTP server (default: HTTP)')
option_parser.add_option('', '--port', default='8888', type='int',
help='Port used by the server')
option_parser.add_option('', '--data-dir', dest='data_dir',
help='Directory from which to read the files')
option_parser.add_option('', '--https', dest='cert',
                           help='Specify that HTTPS should be used; the value '
                                'is the path to the cert file containing the '
                                'private key the server should use')
option_parser.add_option('', '--file-root-url', default='/files/',
help='Specify a root URL for files served.')
options, args = option_parser.parse_args()
sys.exit(main(options, args))
| kuiche/chromium | net/tools/testserver/testserver.py | Python | bsd-3-clause | 36,527 | ["VisIt"] | 3f1bad71ca579f7b92859b0bb3720a4ec1c3bc4b8d48d463ff60435fa609fbab |
# Copyright 2001 by Tarjei Mikkelsen. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# get set abstraction for graph representation
#TODO - Subclass graph?
class MultiGraph(object):
"""A directed multigraph abstraction with labeled edges."""
    def __init__(self, nodes = ()):
"""Initializes a new MultiGraph object."""
self._adjacency_list = {} # maps parent -> set of (child, label) pairs
for n in nodes:
self._adjacency_list[n] = set()
self._label_map = {} # maps label -> set of (parent, child) pairs
def __eq__(self, g):
"""Returns true if g is equal to this graph."""
return isinstance(g, MultiGraph) and \
(self._adjacency_list == g._adjacency_list) and \
(self._label_map == g._label_map)
def __ne__(self, g):
"""Returns true if g is not equal to this graph."""
return not self.__eq__(g)
def __repr__(self):
"""Returns an unique string representation of this graph."""
s = "<MultiGraph: "
keys = sorted(self._adjacency_list.keys())
for key in keys:
values = sorted(self._adjacency_list[key])
s += "(" + repr(key) + ": " + ",".join(map(repr, values)) + ")"
return s + ">"
def __str__(self):
"""Returns a concise string description of this graph."""
nodenum = len(self._adjacency_list)
edgenum = reduce(lambda x,y: x+y,
map(len, self._adjacency_list.values()))
labelnum = len(self._label_map)
return "<MultiGraph: " + \
str(nodenum) + " node(s), " + \
str(edgenum) + " edge(s), " + \
str(labelnum) + " unique label(s)>"
def add_node(self, node):
"""Adds a node to this graph."""
if node not in self._adjacency_list:
self._adjacency_list[node] = set()
def add_edge(self, source, to, label = None):
"""Adds an edge to this graph."""
if source not in self._adjacency_list:
raise ValueError("Unknown <from> node: " + str(source))
if to not in self._adjacency_list:
raise ValueError("Unknown <to> node: " + str(to))
edge = (to, label)
self._adjacency_list[source].add(edge)
if label not in self._label_map:
self._label_map[label] = set()
self._label_map[label].add((source,to))
def child_edges(self, parent):
"""Returns a list of (child, label) pairs for parent."""
if parent not in self._adjacency_list:
raise ValueError("Unknown <parent> node: " + str(parent))
return sorted(self._adjacency_list[parent])
def children(self, parent):
"""Returns a list of unique children for parent."""
return sorted(set([x[0] for x in self.child_edges(parent)]))
def edges(self, label):
"""Returns a list of all the edges with this label."""
if label not in self._label_map:
raise ValueError("Unknown label: " + str(label))
return sorted(self._label_map[label])
def labels(self):
"""Returns a list of all the edge labels in this graph."""
return self._label_map.keys()
def nodes(self):
"""Returns a list of the nodes in this graph."""
return self._adjacency_list.keys()
def parent_edges(self, child):
"""Returns a list of (parent, label) pairs for child."""
if child not in self._adjacency_list:
raise ValueError("Unknown <child> node: " + str(child))
parents = []
for parent, children in self._adjacency_list.iteritems():
for x in children:
if x[0] is child:
parents.append((parent, x[1]))
return sorted(parents)
def parents(self, child):
"""Returns a list of unique parents for child."""
return sorted(set([x[0] for x in self.parent_edges(child)]))
def remove_node(self, node):
"""Removes node and all edges connected to it."""
if node not in self._adjacency_list:
raise ValueError("Unknown node: " + str(node))
# remove node (and all out-edges) from adjacency list
del self._adjacency_list[node]
# remove all in-edges from adjacency list
for n in self._adjacency_list:
self._adjacency_list[n] = set(x for x in self._adjacency_list[n] \
if x[0] is not node)
        # remove all referring pairs in the label map
for label in self._label_map.keys():
lm = set(x for x in self._label_map[label] \
if (x[0] is not node) and (x[1] is not node))
# remove the entry completely if the label is now unused
if lm:
self._label_map[label] = lm
else:
del self._label_map[label]
def remove_edge(self, parent, child, label):
"""Removes edge. -- NOT IMPLEMENTED"""
        # hm, this is a multigraph - how should this be implemented?
raise NotImplementedError("remove_edge is not yet implemented")
# auxiliary graph functions
def df_search(graph, root = None):
"""Depth first search of g.
Returns a list of all nodes that can be reached from the root node
in depth-first order.
If root is not given, the search will be rooted at an arbitrary node.
"""
seen = {}
search = []
if len(graph.nodes()) < 1:
return search
if root is None:
root = (graph.nodes())[0]
seen[root] = 1
search.append(root)
current = graph.children(root)
while len(current) > 0:
node = current[0]
current = current[1:]
if node not in seen:
search.append(node)
seen[node] = 1
current = graph.children(node) + current
return search
def bf_search(graph, root = None):
"""Breadth first search of g.
Returns a list of all nodes that can be reached from the root node
in breadth-first order.
If root is not given, the search will be rooted at an arbitrary node.
"""
seen = {}
search = []
if len(graph.nodes()) < 1:
return search
if root is None:
root = (graph.nodes())[0]
seen[root] = 1
search.append(root)
current = graph.children(root)
while len(current) > 0:
node = current[0]
current = current[1:]
if node not in seen:
search.append(node)
seen[node] = 1
current.extend(graph.children(node))
return search
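# A short, hypothetical usage sketch (not part of the original module)
# showing the MultiGraph API together with the two searches above:
def _demo_multigraph():
    g = MultiGraph(['a', 'b', 'c'])
    g.add_edge('a', 'b', 'enzyme-1')
    g.add_edge('a', 'c', 'enzyme-2')
    g.add_edge('b', 'c', 'enzyme-1')
    print df_search(g, 'a') # ['a', 'b', 'c']
    print bf_search(g, 'a') # ['a', 'b', 'c']
    print g.edges('enzyme-1') # [('a', 'b'), ('b', 'c')]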
| bryback/quickseq | genescript/Bio/Pathway/Rep/MultiGraph.py | Python | mit | 6,729 | ["Biopython"] | 3669300064e9bc034f87569a62cf1882d9345ab4c3ec9ff6d4ea62cc5bc2cb17 |
from __future__ import print_function
from bokeh.browserlib import view
from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid, GlyphRenderer, Circle, HoverTool, BoxSelectTool
from bokeh.models.widgets import (
Select, HBox, VBox, DataTable, TableColumn, StringFormatter,
NumberFormatter, StringEditor, IntEditor, NumberEditor, SelectEditor)
from bokeh.document import Document
from bokeh.session import Session
from bokeh.sampledata.autompg2 import autompg2 as mpg
class DataTables(object):
def __init__(self):
self.document = Document()
self.session = Session()
self.session.use_doc('data_tables_server')
self.session.load_document(self.document)
self.manufacturer_filter = None
self.model_filter = None
self.transmission_filter = None
self.drive_filter = None
self.class_filter = None
self.source = ColumnDataSource()
self.update_data()
self.document.add(self.create())
self.session.store_document(self.document)
def create(self):
manufacturers = sorted(mpg["manufacturer"].unique())
models = sorted(mpg["model"].unique())
transmissions = sorted(mpg["trans"].unique())
drives = sorted(mpg["drv"].unique())
classes = sorted(mpg["class"].unique())
manufacturer_select = Select(title="Manufacturer:", value="All", options=["All"] + manufacturers)
manufacturer_select.on_change('value', self.on_manufacturer_change)
model_select = Select(title="Model:", value="All", options=["All"] + models)
model_select.on_change('value', self.on_model_change)
transmission_select = Select(title="Transmission:", value="All", options=["All"] + transmissions)
transmission_select.on_change('value', self.on_transmission_change)
drive_select = Select(title="Drive:", value="All", options=["All"] + drives)
drive_select.on_change('value', self.on_drive_change)
class_select = Select(title="Class:", value="All", options=["All"] + classes)
class_select.on_change('value', self.on_class_change)
columns = [
TableColumn(field="manufacturer", title="Manufacturer", editor=SelectEditor(options=manufacturers), formatter=StringFormatter(font_style="bold")),
TableColumn(field="model", title="Model", editor=StringEditor(completions=models)),
TableColumn(field="displ", title="Displacement", editor=NumberEditor(step=0.1), formatter=NumberFormatter(format="0.0")),
TableColumn(field="year", title="Year", editor=IntEditor()),
TableColumn(field="cyl", title="Cylinders", editor=IntEditor()),
TableColumn(field="trans", title="Transmission", editor=SelectEditor(options=transmissions)),
TableColumn(field="drv", title="Drive", editor=SelectEditor(options=drives)),
TableColumn(field="class", title="Class", editor=SelectEditor(options=classes)),
TableColumn(field="cty", title="City MPG", editor=IntEditor()),
TableColumn(field="hwy", title="Highway MPG", editor=IntEditor()),
]
data_table = DataTable(source=self.source, columns=columns, editable=True)
xdr = DataRange1d()
ydr = DataRange1d()
plot = Plot(title=None, x_range=xdr, y_range=ydr, plot_width=800, plot_height=300)
xaxis = LinearAxis(plot=plot)
plot.below.append(xaxis)
yaxis = LinearAxis(plot=plot)
ygrid = Grid(plot=plot, dimension=1, ticker=yaxis.ticker)
plot.left.append(yaxis)
cty_glyph = Circle(x="index", y="cty", fill_color="#396285", size=8, fill_alpha=0.5, line_alpha=0.5)
hwy_glyph = Circle(x="index", y="hwy", fill_color="#CE603D", size=8, fill_alpha=0.5, line_alpha=0.5)
cty = GlyphRenderer(data_source=self.source, glyph=cty_glyph)
hwy = GlyphRenderer(data_source=self.source, glyph=hwy_glyph)
tooltips = [
("Manufacturer", "@manufacturer"),
("Model", "@model"),
("Displacement", "@displ"),
("Year", "@year"),
("Cylinders", "@cyl"),
("Transmission", "@trans"),
("Drive", "@drv"),
("Class", "@class"),
]
cty_hover_tool = HoverTool(plot=plot, renderers=[cty], tooltips=tooltips + [("City MPG", "@cty")])
hwy_hover_tool = HoverTool(plot=plot, renderers=[hwy], tooltips=tooltips + [("Highway MPG", "@hwy")])
select_tool = BoxSelectTool(plot=plot, renderers=[cty, hwy], dimensions=['width'])
plot.tools.extend([cty_hover_tool, hwy_hover_tool, select_tool])
plot.renderers.extend([cty, hwy, ygrid])
controls = VBox(children=[manufacturer_select, model_select, transmission_select, drive_select, class_select], width=200)
top_panel = HBox(children=[controls, plot])
layout = VBox(children=[top_panel, data_table])
return layout
def on_manufacturer_change(self, obj, attr, _, value):
self.manufacturer_filter = None if value == "All" else value
self.update_data()
def on_model_change(self, obj, attr, _, value):
self.model_filter = None if value == "All" else value
self.update_data()
def on_transmission_change(self, obj, attr, _, value):
self.transmission_filter = None if value == "All" else value
self.update_data()
def on_drive_change(self, obj, attr, _, value):
self.drive_filter = None if value == "All" else value
self.update_data()
def on_class_change(self, obj, attr, _, value):
self.class_filter = None if value == "All" else value
self.update_data()
def update_data(self):
df = mpg
if self.manufacturer_filter:
df = df[df["manufacturer"] == self.manufacturer_filter]
if self.model_filter:
df = df[df["model"] == self.model_filter]
if self.transmission_filter:
df = df[df["trans"] == self.transmission_filter]
if self.drive_filter:
df = df[df["drv"] == self.drive_filter]
if self.class_filter:
df = df[df["class"] == self.class_filter]
self.source.data = ColumnDataSource.from_df(df)
self.session.store_document(self.document)
def run(self, do_view=False, poll_interval=0.5):
link = self.session.object_link(self.document.context)
print("Please visit %s to see the plots" % link)
if do_view: view(link)
print("\npress ctrl-C to exit")
self.session.poll_document(self.document)
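# A standalone, hypothetical sketch (not part of the original example) of
# the chained-filter pattern used in update_data() above; it runs with
# plain pandas and no Bokeh session:
def _filter_mpg(df, **filters):
    # Keep only the rows that match every non-None column filter.
    for column, value in filters.items():
        if value is not None:
            df = df[df[column] == value]
    return df
# e.g. _filter_mpg(mpg, manufacturer="audi", drv="4")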
if __name__ == "__main__":
data_tables = DataTables()
data_tables.run(True)
| lukebarnard1/bokeh | examples/glyphs/data_tables_server.py | Python | bsd-3-clause | 6,815 | ["VisIt"] | bac92349a5189f78f4d468875aaedf3f665a60cad48d8e2f9cb8285ad266254a |
#! /usr/bin/env python
#------------------------------------------------------------------------------
# THIS FILE HAS BEEN MODIFIED FROM THE ORIGINAL (freeze.py).
# THE MODIFICATIONS ENABLE FREEZING OF MODULES/PACKAGES WITHOUT IMPORTING THEM.
# THAT MAKES IT POSSIBLE TO FREEZE PARAVIEW MODULES WITHOUT IMPORTING THE
# CORRESPONDING C/C++ LIBRARIES.
#------------------------------------------------------------------------------
"""Freeze a Python script into a binary.
usage: freeze [options...] script [module]...
Options:
-p prefix: This is the prefix used when you ran ``make install''
in the Python build directory.
(If you never ran this, freeze won't work.)
The default is whatever sys.prefix evaluates to.
It can also be the top directory of the Python source
tree; then -P must point to the build tree.
-P exec_prefix: Like -p but this is the 'exec_prefix', used to
install objects etc. The default is whatever sys.exec_prefix
evaluates to, or the -p argument if given.
If -p points to the Python source tree, -P must point
to the build tree, if different.
-e extension: A directory containing additional .o files that
may be used to resolve modules. This directory
should also have a Setup file describing the .o files.
On Windows, the name of a .INI file describing one
or more extensions is passed.
More than one -e option may be given.
-o dir: Directory where the output files are created; default '.'.
-m: Additional arguments are module names instead of filenames.
-a package=dir: Additional directories to be added to the package's
__path__. Used to simulate directories added by the
package at runtime (eg, by OpenGL and win32com).
More than one -a option may be given for each package.
-l file: Pass the file to the linker (windows only)
-d: Debugging mode for the module finder.
-q: Make the module finder totally quiet.
-h: Print this help message.
-x module Exclude the specified module. It will still be imported
by the frozen binary if it exists on the host system.
-X module Like -x, except the module can never be imported by
the frozen binary.
-E: Freeze will fail if any modules can't be found (that
were not excluded using -x or -X).
-i filename: Include a file with additional command line options. Used
to prevent command lines growing beyond the capabilities of
the shell/OS. All arguments specified in filename
are read and the -i option replaced with the parsed
params (note - quoting args in this file is NOT supported)
-s subsystem: Specify the subsystem (For Windows only.);
'console' (default), 'windows', 'service' or 'com_dll'
-w: Toggle Windows (NT or 95) behavior.
(For debugging only -- on a win32 platform, win32 behavior
is automatic.)
-r prefix=f: Replace path prefix.
Replace prefix with f in the source path references
contained in the resulting binary.
Arguments:
script: The Python script to be executed by the resulting binary.
module ...: Additional Python modules (referenced by pathname)
that will be included in the resulting binary. These
may be .py or .pyc files. If -m is specified, these are
             module names that are searched for in the path instead.
If -p is specified, all packages and modules under that path will
be frozen (without import dependency tracking).
NOTES:
In order to use freeze successfully, you must have built Python and
installed it ("make install").
The script should not use modules provided only as shared libraries;
if it does, the resulting binary is not self-contained.
"""
# Import standard modules
import modulefinder
import getopt
import os
import sys
# Import the freeze-private modules
import checkextensions
import makeconfig
import makefreeze
import makemakefile
import parsesetup
import bkfile
# Main program
def main():
# overridable context
prefix = None # settable with -p option
exec_prefix = None # settable with -P option
extensions = []
exclude = [] # settable with -x option
addn_link = [] # settable with -l, but only honored under Windows.
path = sys.path[:]
modargs = 0
debug = 1
odir = ''
win = sys.platform[:3] == 'win'
replace_paths = [] # settable with -r option
error_if_any_missing = 0
# default the exclude list for each platform
if win: exclude = exclude + [
'dos', 'dospath', 'mac', 'macpath', 'macfs', 'MACFS', 'posix',
'os2', 'ce', 'riscos', 'riscosenviron', 'riscospath',
]
fail_import = exclude[:]
# output files
frozen_c = 'frozen.c'
config_c = 'config.c'
target = 'a.out' # normally derived from script name
makefile = 'Makefile'
subsystem = 'console'
# parse command line by first replacing any "-i" options with the
# file contents.
pos = 1
while pos < len(sys.argv)-1:
# last option can not be "-i", so this ensures "pos+1" is in range!
if sys.argv[pos] == '-i':
try:
options = open(sys.argv[pos+1]).read().split()
except IOError, why:
usage("File name '%s' specified with the -i option "
"can not be read - %s" % (sys.argv[pos+1], why) )
# Replace the '-i' and the filename with the read params.
sys.argv[pos:pos+2] = options
pos = pos + len(options) - 1 # Skip the name and the included args.
pos = pos + 1
# Now parse the command line with the extras inserted.
try:
opts, args = getopt.getopt(sys.argv[1:], 'r:a:dEe:hmo:p:P:qs:wX:x:l:')
except getopt.error, msg:
usage('getopt error: ' + str(msg))
    # process option arguments
for o, a in opts:
if o == '-h':
print __doc__
return
if o == '-d':
debug = debug + 1
if o == '-e':
extensions.append(a)
if o == '-m':
modargs = 1
if o == '-o':
odir = a
if o == '-p':
prefix = a
if o == '-P':
exec_prefix = a
if o == '-q':
debug = 0
if o == '-w':
win = not win
if o == '-s':
if not win:
usage("-s subsystem option only on Windows")
subsystem = a
if o == '-x':
exclude.append(a)
if o == '-X':
exclude.append(a)
fail_import.append(a)
if o == '-E':
error_if_any_missing = 1
if o == '-l':
addn_link.append(a)
if o == '-a':
apply(modulefinder.AddPackagePath, tuple(a.split("=", 2)))
if o == '-r':
f,r = a.split("=", 2)
replace_paths.append( (f,r) )
# modules that are imported by the Python runtime
implicits = []
for module in ('site', 'warnings',):
if module not in exclude:
implicits.append(module)
# default prefix and exec_prefix
if not exec_prefix:
if prefix:
exec_prefix = prefix
else:
exec_prefix = sys.exec_prefix
if not prefix:
prefix = sys.prefix
# determine whether -p points to the Python source tree
ishome = os.path.exists(os.path.join(prefix, 'Python', 'ceval.c'))
# locations derived from options
version = sys.version[:3]
if win:
extensions_c = 'frozen_extensions.c'
if ishome:
print "(Using Python source directory)"
binlib = exec_prefix
incldir = os.path.join(prefix, 'Include')
config_h_dir = exec_prefix
config_c_in = os.path.join(prefix, 'Modules', 'config.c.in')
frozenmain_c = os.path.join(prefix, 'Python', 'frozenmain.c')
makefile_in = os.path.join(exec_prefix, 'Makefile')
if win:
frozendllmain_c = os.path.join(exec_prefix, 'Pc\\frozen_dllmain.c')
else:
binlib = os.path.join(exec_prefix,
'lib', 'python%s' % version, 'config')
incldir = os.path.join(prefix, 'include', 'python%s' % version)
config_h_dir = os.path.join(exec_prefix, 'include',
'python%s' % version)
config_c_in = os.path.join(binlib, 'config.c.in')
frozenmain_c = os.path.join(binlib, 'frozenmain.c')
makefile_in = os.path.join(binlib, 'Makefile')
frozendllmain_c = os.path.join(binlib, 'frozen_dllmain.c')
supp_sources = []
defines = []
includes = ['-I' + incldir, '-I' + config_h_dir]
# sanity check of directories and files
check_dirs = [prefix, exec_prefix, binlib, incldir]
if not win:
# These are not directories on Windows.
check_dirs = check_dirs + extensions
for dir in check_dirs:
if not os.path.exists(dir):
usage('needed directory %s not found' % dir)
if not os.path.isdir(dir):
usage('%s: not a directory' % dir)
if win:
files = supp_sources + extensions # extensions are files on Windows.
else:
files = [config_c_in, makefile_in] + supp_sources
for file in supp_sources:
if not os.path.exists(file):
usage('needed file %s not found' % file)
if not os.path.isfile(file):
usage('%s: not a plain file' % file)
if not win:
for dir in extensions:
setup = os.path.join(dir, 'Setup')
if not os.path.exists(setup):
usage('needed file %s not found' % setup)
if not os.path.isfile(setup):
usage('%s: not a plain file' % setup)
# check that enough arguments are passed
if not args:
usage('at least one filename argument required')
# check that file arguments exist
for arg in args:
if arg == '-m' or arg == '-p':
break
# if user specified -m on the command line before _any_
# file names, then nothing should be checked (as the
# very first file should be a module name)
if modargs:
break
if not os.path.exists(arg):
usage('argument %s not found' % arg)
if not os.path.isfile(arg):
usage('%s: not a plain file' % arg)
# process non-option arguments
scriptfile = args[0]
modules = args[1:]
# derive target name from script name
base = os.path.basename(scriptfile)
base, ext = os.path.splitext(base)
if base:
if base != scriptfile:
target = base
else:
target = base + '.bin'
# handle -o option
base_frozen_c = frozen_c
base_config_c = config_c
base_target = target
if odir and not os.path.isdir(odir):
try:
os.mkdir(odir)
print "Created output directory", odir
except os.error, msg:
usage('%s: mkdir failed (%s)' % (odir, str(msg)))
base = ''
if odir:
base = os.path.join(odir, '')
frozen_c = os.path.join(odir, frozen_c)
config_c = os.path.join(odir, config_c)
target = os.path.join(odir, target)
makefile = os.path.join(odir, makefile)
if win: extensions_c = os.path.join(odir, extensions_c)
# Handle special entry point requirements
# (on Windows, some frozen programs do not use __main__, but
    # import the module directly, e.g. DLLs, Services, etc.)
custom_entry_point = None # Currently only used on Windows
python_entry_is_main = 1 # Is the entry point called __main__?
# handle -s option on Windows
if win:
import winmakemakefile
try:
custom_entry_point, python_entry_is_main = \
winmakemakefile.get_custom_entry_point(subsystem)
except ValueError, why:
usage(why)
# Actual work starts here...
# collect all modules of the program
dir = os.path.dirname(scriptfile)
path[0] = dir
mf = modulefinder.ModuleFinder(path, debug, exclude, replace_paths)
if win and subsystem=='service':
# If a Windows service, then add the "built-in" module.
mod = mf.add_module("servicemanager")
mod.__file__="dummy.pyd" # really built-in to the resulting EXE
for mod in implicits:
mf.import_hook(mod)
_current = None
directories = []
for mod in modules:
if mod == '-m':
_current = mod
elif mod == '-p':
_current = mod
elif _current == '-p':
_current = None
if not os.path.exists(mod):
usage('needed directory %s not found' % mod)
if not os.path.isdir(mod):
usage('%s: not a directory' % mod)
directories.append(mod)
elif _current == '-m':
_current = None
if mod[-2:] == '.*':
mf.import_hook(mod[:-2], None, ["*"])
else:
mf.import_hook(mod)
else:
_current = None
mf.load_file(mod)
# process directories and add modules/packages found under those directories
# to the paths
add_packages(mf, directories, exclude)
# Add the main script as either __main__, or the actual module name.
if python_entry_is_main:
mf.run_script(scriptfile)
else:
mf.load_file(scriptfile)
if debug > 0:
mf.report()
print
dict = mf.modules
if error_if_any_missing:
missing = mf.any_missing()
if missing:
sys.exit("There are some missing modules: %r" % missing)
# generate output for frozen modules
files = makefreeze.makefreeze(base, dict, debug, custom_entry_point,
fail_import)
# look for unfrozen modules (builtin and of unknown origin)
builtins = []
unknown = []
mods = dict.keys()
mods.sort()
for mod in mods:
if dict[mod].__code__:
continue
if not dict[mod].__file__:
builtins.append(mod)
else:
unknown.append(mod)
# search for unknown modules in extensions directories (not on Windows)
addfiles = []
frozen_extensions = [] # Windows list of modules.
if unknown or (not win and builtins):
if not win:
addfiles, addmods = \
checkextensions.checkextensions(unknown+builtins,
extensions)
for mod in addmods:
if mod in unknown:
unknown.remove(mod)
builtins.append(mod)
else:
# Do the windows thang...
import checkextensions_win32
# Get a list of CExtension instances, each describing a module
# (including its source files)
frozen_extensions = checkextensions_win32.checkextensions(
unknown, extensions, prefix)
for mod in frozen_extensions:
unknown.remove(mod.name)
# report unknown modules
if unknown:
sys.stderr.write('Warning: unknown modules remain: %s\n' %
' '.join(unknown))
# windows gets different treatment
if win:
# Taking a shortcut here...
import winmakemakefile, checkextensions_win32
checkextensions_win32.write_extension_table(extensions_c,
frozen_extensions)
# Create a module definition for the bootstrap C code.
xtras = [frozenmain_c, os.path.basename(frozen_c),
frozendllmain_c, os.path.basename(extensions_c)] + files
maindefn = checkextensions_win32.CExtension( '__main__', xtras )
frozen_extensions.append( maindefn )
outfp = open(makefile, 'w')
try:
winmakemakefile.makemakefile(outfp,
locals(),
frozen_extensions,
os.path.basename(target))
finally:
outfp.close()
return
# --- We skip this part since CMake-ified Python doesn't have config_c_in
# file present and besides, we don't need the make files.
## generate config.c and Makefile
#builtins.sort()
#infp = open(config_c_in)
#outfp = bkfile.open(config_c, 'w')
#try:
# makeconfig.makeconfig(infp, outfp, builtins)
#finally:
# outfp.close()
#infp.close()
#cflags = ['$(OPT)']
#cppflags = defines + includes
#libs = [os.path.join(binlib, 'libpython$(VERSION).a')]
#somevars = {}
#if os.path.exists(makefile_in):
# makevars = parsesetup.getmakevars(makefile_in)
# for key in makevars.keys():
# somevars[key] = makevars[key]
#somevars['CFLAGS'] = ' '.join(cflags) # override
#somevars['CPPFLAGS'] = ' '.join(cppflags) # override
#files = [base_config_c, base_frozen_c] + \
# files + supp_sources + addfiles + libs + \
# ['$(MODLIBS)', '$(LIBS)', '$(SYSLIBS)']
#outfp = bkfile.open(makefile, 'w')
#try:
# makemakefile.makemakefile(outfp, somevars, files, base_target)
#finally:
## outfp.close()
# Done!
#if odir:
# print 'Now run "make" in', odir,
# print 'to build the target:', base_target
#else:
# print 'Now run "make" to build the target:', base_target
# Print usage message and exit
def usage(msg):
sys.stdout = sys.stderr
print "Error:", msg
print "Use ``%s -h'' for help" % sys.argv[0]
sys.exit(2)
def add_packages(mf, directories, exclude):
# manually add all packages under the given directories.
for dir in directories:
for dirpath, dirnames, filenames in os.walk(dir):
relpath = os.path.relpath(dirpath, dir)
if relpath == ".":
continue
packagename = relpath.replace('/', '.')
if packagename in exclude:
dirnames[:] = []
continue
if not "__init__.py" in filenames:
                # skip packages without __init__.py for now.
continue
# process all files in this package.
for filename in filenames:
if os.path.splitext(filename)[-1] != ".py":
continue
modulepath = None # this must be non-null for a package.
pathname = os.path.join(dirpath, filename)
if filename == "__init__.py":
modulename = packagename
modulepath = [pathname]
else:
modulename = \
packagename + "." + os.path.splitext(filename)[0]
mf.import_hook(modulename)
main()
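# Example invocation (hypothetical paths): freeze a script plus, without
# importing them, all packages found under a site-packages directory:
#
#   python freeze_paraview.py -o build myscript.py \
#       -p /path/to/paraview/site-packages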
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/ThirdParty/FreezePython/freeze/freeze_paraview.py | Python | gpl-3.0 | 19,322 | ["ParaView"] | 4bf1ec4eb0c296136efb67f8eb988f24641f2b08e88a0925049226739b726843 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for xc.lda."""
import tempfile
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import jax
import numpy as np
from pyscf.dft import libxc
import pyscf.gto
from pyscf.lib import parameters
from symbolic_functionals.syfes.xc import lda
jax.config.update('jax_enable_x64', True)
class XCLDATest(parameterized.TestCase):
def setUp(self):
super().setUp()
parameters.TMPDIR = tempfile.mkdtemp(dir=flags.FLAGS.test_tmpdir)
mol = pyscf.gto.M(
atom='''O 0. 0. 0.
H 0. -0.757 0.587
H 0. 0.757 0.587
''',
basis='def2svpd',
verbose=1)
ks = pyscf.dft.RKS(mol)
ks.xc = 'lda,lda'
ks.kernel()
ao = ks._numint.eval_ao(ks.mol, coords=ks.grids.coords, deriv=0)
self.rho = ks._numint.eval_rho2(
ks.mol, ao, mo_coeff=ks.mo_coeff, mo_occ=ks.mo_occ, xctype='LDA')
# construct a spin polarized density to test spin polarized case
zeta = 0.2
self.rhoa = 0.5 * (1 + zeta) * self.rho
self.rhob = 0.5 * (1 - zeta) * self.rho
@parameterized.parameters(
('lda_x', lda.e_x_lda_unpolarized),
('lda_c_pw', lda.e_c_lda_unpolarized),
)
def test_lda_xc_unpolarized_against_libxc(self, xc_name, xc_fun):
eps_xc_ref, (vrho_ref, _, _, _), _, _ = libxc.eval_xc(
xc_name, self.rho, spin=0, relativity=0, deriv=1)
e_xc_ref = eps_xc_ref * self.rho
e_xc, vrho = jax.vmap(jax.value_and_grad(xc_fun))(self.rho)
np.testing.assert_allclose(e_xc, e_xc_ref)
np.testing.assert_allclose(vrho, vrho_ref)
@parameterized.parameters(
('lda_x', lda.e_x_lda_polarized),
('lda_c_pw', lda.e_c_lda_polarized),
)
def test_lda_xc_polarized_against_libxc(self, xc_name, xc_fun):
eps_xc_ref, (vrho_ref, _, _, _), _, _ = libxc.eval_xc(
xc_name, (self.rhoa, self.rhob), spin=1, relativity=0, deriv=1)
e_xc_ref = eps_xc_ref * self.rho
vrhoa_ref, vrhob_ref = vrho_ref[:, 0], vrho_ref[:, 1]
e_xc, (vrhoa, vrhob) = jax.vmap(jax.value_and_grad(xc_fun, argnums=(0, 1)))(
self.rhoa, self.rhob)
np.testing.assert_allclose(e_xc, e_xc_ref)
np.testing.assert_allclose(vrhoa, vrhoa_ref)
np.testing.assert_allclose(vrhob, vrhob_ref)
# reference values are pre-computed using external codes
@parameterized.parameters(
(0., 0., 0.),
(0.5, -0.01588696391353700, -0.0168974057764630),
(1., -0.0345539265331996, -0.0366463870651906),
)
def test_decomposed_e_c_lda_unpolarized_use_jax(
self, rho, expected_e_c_ss, expected_e_c_os):
e_c_ss, e_c_os = lda.decomposed_e_c_lda_unpolarized(rho, use_jax=True)
np.testing.assert_allclose(e_c_ss, expected_e_c_ss)
np.testing.assert_allclose(e_c_os, expected_e_c_os)
@parameterized.parameters(
(0., 0., 0.),
(0.5, -0.01588696391353700, -0.0168974057764630),
(1., -0.0345539265331996, -0.0366463870651906),
)
def test_decomposed_e_c_lda_unpolarized_not_use_jax(
self, rho, expected_e_c_ss, expected_e_c_os):
e_c_ss, e_c_os = lda.decomposed_e_c_lda_unpolarized(rho, use_jax=False)
np.testing.assert_allclose(e_c_ss, expected_e_c_ss)
np.testing.assert_allclose(e_c_os, expected_e_c_os)
# reference values are pre-computed using external codes
@parameterized.parameters(
(0., 0., 0., 0., 0.),
(0., 0.5, 0., -0.0172769632665998, 0.),
(0.5, 0., -0.0172769632665998, 0., 0.),
(0.5, 1., -0.0172769632665998, -0.0374279447531902, -0.05301827865038937),
(1., 0.5, -0.0374279447531902, -0.0172769632665998, -0.05301827865038937),
(1., 1., -0.0374279447531902, -0.0374279447531902, -0.0791659658806080),
)
def test_decomposed_e_c_lda_polarized_use_jax(
self, rhoa, rhob, expected_e_c_aa, expected_e_c_bb, expected_e_c_ab):
e_c_aa, e_c_bb, e_c_ab = lda.decomposed_e_c_lda_polarized(
rhoa, rhob, use_jax=True)
np.testing.assert_allclose(e_c_aa, expected_e_c_aa)
np.testing.assert_allclose(e_c_bb, expected_e_c_bb)
np.testing.assert_allclose(e_c_ab, expected_e_c_ab)
@parameterized.parameters(
(0., 0., 0., 0., 0.),
(0., 0.5, 0., -0.0172769632665998, 0.),
(0.5, 0., -0.0172769632665998, 0., 0.),
(0.5, 1., -0.0172769632665998, -0.0374279447531902, -0.05301827865038937),
(1., 0.5, -0.0374279447531902, -0.0172769632665998, -0.05301827865038937),
(1., 1., -0.0374279447531902, -0.0374279447531902, -0.0791659658806080),
)
def test_decomposed_e_c_lda_polarized_not_use_jax(
self, rhoa, rhob, expected_e_c_aa, expected_e_c_bb, expected_e_c_ab):
e_c_aa, e_c_bb, e_c_ab = lda.decomposed_e_c_lda_polarized(
rhoa, rhob, use_jax=False)
np.testing.assert_allclose(e_c_aa, expected_e_c_aa)
np.testing.assert_allclose(e_c_bb, expected_e_c_bb)
np.testing.assert_allclose(e_c_ab, expected_e_c_ab)
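# A minimal, hypothetical sketch (not part of the test suite) of the
# jax.vmap(jax.value_and_grad(...)) pattern the tests above rely on: it
# evaluates a toy integrand and its derivative w.r.t. rho pointwise.
def _demo_value_and_grad():
  import jax.numpy as jnp
  def toy_e_x(rho):
    # Toy LDA-like integrand, not a real exchange functional.
    return -0.75 * rho**(4.0 / 3.0)
  rho = jnp.linspace(0.1, 1.0, 5)
  # Returns (e, vrho): the energy density and d(e)/d(rho) per grid point.
  return jax.vmap(jax.value_and_grad(toy_e_x))(rho)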
if __name__ == '__main__':
absltest.main()
| google-research/google-research | symbolic_functionals/syfes/xc/lda_test.py | Python | apache-2.0 | 5,545 | ["PySCF"] | d4a663b5b289c8e1b5f9cf6005c6f12c7d9561a0bb11d6f1ff44da5e12cd75ac |
""" gpr_fit_2d.py
An example that uses functionality from the GPR module
to regress a 2-Dimensional Function
"""
# ------------------------------------------------------------
# Imports
# ------------------------------------------------------------
import time, os, sys, copy
import numpy as np
from numpy import pi
import pylab as plt
from matplotlib import cm
import VyPy
from VyPy.regression import gpr, active_subspace
from VyPy import optimize as opt
from warnings import warn, simplefilter
simplefilter('error')
# ------------------------------------------------------------
# Main
# ------------------------------------------------------------
def main():
# ---------------------------------------------------------
# Setup
# ---------------------------------------------------------
# Choose the function and bounds
# the target function, defined at the end of this file
The_Func = composite_function
The_Con = composite_constraint
# hypercube bounds
ND = 4 # dimensions
XB = np.array( [[-2.,2.]]*ND )
# ---------------------------------------------------------
# Training Data
# ---------------------------------------------------------
# Select training data randomly with Latin Hypercube Sampling
# number of samples
ns = 250
## ns = 10 # try for lower model accuracy
# perform sampling with latin hypercube
XS = VyPy.sampling.lhc_uniform(XB,ns)
# evaluate function and gradients
FS,DFS = The_Func(XS)
CS,DCS = The_Con(XS)
# ---------------------------------------------------------
# Machine Learning
# ---------------------------------------------------------
# number of active domain dimensions
N_AS = 1
#Model = gpr.library.Gaussian(XB,XS,FS,DFS)
Model = active_subspace.build_surrogate(XS,FS,DFS,XB,N_AS,probNze=-2.0)
# pull function handles for plotting and evaluating
g_x = Model.g_x # the surrogate
#g_x = Model.predict_YI
f_x = lambda(Z): The_Func(Z)[0] # the truth function
# ---------------------------------------------------------
# Evaluate a Testing Set
# ---------------------------------------------------------
# Run a test sample on the functions
nt = 200 # number of test samples
XT = VyPy.sampling.lhc_uniform(XB,nt)
# functions at training data locations
FSI = g_x(XS)
FST = f_x(XS)
# functions at grid testing locations
FTI = g_x(XT)
FTT = f_x(XT)
# ---------------------------------------------------------
# Model Errors
# ---------------------------------------------------------
# estimate the rms training and testing errors
print 'Estimate Modeling Errors ...'
# the scaling object
Scaling = Model.M_Y.Scaling # careful, this is in the active subspace
#Scaling = Model.Scaling
# scale data - training samples
FSI_scl = Scaling.Y.set_scaling(FSI)
FST_scl = Scaling.Y.set_scaling(FST)
# scale data - grid testing samples
FTI_scl = FTI / Scaling.Y # alternate syntax
FTT_scl = FTT / Scaling.Y
# rms errors
ES_rms = np.sqrt( np.mean( (FSI_scl-FST_scl)**2 ) )
EI_rms = np.sqrt( np.mean( (FTI_scl-FTT_scl)**2 ) )
print ' Training Error = %.3f%%' % (ES_rms*100.)
print ' Testing Error = %.3f%%' % (EI_rms*100.)
# ---------------------------------------------------------
# Optimization
# ---------------------------------------------------------
problem = opt.Problem()
var = opt.Variable()
var.tag = 'x'
var.initial = np.array([[0.0] * ND])
var.bounds = XB.T
problem.variables.append(var)
obj = opt.Objective()
obj.evaluator = lambda X: {'f' : f_x(X['x'])+np.linalg.norm(X['x'],axis=1)/10.}
#obj.evaluator = lambda X: {'f' : g_x(X['x'])+np.linalg.norm(X['x'],axis=1)/10.}
obj.tag = 'f'
problem.objectives.append(obj)
driver = opt.drivers.CMA_ES(0)
result = driver.run(problem)
Xmin = result[1]['x']
# ---------------------------------------------------------
# Plotting
# ---------------------------------------------------------
# plot the estimated and truth surface, evaluate rms error
plt.figure(0)
plt.plot(Model.d,'bo-')
plt.title('Eigenvalue Powers')
print 'Plot Response Surface ...'
# center point
#X0 = [1.0] * ND
X0 = Xmin
## rosenbrock local minimum for 4 <= dim <= 7
#X0[0] = -1.
# plot spider legs
fig = plt.figure(1)
ax = VyPy.plotting.spider_axis(fig,X0,XB)
VyPy.plotting.spider_trace(ax,g_x,X0,XB,100,'b-',lw=2,label='Fit')
VyPy.plotting.spider_trace(ax,f_x,X0,XB,100,'r-',lw=2,label='Truth')
ax.legend()
ax.set_zlabel('F')
# in active domain
U = Model.U
Y0 = active_subspace.project.simple(X0,U)
g_y = Model.g_y
YB = Model.YB
# plot spider legs
fig = plt.figure(2)
ax = VyPy.plotting.spider_axis(fig,Y0,YB)
VyPy.plotting.spider_trace(ax,g_y,Y0,YB,100,'b-',lw=2,label='Fit')
ax.legend()
ax.set_zlabel('F')
    plt.draw()
plt.show(block=True)
# Done!
return
#: def main()
# -------------------------------------------------------------
# Test Functions
# -------------------------------------------------------------
from VyPy.tools import atleast_2d
# --- Rosenbrock Function ---
def rosenbrock_function(X):
X = atleast_2d(X)
D = X.shape[1]
Y = 0.
DY = X*0.
for I in range(D):
if I < D-1:
Y = Y + 100.*( X[:,I+1]-X[:,I]**2. )**2. + ( 1-X[:,I] )**2.
DY[:,I] = DY[:,I] - 400.*( X[:,I+1]-X[:,I]**2. ) * X[:,I] - 2.*( 1.-X[:,I] )
if I>0:
DY[:,I] = DY[:,I] + 200.*( X[:,I]-X[:,I-1]**2. )
Y = atleast_2d(Y,'col')
return Y,DY
# --- Rastrigin Function ---
def rastrigin_function(X):
X = atleast_2d(X)
scl = 1.0
sgn = 1.0
X = X * scl
D = X.shape[1]
Y = sgn*( 10.*D + np.sum( 10*X**2. - 10.*np.cos(2.*pi*X) , 1 ) );
DY = sgn*( 2.*X + 20.*pi*np.sin(2.*pi*X) ) * scl;
Y = atleast_2d(Y,'col')
return Y,DY
# --- Parabolic Function ---
def parabolic_function(X):
X = atleast_2d(X)
D = X.shape[1]
C = np.ones([1,D]) * 10
#C = np.array([ np.arange(D)+1. ])
Y = np.dot( X**2. , C.T ) - 10.
DY = 2.*X*C
Y = atleast_2d(Y,'col')
return Y,DY
# --- Hyperplane Function ---
def hyperplane_function(X):
X = atleast_2d(X) + 0.5
N,D = X.shape
C = np.array([ np.arange(D)+1. ])
I = np.ones([N,D])
Y = np.dot( X , C.T )
DY = C * I
Y = atleast_2d(Y,'col')
return Y,DY
def rotation_function(X):
X = atleast_2d(X) + 0.0
N,D = X.shape
C = np.ones([1,D]) * 1/D
#C = np.array([ np.arange(D)+1. ])
Y = np.dot( X , C.T )
I = np.ones([N,D])
DY = C * I
Y = atleast_2d(Y,'col')
return Y,DY
def composite_function(X):
X = atleast_2d(X)
N,D = X.shape
Y = np.zeros([N,1])
DY = np.zeros([N,D])
#y,dy = hyperplane_function(X)
#Y += y
#DY += dy
k = D
f,df = rotation_function(X[:,0:k])
g,dg = rastrigin_function(f)
Y += g
DY[:,0:k] += dg * df
return Y,DY
def rotation_function_2(X):
X = atleast_2d(X) + 0.0
N,D = X.shape
C = np.ones([1,D]) * 1/D
    C[0,0:D/2] = -C[0,0:D/2]
#C = np.array([ np.arange(D)+1. ])
Y = np.dot( X , C.T )
I = np.ones([N,D])
DY = C * I
Y = atleast_2d(Y,'col')
return Y,DY
def composite_constraint(X):
X = atleast_2d(X)
N,D = X.shape
Y = np.zeros([N,1])
DY = np.zeros([N,D])
#y,dy = hyperplane_function(X)
#Y += y
#DY += dy
f,df = rotation_function_2(X)
g,dg = parabolic_function(f)
Y += g
DY += dg * df
return Y,DY
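# A small, hypothetical finite-difference check (not part of the original
# script) for the analytic gradients returned by the test functions above:
def check_gradient(func, x, eps=1e-6):
    x = atleast_2d(np.array(x, dtype=float))
    _, dy = func(x)
    num = np.zeros_like(x)
    for i in range(x.shape[1]):
        xp = x.copy(); xp[0,i] += eps
        xm = x.copy(); xm[0,i] -= eps
        # central difference of the scalar output w.r.t. coordinate i
        num[0,i] = (func(xp)[0] - func(xm)[0]) / (2.*eps)
    return np.max(np.abs(num - dy))
# e.g. check_gradient(parabolic_function, [0.3,-0.2,0.1,0.5])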
# -------------------------------------------------------------
# Start Main
# -------------------------------------------------------------
if __name__=='__main__':
main()
| aerialhedgehog/VyPy | tests/_experimental/activesubspace_tests_01.py | Python | bsd-3-clause | 8,500 | ["Gaussian"] | be74c0067c0a4fc406baf5cd08731a440d0ec8cc013f371440ddb88729a2b1d4 |
#!/usr/bin/python
import numpy as np
import numpy.linalg as lg
import sys
import subprocess as sp
import os
import errno
import shutil
import re
import numpy.linalg as lg
from __tools__ import MyParser
from __tools__ import make_sure_path_exists
from __tools__ import XmlParser
from __tools__ import cd
from __tools__ import XmlWriter
from __tools__ import RepresentsInt
from __exciton__ import readexcitonlogfile
parser=MyParser(description="Enviroment to do numerical polarisation calculations with gwbse and gaussian")
parser.add_argument("--template","-t",type=str,required=True,help="Folder, from which to take votca-optionfiles from")
parser.add_argument("--options","-o",type=str,required=True,help="optionfile")
parser.add_argument("--setup", action='store_const', const=1, default=0,help="Setup folders")
parser.add_argument("--run", action='store_const', const=1, default=0,help="Run jobs")
parser.add_argument("--read", action='store_const', const=1, default=0,help="Readout outputfiles")
args=parser.parse_args()
BohrtoAngstroem=0.5291772109
b2a3=BohrtoAngstroem**3
root=XmlParser(args.options)
h=float(root.find("fieldstrength").text)
tags=(root.find("tags").text).split()
if h < 10E-5:
print "Aborting. Field strength is too small"
sys.exit()
def copyfromtemplate(path):
base=os.path.realpath('.')
#print base
template=os.path.join(base, "TEMPLATE")
#print template
try:
shutil.copytree(template,path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
class job(object):
def __init__(self,x,y,z):
self.shift=np.array([x,y,z])
self.identifier=self.convarray2identifier(self.shift)
self.energy=0.0
self.energydft=0.0
self.energybse=0.0
self.gaussianfield=""
self.path=os.path.realpath('.')
self.writefield()
self.foldername="gaussian{}".format(self.identifier)
self.path=os.path.join(self.path,self.foldername)
def convarray2identifier(self,nparray):
ident=""
if nparray[0]>0:
ident+="+x"
elif nparray[0]<0:
ident+="-x"
if nparray[1]>0:
ident+="+y"
elif nparray[1]<0:
ident+="-y"
if nparray[2]>0:
ident+="+z"
elif nparray[2]<0:
ident+="-z"
return ident
def writefield(self):
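        # Presumably the field specification read by Gaussian's field=read
        # keyword (added in modcomfile below): the uniform field vector,
        # followed by zeroed higher-order field components (6, 10 and 15
        # entries respectively).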
line1="{0:1.4f} {1:1.4f} {2:1.4f}\n".format(self.shift[0],self.shift[1],self.shift[2])
line2="0.0 0.0 0.0 0.0 0.0 0.0\n"
line3="0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0\n"
line4="0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0\n\n"
return line1+line2+line3+line4
def makefolder(self):
#print self.path
copyfromtemplate(self.foldername)
root=XmlParser("{}/exciton.xml".format(self.foldername))
exciton=root.find("exciton")
gwbseengine=exciton.find("gwbse_engine")
gwbseengine.find("tasks").text="input"
XmlWriter(root,"{}/exciton.xml".format(self.foldername))
with cd(self.foldername):
sp.call("xtp_tools -e exciton -o exciton.xml > exciton.log",shell=True)
self.modcomfile("system.com")
gwbseengine.find("tasks").text="dft,parse,gwbse"
XmlWriter(root,"{}/exciton.xml".format(self.foldername))
def modcomfile(self,comfile):
keywords=["scf=tight","field=read"]
content=[]
breaks=0
check=False
with open(comfile,"r") as f:
for line in f.readlines():
if line=="\n":
breaks+=1
elif line[0]=="#":
if line[1]!="p":
line="#p "+line[1:]
for keyword in keywords:
if keyword not in line:
line="{} {}\n".format(line[:-1],keyword)
content.append(line)
if breaks==4 and check==False:
self.writefield()
content.append(self.writefield())
check=True
with open(comfile,"w") as f:
for line in content:
f.write(line)
def runjob(self):
print "Running job {}".format(self.identifier)
with cd(self.foldername):
sp.call("xtp_tools -e exciton -o exciton.xml > exciton.log",shell=True)
def readlogfilebse(self,tag):
logfile=os.path.join(self.path,"exciton.log")
singlets=False
triplets=False
state=None
if "s"==tag[0]:
singlets=True
elif "t"==tag[0]:
triplets=True
else:
print "Error: Tag {} not known. Exiting..".format(tag)
sys.exit()
if RepresentsInt(tag[1:]):
state=int(tag[1:])
results=readexcitonlogfile(logfile,dft=True,singlets=singlets,triplets=triplets)
if singlets:
self.energybse=(results[4][state-1])/27.211385
elif triplets:
self.energybse=(results[5][state-1])/27.211385
self.energydft=results[0][2]/27.211385
self.energy=self.energybse+self.energydft
print "{}\t DFT Energy[Hartree]: {:1.5f}\t BSE Energy {} [Hartree]: {:1.5f}\t Total [Hartree]: {:1.5f}".format(self.identifier,self.energydft,tag,self.energybse,self.energy)
class Polarisation(object):
def __init__(self,h):
self.folder=""
self.joblist=[]
self.pol=np.zeros((3,3))
self.poldft=np.zeros((3,3))
self.h=h
def setupjobs(self):
depth=1
h=self.h
for i in range(-depth,depth+1):
for j in range(-depth,depth+1):
for k in range(-depth,depth+1):
if (i==j or i==k or j==k):
if (i**2+j**2+k**2<3 ):
self.joblist.append(job(i*h,j*h,k*h))
def createfolders(self):
for job in self.joblist:
job.makefolder()
def runjobs(self):
i=1
for job in self.joblist:
print "Running job {} of {}.".format(i,len(self.joblist))
job.runjob()
i+=1
def readlogs(self,tag):
for job in self.joblist:
job.readlogfilebse(tag)
def E(self,string):
energy=0.0
for job in self.joblist:
if job.identifier==string:
energy=job.energy
#print string, energy
return energy
def Edft(self,string):
energy=0.0
for job in self.joblist:
if job.identifier==string:
energy=job.energydft
#print string, energy
return energy
def printjobs(self):
print "Setting up {} jobs:".format(len(self.joblist))
temp=[]
for job in self.joblist:
temp.append(job.identifier)
print " ".join(temp)
def writelogfile(self,filename,tag):
print "Writing output for state {} to {}".format(tag,filename)
with open(filename,"w") as f:
f.write("\nDiag Polarisation Tensor of state {} with field {} au in Angstroem**3 \n".format(tag,h))
f.write(np.array_str(b2a3*self.diagpol))
f.write("\nVotca-Molpol entry of state {} with field {} au in Angstroem**3 \n".format(tag,h))
f.write("xx, xy, xz, yy, yz, zz\n")
f.write("{0:4.4f} 0.0 0.0 {1:4.4f} 0.0 {2:4.4f}\n".format(b2a3*self.diagpol[0,0],b2a3*self.diagpol[1,1],b2a3*self.diagpol[2,2]))
f.write("\n\n\nDiag Polarisation Tensor of groundstate with field {} au in Angstroem**3 \n".format(h))
f.write(np.array_str(b2a3*self.diagpoldft))
f.write("\n\n\nVotca-Molpol entry of groundstate with field {} au in Angstroem**3 \n".format(h))
f.write("xx, xy, xz, yy, yz, zz\n")
f.write("{0:4.4f} 0.0 0.0 {1:4.4f} 0.0 {2:4.4f}\n".format(b2a3*self.diagpoldft[0,0],b2a3*self.diagpoldft[1,1],b2a3*self.diagpoldft[2,2]))
f.write("\n\n\nPolarisation Tensor of groundstate with field {} au in atomic units \n".format(h))
f.write(np.array_str(self.poldft))
f.write("\nPolarisation Tensor of groundstate with field {} au in Angstroem**3 \n".format(h))
f.write(np.array_str(b2a3*self.poldft))
f.write("\nPolarisation Tensor of state {} with field {} au in atomic units \n".format(tag,h))
f.write(np.array_str(self.pol))
f.write("\nPolarisation Tensor of state {} with field {} au in Angstroem**3 \n".format(tag,h))
f.write(np.array_str(b2a3*self.pol))
f.write("\nConfiguration / Energy / EnergyGS / EnergyBSE\n")
for job in self.joblist:
f.write("{:4s} {:3.7f} {:3.7f} {:3.7f}\n".format(job.identifier,job.energy,job.energydft,job.energybse))
return
def calcpolarisation(self):
pol=np.zeros((3,3))
for i,a in zip(["x","y","z"],range(3)):
for j,b in zip(["x","y","z"],range(3)):
# ondiagonal
if (a==b):
#print i,j
pol[a,a]=(self.E("+"+i)-2*self.E("")+self.E("-"+i))/(h**2)
#upperhalf
if (a<b):
#print i,j
pol[a,b]=(self.E("+"+i+"+"+j)-self.E("+"+i)-self.E("+"+j)+2*self.E("")-self.E("-"+i)-self.E("-"+j)+self.E("-"+i+"-"+j))/(2*h**2)
self.pol=-1*(pol+pol.T-np.diag(pol.diagonal()))
self.diagpol=np.diag(lg.eigvalsh(self.pol))
print "Polarisation Tensor for excited state in au"
print np.array_str(self.pol)
def calcpolarisationdft(self):
pol=np.zeros((3,3))
for i,a in zip(["x","y","z"],range(3)):
for j,b in zip(["x","y","z"],range(3)):
# ondiagonal
if (a==b):
#print i,j
pol[a,a]=(self.Edft("+"+i)-2*self.Edft("")+self.Edft("-"+i))/(h**2)
#upperhalf
if (a<b):
#print i,j
pol[a,b]=(self.Edft("+"+i+"+"+j)-self.Edft("+"+i)-self.Edft("+"+j)+2*self.Edft("")-self.Edft("-"+i)-self.Edft("-"+j)+self.Edft("-"+i+"-"+j))/(2*h**2)
self.poldft=-1*(pol+pol.T-np.diag(pol.diagonal()))
self.diagpoldft=np.diag(lg.eigvalsh(self.poldft))
print "Polarisation Tensor for groundstate in au"
print np.array_str(self.poldft)
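# Note on the finite-difference scheme used above (sketch, mirrors
# calcpolarisation/calcpolarisationdft): the polarisability is the negative
# second derivative of the energy with respect to the applied field,
# approximated by central differences, e.g.
#
#   alpha_xx ~ -(E(+h,0,0) - 2*E(0,0,0) + E(-h,0,0)) / h**2
#   alpha_xy ~ -(E(+h,+h,0) - E(+h,0,0) - E(0,+h,0) + 2*E(0,0,0)
#                - E(-h,0,0) - E(0,-h,0) + E(-h,-h,0)) / (2*h**2)
#
# which is why setupjobs only generates field points with at most two equal,
# non-zero components (the i**2+j**2+k**2 < 3 filter above).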
test=Polarisation(h)
test.setupjobs()
test.printjobs()
if args.setup:
test.createfolders()
if args.run:
test.runjobs()
if args.read:
for tag in tags:
print "Evaluating polarisation for {}".format(tag)
test.readlogs(tag)
test.calcpolarisationdft()
test.calcpolarisation()
test.writelogfile("polarisation_{}.log".format(tag),tag)
|
12AngryMen/votca-scripts
|
xtp/xtp_Polarisation.py
|
Python
|
apache-2.0
| 9,256
|
[
"Gaussian"
] |
19dcae3dbefc3e7db843df220369839d99596df882c08aaf050b2c38fdc372eb
|
# -*- coding: utf-8 -*-
"""
module to transform raw vtk to rotated vtk along mom bud axis
THIS IS FOR THE OLD DATASET (2015 thesis: YPD, YPE, YPL and YPR WT cells)
"""
import os
import os.path as op
import cPickle as pickle
import numpy as np
from mayavi import mlab
import pandas as pd
from tvtk.api import tvtk
from mombud.functions import vtkvizfuncs as vz
from mombud.classes.vtk_pick_mombud_class import MitoSkel, CellEllipse
import wrappers as wr
# pylint: disable=C0103
datadir = op.join(os.getcwd(), 'data') # input folder
rawdir = op.join(os.getcwd(), 'output') # output folder
# xkcd palette colors for labels
def_cols = dict(colors=['medium blue', 'bright green', 'red'],
labels=['base', 'tip', 'neck'])
cur_col, palette = vz.generate_color_labels(**def_cols)
# vtk data and cell picking points data
vtkF = wr.ddwalk(op.join(rawdir, 'normalizedVTK'),
'*skeleton.vtk', start=5, stop=-13)
mombud = wr.swalk(op.join(datadir, 'csv'),
'*csv', stop=-4)
filekeys = {item: vtkF[media][item] for media
in sorted(vtkF.keys()) for item
in sorted(vtkF[media].keys())}
# cell tracing info
DataSize = pd.read_table(op.join(datadir, 'csv', 'Results.txt'))
df_celltracing = DataSize.ix[:, 1:]
df_celltracing['cell'] = \
df_celltracing.ix[:, 'Label'].apply(lambda x: x.partition(':')[2])
counter = df_celltracing.groupby('cell').Label.count()
hasbuds = \
df_celltracing[df_celltracing.cell.isin(counter[counter > 1].index.values)]
mlab.close(all=True)
##############################################################################
if __name__ == "__main__":
WRITE_PICKLE = False # don't overwrite old pickle file by default
WRITE_VTK = False # same for vtk
D = {} # holder for original bud,neck, tip points
dfmb = pd.DataFrame(columns=['base', 'neck', 'tip', 'media'])
# Figure to render on
figone = mlab.figure(size=(800, 600), bgcolor=(.1, .1, .1))
figone.scene.off_screen_rendering = True
for key in sorted(mombud.keys())[::]:
mlab.clf(figure=figone)
print "now on %s" % key
# get original cursor points
df_cursorpts = pd.read_csv(mombud[key],
header=0,
names=['x', 'y', 'z'],
index_col=0)
D['tip'] = np.array(df_cursorpts.ix['tip'])
D['base'] = np.array(df_cursorpts.ix['base'])
D['neck'] = np.array(df_cursorpts.ix['neck'])
# get rotation matrix transform
t, rot, scale1 = vz.arrowvect(D['base'], D['tip'], D['neck'])
tr_filt = vz.inverse_tr(rot, D['base'])
# original vtk skel
vtkob = vz.setup_vtk_source(op.join(filekeys[key]))
# vtkob.point_scalars_name = 'DY_raw' # IMPORTANT
mitoskel = MitoSkel(data_src=vtkob)
trans_obj = tvtk.TransformPolyDataFilter(
input=mitoskel.data_src.data,
transform=tr_filt).output
# this is just to visualize the VTK actor
mitoskel.viz_skel(figure=figone)
mitoskel.surf.module_manager.scalar_lut_manager.show_legend = True
mitoskel.transform(tr_filt)
# draw ellipse shells
df_ellipse = vz.getelipspar(key, df_celltracing, useold=True)
if 'centerpt' in df_cursorpts:
zpos = df_cursorpts.ix['centerpt', 'x']
else: # old csv might not have centerpt data
zpos = np.mean(vtkob.data.bounds[4:])
for mb in ['mom', 'bud']:
mb_glyph = CellEllipse(name='%s' % mb, dataframe=df_ellipse)
mb_glyph.make_surf(figure=figone)
mb_glyph.adjust_ellipse()
x, y, _ = mb_glyph.surf.actor.actor.position
mb_glyph.surf.actor.actor.set(
position=[x, y, zpos])
mb_glyph.surf.actor.actor.user_transform = tr_filt
# Dataframe to save parameters of transformed object
df = pd.Series({}, name=key)
# transform original bud, neck and tip points and writeout
for part in D:
center = D[part]
src = tvtk.SphereSource(center=D[part], radius=.15,
theta_resolution=32,
phi_resolution=32)
label = cur_col[part]
pt_glyph = mlab.pipeline.surface(src.output,
color=palette[label],
name='%s_trnf' % part,
figure=figone)
pt_glyph.actor.actor.user_transform = tr_filt
df[part] = pt_glyph.actor.actor.center
df['mom'] = df_ellipse.ix['mom', 'vol']
df['bud'] = df_ellipse.ix['bud', 'vol']
df['media'] = key.partition("_")[0]
mlab.view(0, 0)
dfmb = dfmb.append(df)
if WRITE_VTK:
w = tvtk.PolyDataWriter(input=trans_obj, file_name='%s.vtk' %
op.join(datadir, 'transformedData', key))
w.write()
mlab.savefig(op.join(datadir, 'transformedData', '%s.png' % key))
if WRITE_PICKLE:
with open(op.join(datadir, 'transformedData',
'mombudtrans_new.pkl'), 'wb') as output:
pickle.dump(dfmb, output)
|
moosekaka/sweepython
|
cell_pick_viz/vtk_rotate_mombud_old.py
|
Python
|
mit
| 5,336
|
[
"Mayavi",
"VTK"
] |
476d46cef446cf20532f32966258ec1129caf2fce28d70bd534e422e1557d82a
|
from __future__ import division
import numpy as np
import scipy.io
import pylab as plt
def sparse_nmf(X, r, maxiter, spar, W = None, H = None):
"""Input data and the rank
Learns a sparse NMF model given data X and the rank r.
Parameters
----------
X : {array}, shape = [n_features, n_samples]
r : rank of factorization
maxiter : number of updates of the factor matrices
spar : sparsity of the features given by measure sp(x)= (sqrt(n)-|x|_1/|x|_2 )/(sqrt(n)-1)
Returns
-------
W : {array}
Feature matrix to the sparse NMF problem.
Reference
---------
Block Coordinate Descent for Sparse NMF
Vamsi K. Potluru, Sergey M. Plis, Jonathan Le Roux, Barak A. Pearlmutter, Vince D. Calhoun, Thomas P. Hayes
ICLR 2013.
http://arxiv.org/abs/1301.3527
"""
m, n = np.shape(X)
    if W is None and H is None:
W, H = init_nmf(X, r, spar)
Obj = np.zeros(maxiter)
for i in range(maxiter):
Obj[i] = np.linalg.norm(X - np.dot(W, H), 'fro')
print('iter: {} Obj: {}'.format(i + 1, Obj[i]))
W = update_W(X, W, H, spar)
H = update_H(X, W, H)
return W
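# Minimal usage sketch (illustrative only, not part of the original script):
# learn a handful of sparse features from a small random non-negative matrix.
#
#   X_demo = np.abs(np.random.randn(100, 40))
#   W_demo = sparse_nmf(X_demo, r=5, maxiter=50, spar=0.5)
#   # W_demo has shape (100, 5); each column is pushed towards the requested
#   # sparsity by the sparse_opt projection used inside update_W.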
def init_nmf(X, r, spar):
""" Initialize the matrix factors for NMF.
    Use uniform random numbers in [0, 1) to initialize
Parameters
----------
X: {array}, shape = [n_features, n_samples]
r: rank of factorization
Returns
-------
W : {array}
Feature matrix of the factorization
H : {array}
Weight matrix of the factorization
where X ~ WH
"""
m, n = np.shape(X)
W = np.zeros((m, r))
k = np.sqrt(m) - spar * (np.sqrt(m) - 1)
for i in range(r):
W[:, i] = sparse_opt(np.sort(np.random.rand(m))[::-1], k)
    W = np.random.rand(m, r)  # note: this overwrites the sparse initialization computed above
H = np.random.rand(r, n)
return (W, H)
def update_W(X, W, H, spar):
"""Update the feature matrix based on user-defined sparsity"""
m, n = np.shape(X)
m, r = np.shape(W)
cach = np.zeros((m, r))
HHt = np.dot(H, H.T)
cach = -np.dot(X, H.T) + np.dot(W, np.dot(H, H.T))
for i in range(r):
W, cach = W_sparse_ith(W, HHt, cach, spar, i)
return W
def update_H(X, W, H):
"""Update the weight matrix using the regular multiplicative updates"""
m, n = np.shape(X)
WtX = np.dot(W.T, X)
WtW = np.dot(W.T, W)
for j in range(10):
H = H * WtX / (np.dot(WtW, H) + np.spacing(1))
return H
def W_sparse_ith(W, HHt, cach, spar, i):
""" Update the columns sequentially"""
m, r = np.shape(W)
C = cach[:, i] - W[:, i] * HHt[i, i]
V = np.zeros(m)
k = np.sqrt(m) - spar * (np.sqrt(m) - 1)
a = sparse_opt(np.sort(-C)[::-1], k)
ind = np.argsort(-C)[::-1]
V[ind] = a
cach = cach + np.outer(V - W[:, i], HHt[i, :])
W[:, i] = V
return (W, cach)
def sparse_opt(b, k):
""" Project a vector onto a sparsity constraint
Solves the projection problem by taking into account the
symmetry of l1 and l2 constraints.
Parameters
----------
b : sorted vector in decreasing value
k : Ratio of l1/l2 norms of a vector
Returns
-------
z : closest vector satisfying the required sparsity constraint.
"""
n = len(b)
sumb = np.cumsum(b)
normb = np.cumsum(b * b)
pnormb = np.arange(1, n + 1) * normb
y = (pnormb - sumb * sumb) / (np.arange(1, n + 1) - k * k)
    bot = int(np.ceil(k * k))
z = np.zeros(n)
if bot > n:
print('Looks like the sparsity measure is not between 0 and 1\n')
return
obj = (-np.sqrt(y) * (np.arange(1, n + 1) + k) + sumb) / np.arange(1, n + 1)
indx = np.argmax(obj[bot:n])
p = indx + bot - 1
p = min(p, n - 1)
p = max(p, bot)
lam = np.sqrt(y[p])
mue = -sumb[p] / (p + 1) + k / (p + 1) * lam
z[:p + 1] = (b[:p + 1] + mue) / lam
return z
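# Relation between `spar` and the l1/l2 ratio `k` (sketch, restating the
# docstrings above): the sparsity of a length-n vector x is measured as
#
#   sp(x) = (sqrt(n) - |x|_1/|x|_2) / (sqrt(n) - 1)
#
# so a requested sparsity `spar` corresponds to the target ratio
#
#   k = |x|_1/|x|_2 = sqrt(n) - spar*(sqrt(n) - 1)
#
# which is exactly the `k` that init_nmf, update_W and W_sparse_ith pass to
# sparse_opt.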
if __name__ == '__main__':
r = 25
spar = 0.5
maxiter = 200
X = scipy.io.loadmat('../data/orlfaces.mat')
W = sparse_nmf(X['V'], r, maxiter, spar)
for i in range(r):
plt.subplot(np.sqrt(r), np.sqrt(r), i + 1)
plt.imshow(np.reshape(W.T[i], [92, 112]).T)
plt.axis('off')
plt.ion()
plt.show(True)
#import ipdb
#ipdb.set_trace()
|
ismav/sparseNMF
|
sparse_nmf.py
|
Python
|
bsd-3-clause
| 4,259
|
[
"Gaussian"
] |
958b5cf7de743aedf0899ae229715dd6d9e4770bee13633ad0d9e23c4274e89b
|
# coding: utf-8
# Copyright (c) Henniggroup.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
"""
The instrument module:
defines the inputset and the job
"""
import os
import shutil
import subprocess
import logging
from pymatgen.io.vasp.inputs import Incar, Poscar, Potcar, Kpoints
from pymatgen.io.vasp.sets import DictSet
# uncomment for python2.7 pymatgen version compatibility; for further changes,
# refer to the ufhpc_py27_compat branch
#try:
# from pymatgen.io.vasp.sets import DictVaspInputSet
#except ImportError:
# from pymatgen.io.vasp.sets import DictSet as DictVaspInputSet
from custodian.custodian import Job, ErrorHandler
from monty.json import MontyDecoder
from fireworks.user_objects.queue_adapters.common_adapter import CommonAdapter
from mpinterfaces.data_processor import MPINTVasprun
from mpinterfaces.default_logger import get_default_logger
__author__ = "Kiran Mathew, Joshua J. Gabriel"
__copyright__ = "Copyright 2017, Henniggroup"
__maintainer__ = "Joshua J. Gabriel"
__email__ = "joshgabriel92@gmail.com"
__status__ = "Production"
__date__ = "March 3, 2017"
logger = get_default_logger(__name__)
class MPINTVaspInputSet(DictSet):
"""
defines the set of input required for a vasp job i.e
create INCAR, POSCAR, POTCAR & KPOINTS files
"""
def __init__(self, name, incar, poscar, kpoints, potcar=None,
qadapter=None, script_name='submit_script',
vis_logger=None, reuse_path=None, test=False,
**kwargs):
"""
default INCAR from config_dict
"""
self.name = name
self.test = test
self.incar_init = Incar.from_dict(incar.as_dict())
self.poscar_init = Poscar.from_dict(poscar.as_dict())
if not self.test:
self.potcar_init = Potcar.from_dict(potcar.as_dict())
if not isinstance(kpoints, str):
self.kpoints_init = Kpoints.from_dict(kpoints.as_dict())
else:
self.kpoints_init = kpoints
self.reuse_path = reuse_path # complete reuse paths
self.extra = kwargs
if qadapter is not None:
self.qadapter = qadapter.from_dict(qadapter.to_dict())
else:
self.qadapter = None
self.script_name = script_name
config_dict = {}
config_dict['INCAR'] = self.incar_init.as_dict()
config_dict['POSCAR'] = self.poscar_init.as_dict()
# caution the key and the value are not always the same
if not self.test:
config_dict['POTCAR'] = self.potcar_init.as_dict()
# dict(zip(self.potcar.as_dict()['symbols'],
# self.potcar.as_dict()['symbols']))
if not isinstance(kpoints, str):
config_dict['KPOINTS'] = self.kpoints_init.as_dict()
else:
# need to find a way to dictify this kpoints string more
# appropriately
config_dict['KPOINTS'] = {'kpts_hse':self.kpoints_init}
# self.user_incar_settings = self.incar.as_dict()
DictSet.__init__(self, poscar.structure, config_dict)
#**kwargs)
if vis_logger:
self.logger = vis_logger
else:
self.logger = logger
def write_input(self, job_dir, make_dir_if_not_present=True,
write_cif=False):
"""
the input files are written to the job_dir
process(if needed) and write the input files in each directory
structures read from the poscar files in the directory
"""
d = job_dir
if make_dir_if_not_present and not os.path.exists(d):
os.makedirs(d)
self.logger.info('writing inputset to : ' + d)
self.incar_init.write_file(os.path.join(d, 'INCAR'))
if not isinstance(self.kpoints_init, str):
# maybe temporary fix, pymatgen does not seem
# to have a versatile kpoints object for writing a
# HSE Kpoints file
self.kpoints_init.write_file(os.path.join(d, 'KPOINTS'))
else:
with open(os.path.join(d, 'KPOINTS'), 'w') as kpts:
for line in self.kpoints_init:
kpts.write(line)
if not self.test:
self.potcar_init.write_file(os.path.join(d, 'POTCAR'))
self.poscar_init.write_file(os.path.join(d, 'POSCAR'),
significant_figures=10)
if self.reuse_path:
for reuse in self.reuse_path:
self.logger.info("copied over {0} ".format(reuse))
shutil.copy(reuse, d)
if self.qadapter is not None:
with open(os.path.join(d, self.script_name), 'w') as f:
queue_script = self.qadapter.get_script_str(job_dir)
f.write(queue_script)
def as_dict(self):
qadapter = None
if self.qadapter:
qadapter = self.qadapter.to_dict()
if not isinstance(self.kpoints_init, str):
kpoints = self.kpoints_init.as_dict()
else:
kpoints = [self.kpoints_init]
d = dict(name=self.name, incar=self.incar_init.as_dict(),
poscar=self.poscar_init.as_dict(),
potcar=self.potcar_init.as_dict(),
kpoints=kpoints,
qadapter=qadapter, script_name=self.script_name,
kwargs=self.extra)
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["logger"] = self.logger.name
return d
@classmethod
def from_dict(cls, d):
incar = Incar.from_dict(d["incar"])
poscar = Poscar.from_dict(d["poscar"])
potcar = Potcar.from_dict(d["potcar"])
kpoints = Kpoints.from_dict(d["kpoints"])
qadapter = None
if d["qadapter"] is not None:
qadapter = CommonAdapter.from_dict(d["qadapter"])
script_name = d["script_name"]
return MPINTVaspInputSet(d["name"], incar, poscar, potcar,
kpoints, qadapter,
script_name=script_name,
vis_logger=logging.getLogger(d["logger"]),
**d["kwargs"])
class MPINTJob(Job):
"""
    defines a job, i.e., sets up the required input files and
    launches the job
Args:
job_cmd: a list, the command to be issued in each job_dir
eg: ['qsub', 'submit_job']
job_dir: the directory from which the jobs will be launched
"""
def __init__(self, job_cmd, name='noname', output_file="job.out",
parent_job_dir='.', job_dir='untitled', suffix="",
final=True, gzipped=False, backup=False, vis=None,
auto_npar=True, settings_override=None, wait=True,
vjob_logger=None):
self.job_cmd = job_cmd
self.name = name
self.output_file = output_file
self.parent_job_dir = parent_job_dir
self.job_dir = job_dir
self.final = final
self.backup = backup
self.gzipped = gzipped
self.vis = vis
self.suffix = suffix
self.settings_override = settings_override
self.auto_npar = auto_npar
self.wait = wait
if vjob_logger:
self.logger = vjob_logger
else:
self.logger = logger
def setup(self):
"""
write the input files to the job_dir
"""
self.vis.write_input(self.job_dir)
if self.backup:
os.chdir(os.path.abspath(self.job_dir))
for f in os.listdir('.'):
shutil.copy(f, "{}.orig".format(f))
os.chdir(self.parent_job_dir)
def run(self):
"""
move to the job_dir, launch the job and back to the
parent job directory
"""
os.chdir(os.path.abspath(self.job_dir))
self.logger.info('running in : ' + self.job_dir)
p = None
# if launching jobs via batch system
if self.vis.qadapter is not None:
submit_cmd = \
self.vis.qadapter.q_commands[self.vis.qadapter.q_type][
"submit_cmd"]
cmd = [submit_cmd, self.vis.script_name]
with open(self.output_file, 'w') as f:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.job_id = stdout.rstrip('\n').split()[-1]
f.write(self.job_id)
else:
cmd = list(self.job_cmd)
with open(self.output_file, 'w') as f:
p = subprocess.Popen(cmd, stdout=f, stderr=f)
self.job_id = 0 # None
os.chdir(self.parent_job_dir)
if self.wait:
return p
else:
return 0
def postprocess(self):
pass
def name(self):
return self.__class__.__name__
def as_dict(self):
d = dict(job_cmd=self.job_cmd, name=self.name,
output_file=self.output_file,
parent_job_dir=self.parent_job_dir,
job_dir=self.job_dir, suffix=self.suffix,
final=self.final, gzipped=self.gzipped,
backup=self.backup, vis=self.vis.as_dict(),
auto_npar=self.auto_npar,
settings_override=self.settings_override,
wait=self.wait)
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["logger"] = self.logger.name
return d
@classmethod
def from_dict(cls, d):
vis = MontyDecoder().process_decoded(d["vis"])
return MPINTVaspJob(d["job_cmd"], name=d["name"],
output_file=d["output_file"],
parent_job_dir=d["parent_job_dir"],
job_dir=d["job_dir"],
suffix=d["suffix"], final=d["final"],
gzipped=d["gzipped"],
backup=d["backup"], vis=vis,
auto_npar=d["auto_npar"],
settings_override=d["settings_override"],
wait=d["wait"],
vjob_logger=logging.getLogger(d["logger"]))
class MPINTVaspJob(MPINTJob):
"""
    defines a VASP job, i.e., sets up the required input files and
    launches the job
Args:
job_cmd: a list, the command to be issued in each job_dir
eg: ['qsub', 'submit_job']
job_dir: the directory from which the jobs will be launched
"""
def __init__(self, job_cmd, name='noname', output_file="job.out",
parent_job_dir='.', job_dir='untitled', suffix="",
final=True, gzipped=False, backup=False, vis=None,
auto_npar=True, settings_override=None, wait=True,
vjob_logger=None):
MPINTJob.__init__(self, job_cmd, name=name,
output_file=output_file,
parent_job_dir=parent_job_dir,
job_dir=job_dir, suffix=suffix,
final=final, gzipped=gzipped,
backup=backup, vis=vis, auto_npar=auto_npar,
settings_override=settings_override,
wait=wait, vjob_logger=vjob_logger)
def get_final_energy(self):
vasprun_file_path = self.job_dir + os.sep + 'vasprun.xml'
try:
vasprun = MPINTVasprun(vasprun_file_path,
parse_potcar_file=False)
if vasprun.converged:
self.logger.info("job {0} in {1} converged".format(self.job_id,
self.job_dir))
return vasprun.final_energy
else:
self.logger.info(
"job {0} in {1} NOT converged".format(self.job_id,
self.job_dir))
return None
except Exception as ex:
self.logger.info(
"error reading vasprun.xml, probably the job {0} in {1} is not done yet.".format(
self.job_id,
self.job_dir))
return None
class MPINTVaspErrors(ErrorHandler):
"""
handles restarting of jobs that exceed the walltime
employs the check + correct method of custodian ErrorHandler
"""
pass
|
henniggroup/MPInterfaces
|
mpinterfaces/instrument.py
|
Python
|
mit
| 12,676
|
[
"VASP",
"pymatgen"
] |
3cea30cd239a3782758e8c50c543d91daff16b32e07dcddc791f9bed5c618f37
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include("sandbox_project.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development: just visit
    # these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request),
url(r'^403/$', default_views.permission_denied),
url(r'^404/$', default_views.page_not_found),
url(r'^500/$', default_views.server_error),
]
|
Eraldo/sandbox
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,285
|
[
"VisIt"
] |
669b8fd4c41aacc9a1118d17b0731c3551b1c508fa1cbdd7ad9b6e0d87b0740e
|
"""
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
"""
__revision__ = "$Revision: 879 $"
import StringIO
try:
import bz2 # New in Python 2.3.
except ImportError:
bz2 = None
import fileinput
import gzip
import inspect
import logging
logging.logMultiprocessing = 0 # To avoid a problem with Avogadro
import os
import random
try:
set # Standard type from Python 2.4+.
except NameError:
from sets import Set as set
import sys
import types
import zipfile
import numpy
import utils
from data import ccData
def openlogfile(filename):
"""Return a file object given a filename.
Given the filename of a log file or a gzipped, zipped, or bzipped
log file, this function returns a regular Python file object.
Given an address starting with http://, this function retrieves the url
and returns a file object using a temporary file.
Given a list of filenames, this function returns a FileInput object,
which can be used for seamless iteration without concatenation.
"""
# If there is a single string argument given.
if type(filename) in [str, unicode]:
extension = os.path.splitext(filename)[1]
if extension == ".gz":
fileobject = gzip.open(filename, "r")
elif extension == ".zip":
zip = zipfile.ZipFile(filename, "r")
assert len(zip.namelist()) == 1, "ERROR: Zip file contains more than 1 file"
fileobject = StringIO.StringIO(zip.read(zip.namelist()[0]))
elif extension in ['.bz', '.bz2']:
# Module 'bz2' is not always importable.
assert bz2 != None, "ERROR: module bz2 cannot be imported"
fileobject = bz2.BZ2File(filename, "r")
else:
fileobject = open(filename, "r")
return fileobject
elif hasattr(filename, "__iter__"):
# Compression (gzip and bzip) is supported as of Python 2.5.
if sys.version_info[0] >= 2 and sys.version_info[1] >= 5:
fileobject = fileinput.input(filename, openhook=fileinput.hook_compressed)
else:
fileobject = fileinput.input(filename)
return fileobject
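# Usage sketch (filenames are illustrative): openlogfile is transparent to
# compression and to lists of files, so the following all yield a readable
# file object:
#
#   fobj = openlogfile("water_b3lyp.log")
#   fobj = openlogfile("water_b3lyp.log.gz")
#   fobj = openlogfile(["step1.log", "step2.log"])  # FileInput over both files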
class Logfile(object):
"""Abstract class for logfile objects.
Subclasses defined by cclib:
ADF, GAMESS, GAMESSUK, Gaussian, Jaguar, Molpro, ORCA
"""
def __init__(self, source, progress=None,
loglevel=logging.INFO, logname="Log", logstream=sys.stdout,
fupdate=0.05, cupdate=0.002,
datatype=ccData):
"""Initialise the Logfile object.
        This should be called by a subclass in its own __init__ method.
Inputs:
source - a single logfile, a list of logfiles, or input stream
"""
# Set the filename to source if it is a string or a list of filenames.
# In the case of an input stream, set some arbitrary name and the stream.
        # Otherwise, raise an Exception.
if isinstance(source,types.StringTypes):
self.filename = source
self.isstream = False
elif isinstance(source,list) and all([isinstance(s,types.StringTypes) for s in source]):
self.filename = source
self.isstream = False
elif hasattr(source, "read"):
self.filename = "stream %s" %str(type(source))
self.isstream = True
self.stream = source
else:
raise ValueError
# Progress indicator.
self.progress = progress
self.fupdate = fupdate
self.cupdate = cupdate
# Set up the logger.
# Note that calling logging.getLogger() with one name always returns the same instance.
# Presently in cclib, all parser instances of the same class use the same logger,
# which means that care needs to be taken not to duplicate handlers.
self.loglevel = loglevel
self.logname = logname
self.logger = logging.getLogger('%s %s' % (self.logname,self.filename))
self.logger.setLevel(self.loglevel)
if len(self.logger.handlers) == 0:
handler = logging.StreamHandler(logstream)
handler.setFormatter(logging.Formatter("[%(name)s %(levelname)s] %(message)s"))
self.logger.addHandler(handler)
# Periodic table of elements.
self.table = utils.PeriodicTable()
# This is the class that will be used in the data object returned by parse(),
# and should normally be ccData or a subclass.
self.datatype = datatype
def __setattr__(self, name, value):
# Send info to logger if the attribute is in the list self._attrlist.
if name in getattr(self, "_attrlist", {}) and hasattr(self, "logger"):
# Call logger.info() only if the attribute is new.
if not hasattr(self, name):
if type(value) in [numpy.ndarray, list]:
self.logger.info("Creating attribute %s[]" %name)
else:
self.logger.info("Creating attribute %s: %s" %(name, str(value)))
# Set the attribute.
object.__setattr__(self, name, value)
def parse(self, fupdate=None, cupdate=None):
"""Parse the logfile, using the assumed extract method of the child."""
# Check that the sub-class has an extract attribute,
        # that is callable with the proper number of arguments.
if not hasattr(self, "extract"):
raise AttributeError, "Class %s has no extract() method." %self.__class__.__name__
return -1
if not callable(self.extract):
raise AttributeError, "Method %s._extract not callable." %self.__class__.__name__
return -1
if len(inspect.getargspec(self.extract)[0]) != 3:
raise AttributeError, "Method %s._extract takes wrong number of arguments." %self.__class__.__name__
return -1
# Save the current list of attributes to keep after parsing.
# The dict of self should be the same after parsing.
_nodelete = list(set(self.__dict__.keys()))
# Initiate the FileInput object for the input files.
# Remember that self.filename can be a list of files.
if not self.isstream:
inputfile = openlogfile(self.filename)
else:
inputfile = self.stream
        # Initialize self.progress.
if self.progress:
inputfile.seek(0,2)
nstep = inputfile.tell()
inputfile.seek(0)
self.progress.initialize(nstep)
self.progress.step = 0
if fupdate:
self.fupdate = fupdate
if cupdate:
self.cupdate = cupdate
# Initialize the ccData object that will be returned.
# This is normally ccData, but can be changed by passing
# the datatype argument to __init__().
data = self.datatype()
# Copy the attribute list, so that the parser knows what to expect,
# specifically in __setattr__().
# The class self.datatype (normally ccData) must have this attribute.
self._attrlist = data._attrlist
# Maybe the sub-class has something to do before parsing.
if hasattr(self, "before_parsing"):
self.before_parsing()
# Loop over lines in the file object and call extract().
# This is where the actual parsing is done.
for line in inputfile:
self.updateprogress(inputfile, "Unsupported information", cupdate)
# This call should check if the line begins a section of extracted data.
# If it does, it parses some lines and sets the relevant attributes (to self).
# Any attributes can be freely set and used across calls, however only those
# in data._attrlist will be moved to final data object that is returned.
self.extract(inputfile, line)
# Close input file object.
if not self.isstream:
inputfile.close()
# Maybe the sub-class has something to do after parsing.
if hasattr(self, "after_parsing"):
self.after_parsing()
# If atomcoords were not parsed, but some input coordinates were ("inputcoords").
# This is originally from the Gaussian parser, a regression fix.
if not hasattr(self, "atomcoords") and hasattr(self, "inputcoords"):
self.atomcoords = numpy.array(self.inputcoords, 'd')
# Set nmo if not set already - to nbasis.
if not hasattr(self, "nmo") and hasattr(self, "nbasis"):
self.nmo = self.nbasis
        # Creating default coreelectrons array.
if not hasattr(self, "coreelectrons") and hasattr(self, "natom"):
self.coreelectrons = numpy.zeros(self.natom, "i")
# Move all cclib attributes to the ccData object.
# To be moved, an attribute must be in data._attrlist.
for attr in data._attrlist:
if hasattr(self, attr):
setattr(data, attr, getattr(self, attr))
# Now make sure that the cclib attributes in the data object
# are all the correct type (including arrays and lists of arrays).
data.arrayify()
# Delete all temporary attributes (including cclib attributes).
# All attributes should have been moved to a data object,
# which will be returned.
for attr in self.__dict__.keys():
if not attr in _nodelete:
self.__delattr__(attr)
# Update self.progress as done.
if self.progress:
self.progress.update(nstep, "Done")
# Return the ccData object that was generated.
return data
def updateprogress(self, inputfile, msg, xupdate=0.05):
"""Update progress."""
if self.progress and random.random() < xupdate:
newstep = inputfile.tell()
if newstep != self.progress.step:
self.progress.update(newstep, msg)
self.progress.step = newstep
def normalisesym(self,symlabel):
"""Standardise the symmetry labels between parsers.
This method should be overwritten by individual parsers, and should
contain appropriate doctests. If is not overwritten, this is detected
as an error by unit tests.
"""
return "ERROR: This should be overwritten by this subclass"
def float(self,number):
"""Convert a string to a float avoiding the problem with Ds.
>>> t = Logfile("dummyfile")
>>> t.float("123.2323E+02")
12323.23
>>> t.float("123.2323D+02")
12323.23
"""
number = number.replace("D","E")
return float(number)
if __name__=="__main__":
import doctest
doctest.testmod()
|
faribas/RMG-Java
|
source/cclib/parser/logfileparser.py
|
Python
|
mit
| 11,017
|
[
"ADF",
"Avogadro",
"GAMESS",
"Gaussian",
"Jaguar",
"Molpro",
"ORCA",
"cclib"
] |
93a998b2b155a64be79f71c696b2859b4e3c0cef5aa3d498c338934114f421d6
|
# Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# _generate_pyx.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from typing import Dict
docdict: Dict[str, str] = {}
def get(name):
return docdict.get(name)
def add_newdoc(name, doc):
docdict[name] = doc
add_newdoc("_sf_error_test_function",
"""
Private function; do not use.
""")
add_newdoc("_cosine_cdf",
"""
_cosine_cdf(x)
Cumulative distribution function (CDF) of the cosine distribution::
{ 0, x < -pi
cdf(x) = { (pi + x + sin(x))/(2*pi), -pi <= x <= pi
{ 1, x > pi
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
The cosine distribution CDF evaluated at `x`.
""")
add_newdoc("_cosine_invcdf",
"""
_cosine_invcdf(p)
Inverse of the cumulative distribution function (CDF) of the cosine
distribution.
The CDF of the cosine distribution is::
cdf(x) = (pi + x + sin(x))/(2*pi)
This function computes the inverse of cdf(x).
Parameters
----------
p : array_like
`p` must contain real numbers in the interval ``0 <= p <= 1``.
`nan` is returned for values of `p` outside the interval [0, 1].
Returns
-------
float
The inverse of the cosine distribution CDF evaluated at `p`.
""")
add_newdoc("sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
The spherical harmonics are defined as
.. math::
Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi} \frac{(n-m)!}{(n+m)!}}
e^{i m \theta} P^m_n(\cos(\phi))
where :math:`P_n^m` are the associated Legendre functions; see `lpmv`.
Parameters
----------
m : array_like
Order of the harmonic (int); must have ``|m| <= n``.
n : array_like
Degree of the harmonic (int); must have ``n >= 0``. This is
often denoted by ``l`` (lower case L) in descriptions of
spherical harmonics.
theta : array_like
Azimuthal (longitudinal) coordinate; must be in ``[0, 2*pi]``.
phi : array_like
Polar (colatitudinal) coordinate; must be in ``[0, pi]``.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at ``theta`` and ``phi``.
Notes
-----
There are different conventions for the meanings of the input
arguments ``theta`` and ``phi``. In SciPy ``theta`` is the
azimuthal angle and ``phi`` is the polar angle. It is common to
see the opposite convention, that is, ``theta`` as the polar angle
and ``phi`` as the azimuthal angle.
Note that SciPy's spherical harmonics include the Condon-Shortley
phase [2]_ because it is part of `lpmv`.
With SciPy's conventions, the first several spherical harmonics
are
.. math::
Y_0^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{1}{\pi}} \\
Y_1^{-1}(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{-i\theta} \sin(\phi) \\
Y_1^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{\pi}}
\cos(\phi) \\
Y_1^1(\theta, \phi) &= -\frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{i\theta} \sin(\phi).
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30.
https://dlmf.nist.gov/14.30
.. [2] https://en.wikipedia.org/wiki/Spherical_harmonics#Condon.E2.80.93Shortley_phase
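    Examples
    --------
    A quick sanity check (illustrative sketch): the :math:`n = m = 0` harmonic
    is the constant :math:`\frac{1}{2}\sqrt{1/\pi}`, independent of the angles.
    >>> import numpy as np
    >>> from scipy.special import sph_harm
    >>> np.allclose(sph_harm(0, 0, 0.7, 1.2), 0.5 * np.sqrt(1 / np.pi))
    True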
""")
add_newdoc("_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("voigt_profile",
r"""
voigt_profile(x, sigma, gamma, out=None)
Voigt profile.
The Voigt profile is a convolution of a 1-D Normal distribution with
standard deviation ``sigma`` and a 1-D Cauchy distribution with half-width at
half-maximum ``gamma``.
If ``sigma = 0``, PDF of Cauchy distribution is returned.
Conversely, if ``gamma = 0``, PDF of Normal distribution is returned.
If ``sigma = gamma = 0``, the return value is ``Inf`` for ``x = 0``, and ``0`` for all other ``x``.
Parameters
----------
x : array_like
Real argument
sigma : array_like
The standard deviation of the Normal distribution part
gamma : array_like
The half-width at half-maximum of the Cauchy distribution part
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
The Voigt profile at the given arguments
Notes
-----
It can be expressed in terms of Faddeeva function
.. math:: V(x; \sigma, \gamma) = \frac{Re[w(z)]}{\sigma\sqrt{2\pi}},
.. math:: z = \frac{x + i\gamma}{\sqrt{2}\sigma}
where :math:`w(z)` is the Faddeeva function.
See Also
--------
wofz : Faddeeva function
References
----------
.. [1] https://en.wikipedia.org/wiki/Voigt_profile
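    Examples
    --------
    Illustrative check (sketch) of the limiting case stated above: with
    ``gamma = 0`` the profile reduces to the normal PDF.
    >>> import numpy as np
    >>> from scipy.special import voigt_profile
    >>> from scipy.stats import norm
    >>> x = np.linspace(-3, 3, 7)
    >>> np.allclose(voigt_profile(x, 1.0, 0.0), norm.pdf(x))
    True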
""")
add_newdoc("wrightomega",
r"""
wrightomega(z, out=None)
Wright Omega function.
Defined as the solution to
.. math::
\omega + \log(\omega) = z
where :math:`\log` is the principal branch of the complex logarithm.
Parameters
----------
z : array_like
Points at which to evaluate the Wright Omega function
Returns
-------
omega : ndarray
Values of the Wright Omega function
Notes
-----
.. versionadded:: 0.19.0
The function can also be defined as
.. math::
\omega(z) = W_{K(z)}(e^z)
where :math:`K(z) = \lceil (\Im(z) - \pi)/(2\pi) \rceil` is the
unwinding number and :math:`W` is the Lambert W function.
The implementation here is taken from [1]_.
See Also
--------
lambertw : The Lambert W function
References
----------
.. [1] Lawrence, Corless, and Jeffrey, "Algorithm 917: Complex
Double-Precision Evaluation of the Wright :math:`\omega`
Function." ACM Transactions on Mathematical Software,
2012. :doi:`10.1145/2168773.2168779`.
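    Examples
    --------
    Illustrative check (sketch) against the defining equation
    :math:`\omega + \log(\omega) = z` for a real argument.
    >>> import numpy as np
    >>> from scipy.special import wrightomega
    >>> w = wrightomega(2.5)
    >>> np.allclose(w + np.log(w), 2.5)
    True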
""")
add_newdoc("agm",
"""
agm(a, b)
Compute the arithmetic-geometric mean of `a` and `b`.
Start with a_0 = a and b_0 = b and iteratively compute::
a_{n+1} = (a_n + b_n)/2
b_{n+1} = sqrt(a_n*b_n)
a_n and b_n converge to the same limit as n increases; their common
limit is agm(a, b).
Parameters
----------
a, b : array_like
Real values only. If the values are both negative, the result
is negative. If one value is negative and the other is positive,
`nan` is returned.
Returns
-------
float
The arithmetic-geometric mean of `a` and `b`.
Examples
--------
>>> from scipy.special import agm
>>> a, b = 24.0, 6.0
>>> agm(a, b)
13.458171481725614
Compare that result to the iteration:
>>> while a != b:
... a, b = (a + b)/2, np.sqrt(a*b)
... print("a = %19.16f b=%19.16f" % (a, b))
...
a = 15.0000000000000000 b=12.0000000000000000
a = 13.5000000000000000 b=13.4164078649987388
a = 13.4582039324993694 b=13.4581390309909850
a = 13.4581714817451772 b=13.4581714817060547
a = 13.4581714817256159 b=13.4581714817256159
When array-like arguments are given, broadcasting applies:
>>> a = np.array([[1.5], [3], [6]]) # a has shape (3, 1).
>>> b = np.array([6, 12, 24, 48]) # b has shape (4,).
>>> agm(a, b)
array([[ 3.36454287, 5.42363427, 9.05798751, 15.53650756],
[ 4.37037309, 6.72908574, 10.84726853, 18.11597502],
[ 6. , 8.74074619, 13.45817148, 21.69453707]])
""")
add_newdoc("airy",
r"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
Ai, Aip, Bi, Bip : ndarrays
Airy functions Ai and Bi, and their derivatives Aip and Bip.
Notes
-----
The Airy functions Ai and Bi are two independent solutions of
.. math:: y''(x) = x y(x).
For real `z` in [-10, 10], the computation is carried out by calling
the Cephes [1]_ `airy` routine, which uses power series summation
for small `z` and rational minimax approximations for large `z`.
Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are
employed. They are computed using power series for :math:`|z| < 1` and
the following relations to modified Bessel functions for larger `z`
(where :math:`t \equiv 2 z^{3/2}/3`):
.. math::
Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t)
Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t)
Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right)
Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right)
See also
--------
airye : exponentially scaled Airy functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
Examples
--------
Compute the Airy functions on the interval [-15, 5].
>>> from scipy import special
>>> x = np.linspace(-15, 5, 201)
>>> ai, aip, bi, bip = special.airy(x)
Plot Ai(x) and Bi(x).
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, ai, 'r', label='Ai(x)')
>>> plt.plot(x, bi, 'b--', label='Bi(x)')
>>> plt.ylim(-0.5, 1.0)
>>> plt.grid()
>>> plt.legend(loc='upper left')
>>> plt.show()
""")
add_newdoc("airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
eBip = Bip * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
eAi, eAip, eBi, eBip : array_like
Exponentially scaled Airy functions eAi and eBi, and their derivatives
eAip and eBip
Notes
-----
Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`.
See also
--------
airy
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
Examples
--------
We can compute exponentially scaled Airy functions and their derivatives:
>>> from scipy.special import airye
>>> import matplotlib.pyplot as plt
>>> z = np.linspace(0, 50, 500)
>>> eAi, eAip, eBi, eBip = airye(z)
>>> f, ax = plt.subplots(2, 1, sharex=True)
>>> for ind, data in enumerate([[eAi, eAip, ["eAi", "eAip"]],
... [eBi, eBip, ["eBi", "eBip"]]]):
... ax[ind].plot(z, data[0], "-r", z, data[1], "-b")
... ax[ind].legend(data[2])
... ax[ind].grid(True)
>>> plt.show()
We can compute these using usual non-scaled Airy functions by:
>>> from scipy.special import airy
>>> Ai, Aip, Bi, Bip = airy(z)
>>> np.allclose(eAi, Ai * np.exp(2.0 / 3.0 * z * np.sqrt(z)))
True
>>> np.allclose(eAip, Aip * np.exp(2.0 / 3.0 * z * np.sqrt(z)))
True
>>> np.allclose(eBi, Bi * np.exp(-abs(np.real(2.0 / 3.0 * z * np.sqrt(z)))))
True
>>> np.allclose(eBip, Bip * np.exp(-abs(np.real(2.0 / 3.0 * z * np.sqrt(z)))))
True
Comparing non-scaled and exponentially scaled ones, the usual non-scaled
function quickly underflows for large values, whereas the exponentially
scaled function does not.
>>> airy(200)
(0.0, 0.0, nan, nan)
>>> airye(200)
(0.07501041684381093, -1.0609012305109042, 0.15003188417418148, 2.1215836725571093)
""")
add_newdoc("bdtr",
r"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through `floor(k)` of the Binomial probability density.
.. math::
\mathrm{bdtr}(k, n, p) = \sum_{j=0}^{\lfloor k \rfloor} {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (double), rounded down to the nearest integer.
n : array_like
Number of events (int).
p : array_like
Probability of success in a single event (float).
Returns
-------
y : ndarray
Probability of `floor(k)` or fewer successes in `n` independent events with
success probabilities of `p`.
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtr}(k, n, p) = I_{1 - p}(n - \lfloor k \rfloor, \lfloor k \rfloor + 1).
Wrapper for the Cephes [1]_ routine `bdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
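    Examples
    --------
    Illustrative check (sketch) against the defining sum for ``k = 2``,
    ``n = 5``, ``p = 0.5``, where the sum is ``(1 + 5 + 10) / 32 = 0.5``.
    >>> import numpy as np
    >>> from scipy.special import bdtr
    >>> np.allclose(bdtr(2, 5, 0.5), 0.5)
    True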
""")
add_newdoc("bdtrc",
r"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms `floor(k) + 1` through `n` of the binomial probability
density,
.. math::
\mathrm{bdtrc}(k, n, p) = \sum_{j=\lfloor k \rfloor +1}^n {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (double), rounded down to nearest integer.
n : array_like
Number of events (int)
p : array_like
Probability of success in a single event.
Returns
-------
y : ndarray
Probability of `floor(k) + 1` or more successes in `n` independent
events with success probabilities of `p`.
See also
--------
bdtr
betainc
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtrc}(k, n, p) = I_{p}(\lfloor k \rfloor + 1, n - \lfloor k \rfloor).
Wrapper for the Cephes [1]_ routine `bdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("bdtri",
r"""
bdtri(k, n, y)
Inverse function to `bdtr` with respect to `p`.
Finds the event probability `p` such that the sum of the terms 0 through
`k` of the binomial probability density is equal to the given cumulative
probability `y`.
Parameters
----------
k : array_like
Number of successes (float), rounded down to the nearest integer.
n : array_like
Number of events (float)
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
Returns
-------
p : ndarray
The event probability such that `bdtr(\lfloor k \rfloor, n, p) = y`.
See also
--------
bdtr
betaincinv
Notes
-----
The computation is carried out using the inverse beta integral function
and the relation,::
1 - p = betaincinv(n - k, k + 1, y).
Wrapper for the Cephes [1]_ routine `bdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("bdtrik",
"""
bdtrik(y, n, p)
Inverse function to `bdtr` with respect to `k`.
Finds the number of successes `k` such that the sum of the terms 0 through
`k` of the Binomial probability density for `n` events with probability
`p` is equal to the given cumulative probability `y`.
Parameters
----------
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
n : array_like
Number of events (float).
p : array_like
Success probability (float).
Returns
-------
k : ndarray
The number of successes `k` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("bdtrin",
"""
bdtrin(k, y, p)
Inverse function to `bdtr` with respect to `n`.
Finds the number of events `n` such that the sum of the terms 0 through
`k` of the Binomial probability density for events with probability `p` is
equal to the given cumulative probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
p : array_like
Success probability (float).
Returns
-------
n : ndarray
The number of events `n` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("binom",
"""
binom(n, k)
Binomial coefficient
See Also
--------
comb : The number of combinations of N things taken k at a time.
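    Examples
    --------
    Illustrative value (sketch): there are 10 ways to choose 2 items out of 5.
    >>> import numpy as np
    >>> from scipy.special import binom
    >>> np.allclose(binom(5, 2), 10.0)
    True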
""")
add_newdoc("btdtria",
r"""
btdtria(p, b, x)
Inverse of `btdtr` with respect to `a`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `a`, returning the value of `a` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
p : array_like
Cumulative probability, in [0, 1].
b : array_like
Shape parameter (`b` > 0).
x : array_like
The quantile, in [0, 1].
Returns
-------
a : ndarray
The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative distribution function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtrib : Inverse with respect to `b`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("btdtrib",
r"""
    btdtrib(a, p, x)
Inverse of `btdtr` with respect to `b`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `b`, returning the value of `b` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
p : array_like
Cumulative probability, in [0, 1].
x : array_like
The quantile, in [0, 1].
Returns
-------
b : ndarray
The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative distribution function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtria : Inverse with respect to `a`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("bei",
r"""
bei(x, out=None)
Kelvin function bei.
Defined as
.. math::
\mathrm{bei}(x) = \Im[J_0(x e^{3 \pi i / 4})]
where :math:`J_0` is the Bessel function of the first kind of
order zero (see `jv`). See [dlmf]_ for more details.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the Kelvin function.
See Also
--------
ber : the corresponding real part
beip : the derivative of bei
jv : Bessel function of the first kind
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10.61
Examples
--------
It can be expressed using Bessel functions.
>>> import scipy.special as sc
>>> x = np.array([1.0, 2.0, 3.0, 4.0])
>>> sc.jv(0, x * np.exp(3 * np.pi * 1j / 4)).imag
array([0.24956604, 0.97229163, 1.93758679, 2.29269032])
>>> sc.bei(x)
array([0.24956604, 0.97229163, 1.93758679, 2.29269032])
""")
add_newdoc("beip",
r"""
beip(x, out=None)
Derivative of the Kelvin function bei.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
The values of the derivative of bei.
See Also
--------
bei
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10#PT5
""")
add_newdoc("ber",
r"""
ber(x, out=None)
Kelvin function ber.
Defined as
.. math::
\mathrm{ber}(x) = \Re[J_0(x e^{3 \pi i / 4})]
where :math:`J_0` is the Bessel function of the first kind of
order zero (see `jv`). See [dlmf]_ for more details.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the Kelvin function.
See Also
--------
    bei : the corresponding imaginary part
    berp : the derivative of ber
jv : Bessel function of the first kind
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10.61
Examples
--------
It can be expressed using Bessel functions.
>>> import scipy.special as sc
>>> x = np.array([1.0, 2.0, 3.0, 4.0])
>>> sc.jv(0, x * np.exp(3 * np.pi * 1j / 4)).real
array([ 0.98438178, 0.75173418, -0.22138025, -2.56341656])
>>> sc.ber(x)
array([ 0.98438178, 0.75173418, -0.22138025, -2.56341656])
""")
add_newdoc("berp",
r"""
berp(x, out=None)
Derivative of the Kelvin function ber.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
The values of the derivative of ber.
See Also
--------
ber
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10#PT5
""")
add_newdoc("besselpoly",
r"""
besselpoly(a, lmb, nu, out=None)
Weighted integral of the Bessel function of the first kind.
Computes
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
Parameters
----------
a : array_like
Scale factor inside the Bessel function.
lmb : array_like
Power of `x`
nu : array_like
Order of the Bessel function.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Value of the integral.
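Examples
--------
A quick cross-check of the integral definition above against numerical
quadrature; `scipy.integrate.quad` and the sample parameters are choices made
here, not part of this routine:
>>> import numpy as np
>>> import scipy.special as sc
>>> from scipy.integrate import quad
>>> a, lmb, nu = 1.5, 2.0, 1.0
>>> integral, _ = quad(lambda x: x**lmb * sc.jv(nu, 2 * a * x), 0, 1)
>>> np.allclose(sc.besselpoly(a, lmb, nu), integral)
True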
""")
add_newdoc("beta",
r"""
beta(a, b, out=None)
Beta function.
This function is defined in [1]_ as
.. math::
B(a, b) = \int_0^1 t^{a-1}(1-t)^{b-1}dt
= \frac{\Gamma(a)\Gamma(b)}{\Gamma(a+b)},
where :math:`\Gamma` is the gamma function.
Parameters
----------
a, b : array-like
Real-valued arguments
out : ndarray, optional
Optional output array for the function result
Returns
-------
scalar or ndarray
Value of the beta function
See Also
--------
gamma : the gamma function
betainc : the incomplete beta function
betaln : the natural logarithm of the absolute
value of the beta function
References
----------
.. [1] NIST Digital Library of Mathematical Functions,
Eq. 5.12.1. https://dlmf.nist.gov/5.12
Examples
--------
>>> import scipy.special as sc
The beta function relates to the gamma function by the
definition given above:
>>> sc.beta(2, 3)
0.08333333333333333
>>> sc.gamma(2)*sc.gamma(3)/sc.gamma(2 + 3)
0.08333333333333333
As this relationship demonstrates, the beta function
is symmetric:
>>> sc.beta(1.7, 2.4)
0.16567527689031739
>>> sc.beta(2.4, 1.7)
0.16567527689031739
This function satisfies :math:`B(1, b) = 1/b`:
>>> sc.beta(1, 4)
0.25
""")
add_newdoc("betainc",
r"""
betainc(a, b, x, out=None)
Incomplete beta function.
Computes the incomplete beta function, defined as [1]_:
.. math::
I_x(a, b) = \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)} \int_0^x
t^{a-1}(1-t)^{b-1}dt,
for :math:`0 \leq x \leq 1`.
Parameters
----------
a, b : array-like
Positive, real-valued parameters
x : array-like
Real-valued such that :math:`0 \leq x \leq 1`,
the upper limit of integration
out : ndarray, optional
Optional output array for the function values
Returns
-------
array-like
Value of the incomplete beta function
See Also
--------
beta : beta function
betaincinv : inverse of the incomplete beta function
Notes
-----
The incomplete beta function is also sometimes defined
without the `gamma` terms, in which case the above
definition is the so-called regularized incomplete beta
function. Under this definition, you can get the incomplete
beta function by multiplying the result of the SciPy
function by `beta`.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/8.17
Examples
--------
Let :math:`B(a, b)` be the `beta` function.
>>> import scipy.special as sc
The coefficient in terms of `gamma` is equal to
:math:`1/B(a, b)`. Also, when :math:`x=1`
the integral is equal to :math:`B(a, b)`.
Therefore, :math:`I_{x=1}(a, b) = 1` for any :math:`a, b`.
>>> sc.betainc(0.2, 3.5, 1.0)
1.0
It satisfies
:math:`I_x(a, b) = x^a F(a, 1-b, a+1, x)/ (aB(a, b))`,
where :math:`F` is the hypergeometric function `hyp2f1`:
>>> a, b, x = 1.4, 3.1, 0.5
>>> x**a * sc.hyp2f1(a, 1 - b, a + 1, x)/(a * sc.beta(a, b))
0.8148904036225295
>>> sc.betainc(a, b, x)
0.8148904036225296
This function satisfies the relationship
:math:`I_x(a, b) = 1 - I_{1-x}(b, a)`:
>>> sc.betainc(2.2, 3.1, 0.4)
0.49339638807619446
>>> 1 - sc.betainc(3.1, 2.2, 1 - 0.4)
0.49339638807619446
""")
add_newdoc("betaincinv",
r"""
betaincinv(a, b, y, out=None)
Inverse of the incomplete beta function.
Computes :math:`x` such that:
.. math::
y = I_x(a, b) = \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)}
\int_0^x t^{a-1}(1-t)^{b-1}dt,
where :math:`I_x` is the regularized incomplete beta
function `betainc` and
:math:`\Gamma` is the `gamma` function [1]_.
Parameters
----------
a, b : array-like
Positive, real-valued parameters
y : array-like
Real-valued input
out : ndarray, optional
Optional output array for function values
Returns
-------
array-like
Value of the inverse of the incomplete beta function
See Also
--------
betainc : incomplete beta function
gamma : gamma function
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/8.17
Examples
--------
>>> import scipy.special as sc
This function is the inverse of `betainc` for fixed
values of :math:`a` and :math:`b`.
>>> a, b = 1.2, 3.1
>>> y = sc.betainc(a, b, 0.2)
>>> sc.betaincinv(a, b, y)
0.2
>>>
>>> a, b = 7.5, 0.4
>>> x = sc.betaincinv(a, b, 0.5)
>>> sc.betainc(a, b, x)
0.5
""")
add_newdoc("betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
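Examples
--------
A minimal sketch of the relation to `beta`, checked with ``np.allclose``
rather than exact printed decimals:
>>> import numpy as np
>>> import scipy.special as sc
>>> np.allclose(sc.betaln(3, 4), np.log(abs(sc.beta(3, 4))))
True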
""")
add_newdoc("boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("btdtr",
r"""
btdtr(a, b, x)
Cumulative distribution function of the beta distribution.
Returns the integral from zero to `x` of the beta probability density
function,
.. math::
I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
Shape parameter (a > 0).
b : array_like
Shape parameter (b > 0).
x : array_like
Upper limit of integration, in [0, 1].
Returns
-------
I : ndarray
Cumulative distribution function of the beta distribution with
parameters `a` and `b` at `x`.
See Also
--------
betainc
Notes
-----
This function is identical to the incomplete beta integral function
`betainc`.
Wrapper for the Cephes [1]_ routine `btdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
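Examples
--------
Since this function is documented above as identical to `betainc`, a simple
sketch is to compare the two on arbitrary sample values:
>>> import numpy as np
>>> import scipy.special as sc
>>> a, b, x = 2.5, 3.0, 0.4
>>> np.allclose(sc.btdtr(a, b, x), sc.betainc(a, b, x))
True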
""")
add_newdoc("btdtri",
r"""
btdtri(a, b, p)
The `p`-th quantile of the beta distribution.
This function is the inverse of the beta cumulative distribution function,
`btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
b : array_like
Shape parameter (`b` > 0).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
See Also
--------
betaincinv
btdtr
Notes
-----
The value of `x` is found by interval halving or Newton iterations.
Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent
problem of finding the inverse of the incomplete beta integral.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
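Examples
--------
A round-trip sketch of the inverse relation to `btdtr` stated above, with
arbitrary sample parameters:
>>> import numpy as np
>>> import scipy.special as sc
>>> a, b, p = 2.5, 3.0, 0.7
>>> x = sc.btdtri(a, b, p)
>>> np.allclose(sc.btdtr(a, b, x), p)
True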
""")
add_newdoc("cbrt",
"""
cbrt(x)
Element-wise cube root of `x`.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
The cube root of each value in `x`.
Examples
--------
>>> from scipy.special import cbrt
>>> cbrt(8)
2.0
>>> cbrt([-8, -3, 0.125, 1.331])
array([-2. , -1.44224957, 0.5 , 1.1 ])
""")
add_newdoc("chdtr",
r"""
chdtr(v, x, out=None)
Chi square cumulative distribution function.
Returns the area under the left tail (from 0 to `x`) of the Chi
square probability density function with `v` degrees of freedom:
.. math::
\frac{1}{2^{v/2} \Gamma(v/2)} \int_0^x t^{v/2 - 1} e^{-t/2} dt
Here :math:`\Gamma` is the Gamma function; see `gamma`. This
integral can be expressed in terms of the regularized lower
incomplete gamma function `gammainc` as
``gammainc(v / 2, x / 2)``. [1]_
Parameters
----------
v : array_like
Degrees of freedom.
x : array_like
Upper bound of the integral.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the cumulative distribution function.
See Also
--------
chdtrc, chdtri, chdtriv, gammainc
References
----------
.. [1] Chi-Square distribution,
https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
>>> import scipy.special as sc
It can be expressed in terms of the regularized lower incomplete
gamma function.
>>> v = 1
>>> x = np.arange(4)
>>> sc.chdtr(v, x)
array([0. , 0.68268949, 0.84270079, 0.91673548])
>>> sc.gammainc(v / 2, x / 2)
array([0. , 0.68268949, 0.84270079, 0.91673548])
""")
add_newdoc("chdtrc",
r"""
chdtrc(v, x, out=None)
Chi square survival function.
Returns the area under the right hand tail (from `x` to infinity)
of the Chi square probability density function with `v` degrees of
freedom:
.. math::
\frac{1}{2^{v/2} \Gamma(v/2)} \int_x^\infty t^{v/2 - 1} e^{-t/2} dt
Here :math:`\Gamma` is the Gamma function; see `gamma`. This
integral can be expressed in terms of the regularized upper
incomplete gamma function `gammaincc` as
``gammaincc(v / 2, x / 2)``. [1]_
Parameters
----------
v : array_like
Degrees of freedom.
x : array_like
Lower bound of the integral.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the survival function.
See Also
--------
chdtr, chdtri, chdtriv, gammaincc
References
----------
.. [1] Chi-Square distribution,
https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
>>> import scipy.special as sc
It can be expressed in terms of the regularized upper incomplete
gamma function.
>>> v = 1
>>> x = np.arange(4)
>>> sc.chdtrc(v, x)
array([1. , 0.31731051, 0.15729921, 0.08326452])
>>> sc.gammaincc(v / 2, x / 2)
array([1. , 0.31731051, 0.15729921, 0.08326452])
""")
add_newdoc("chdtri",
"""
chdtri(v, p, out=None)
Inverse to `chdtrc` with respect to `x`.
Returns `x` such that ``chdtrc(v, x) == p``.
Parameters
----------
v : array_like
Degrees of freedom.
p : array_like
Probability.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
x : scalar or ndarray
Value so that the probability a Chi square random variable
with `v` degrees of freedom is greater than `x` equals `p`.
See Also
--------
chdtrc, chdtr, chdtriv
References
----------
.. [1] Chi-Square distribution,
https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
>>> import scipy.special as sc
It inverts `chdtrc`.
>>> v, p = 1, 0.3
>>> sc.chdtrc(v, sc.chdtri(v, p))
0.3
>>> x = 1
>>> sc.chdtri(v, sc.chdtrc(v, x))
1.0
""")
add_newdoc("chdtriv",
"""
chdtriv(p, x, out=None)
Inverse to `chdtr` with respect to `v`.
Returns `v` such that ``chdtr(v, x) == p``.
Parameters
----------
p : array_like
Probability that the Chi square random variable is less than
or equal to `x`.
x : array_like
Nonnegative input.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Degrees of freedom.
See Also
--------
chdtr, chdtrc, chdtri
References
----------
.. [1] Chi-Square distribution,
https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
>>> import scipy.special as sc
It inverts `chdtr`.
>>> p, x = 0.5, 1
>>> sc.chdtr(sc.chdtriv(p, x), x)
0.5000000000202172
>>> v = 1
>>> sc.chdtriv(sc.chdtr(v, x), x)
1.0000000000000013
""")
add_newdoc("chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("chndtrix",
"""
chndtrix(p, df, nc)
Inverse to `chndtr` vs `x`
""")
add_newdoc("chndtridf",
"""
chndtridf(x, p, nc)
Inverse to `chndtr` vs `df`
""")
add_newdoc("chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to `chndtr` vs `nc`
""")
add_newdoc("cosdg",
"""
cosdg(x, out=None)
Cosine of the angle `x` given in degrees.
Parameters
----------
x : array_like
Angle, given in degrees.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Cosine of the input.
See Also
--------
sindg, tandg, cotdg
Examples
--------
>>> import scipy.special as sc
It is more accurate than using cosine directly.
>>> x = 90 + 180 * np.arange(3)
>>> sc.cosdg(x)
array([-0., 0., -0.])
>>> np.cos(x * np.pi / 180)
array([ 6.1232340e-17, -1.8369702e-16, 3.0616170e-16])
""")
add_newdoc("cosm1",
"""
cosm1(x, out=None)
cos(x) - 1 for use when `x` is near zero.
Parameters
----------
x : array_like
Real valued argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of ``cos(x) - 1``.
See Also
--------
expm1, log1p
Examples
--------
>>> import scipy.special as sc
It is more accurate than computing ``cos(x) - 1`` directly for
``x`` around 0.
>>> x = 1e-30
>>> np.cos(x) - 1
0.0
>>> sc.cosm1(x)
-5.0000000000000005e-61
""")
add_newdoc("cotdg",
"""
cotdg(x, out=None)
Cotangent of the angle `x` given in degrees.
Parameters
----------
x : array_like
Angle, given in degrees.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Cotangent at the input.
See Also
--------
sindg, cosdg, tandg
Examples
--------
>>> import scipy.special as sc
It is more accurate than using cotangent directly.
>>> x = 90 + 180 * np.arange(3)
>>> sc.cotdg(x)
array([0., 0., 0.])
>>> 1 / np.tan(x * np.pi / 180)
array([6.1232340e-17, 1.8369702e-16, 3.0616170e-16])
""")
add_newdoc("dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2), t=0..x).
See Also
--------
wofz, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-15, 15, num=1000)
>>> plt.plot(x, special.dawsn(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$dawsn(x)$')
>>> plt.show()
""")
add_newdoc("ellipe",
r"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpe`.
For `m > 0` the computation uses the approximation,
.. math:: E(m) \approx P(1-m) - (1-m) \log(1-m) Q(1-m),
where :math:`P` and :math:`Q` are tenth-order polynomials. For
`m < 0`, the relation
.. math:: E(m) = E(m/(m - 1)) \sqrt{1 - m}
is used.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
This function is used in finding the circumference of an
ellipse with semi-major axis `a` and semi-minor axis `b`.
>>> from scipy import special
>>> a = 3.5
>>> b = 2.1
>>> e_sq = 1.0 - b**2/a**2 # eccentricity squared
Then the circumference is found using the following:
>>> C = 4*a*special.ellipe(e_sq) # circumference formula
>>> C
17.868899204378693
When `a` and `b` are the same (meaning eccentricity is 0),
this reduces to the circumference of a circle.
>>> 4*a*special.ellipe(0.0) # formula for ellipse with a = b
21.991148575128552
>>> 2*np.pi*a # formula for circle of radius a
21.991148575128552
""")
add_newdoc("ellipeinc",
r"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellie`.
Computation uses arithmetic-geometric means algorithm.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
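Examples
--------
At amplitude ``phi = pi/2`` the incomplete integral should reduce to the
complete integral `ellipe`; a quick ``np.allclose`` sketch:
>>> import numpy as np
>>> import scipy.special as sc
>>> m = np.array([0.0, 0.3, 0.6, 0.9])
>>> np.allclose(sc.ellipeinc(np.pi / 2, m), sc.ellipe(m))
True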
""")
add_newdoc("ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter `m` between
0 and 1, and real argument `u`.
Parameters
----------
u : array_like
Argument.
m : array_like
Parameter.
Returns
-------
sn, cn, dn, ph : ndarrays
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value `ph` is such that if `u = ellipkinc(ph, m)`,
then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpj`.
These functions are periodic, with quarter-period on the real axis
equal to the complete elliptic integral `ellipk(m)`.
Relation to incomplete elliptic integral: If `u = ellipkinc(phi,m)`, then
`sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is called
the amplitude of `u`.
Computation is by means of the arithmetic-geometric mean algorithm,
except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`
close to 1, the approximation applies only for `phi < pi/2`.
See also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
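Examples
--------
A small numerical sketch of the amplitude relation described in the Notes,
using arbitrary sample values and ``np.allclose`` instead of printed decimals:
>>> import numpy as np
>>> import scipy.special as sc
>>> u, m = 0.7, 0.4
>>> sn, cn, dn, ph = sc.ellipj(u, m)
>>> np.allclose(sn, np.sin(ph))
True
>>> np.allclose(cn, np.cos(ph))
True
>>> np.allclose(sc.ellipkinc(ph, m), u)
True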
""")
add_newdoc("ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around `m` = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as `m = 1 - p`.
Returns
-------
K : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpk`.
For `p <= 1`, computation uses the approximation,
.. math:: K(p) \\approx P(p) - \\log(p) Q(p),
where :math:`P` and :math:`Q` are tenth-order polynomials. The
argument `p` is used internally rather than `m` so that the logarithmic
singularity at `m = 1` will be shifted to the origin; this preserves
maximum accuracy. For `p > 1`, the identity
.. math:: K(p) = K(1/p)/\\sqrt{p}
is used.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
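Examples
--------
A quick consistency sketch against `ellipk`, which by the definition above
should agree with ``ellipkm1(1 - m)``:
>>> import numpy as np
>>> import scipy.special as sc
>>> p = np.array([0.2, 0.5, 0.8])
>>> np.allclose(sc.ellipkm1(p), sc.ellipk(1 - p))
True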
""")
add_newdoc("ellipk",
r"""
ellipk(m)
Complete elliptic integral of the first kind.
This function is defined as
.. math:: K(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{-1/2} dt
Parameters
----------
m : array_like
The parameter of the elliptic integral.
Returns
-------
K : array_like
Value of the elliptic integral.
Notes
-----
For more precision around point m = 1, use `ellipkm1`, which this
function calls.
The parameterization in terms of :math:`m` follows that of section
17.2 in [1]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind around m = 1
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
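Examples
--------
Two small checks that follow from the definition above: ``K(0) = pi/2``, and
agreement with `ellipkm1` evaluated at ``1 - m``:
>>> import numpy as np
>>> import scipy.special as sc
>>> np.allclose(sc.ellipk(0.0), np.pi / 2)
True
>>> m = np.array([0.1, 0.4, 0.7])
>>> np.allclose(sc.ellipk(m), sc.ellipkm1(1 - m))
True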
""")
add_newdoc("ellipkinc",
r"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{-1/2} dt
This function is also called `F(phi, m)`.
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
Wrapper for the Cephes [1]_ routine `ellik`. The computation is
carried out using the arithmetic-geometric mean algorithm.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
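Examples
--------
At amplitude ``phi = pi/2`` the incomplete integral should reduce to the
complete integral `ellipk`; a quick ``np.allclose`` sketch:
>>> import numpy as np
>>> import scipy.special as sc
>>> m = np.array([0.0, 0.3, 0.6, 0.9])
>>> np.allclose(sc.ellipkinc(np.pi / 2, m), sc.ellipk(m))
True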
""")
add_newdoc("entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points `x`.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.15.0
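Examples
--------
A short sketch of the piecewise definition above, on arbitrary sample points:
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.array([0.5, 1.0, 2.0])
>>> np.allclose(sc.entr(x), -x * np.log(x))
True
>>> float(sc.entr(0.0))
0.0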
""")
add_newdoc("erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points `x`.
See Also
--------
erfc, erfinv, erfcinv, wofz, erfcx, erfi
Notes
-----
The cumulative distribution function of the standard normal distribution is
given by ``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] https://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erf(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erf(x)$')
>>> plt.show()
""")
add_newdoc("erfc",
"""
erfc(x, out=None)
Complementary error function, ``1 - erf(x)``.
Parameters
----------
x : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the complementary error function
See Also
--------
erf, erfi, erfcx, dawsn, wofz
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfc(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfc(x)$')
>>> plt.show()
""")
add_newdoc("erfi",
"""
erfi(z, out=None)
Imaginary error function, ``-i erf(i z)``.
Parameters
----------
z : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the imaginary error function
See Also
--------
erf, erfc, erfcx, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfi(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfi(x)$')
>>> plt.show()
""")
add_newdoc("erfcx",
"""
erfcx(x, out=None)
Scaled complementary error function, ``exp(x**2) * erfc(x)``.
Parameters
----------
x : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the scaled complementary error function
See Also
--------
erf, erfc, erfi, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfcx(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfcx(x)$')
>>> plt.show()
""")
add_newdoc("erfinv",
"""Inverse of the error function.
Computes the inverse of the error function.
In the complex domain, there is no unique complex number w satisfying
erf(w) = z, so a true inverse function would be multivalued.
When the domain is restricted to the real line, -1 < x < 1, there is a unique
real number satisfying erf(erfinv(x)) = x.
Parameters
----------
y : ndarray
Argument at which to evaluate. Domain: [-1, 1]
Returns
-------
erfinv : ndarray
The inverse of erf of y, element-wise.
See Also
--------
erf : Error function of a complex argument
erfc : Complementary error function, ``1 - erf(x)``
erfcinv : Inverse of the complementary error function
Examples
--------
1) evaluating a float number
>>> from scipy import special
>>> special.erfinv(0.5)
0.4769362762044698
2) evaluating an ndarray
>>> from scipy import special
>>> y = np.linspace(-1.0, 1.0, num=10)
>>> special.erfinv(y)
array([ -inf, -0.86312307, -0.5407314 , -0.30457019, -0.0987901 ,
0.0987901 , 0.30457019, 0.5407314 , 0.86312307, inf])
""")
add_newdoc("erfcinv",
"""Inverse of the complementary error function.
Computes the inverse of the complementary error function.
In the complex domain, there is no unique complex number w satisfying
erfc(w) = z, so a true inverse function would be multivalued.
When the domain is restricted to the real line, 0 < x < 2, there is a unique
real number satisfying erfc(erfcinv(x)) = x.
It is related to the inverse of the error function by ``erfcinv(1 - x) = erfinv(x)``.
Parameters
----------
y : ndarray
Argument at which to evaluate. Domain: [0, 2]
Returns
-------
erfcinv : ndarray
The inverse of erfc of y, element-wise
See Also
--------
erf : Error function of a complex argument
erfc : Complementary error function, ``1 - erf(x)``
erfinv : Inverse of the error function
Examples
--------
1) evaluating a float number
>>> from scipy import special
>>> special.erfcinv(0.5)
0.4769362762044698
2) evaluating an ndarray
>>> from scipy import special
>>> y = np.linspace(0.0, 2.0, num=11)
>>> special.erfcinv(y)
array([ inf, 0.9061938 , 0.59511608, 0.37080716, 0.17914345,
-0. , -0.17914345, -0.37080716, -0.59511608, -0.9061938 ,
-inf])
""")
add_newdoc("eval_jacobi",
r"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
The Jacobi polynomials can be defined via the Gauss hypergeometric
function :math:`{}_2F_1` as
.. math::
P_n^{(\alpha, \beta)}(x) = \frac{(\alpha + 1)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 1 + \alpha + \beta + n; \alpha + 1; (1 - x)/2)
where :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
:math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.42 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
beta : array_like
Parameter
x : array_like
Points at which to evaluate the polynomial
Returns
-------
P : ndarray
Values of the Jacobi polynomial
See Also
--------
roots_jacobi : roots and quadrature weights of Jacobi polynomials
jacobi : Jacobi polynomial object
hyp2f1 : Gauss hypergeometric function
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
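Examples
--------
A rough numerical check of the hypergeometric representation above; `poch`
and `gamma` supply the Pochhammer symbol and the factorial, and the sample
values are arbitrary:
>>> import numpy as np
>>> import scipy.special as sc
>>> n, alpha, beta, x = 3, 0.5, 1.5, 0.3
>>> lhs = sc.eval_jacobi(n, alpha, beta, x)
>>> rhs = (sc.poch(alpha + 1, n) / sc.gamma(n + 1)
...        * sc.hyp2f1(-n, 1 + alpha + beta + n, alpha + 1, (1 - x) / 2))
>>> np.allclose(lhs, rhs)
True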
""")
add_newdoc("eval_sh_jacobi",
r"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
Defined by
.. math::
G_n^{(p, q)}(x)
= \binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1),
where :math:`P_n^{(\cdot, \cdot)}` is the n-th Jacobi
polynomial. See 22.5.2 in [AS]_ for details.
Parameters
----------
n : int
Degree of the polynomial. If not an integer, the result is
determined via the relation to `binom` and `eval_jacobi`.
p : float
Parameter
q : float
Parameter
x : float
Points at which to evaluate the polynomial
Returns
-------
G : ndarray
Values of the shifted Jacobi polynomial.
See Also
--------
roots_sh_jacobi : roots and quadrature weights of shifted Jacobi
polynomials
sh_jacobi : shifted Jacobi polynomial object
eval_jacobi : evaluate Jacobi polynomials
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("eval_gegenbauer",
r"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
The Gegenbauer polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
C_n^{(\alpha)}(x) = \frac{(2\alpha)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 2\alpha + n; \alpha + 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.46 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
x : array_like
Points at which to evaluate the Gegenbauer polynomial
Returns
-------
C : ndarray
Values of the Gegenbauer polynomial
See Also
--------
roots_gegenbauer : roots and quadrature weights of Gegenbauer
polynomials
gegenbauer : Gegenbauer polynomial object
hyp2f1 : Gauss hypergeometric function
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
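Examples
--------
A rough numerical check of the hypergeometric representation above, with
arbitrary sample values; `poch` and `gamma` supply the Pochhammer symbol and
the factorial:
>>> import numpy as np
>>> import scipy.special as sc
>>> n, alpha, x = 3, 1.5, 0.4
>>> lhs = sc.eval_gegenbauer(n, alpha, x)
>>> rhs = (sc.poch(2 * alpha, n) / sc.gamma(n + 1)
...        * sc.hyp2f1(-n, 2 * alpha + n, alpha + 0.5, (1 - x) / 2))
>>> np.allclose(lhs, rhs)
True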
""")
add_newdoc("eval_chebyt",
r"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind at a point.
The Chebyshev polynomials of the first kind can be defined via the
Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.47 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
T : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyt : roots and quadrature weights of Chebyshev
polynomials of the first kind
chebyt : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
Notes
-----
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
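Examples
--------
A quick sketch of the classical identity ``T_n(cos(theta)) = cos(n*theta)``
(a standard property, not stated above):
>>> import numpy as np
>>> import scipy.special as sc
>>> theta = np.linspace(0.1, 3.0, 5)
>>> np.allclose(sc.eval_chebyt(4, np.cos(theta)), np.cos(4 * theta))
True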
""")
add_newdoc("eval_chebyu",
r"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind at a point.
The Chebyshev polynomials of the second kind can be defined via
the Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.48 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
U : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyu : roots and quadrature weights of Chebyshev
polynomials of the second kind
chebyu : Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
hyp2f1 : Gauss hypergeometric function
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
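Examples
--------
A quick sketch of the classical identity
``U_n(cos(theta)) = sin((n + 1)*theta) / sin(theta)`` (a standard property,
not stated above):
>>> import numpy as np
>>> import scipy.special as sc
>>> theta = np.linspace(0.1, 3.0, 5)
>>> np.allclose(sc.eval_chebyu(3, np.cos(theta)),
...             np.sin(4 * theta) / np.sin(theta))
True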
""")
add_newdoc("eval_chebys",
r"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
S_n(x) = U_n(x/2)
where :math:`U_n` is a Chebyshev polynomial of the second
kind. See 22.5.13 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
S : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebys : roots and quadrature weights of Chebyshev
polynomials of the second kind on [-2, 2]
chebys : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> import scipy.special as sc
They are a scaled version of the Chebyshev polynomials of the
second kind.
>>> x = np.linspace(-2, 2, 6)
>>> sc.eval_chebys(3, x)
array([-4. , 0.672, 0.736, -0.736, -0.672, 4. ])
>>> sc.eval_chebyu(3, x / 2)
array([-4. , 0.672, 0.736, -0.736, -0.672, 4. ])
""")
add_newdoc("eval_chebyc",
r"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
C_n(x) = 2 T_n(x/2)
where :math:`T_n` is a Chebyshev polynomial of the first kind. See
22.5.11 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
C : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyc : roots and quadrature weights of Chebyshev
polynomials of the first kind on [-2, 2]
chebyc : Chebyshev polynomial object
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
eval_chebyt : evaluate Chebyshev polynomials of the first kind
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> import scipy.special as sc
They are a scaled version of the Chebyshev polynomials of the
first kind.
>>> x = np.linspace(-2, 2, 6)
>>> sc.eval_chebyc(3, x)
array([-2. , 1.872, 1.136, -1.136, -1.872, 2. ])
>>> 2 * sc.eval_chebyt(3, x / 2)
array([-2. , 1.872, 1.136, -1.136, -1.872, 2. ])
""")
add_newdoc("eval_sh_chebyt",
r"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the first kind at a
point.
These polynomials are defined as
.. math::
T_n^*(x) = T_n(2x - 1)
where :math:`T_n` is a Chebyshev polynomial of the first kind. See
22.5.14 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
T : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyt : roots and quadrature weights of shifted
Chebyshev polynomials of the first kind
sh_chebyt : shifted Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
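Examples
--------
A direct sketch of the shift relation defined above:
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.linspace(0, 1, 5)
>>> np.allclose(sc.eval_sh_chebyt(3, x), sc.eval_chebyt(3, 2 * x - 1))
True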
""")
add_newdoc("eval_sh_chebyu",
r"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the second kind at a
point.
These polynomials are defined as
.. math::
U_n^*(x) = U_n(2x - 1)
where :math:`U_n` is a Chebyshev polynomial of the second kind. See
22.5.15 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
U : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyu : roots and quadrature weights of shifted
Chebyshev polynomials of the second kind
sh_chebyu : shifted Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("eval_legendre",
r"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
The Legendre polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.49 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Legendre polynomial
Returns
-------
P : ndarray
Values of the Legendre polynomial
See Also
--------
roots_legendre : roots and quadrature weights of Legendre
polynomials
legendre : Legendre polynomial object
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.legendre.Legendre : Legendre series
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> from scipy.special import eval_legendre
Evaluate the zero-order Legendre polynomial at x = 0
>>> eval_legendre(0, 0)
1.0
Evaluate the first-order Legendre polynomial between -1 and 1
>>> X = np.linspace(-1, 1, 5) # Domain of Legendre polynomials
>>> eval_legendre(1, X)
array([-1. , -0.5, 0. , 0.5, 1. ])
Evaluate Legendre polynomials of order 0 through 4 at x = 0
>>> N = range(0, 5)
>>> eval_legendre(N, 0)
array([ 1. , 0. , -0.5 , 0. , 0.375])
Plot Legendre polynomials of order 0 through 4
>>> X = np.linspace(-1, 1)
>>> import matplotlib.pyplot as plt
>>> for n in range(0, 5):
... y = eval_legendre(n, X)
... plt.plot(X, y, label=r'$P_{}(x)$'.format(n))
>>> plt.title("Legendre Polynomials")
>>> plt.xlabel("x")
>>> plt.ylabel(r'$P_n(x)$')
>>> plt.legend(loc='lower right')
>>> plt.show()
""")
add_newdoc("eval_sh_legendre",
r"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
These polynomials are defined as
.. math::
P_n^*(x) = P_n(2x - 1)
where :math:`P_n` is a Legendre polynomial. See 2.2.11 in [AS]_
for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the value is
determined via the relation to `eval_legendre`.
x : array_like
Points at which to evaluate the shifted Legendre polynomial
Returns
-------
P : ndarray
Values of the shifted Legendre polynomial
See Also
--------
roots_sh_legendre : roots and quadrature weights of shifted
Legendre polynomials
sh_legendre : shifted Legendre polynomial object
eval_legendre : evaluate Legendre polynomials
numpy.polynomial.legendre.Legendre : Legendre series
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
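Examples
--------
A direct sketch of the shift relation defined above:
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.linspace(0, 1, 5)
>>> np.allclose(sc.eval_sh_legendre(3, x), sc.eval_legendre(3, 2 * x - 1))
True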
""")
add_newdoc("eval_genlaguerre",
r"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
The generalized Laguerre polynomials can be defined via the
confluent hypergeometric function :math:`{}_1F_1` as
.. math::
L_n^{(\alpha)}(x) = \binom{n + \alpha}{n}
{}_1F_1(-n, \alpha + 1, x).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.54 in [AS]_ for details. The Laguerre
polynomials are the special case where :math:`\alpha = 0`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the confluent hypergeometric
function.
alpha : array_like
Parameter; must have ``alpha > -1``
x : array_like
Points at which to evaluate the generalized Laguerre
polynomial
Returns
-------
L : ndarray
Values of the generalized Laguerre polynomial
See Also
--------
roots_genlaguerre : roots and quadrature weights of generalized
Laguerre polynomials
genlaguerre : generalized Laguerre polynomial object
hyp1f1 : confluent hypergeometric function
eval_laguerre : evaluate Laguerre polynomials
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
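Examples
--------
A rough check of the confluent hypergeometric representation above; `binom`
supplies the binomial coefficient and the sample values are arbitrary:
>>> import numpy as np
>>> import scipy.special as sc
>>> n, alpha, x = 4, 0.5, 1.2
>>> lhs = sc.eval_genlaguerre(n, alpha, x)
>>> rhs = sc.binom(n + alpha, n) * sc.hyp1f1(-n, alpha + 1, x)
>>> np.allclose(lhs, rhs)
True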
""")
add_newdoc("eval_laguerre",
r"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
The Laguerre polynomials can be defined via the confluent
hypergeometric function :math:`{}_1F_1` as
.. math::
L_n(x) = {}_1F_1(-n, 1, x).
See 22.5.16 and 22.5.54 in [AS]_ for details. When :math:`n` is an
integer the result is a polynomial of degree :math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the confluent hypergeometric
function.
x : array_like
Points at which to evaluate the Laguerre polynomial
Returns
-------
L : ndarray
Values of the Laguerre polynomial
See Also
--------
roots_laguerre : roots and quadrature weights of Laguerre
polynomials
laguerre : Laguerre polynomial object
numpy.polynomial.laguerre.Laguerre : Laguerre series
eval_genlaguerre : evaluate generalized Laguerre polynomials
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
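Examples
--------
A rough check of the confluent hypergeometric representation above, on
arbitrary sample points:
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.array([0.5, 1.0, 2.0])
>>> np.allclose(sc.eval_laguerre(3, x), sc.hyp1f1(-3, 1, x))
True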
""")
add_newdoc("eval_hermite",
r"""
eval_hermite(n, x, out=None)
Evaluate physicist's Hermite polynomial at a point.
Defined by
.. math::
H_n(x) = (-1)^n e^{x^2} \frac{d^n}{dx^n} e^{-x^2};
:math:`H_n` is a polynomial of degree :math:`n`. See 22.11.7 in
[AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
H : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermite : roots and quadrature weights of physicist's
Hermite polynomials
hermite : physicist's Hermite polynomial object
numpy.polynomial.hermite.Hermite : Physicist's Hermite series
eval_hermitenorm : evaluate Probabilist's Hermite polynomials
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("eval_hermitenorm",
r"""
eval_hermitenorm(n, x, out=None)
Evaluate probabilist's (normalized) Hermite polynomial at a
point.
Defined by
.. math::
He_n(x) = (-1)^n e^{x^2/2} \frac{d^n}{dx^n} e^{-x^2/2};
:math:`He_n` is a polynomial of degree :math:`n`. See 22.11.8 in
[AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
He : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermitenorm : roots and quadrature weights of probabilist's
Hermite polynomials
hermitenorm : probabilist's Hermite polynomial object
numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series
eval_hermite : evaluate physicist's Hermite polynomials
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("exp1",
r"""
exp1(z, out=None)
Exponential integral E1.
For complex :math:`z \ne 0` the exponential integral can be defined as
[1]_
.. math::
E_1(z) = \int_z^\infty \frac{e^{-t}}{t} dt,
where the path of the integral does not cross the negative real
axis or pass through the origin.
Parameters
----------
z : array_like
Real or complex argument.
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the exponential integral E1
See Also
--------
expi : exponential integral :math:`Ei`
expn : generalization of :math:`E_1`
Notes
-----
For :math:`x > 0` it is related to the exponential integral
:math:`Ei` (see `expi`) via the relation
.. math::
E_1(x) = -Ei(-x).
References
----------
.. [1] Digital Library of Mathematical Functions, 6.2.1
https://dlmf.nist.gov/6.2#E1
Examples
--------
>>> import scipy.special as sc
It has a pole at 0.
>>> sc.exp1(0)
inf
It has a branch cut on the negative real axis.
>>> sc.exp1(-1)
nan
>>> sc.exp1(complex(-1, 0))
(-1.8951178163559368-3.141592653589793j)
>>> sc.exp1(complex(-1, -0.0))
(-1.8951178163559368+3.141592653589793j)
It approaches 0 along the positive real axis.
>>> sc.exp1([1, 10, 100, 1000])
array([2.19383934e-01, 4.15696893e-06, 3.68359776e-46, 0.00000000e+00])
It is related to `expi`.
>>> x = np.array([1, 2, 3, 4])
>>> sc.exp1(x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
>>> -sc.expi(-x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
""")
add_newdoc("exp10",
"""
exp10(x)
Compute ``10**x`` element-wise.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
``10**x``, computed element-wise.
Examples
--------
>>> from scipy.special import exp10
>>> exp10(3)
1000.0
>>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])
>>> exp10(x)
array([[ 0.1 , 0.31622777, 1. ],
[ 3.16227766, 10. , 31.6227766 ]])
""")
add_newdoc("exp2",
"""
exp2(x)
Compute ``2**x`` element-wise.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
``2**x``, computed element-wise.
Examples
--------
>>> from scipy.special import exp2
>>> exp2(3)
8.0
>>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])
>>> exp2(x)
array([[ 0.5 , 0.70710678, 1. ],
[ 1.41421356, 2. , 2.82842712]])
""")
add_newdoc("expi",
r"""
expi(x, out=None)
Exponential integral Ei.
For real :math:`x`, the exponential integral is defined as [1]_
.. math::
Ei(x) = \int_{-\infty}^x \frac{e^t}{t} dt.
For :math:`x > 0` the integral is understood as a Cauchy principal
value.
It is extended to the complex plane by analytic continuation of
the function on the interval :math:`(0, \infty)`. The complex
variant has a branch cut on the negative real axis.
Parameters
----------
x : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the exponential integral
Notes
-----
The exponential integrals :math:`E_1` and :math:`Ei` satisfy the
relation
.. math::
E_1(x) = -Ei(-x)
for :math:`x > 0`.
See Also
--------
exp1 : Exponential integral :math:`E_1`
expn : Generalized exponential integral :math:`E_n`
References
----------
.. [1] Digital Library of Mathematical Functions, 6.2.5
https://dlmf.nist.gov/6.2#E5
Examples
--------
>>> import scipy.special as sc
It is related to `exp1`.
>>> x = np.array([1, 2, 3, 4])
>>> -sc.expi(-x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
>>> sc.exp1(x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
The complex variant has a branch cut on the negative real axis.
>>> import scipy.special as sc
>>> sc.expi(-1 + 1e-12j)
(-0.21938393439552062+3.1415926535894254j)
>>> sc.expi(-1 - 1e-12j)
(-0.21938393439552062-3.1415926535894254j)
As the complex variant approaches the branch cut, the real parts
approach the value of the real variant.
>>> sc.expi(-1)
-0.21938393439552062
The SciPy implementation returns the real variant for complex
values on the branch cut.
>>> sc.expi(complex(-1, 0.0))
(-0.21938393439552062-0j)
>>> sc.expi(complex(-1, -0.0))
(-0.21938393439552062-0j)
""")
add_newdoc('expit',
"""
expit(x)
Expit (a.k.a. logistic sigmoid) ufunc for ndarrays.
The expit function, also known as the logistic sigmoid function, is
defined as ``expit(x) = 1/(1+exp(-x))``. It is the inverse of the
logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are `expit` of the corresponding entry of x.
See Also
--------
logit
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
Examples
--------
>>> from scipy.special import expit, logit
>>> expit([-np.inf, -1.5, 0, 1.5, np.inf])
array([ 0. , 0.18242552, 0.5 , 0.81757448, 1. ])
`logit` is the inverse of `expit`:
>>> logit(expit([-2.5, 0, 3.1, 5.0]))
array([-2.5, 0. , 3.1, 5. ])
Plot expit(x) for x in [-6, 6]:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-6, 6, 121)
>>> y = expit(x)
>>> plt.plot(x, y)
>>> plt.grid()
>>> plt.xlim(-6, 6)
>>> plt.xlabel('x')
>>> plt.title('expit(x)')
>>> plt.show()
""")
add_newdoc("expm1",
"""
expm1(x)
Compute ``exp(x) - 1``.
When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation
of ``exp(x) - 1`` can suffer from catastrophic loss of precision.
``expm1(x)`` is implemented to avoid the loss of precision that occurs when
`x` is near zero.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
``exp(x) - 1`` computed element-wise.
Examples
--------
>>> from scipy.special import expm1
>>> expm1(1.0)
1.7182818284590451
>>> expm1([-0.2, -0.1, 0, 0.1, 0.2])
array([-0.18126925, -0.09516258, 0. , 0.10517092, 0.22140276])
The exact value of ``exp(7.5e-13) - 1`` is::
7.5000000000028125000000007031250000001318...*10**-13.
Here is what ``expm1(7.5e-13)`` gives:
>>> expm1(7.5e-13)
7.5000000000028135e-13
Compare that to ``exp(7.5e-13) - 1``, where the subtraction results in
a "catastrophic" loss of precision:
>>> np.exp(7.5e-13) - 1
7.5006667543675576e-13
""")
add_newdoc("expn",
r"""
expn(n, x, out=None)
Generalized exponential integral En.
For integer :math:`n \geq 0` and real :math:`x \geq 0` the
generalized exponential integral is defined as [dlmf]_
.. math::
E_n(x) = x^{n - 1} \int_x^\infty \frac{e^{-t}}{t^n} dt.
Parameters
----------
n : array_like
Non-negative integers
x : array_like
Real argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the generalized exponential integral
See Also
--------
exp1 : special case of :math:`E_n` for :math:`n = 1`
expi : related to :math:`E_n` when :math:`n = 1`
References
----------
.. [dlmf] Digital Library of Mathematical Functions, 8.19.2
https://dlmf.nist.gov/8.19#E2
Examples
--------
>>> import scipy.special as sc
Its domain is nonnegative n and x.
>>> sc.expn(-1, 1.0), sc.expn(1, -1.0)
(nan, nan)
It has a pole at ``x = 0`` for ``n = 0, 1``; for larger ``n`` it
is equal to ``1 / (n - 1)``.
>>> sc.expn([0, 1, 2, 3, 4], 0)
array([ inf, inf, 1. , 0.5 , 0.33333333])
For n equal to 0 it reduces to ``exp(-x) / x``.
>>> x = np.array([1, 2, 3, 4])
>>> sc.expn(0, x)
array([0.36787944, 0.06766764, 0.01659569, 0.00457891])
>>> np.exp(-x) / x
array([0.36787944, 0.06766764, 0.01659569, 0.00457891])
For n equal to 1 it reduces to `exp1`.
>>> sc.expn(1, x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
>>> sc.exp1(x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
""")
add_newdoc("exprel",
r"""
exprel(x)
Relative error exponential, ``(exp(x) - 1)/x``.
When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation
of ``exp(x) - 1`` can suffer from catastrophic loss of precision.
``exprel(x)`` is implemented to avoid the loss of precision that occurs when
`x` is near zero.
Parameters
----------
x : ndarray
Input array. `x` must contain real numbers.
Returns
-------
float
``(exp(x) - 1)/x``, computed element-wise.
See Also
--------
expm1
Notes
-----
.. versionadded:: 0.17.0
Examples
--------
>>> from scipy.special import exprel
>>> exprel(0.01)
1.0050167084168056
>>> exprel([-0.25, -0.1, 0, 0.1, 0.25])
array([ 0.88479687, 0.95162582, 1. , 1.05170918, 1.13610167])
Compare ``exprel(5e-9)`` to the naive calculation. The exact value
is ``1.00000000250000000416...``.
>>> exprel(5e-9)
1.0000000025
>>> (np.exp(5e-9) - 1)/5e-9
0.99999999392252903
""")
add_newdoc("fdtr",
r"""
fdtr(dfn, dfd, x)
F cumulative distribution function.
Returns the value of the cumulative distribution function of the
F-distribution, also known as Snedecor's F-distribution or the
Fisher-Snedecor distribution.
The F-distribution with parameters :math:`d_n` and :math:`d_d` is the
distribution of the random variable,
.. math::
X = \frac{U_n/d_n}{U_d/d_d},
where :math:`U_n` and :math:`U_d` are random variables distributed
:math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom,
respectively.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`.
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2).
Wrapper for the Cephes [1]_ routine `fdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
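Examples
--------
A quick numerical sketch of the regularized incomplete beta relation quoted
in the Notes, with arbitrary sample parameters:
>>> import numpy as np
>>> import scipy.special as sc
>>> dfn, dfd, x = 5.0, 8.0, 2.5
>>> np.allclose(sc.fdtr(dfn, dfd, x),
...             sc.betainc(dfn / 2, dfd / 2, dfn * x / (dfd + dfn * x)))
True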
""")
add_newdoc("fdtrc",
r"""
fdtrc(dfn, dfd, x)
F survival function.
Returns the complemented F-distribution function (the integral of the
density from `x` to infinity).
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The complemented F-distribution function with parameters `dfn` and
`dfd` at `x`.
See also
--------
fdtr
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2).
Wrapper for the Cephes [1]_ routine `fdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
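Examples
--------
An illustrative check with arbitrary sample inputs: the survival function
and the CDF `fdtr` should sum to one. The result is shown as a boolean
comparison rather than exact digits.
>>> import numpy as np
>>> import scipy.special as sc
>>> dfn, dfd, x = 3.0, 10.0, 2.0
>>> np.allclose(sc.fdtr(dfn, dfd, x) + sc.fdtrc(dfn, dfd, x), 1.0)
True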
""")
add_newdoc("fdtri",
r"""
fdtri(dfn, dfd, p)
The `p`-th quantile of the F-distribution.
This function is the inverse of the F-distribution CDF, `fdtr`, returning
the `x` such that `fdtr(dfn, dfd, x) = p`.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
Notes
-----
The computation is carried out using the relation to the inverse
regularized beta function, :math:`I^{-1}_x(a, b)`. Let
:math:`z = I^{-1}_p(d_d/2, d_n/2).` Then,
.. math::
x = \frac{d_d (1 - z)}{d_n z}.
If `p` is such that :math:`x < 0.5`, the following relation is used
instead for improved stability: let
:math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then,
.. math::
x = \frac{d_d z'}{d_n (1 - z')}.
Wrapper for the Cephes [1]_ routine `fdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
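Examples
--------
An illustrative round-trip check with arbitrary sample inputs; the result is
shown as a boolean comparison rather than exact digits.
>>> import numpy as np
>>> import scipy.special as sc
>>> dfn, dfd, x = 3.0, 10.0, 2.0
>>> p = sc.fdtr(dfn, dfd, x)
>>> np.allclose(sc.fdtri(dfn, dfd, p), x)
True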
""")
add_newdoc("fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to `fdtr` vs dfd
Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to `fdtr` vs dfn
Finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("fresnel",
r"""
fresnel(z, out=None)
Fresnel integrals.
The Fresnel integrals are defined as
.. math::
S(z) &= \int_0^z \sin(\pi t^2 /2) dt \\
C(z) &= \int_0^z \cos(\pi t^2 /2) dt.
See [dlmf]_ for details.
Parameters
----------
z : array_like
Real or complex valued argument
out : 2-tuple of ndarrays, optional
Optional output arrays for the function results
Returns
-------
S, C : 2-tuple of scalar or ndarray
Values of the Fresnel integrals
See Also
--------
fresnel_zeros : zeros of the Fresnel integrals
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/7.2#iii
Examples
--------
>>> import scipy.special as sc
As z goes to infinity along the real axis, S and C converge to 0.5.
>>> S, C = sc.fresnel([0.1, 1, 10, 100, np.inf])
>>> S
array([0.00052359, 0.43825915, 0.46816998, 0.4968169 , 0.5 ])
>>> C
array([0.09999753, 0.7798934 , 0.49989869, 0.4999999 , 0.5 ])
They are related to the error function `erf`.
>>> z = np.array([1, 2, 3, 4])
>>> zeta = 0.5 * np.sqrt(np.pi) * (1 - 1j) * z
>>> S, C = sc.fresnel(z)
>>> C + 1j*S
array([0.7798934 +0.43825915j, 0.48825341+0.34341568j,
0.60572079+0.496313j , 0.49842603+0.42051575j])
>>> 0.5 * (1 + 1j) * sc.erf(zeta)
array([0.7798934 +0.43825915j, 0.48825341+0.34341568j,
0.60572079+0.496313j , 0.49842603+0.42051575j])
""")
add_newdoc("gamma",
r"""
gamma(z)
gamma function.
The gamma function is defined as
.. math::
\Gamma(z) = \int_0^\infty t^{z-1} e^{-t} dt
for :math:`\Re(z) > 0` and is extended to the rest of the complex
plane by analytic continuation. See [dlmf]_ for more details.
Parameters
----------
z : array_like
Real or complex valued argument
Returns
-------
scalar or ndarray
Values of the gamma function
Notes
-----
The gamma function is often referred to as the generalized
factorial since :math:`\Gamma(n + 1) = n!` for natural numbers
:math:`n`. More generally it satisfies the recurrence relation
:math:`\Gamma(z + 1) = z \cdot \Gamma(z)` for complex :math:`z`,
which, combined with the fact that :math:`\Gamma(1) = 1`, implies
the above identity for :math:`z = n`.
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/5.2#E1
Examples
--------
>>> from scipy.special import gamma, factorial
>>> gamma([0, 0.5, 1, 5])
array([ inf, 1.77245385, 1. , 24. ])
>>> z = 2.5 + 1j
>>> gamma(z)
(0.77476210455108352+0.70763120437959293j)
>>> gamma(z+1), z*gamma(z) # Recurrence property
((1.2292740569981171+2.5438401155000685j),
(1.2292740569981158+2.5438401155000658j))
>>> gamma(0.5)**2 # gamma(0.5) = sqrt(pi)
3.1415926535897927
Plot gamma(x) for real x
>>> x = np.linspace(-3.5, 5.5, 2251)
>>> y = gamma(x)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'b', alpha=0.6, label='gamma(x)')
>>> k = np.arange(1, 7)
>>> plt.plot(k, factorial(k-1), 'k*', alpha=0.6,
... label='(x-1)!, x = 1, 2, ...')
>>> plt.xlim(-3.5, 5.5)
>>> plt.ylim(-10, 25)
>>> plt.grid()
>>> plt.xlabel('x')
>>> plt.legend(loc='lower right')
>>> plt.show()
""")
add_newdoc("gammainc",
r"""
gammainc(a, x)
Regularized lower incomplete gamma function.
It is defined as
.. math::
P(a, x) = \frac{1}{\Gamma(a)} \int_0^x t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. See [dlmf]_ for details.
Parameters
----------
a : array_like
Positive parameter
x : array_like
Nonnegative argument
Returns
-------
scalar or ndarray
Values of the lower incomplete gamma function
Notes
-----
The function satisfies the relation ``gammainc(a, x) +
gammaincc(a, x) = 1`` where `gammaincc` is the regularized upper
incomplete gamma function.
The implementation largely follows that of [boost]_.
See also
--------
gammaincc : regularized upper incomplete gamma function
gammaincinv : inverse of the regularized lower incomplete gamma
function with respect to `x`
gammainccinv : inverse of the regularized upper incomplete gamma
function with respect to `x`
References
----------
.. [dlmf] NIST Digital Library of Mathematical functions
https://dlmf.nist.gov/8.2#E4
.. [boost] Maddock et. al., "Incomplete Gamma Functions",
https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
Examples
--------
>>> import scipy.special as sc
It is the CDF of the gamma distribution, so it starts at 0 and
monotonically increases to 1.
>>> sc.gammainc(0.5, [0, 1, 10, 100])
array([0. , 0.84270079, 0.99999226, 1. ])
It is equal to one minus the upper incomplete gamma function.
>>> a, x = 0.5, 0.4
>>> sc.gammainc(a, x)
0.6289066304773024
>>> 1 - sc.gammaincc(a, x)
0.6289066304773024
""")
add_newdoc("gammaincc",
r"""
gammaincc(a, x)
Regularized upper incomplete gamma function.
It is defined as
.. math::
Q(a, x) = \frac{1}{\Gamma(a)} \int_x^\infty t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. See [dlmf]_ for details.
Parameters
----------
a : array_like
Positive parameter
x : array_like
Nonnegative argument
Returns
-------
scalar or ndarray
Values of the upper incomplete gamma function
Notes
-----
The function satisfies the relation ``gammainc(a, x) +
gammaincc(a, x) = 1`` where `gammainc` is the regularized lower
incomplete gamma function.
The implementation largely follows that of [boost]_.
See also
--------
gammainc : regularized lower incomplete gamma function
gammaincinv : inverse of the regularized lower incomplete gamma
function with respect to `x`
gammainccinv : inverse of the regularized upper incomplete gamma
function with respect to `x`
References
----------
.. [dlmf] NIST Digital Library of Mathematical functions
https://dlmf.nist.gov/8.2#E4
.. [boost] Maddock et. al., "Incomplete Gamma Functions",
https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
Examples
--------
>>> import scipy.special as sc
It is the survival function of the gamma distribution, so it
starts at 1 and monotonically decreases to 0.
>>> sc.gammaincc(0.5, [0, 1, 10, 100, 1000])
array([1.00000000e+00, 1.57299207e-01, 7.74421643e-06, 2.08848758e-45,
0.00000000e+00])
It is equal to one minus the lower incomplete gamma function.
>>> a, x = 0.5, 0.4
>>> sc.gammaincc(a, x)
0.37109336952269756
>>> 1 - sc.gammainc(a, x)
0.37109336952269756
""")
add_newdoc("gammainccinv",
"""
gammainccinv(a, y)
Inverse of the regularized upper incomplete gamma function with respect to `x`.
Given an input :math:`y` between 0 and 1, returns :math:`x` such
that :math:`y = Q(a, x)`. Here :math:`Q` is the regularized upper
incomplete gamma function; see `gammaincc`. This is well-defined
because the upper incomplete gamma function is monotonic as can be
seen from its definition in [dlmf]_.
Parameters
----------
a : array_like
Positive parameter
y : array_like
Argument between 0 and 1, inclusive
Returns
-------
scalar or ndarray
Values of the inverse of the upper incomplete gamma function
See Also
--------
gammaincc : regularized upper incomplete gamma function
gammainc : regularized lower incomplete gamma function
gammaincinv : inverse of the regularized lower incomplete gamma
function with respect to `x`
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/8.2#E4
Examples
--------
>>> import scipy.special as sc
It starts at infinity and monotonically decreases to 0.
>>> sc.gammainccinv(0.5, [0, 0.1, 0.5, 1])
array([ inf, 1.35277173, 0.22746821, 0. ])
It inverts the upper incomplete gamma function.
>>> a, x = 0.5, [0, 0.1, 0.5, 1]
>>> sc.gammaincc(a, sc.gammainccinv(a, x))
array([0. , 0.1, 0.5, 1. ])
>>> a, x = 0.5, [0, 10, 50]
>>> sc.gammainccinv(a, sc.gammaincc(a, x))
array([ 0., 10., 50.])
""")
add_newdoc("gammaincinv",
"""
gammaincinv(a, y)
Inverse to the lower incomplete gamma function with respect to `x`.
Given an input :math:`y` between 0 and 1, returns :math:`x` such
that :math:`y = P(a, x)`. Here :math:`P` is the regularized lower
incomplete gamma function; see `gammainc`. This is well-defined
because the lower incomplete gamma function is monotonic as can be
seen from its definition in [dlmf]_.
Parameters
----------
a : array_like
Positive parameter
y : array_like
Parameter between 0 and 1, inclusive
Returns
-------
scalar or ndarray
Values of the inverse of the lower incomplete gamma function
See Also
--------
gammainc : regularized lower incomplete gamma function
gammaincc : regularized upper incomplete gamma function
gammainccinv : inverse of the regularized upper incomplete gamma
function with respect to `x`
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/8.2#E4
Examples
--------
>>> import scipy.special as sc
It starts at 0 and monotonically increases to infinity.
>>> sc.gammaincinv(0.5, [0, 0.1 ,0.5, 1])
array([0. , 0.00789539, 0.22746821, inf])
It inverts the lower incomplete gamma function.
>>> a, x = 0.5, [0, 0.1, 0.5, 1]
>>> sc.gammainc(a, sc.gammaincinv(a, x))
array([0. , 0.1, 0.5, 1. ])
>>> a, x = 0.5, [0, 10, 25]
>>> sc.gammaincinv(a, sc.gammainc(a, x))
array([ 0. , 10. , 25.00001465])
""")
add_newdoc("gammaln",
r"""
gammaln(x, out=None)
Logarithm of the absolute value of the gamma function.
Defined as
.. math::
\ln(\lvert\Gamma(x)\rvert)
where :math:`\Gamma` is the gamma function. For more details on
the gamma function, see [dlmf]_.
Parameters
----------
x : array_like
Real argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the log of the absolute value of gamma
See Also
--------
gammasgn : sign of the gamma function
loggamma : principal branch of the logarithm of the gamma function
Notes
-----
It is the same function as the Python standard library function
:func:`math.lgamma`.
When used in conjunction with `gammasgn`, this function is useful
for working in logspace on the real axis without having to deal
with complex numbers via the relation ``exp(gammaln(x)) =
gammasgn(x) * gamma(x)``.
For complex-valued log-gamma, use `loggamma` instead of `gammaln`.
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/5
Examples
--------
>>> import scipy.special as sc
It has two positive zeros.
>>> sc.gammaln([1, 2])
array([0., 0.])
It has poles at nonpositive integers.
>>> sc.gammaln([0, -1, -2, -3, -4])
array([inf, inf, inf, inf, inf])
It asymptotically approaches ``x * log(x)`` (Stirling's formula).
>>> x = np.array([1e10, 1e20, 1e40, 1e80])
>>> sc.gammaln(x)
array([2.20258509e+11, 4.50517019e+21, 9.11034037e+41, 1.83206807e+82])
>>> x * np.log(x)
array([2.30258509e+11, 4.60517019e+21, 9.21034037e+41, 1.84206807e+82])
""")
add_newdoc("gammasgn",
r"""
gammasgn(x)
Sign of the gamma function.
It is defined as
.. math::
\text{gammasgn}(x) =
\begin{cases}
+1 & \Gamma(x) > 0 \\
-1 & \Gamma(x) < 0
\end{cases}
where :math:`\Gamma` is the gamma function; see `gamma`. This
definition is complete since the gamma function is never zero;
see the discussion after [dlmf]_.
Parameters
----------
x : array_like
Real argument
Returns
-------
scalar or ndarray
Sign of the gamma function
Notes
-----
The gamma function can be computed as ``gammasgn(x) *
np.exp(gammaln(x))``.
See Also
--------
gamma : the gamma function
gammaln : log of the absolute value of the gamma function
loggamma : analytic continuation of the log of the gamma function
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/5.2#E1
Examples
--------
>>> import scipy.special as sc
It is 1 for `x > 0`.
>>> sc.gammasgn([1, 2, 3, 4])
array([1., 1., 1., 1.])
It alternates between -1 and 1 for negative integers.
>>> sc.gammasgn([-0.5, -1.5, -2.5, -3.5])
array([-1., 1., -1., 1.])
It can be used to compute the gamma function.
>>> x = [1.5, 0.5, -0.5, -1.5]
>>> sc.gammasgn(x) * np.exp(sc.gammaln(x))
array([ 0.88622693, 1.77245385, -3.5449077 , 2.3632718 ])
>>> sc.gamma(x)
array([ 0.88622693, 1.77245385, -3.5449077 , 2.3632718 ])
""")
add_newdoc("gdtr",
r"""
gdtr(a, b, x)
Gamma distribution cumulative distribution function.
Returns the integral from zero to `x` of the gamma probability density
function,
.. math::
F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (upper limit of integration; float).
See also
--------
gdtrc : 1 - CDF of the gamma distribution.
Returns
-------
F : ndarray
The CDF of the gamma distribution with parameters `a` and `b`
evaluated at `x`.
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
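Examples
--------
An illustrative check with arbitrary sample inputs: the CDF should match the
regularized lower incomplete gamma function evaluated at ``a * x``. The
result is shown as a boolean comparison rather than exact digits.
>>> import numpy as np
>>> import scipy.special as sc
>>> a, b, x = 1.5, 2.0, 3.0
>>> np.allclose(sc.gdtr(a, b, x), sc.gammainc(b, a * x))
True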
""")
add_newdoc("gdtrc",
r"""
gdtrc(a, b, x)
Gamma distribution survival function.
Integral from `x` to infinity of the gamma probability density function,
.. math::
F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (lower limit of integration; float).
Returns
-------
F : ndarray
The survival function of the gamma distribution with parameters `a`
and `b` evaluated at `x`.
See Also
--------
gdtr, gdtrix
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
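Examples
--------
An illustrative check with arbitrary sample inputs: the survival function
should match the regularized upper incomplete gamma function evaluated at
``a * x``. The result is shown as a boolean comparison.
>>> import numpy as np
>>> import scipy.special as sc
>>> a, b, x = 1.5, 2.0, 3.0
>>> np.allclose(sc.gdtrc(a, b, x), sc.gammaincc(b, a * x))
True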
""")
add_newdoc("gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of `gdtr` vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `p`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of `gdtr` vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `p` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of `gdtr` vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the pth quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `p`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `x` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `x`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("hankel1",
r"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the first kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
See also
--------
hankel1e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
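Examples
--------
An illustrative check of the standard decomposition
:math:`H^{(1)}_v(z) = J_v(z) + i Y_v(z)`, with an arbitrary order and
argument; the result is shown as a boolean comparison.
>>> import numpy as np
>>> import scipy.special as sc
>>> v, z = 1.0, 2.0
>>> np.allclose(sc.hankel1(v, z), sc.jv(v, z) + 1j * sc.yv(v, z))
True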
""")
add_newdoc("hankel1e",
r"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
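Examples
--------
An illustrative check of the scaling relation in the definition above, with
an arbitrary order and argument; the result is shown as a boolean
comparison.
>>> import numpy as np
>>> import scipy.special as sc
>>> v, z = 1.0, 2.0
>>> np.allclose(sc.hankel1e(v, z), sc.hankel1(v, z) * np.exp(-1j * z))
True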
""")
add_newdoc("hankel2",
r"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
See also
--------
hankel2e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("hankel2e",
r"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
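Examples
--------
An illustrative check of the quadratic and linear branches of the definition
above, with arbitrary inputs; the finite values are shown as boolean
comparisons, and a negative changepoint gives infinity.
>>> import numpy as np
>>> from scipy.special import huber
>>> np.allclose(huber(1.0, 0.5), 0.5 * 0.5**2)
True
>>> np.allclose(huber(1.0, 3.0), 1.0 * (3.0 - 0.5 * 1.0))
True
>>> huber(-1.0, 2.0)
inf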
""")
add_newdoc("hyp0f1",
r"""
hyp0f1(v, z, out=None)
Confluent hypergeometric limit function 0F1.
Parameters
----------
v : array_like
Real-valued parameter
z : array_like
Real- or complex-valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
The confluent hypergeometric limit function
Notes
-----
This function is defined as:
.. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`,
and satisfies the differential equation :math:`zf''(z) + vf'(z) =
f(z)`. See [1]_ for more information.
References
----------
.. [1] Wolfram MathWorld, "Confluent Hypergeometric Limit Function",
http://mathworld.wolfram.com/ConfluentHypergeometricLimitFunction.html
Examples
--------
>>> import scipy.special as sc
It is one when `z` is zero.
>>> sc.hyp0f1(1, 0)
1.0
It is the limit of the confluent hypergeometric function as `q`
goes to infinity.
>>> q = np.array([1, 10, 100, 1000])
>>> v = 1
>>> z = 1
>>> sc.hyp1f1(q, v, z / q)
array([2.71828183, 2.31481985, 2.28303778, 2.27992985])
>>> sc.hyp0f1(v, z)
2.2795853023360673
It is related to Bessel functions.
>>> n = 1
>>> x = np.linspace(0, 1, 5)
>>> sc.jv(n, x)
array([0. , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])
>>> (0.5 * x)**n / sc.factorial(n) * sc.hyp0f1(n + 1, -0.25 * x**2)
array([0. , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])
""")
add_newdoc("hyp1f1",
r"""
hyp1f1(a, b, x, out=None)
Confluent hypergeometric function 1F1.
The confluent hypergeometric function is defined by the series
.. math::
{}_1F_1(a; b; x) = \sum_{k = 0}^\infty \frac{(a)_k}{(b)_k k!} x^k.
See [dlmf]_ for more details. Here :math:`(\cdot)_k` is the
Pochhammer symbol; see `poch`.
Parameters
----------
a, b : array_like
Real parameters
x : array_like
Real or complex argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the confluent hypergeometric function
See also
--------
hyperu : another confluent hypergeometric function
hyp0f1 : confluent hypergeometric limit function
hyp2f1 : Gaussian hypergeometric function
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/13.2#E2
Examples
--------
>>> import scipy.special as sc
It is one when `x` is zero:
>>> sc.hyp1f1(0.5, 0.5, 0)
1.0
It is singular when `b` is a nonpositive integer.
>>> sc.hyp1f1(0.5, -1, 0)
inf
It is a polynomial when `a` is a nonpositive integer.
>>> a, b, x = -1, 0.5, np.array([1.0, 2.0, 3.0, 4.0])
>>> sc.hyp1f1(a, b, x)
array([-1., -3., -5., -7.])
>>> 1 + (a / b) * x
array([-1., -3., -5., -7.])
It reduces to the exponential function when `a = b`.
>>> sc.hyp1f1(2, 2, [1, 2, 3, 4])
array([ 2.71828183, 7.3890561 , 20.08553692, 54.59815003])
>>> np.exp([1, 2, 3, 4])
array([ 2.71828183, 7.3890561 , 20.08553692, 54.59815003])
""")
add_newdoc("hyp2f1",
r"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z)
Parameters
----------
a, b, c : array_like
Arguments, should be real-valued.
z : array_like
Argument, real or complex.
Returns
-------
hyp2f1 : scalar or ndarray
The values of the gaussian hypergeometric function.
See also
--------
hyp0f1 : confluent hypergeometric limit function.
hyp1f1 : Kummer's (confluent hypergeometric) function.
Notes
-----
This function is defined for :math:`|z| < 1` as
.. math::
\mathrm{hyp2f1}(a, b, c, z) = \sum_{n=0}^\infty
\frac{(a)_n (b)_n}{(c)_n}\frac{z^n}{n!},
and defined on the rest of the complex z-plane by analytic
continuation [1]_.
Here :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
:math:`n` is an integer the result is a polynomial of degree :math:`n`.
The implementation for complex values of ``z`` is described in [2]_.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/15.2
.. [2] S. Zhang and J.M. Jin, "Computation of Special Functions", Wiley 1996
.. [3] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
>>> import scipy.special as sc
It has poles when `c` is a negative integer.
>>> sc.hyp2f1(1, 1, -2, 1)
inf
It is a polynomial when `a` or `b` is a negative integer.
>>> a, b, c = -1, 1, 1.5
>>> z = np.linspace(0, 1, 5)
>>> sc.hyp2f1(a, b, c, z)
array([1. , 0.83333333, 0.66666667, 0.5 , 0.33333333])
>>> 1 + a * b * z / c
array([1. , 0.83333333, 0.66666667, 0.5 , 0.33333333])
It is symmetric in `a` and `b`.
>>> a = np.linspace(0, 1, 5)
>>> b = np.linspace(0, 1, 5)
>>> sc.hyp2f1(a, b, 1, 0.5)
array([1. , 1.03997334, 1.1803406 , 1.47074441, 2. ])
>>> sc.hyp2f1(b, a, 1, 0.5)
array([1. , 1.03997334, 1.1803406 , 1.47074441, 2. ])
It contains many other functions as special cases.
>>> z = 0.5
>>> sc.hyp2f1(1, 1, 2, z)
1.3862943611198901
>>> -np.log(1 - z) / z
1.3862943611198906
>>> sc.hyp2f1(0.5, 1, 1.5, z**2)
1.098612288668109
>>> np.log((1 + z) / (1 - z)) / (2 * z)
1.0986122886681098
>>> sc.hyp2f1(0.5, 1, 1.5, -z**2)
0.9272952180016117
>>> np.arctan(z) / z
0.9272952180016123
""")
add_newdoc("hyperu",
r"""
hyperu(a, b, x, out=None)
Confluent hypergeometric function U
It is defined as the solution to the equation
.. math::
x \frac{d^2w}{dx^2} + (b - x) \frac{dw}{dx} - aw = 0
which satisfies the property
.. math::
U(a, b, x) \sim x^{-a}
as :math:`x \to \infty`. See [dlmf]_ for more details.
Parameters
----------
a, b : array_like
Real-valued parameters
x : array_like
Real-valued argument
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
Values of `U`
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/13.2#E6
Examples
--------
>>> import scipy.special as sc
It has a branch cut along the negative `x` axis.
>>> x = np.linspace(-0.1, -10, 5)
>>> sc.hyperu(1, 1, x)
array([nan, nan, nan, nan, nan])
It approaches zero as `x` goes to infinity.
>>> x = np.array([1, 10, 100])
>>> sc.hyperu(1, 1, x)
array([0.59634736, 0.09156333, 0.00990194])
It satisfies Kummer's transformation.
>>> a, b, x = 2, 1, 1
>>> sc.hyperu(a, b, x)
0.1926947246463881
>>> x**(1 - b) * sc.hyperu(a - b + 1, 2 - b, x)
0.1926947246463881
""")
add_newdoc("i0",
r"""
i0(x)
Modified Bessel function of order 0.
Defined as,
.. math::
I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x),
where :math:`J_0` is the Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i0`.
See also
--------
iv
i0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
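Examples
--------
An illustrative consistency check with arbitrary arguments: `i0` should
agree with `iv` at order 0. The result is shown as a boolean comparison
rather than exact digits.
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.array([0.5, 1.0, 2.0, 5.0])
>>> np.allclose(sc.i0(x), sc.iv(0, x))
True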
""")
add_newdoc("i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 0
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i0`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i0e`.
See also
--------
iv
i0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
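Examples
--------
An illustrative check of the scaling relation in the definition above, with
arbitrary arguments; the result is shown as a boolean comparison.
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.array([0.5, 1.0, 2.0, 5.0])
>>> np.allclose(sc.i0e(x), np.exp(-np.abs(x)) * sc.i0(x))
True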
""")
add_newdoc("i1",
r"""
i1(x)
Modified Bessel function of order 1.
Defined as,
.. math::
I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!}
= -\imath J_1(\imath x),
where :math:`J_1` is the Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i1`.
See also
--------
iv
i1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 1
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i1`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i1e`.
See also
--------
iv
i1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("_igam_fac",
"""
Internal function, do not use.
""")
add_newdoc("it2i0k0",
r"""
it2i0k0(x, out=None)
Integrals related to modified Bessel functions of order 0.
Computes the integrals
.. math::
\int_0^x \frac{I_0(t) - 1}{t} dt \\
\int_x^\infty \frac{K_0(t)}{t} dt.
Parameters
----------
x : array_like
Values at which to evaluate the integrals.
out : tuple of ndarrays, optional
Optional output arrays for the function results.
Returns
-------
ii0 : scalar or ndarray
The integral for `i0`
ik0 : scalar or ndarray
The integral for `k0`
""")
add_newdoc("it2j0y0",
r"""
it2j0y0(x, out=None)
Integrals related to Bessel functions of the first kind of order 0.
Computes the integrals
.. math::
\int_0^x \frac{1 - J_0(t)}{t} dt \\
\int_x^\infty \frac{Y_0(t)}{t} dt.
For more on :math:`J_0` and :math:`Y_0` see `j0` and `y0`.
Parameters
----------
x : array_like
Values at which to evaluate the integrals.
out : tuple of ndarrays, optional
Optional output arrays for the function results.
Returns
-------
ij0 : scalar or ndarray
The integral for `j0`
iy0 : scalar or ndarray
The integral for `y0`
""")
add_newdoc("it2struve0",
r"""
it2struve0(x)
Integral related to the Struve function of order 0.
Returns the integral,
.. math::
\int_x^\infty \frac{H_0(t)}{t}\,dt
where :math:`H_0` is the Struve function of order 0.
Parameters
----------
x : array_like
Lower limit of integration.
Returns
-------
I : ndarray
The value of the integral.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integrals of Airy functions from 0 to `x`.
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
Apt
Integral of Ai(t) from 0 to x.
Bpt
Integral of Bi(t) from 0 to x.
Ant
Integral of Ai(-t) from 0 to x.
Bnt
Integral of Bi(-t) from 0 to x.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("iti0k0",
r"""
iti0k0(x, out=None)
Integrals of modified Bessel functions of order 0.
Computes the integrals
.. math::
\int_0^x I_0(t) dt \\
\int_0^x K_0(t) dt.
For more on :math:`I_0` and :math:`K_0` see `i0` and `k0`.
Parameters
----------
x : array_like
Values at which to evaluate the integrals.
out : tuple of ndarrays, optional
Optional output arrays for the function results.
Returns
-------
ii0 : scalar or ndarray
The integral for `i0`
ik0 : scalar or ndarray
The integral for `k0`
""")
add_newdoc("itj0y0",
r"""
itj0y0(x, out=None)
Integrals of Bessel functions of the first kind of order 0.
Computes the integrals
.. math::
\int_0^x J_0(t) dt \\
\int_0^x Y_0(t) dt.
For more on :math:`J_0` and :math:`Y_0` see `j0` and `y0`.
Parameters
----------
x : array_like
Values at which to evaluate the integrals.
out : tuple of ndarrays, optional
Optional output arrays for the function results.
Returns
-------
ij0 : scalar or ndarray
The integral of `j0`
iy0 : scalar or ndarray
The integral of `y0`
""")
add_newdoc("itmodstruve0",
r"""
itmodstruve0(x)
Integral of the modified Struve function of order 0.
.. math::
I = \int_0^x L_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`L_0` from 0 to `x`.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("itstruve0",
r"""
itstruve0(x)
Integral of the Struve function of order 0.
.. math::
I = \int_0^x H_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`H_0` from 0 to `x`.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("iv",
r"""
iv(v, z)
Modified Bessel function of the first kind of real order.
Parameters
----------
v : array_like
Order. If `z` is of real type and negative, `v` must be integer
valued.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the modified Bessel function.
Notes
-----
For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out
using Temme's method [1]_. For larger orders, uniform asymptotic
expansions are applied.
For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is
called. It uses a power series for small `z`, the asymptotic expansion
for large `abs(z)`, the Miller algorithm normalized by the Wronskian
and a Neumann series for intermediate magnitudes, and the uniform
asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large
orders. Backward recurrence is used to generate sequences or reduce
orders when necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\imath\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
See also
--------
kve : This function with leading exponential behavior stripped off.
References
----------
.. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976)
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
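Examples
--------
An illustrative check of the half-integer-order identity
:math:`I_{1/2}(x) = \sqrt{2/(\pi x)} \sinh(x)`, with arbitrary arguments;
the result is shown as a boolean comparison.
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.array([0.5, 1.0, 2.0])
>>> np.allclose(sc.iv(0.5, x), np.sqrt(2 / (np.pi * x)) * np.sinh(x))
True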
""")
add_newdoc("ive",
r"""
ive(v, z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v, z) = iv(v, z) * exp(-abs(z.real))
Parameters
----------
v : array_like of float
Order.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the exponentially scaled modified Bessel function.
Notes
-----
For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a
power series for small `z`, the asymptotic expansion for large
`abs(z)`, the Miller algorithm normalized by the Wronskian and a
Neumann series for intermediate magnitudes, and the uniform asymptotic
expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders.
Backward recurrence is used to generate sequences or reduce orders when
necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\imath\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
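Examples
--------
An illustrative check of the scaling relation in the definition above, with
arbitrary real arguments; the result is shown as a boolean comparison.
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.array([0.5, 1.0, 2.0, 5.0])
>>> np.allclose(sc.ive(1, x), sc.iv(1, x) * np.exp(-np.abs(x)))
True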
""")
add_newdoc("j0",
r"""
j0(x)
Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval the following rational approximation is used:
.. math::
J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)},
where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of
:math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3
and 8, respectively.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `j0`.
It should not be confused with the spherical Bessel functions (see
`spherical_jn`).
See also
--------
jv : Bessel function of real order and complex argument.
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
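Examples
--------
An illustrative consistency check with arbitrary arguments: `j0` should
agree with `jv` at order 0. The result is shown as a boolean comparison
rather than exact digits.
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.array([1.0, 2.0, 5.0, 10.0])
>>> np.allclose(sc.j0(x), sc.jv(0, x))
True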
""")
add_newdoc("j1",
"""
j1(x)
Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 24 term Chebyshev expansion is used. In the second, the
asymptotic trigonometric representation is employed using two rational
functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `j1`.
It should not be confused with the spherical Bessel functions (see
`spherical_jn`).
See also
--------
jv
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("jn",
"""
jn(n, x)
Bessel function of the first kind of integer order and real argument.
Notes
-----
`jn` is an alias of `jv`.
Not to be confused with the spherical Bessel functions (see `spherical_jn`).
See also
--------
jv
spherical_jn : spherical Bessel functions.
""")
add_newdoc("jv",
r"""
jv(v, z)
Bessel function of the first kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the Bessel function, :math:`J_v(z)`.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
Not to be confused with the spherical Bessel functions (see `spherical_jn`).
See also
--------
jve : :math:`J_v` with leading exponential behavior stripped off.
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
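Examples
--------
An illustrative check of the half-integer-order identity
:math:`J_{1/2}(x) = \sqrt{2/(\pi x)} \sin(x)`, with arbitrary arguments;
the result is shown as a boolean comparison.
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.array([0.5, 1.0, 2.0, 5.0])
>>> np.allclose(sc.jv(0.5, x), np.sqrt(2 / (np.pi * x)) * np.sin(x))
True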
""")
add_newdoc("jve",
r"""
jve(v, z)
Exponentially scaled Bessel function of order `v`.
Defined as::
jve(v, z) = jv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
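Examples
--------
An illustrative check of the scaling relation in the definition above, with
an arbitrary complex argument; the result is shown as a boolean comparison.
>>> import numpy as np
>>> import scipy.special as sc
>>> z = 1.0 + 5.0j
>>> np.allclose(sc.jve(1, z), sc.jv(1, z) * np.exp(-np.abs(z.imag)))
True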
""")
add_newdoc("k0",
r"""
k0(x)
Modified Bessel function of the second kind of order 0, :math:`K_0`.
This function is also sometimes referred to as the modified Bessel
function of the third kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
K : ndarray
Value of the modified Bessel function :math:`K_0` at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0`.
See also
--------
kv
k0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0e`.
See also
--------
kv
k0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the modified Bessel function K of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1`.
See also
--------
kv
k1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1e`.
See also
--------
kv
k1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("kei",
r"""
kei(x, out=None)
Kelvin function kei.
Defined as
.. math::
\mathrm{kei}(x) = \Im[K_0(x e^{\pi i / 4})]
where :math:`K_0` is the modified Bessel function of the second
kind (see `kv`). See [dlmf]_ for more details.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the Kelvin function.
See Also
--------
ker : the corresponding real part
keip : the derivative of kei
kv : modified Bessel function of the second kind
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10.61
Examples
--------
It can be expressed using the modified Bessel function of the
second kind.
>>> import scipy.special as sc
>>> x = np.array([1.0, 2.0, 3.0, 4.0])
>>> sc.kv(0, x * np.exp(np.pi * 1j / 4)).imag
array([-0.49499464, -0.20240007, -0.05112188, 0.0021984 ])
>>> sc.kei(x)
array([-0.49499464, -0.20240007, -0.05112188, 0.0021984 ])
""")
add_newdoc("keip",
r"""
keip(x, out=None)
Derivative of the Kelvin function kei.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
The values of the derivative of kei.
See Also
--------
kei
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10#PT5
""")
add_newdoc("kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at `x`. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("ker",
r"""
ker(x, out=None)
Kelvin function ker.
Defined as
.. math::
\mathrm{ker}(x) = \Re[K_0(x e^{\pi i / 4})]
Where :math:`K_0` is the modified Bessel function of the second
kind (see `kv`). See [dlmf]_ for more details.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
See Also
--------
kei : the corresponding imaginary part
kerp : the derivative of ker
kv : modified Bessel function of the second kind
Returns
-------
scalar or ndarray
Values of the Kelvin function.
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10.61
Examples
--------
It can be expressed using the modified Bessel function of the
second kind.
>>> import scipy.special as sc
>>> x = np.array([1.0, 2.0, 3.0, 4.0])
>>> sc.kv(0, x * np.exp(np.pi * 1j / 4)).real
array([ 0.28670621, -0.04166451, -0.06702923, -0.03617885])
>>> sc.ker(x)
array([ 0.28670621, -0.04166451, -0.06702923, -0.03617885])
""")
add_newdoc("kerp",
r"""
kerp(x, out=None)
Derivative of the Kelvin function ker.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the derivative of ker.
See Also
--------
ker
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10#PT5
""")
add_newdoc("kl_div",
r"""
kl_div(x, y, out=None)
Elementwise function for computing Kullback-Leibler divergence.
.. math::
\mathrm{kl\_div}(x, y) =
\begin{cases}
x \log(x / y) - x + y & x > 0, y > 0 \\
y & x = 0, y \ge 0 \\
\infty & \text{otherwise}
\end{cases}
Parameters
----------
x, y : array_like
Real arguments
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the Kullback-Leibler divergence.
See Also
--------
entr, rel_entr
Notes
-----
.. versionadded:: 0.15.0
This function is non-negative and is jointly convex in `x` and `y`.
The origin of this function is in convex programming; see [1]_ for
details. This is why the the function contains the extra :math:`-x
+ y` terms over what might be expected from the Kullback-Leibler
divergence. For a version of the function without the extra terms,
see `rel_entr`.
References
----------
.. [1] Grant, Boyd, and Ye, "CVX: Matlab Software for Disciplined Convex
Programming", http://cvxr.com/cvx/
""")
add_newdoc("kn",
r"""
kn(n, x)
Modified Bessel function of the second kind of integer order `n`
Returns the modified Bessel function of the second kind for integer order
`n` at real `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions.
Parameters
----------
n : array_like of int
Order of Bessel functions (floats will truncate with a warning)
x : array_like of float
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kv : Same function, but accepts real order and complex argument
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kn
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in range(6):
... plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kn([4, 5, 6], 1)
array([ 44.23241585, 360.9605896 , 3653.83831186])
""")
add_newdoc("kolmogi",
"""
kolmogi(p)
Inverse survival function of the Kolmogorov distribution.
It is the inverse function to `kolmogorov`.
Returns y such that ``kolmogorov(y) == p``.
Parameters
----------
p : float array_like
Probability
Returns
-------
float
The value(s) of kolmogi(p)
Notes
-----
`kolmogorov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
function is exposed in `scipy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.kstwobign` distribution.
See Also
--------
kolmogorov : The Survival Function for the distribution
scipy.stats.kstwobign : Provides the functionality as a continuous distribution
smirnov, smirnovi : Functions for the one-sided distribution
Examples
--------
>>> from scipy.special import kolmogi
>>> kolmogi([0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0])
array([ inf, 1.22384787, 1.01918472, 0.82757356, 0.67644769,
0.57117327, 0. ])
""")
add_newdoc("kolmogorov",
r"""
kolmogorov(y)
Complementary cumulative distribution function (survival function) of the
Kolmogorov distribution.
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (``D_n*\sqrt(n)`` as n goes to infinity)
of a two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that ``sqrt(n) * max absolute deviation > y``.
Parameters
----------
y : float array_like
Absolute deviation between the Empirical CDF (ECDF) and the target CDF,
multiplied by sqrt(n).
Returns
-------
float
The value(s) of kolmogorov(y)
Notes
-----
`kolmogorov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
function is exposed in `scipy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.kstwobign` distribution.
See Also
--------
kolmogi : The Inverse Survival Function for the distribution
scipy.stats.kstwobign : Provides the functionality as a continuous distribution
smirnov, smirnovi : Functions for the one-sided distribution
Examples
--------
Show the probability of a gap at least as big as 0, 0.5 and 1.0.
>>> from scipy.special import kolmogorov
>>> from scipy.stats import kstwobign
>>> kolmogorov([0, 0.5, 1.0])
array([ 1. , 0.96394524, 0.26999967])
Compare a sample of size 1000 drawn from a Laplace(0, 1) distribution against
the target distribution, a Normal(0, 1) distribution.
>>> from scipy.stats import norm, laplace
>>> rng = np.random.default_rng()
>>> n = 1000
>>> lap01 = laplace(0, 1)
>>> x = np.sort(lap01.rvs(n, random_state=rng))
>>> np.mean(x), np.std(x)
(-0.05841730131499543, 1.3968109101997568)
Construct the Empirical CDF and the K-S statistic Dn.
>>> target = norm(0,1) # Normal mean 0, stddev 1
>>> cdfs = target.cdf(x)
>>> ecdfs = np.arange(n+1, dtype=float)/n
>>> gaps = np.column_stack([cdfs - ecdfs[:n], ecdfs[1:] - cdfs])
>>> Dn = np.max(gaps)
>>> Kn = np.sqrt(n) * Dn
>>> print('Dn=%f, sqrt(n)*Dn=%f' % (Dn, Kn))
Dn=0.043363, sqrt(n)*Dn=1.371265
>>> print(chr(10).join(['For a sample of size n drawn from a N(0, 1) distribution:',
... ' the approximate Kolmogorov probability that sqrt(n)*Dn>=%f is %f' % (Kn, kolmogorov(Kn)),
... ' the approximate Kolmogorov probability that sqrt(n)*Dn<=%f is %f' % (Kn, kstwobign.cdf(Kn))]))
For a sample of size n drawn from a N(0, 1) distribution:
the approximate Kolmogorov probability that sqrt(n)*Dn>=1.371265 is 0.046533
the approximate Kolmogorov probability that sqrt(n)*Dn<=1.371265 is 0.953467
Plot the Empirical CDF against the target N(0, 1) CDF.
>>> import matplotlib.pyplot as plt
>>> plt.step(np.concatenate([[-3], x]), ecdfs, where='post', label='Empirical CDF')
>>> x3 = np.linspace(-3, 3, 100)
>>> plt.plot(x3, target.cdf(x3), label='CDF for N(0, 1)')
>>> plt.ylim([0, 1]); plt.grid(True); plt.legend();
>>> # Add vertical lines marking Dn+ and Dn-
>>> iminus, iplus = np.argmax(gaps, axis=0)
>>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r', linestyle='dashed', lw=4)
>>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='r', linestyle='dashed', lw=4)
>>> plt.show()
""")
add_newdoc("_kolmogc",
r"""
Internal function, do not use.
""")
add_newdoc("_kolmogci",
r"""
Internal function, do not use.
""")
add_newdoc("_kolmogp",
r"""
Internal function, do not use.
""")
add_newdoc("kv",
r"""
kv(v, z)
Modified Bessel function of the second kind of real order `v`
Returns the modified Bessel function of the second kind for real order
`v` at complex `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions. They are defined as those solutions
of the modified Bessel equation for which,
.. math::
K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x)
as :math:`x \to \infty` [3]_.
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results. Note that input must be of complex type to get complex
output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kve : This function with leading exponential behavior stripped off.
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
.. [3] NIST Digital Library of Mathematical Functions,
Eq. 10.25.E3. https://dlmf.nist.gov/10.25.E3
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kv
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in np.linspace(0, 6, 5):
... plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kv([4, 4.5, 5], 1+2j)
array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j])
""")
add_newdoc("kve",
r"""
kve(v, z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order `v` at
complex `z`::
kve(v, z) = kv(v, z) * exp(z)
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The exponentially scaled modified Bessel function of the second kind.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
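Examples
--------
A quick numerical check of the scaling relation stated above, comparing
against `kv` directly:
>>> from scipy.special import kv, kve
>>> v, z = 1.5, 2.0
>>> np.allclose(kve(v, z), kv(v, z) * np.exp(z))
True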
""")
add_newdoc("_lanczos_sum_expg_scaled",
"""
Internal function, do not use.
""")
add_newdoc("_lgam1p",
"""
Internal function, do not use.
""")
add_newdoc("log1p",
"""
log1p(x, out=None)
Calculates log(1 + x) for use when `x` is near zero.
Parameters
----------
x : array_like
Real or complex valued input.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of ``log(1 + x)``.
See Also
--------
expm1, cosm1
Examples
--------
>>> import scipy.special as sc
It is more accurate than using ``log(1 + x)`` directly for ``x``
near 0. Note that in the below example ``1 + 1e-17 == 1`` to
double precision.
>>> sc.log1p(1e-17)
1e-17
>>> np.log(1 + 1e-17)
0.0
""")
add_newdoc("_log1pmx",
"""
Internal function, do not use.
""")
add_newdoc('logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
See Also
--------
expit
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
Examples
--------
>>> from scipy.special import logit, expit
>>> logit([0, 0.25, 0.5, 0.75, 1])
array([ -inf, -1.09861229, 0. , 1.09861229, inf])
`expit` is the inverse of `logit`:
>>> expit(logit([0.1, 0.75, 0.999]))
array([ 0.1 , 0.75 , 0.999])
Plot logit(x) for x in [0, 1]:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 1, 501)
>>> y = logit(x)
>>> plt.plot(x, y)
>>> plt.grid()
>>> plt.ylim(-6, 6)
>>> plt.xlabel('x')
>>> plt.title('logit(x)')
>>> plt.show()
""")
add_newdoc("lpmv",
r"""
lpmv(m, v, x)
Associated Legendre function of integer order and real degree.
Defined as
.. math::
P_v^m = (-1)^m (1 - x^2)^{m/2} \frac{d^m}{dx^m} P_v(x)
where
.. math::
P_v = \sum_{k = 0}^\infty \frac{(-v)_k (v + 1)_k}{(k!)^2}
\left(\frac{1 - x}{2}\right)^k
is the Legendre function of the first kind. Here :math:`(\cdot)_k`
is the Pochhammer symbol; see `poch`.
Parameters
----------
m : array_like
Order (int or float). If passed a float not equal to an
integer the function returns NaN.
v : array_like
Degree (float).
x : array_like
Argument (float). Must have ``|x| <= 1``.
Returns
-------
pmv : ndarray
Value of the associated Legendre function.
See Also
--------
lpmn : Compute the associated Legendre function for all orders
``0, ..., m`` and degrees ``0, ..., n``.
clpmn : Compute the associated Legendre function at complex
arguments.
Notes
-----
Note that this implementation includes the Condon-Shortley phase.
References
----------
.. [1] Zhang, Jin, "Computation of Special Functions", John Wiley
and Sons, Inc, 1996.
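Examples
--------
For order ``m = 0`` the definition above reduces to the Legendre function
itself; as an illustrative check, compare against the closed form
:math:`P_2(x) = (3x^2 - 1)/2`:
>>> from scipy.special import lpmv
>>> x = 0.5
>>> np.allclose(lpmv(0, 2, x), (3 * x**2 - 1) / 2)
True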
""")
add_newdoc("mathieu_a",
"""
mathieu_a(m, q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z, q)``, of Mathieu's equation.
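Examples
--------
For ``q = 0`` Mathieu's equation reduces to ``y'' + a*y = 0``, whose even
periodic solutions are ``cos(m*z)``, so the characteristic value should be
``m**2`` (shown here as an illustrative check):
>>> from scipy.special import mathieu_a
>>> np.allclose([mathieu_a(m, 0) for m in range(1, 4)], [1, 4, 9])
True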
""")
add_newdoc("mathieu_b",
"""
mathieu_b(m, q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("mathieu_cem",
"""
mathieu_cem(m, q, x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of ce_m(x, q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("mathieu_modsem1",
"""
mathieu_modsem1(m, q, x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x, q), of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of se_m(x, q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fm``
""")
add_newdoc("modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("modstruve",
r"""
modstruve(v, x)
Modified Struve function.
Return the value of the modified Struve function of order `v` at `x`. The
modified Struve function is defined as,
.. math::
L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(\imath x),
where :math:`H_v` is the Struve function.
Parameters
----------
v : array_like
Order of the modified Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
L : ndarray
Value of the modified Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the function:
- power series
- expansion in Bessel functions (if :math:`|x| < |v| + 20`)
- asymptotic large-x expansion (if :math:`x \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
struve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/11
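Examples
--------
As an illustrative check, for half-integer order the function has a known
closed form, :math:`L_{1/2}(x) = \sqrt{2/(\pi x)}(\cosh x - 1)` (see [1]_):
>>> from scipy.special import modstruve
>>> x = 2.0
>>> expected = np.sqrt(2 / (np.pi * x)) * (np.cosh(x) - 1)
>>> np.allclose(modstruve(0.5, x), expected)
True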
""")
add_newdoc("nbdtr",
r"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function.
Returns the sum of the terms 0 through `k` of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that `k` or fewer failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k` or fewer failures before `n` successes in a
sequence of events with individual success probability `p`.
See also
--------
nbdtrc
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).
Wrapper for the Cephes [1]_ routine `nbdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
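Examples
--------
The incomplete-beta identity given in the notes can be checked directly
(an illustrative example using `betainc` from this module):
>>> from scipy.special import nbdtr, betainc
>>> k, n, p = 5, 3, 0.5
>>> np.allclose(nbdtr(k, n, p), betainc(n, k + 1, p))
True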
""")
add_newdoc("nbdtrc",
r"""
nbdtrc(k, n, p)
Negative binomial survival function.
Returns the sum of the terms `k + 1` to infinity of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that more than `k` failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k + 1` or more failures before `n` successes in a
sequence of events with individual success probability `p`.
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n).
Wrapper for the Cephes [1]_ routine `nbdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("nbdtri",
"""
nbdtri(k, n, y)
Inverse of `nbdtr` vs `p`.
Returns the inverse with respect to the parameter `p` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
Returns
-------
p : ndarray
Probability of success in a single event (float) such that
`nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `nbdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
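Examples
--------
As an illustrative round trip, `nbdtri` recovers the probability of success
used in `nbdtr`:
>>> from scipy.special import nbdtr, nbdtri
>>> k, n, p = 5, 3, 0.3
>>> y = nbdtr(k, n, p)
>>> np.allclose(nbdtri(k, n, y), p)
True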
""")
add_newdoc("nbdtrik",
r"""
nbdtrik(y, n, p)
Inverse of `nbdtr` vs `k`.
Returns the inverse with respect to the parameter `k` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
k : ndarray
The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("nbdtrin",
r"""
nbdtrin(k, y, p)
Inverse of `nbdtr` vs `n`.
Returns the inverse with respect to the parameter `n` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
p : array_like
Probability of success in a single event (float).
Returns
-------
n : ndarray
The number of successes `n` such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("ncfdtr",
r"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
The non-central F describes the distribution of,
.. math::
Z = \frac{X/d_n}{Y/d_d}
where :math:`X` and :math:`Y` are independently distributed, with
:math:`X` distributed non-central :math:`\chi^2` with noncentrality
parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y`
distributed :math:`\chi^2` with :math:`d_d` degrees of freedom.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`.
The cumulative distribution function is computed using Formula 26.6.20 of
[2]_:
.. math::
F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}),
where :math:`I` is the regularized incomplete beta function, and
:math:`x = f d_n/(f d_n + d_d)`.
The computation time required for this routine is proportional to the
noncentrality parameter `nc`. Very large values of this parameter can
consume immense computer resources. This is why the search range is
bounded by 10,000.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("ncfdtri",
"""
ncfdtri(dfn, dfd, nc, p)
Inverse with respect to `f` of the CDF of the non-central F distribution.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
Returns
-------
f : float
Quantiles, i.e., the upper limit of integration.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtri
Compute the CDF for several values of `f`:
>>> f = [0.5, 1, 1.5]
>>> p = ncfdtr(2, 3, 1.5, f)
>>> p
array([ 0.20782291, 0.36107392, 0.47345752])
Compute the inverse. We recover the values of `f`, as expected:
>>> ncfdtri(2, 3, 1.5, p)
array([ 0.5, 1. , 1.5])
""")
add_newdoc("ncfdtridfd",
"""
ncfdtridfd(dfn, p, nc, f)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
This is the inverse with respect to `dfd` of `ncfdtr`.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e., the upper limit of integration.
Returns
-------
dfd : float
Degrees of freedom of the denominator sum of squares.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtridfd
Compute the CDF for several values of `dfd`:
>>> dfd = [1, 2, 3]
>>> p = ncfdtr(2, dfd, 0.25, 15)
>>> p
array([ 0.8097138 , 0.93020416, 0.96787852])
Compute the inverse. We recover the values of `dfd`, as expected:
>>> ncfdtridfd(2, p, 0.25, 15)
array([ 1., 2., 3.])
""")
add_newdoc("ncfdtridfn",
"""
ncfdtridfn(p, dfd, nc, f)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
This is the inverse with respect to `dfn` of `ncfdtr`.
See `ncfdtr` for more details.
Parameters
----------
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : float
Quantiles, i.e., the upper limit of integration.
Returns
-------
dfn : float
Degrees of freedom of the numerator sum of squares.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtridfn
Compute the CDF for several values of `dfn`:
>>> dfn = [1, 2, 3]
>>> p = ncfdtr(dfn, 2, 0.25, 15)
>>> p
array([ 0.92562363, 0.93020416, 0.93188394])
Compute the inverse. We recover the values of `dfn`, as expected:
>>> ncfdtridfn(p, 2, 0.25, 15)
array([ 1., 2., 3.])
""")
add_newdoc("ncfdtrinc",
"""
ncfdtrinc(dfn, dfd, p, f)
Calculate non-centrality parameter for non-central F distribution.
This is the inverse with respect to `nc` of `ncfdtr`.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
f : array_like
Quantiles, i.e., the upper limit of integration.
Returns
-------
nc : float
Noncentrality parameter.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtrinc
Compute the CDF for several values of `nc`:
>>> nc = [0.5, 1.5, 2.0]
>>> p = ncfdtr(2, 3, nc, 15)
>>> p
array([ 0.96309246, 0.94327955, 0.93304098])
Compute the inverse. We recover the values of `nc`, as expected:
>>> ncfdtrinc(2, 3, p, 15)
array([ 0.5, 1.5, 2. ])
""")
add_newdoc("nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central `t` distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e., the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise, it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e., the upper limit of integration.
""")
add_newdoc("nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e., the upper limit of integration.
""")
add_newdoc("nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
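Examples
--------
An illustrative round trip with `nctdtr`, recovering the original quantile:
>>> from scipy.special import nctdtr, nctdtrit
>>> df, nc, t = 3, 0.5, 1.2
>>> p = nctdtr(df, nc, t)
>>> np.allclose(nctdtrit(df, nc, p), t)
True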
""")
add_newdoc("ndtr",
r"""
ndtr(x)
Gaussian cumulative distribution function.
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`
.. math::
\frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2/2) dt
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
log_ndtr
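Examples
--------
Two illustrative checks: the CDF equals one half at the origin, and it can
be written in terms of `erf`:
>>> from scipy.special import ndtr, erf
>>> np.allclose(ndtr(0.0), 0.5)
True
>>> x = 1.3
>>> np.allclose(ndtr(x), 0.5 * (1 + erf(x / np.sqrt(2))))
True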
""")
add_newdoc("nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
nrdtrisd, ndtr
""")
add_newdoc("nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
ndtr
""")
add_newdoc("log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function.
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the log of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
ndtr
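Examples
--------
An illustrative consistency check against `ndtr` for a moderate argument:
>>> from scipy.special import log_ndtr, ndtr
>>> x = 1.5
>>> np.allclose(log_ndtr(x), np.log(ndtr(x)))
True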
""")
add_newdoc("ndtri",
"""
ndtri(y)
Inverse of `ndtr` vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to `x`)
is equal to y.
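Examples
--------
As an illustrative round trip, `ndtri` inverts `ndtr`:
>>> from scipy.special import ndtr, ndtri
>>> x = 0.75
>>> np.allclose(ndtri(ndtr(x)), x)
True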
""")
add_newdoc("obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("obl_rad1",
"""
obl_rad1(m, n, c, x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("obl_rad1_cv",
"""
obl_rad1_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("obl_rad2",
"""
obl_rad2(m, n, c, x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("obl_rad2_cv",
"""
obl_rad2_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d, dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("pbvv",
"""
pbvv(v, x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("pbwa",
r"""
pbwa(a, x)
Parabolic cylinder function W.
The function is a particular solution to the differential equation
.. math::
y'' + \left(\frac{1}{4}x^2 - a\right)y = 0,
for a full definition see section 12.14 in [1]_.
Parameters
----------
a : array_like
Real parameter
x : array_like
Real argument
Returns
-------
w : scalar or ndarray
Value of the function
wp : scalar or ndarray
Value of the derivative in x
Notes
-----
The function is a wrapper for a Fortran routine by Zhang and Jin
[2]_. The implementation is accurate only for ``|a|, |x| < 5`` and
returns NaN outside that range.
References
----------
.. [1] Digital Library of Mathematical Functions, 12.14.
https://dlmf.nist.gov/12.14
.. [2] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("pdtr",
r"""
pdtr(k, m, out=None)
Poisson cumulative distribution function.
Defined as the probability that a Poisson-distributed random
variable with event rate :math:`m` is less than or equal to
:math:`k`. More concretely, this works out to be [1]_
.. math::
\exp(-m) \sum_{j = 0}^{\lfloor{k}\rfloor} \frac{m^j}{j!}.
Parameters
----------
k : array_like
Nonnegative real argument
m : array_like
Nonnegative real shape parameter
out : ndarray
Optional output array for the function results
See Also
--------
pdtrc : Poisson survival function
pdtrik : inverse of `pdtr` with respect to `k`
pdtri : inverse of `pdtr` with respect to `m`
Returns
-------
scalar or ndarray
Values of the Poisson cumulative distribution function
References
----------
.. [1] https://en.wikipedia.org/wiki/Poisson_distribution
Examples
--------
>>> import scipy.special as sc
It is a cumulative distribution function, so it converges to 1
monotonically as `k` goes to infinity.
>>> sc.pdtr([1, 10, 100, np.inf], 1)
array([0.73575888, 0.99999999, 1. , 1. ])
It is discontinuous at integers and constant between integers.
>>> sc.pdtr([1, 1.5, 1.9, 2], 1)
array([0.73575888, 0.73575888, 0.73575888, 0.9196986 ])
""")
add_newdoc("pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be non-negative doubles.
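Examples
--------
An illustrative check of the incomplete-gamma identity stated above:
>>> from scipy.special import pdtrc, gammainc
>>> k, m = 3, 2.5
>>> np.allclose(pdtrc(k, m), gammainc(k + 1, m))
True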
""")
add_newdoc("pdtri",
"""
pdtri(k, y)
Inverse to `pdtr` vs m
Returns the Poisson variable `m` such that the sum from 0 to `k` of
the Poisson density is equal to the given probability `y`:
calculated by gammaincinv(k+1, y). `k` must be a nonnegative
integer and `y` between 0 and 1.
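Examples
--------
An illustrative round trip with `pdtr` for an integer `k`:
>>> from scipy.special import pdtr, pdtri
>>> k, y = 2, 0.7
>>> m = pdtri(k, y)
>>> np.allclose(pdtr(k, m), y)
True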
""")
add_newdoc("pdtrik",
"""
pdtrik(p, m)
Inverse to `pdtr` vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("poch",
r"""
poch(z, m)
Pochhammer symbol.
The Pochhammer symbol (rising factorial) is defined as
.. math::
(z)_m = \frac{\Gamma(z + m)}{\Gamma(z)}
For positive integer `m` it reads
.. math::
(z)_m = z (z + 1) ... (z + m - 1)
See [dlmf]_ for more details.
Parameters
----------
z, m : array_like
Real-valued arguments.
Returns
-------
scalar or ndarray
The value of the function.
References
----------
.. [dlmf] Nist, Digital Library of Mathematical Functions
https://dlmf.nist.gov/5.2#iii
Examples
--------
>>> import scipy.special as sc
It is 1 when m is 0.
>>> sc.poch([1, 2, 3, 4], 0)
array([1., 1., 1., 1.])
For z equal to 1 it reduces to the factorial function.
>>> sc.poch(1, 5)
120.0
>>> 1 * 2 * 3 * 4 * 5
120
It can be expressed in terms of the gamma function.
>>> z, m = 3.7, 2.1
>>> sc.poch(z, m)
20.529581933776953
>>> sc.gamma(z + m) / sc.gamma(z)
20.52958193377696
""")
add_newdoc("pro_ang1",
"""
pro_ang1(m, n, c, x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pro_ang1_cv",
"""
pro_ang1_cv(m, n, c, cv, x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pro_cv",
"""
pro_cv(m, n, c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("pro_rad1",
"""
pro_rad1(m, n, c, x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pro_rad1_cv",
"""
pro_rad1_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pro_rad2",
"""
pro_rad2(m, n, c, x)
Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pro_rad2_cv",
"""
pro_rad2_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
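Examples
--------
An illustrative check against the defining formula above:
>>> from scipy.special import pseudo_huber
>>> delta, r = 2.0, 3.0
>>> expected = delta**2 * (np.sqrt(1 + (r / delta)**2) - 1)
>>> np.allclose(pseudo_huber(delta, r), expected)
True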
""")
add_newdoc("psi",
"""
psi(z, out=None)
The digamma function.
The logarithmic derivative of the gamma function evaluated at ``z``.
Parameters
----------
z : array_like
Real or complex argument.
out : ndarray, optional
Array for the computed values of ``psi``.
Returns
-------
digamma : ndarray
Computed values of ``psi``.
Notes
-----
For large values not close to the negative real axis, ``psi`` is
computed using the asymptotic series (5.11.2) from [1]_. For small
arguments not close to the negative real axis, the recurrence
relation (5.5.2) from [1]_ is used until the argument is large
enough to use the asymptotic series. For values close to the
negative real axis, the reflection formula (5.5.4) from [1]_ is
used first. Note that ``psi`` has a family of zeros on the
negative real axis which occur between the poles at nonpositive
integers. Around the zeros the reflection formula suffers from
cancellation and the implementation loses precision. The sole
positive zero and the first negative zero, however, are handled
separately by precomputing series expansions using [2]_, so the
function should maintain full accuracy around the origin.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/5
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
Examples
--------
>>> from scipy.special import psi
>>> z = 3 + 4j
>>> psi(z)
(1.55035981733341+1.0105022091860445j)
Verify psi(z) = psi(z + 1) - 1/z:
>>> psi(z + 1) - 1/z
(1.55035981733341+1.0105022091860445j)
""")
add_newdoc("radian",
"""
radian(d, m, s, out=None)
Convert from degrees to radians.
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
Parameters
----------
d : array_like
Degrees, can be real-valued.
m : array_like
Minutes, can be real-valued.
s : array_like
Seconds, can be real-valued.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the inputs in radians.
Examples
--------
>>> import scipy.special as sc
There are many ways to specify an angle.
>>> sc.radian(90, 0, 0)
1.5707963267948966
>>> sc.radian(0, 60 * 90, 0)
1.5707963267948966
>>> sc.radian(0, 0, 60**2 * 90)
1.5707963267948966
The inputs can be real-valued.
>>> sc.radian(1.5, 0, 0)
0.02617993877991494
>>> sc.radian(1, 30, 0)
0.02617993877991494
""")
add_newdoc("rel_entr",
r"""
rel_entr(x, y, out=None)
Elementwise function for computing relative entropy.
.. math::
\mathrm{rel\_entr}(x, y) =
\begin{cases}
x \log(x / y) & x > 0, y > 0 \\
0 & x = 0, y \ge 0 \\
\infty & \text{otherwise}
\end{cases}
Parameters
----------
x, y : array_like
Input arrays
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Relative entropy of the inputs
See Also
--------
entr, kl_div
Notes
-----
.. versionadded:: 0.15.0
This function is jointly convex in x and y.
The origin of this function is in convex programming; see
[1]_. Given two discrete probability distributions :math:`p_1,
\ldots, p_n` and :math:`q_1, \ldots, q_n`, to get the relative
entropy of statistics compute the sum
.. math::
\sum_{i = 1}^n \mathrm{rel\_entr}(p_i, q_i).
See [2]_ for details.
References
----------
.. [1] Grant, Boyd, and Ye, "CVX: Matlab Software for Disciplined Convex
Programming", http://cvxr.com/cvx/
.. [2] Kullback-Leibler divergence,
https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
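Examples
--------
An illustrative check of the piecewise definition above, including the
``x = 0`` case:
>>> from scipy.special import rel_entr
>>> x, y = 0.3, 0.5
>>> np.allclose(rel_entr([x, 0.0], [y, 0.5]), [x * np.log(x / y), 0.0])
True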
""")
add_newdoc("rgamma",
r"""
rgamma(z, out=None)
Reciprocal of the gamma function.
Defined as :math:`1 / \Gamma(z)`, where :math:`\Gamma` is the
gamma function. For more on the gamma function see `gamma`.
Parameters
----------
z : array_like
Real or complex valued input
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Function results
Notes
-----
The gamma function has no zeros and has simple poles at
nonpositive integers, so `rgamma` is an entire function with zeros
at the nonpositive integers. See the discussion in [dlmf]_ for
more details.
See Also
--------
gamma, gammaln, loggamma
References
----------
.. [dlmf] Nist, Digital Library of Mathematical functions,
https://dlmf.nist.gov/5.2#i
Examples
--------
>>> import scipy.special as sc
It is the reciprocal of the gamma function.
>>> sc.rgamma([1, 2, 3, 4])
array([1. , 1. , 0.5 , 0.16666667])
>>> 1 / sc.gamma([1, 2, 3, 4])
array([1. , 1. , 0.5 , 0.16666667])
It is zero at nonpositive integers.
>>> sc.rgamma([0, -1, -2, -3])
array([0., 0., 0., 0.])
It rapidly underflows to zero along the positive real axis.
>>> sc.rgamma([10, 100, 179])
array([2.75573192e-006, 1.07151029e-156, 0.00000000e+000])
""")
add_newdoc("round",
"""
round(x, out=None)
Round to the nearest integer.
Returns the nearest integer to `x`. If `x` ends in 0.5 exactly,
the nearest even integer is chosen.
Parameters
----------
x : array_like
Real valued input.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
The nearest integers to the elements of `x`. The result is of
floating type, not integer type.
Examples
--------
>>> import scipy.special as sc
It rounds to even.
>>> sc.round([0.5, 1.5])
array([0., 2.])
""")
add_newdoc("shichi",
r"""
shichi(x, out=None)
Hyperbolic sine and cosine integrals.
The hyperbolic sine integral is
.. math::
\int_0^x \frac{\sinh{t}}{t}dt
and the hyperbolic cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cosh{t} - 1}{t} dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the hyperbolic sine
and cosine integrals.
Returns
-------
si : ndarray
Hyperbolic sine integral at ``x``
ci : ndarray
Hyperbolic cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``chi`` is the real part of the
hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x
+ 0j)`` differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *shichi* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *shi* and *chi* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("sici",
r"""
sici(x, out=None)
Sine and cosine integrals.
The sine integral is
.. math::
\int_0^x \frac{\sin{t}}{t}dt
and the cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cos{t} - 1}{t}dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the sine and cosine
integrals.
Returns
-------
si : ndarray
Sine integral at ``x``
ci : ndarray
Cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``ci`` is the real part of the
cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)``
differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *sici* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *si* and *ci* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
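Examples
--------
An illustrative check of the sine integral against a direct numerical
quadrature of the defining integral (trapezoidal rule on a fine grid):
>>> from scipy.special import sici
>>> t = np.linspace(1e-12, 2.0, 100001)
>>> si, ci = sici(2.0)
>>> np.allclose(si, np.trapz(np.sin(t) / t, t), atol=1e-8)
True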
""")
add_newdoc("sindg",
"""
sindg(x, out=None)
Sine of the angle `x` given in degrees.
Parameters
----------
x : array_like
Angle, given in degrees.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Sine at the input.
See Also
--------
cosdg, tandg, cotdg
Examples
--------
>>> import scipy.special as sc
It is more accurate than using sine directly.
>>> x = 180 * np.arange(3)
>>> sc.sindg(x)
array([ 0., -0., 0.])
>>> np.sin(x * np.pi / 180)
array([ 0.0000000e+00, 1.2246468e-16, -2.4492936e-16])
""")
add_newdoc("smirnov",
r"""
smirnov(n, d)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (aka the Survival Function) of Dn+ (or Dn-)
for a one-sided test of equality between an empirical and a
theoretical distribution. It is equal to the probability that the
maximum difference between a theoretical distribution and an empirical
one based on `n` samples is greater than d.
Parameters
----------
n : int
Number of samples
d : float array_like
Deviation between the Empirical CDF (ECDF) and the target CDF.
Returns
-------
float
The value(s) of smirnov(n, d), Prob(Dn+ >= d) (Also Prob(Dn- >= d))
Notes
-----
`smirnov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
function is exposed in `scipy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.ksone` distribution.
See Also
--------
smirnovi : The Inverse Survival Function for the distribution
scipy.stats.ksone : Provides the functionality as a continuous distribution
kolmogorov, kolmogi : Functions for the two-sided distribution
Examples
--------
>>> from scipy.special import smirnov
Show the probability of a gap at least as big as 0, 0.5 and 1.0 for a sample of size 5
>>> smirnov(5, [0, 0.5, 1.0])
array([ 1. , 0.056, 0. ])
Compare a sample of size 5 drawn from a source N(0.5, 1) distribution against
a target N(0, 1) CDF.
>>> from scipy.stats import norm
>>> rng = np.random.default_rng()
>>> n = 5
>>> gendist = norm(0.5, 1) # Normal distribution, mean 0.5, stddev 1
>>> x = np.sort(gendist.rvs(size=n, random_state=rng))
>>> x
array([-1.3922078 , -0.13526532, 0.1371477 , 0.18981686, 1.81948167])
>>> target = norm(0, 1)
>>> cdfs = target.cdf(x)
>>> cdfs
array([0.08192974, 0.44620105, 0.55454297, 0.57527368, 0.96558101])
Construct the Empirical CDF and the K-S statistics (Dn+, Dn-, Dn).
>>> ecdfs = np.arange(n+1, dtype=float)/n
>>> cols = np.column_stack([x, ecdfs[1:], cdfs, cdfs - ecdfs[:n], ecdfs[1:] - cdfs])
>>> np.set_printoptions(precision=3)
>>> cols
array([[-1.392, 0.2 , 0.082, 0.082, 0.118],
[-0.135, 0.4 , 0.446, 0.246, -0.046],
[ 0.137, 0.6 , 0.555, 0.155, 0.045],
[ 0.19 , 0.8 , 0.575, -0.025, 0.225],
[ 1.819, 1. , 0.966, 0.166, 0.034]])
>>> gaps = cols[:, -2:]
>>> Dnpm = np.max(gaps, axis=0)
>>> print('Dn-=%f, Dn+=%f' % (Dnpm[0], Dnpm[1]))
Dn-=0.246201, Dn+=0.224726
>>> probs = smirnov(n, Dnpm)
>>> print(chr(10).join(['For a sample of size %d drawn from a N(0, 1) distribution:' % n,
... ' Smirnov n=%d: Prob(Dn- >= %f) = %.4f' % (n, Dnpm[0], probs[0]),
... ' Smirnov n=%d: Prob(Dn+ >= %f) = %.4f' % (n, Dnpm[1], probs[1])]))
For a sample of size 5 drawn from a N(0, 1) distribution:
Smirnov n=5: Prob(Dn- >= 0.246201) = 0.4713
Smirnov n=5: Prob(Dn+ >= 0.224726) = 0.5243
Plot the Empirical CDF against the target N(0, 1) CDF
>>> import matplotlib.pyplot as plt
>>> plt.step(np.concatenate([[-3], x]), ecdfs, where='post', label='Empirical CDF')
>>> x3 = np.linspace(-3, 3, 100)
>>> plt.plot(x3, target.cdf(x3), label='CDF for N(0, 1)')
>>> plt.ylim([0, 1]); plt.grid(True); plt.legend();
>>> # Add vertical lines marking Dn+ and Dn-
>>> iminus, iplus = np.argmax(gaps, axis=0)
>>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r', linestyle='dashed', lw=4)
>>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='m', linestyle='dashed', lw=4)
>>> plt.show()
""")
add_newdoc("smirnovi",
"""
smirnovi(n, p)
Inverse to `smirnov`
Returns `d` such that ``smirnov(n, d) == p``, the critical value
corresponding to `p`.
Parameters
----------
n : int
Number of samples
p : float array_like
Probability
Returns
-------
float
The value(s) of smirnovi(n, p), the critical values.
Notes
-----
`smirnov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
function is exposed in `scipy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.ksone` distribution.
See Also
--------
smirnov : The Survival Function (SF) for the distribution
scipy.stats.ksone : Provides the functionality as a continuous distribution
kolmogorov, kolmogi, scipy.stats.kstwobign : Functions for the two-sided distribution
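Examples
--------
An illustrative round trip with `smirnov`:
>>> from scipy.special import smirnov, smirnovi
>>> n, p = 5, 0.05
>>> d = smirnovi(n, p)
>>> np.allclose(smirnov(n, d), p)
True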
""")
add_newdoc("_smirnovc",
"""
_smirnovc(n, d)
Internal function, do not use.
""")
add_newdoc("_smirnovci",
"""
Internal function, do not use.
""")
add_newdoc("_smirnovp",
"""
_smirnovp(n, p)
Internal function, do not use.
""")
add_newdoc("spence",
r"""
spence(z, out=None)
Spence's function, also known as the dilogarithm.
It is defined to be
.. math::
\int_1^z \frac{\log(t)}{1 - t}dt
for complex :math:`z`, where the contour of integration is taken
to avoid the branch cut of the logarithm. Spence's function is
analytic everywhere except the negative real axis where it has a
branch cut.
Parameters
----------
z : array_like
Points at which to evaluate Spence's function
Returns
-------
s : ndarray
Computed values of Spence's function
Notes
-----
There is a different convention which defines Spence's function by
the integral
.. math::
-\int_0^z \frac{\log(1 - t)}{t}dt;
this is our ``spence(1 - z)``.
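Examples
--------
Two illustrative special values that follow from the definition:
``spence(1) = 0`` (an empty integration range) and ``spence(0) = pi**2/6``:
>>> from scipy.special import spence
>>> np.allclose([spence(1.0), spence(0.0)], [0.0, np.pi**2 / 6])
True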
""")
add_newdoc("stdtr",
"""
stdtr(df, t)
Student t distribution cumulative distribution function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
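Examples
--------
Two illustrative checks: the distribution is symmetric about zero, and
`stdtrit` inverts the CDF in `t`:
>>> from scipy.special import stdtr, stdtrit
>>> np.allclose(stdtr(3, 0), 0.5)
True
>>> np.allclose(stdtrit(3, stdtr(3, 1.5)), 1.5)
True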
""")
add_newdoc("stdtridf",
"""
stdtridf(p, t)
Inverse of `stdtr` vs df
Returns the argument df such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("stdtrit",
"""
stdtrit(df, p)
Inverse of `stdtr` vs `t`
Returns the argument `t` such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("struve",
r"""
struve(v, x)
Struve function.
Return the value of the Struve function of order `v` at `x`. The Struve
function is defined as,
.. math::
H_v(x) = (z/2)^{v + 1} \sum_{n=0}^\infty \frac{(-1)^n (z/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})},
where :math:`\Gamma` is the gamma function.
Parameters
----------
v : array_like
Order of the Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
H : ndarray
Value of the Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the Struve function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
modstruve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/11
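Examples
--------
As an illustrative check, for half-integer order the function has a known
closed form, :math:`H_{1/2}(x) = \sqrt{2/(\pi x)}(1 - \cos x)` (see [1]_):
>>> from scipy.special import struve
>>> x = 2.0
>>> expected = np.sqrt(2 / (np.pi * x)) * (1 - np.cos(x))
>>> np.allclose(struve(0.5, x), expected)
True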
""")
add_newdoc("tandg",
"""
tandg(x, out=None)
Tangent of angle `x` given in degrees.
Parameters
----------
x : array_like
Angle, given in degrees.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Tangent at the input.
See Also
--------
sindg, cosdg, cotdg
Examples
--------
>>> import scipy.special as sc
It is more accurate than using tangent directly.
>>> x = 180 * np.arange(3)
>>> sc.tandg(x)
array([0., 0., 0.])
>>> np.tan(x * np.pi / 180)
array([ 0.0000000e+00, -1.2246468e-16, -2.4492936e-16])
""")
add_newdoc("tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2) * erfc(-i*z)
See Also
--------
dawsn, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> z = special.wofz(x)
>>> plt.plot(x, z.real, label='wofz(x).real')
>>> plt.plot(x, z.imag, label='wofz(x).imag')
>>> plt.xlabel('$x$')
>>> plt.legend(framealpha=1, shadow=True)
>>> plt.grid(alpha=0.25)
>>> plt.show()
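The defining relation ``exp(-z**2) * erfc(-i*z)`` can also be checked
directly at a single point (illustrative):
>>> z = 1.0 + 0.5j
>>> np.allclose(special.wofz(z), np.exp(-z**2) * special.erfc(-1j * z))
True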
""")
add_newdoc("xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
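Examples
--------
A short illustration of the special case at ``x = 0`` (where ``log(y)``
alone may be undefined) and of an ordinary value:
>>> import numpy as np
>>> from scipy.special import xlogy
>>> xlogy(0, 0)
0.0
>>> np.allclose(xlogy(2, np.e), 2.0)
True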
""")
add_newdoc("xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
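Examples
--------
An illustrative comparison for a tiny ``y``, where the naive expression
``x*log(1 + y)`` loses all precision:
>>> import numpy as np
>>> from scipy.special import xlog1py
>>> xlog1py(0, -1)
0.0
>>> np.allclose(xlog1py(1, 1e-20), 1e-20)
True
>>> np.log(1 + 1e-20)
0.0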
""")
add_newdoc("y0",
r"""
y0(x)
Bessel function of the second kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval a rational approximation :math:`R(x)` is employed to
compute,
.. math::
Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi},
where :math:`J_0` is the Bessel function of the first kind of order 0.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `y0`.
See also
--------
j0
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("y1",
"""
y1(x)
Bessel function of the second kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 25 term Chebyshev expansion is used, and computing
:math:`J_1` (the Bessel function of the first kind) is required. In the
second, the asymptotic trigonometric representation is employed using two
rational functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `y1`.
See also
--------
j1
yn
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("yn",
r"""
yn(n, x)
Bessel function of the second kind of integer order and real argument.
Parameters
----------
n : array_like
Order (integer).
z : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function, :math:`Y_n(x)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `yn`.
The function is evaluated by forward recurrence on `n`, starting with
values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,
the routine for `y0` or `y1` is called directly.
See also
--------
yv : For real order and real or complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
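Examples
--------
An illustrative consistency check against `y0` and `y1`, which are the
routines `yn` reduces to for ``n = 0`` and ``n = 1``:
>>> import numpy as np
>>> from scipy.special import yn, y0, y1
>>> x = np.array([1.0, 2.0, 10.0])
>>> np.allclose(yn(0, x), y0(x)) and np.allclose(yn(1, x), y1(x))
True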
""")
add_newdoc("yv",
r"""
yv(v, z)
Bessel function of the second kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind, :math:`Y_v(x)`.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
yve : :math:`Y_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
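Examples
--------
The reflection formula quoted in the Notes can be verified numerically for
a non-integer order (illustrative):
>>> import numpy as np
>>> from scipy.special import yv, jv
>>> v, x = 0.3, np.array([1.0, 2.0, 5.0])
>>> np.allclose(yv(-v, x), yv(v, x) * np.cos(np.pi * v) + jv(v, x) * np.sin(np.pi * v))
True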
""")
add_newdoc("yve",
r"""
yve(v, z)
Exponentially scaled Bessel function of the second kind of real order.
Returns the exponentially scaled Bessel function of the second
kind of real order `v` at complex `z`::
yve(v, z) = yv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
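Examples
--------
A direct illustrative check of the scaling relation stated above:
>>> import numpy as np
>>> from scipy.special import yv, yve
>>> v, z = 1.5, 1.0 + 2.0j
>>> np.allclose(yve(v, z), yv(v, z) * np.exp(-abs(z.imag)))
True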
""")
add_newdoc("_zeta",
"""
_zeta(x, q)
Internal function, Hurwitz zeta.
""")
add_newdoc("zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``. For ``x < 1`` the analytic continuation is
computed. For more information on the Riemann zeta function, see
[dlmf]_.
Parameters
----------
x : array_like of float
Values at which to compute zeta(x) - 1 (must be real).
Returns
-------
out : array_like
Values of zeta(x) - 1.
See Also
--------
zeta
Examples
--------
>>> from scipy.special import zetac, zeta
Some special values:
>>> zetac(2), np.pi**2/6 - 1
(0.64493406684822641, 0.6449340668482264)
>>> zetac(-1), -1.0/12 - 1
(-1.0833333333333333, -1.0833333333333333)
Compare ``zetac(x)`` to ``zeta(x) - 1`` for large `x`:
>>> zetac(60), zeta(60) - 1
(8.673617380119933e-19, 0.0)
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/25
""")
add_newdoc("_riemann_zeta",
"""
Internal function, use `zeta` instead.
""")
add_newdoc("_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using Bessel function series
Returns
-------
v, err
""")
add_newdoc("_spherical_jn",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("_spherical_jn_d",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("_spherical_yn",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("_spherical_yn_d",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("_spherical_in",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("_spherical_in_d",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("_spherical_kn",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("_spherical_kn_d",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("loggamma",
r"""
loggamma(z, out=None)
Principal branch of the logarithm of the gamma function.
Defined to be :math:`\log(\Gamma(x))` for :math:`x > 0` and
extended to the complex plane by analytic continuation. The
function has a single branch cut on the negative real axis.
.. versionadded:: 0.18.0
Parameters
----------
z : array-like
Values in the complex plane at which to compute ``loggamma``
out : ndarray, optional
Output array for computed values of ``loggamma``
Returns
-------
loggamma : ndarray
Values of ``loggamma`` at z.
Notes
-----
It is not generally true that :math:`\log\Gamma(z) =
\log(\Gamma(z))`, though the real parts of the functions do
agree. The benefit of not defining `loggamma` as
:math:`\log(\Gamma(z))` is that the latter function has a
complicated branch cut structure whereas `loggamma` is analytic
except for on the negative real axis.
The identities
.. math::
\exp(\log\Gamma(z)) &= \Gamma(z) \\
\log\Gamma(z + 1) &= \log(z) + \log\Gamma(z)
make `loggamma` useful for working in complex logspace.
On the real line `loggamma` is related to `gammaln` via
``exp(loggamma(x + 0j)) = gammasgn(x)*exp(gammaln(x))``, up to
rounding error.
The implementation here is based on [hare1997]_.
See also
--------
gammaln : logarithm of the absolute value of the gamma function
gammasgn : sign of the gamma function
References
----------
.. [hare1997] D.E.G. Hare,
*Computing the Principal Branch of log-Gamma*,
Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236.
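Examples
--------
An illustrative check of ``exp(loggamma(z)) == gamma(z)`` away from the
branch cut:
>>> import numpy as np
>>> from scipy.special import loggamma, gamma
>>> z = 3.0 + 4.0j
>>> np.allclose(np.exp(loggamma(z)), gamma(z))
True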
""")
add_newdoc("_sinpi",
"""
Internal function, do not use.
""")
add_newdoc("_cospi",
"""
Internal function, do not use.
""")
add_newdoc("owens_t",
"""
owens_t(h, a)
Owen's T Function.
The function T(h, a) gives the probability of the event
(X > h and 0 < Y < a * X) where X and Y are independent
standard normal random variables.
Parameters
----------
h : array_like
Input value.
a : array_like
Input value.
Returns
-------
t : scalar or ndarray
Probability of the event (X > h and 0 < Y < a * X),
where X and Y are independent standard normal random variables.
Examples
--------
>>> from scipy import special
>>> a = 3.5
>>> h = 0.78
>>> special.owens_t(h, a)
0.10877216734852274
References
----------
.. [1] M. Patefield and D. Tandy, "Fast and accurate calculation of
Owen's T Function", Statistical Software vol. 5, pp. 1-25, 2000.
""")
add_newdoc("_factorial",
"""
Internal function, do not use.
""")
add_newdoc("wright_bessel",
r"""
wright_bessel(a, b, x)
Wright's generalized Bessel function.
Wright's generalized Bessel function is an entire function and defined as
.. math:: \Phi(a, b; x) = \sum_{k=0}^\infty \frac{x^k}{k! \Gamma(a k + b)}
See also [1]_.
Parameters
----------
a : array_like of float
a >= 0
b : array_like of float
b >= 0
x : array_like of float
x >= 0
Notes
-----
Due to the complexity of the function with its three parameters, only
non-negative arguments are implemented.
Examples
--------
>>> from scipy.special import wright_bessel
>>> a, b, x = 1.5, 1.1, 2.5
>>> wright_bessel(a, b-1, x)
4.5314465939443025
Now, let us verify the relation
.. math:: \Phi(a, b-1; x) = a x \Phi(a, b+a; x) + (b-1) \Phi(a, b; x)
>>> a * x * wright_bessel(a, b+a, x) + (b-1) * wright_bessel(a, b, x)
4.5314465939443025
References
----------
.. [1] Digital Library of Mathematical Functions, 10.46.
https://dlmf.nist.gov/10.46.E1
""")
add_newdoc("ndtri_exp",
r"""
ndtri_exp(y)
Inverse of `log_ndtr` vs x. Allows for greater precision than
`ndtri` composed with `numpy.exp` for very small values of y and for
y close to 0.
Parameters
----------
y : array_like of float
Returns
-------
scalar or ndarray
Inverse of the log CDF of the standard normal distribution, evaluated
at y.
Examples
--------
>>> import scipy.special as sc
`ndtri_exp` agrees with the naive implementation when the latter does
not suffer from underflow.
>>> sc.ndtri_exp(-1)
-0.33747496376420244
>>> sc.ndtri(np.exp(-1))
-0.33747496376420244
For extreme values of y, the naive approach fails
>>> sc.ndtri(np.exp(-800))
-inf
>>> sc.ndtri(np.exp(-1e-20))
inf
whereas `ndtri_exp` is still able to compute the result to high precision.
>>> sc.ndtri_exp(-800)
-39.88469483825668
>>> sc.ndtri_exp(-1e-20)
9.262340089798409
See Also
--------
log_ndtr, ndtri, ndtr
""")
| e-q/scipy | scipy/special/add_newdocs.py | Python | bsd-3-clause | 242,499 | ["Gaussian"] | 56870c9d7f63910f534be2f1f6b6e47fbd1f14582851b7d4332fcc43ab8f14e2 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin, use {% raw %}{% url 'admin:index' %}{% endraw %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include("{{ cookiecutter.repo_name }}.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development; just visit
# these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception("Bad Request!")}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception("Permission Denied")}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception("Page not Found")}),
url(r'^500/$', default_views.server_error),
]
| ovidner/cookiecutter-django | {{cookiecutter.repo_name}}/config/urls.py | Python | bsd-3-clause | 1,472 | ["VisIt"] | 0c5b7f4b567d56eae8bb4a60aaff404790f98299cdfd0d1ecdc1f99f708d1a35 |
#
# @file TestRDFAnnotationC.py
# @brief RDFAnnotation parser unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# $Id: TestRDFAnnotationC.py 11441 2010-07-09 02:22:23Z mhucka $
# $HeadURL: https://sbml.svn.sourceforge.net/svnroot/sbml/trunk/libsbml/src/bindings/python/test/annotation/TestRDFAnnotationC.py $
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/annotation/test/TestRDFAnnotationC.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestRDFAnnotationC(unittest.TestCase):
global d
d = None
global m
m = None
def setUp(self):
filename = "../../sbml/annotation/test/test-data/annotation.xml"
self.d = libsbml.readSBML(filename)
self.m = self.d.getModel()
pass
def tearDown(self):
_dummyList = [ self.d ]; _dummyList[:] = []; del _dummyList
pass
def test_RDFAnnotation_C_delete(self):
obj = self.m.getCompartment(0)
node = libsbml.RDFAnnotationParser.parseCVTerms(obj)
n1 = libsbml.RDFAnnotationParser.deleteRDFAnnotation(node)
self.assert_( n1.getNumChildren() == 0 )
self.assert_(( "annotation" == n1.getName() ))
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
pass
def test_RDFAnnotation_C_getModelHistory(self):
self.assert_( (self.m == None) == False )
history = self.m.getModelHistory()
self.assert_( history != None )
mc = history.getCreator(0)
self.assert_(( "Le Novere" == mc.getFamilyName() ))
self.assert_(( "Nicolas" == mc.getGivenName() ))
self.assert_(( "lenov@ebi.ac.uk" == mc.getEmail() ))
self.assert_(( "EMBL-EBI" == mc.getOrganisation() ))
date = history.getCreatedDate()
self.assert_( date.getYear() == 2005 )
self.assert_( date.getMonth() == 2 )
self.assert_( date.getDay() == 2 )
self.assert_( date.getHour() == 14 )
self.assert_( date.getMinute() == 56 )
self.assert_( date.getSecond() == 11 )
self.assert_( date.getSignOffset() == 0 )
self.assert_( date.getHoursOffset() == 0 )
self.assert_( date.getMinutesOffset() == 0 )
self.assert_(( "2005-02-02T14:56:11Z" == date.getDateAsString() ))
date = history.getModifiedDate()
self.assert_( date.getYear() == 2006 )
self.assert_( date.getMonth() == 5 )
self.assert_( date.getDay() == 30 )
self.assert_( date.getHour() == 10 )
self.assert_( date.getMinute() == 46 )
self.assert_( date.getSecond() == 2 )
self.assert_( date.getSignOffset() == 0 )
self.assert_( date.getHoursOffset() == 0 )
self.assert_( date.getMinutesOffset() == 0 )
self.assert_(( "2006-05-30T10:46:02Z" == date.getDateAsString() ))
pass
def test_RDFAnnotation_C_parseCVTerms(self):
obj = self.m.getCompartment(0)
node = libsbml.RDFAnnotationParser.parseCVTerms(obj)
self.assert_( node.getNumChildren() == 1 )
rdf = node.getChild(0)
self.assert_(( "RDF" == rdf.getName() ))
self.assert_(( "rdf" == rdf.getPrefix() ))
self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == rdf.getURI() ))
self.assert_( rdf.getNumChildren() == 1 )
desc = rdf.getChild(0)
self.assert_(( "Description" == desc.getName() ))
self.assert_(( "rdf" == desc.getPrefix() ))
self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == desc.getURI() ))
self.assert_( desc.getNumChildren() == 1 )
is1 = desc.getChild(0)
self.assert_(( "is" == is1.getName() ))
self.assert_(( "bqbiol" == is1.getPrefix() ))
self.assert_( is1.getNumChildren() == 1 )
Bag = is1.getChild(0)
self.assert_(( "Bag" == Bag.getName() ))
self.assert_(( "rdf" == Bag.getPrefix() ))
self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == Bag.getURI() ))
self.assert_( Bag.getNumChildren() == 4 )
li = Bag.getChild(0)
self.assert_(( "li" == li.getName() ))
self.assert_(( "rdf" == li.getPrefix() ))
self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == li.getURI() ))
self.assert_( li.getNumChildren() == 0 )
li1 = Bag.getChild(1)
self.assert_(( "li" == li1.getName() ))
self.assert_(( "rdf" == li1.getPrefix() ))
self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == li1.getURI() ))
self.assert_( li1.getNumChildren() == 0 )
li2 = Bag.getChild(2)
self.assert_(( "li" == li2.getName() ))
self.assert_(( "rdf" == li2.getPrefix() ))
self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == li2.getURI() ))
self.assert_( li2.getNumChildren() == 0 )
li3 = Bag.getChild(3)
self.assert_(( "li" == li3.getName() ))
self.assert_(( "rdf" == li3.getPrefix() ))
self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == li3.getURI() ))
self.assert_( li3.getNumChildren() == 0 )
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
pass
def test_RDFAnnotation_C_parseModelHistory(self):
node = libsbml.RDFAnnotationParser.parseModelHistory(self.m)
self.assert_( node.getNumChildren() == 1 )
rdf = node.getChild(0)
self.assert_(( "RDF" == rdf.getName() ))
self.assert_(( "rdf" == rdf.getPrefix() ))
self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == rdf.getURI() ))
self.assert_( rdf.getNumChildren() == 1 )
desc = rdf.getChild(0)
self.assert_(( "Description" == desc.getName() ))
self.assert_(( "rdf" == desc.getPrefix() ))
self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == desc.getURI() ))
self.assert_( desc.getNumChildren() == 3 )
creator = desc.getChild(0)
self.assert_(( "creator" == creator.getName() ))
self.assert_(( "dc" == creator.getPrefix() ))
self.assert_(( "http://purl.org/dc/elements/1.1/" == creator.getURI() ))
self.assert_( creator.getNumChildren() == 1 )
Bag = creator.getChild(0)
self.assert_(( "Bag" == Bag.getName() ))
self.assert_(( "rdf" == Bag.getPrefix() ))
self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == Bag.getURI() ))
self.assert_( Bag.getNumChildren() == 1 )
li = Bag.getChild(0)
self.assert_(( "li" == li.getName() ))
self.assert_(( "rdf" == li.getPrefix() ))
self.assert_(( "http://www.w3.org/1999/02/22-rdf-syntax-ns#" == li.getURI() ))
self.assert_( li.getNumChildren() == 3 )
N = li.getChild(0)
self.assert_(( "N" == N.getName() ))
self.assert_(( "vCard" == N.getPrefix() ))
self.assert_(( "http://www.w3.org/2001/vcard-rdf/3.0#" == N.getURI() ))
self.assert_( N.getNumChildren() == 2 )
Family = N.getChild(0)
self.assert_(( "Family" == Family.getName() ))
self.assert_(( "vCard" == Family.getPrefix() ))
self.assert_(( "http://www.w3.org/2001/vcard-rdf/3.0#" == Family.getURI() ))
self.assert_( Family.getNumChildren() == 1 )
Given = N.getChild(1)
self.assert_(( "Given" == Given.getName() ))
self.assert_(( "vCard" == Given.getPrefix() ))
self.assert_(( "http://www.w3.org/2001/vcard-rdf/3.0#" == Given.getURI() ))
self.assert_( Given.getNumChildren() == 1 )
EMAIL = li.getChild(1)
self.assert_(( "EMAIL" == EMAIL.getName() ))
self.assert_(( "vCard" == EMAIL.getPrefix() ))
self.assert_(( "http://www.w3.org/2001/vcard-rdf/3.0#" == EMAIL.getURI() ))
self.assert_( EMAIL.getNumChildren() == 1 )
ORG = li.getChild(2)
self.assert_(( "ORG" == ORG.getName() ))
self.assert_(( "vCard" == ORG.getPrefix() ))
self.assert_(( "http://www.w3.org/2001/vcard-rdf/3.0#" == ORG.getURI() ))
self.assert_( ORG.getNumChildren() == 1 )
Orgname = ORG.getChild(0)
self.assert_(( "Orgname" == Orgname.getName() ))
self.assert_(( "vCard" == Orgname.getPrefix() ))
self.assert_(( "http://www.w3.org/2001/vcard-rdf/3.0#" == Orgname.getURI() ))
self.assert_( Orgname.getNumChildren() == 1 )
created = desc.getChild(1)
self.assert_(( "created" == created.getName() ))
self.assert_(( "dcterms" == created.getPrefix() ))
self.assert_(( "http://purl.org/dc/terms/" == created.getURI() ))
self.assert_( created.getNumChildren() == 1 )
cr_date = created.getChild(0)
self.assert_(( "W3CDTF" == cr_date.getName() ))
self.assert_(( "dcterms" == cr_date.getPrefix() ))
self.assert_(( "http://purl.org/dc/terms/" == cr_date.getURI() ))
self.assert_( cr_date.getNumChildren() == 1 )
modified = desc.getChild(2)
self.assert_(( "modified" == modified.getName() ))
self.assert_(( "dcterms" == modified.getPrefix() ))
self.assert_(( "http://purl.org/dc/terms/" == modified.getURI() ))
self.assert_( modified.getNumChildren() == 1 )
mo_date = created.getChild(0)
self.assert_(( "W3CDTF" == mo_date.getName() ))
self.assert_(( "dcterms" == mo_date.getPrefix() ))
self.assert_(( "http://purl.org/dc/terms/" == mo_date.getURI() ))
self.assert_( mo_date.getNumChildren() == 1 )
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestRDFAnnotationC))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
| alexholehouse/SBMLIntegrator | libsbml-5.0.0/src/bindings/python/test/annotation/TestRDFAnnotationC.py | Python | gpl-3.0 | 10,360 | ["VisIt"] | 51a73c3440b798a74cb526be3d914b30d5c879abac0bb4864e3c051898048cfc |
#__docformat__ = "restructuredtext en"
# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************
# A collection of optimization algorithms. Version 0.5
# CHANGES
# Added fminbound (July 2001)
# Added brute (Aug. 2002)
# Finished line search satisfying strong Wolfe conditions (Mar. 2004)
# Updated strong Wolfe conditions line search to use
# cubic-interpolation (Mar. 2004)
# Minimization routines
__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg',
'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der',
'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
'line_search', 'check_grad', 'OptimizeResult', 'show_options',
'OptimizeWarning']
__docformat__ = "restructuredtext en"
import warnings
import sys
from numpy import (atleast_1d, eye, argmin, zeros, shape, squeeze,
asarray, sqrt, Inf, asfarray, isinf)
import numpy as np
from ._linesearch import (line_search_wolfe1, line_search_wolfe2,
line_search_wolfe2 as line_search,
LineSearchWarning)
from ._numdiff import approx_derivative
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
from scipy._lib._util import MapWrapper, check_random_state, rng_integers
from scipy.optimize._differentiable_functions import ScalarFunction, FD_METHODS
# standard status messages of optimizers
_status_message = {'success': 'Optimization terminated successfully.',
'maxfev': 'Maximum number of function evaluations has '
'been exceeded.',
'maxiter': 'Maximum number of iterations has been '
'exceeded.',
'pr_loss': 'Desired error not necessarily achieved due '
'to precision loss.',
'nan': 'NaN result encountered.',
'out_of_bounds': 'The result is outside of the provided '
'bounds.'}
class MemoizeJac:
""" Decorator that caches the return values of a function returning `(fun, grad)`
each time it is called. """
def __init__(self, fun):
self.fun = fun
self.jac = None
self._value = None
self.x = None
def _compute_if_needed(self, x, *args):
if not np.all(x == self.x) or self._value is None or self.jac is None:
self.x = np.asarray(x).copy()
fg = self.fun(x, *args)
self.jac = fg[1]
self._value = fg[0]
def __call__(self, x, *args):
""" returns the the function value """
self._compute_if_needed(x, *args)
return self._value
def derivative(self, x, *args):
self._compute_if_needed(x, *args)
return self.jac
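# Illustrative usage sketch for MemoizeJac (not executed here): wrap an
# objective that returns ``(value, gradient)`` so that the value and the
# gradient can be queried separately without recomputing the pair when the
# same ``x`` is passed again.
#
#     def fun_and_grad(x):
#         return np.sum(x**2), 2 * x
#
#     wrapped = MemoizeJac(fun_and_grad)
#     val = wrapped(np.array([1.0, 2.0]))              # evaluates fun_and_grad
#     grad = wrapped.derivative(np.array([1.0, 2.0]))  # reuses the cached pair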
class OptimizeResult(dict):
""" Represents the optimization result.
Attributes
----------
x : ndarray
The solution of the optimization.
success : bool
Whether or not the optimizer exited successfully.
status : int
Termination status of the optimizer. Its value depends on the
underlying solver. Refer to `message` for details.
message : str
Description of the cause of the termination.
fun, jac, hess: ndarray
Values of objective function, its Jacobian and its Hessian (if
available). The Hessians may be approximations, see the documentation
of the function in question.
hess_inv : object
Inverse of the objective function's Hessian; may be an approximation.
Not available for all solvers. The type of this attribute may be
either np.ndarray or scipy.sparse.linalg.LinearOperator.
nfev, njev, nhev : int
Number of evaluations of the objective functions and of its
Jacobian and Hessian.
nit : int
Number of iterations performed by the optimizer.
maxcv : float
The maximum constraint violation.
Notes
-----
`OptimizeResult` may have additional attributes not listed here depending
on the specific solver being used. Since this class is essentially a
subclass of dict with attribute accessors, one can see which
attributes are available using the `OptimizeResult.keys` method.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError as e:
raise AttributeError(name) from e
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in sorted(self.items())])
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
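# Illustrative note: because OptimizeResult subclasses dict with attribute
# access, ``res.x``, ``res["x"]`` and ``res.get("x")`` are equivalent ways to
# read a field, and ``res.keys()`` shows which fields a given solver filled in.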
class OptimizeWarning(UserWarning):
pass
def _check_unknown_options(unknown_options):
if unknown_options:
msg = ", ".join(map(str, unknown_options.keys()))
# Stack level 4: this is called from _minimize_*, which is
# called from another function in SciPy. Level 4 is the first
# level in user code.
warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4)
def is_array_scalar(x):
"""Test whether `x` is either a scalar or an array scalar.
"""
return np.size(x) == 1
_epsilon = sqrt(np.finfo(float).eps)
def vecnorm(x, ord=2):
if ord == Inf:
return np.amax(np.abs(x))
elif ord == -Inf:
return np.amin(np.abs(x))
else:
return np.sum(np.abs(x)**ord, axis=0)**(1.0 / ord)
def _prepare_scalar_function(fun, x0, jac=None, args=(), bounds=None,
epsilon=None, finite_diff_rel_step=None,
hess=None):
"""
Creates a ScalarFunction object for use with scalar minimizers
(BFGS/LBFGSB/SLSQP/TNC/CG/etc).
Parameters
----------
fun : callable
The objective function to be minimized.
``fun(x, *args) -> float``
where ``x`` is an 1-D array with shape (n,) and ``args``
is a tuple of the fixed parameters needed to completely
specify the function.
x0 : ndarray, shape (n,)
Initial guess. Array of real elements of size (n,),
where 'n' is the number of independent variables.
jac : {callable, '2-point', '3-point', 'cs', None}, optional
Method for computing the gradient vector. If it is a callable, it
should be a function that returns the gradient vector:
``jac(x, *args) -> array_like, shape (n,)``
If one of `{'2-point', '3-point', 'cs'}` is selected then the gradient
is calculated with a relative step for finite differences. If `None`,
then two-point finite differences with an absolute step is used.
args : tuple, optional
Extra arguments passed to the objective function and its
derivatives (`fun`, `jac` functions).
bounds : sequence, optional
Bounds on variables. 'new-style' bounds are required.
epsilon : float or ndarray
If `jac is None` the absolute step size used for numerical
approximation of the jacobian via forward differences.
finite_diff_rel_step : None or array_like, optional
If `jac in ['2-point', '3-point', 'cs']` the relative step size to
use for numerical approximation of the jacobian. The absolute step
size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``,
possibly adjusted to fit into the bounds. For ``method='3-point'``
the sign of `h` is ignored. If None (default) then step is selected
automatically.
hess : {callable, '2-point', '3-point', 'cs', None}
Computes the Hessian matrix. If it is callable, it should return the
Hessian matrix:
``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
Alternatively, the keywords {'2-point', '3-point', 'cs'} select a
finite difference scheme for numerical estimation.
Whenever the gradient is estimated via finite-differences, the Hessian
cannot be estimated with options {'2-point', '3-point', 'cs'} and needs
to be estimated using one of the quasi-Newton strategies.
Returns
-------
sf : ScalarFunction
"""
if callable(jac):
grad = jac
elif jac in FD_METHODS:
# epsilon is set to None so that ScalarFunction is made to use
# rel_step
epsilon = None
grad = jac
else:
# default (jac is None) is to do 2-point finite differences with
# absolute step size. ScalarFunction has to be provided an
# epsilon value that is not None to use absolute steps. This is
# normally the case from most _minimize* methods.
grad = '2-point'
epsilon = epsilon
if hess is None:
# ScalarFunction requires something for hess, so if nothing is provided we
# give a dummy implementation here that returns None, so that downstream
# minimisers halt if they try to use it. The results of `fun.hess`
# should not be used.
def hess(x, *args):
return None
if bounds is None:
bounds = (-np.inf, np.inf)
# ScalarFunction caches. Reuse of fun(x) during grad
# calculation reduces overall function evaluations.
sf = ScalarFunction(fun, x0, args, grad, hess,
finite_diff_rel_step, bounds, epsilon=epsilon)
return sf
def _clip_x_for_func(func, bounds):
# ensures that x values sent to func are clipped to bounds
# this is used as a mitigation for gh11403, slsqp/tnc sometimes
# suggest a move that is outside the limits by 1 or 2 ULP. This
# unclean fix makes sure x is strictly within bounds.
def eval(x):
x = _check_clip_x(x, bounds)
return func(x)
return eval
def _check_clip_x(x, bounds):
if (x < bounds[0]).any() or (x > bounds[1]).any():
warnings.warn("Values in x were outside bounds during a "
"minimize step, clipping to bounds", RuntimeWarning)
x = np.clip(x, bounds[0], bounds[1])
return x
return x
def rosen(x):
"""
The Rosenbrock function.
The function computed is::
sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
Parameters
----------
x : array_like
1-D array of points at which the Rosenbrock function is to be computed.
Returns
-------
f : float
The value of the Rosenbrock function.
See Also
--------
rosen_der, rosen_hess, rosen_hess_prod
Examples
--------
>>> from scipy.optimize import rosen
>>> X = 0.1 * np.arange(10)
>>> rosen(X)
76.56
For higher-dimensional input ``rosen`` broadcasts.
In the following example, we use this to plot a 2D landscape.
Note that ``rosen_hess`` does not broadcast in this manner.
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.mplot3d import Axes3D
>>> x = np.linspace(-1, 1, 50)
>>> X, Y = np.meshgrid(x, x)
>>> ax = plt.subplot(111, projection='3d')
>>> ax.plot_surface(X, Y, rosen([X, Y]))
>>> plt.show()
"""
x = asarray(x)
r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0,
axis=0)
return r
def rosen_der(x):
"""
The derivative (i.e. gradient) of the Rosenbrock function.
Parameters
----------
x : array_like
1-D array of points at which the derivative is to be computed.
Returns
-------
rosen_der : (N,) ndarray
The gradient of the Rosenbrock function at `x`.
See Also
--------
rosen, rosen_hess, rosen_hess_prod
Examples
--------
>>> from scipy.optimize import rosen_der
>>> X = 0.1 * np.arange(9)
>>> rosen_der(X)
array([ -2. , 10.6, 15.6, 13.4, 6.4, -3. , -12.4, -19.4, 62. ])
"""
x = asarray(x)
xm = x[1:-1]
xm_m1 = x[:-2]
xm_p1 = x[2:]
der = np.zeros_like(x)
der[1:-1] = (200 * (xm - xm_m1**2) -
400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm))
der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
der[-1] = 200 * (x[-1] - x[-2]**2)
return der
def rosen_hess(x):
"""
The Hessian matrix of the Rosenbrock function.
Parameters
----------
x : array_like
1-D array of points at which the Hessian matrix is to be computed.
Returns
-------
rosen_hess : ndarray
The Hessian matrix of the Rosenbrock function at `x`.
See Also
--------
rosen, rosen_der, rosen_hess_prod
Examples
--------
>>> from scipy.optimize import rosen_hess
>>> X = 0.1 * np.arange(4)
>>> rosen_hess(X)
array([[-38., 0., 0., 0.],
[ 0., 134., -40., 0.],
[ 0., -40., 130., -80.],
[ 0., 0., -80., 200.]])
"""
x = atleast_1d(x)
H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1)
diagonal = np.zeros(len(x), dtype=x.dtype)
diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2
diagonal[-1] = 200
diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
H = H + np.diag(diagonal)
return H
def rosen_hess_prod(x, p):
"""
Product of the Hessian matrix of the Rosenbrock function with a vector.
Parameters
----------
x : array_like
1-D array of points at which the Hessian matrix is to be computed.
p : array_like
1-D array, the vector to be multiplied by the Hessian matrix.
Returns
-------
rosen_hess_prod : ndarray
The Hessian matrix of the Rosenbrock function at `x` multiplied
by the vector `p`.
See Also
--------
rosen, rosen_der, rosen_hess
Examples
--------
>>> from scipy.optimize import rosen_hess_prod
>>> X = 0.1 * np.arange(9)
>>> p = 0.5 * np.arange(9)
>>> rosen_hess_prod(X, p)
array([ -0., 27., -10., -95., -192., -265., -278., -195., -180.])
"""
x = atleast_1d(x)
Hp = np.zeros(len(x), dtype=x.dtype)
Hp[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1]
Hp[1:-1] = (-400 * x[:-2] * p[:-2] +
(202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1] -
400 * x[1:-1] * p[2:])
Hp[-1] = -400 * x[-2] * p[-2] + 200*p[-1]
return Hp
def _wrap_scalar_function(function, args):
# wraps a minimizer function to count number of evaluations
# and to easily provide an args kwd.
ncalls = [0]
if function is None:
return ncalls, None
def function_wrapper(x, *wrapper_args):
ncalls[0] += 1
# A copy of x is sent to the user function (gh13740)
fx = function(np.copy(x), *(wrapper_args + args))
# Ideally, we'd like to have a true scalar returned from f(x). For
# backwards-compatibility, also allow np.array([1.3]), np.array([[1.3]]) etc.
if not np.isscalar(fx):
try:
fx = np.asarray(fx).item()
except (TypeError, ValueError) as e:
raise ValueError("The user-provided objective function "
"must return a scalar value.") from e
return fx
return ncalls, function_wrapper
class _MaxFuncCallError(RuntimeError):
pass
def _wrap_scalar_function_maxfun_validation(function, args, maxfun):
# wraps a minimizer function to count number of evaluations
# and to easily provide an args kwd.
ncalls = [0]
if function is None:
return ncalls, None
def function_wrapper(x, *wrapper_args):
if ncalls[0] >= maxfun:
raise _MaxFuncCallError("Too many function calls")
ncalls[0] += 1
# A copy of x is sent to the user function (gh13740)
fx = function(np.copy(x), *(wrapper_args + args))
# Ideally, we'd like to have a true scalar returned from f(x). For
# backwards-compatibility, also allow np.array([1.3]),
# np.array([[1.3]]) etc.
if not np.isscalar(fx):
try:
fx = np.asarray(fx).item()
except (TypeError, ValueError) as e:
raise ValueError("The user-provided objective function "
"must return a scalar value.") from e
return fx
return ncalls, function_wrapper
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
full_output=0, disp=1, retall=0, callback=None, initial_simplex=None):
"""
Minimize a function using the downhill simplex algorithm.
This algorithm only uses function values, not derivatives or second
derivatives.
Parameters
----------
func : callable func(x,*args)
The objective function to be minimized.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to func, i.e., ``f(x,*args)``.
xtol : float, optional
Absolute error in xopt between iterations that is acceptable for
convergence.
ftol : number, optional
Absolute error in func(xopt) between iterations that is acceptable for
convergence.
maxiter : int, optional
Maximum number of iterations to perform.
maxfun : number, optional
Maximum number of function evaluations to make.
full_output : bool, optional
Set to True if fopt and warnflag outputs are desired.
disp : bool, optional
Set to True to print convergence messages.
retall : bool, optional
Set to True to return list of solutions at each iteration.
callback : callable, optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
initial_simplex : array_like of shape (N + 1, N), optional
Initial simplex. If given, overrides `x0`.
``initial_simplex[j,:]`` should contain the coordinates of
the jth vertex of the ``N+1`` vertices in the simplex, where
``N`` is the dimension.
Returns
-------
xopt : ndarray
Parameter that minimizes function.
fopt : float
Value of function at minimum: ``fopt = func(xopt)``.
iter : int
Number of iterations performed.
funcalls : int
Number of function calls made.
warnflag : int
1 : Maximum number of function evaluations made.
2 : Maximum number of iterations reached.
allvecs : list
Solution at each iteration.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'Nelder-Mead' `method` in particular.
Notes
-----
Uses a Nelder-Mead simplex algorithm to find the minimum of function of
one or more variables.
This algorithm has a long history of successful use in applications.
But it will usually be slower than an algorithm that uses first or
second derivative information. In practice, it can have poor
performance in high-dimensional problems and is not robust to
minimizing complicated functions. Additionally, there currently is no
complete theory describing when the algorithm will successfully
converge to the minimum, or how fast it will if it does. Both the ftol and
xtol criteria must be met for convergence.
Examples
--------
>>> def f(x):
... return x**2
>>> from scipy import optimize
>>> minimum = optimize.fmin(f, 1)
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 17
Function evaluations: 34
>>> minimum[0]
-8.8817841970012523e-16
References
----------
.. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
minimization", The Computer Journal, 7, pp. 308-313
.. [2] Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now
Respectable", in Numerical Analysis 1995, Proceedings of the
1995 Dundee Biennial Conference in Numerical Analysis, D.F.
Griffiths and G.A. Watson (Eds.), Addison Wesley Longman,
Harlow, UK, pp. 191-208.
"""
opts = {'xatol': xtol,
'fatol': ftol,
'maxiter': maxiter,
'maxfev': maxfun,
'disp': disp,
'return_all': retall,
'initial_simplex': initial_simplex}
res = _minimize_neldermead(func, x0, args, callback=callback, **opts)
if full_output:
retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status']
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_neldermead(func, x0, args=(), callback=None,
maxiter=None, maxfev=None, disp=False,
return_all=False, initial_simplex=None,
xatol=1e-4, fatol=1e-4, adaptive=False, bounds=None,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
Nelder-Mead algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
maxiter, maxfev : int
Maximum allowed number of iterations and function evaluations.
Will default to ``N*200``, where ``N`` is the number of
variables, if neither `maxiter` or `maxfev` is set. If both
`maxiter` and `maxfev` are set, minimization will stop at the
first reached.
return_all : bool, optional
Set to True to return a list of the best solution at each of the
iterations.
initial_simplex : array_like of shape (N + 1, N)
Initial simplex. If given, overrides `x0`.
``initial_simplex[j,:]`` should contain the coordinates of
the jth vertex of the ``N+1`` vertices in the simplex, where
``N`` is the dimension.
xatol : float, optional
Absolute error in xopt between iterations that is acceptable for
convergence.
fatol : number, optional
Absolute error in func(xopt) between iterations that is acceptable for
convergence.
adaptive : bool, optional
Adapt algorithm parameters to dimensionality of problem. Useful for
high-dimensional minimization [1]_.
bounds : sequence or `Bounds`, optional
Bounds on variables. There are two ways to specify the bounds:
1. Instance of `Bounds` class.
2. Sequence of ``(min, max)`` pairs for each element in `x`. None
is used to specify no bound.
Note that this just clips all vertices in simplex based on
the bounds.
References
----------
.. [1] Gao, F. and Han, L.
Implementing the Nelder-Mead simplex algorithm with adaptive
parameters. 2012. Computational Optimization and Applications.
51:1, pp. 259-277
"""
if 'ftol' in unknown_options:
warnings.warn("ftol is deprecated for Nelder-Mead,"
" use fatol instead. If you specified both, only"
" fatol is used.",
DeprecationWarning)
if (np.isclose(fatol, 1e-4) and
not np.isclose(unknown_options['ftol'], 1e-4)):
# only ftol was probably specified, use it.
fatol = unknown_options['ftol']
unknown_options.pop('ftol')
if 'xtol' in unknown_options:
warnings.warn("xtol is deprecated for Nelder-Mead,"
" use xatol instead. If you specified both, only"
" xatol is used.",
DeprecationWarning)
if (np.isclose(xatol, 1e-4) and
not np.isclose(unknown_options['xtol'], 1e-4)):
# only xtol was probably specified, use it.
xatol = unknown_options['xtol']
unknown_options.pop('xtol')
_check_unknown_options(unknown_options)
maxfun = maxfev
retall = return_all
x0 = asfarray(x0).flatten()
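# rho, chi, psi and sigma are the reflection, expansion, contraction and
# shrink coefficients of the Nelder-Mead simplex update; the adaptive branch
# below scales them with the problem dimension following Gao and Han (2012),
# see reference [1] in the docstring above.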
if adaptive:
dim = float(len(x0))
rho = 1
chi = 1 + 2/dim
psi = 0.75 - 1/(2*dim)
sigma = 1 - 1/dim
else:
rho = 1
chi = 2
psi = 0.5
sigma = 0.5
nonzdelt = 0.05
zdelt = 0.00025
if bounds is not None:
lower_bound, upper_bound = bounds.lb, bounds.ub
# check bounds
if (lower_bound > upper_bound).any():
raise ValueError("Nelder Mead - one of the lower bounds is greater than an upper bound.")
if np.any(lower_bound > x0) or np.any(x0 > upper_bound):
warnings.warn("Initial guess is not within the specified bounds",
OptimizeWarning, 3)
if bounds is not None:
x0 = np.clip(x0, lower_bound, upper_bound)
if initial_simplex is None:
N = len(x0)
sim = np.empty((N + 1, N), dtype=x0.dtype)
sim[0] = x0
for k in range(N):
y = np.array(x0, copy=True)
if y[k] != 0:
y[k] = (1 + nonzdelt)*y[k]
else:
y[k] = zdelt
sim[k + 1] = y
else:
sim = np.asfarray(initial_simplex).copy()
if sim.ndim != 2 or sim.shape[0] != sim.shape[1] + 1:
raise ValueError("`initial_simplex` should be an array of shape (N+1,N)")
if len(x0) != sim.shape[1]:
raise ValueError("Size of `initial_simplex` is not consistent with `x0`")
N = sim.shape[1]
if retall:
allvecs = [sim[0]]
# If neither are set, then set both to default
if maxiter is None and maxfun is None:
maxiter = N * 200
maxfun = N * 200
elif maxiter is None:
# Convert remaining Nones, to np.inf, unless the other is np.inf, in
# which case use the default to avoid unbounded iteration
if maxfun == np.inf:
maxiter = N * 200
else:
maxiter = np.inf
elif maxfun is None:
if maxiter == np.inf:
maxfun = N * 200
else:
maxfun = np.inf
if bounds is not None:
sim = np.clip(sim, lower_bound, upper_bound)
one2np1 = list(range(1, N + 1))
fsim = np.full((N + 1,), np.inf, dtype=float)
fcalls, func = _wrap_scalar_function_maxfun_validation(func, args, maxfun)
try:
for k in range(N + 1):
fsim[k] = func(sim[k])
except _MaxFuncCallError:
pass
finally:
ind = np.argsort(fsim)
sim = np.take(sim, ind, 0)
fsim = np.take(fsim, ind, 0)
ind = np.argsort(fsim)
fsim = np.take(fsim, ind, 0)
# sort so sim[0,:] has the lowest function value
sim = np.take(sim, ind, 0)
iterations = 1
while (fcalls[0] < maxfun and iterations < maxiter):
try:
if (np.max(np.ravel(np.abs(sim[1:] - sim[0]))) <= xatol and
np.max(np.abs(fsim[0] - fsim[1:])) <= fatol):
break
xbar = np.add.reduce(sim[:-1], 0) / N
xr = (1 + rho) * xbar - rho * sim[-1]
if bounds is not None:
xr = np.clip(xr, lower_bound, upper_bound)
fxr = func(xr)
doshrink = 0
if fxr < fsim[0]:
xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
if bounds is not None:
xe = np.clip(xe, lower_bound, upper_bound)
fxe = func(xe)
if fxe < fxr:
sim[-1] = xe
fsim[-1] = fxe
else:
sim[-1] = xr
fsim[-1] = fxr
else: # fsim[0] <= fxr
if fxr < fsim[-2]:
sim[-1] = xr
fsim[-1] = fxr
else: # fxr >= fsim[-2]
# Perform contraction
if fxr < fsim[-1]:
xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
if bounds is not None:
xc = np.clip(xc, lower_bound, upper_bound)
fxc = func(xc)
if fxc <= fxr:
sim[-1] = xc
fsim[-1] = fxc
else:
doshrink = 1
else:
# Perform an inside contraction
xcc = (1 - psi) * xbar + psi * sim[-1]
if bounds is not None:
xcc = np.clip(xcc, lower_bound, upper_bound)
fxcc = func(xcc)
if fxcc < fsim[-1]:
sim[-1] = xcc
fsim[-1] = fxcc
else:
doshrink = 1
if doshrink:
for j in one2np1:
sim[j] = sim[0] + sigma * (sim[j] - sim[0])
if bounds is not None:
sim[j] = np.clip(
sim[j], lower_bound, upper_bound)
fsim[j] = func(sim[j])
iterations += 1
except _MaxFuncCallError:
pass
finally:
ind = np.argsort(fsim)
sim = np.take(sim, ind, 0)
fsim = np.take(fsim, ind, 0)
if callback is not None:
callback(sim[0])
if retall:
allvecs.append(sim[0])
x = sim[0]
fval = np.min(fsim)
warnflag = 0
if fcalls[0] >= maxfun:
warnflag = 1
msg = _status_message['maxfev']
if disp:
print('Warning: ' + msg)
elif iterations >= maxiter:
warnflag = 2
msg = _status_message['maxiter']
if disp:
print('Warning: ' + msg)
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iterations)
print(" Function evaluations: %d" % fcalls[0])
result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0],
status=warnflag, success=(warnflag == 0),
message=msg, x=x, final_simplex=(sim, fsim))
if retall:
result['allvecs'] = allvecs
return result
def approx_fprime(xk, f, epsilon=_epsilon, *args):
"""Finite-difference approximation of the gradient of a scalar function.
Parameters
----------
xk : array_like
The coordinate vector at which to determine the gradient of `f`.
f : callable
The function of which to determine the gradient (partial derivatives).
Should take `xk` as first argument, other arguments to `f` can be
supplied in ``*args``. Should return a scalar, the value of the
function at `xk`.
epsilon : {float, array_like}, optional
Increment to `xk` to use for determining the function gradient.
If a scalar, uses the same finite difference delta for all partial
derivatives. If an array, should contain one value per element of
`xk`. Defaults to ``sqrt(np.finfo(float).eps)``, which is approximately
1.49e-08.
\\*args : args, optional
Any other arguments that are to be passed to `f`.
Returns
-------
grad : ndarray
The partial derivatives of `f` to `xk`.
See Also
--------
check_grad : Check correctness of gradient function against approx_fprime.
Notes
-----
The function gradient is determined by the forward finite difference
formula::
f(xk[i] + epsilon[i]) - f(xk[i])
f'[i] = ---------------------------------
epsilon[i]
The main use of `approx_fprime` is in scalar function optimizers like
`fmin_bfgs`, to determine numerically the Jacobian of a function.
Examples
--------
>>> from scipy import optimize
>>> def func(x, c0, c1):
... "Coordinate vector `x` should be an array of size two."
... return c0 * x[0]**2 + c1*x[1]**2
>>> x = np.ones(2)
>>> c0, c1 = (1, 200)
>>> eps = np.sqrt(np.finfo(float).eps)
>>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1)
array([ 2. , 400.00004198])
"""
xk = np.asarray(xk, float)
f0 = f(xk, *args)
if not np.isscalar(f0):
try:
f0 = f0.item()
except (ValueError, AttributeError) as e:
raise ValueError("The user-provided "
"objective function must "
"return a scalar value.") from e
return approx_derivative(f, xk, method='2-point', abs_step=epsilon,
args=args, f0=f0)
def check_grad(func, grad, x0, *args, epsilon=_epsilon,
direction='all', seed=None):
"""Check the correctness of a gradient function by comparing it against a
(forward) finite-difference approximation of the gradient.
Parameters
----------
func : callable ``func(x0, *args)``
Function whose derivative is to be checked.
grad : callable ``grad(x0, *args)``
Gradient of `func`.
x0 : ndarray
Points to check `grad` against forward difference approximation of grad
using `func`.
args : \\*args, optional
Extra arguments passed to `func` and `grad`.
epsilon : float, optional
Step size used for the finite difference approximation. It defaults to
``sqrt(np.finfo(float).eps)``, which is approximately 1.49e-08.
direction : str, optional
If set to ``'random'``, then gradients along a random vector
are used to check `grad` against forward difference approximation
using `func`. By default it is ``'all'``, in which case, all
the one hot direction vectors are considered to check `grad`.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Specify `seed` for reproducing the return value from this function.
The random numbers generated with this seed affect the random vector
along which gradients are computed to check ``grad``. Note that `seed`
is only used when `direction` argument is set to `'random'`.
Returns
-------
err : float
The square root of the sum of squares (i.e., the 2-norm) of the
difference between ``grad(x0, *args)`` and the finite difference
approximation of `grad` using func at the points `x0`.
See Also
--------
approx_fprime
Examples
--------
>>> def func(x):
... return x[0]**2 - 0.5 * x[1]**3
>>> def grad(x):
... return [2 * x[0], -1.5 * x[1]**2]
>>> from scipy.optimize import check_grad
>>> check_grad(func, grad, [1.5, -1.5])
2.9802322387695312e-08 # may vary
>>> rng = np.random.default_rng()
>>> check_grad(func, grad, [1.5, -1.5],
... direction='random', seed=rng)
2.9802322387695312e-08
"""
step = epsilon
x0 = np.asarray(x0)
def g(w, func, x0, v, *args):
return func(x0 + w*v, *args)
if direction == 'random':
random_state = check_random_state(seed)
v = random_state.normal(0, 1, size=(x0.shape))
_args = (func, x0, v) + args
_func = g
vars = np.zeros((1,))
analytical_grad = np.dot(grad(x0, *args), v)
elif direction == 'all':
_args = args
_func = func
vars = x0
analytical_grad = grad(x0, *args)
else:
raise ValueError("{} is not a valid string for "
"``direction`` argument".format(direction))
return sqrt(sum((analytical_grad -
approx_fprime(vars, _func, step, *_args))**2))
def approx_fhess_p(x0, p, fprime, epsilon, *args):
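# Forward-difference approximation of the Hessian-vector product:
# H(x0) @ p is approximated by (fprime(x0 + epsilon*p) - fprime(x0)) / epsilon.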
# calculate fprime(x0) first, as this may be cached by ScalarFunction
f1 = fprime(*((x0,) + args))
f2 = fprime(*((x0 + epsilon*p,) + args))
return (f2 - f1) / epsilon
class _LineSearchError(RuntimeError):
pass
def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval,
**kwargs):
"""
Same as line_search_wolfe1, but fall back to line_search_wolfe2 if
suitable step length is not found, and raise an exception if a
suitable step length is not found.
Raises
------
_LineSearchError
If no suitable step size is found
"""
extra_condition = kwargs.pop('extra_condition', None)
ret = line_search_wolfe1(f, fprime, xk, pk, gfk,
old_fval, old_old_fval,
**kwargs)
if ret[0] is not None and extra_condition is not None:
xp1 = xk + ret[0] * pk
if not extra_condition(ret[0], xp1, ret[3], ret[5]):
# Reject step if extra_condition fails
ret = (None,)
if ret[0] is None:
# line search failed: try different one.
with warnings.catch_warnings():
warnings.simplefilter('ignore', LineSearchWarning)
kwargs2 = {}
for key in ('c1', 'c2', 'amax'):
if key in kwargs:
kwargs2[key] = kwargs[key]
ret = line_search_wolfe2(f, fprime, xk, pk, gfk,
old_fval, old_old_fval,
extra_condition=extra_condition,
**kwargs2)
if ret[0] is None:
raise _LineSearchError()
return ret
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
retall=0, callback=None):
"""
Minimize a function using the BFGS algorithm.
Parameters
----------
f : callable ``f(x,*args)``
Objective function to be minimized.
x0 : ndarray
Initial guess.
fprime : callable ``f'(x,*args)``, optional
Gradient of f.
args : tuple, optional
Extra arguments passed to f and fprime.
gtol : float, optional
Gradient norm must be less than `gtol` before successful termination.
norm : float, optional
Order of norm (Inf is max, -Inf is min)
epsilon : int or ndarray, optional
If `fprime` is approximated, use this value for the step size.
callback : callable, optional
An optional user-supplied function to call after each
iteration. Called as ``callback(xk)``, where ``xk`` is the
current parameter vector.
maxiter : int, optional
Maximum number of iterations to perform.
full_output : bool, optional
If True, return ``fopt``, ``func_calls``, ``grad_calls``, and
``warnflag`` in addition to ``xopt``.
disp : bool, optional
Print convergence message if True.
retall : bool, optional
Return a list of results at each iteration if True.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e., ``f(xopt) == fopt``.
fopt : float
Minimum value.
gopt : ndarray
Value of gradient at minimum, f'(xopt), which should be near 0.
Bopt : ndarray
Value of 1/f''(xopt), i.e., the inverse Hessian matrix.
func_calls : int
Number of function_calls made.
grad_calls : int
Number of gradient calls made.
warnflag : integer
1 : Maximum number of iterations exceeded.
2 : Gradient and/or function calls not changing.
3 : NaN result encountered.
allvecs : list
The value of `xopt` at each iteration. Only returned if `retall` is
True.
Notes
-----
Optimize the function, `f`, whose gradient is given by `fprime`
using the quasi-Newton method of Broyden, Fletcher, Goldfarb,
and Shanno (BFGS).
See Also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See ``method='BFGS'`` in particular.
References
----------
Wright, and Nocedal 'Numerical Optimization', 1999, p. 198.
Examples
--------
>>> from scipy.optimize import fmin_bfgs
>>> def quadratic_cost(x, Q):
... return x @ Q @ x
...
>>> x0 = np.array([-3, -4])
>>> cost_weight = np.diag([1., 10.])
>>> # Note that a trailing comma is necessary for a tuple with a single element
>>> fmin_bfgs(quadratic_cost, x0, args=(cost_weight,))
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 7 # may vary
Function evaluations: 24 # may vary
Gradient evaluations: 8 # may vary
array([ 2.85169950e-06, -4.61820139e-07])
>>> def quadratic_cost_grad(x, Q):
... return 2 * Q @ x
...
>>> fmin_bfgs(quadratic_cost, x0, quadratic_cost_grad, args=(cost_weight,))
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 7
Function evaluations: 8
Gradient evaluations: 8
array([ 2.85916637e-06, -4.54371951e-07])
"""
opts = {'gtol': gtol,
'norm': norm,
'eps': epsilon,
'disp': disp,
'maxiter': maxiter,
'return_all': retall}
res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['jac'], res['hess_inv'],
res['nfev'], res['njev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None,
gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
disp=False, return_all=False, finite_diff_rel_step=None,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
BFGS algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
maxiter : int
Maximum number of iterations to perform.
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac is None` the absolute step size used for numerical
approximation of the jacobian via forward differences.
return_all : bool, optional
Set to True to return a list of the best solution at each of the
iterations.
finite_diff_rel_step : None or array_like, optional
If `jac in ['2-point', '3-point', 'cs']` the relative step size to
use for numerical approximation of the jacobian. The absolute step
size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``,
possibly adjusted to fit into the bounds. For ``method='3-point'``
the sign of `h` is ignored. If None (default) then step is selected
automatically.
"""
_check_unknown_options(unknown_options)
retall = return_all
x0 = asarray(x0).flatten()
if x0.ndim == 0:
x0.shape = (1,)
if maxiter is None:
maxiter = len(x0) * 200
sf = _prepare_scalar_function(fun, x0, jac, args=args, epsilon=eps,
finite_diff_rel_step=finite_diff_rel_step)
f = sf.fun
myfprime = sf.grad
old_fval = f(x0)
gfk = myfprime(x0)
k = 0
N = len(x0)
I = np.eye(N, dtype=int)
Hk = I
# Sets the initial step guess to dx ~ 1
old_old_fval = old_fval + np.linalg.norm(gfk) / 2
xk = x0
if retall:
allvecs = [x0]
warnflag = 0
gnorm = vecnorm(gfk, ord=norm)
while (gnorm > gtol) and (k < maxiter):
pk = -np.dot(Hk, gfk)
try:
alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(f, myfprime, xk, pk, gfk,
old_fval, old_old_fval, amin=1e-100, amax=1e100)
except _LineSearchError:
# Line search failed to find a better solution.
warnflag = 2
break
xkp1 = xk + alpha_k * pk
if retall:
allvecs.append(xkp1)
sk = xkp1 - xk
xk = xkp1
if gfkp1 is None:
gfkp1 = myfprime(xkp1)
yk = gfkp1 - gfk
gfk = gfkp1
if callback is not None:
callback(xk)
k += 1
gnorm = vecnorm(gfk, ord=norm)
if (gnorm <= gtol):
break
if not np.isfinite(old_fval):
# We correctly found +-Inf as optimal value, or something went
# wrong.
warnflag = 2
break
rhok_inv = np.dot(yk, sk)
# this was handled in Numeric; keep it here for extra safety
if rhok_inv == 0.:
rhok = 1000.0
if disp:
print("Divide-by-zero encountered: rhok assumed large")
else:
rhok = 1. / rhok_inv
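# Standard BFGS update of the inverse Hessian approximation:
#   H_{k+1} = (I - rho*s*y^T) * H_k * (I - rho*y*s^T) + rho*s*s^T,
# with s = x_{k+1} - x_k, y = grad f(x_{k+1}) - grad f(x_k) and rho = 1/(y^T s).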
A1 = I - sk[:, np.newaxis] * yk[np.newaxis, :] * rhok
A2 = I - yk[:, np.newaxis] * sk[np.newaxis, :] * rhok
Hk = np.dot(A1, np.dot(Hk, A2)) + (rhok * sk[:, np.newaxis] *
sk[np.newaxis, :])
fval = old_fval
if warnflag == 2:
msg = _status_message['pr_loss']
elif k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
elif np.isnan(gnorm) or np.isnan(fval) or np.isnan(xk).any():
warnflag = 3
msg = _status_message['nan']
else:
msg = _status_message['success']
if disp:
print("%s%s" % ("Warning: " if warnflag != 0 else "", msg))
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % sf.nfev)
print(" Gradient evaluations: %d" % sf.ngev)
result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=sf.nfev,
njev=sf.ngev, status=warnflag,
success=(warnflag == 0), message=msg, x=xk,
nit=k)
if retall:
result['allvecs'] = allvecs
return result
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
maxiter=None, full_output=0, disp=1, retall=0, callback=None):
"""
Minimize a function using a nonlinear conjugate gradient algorithm.
Parameters
----------
f : callable, ``f(x, *args)``
Objective function to be minimized. Here `x` must be a 1-D array of
the variables that are to be changed in the search for a minimum, and
`args` are the other (fixed) parameters of `f`.
x0 : ndarray
A user-supplied initial estimate of `xopt`, the optimal value of `x`.
It must be a 1-D array of values.
fprime : callable, ``fprime(x, *args)``, optional
A function that returns the gradient of `f` at `x`. Here `x` and `args`
are as described above for `f`. The returned value must be a 1-D array.
Defaults to None, in which case the gradient is approximated
numerically (see `epsilon`, below).
args : tuple, optional
Parameter values passed to `f` and `fprime`. Must be supplied whenever
additional fixed parameters are needed to completely specify the
functions `f` and `fprime`.
gtol : float, optional
Stop when the norm of the gradient is less than `gtol`.
norm : float, optional
Order to use for the norm of the gradient
(``-np.Inf`` is min, ``np.Inf`` is max).
epsilon : float or ndarray, optional
Step size(s) to use when `fprime` is approximated numerically. Can be a
scalar or a 1-D array. Defaults to ``sqrt(eps)``, with eps the
floating point machine precision. Usually ``sqrt(eps)`` is about
1.5e-8.
maxiter : int, optional
Maximum number of iterations to perform. Default is ``200 * len(x0)``.
full_output : bool, optional
If True, return `fopt`, `func_calls`, `grad_calls`, and `warnflag` in
addition to `xopt`. See the Returns section below for additional
information on optional return values.
disp : bool, optional
If True, return a convergence message, followed by `xopt`.
retall : bool, optional
If True, add to the returned values the results of each iteration.
callback : callable, optional
An optional user-supplied function, called after each iteration.
Called as ``callback(xk)``, where ``xk`` is the current value of `x0`.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e., ``f(xopt) == fopt``.
fopt : float, optional
Minimum value found, f(xopt). Only returned if `full_output` is True.
func_calls : int, optional
The number of function calls made. Only returned if `full_output`
is True.
grad_calls : int, optional
The number of gradient calls made. Only returned if `full_output` is
True.
warnflag : int, optional
Integer value with warning status, only returned if `full_output` is
True.
0 : Success.
1 : The maximum number of iterations was exceeded.
2 : Gradient and/or function calls were not changing. May indicate
that precision was lost, i.e., the routine did not converge.
3 : NaN result encountered.
allvecs : list of ndarray, optional
List of arrays, containing the results at each iteration.
Only returned if `retall` is True.
See Also
--------
minimize : common interface to all `scipy.optimize` algorithms for
unconstrained and constrained minimization of multivariate
functions. It provides an alternative way to call
``fmin_cg``, by specifying ``method='CG'``.
Notes
-----
This conjugate gradient algorithm is based on that of Polak and Ribiere
[1]_.
Conjugate gradient methods tend to work better when:
1. `f` has a unique global minimizing point, and no local minima or
other stationary points,
2. `f` is, at least locally, reasonably well approximated by a
quadratic function of the variables,
3. `f` is continuous and has a continuous gradient,
4. `fprime` is not too large, e.g., has a norm less than 1000,
5. The initial guess, `x0`, is reasonably close to `f` 's global
minimizing point, `xopt`.
References
----------
.. [1] Wright & Nocedal, "Numerical Optimization", 1999, pp. 120-122.
Examples
--------
Example 1: seek the minimum value of the expression
``a*u**2 + b*u*v + c*v**2 + d*u + e*v + f`` for given values
of the parameters and an initial guess ``(u, v) = (0, 0)``.
>>> args = (2, 3, 7, 8, 9, 10) # parameter values
>>> def f(x, *args):
... u, v = x
... a, b, c, d, e, f = args
... return a*u**2 + b*u*v + c*v**2 + d*u + e*v + f
>>> def gradf(x, *args):
... u, v = x
... a, b, c, d, e, f = args
... gu = 2*a*u + b*v + d # u-component of the gradient
... gv = b*u + 2*c*v + e # v-component of the gradient
... return np.asarray((gu, gv))
>>> x0 = np.asarray((0, 0)) # Initial guess.
>>> from scipy import optimize
>>> res1 = optimize.fmin_cg(f, x0, fprime=gradf, args=args)
Optimization terminated successfully.
Current function value: 1.617021
Iterations: 4
Function evaluations: 8
Gradient evaluations: 8
>>> res1
array([-1.80851064, -0.25531915])
Example 2: solve the same problem using the `minimize` function.
(This `opts` dictionary shows all of the available options,
although in practice only non-default values would be needed.
The returned value will be a dictionary.)
>>> opts = {'maxiter' : None, # default value.
... 'disp' : True, # non-default value.
... 'gtol' : 1e-5, # default value.
... 'norm' : np.inf, # default value.
... 'eps' : 1.4901161193847656e-08} # default value.
>>> res2 = optimize.minimize(f, x0, jac=gradf, args=args,
... method='CG', options=opts)
Optimization terminated successfully.
Current function value: 1.617021
Iterations: 4
Function evaluations: 8
Gradient evaluations: 8
>>> res2.x # minimum found
array([-1.80851064, -0.25531915])
"""
opts = {'gtol': gtol,
'norm': norm,
'eps': epsilon,
'disp': disp,
'maxiter': maxiter,
'return_all': retall}
res = _minimize_cg(f, x0, args, fprime, callback=callback, **opts)
if full_output:
retlist = res['x'], res['fun'], res['nfev'], res['njev'], res['status']
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_cg(fun, x0, args=(), jac=None, callback=None,
gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
disp=False, return_all=False, finite_diff_rel_step=None,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
conjugate gradient algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
maxiter : int
Maximum number of iterations to perform.
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac is None` the absolute step size used for numerical
approximation of the jacobian via forward differences.
return_all : bool, optional
Set to True to return a list of the best solution at each of the
iterations.
finite_diff_rel_step : None or array_like, optional
If `jac in ['2-point', '3-point', 'cs']` the relative step size to
use for numerical approximation of the jacobian. The absolute step
size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``,
possibly adjusted to fit into the bounds. For ``method='3-point'``
the sign of `h` is ignored. If None (default) then step is selected
automatically.
"""
_check_unknown_options(unknown_options)
retall = return_all
x0 = asarray(x0).flatten()
if maxiter is None:
maxiter = len(x0) * 200
sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
finite_diff_rel_step=finite_diff_rel_step)
f = sf.fun
myfprime = sf.grad
old_fval = f(x0)
gfk = myfprime(x0)
k = 0
xk = x0
# Sets the initial step guess to dx ~ 1
old_old_fval = old_fval + np.linalg.norm(gfk) / 2
if retall:
allvecs = [xk]
warnflag = 0
pk = -gfk
gnorm = vecnorm(gfk, ord=norm)
sigma_3 = 0.01
while (gnorm > gtol) and (k < maxiter):
deltak = np.dot(gfk, gfk)
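# deltak = ||g_k||^2; it is the denominator of the Polak-Ribiere+ beta
# computed in polak_ribiere_powell_step below.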
cached_step = [None]
def polak_ribiere_powell_step(alpha, gfkp1=None):
xkp1 = xk + alpha * pk
if gfkp1 is None:
gfkp1 = myfprime(xkp1)
yk = gfkp1 - gfk
beta_k = max(0, np.dot(yk, gfkp1) / deltak)
pkp1 = -gfkp1 + beta_k * pk
gnorm = vecnorm(gfkp1, ord=norm)
return (alpha, xkp1, pkp1, gfkp1, gnorm)
def descent_condition(alpha, xkp1, fp1, gfkp1):
# Polak-Ribiere+ needs an explicit check of a sufficient
# descent condition, which is not guaranteed by strong Wolfe.
#
# See Gilbert & Nocedal, "Global convergence properties of
# conjugate gradient methods for optimization",
# SIAM J. Optimization 2, 21 (1992).
cached_step[:] = polak_ribiere_powell_step(alpha, gfkp1)
alpha, xk, pk, gfk, gnorm = cached_step
# Accept step if it leads to convergence.
if gnorm <= gtol:
return True
# Accept step if sufficient descent condition applies.
return np.dot(pk, gfk) <= -sigma_3 * np.dot(gfk, gfk)
try:
alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval,
old_old_fval, c2=0.4, amin=1e-100, amax=1e100,
extra_condition=descent_condition)
except _LineSearchError:
# Line search failed to find a better solution.
warnflag = 2
break
# Reuse already computed results if possible
if alpha_k == cached_step[0]:
alpha_k, xk, pk, gfk, gnorm = cached_step
else:
alpha_k, xk, pk, gfk, gnorm = polak_ribiere_powell_step(alpha_k, gfkp1)
if retall:
allvecs.append(xk)
if callback is not None:
callback(xk)
k += 1
fval = old_fval
if warnflag == 2:
msg = _status_message['pr_loss']
elif k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
elif np.isnan(gnorm) or np.isnan(fval) or np.isnan(xk).any():
warnflag = 3
msg = _status_message['nan']
else:
msg = _status_message['success']
if disp:
print("%s%s" % ("Warning: " if warnflag != 0 else "", msg))
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % sf.nfev)
print(" Gradient evaluations: %d" % sf.ngev)
result = OptimizeResult(fun=fval, jac=gfk, nfev=sf.nfev,
njev=sf.ngev, status=warnflag,
success=(warnflag == 0), message=msg, x=xk,
nit=k)
if retall:
result['allvecs'] = allvecs
return result
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
callback=None):
"""
Unconstrained minimization of a function using the Newton-CG method.
Parameters
----------
f : callable ``f(x, *args)``
Objective function to be minimized.
x0 : ndarray
Initial guess.
fprime : callable ``f'(x, *args)``
Gradient of f.
fhess_p : callable ``fhess_p(x, p, *args)``, optional
Function which computes the Hessian of f times an
arbitrary vector, p.
fhess : callable ``fhess(x, *args)``, optional
Function to compute the Hessian matrix of f.
args : tuple, optional
Extra arguments passed to f, fprime, fhess_p, and fhess
(the same set of extra arguments is supplied to all of
these functions).
epsilon : float or ndarray, optional
If fhess is approximated, use this value for the step size.
callback : callable, optional
An optional user-supplied function which is called after
each iteration. Called as callback(xk), where xk is the
current parameter vector.
avextol : float, optional
Convergence is assumed when the average relative error in
the minimizer falls below this amount.
maxiter : int, optional
Maximum number of iterations to perform.
full_output : bool, optional
If True, return the optional outputs.
disp : bool, optional
If True, print convergence message.
retall : bool, optional
If True, return a list of results at each iteration.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e., ``f(xopt) == fopt``.
fopt : float
Value of the function at xopt, i.e., ``fopt = f(xopt)``.
fcalls : int
Number of function calls made.
gcalls : int
Number of gradient calls made.
hcalls : int
Number of Hessian calls made.
warnflag : int
Warnings generated by the algorithm.
1 : Maximum number of iterations exceeded.
2 : Line search failure (precision loss).
3 : NaN result encountered.
allvecs : list
The result at each iteration, if retall is True (see below).
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'Newton-CG' `method` in particular.
Notes
-----
Only one of `fhess_p` or `fhess` needs to be given. If `fhess`
is provided, then `fhess_p` is ignored. `fhess_p` must compute the
Hessian times an arbitrary vector. If neither `fhess` nor `fhess_p`
is provided, then the Hessian-vector product is approximated using
finite differences on `fprime`.
Newton-CG methods are also called truncated Newton methods. This
function differs from scipy.optimize.fmin_tnc because
1. scipy.optimize.fmin_ncg is written purely in Python using NumPy
and scipy while scipy.optimize.fmin_tnc calls a C function.
2. scipy.optimize.fmin_ncg is only for unconstrained minimization
while scipy.optimize.fmin_tnc is for unconstrained minimization
or box constrained minimization. (Box constraints give
lower and upper bounds for each variable separately.)
References
----------
Wright & Nocedal, 'Numerical Optimization', 1999, p. 140.
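Examples
--------
A minimal sketch on a simple quadratic with an explicit gradient and
Hessian (assuming ``numpy`` is imported as ``np``; iteration details
may vary):
>>> from scipy.optimize import fmin_ncg
>>> def f(x):
...     return x[0]**2 + 4*x[1]**2
>>> def gradf(x):
...     return np.array([2.*x[0], 8.*x[1]])
>>> def hessf(x):
...     return np.array([[2., 0.], [0., 8.]])
>>> xopt = fmin_ncg(f, np.array([3., -2.]), gradf, fhess=hessf, disp=False)
>>> bool(np.allclose(xopt, [0., 0.], atol=1e-4))
True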
"""
opts = {'xtol': avextol,
'eps': epsilon,
'maxiter': maxiter,
'disp': disp,
'return_all': retall}
res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p,
callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['nfev'], res['njev'],
res['nhev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
callback=None, xtol=1e-5, eps=_epsilon, maxiter=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
Newton-CG algorithm.
Note that the `jac` parameter (Jacobian) is required.
Options
-------
disp : bool
Set to True to print convergence messages.
xtol : float
Average relative error in solution `xopt` acceptable for
convergence.
maxiter : int
Maximum number of iterations to perform.
eps : float or ndarray
If `hessp` is approximated, use this value for the step size.
return_all : bool, optional
Set to True to return a list of the best solution at each of the
iterations.
"""
_check_unknown_options(unknown_options)
if jac is None:
raise ValueError('Jacobian is required for Newton-CG method')
fhess_p = hessp
fhess = hess
avextol = xtol
epsilon = eps
retall = return_all
x0 = asarray(x0).flatten()
# TODO: allow hess to be approximated by FD?
# TODO: add hessp (callable or FD) to ScalarFunction?
sf = _prepare_scalar_function(fun, x0, jac, args=args, epsilon=eps, hess=fhess)
f = sf.fun
fprime = sf.grad
def terminate(warnflag, msg):
if disp:
print(msg)
print(" Current function value: %f" % old_fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % sf.nfev)
print(" Gradient evaluations: %d" % sf.ngev)
print(" Hessian evaluations: %d" % hcalls)
fval = old_fval
result = OptimizeResult(fun=fval, jac=gfk, nfev=sf.nfev,
njev=sf.ngev, nhev=hcalls, status=warnflag,
success=(warnflag == 0), message=msg, x=xk,
nit=k)
if retall:
result['allvecs'] = allvecs
return result
hcalls = 0
if maxiter is None:
maxiter = len(x0)*200
cg_maxiter = 20*len(x0)
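# The outer loop stops when the l1 norm of the Newton update falls below
# len(x0) * avextol, i.e. an average per-component tolerance of avextol.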
xtol = len(x0) * avextol
update = [2 * xtol]
xk = x0
if retall:
allvecs = [xk]
k = 0
gfk = None
old_fval = f(x0)
old_old_fval = None
float64eps = np.finfo(np.float64).eps
while np.add.reduce(np.abs(update)) > xtol:
if k >= maxiter:
msg = "Warning: " + _status_message['maxiter']
return terminate(1, msg)
# Compute a search direction pk by applying the CG method to
# del2 f(xk) p = - grad f(xk) starting from 0.
b = -fprime(xk)
maggrad = np.add.reduce(np.abs(b))
eta = np.min([0.5, np.sqrt(maggrad)])
termcond = eta * maggrad
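# Truncated-Newton forcing term: the inner CG solve is stopped once the l1
# norm of its residual drops below eta * ||grad f||_1, with
# eta = min(0.5, sqrt(||grad f||_1)).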
xsupi = zeros(len(x0), dtype=x0.dtype)
ri = -b
psupi = -ri
i = 0
dri0 = np.dot(ri, ri)
if fhess is not None: # compute the full Hessian only once per outer Newton iteration
A = sf.hess(xk)
hcalls = hcalls + 1
for k2 in range(cg_maxiter):
if np.add.reduce(np.abs(ri)) <= termcond:
break
if fhess is None:
if fhess_p is None:
Ap = approx_fhess_p(xk, psupi, fprime, epsilon)
else:
Ap = fhess_p(xk, psupi, *args)
hcalls = hcalls + 1
else:
Ap = np.dot(A, psupi)
# check curvature
Ap = asarray(Ap).squeeze() # get rid of matrices...
curv = np.dot(psupi, Ap)
if 0 <= curv <= 3 * float64eps:
break
elif curv < 0:
if (i > 0):
break
else:
# fall back to steepest descent direction
xsupi = dri0 / (-curv) * b
break
alphai = dri0 / curv
xsupi = xsupi + alphai * psupi
ri = ri + alphai * Ap
dri1 = np.dot(ri, ri)
betai = dri1 / dri0
psupi = -ri + betai * psupi
i = i + 1
dri0 = dri1 # update np.dot(ri,ri) for next time.
else:
# curvature keeps increasing, bail out
msg = ("Warning: CG iterations didn't converge. The Hessian is not "
"positive definite.")
return terminate(3, msg)
pk = xsupi # search direction is solution to system.
gfk = -b # gradient at xk
try:
alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(f, fprime, xk, pk, gfk,
old_fval, old_old_fval)
except _LineSearchError:
# Line search failed to find a better solution.
msg = "Warning: " + _status_message['pr_loss']
return terminate(2, msg)
update = alphak * pk
xk = xk + update # upcast if necessary
if callback is not None:
callback(xk)
if retall:
allvecs.append(xk)
k += 1
else:
if np.isnan(old_fval) or np.isnan(update).any():
return terminate(3, _status_message['nan'])
msg = _status_message['success']
return terminate(0, msg)
def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500,
full_output=0, disp=1):
"""Bounded minimization for scalar functions.
Parameters
----------
func : callable f(x,*args)
Objective function to be minimized (must accept and return scalars).
x1, x2 : float or array scalar
The optimization bounds.
args : tuple, optional
Extra arguments passed to function.
xtol : float, optional
The convergence tolerance.
maxfun : int, optional
Maximum number of function evaluations allowed.
full_output : bool, optional
If True, return optional outputs.
disp : int, optional
If non-zero, print messages.
0 : no message printing.
1 : non-convergence notification messages only.
2 : print a message on convergence too.
3 : print iteration results.
Returns
-------
xopt : ndarray
Parameters (over given interval) which minimize the
objective function.
fval : number
The function value at the minimum point.
ierr : int
An error flag (0 if converged, 1 if maximum number of
function calls reached).
numfunc : int
The number of function calls made.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Bounded' `method` in particular.
Notes
-----
Finds a local minimizer of the scalar function `func` in the
interval x1 < xopt < x2 using Brent's method. (See `brent`
for auto-bracketing.)
Examples
--------
`fminbound` finds the minimum of the function in the given range.
The following examples illustrate this:
>>> def f(x):
... return x**2
>>> from scipy import optimize
>>> minimum = optimize.fminbound(f, -1, 2)
>>> minimum
0.0
>>> minimum = optimize.fminbound(f, 1, 2)
>>> minimum
1.0000059608609866
"""
options = {'xatol': xtol,
'maxiter': maxfun,
'disp': disp}
res = _minimize_scalar_bounded(func, (x1, x2), args, **options)
if full_output:
return res['x'], res['fun'], res['status'], res['nfev']
else:
return res['x']
def _minimize_scalar_bounded(func, bounds, args=(),
xatol=1e-5, maxiter=500, disp=0,
**unknown_options):
"""
Options
-------
maxiter : int
Maximum number of iterations to perform.
disp: int, optional
If non-zero, print messages.
0 : no message printing.
1 : non-convergence notification messages only.
2 : print a message on convergence too.
3 : print iteration results.
xatol : float
Absolute error in solution `xopt` acceptable for convergence.
"""
_check_unknown_options(unknown_options)
maxfun = maxiter
# Test bounds are of correct form
if len(bounds) != 2:
raise ValueError('bounds must have two elements.')
x1, x2 = bounds
if not (is_array_scalar(x1) and is_array_scalar(x2)):
raise ValueError("Optimization bounds must be scalars"
" or array scalars.")
if x1 > x2:
raise ValueError("The lower bound exceeds the upper bound.")
flag = 0
header = ' Func-count x f(x) Procedure'
step = ' initial'
sqrt_eps = sqrt(2.2e-16)
golden_mean = 0.5 * (3.0 - sqrt(5.0))
a, b = x1, x2
fulc = a + golden_mean * (b - a)
nfc, xf = fulc, fulc
rat = e = 0.0
x = xf
fx = func(x, *args)
num = 1
fmin_data = (1, xf, fx)
fu = np.inf
ffulc = fnfc = fx
xm = 0.5 * (a + b)
tol1 = sqrt_eps * np.abs(xf) + xatol / 3.0
tol2 = 2.0 * tol1
if disp > 2:
print(" ")
print(header)
print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))
while (np.abs(xf - xm) > (tol2 - 0.5 * (b - a))):
golden = 1
# Check for parabolic fit
if np.abs(e) > tol1:
golden = 0
r = (xf - nfc) * (fx - ffulc)
q = (xf - fulc) * (fx - fnfc)
p = (xf - fulc) * q - (xf - nfc) * r
q = 2.0 * (q - r)
if q > 0.0:
p = -p
q = np.abs(q)
r = e
e = rat
# Check for acceptability of parabola
if ((np.abs(p) < np.abs(0.5*q*r)) and (p > q*(a - xf)) and
(p < q * (b - xf))):
rat = (p + 0.0) / q
x = xf + rat
step = ' parabolic'
if ((x - a) < tol2) or ((b - x) < tol2):
si = np.sign(xm - xf) + ((xm - xf) == 0)
rat = tol1 * si
else: # do a golden-section step
golden = 1
if golden: # do a golden-section step
if xf >= xm:
e = a - xf
else:
e = b - xf
rat = golden_mean*e
step = ' golden'
si = np.sign(rat) + (rat == 0)
x = xf + si * np.maximum(np.abs(rat), tol1)
fu = func(x, *args)
num += 1
fmin_data = (num, x, fu)
if disp > 2:
print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))
if fu <= fx:
if x >= xf:
a = xf
else:
b = xf
fulc, ffulc = nfc, fnfc
nfc, fnfc = xf, fx
xf, fx = x, fu
else:
if x < xf:
a = x
else:
b = x
if (fu <= fnfc) or (nfc == xf):
fulc, ffulc = nfc, fnfc
nfc, fnfc = x, fu
elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc):
fulc, ffulc = x, fu
xm = 0.5 * (a + b)
tol1 = sqrt_eps * np.abs(xf) + xatol / 3.0
tol2 = 2.0 * tol1
if num >= maxfun:
flag = 1
break
if np.isnan(xf) or np.isnan(fx) or np.isnan(fu):
flag = 2
fval = fx
if disp > 0:
_endprint(x, flag, fval, maxfun, xatol, disp)
result = OptimizeResult(fun=fval, status=flag, success=(flag == 0),
message={0: 'Solution found.',
1: 'Maximum number of function calls '
'reached.',
2: _status_message['nan']}.get(flag, ''),
x=xf, nfev=num, nit=num)
return result
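# Usage sketch for the bounded scalar minimizer above (a minimal illustration,
# assuming the public ``scipy.optimize.minimize_scalar`` entry point; the exact
# number of function evaluations depends on ``xatol``):
#     >>> from scipy.optimize import minimize_scalar
#     >>> res = minimize_scalar(lambda x: (x - 1.5)**2, bounds=(0, 2),
#     ...                       method='bounded')
#     >>> bool(res.success), round(res.x, 3)
#     (True, 1.5)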
class Brent:
#need to rethink design of __init__
def __init__(self, func, args=(), tol=1.48e-8, maxiter=500,
full_output=0, disp=0):
self.func = func
self.args = args
self.tol = tol
self.maxiter = maxiter
self._mintol = 1.0e-11
self._cg = 0.3819660
self.xmin = None
self.fval = None
self.iter = 0
self.funcalls = 0
self.disp = disp
# need to rethink design of set_bracket (new options, etc.)
def set_bracket(self, brack=None):
self.brack = brack
def get_bracket_info(self):
#set up
func = self.func
args = self.args
brack = self.brack
### BEGIN core bracket_info code ###
### carefully DOCUMENT any CHANGES in core ##
if brack is None:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
elif len(brack) == 2:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
xb=brack[1], args=args)
elif len(brack) == 3:
xa, xb, xc = brack
if (xa > xc): # swap so xa < xc can be assumed
xc, xa = xa, xc
if not ((xa < xb) and (xb < xc)):
raise ValueError("Not a bracketing interval.")
fa = func(*((xa,) + args))
fb = func(*((xb,) + args))
fc = func(*((xc,) + args))
if not ((fb < fa) and (fb < fc)):
raise ValueError("Not a bracketing interval.")
funcalls = 3
else:
raise ValueError("Bracketing interval must be "
"length 2 or 3 sequence.")
### END core bracket_info code ###
return xa, xb, xc, fa, fb, fc, funcalls
def optimize(self):
# set up for optimization
func = self.func
xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info()
_mintol = self._mintol
_cg = self._cg
#################################
#BEGIN CORE ALGORITHM
#################################
x = w = v = xb
fw = fv = fx = func(*((x,) + self.args))
if (xa < xc):
a = xa
b = xc
else:
a = xc
b = xa
deltax = 0.0
funcalls += 1
iter = 0
if self.disp > 2:
print(" ")
print(f"{'Func-count':^12} {'x':^12} {'f(x)': ^12}")
print(f"{funcalls:^12g} {x:^12.6g} {fx:^12.6g}")
while (iter < self.maxiter):
tol1 = self.tol * np.abs(x) + _mintol
tol2 = 2.0 * tol1
xmid = 0.5 * (a + b)
# check for convergence
if np.abs(x - xmid) < (tol2 - 0.5 * (b - a)):
break
# XXX In the first iteration, rat is only bound in the true case
# of this conditional. This used to cause an UnboundLocalError
# (gh-4140). It should be set before the if (but to what?).
if (np.abs(deltax) <= tol1):
if (x >= xmid):
deltax = a - x # do a golden section step
else:
deltax = b - x
rat = _cg * deltax
else: # do a parabolic step
tmp1 = (x - w) * (fx - fv)
tmp2 = (x - v) * (fx - fw)
p = (x - v) * tmp2 - (x - w) * tmp1
tmp2 = 2.0 * (tmp2 - tmp1)
if (tmp2 > 0.0):
p = -p
tmp2 = np.abs(tmp2)
dx_temp = deltax
deltax = rat
# check parabolic fit
if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and
(np.abs(p) < np.abs(0.5 * tmp2 * dx_temp))):
rat = p * 1.0 / tmp2 # if parabolic step is useful.
u = x + rat
if ((u - a) < tol2 or (b - u) < tol2):
if xmid - x >= 0:
rat = tol1
else:
rat = -tol1
else:
if (x >= xmid):
deltax = a - x # if it's not, do a golden-section step
else:
deltax = b - x
rat = _cg * deltax
if (np.abs(rat) < tol1): # update by at least tol1
if rat >= 0:
u = x + tol1
else:
u = x - tol1
else:
u = x + rat
fu = func(*((u,) + self.args)) # calculate new output value
funcalls += 1
if (fu > fx): # if it's bigger than current
if (u < x):
a = u
else:
b = u
if (fu <= fw) or (w == x):
v = w
w = u
fv = fw
fw = fu
elif (fu <= fv) or (v == x) or (v == w):
v = u
fv = fu
else:
if (u >= x):
a = x
else:
b = x
v = w
w = x
x = u
fv = fw
fw = fx
fx = fu
if self.disp > 2:
print(f"{funcalls:^12g} {x:^12.6g} {fx:^12.6g}")
iter += 1
#################################
#END CORE ALGORITHM
#################################
self.xmin = x
self.fval = fx
self.iter = iter
self.funcalls = funcalls
def get_result(self, full_output=False):
if full_output:
return self.xmin, self.fval, self.iter, self.funcalls
else:
return self.xmin
def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500):
"""
Given a function of one variable and a possible bracket, return
the local minimum of the function isolated to a fractional precision
of tol.
Parameters
----------
func : callable f(x,*args)
Objective function.
args : tuple, optional
Additional arguments (if present).
brack : tuple, optional
Either a triple ``(xa, xb, xc)`` where ``xa < xb < xc`` and ``func(xb) <
func(xa), func(xc)``, or a pair ``(xa, xb)`` to be used as a
starting interval for a downhill bracket search (see
`bracket`). Providing the pair ``(xa, xb)`` does not always mean
the obtained solution will satisfy ``xa <= x <= xb``.
tol : float, optional
Relative error in solution `xopt` acceptable for convergence.
full_output : bool, optional
If True, return all output args (xmin, fval, iter,
funcalls).
maxiter : int, optional
Maximum number of iterations in solution.
Returns
-------
xmin : ndarray
Optimum point.
fval : float
Optimum value.
iter : int
Number of iterations.
funcalls : int
Number of objective function evaluations made.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Brent' `method` in particular.
Notes
-----
Uses inverse parabolic interpolation when possible to speed up
convergence of the golden-section method.
Does not ensure that the minimum lies in the range specified by
`brack`. See `fminbound`.
Examples
--------
We illustrate the behaviour of the function when `brack` is of
size 2 and 3, respectively. In the case where `brack` is of the
form ``(xa, xb)``, we can see that, for the given values, the output
need not necessarily lie in the range ``(xa, xb)``.
>>> def f(x):
... return x**2
>>> from scipy import optimize
>>> minimum = optimize.brent(f,brack=(1,2))
>>> minimum
0.0
>>> minimum = optimize.brent(f,brack=(-1,0.5,2))
>>> minimum
-2.7755575615628914e-17
"""
options = {'xtol': tol,
'maxiter': maxiter}
res = _minimize_scalar_brent(func, brack, args, **options)
if full_output:
return res['x'], res['fun'], res['nit'], res['nfev']
else:
return res['x']
def _minimize_scalar_brent(func, brack=None, args=(), xtol=1.48e-8,
maxiter=500, disp=0,
**unknown_options):
"""
Options
-------
maxiter : int
Maximum number of iterations to perform.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
disp: int, optional
If non-zero, print messages.
0 : no message printing.
1 : non-convergence notification messages only.
2 : print a message on convergence too.
3 : print iteration results.
Notes
-----
Uses inverse parabolic interpolation when possible to speed up
convergence of the golden-section method.
"""
_check_unknown_options(unknown_options)
tol = xtol
if tol < 0:
raise ValueError('tolerance should be >= 0, got %r' % tol)
brent = Brent(func=func, args=args, tol=tol,
full_output=True, maxiter=maxiter, disp=disp)
brent.set_bracket(brack)
brent.optimize()
x, fval, nit, nfev = brent.get_result(full_output=True)
success = nit < maxiter and not (np.isnan(x) or np.isnan(fval))
if success:
message = ("\nOptimization terminated successfully;\n"
"The returned value satisfies the termination criteria\n"
f"(using xtol = {xtol} )")
else:
if nit >= maxiter:
message = "\nMaximum number of iterations exceeded"
if np.isnan(x) or np.isnan(fval):
message = f"{_status_message['nan']}"
if disp:
print(message)
return OptimizeResult(fun=fval, x=x, nit=nit, nfev=nfev,
success=success, message=message)
def golden(func, args=(), brack=None, tol=_epsilon,
full_output=0, maxiter=5000):
"""
Return the minimum of a function of one variable using golden section
method.
Given a function of one variable and a possible bracketing interval,
return the minimum of the function isolated to a fractional precision of
tol.
Parameters
----------
func : callable func(x,*args)
Objective function to minimize.
args : tuple, optional
Additional arguments (if present), passed to func.
brack : tuple, optional
Triple ``(a, b, c)``, where ``a < b < c`` and ``func(b) <
func(a), func(c)``. If the bracket consists of two numbers ``(a,
c)``, then they are assumed to be a starting interval for a
downhill bracket search (see `bracket`); it doesn't always
mean that the obtained solution will satisfy ``a <= x <= c``.
tol : float, optional
x tolerance stopping criterion.
full_output : bool, optional
If True, return optional outputs.
maxiter : int
Maximum number of iterations to perform.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Golden' `method` in particular.
Notes
-----
Uses an analog of the bisection method to decrease the bracketed
interval.
Examples
--------
We illustrate the behaviour of the function when `brack` is of
size 2 and 3, respectively. In the case where `brack` is of the
form ``(xa, xb)``, we can see that, for the given values, the output
need not necessarily lie in the range ``(xa, xb)``.
>>> def f(x):
... return x**2
>>> from scipy import optimize
>>> minimum = optimize.golden(f, brack=(1, 2))
>>> minimum
1.5717277788484873e-162
>>> minimum = optimize.golden(f, brack=(-1, 0.5, 2))
>>> minimum
-1.5717277788484873e-162
"""
options = {'xtol': tol, 'maxiter': maxiter}
res = _minimize_scalar_golden(func, brack, args, **options)
if full_output:
return res['x'], res['fun'], res['nfev']
else:
return res['x']
def _minimize_scalar_golden(func, brack=None, args=(),
xtol=_epsilon, maxiter=5000, disp=0,
**unknown_options):
"""
Options
-------
xtol : float
Relative error in solution `xopt` acceptable for convergence.
maxiter : int
Maximum number of iterations to perform.
disp: int, optional
If non-zero, print messages.
0 : no message printing.
1 : non-convergence notification messages only.
2 : print a message on convergence too.
3 : print iteration results.
"""
_check_unknown_options(unknown_options)
tol = xtol
if brack is None:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
elif len(brack) == 2:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
xb=brack[1], args=args)
elif len(brack) == 3:
xa, xb, xc = brack
if (xa > xc): # swap so xa < xc can be assumed
xc, xa = xa, xc
if not ((xa < xb) and (xb < xc)):
raise ValueError("Not a bracketing interval.")
fa = func(*((xa,) + args))
fb = func(*((xb,) + args))
fc = func(*((xc,) + args))
if not ((fb < fa) and (fb < fc)):
raise ValueError("Not a bracketing interval.")
funcalls = 3
else:
raise ValueError("Bracketing interval must be length 2 or 3 sequence.")
_gR = 0.61803399 # golden ratio conjugate: 2.0/(1.0+sqrt(5.0))
_gC = 1.0 - _gR
x3 = xc
x0 = xa
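# Place the first new probe point inside the larger of the two bracket segments.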
if (np.abs(xc - xb) > np.abs(xb - xa)):
x1 = xb
x2 = xb + _gC * (xc - xb)
else:
x2 = xb
x1 = xb - _gC * (xb - xa)
f1 = func(*((x1,) + args))
f2 = func(*((x2,) + args))
funcalls += 2
nit = 0
if disp > 2:
print(" ")
print(f"{'Func-count':^12} {'x':^12} {'f(x)': ^12}")
for i in range(maxiter):
if np.abs(x3 - x0) <= tol * (np.abs(x1) + np.abs(x2)):
break
if (f2 < f1):
x0 = x1
x1 = x2
x2 = _gR * x1 + _gC * x3
f1 = f2
f2 = func(*((x2,) + args))
else:
x3 = x2
x2 = x1
x1 = _gR * x2 + _gC * x0
f2 = f1
f1 = func(*((x1,) + args))
funcalls += 1
if disp > 2:
if (f1 < f2):
xmin, fval = x1, f1
else:
xmin, fval = x2, f2
print(f"{funcalls:^12g} {xmin:^12.6g} {fval:^12.6g}")
nit += 1
# end of iteration loop
if (f1 < f2):
xmin = x1
fval = f1
else:
xmin = x2
fval = f2
success = nit < maxiter and not (np.isnan(fval) or np.isnan(xmin))
if success:
message = ("\nOptimization terminated successfully;\n"
"The returned value satisfies the termination criteria\n"
f"(using xtol = {xtol} )")
else:
if nit >= maxiter:
message = "\nMaximum number of iterations exceeded"
if np.isnan(xmin) or np.isnan(fval):
message = f"{_status_message['nan']}"
if disp:
print(message)
return OptimizeResult(fun=fval, nfev=funcalls, x=xmin, nit=nit,
success=success, message=message)
def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):
"""
Bracket the minimum of the function.
Given a function and distinct initial points, search in the
downhill direction (as defined by the initial points) and return
new points xa, xb, xc that bracket the minimum of the function
``f(xa) > f(xb) < f(xc)``. It doesn't always mean that the obtained
solution will satisfy ``xa <= x <= xb``.
Parameters
----------
func : callable f(x,*args)
Objective function to minimize.
xa, xb : float, optional
Bracketing interval. Defaults `xa` to 0.0, and `xb` to 1.0.
args : tuple, optional
Additional arguments (if present), passed to `func`.
grow_limit : float, optional
Maximum grow limit. Defaults to 110.0
maxiter : int, optional
Maximum number of iterations to perform. Defaults to 1000.
Returns
-------
xa, xb, xc : float
Bracket.
fa, fb, fc : float
Objective function values in bracket.
funcalls : int
Number of function evaluations made.
Examples
--------
This function can find a downward convex region of a function:
>>> import matplotlib.pyplot as plt
>>> from scipy.optimize import bracket
>>> def f(x):
... return 10*x**2 + 3*x + 5
>>> x = np.linspace(-2, 2)
>>> y = f(x)
>>> init_xa, init_xb = 0, 1
>>> xa, xb, xc, fa, fb, fc, funcalls = bracket(f, xa=init_xa, xb=init_xb)
>>> plt.axvline(x=init_xa, color="k", linestyle="--")
>>> plt.axvline(x=init_xb, color="k", linestyle="--")
>>> plt.plot(x, y, "-k")
>>> plt.plot(xa, fa, "bx")
>>> plt.plot(xb, fb, "rx")
>>> plt.plot(xc, fc, "bx")
>>> plt.show()
"""
_gold = 1.618034 # golden ratio: (1.0+sqrt(5.0))/2.0
_verysmall_num = 1e-21
fa = func(*(xa,) + args)
fb = func(*(xb,) + args)
if (fa < fb): # Switch so fa > fb
xa, xb = xb, xa
fa, fb = fb, fa
xc = xb + _gold * (xb - xa)
fc = func(*((xc,) + args))
funcalls = 3
iter = 0
while (fc < fb):
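# Parabolic extrapolation through (xa, fa), (xb, fb), (xc, fc): w is the
# abscissa of the parabola's vertex, and wlim caps how far beyond xc we are
# willing to step.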
tmp1 = (xb - xa) * (fb - fc)
tmp2 = (xb - xc) * (fb - fa)
val = tmp2 - tmp1
if np.abs(val) < _verysmall_num:
denom = 2.0 * _verysmall_num
else:
denom = 2.0 * val
w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom
wlim = xb + grow_limit * (xc - xb)
if iter > maxiter:
raise RuntimeError("Too many iterations.")
iter += 1
if (w - xc) * (xb - w) > 0.0:
fw = func(*((w,) + args))
funcalls += 1
if (fw < fc):
xa = xb
xb = w
fa = fb
fb = fw
return xa, xb, xc, fa, fb, fc, funcalls
elif (fw > fb):
xc = w
fc = fw
return xa, xb, xc, fa, fb, fc, funcalls
w = xc + _gold * (xc - xb)
fw = func(*((w,) + args))
funcalls += 1
elif (w - wlim)*(wlim - xc) >= 0.0:
w = wlim
fw = func(*((w,) + args))
funcalls += 1
elif (w - wlim)*(xc - w) > 0.0:
fw = func(*((w,) + args))
funcalls += 1
if (fw < fc):
xb = xc
xc = w
w = xc + _gold * (xc - xb)
fb = fc
fc = fw
fw = func(*((w,) + args))
funcalls += 1
else:
w = xc + _gold * (xc - xb)
fw = func(*((w,) + args))
funcalls += 1
xa = xb
xb = xc
xc = w
fa = fb
fb = fc
fc = fw
return xa, xb, xc, fa, fb, fc, funcalls
def _line_for_search(x0, alpha, lower_bound, upper_bound):
"""
Given a parameter vector ``x0`` with length ``n`` and a direction
vector ``alpha`` with length ``n``, and lower and upper bounds on
each of the ``n`` parameters, what are the bounds on a scalar
``l`` such that ``lower_bound <= x0 + alpha * l <= upper_bound``.
Parameters
----------
x0 : np.array.
The vector representing the current location.
Note ``np.shape(x0) == (n,)``.
alpha : np.array.
The vector representing the direction.
Note ``np.shape(alpha) == (n,)``.
lower_bound : np.array.
The lower bounds for each parameter in ``x0``. If the ``i``th
parameter in ``x0`` is unbounded below, then ``lower_bound[i]``
should be ``-np.inf``.
Note ``np.shape(lower_bound) == (n,)``.
upper_bound : np.array.
The upper bounds for each parameter in ``x0``. If the ``i``th
parameter in ``x0`` is unbounded above, then ``upper_bound[i]``
should be ``np.inf``.
Note ``np.shape(upper_bound) == (n,)``.
Returns
-------
res : tuple ``(lmin, lmax)``
The bounds for ``l`` such that
``lower_bound[i] <= x0[i] + alpha[i] * l <= upper_bound[i]``
for all ``i``.
"""
# get nonzero indices of alpha so we don't get any zero division errors.
# alpha will not be all zero, since it is called from _linesearch_powell
# where we have a check for this.
nonzero, = alpha.nonzero()
lower_bound, upper_bound = lower_bound[nonzero], upper_bound[nonzero]
x0, alpha = x0[nonzero], alpha[nonzero]
low = (lower_bound - x0) / alpha
high = (upper_bound - x0) / alpha
# positive and negative indices
pos = alpha > 0
lmin_pos = np.where(pos, low, 0)
lmin_neg = np.where(pos, 0, high)
lmax_pos = np.where(pos, high, 0)
lmax_neg = np.where(pos, 0, low)
lmin = np.max(lmin_pos + lmin_neg)
lmax = np.min(lmax_pos + lmax_neg)
# if x0 is outside the bounds, then it is possible that there is
# no way to get back in the bounds for the parameters being updated
# with the current direction alpha.
# when this happens, lmax < lmin.
# If this is the case, then we can just return (0, 0)
return (lmin, lmax) if lmax >= lmin else (0, 0)
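# Worked illustration of _line_for_search (hypothetical values): with
#     x0 = [0., 0.], alpha = [1., -1.],
#     lower_bound = [-1., -1.], upper_bound = [2., 2.],
# the first component requires -1 <= l <= 2 and the second requires
# -2 <= l <= 1, so the intersection would be expected to give
# (lmin, lmax) = (-1.0, 1.0).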
def _linesearch_powell(func, p, xi, tol=1e-3,
lower_bound=None, upper_bound=None, fval=None):
"""Line-search algorithm using fminbound.
Find the minimum of the function ``func(p + alpha*xi)``.
lower_bound : np.array.
The lower bounds for each parameter in ``x0``. If the ``i``th
parameter in ``x0`` is unbounded below, then ``lower_bound[i]``
should be ``-np.inf``.
Note ``np.shape(lower_bound) == (n,)``.
upper_bound : np.array.
The upper bounds for each parameter in ``x0``. If the ``i``th
parameter in ``x0`` is unbounded above, then ``upper_bound[i]``
should be ``np.inf``.
Note ``np.shape(upper_bound) == (n,)``.
fval : number.
``fval`` is equal to ``func(p)``; the idea is just to avoid
recomputing it so that we can limit the number of function evaluations.
"""
def myfunc(alpha):
return func(p + alpha*xi)
# if xi is zero, then don't optimize
if not np.any(xi):
return ((fval, p, xi) if fval is not None else (func(p), p, xi))
elif lower_bound is None and upper_bound is None:
# non-bounded minimization
alpha_min, fret, _, _ = brent(myfunc, full_output=1, tol=tol)
xi = alpha_min * xi
return squeeze(fret), p + xi, xi
else:
bound = _line_for_search(p, xi, lower_bound, upper_bound)
if np.isneginf(bound[0]) and np.isposinf(bound[1]):
# equivalent to unbounded
return _linesearch_powell(func, p, xi, fval=fval, tol=tol)
elif not np.isneginf(bound[0]) and not np.isposinf(bound[1]):
# we can use a bounded scalar minimization
res = _minimize_scalar_bounded(myfunc, bound, xatol=tol / 100)
xi = res.x * xi
return squeeze(res.fun), p + xi, xi
else:
# only bounded on one side. use the tangent function to convert
# the infinity bound to a finite bound. The new bounded region
# is a subregion of the region bounded by -np.pi/2 and np.pi/2.
bound = np.arctan(bound[0]), np.arctan(bound[1])
res = _minimize_scalar_bounded(
lambda x: myfunc(np.tan(x)),
bound,
xatol=tol / 100)
xi = np.tan(res.x) * xi
return squeeze(res.fun), p + xi, xi
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
maxfun=None, full_output=0, disp=1, retall=0, callback=None,
direc=None):
"""
Minimize a function using modified Powell's method.
This method only uses function values, not derivatives.
Parameters
----------
func : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to func.
xtol : float, optional
Line-search error tolerance.
ftol : float, optional
Relative error in ``func(xopt)`` acceptable for convergence.
maxiter : int, optional
Maximum number of iterations to perform.
maxfun : int, optional
Maximum number of function evaluations to make.
full_output : bool, optional
If True, ``fopt``, ``xi``, ``direc``, ``iter``, ``funcalls``, and
``warnflag`` are returned.
disp : bool, optional
If True, print convergence messages.
retall : bool, optional
If True, return a list of the solution at each iteration.
callback : callable, optional
An optional user-supplied function, called after each
iteration. Called as ``callback(xk)``, where ``xk`` is the
current parameter vector.
direc : ndarray, optional
Initial fitting step and parameter order set as an (N, N) array, where N
is the number of fitting parameters in `x0`. Defaults to step size 1.0
fitting all parameters simultaneously (``np.eye(N)``). To
prevent initial consideration of values in a step or to change initial
step size, set to 0 or desired step size in the Jth position in the Mth
block, where J is the position in `x0` and M is the desired evaluation
step, with steps being evaluated in index order. Step size and ordering
will change freely as minimization proceeds.
Returns
-------
xopt : ndarray
Parameter which minimizes `func`.
fopt : number
Value of function at minimum: ``fopt = func(xopt)``.
direc : ndarray
Current direction set.
iter : int
Number of iterations.
funcalls : int
Number of function calls made.
warnflag : int
Integer warning flag:
1 : Maximum number of function evaluations.
2 : Maximum number of iterations.
3 : NaN result encountered.
4 : The result is out of the provided bounds.
allvecs : list
List of solutions at each iteration.
See also
--------
minimize: Interface to unconstrained minimization algorithms for
multivariate functions. See the 'Powell' method in particular.
Notes
-----
Uses a modification of Powell's method to find the minimum of
a function of N variables. Powell's method is a conjugate
direction method.
The algorithm has two loops. The outer loop merely iterates over the inner
loop. The inner loop minimizes over each current direction in the direction
set. At the end of the inner loop, if certain conditions are met, the
direction that gave the largest decrease is dropped and replaced with the
difference between the current estimated x and the estimated x from the
beginning of the inner-loop.
The technical conditions for replacing the direction of greatest
decrease amount to checking that
1. No further gain can be made along the direction of greatest decrease
from that iteration.
2. The direction of greatest decrease accounted for a sufficiently large
fraction of the decrease in the function value from that iteration of
the inner loop.
References
----------
Powell M.J.D. (1964) An efficient method for finding the minimum of a
function of several variables without calculating derivatives,
Computer Journal, 7 (2):155-162.
Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.:
Numerical Recipes (any edition), Cambridge University Press
Examples
--------
>>> def f(x):
... return x**2
>>> from scipy import optimize
>>> minimum = optimize.fmin_powell(f, -1)
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 2
Function evaluations: 18
>>> minimum
array(0.0)
"""
opts = {'xtol': xtol,
'ftol': ftol,
'maxiter': maxiter,
'maxfev': maxfun,
'disp': disp,
'direc': direc,
'return_all': retall}
res = _minimize_powell(func, x0, args, callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['direc'], res['nit'],
res['nfev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_powell(func, x0, args=(), callback=None, bounds=None,
xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
disp=False, direc=None, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
modified Powell algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
ftol : float
Relative error in ``fun(xopt)`` acceptable for convergence.
maxiter, maxfev : int
Maximum allowed number of iterations and function evaluations.
Will default to ``N*1000``, where ``N`` is the number of
variables, if neither `maxiter` nor `maxfev` is set. If both
`maxiter` and `maxfev` are set, minimization will stop at
whichever is reached first.
direc : ndarray
Initial set of direction vectors for the Powell method.
return_all : bool, optional
Set to True to return a list of the best solution at each of the
iterations.
bounds : `Bounds`
If bounds are not provided, then an unbounded line search will be used.
If bounds are provided and the initial guess is within the bounds, then
every function evaluation throughout the minimization procedure will be
within the bounds. If bounds are provided, the initial guess is outside
the bounds, and `direc` is full rank (or left to default), then some
function evaluations during the first iteration may be outside the
bounds, but every function evaluation after the first iteration will be
within the bounds. If `direc` is not full rank, then some parameters may
not be optimized and the solution is not guaranteed to be within the
bounds.
"""
_check_unknown_options(unknown_options)
maxfun = maxfev
retall = return_all
x = asarray(x0).flatten()
if retall:
allvecs = [x]
N = len(x)
# If neither is set, then set both to the default
if maxiter is None and maxfun is None:
maxiter = N * 1000
maxfun = N * 1000
elif maxiter is None:
# Convert the remaining None to np.inf, unless the other is np.inf, in
# which case use the default to avoid unbounded iteration
if maxfun == np.inf:
maxiter = N * 1000
else:
maxiter = np.inf
elif maxfun is None:
if maxiter == np.inf:
maxfun = N * 1000
else:
maxfun = np.inf
# we need to use a mutable object here that we can update in the
# wrapper function
fcalls, func = _wrap_scalar_function_maxfun_validation(func, args, maxfun)
if direc is None:
direc = eye(N, dtype=float)
else:
direc = asarray(direc, dtype=float)
if np.linalg.matrix_rank(direc) != direc.shape[0]:
warnings.warn("direc input is not full rank, some parameters may "
"not be optimized",
OptimizeWarning, 3)
if bounds is None:
# Don't make these arrays of all +/- inf: keep them None so that
# _linesearch_powell can skip an unnecessary element-wise check of the bounds.
lower_bound, upper_bound = None, None
else:
# bounds is standardized in _minimize.py.
lower_bound, upper_bound = bounds.lb, bounds.ub
if np.any(lower_bound > x0) or np.any(x0 > upper_bound):
warnings.warn("Initial guess is not within the specified bounds",
OptimizeWarning, 3)
fval = squeeze(func(x))
x1 = x.copy()
iter = 0
ilist = list(range(N))
while True:
try:
fx = fval
bigind = 0
delta = 0.0
for i in ilist:
direc1 = direc[i]
fx2 = fval
fval, x, direc1 = _linesearch_powell(func, x, direc1,
tol=xtol * 100,
lower_bound=lower_bound,
upper_bound=upper_bound,
fval=fval)
if (fx2 - fval) > delta:
delta = fx2 - fval
bigind = i
iter += 1
if callback is not None:
callback(x)
if retall:
allvecs.append(x)
bnd = ftol * (np.abs(fx) + np.abs(fval)) + 1e-20
if 2.0 * (fx - fval) <= bnd:
break
if fcalls[0] >= maxfun:
break
if iter >= maxiter:
break
if np.isnan(fx) and np.isnan(fval):
# Ended up in a nan-region: bail out
break
# Construct the extrapolated point
direc1 = x - x1
x2 = 2*x - x1
x1 = x.copy()
fx2 = squeeze(func(x2))
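# Powell's replacement test (cf. Numerical Recipes, 'powell'): the direction
# of largest decrease is swapped for the overall move x - x1 only if the
# extrapolated point improves on fx and the quadratic criterion t < 0 below holds.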
if (fx > fx2):
t = 2.0*(fx + fx2 - 2.0*fval)
temp = (fx - fval - delta)
t *= temp*temp
temp = fx - fx2
t -= delta*temp*temp
if t < 0.0:
fval, x, direc1 = _linesearch_powell(
func, x, direc1,
tol=xtol * 100,
lower_bound=lower_bound,
upper_bound=upper_bound,
fval=fval
)
if np.any(direc1):
direc[bigind] = direc[-1]
direc[-1] = direc1
except _MaxFuncCallError:
break
warnflag = 0
# out of bounds is more urgent than exceeding function evals or iters,
# but I don't want to cause inconsistencies by changing the
# established warning flags for maxfev and maxiter, so the out of bounds
# warning flag becomes 4, but is checked for first.
if bounds and (np.any(lower_bound > x) or np.any(x > upper_bound)):
warnflag = 4
msg = _status_message['out_of_bounds']
elif fcalls[0] >= maxfun:
warnflag = 1
msg = _status_message['maxfev']
if disp:
print("Warning: " + msg)
elif iter >= maxiter:
warnflag = 2
msg = _status_message['maxiter']
if disp:
print("Warning: " + msg)
elif np.isnan(fval) or np.isnan(x).any():
warnflag = 3
msg = _status_message['nan']
if disp:
print("Warning: " + msg)
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iter)
print(" Function evaluations: %d" % fcalls[0])
result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0],
status=warnflag, success=(warnflag == 0),
message=msg, x=x)
if retall:
result['allvecs'] = allvecs
return result
def _endprint(x, flag, fval, maxfun, xtol, disp):
if flag == 0:
if disp > 1:
print("\nOptimization terminated successfully;\n"
"The returned value satisfies the termination criteria\n"
"(using xtol = ", xtol, ")")
if flag == 1:
if disp:
print("\nMaximum number of function evaluations exceeded --- "
"increase maxfun argument.\n")
if flag == 2:
if disp:
print("\n{}".format(_status_message['nan']))
return
def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin,
disp=False, workers=1):
"""Minimize a function over a given range by brute force.
Uses the "brute force" method, i.e., computes the function's value
at each point of a multidimensional grid of points, to find the global
minimum of the function.
The function is evaluated everywhere in the range with the datatype of the
first call to the function, as enforced by the ``vectorize`` NumPy
function. The value and type of the function evaluation returned when
``full_output=True`` are affected in addition by the ``finish`` argument
(see Notes).
The brute force approach is inefficient because the number of grid points
increases exponentially: the number of grid points to evaluate is
``Ns ** len(x)``. Consequently, even with coarse grid spacing,
moderately sized problems can take a long time to run, and/or run into
memory limitations.
Parameters
----------
func : callable
The objective function to be minimized. Must be in the
form ``f(x, *args)``, where ``x`` is the argument in
the form of a 1-D array and ``args`` is a tuple of any
additional fixed parameters needed to completely specify
the function.
ranges : tuple
Each component of the `ranges` tuple must be either a
"slice object" or a range tuple of the form ``(low, high)``.
The program uses these to create the grid of points on which
the objective function will be computed. See `Note 2` for
more detail.
args : tuple, optional
Any additional fixed parameters needed to completely specify
the function.
Ns : int, optional
Number of grid points along the axes, if not otherwise
specified. See `Note 2`.
full_output : bool, optional
If True, return the evaluation grid and the objective function's
values on it.
finish : callable, optional
An optimization function that is called with the result of brute force
minimization as initial guess. `finish` should take `func` and
the initial guess as positional arguments, and take `args` as
keyword arguments. It may additionally take `full_output`
and/or `disp` as keyword arguments. Use None if no "polishing"
function is to be used. See Notes for more details.
disp : bool, optional
Set to True to print convergence messages from the `finish` callable.
workers : int or map-like callable, optional
If `workers` is an int the grid is subdivided into `workers`
sections and evaluated in parallel (uses
`multiprocessing.Pool <multiprocessing>`).
Supply `-1` to use all cores available to the Process.
Alternatively supply a map-like callable, such as
`multiprocessing.Pool.map` for evaluating the grid in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
Requires that `func` be pickleable.
.. versionadded:: 1.3.0
Returns
-------
x0 : ndarray
A 1-D array containing the coordinates of a point at which the
objective function had its minimum value. (See `Note 1` for
which point is returned.)
fval : float
Function value at the point `x0`. (Returned when `full_output` is
True.)
grid : tuple
Representation of the evaluation grid. It has the same
length as `x0`. (Returned when `full_output` is True.)
Jout : ndarray
Function values at each point of the evaluation
grid, i.e., ``Jout = func(*grid)``. (Returned
when `full_output` is True.)
See Also
--------
basinhopping, differential_evolution
Notes
-----
*Note 1*: The program finds the gridpoint at which the lowest value
of the objective function occurs. If `finish` is None, that is the
point returned. When the global minimum occurs within (or not very far
outside) the grid's boundaries, and the grid is fine enough, that
point will be in the neighborhood of the global minimum.
However, users often employ some other optimization program to
"polish" the gridpoint values, i.e., to seek a more precise
(local) minimum near `brute's` best gridpoint.
The `brute` function's `finish` option provides a convenient way to do
that. Any polishing program used must take `brute's` output as its
initial guess as a positional argument, and take `brute's` input values
for `args` as keyword arguments, otherwise an error will be raised.
It may additionally take `full_output` and/or `disp` as keyword arguments.
`brute` assumes that the `finish` function returns either an
`OptimizeResult` object or a tuple in the form:
``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing
value of the argument, ``Jmin`` is the minimum value of the objective
function, "..." may be some other returned values (which are not used
by `brute`), and ``statuscode`` is the status code of the `finish` program.
Note that when `finish` is not None, the values returned are those
of the `finish` program, *not* the gridpoint ones. Consequently,
while `brute` confines its search to the input grid points,
the `finish` program's results usually will not coincide with any
gridpoint, and may fall outside the grid's boundary. Thus, if a
minimum only needs to be found over the provided grid points, make
sure to pass in `finish=None`.
*Note 2*: The grid of points is a `numpy.mgrid` object.
For `brute` the `ranges` and `Ns` inputs have the following effect.
Each component of the `ranges` tuple can be either a slice object or a
two-tuple giving a range of values, such as (0, 5). If the component is a
slice object, `brute` uses it directly. If the component is a two-tuple
range, `brute` internally converts it to a slice object that interpolates
`Ns` points from its low-value to its high-value, inclusive.
Examples
--------
We illustrate the use of `brute` to seek the global minimum of a function
of two variables that is given as the sum of a positive-definite
quadratic and two deep "Gaussian-shaped" craters. Specifically, define
the objective function `f` as the sum of three other functions,
``f = f1 + f2 + f3``. We suppose each of these has a signature
``(z, *params)``, where ``z = (x, y)``, and ``params`` and the functions
are as defined below.
>>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
>>> def f1(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)
>>> def f2(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))
>>> def f3(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
>>> def f(z, *params):
... return f1(z, *params) + f2(z, *params) + f3(z, *params)
Thus, the objective function may have local minima near the minimum
of each of the three functions of which it is composed. To
use `fmin` to polish its gridpoint result, we may then continue as
follows:
>>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
>>> from scipy import optimize
>>> resbrute = optimize.brute(f, rranges, args=params, full_output=True,
... finish=optimize.fmin)
>>> resbrute[0] # global minimum
array([-1.05665192, 1.80834843])
>>> resbrute[1] # function value at global minimum
-3.4085818767
Note that if `finish` had been set to None, we would have gotten the
gridpoint [-1.0 1.75] where the rounded function value is -2.892.
"""
N = len(ranges)
if N > 40:
raise ValueError("Brute Force not possible with more "
"than 40 variables.")
lrange = list(ranges)
for k in range(N):
if type(lrange[k]) is not type(slice(None)):
if len(lrange[k]) < 3:
lrange[k] = tuple(lrange[k]) + (complex(Ns),)
lrange[k] = slice(*lrange[k])
if (N == 1):
lrange = lrange[0]
grid = np.mgrid[lrange]
# obtain an array of parameters that is iterable by a map-like callable
inpt_shape = grid.shape
if (N > 1):
grid = np.reshape(grid, (inpt_shape[0], np.prod(inpt_shape[1:]))).T
wrapped_func = _Brute_Wrapper(func, args)
# iterate over input arrays, possibly in parallel
with MapWrapper(pool=workers) as mapper:
Jout = np.array(list(mapper(wrapped_func, grid)))
if (N == 1):
grid = (grid,)
Jout = np.squeeze(Jout)
elif (N > 1):
Jout = np.reshape(Jout, inpt_shape[1:])
grid = np.reshape(grid.T, inpt_shape)
Nshape = shape(Jout)
indx = argmin(Jout.ravel(), axis=-1)
Nindx = np.empty(N, int)
xmin = np.empty(N, float)
for k in range(N - 1, -1, -1):
thisN = Nshape[k]
Nindx[k] = indx % Nshape[k]
indx = indx // thisN
for k in range(N):
xmin[k] = grid[k][tuple(Nindx)]
Jmin = Jout[tuple(Nindx)]
if (N == 1):
grid = grid[0]
xmin = xmin[0]
if callable(finish):
# set up kwargs for `finish` function
finish_args = _getfullargspec(finish).args
finish_kwargs = dict()
if 'full_output' in finish_args:
finish_kwargs['full_output'] = 1
if 'disp' in finish_args:
finish_kwargs['disp'] = disp
elif 'options' in finish_args:
# pass 'disp' as `options`
# (e.g., if `finish` is `minimize`)
finish_kwargs['options'] = {'disp': disp}
# run minimizer
res = finish(func, xmin, args=args, **finish_kwargs)
if isinstance(res, OptimizeResult):
xmin = res.x
Jmin = res.fun
success = res.success
else:
xmin = res[0]
Jmin = res[1]
success = res[-1] == 0
if not success:
if disp:
print("Warning: Either final optimization did not succeed "
"or `finish` does not return `statuscode` as its last "
"argument.")
if full_output:
return xmin, Jmin, grid, Jout
else:
return xmin
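# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of SciPy): the range-to-slice
# conversion described in "Note 2" above. A ``(low, high)`` tuple with the
# default ``Ns`` behaves like ``slice(low, high, complex(Ns))``, which
# ``numpy.mgrid`` expands to ``Ns`` points from low to high inclusive.
# ---------------------------------------------------------------------------
def _example_range_to_slice(Ns=20):
    import numpy as np
    grid_from_slice = np.mgrid[slice(0, 5, complex(Ns))]
    grid_explicit = np.linspace(0, 5, Ns)
    assert np.allclose(grid_from_slice, grid_explicit)
    return grid_from_slice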
class _Brute_Wrapper:
"""
Object to wrap user cost function for optimize.brute, allowing picklability
"""
def __init__(self, f, args):
self.f = f
self.args = [] if args is None else args
def __call__(self, x):
# flatten needed for one dimensional case.
return self.f(np.asarray(x).flatten(), *self.args)
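# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of SciPy): evaluating the grid in
# parallel with the ``workers`` option. Because an int ``workers`` uses
# multiprocessing, the objective must be picklable, which is also why user
# functions are wrapped in the module-level ``_Brute_Wrapper`` class above
# rather than in a lambda. The objective below is a hypothetical placeholder.
# ---------------------------------------------------------------------------
def _demo_brute_objective(z):
    x, y = z
    return (x - 0.5) ** 2 + (y + 1.0) ** 2
def _example_parallel_brute():
    from scipy import optimize
    ranges = (slice(-3, 3, 0.25), slice(-3, 3, 0.25))
    xmin = optimize.brute(_demo_brute_objective, ranges, finish=None, workers=2)
    return xmin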
def show_options(solver=None, method=None, disp=True):
"""
Show documentation for additional options of optimization solvers.
These are method-specific options that can be supplied through the
``options`` dict.
Parameters
----------
solver : str
Type of optimization solver. One of 'minimize', 'minimize_scalar',
'root', 'root_scalar', 'linprog', or 'quadratic_assignment'.
method : str, optional
If not given, shows all methods of the specified solver. Otherwise,
show only the options for the specified method. Valid values
corresponds to methods' names of respective solver (e.g., 'BFGS' for
'minimize').
disp : bool, optional
Whether to print the result rather than returning it.
Returns
-------
text
Either None (for disp=True) or the text string (disp=False)
Notes
-----
The solver-specific methods are:
`scipy.optimize.minimize`
- :ref:`Nelder-Mead <optimize.minimize-neldermead>`
- :ref:`Powell <optimize.minimize-powell>`
- :ref:`CG <optimize.minimize-cg>`
- :ref:`BFGS <optimize.minimize-bfgs>`
- :ref:`Newton-CG <optimize.minimize-newtoncg>`
- :ref:`L-BFGS-B <optimize.minimize-lbfgsb>`
- :ref:`TNC <optimize.minimize-tnc>`
- :ref:`COBYLA <optimize.minimize-cobyla>`
- :ref:`SLSQP <optimize.minimize-slsqp>`
- :ref:`dogleg <optimize.minimize-dogleg>`
- :ref:`trust-ncg <optimize.minimize-trustncg>`
`scipy.optimize.root`
- :ref:`hybr <optimize.root-hybr>`
- :ref:`lm <optimize.root-lm>`
- :ref:`broyden1 <optimize.root-broyden1>`
- :ref:`broyden2 <optimize.root-broyden2>`
- :ref:`anderson <optimize.root-anderson>`
- :ref:`linearmixing <optimize.root-linearmixing>`
- :ref:`diagbroyden <optimize.root-diagbroyden>`
- :ref:`excitingmixing <optimize.root-excitingmixing>`
- :ref:`krylov <optimize.root-krylov>`
- :ref:`df-sane <optimize.root-dfsane>`
`scipy.optimize.minimize_scalar`
- :ref:`brent <optimize.minimize_scalar-brent>`
- :ref:`golden <optimize.minimize_scalar-golden>`
- :ref:`bounded <optimize.minimize_scalar-bounded>`
`scipy.optimize.root_scalar`
- :ref:`bisect <optimize.root_scalar-bisect>`
- :ref:`brentq <optimize.root_scalar-brentq>`
- :ref:`brenth <optimize.root_scalar-brenth>`
- :ref:`ridder <optimize.root_scalar-ridder>`
- :ref:`toms748 <optimize.root_scalar-toms748>`
- :ref:`newton <optimize.root_scalar-newton>`
- :ref:`secant <optimize.root_scalar-secant>`
- :ref:`halley <optimize.root_scalar-halley>`
`scipy.optimize.linprog`
- :ref:`simplex <optimize.linprog-simplex>`
- :ref:`interior-point <optimize.linprog-interior-point>`
- :ref:`revised simplex <optimize.linprog-revised_simplex>`
- :ref:`highs <optimize.linprog-highs>`
- :ref:`highs-ds <optimize.linprog-highs-ds>`
- :ref:`highs-ipm <optimize.linprog-highs-ipm>`
`scipy.optimize.quadratic_assignment`
- :ref:`faq <optimize.qap-faq>`
- :ref:`2opt <optimize.qap-2opt>`
Examples
--------
We can print documentations of a solver in stdout:
>>> from scipy.optimize import show_options
>>> show_options(solver="minimize")
...
Specifying a method is possible:
>>> show_options(solver="minimize", method="Nelder-Mead")
...
We can also get the documentations as a string:
>>> show_options(solver="minimize", method="Nelder-Mead", disp=False)
Minimization of scalar function of one or more variables using the ...
"""
import textwrap
doc_routines = {
'minimize': (
('bfgs', 'scipy.optimize._optimize._minimize_bfgs'),
('cg', 'scipy.optimize._optimize._minimize_cg'),
('cobyla', 'scipy.optimize._cobyla_py._minimize_cobyla'),
('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'),
('l-bfgs-b', 'scipy.optimize._lbfgsb_py._minimize_lbfgsb'),
('nelder-mead', 'scipy.optimize._optimize._minimize_neldermead'),
('newton-cg', 'scipy.optimize._optimize._minimize_newtoncg'),
('powell', 'scipy.optimize._optimize._minimize_powell'),
('slsqp', 'scipy.optimize._slsqp_py._minimize_slsqp'),
('tnc', 'scipy.optimize._tnc._minimize_tnc'),
('trust-ncg',
'scipy.optimize._trustregion_ncg._minimize_trust_ncg'),
('trust-constr',
'scipy.optimize._trustregion_constr.'
'_minimize_trustregion_constr'),
('trust-exact',
'scipy.optimize._trustregion_exact._minimize_trustregion_exact'),
('trust-krylov',
'scipy.optimize._trustregion_krylov._minimize_trust_krylov'),
),
'root': (
('hybr', 'scipy.optimize._minpack_py._root_hybr'),
('lm', 'scipy.optimize._root._root_leastsq'),
('broyden1', 'scipy.optimize._root._root_broyden1_doc'),
('broyden2', 'scipy.optimize._root._root_broyden2_doc'),
('anderson', 'scipy.optimize._root._root_anderson_doc'),
('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'),
('excitingmixing', 'scipy.optimize._root._root_excitingmixing_doc'),
('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'),
('krylov', 'scipy.optimize._root._root_krylov_doc'),
('df-sane', 'scipy.optimize._spectral._root_df_sane'),
),
'root_scalar': (
('bisect', 'scipy.optimize._root_scalar._root_scalar_bisect_doc'),
('brentq', 'scipy.optimize._root_scalar._root_scalar_brentq_doc'),
('brenth', 'scipy.optimize._root_scalar._root_scalar_brenth_doc'),
('ridder', 'scipy.optimize._root_scalar._root_scalar_ridder_doc'),
('toms748', 'scipy.optimize._root_scalar._root_scalar_toms748_doc'),
('secant', 'scipy.optimize._root_scalar._root_scalar_secant_doc'),
('newton', 'scipy.optimize._root_scalar._root_scalar_newton_doc'),
('halley', 'scipy.optimize._root_scalar._root_scalar_halley_doc'),
),
'linprog': (
('simplex', 'scipy.optimize._linprog._linprog_simplex_doc'),
('interior-point', 'scipy.optimize._linprog._linprog_ip_doc'),
('revised simplex', 'scipy.optimize._linprog._linprog_rs_doc'),
('highs-ipm', 'scipy.optimize._linprog._linprog_highs_ipm_doc'),
('highs-ds', 'scipy.optimize._linprog._linprog_highs_ds_doc'),
('highs', 'scipy.optimize._linprog._linprog_highs_doc'),
),
'quadratic_assignment': (
('faq', 'scipy.optimize._qap._quadratic_assignment_faq'),
('2opt', 'scipy.optimize._qap._quadratic_assignment_2opt'),
),
'minimize_scalar': (
('brent', 'scipy.optimize._optimize._minimize_scalar_brent'),
('bounded', 'scipy.optimize._optimize._minimize_scalar_bounded'),
('golden', 'scipy.optimize._optimize._minimize_scalar_golden'),
),
}
if solver is None:
text = ["\n\n\n========\n", "minimize\n", "========\n"]
text.append(show_options('minimize', disp=False))
text.extend(["\n\n===============\n", "minimize_scalar\n",
"===============\n"])
text.append(show_options('minimize_scalar', disp=False))
text.extend(["\n\n\n====\n", "root\n",
"====\n"])
text.append(show_options('root', disp=False))
text.extend(['\n\n\n=======\n', 'linprog\n',
'=======\n'])
text.append(show_options('linprog', disp=False))
text = "".join(text)
else:
solver = solver.lower()
if solver not in doc_routines:
raise ValueError('Unknown solver %r' % (solver,))
if method is None:
text = []
for name, _ in doc_routines[solver]:
text.extend(["\n\n" + name, "\n" + "="*len(name) + "\n\n"])
text.append(show_options(solver, name, disp=False))
text = "".join(text)
else:
method = method.lower()
methods = dict(doc_routines[solver])
if method not in methods:
raise ValueError("Unknown method %r" % (method,))
name = methods[method]
# Import function object
parts = name.split('.')
mod_name = ".".join(parts[:-1])
__import__(mod_name)
obj = getattr(sys.modules[mod_name], parts[-1])
# Get doc
doc = obj.__doc__
if doc is not None:
text = textwrap.dedent(doc).strip()
else:
text = ""
if disp:
print(text)
return
else:
return text
def main():
import time
times = []
algor = []
x0 = [0.8, 1.2, 0.7]
print("Nelder-Mead Simplex")
print("===================")
start = time.time()
x = fmin(rosen, x0)
print(x)
times.append(time.time() - start)
algor.append('Nelder-Mead Simplex\t')
print()
print("Powell Direction Set Method")
print("===========================")
start = time.time()
x = fmin_powell(rosen, x0)
print(x)
times.append(time.time() - start)
algor.append('Powell Direction Set Method.')
print()
print("Nonlinear CG")
print("============")
start = time.time()
x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200)
print(x)
times.append(time.time() - start)
algor.append('Nonlinear CG \t')
print()
print("BFGS Quasi-Newton")
print("=================")
start = time.time()
x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80)
print(x)
times.append(time.time() - start)
algor.append('BFGS Quasi-Newton\t')
print()
print("BFGS approximate gradient")
print("=========================")
start = time.time()
x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100)
print(x)
times.append(time.time() - start)
algor.append('BFGS without gradient\t')
print()
print("Newton-CG with Hessian product")
print("==============================")
start = time.time()
x = fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_prod, maxiter=80)
print(x)
times.append(time.time() - start)
algor.append('Newton-CG with hessian product')
print()
print("Newton-CG with full Hessian")
print("===========================")
start = time.time()
x = fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80)
print(x)
times.append(time.time() - start)
algor.append('Newton-CG with full Hessian')
print()
print("\nMinimizing the Rosenbrock function of order 3\n")
print(" Algorithm \t\t\t Seconds")
print("===========\t\t\t =========")
for alg, tme in zip(algor, times):
print(alg, "\t -- ", tme)
if __name__ == "__main__":
main()
|
grlee77/scipy
|
scipy/optimize/_optimize.py
|
Python
|
bsd-3-clause
| 134,753
|
[
"Gaussian"
] |
57b31d5190bfa974a4c6babc62f85be2a31286ffe4c80d5c48fe971ca001402d
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2009 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
"""
Mantid system testing framework. This module contains all of the necessary code
to run sets of system tests on the Mantid framework by executing scripts directly
or by importing them into MantidPlot.
File change history is stored at: <https://github.com/mantidproject/systemtests>.
"""
from __future__ import (absolute_import, division, print_function)
# == for testing conda build of mantid-framework ==========
import os
if os.environ.get('MANTID_FRAMEWORK_CONDA_SYSTEMTEST'):
# conda build of mantid-framework sometimes require importing matplotlib before mantid
import matplotlib
# =========================================================
from six import PY3
import datetime
import difflib
import imp
import inspect
from mantid.api import FileFinder
from mantid.api import FrameworkManager
from mantid.kernel import config, MemoryStats
from mantid.simpleapi import AlgorithmManager, Load, SaveNexus
import numpy
import platform
import re
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
# Path to this file
THIS_MODULE_DIR = os.path.dirname(os.path.realpath(__file__))
# Some windows paths can contain sequences such as \r, e.g. \release_systemtests
# and need escaping to be able to add to the python path
TESTING_FRAMEWORK_DIR = THIS_MODULE_DIR.replace('\\', '\\\\')
#########################################################################
# The base test class.
#########################################################################
class MantidSystemTest(unittest.TestCase):
'''Defines a base class for system tests, providing functions
that should be overridden by inheriting classes to perform tests.
'''
# Define a delimiter when reporting results
DELIMITER = '|'
# Define a prefix for reporting results
PREFIX = 'RESULT'
def __init__(self):
super(MantidSystemTest, self).__init__()
# A list of things not to check when validating
self.disableChecking = []
# Whether or not to strip off whitespace when doing simple ascii diff
self.stripWhitespace = True
# Tolerance
self.tolerance = 0.00000001
# Store the resident memory of the system (in MB) before starting the test
FrameworkManager.clear()
self.memory = MemoryStats().residentMem()/1024
def runTest(self):
raise NotImplementedError('"runTest(self)" should be overridden in a derived class')
def skipTests(self):
'''
Override this to return True when the tests should be skipped for some
reason.
See also: requiredFiles() and requiredMemoryMB()
'''
return False
def excludeInPullRequests(self):
'''
Override this to return True if the test is too slow or deemed unnecessary to
be run with every pull request. These tests will be run nightly instead.
'''
return False
def validate(self):
'''
Override this to provide a pair of workspaces which should be checked for equality
by the doValidation method.
The overriding method should return a pair of strings. This could be two workspace
names, e.g. return 'workspace1','workspace2', or a workspace name and a nexus
filename (which must have nxs suffix), e.g. return 'workspace1','GEM00001.nxs'.
'''
return None
def requiredFiles(self):
'''
Override this method if you want to require files for the test.
Return a list of files.
'''
return []
def requiredMemoryMB(self):
'''
Override this method to specify the amount of free memory,
in megabytes, that is required to run the test.
The test is skipped if there is not enough memory.
'''
return 0
def validateMethod(self):
'''
Override this to specify which validation method to use. Look at the validate* methods to
see what allowed values are.
'''
return "WorkspaceToNeXus"
def maxIterations(self):
'''Override this to perform more than 1 iteration of the implemented test.'''
return 1
def reportResult(self, name, value):
'''
Send a result to be stored as a name,value pair
'''
output = self.PREFIX + self.DELIMITER + name + self.DELIMITER + str(value) + "\n"
# Ensure that this is all printed together and not mixed with stderr
sys.stdout.flush()
sys.stdout.write(output)
sys.stdout.flush()
def __verifyRequiredFile(self, filename):
'''Return True if the specified file name is findable by Mantid.'''
# simple way is just getFullPath which never uses archive search
if os.path.exists(FileFinder.getFullPath(filename)):
return True
# try full findRuns which will use archive search if it is turned on
try:
candidates = FileFinder.findRuns(filename)
for item in candidates:
if os.path.exists(item):
return True
except RuntimeError:
return False
# file was not found
return False
def __verifyRequiredFiles(self):
# first see if there is anything to do
reqFiles = self.requiredFiles()
if len(reqFiles) <= 0:
return
# by default everything is ok
foundAll = True
# check that all of the files exist
for filename in reqFiles:
if not self.__verifyRequiredFile(filename):
print("Missing required file: '%s'" % filename)
foundAll = False
if not foundAll:
sys.exit(TestRunner.SKIP_TEST)
def __verifyMemory(self):
"""Do we need to skip due to lack of memory?"""
required = self.requiredMemoryMB()
if required <= 0:
return
# Check if memory is available
MB_avail = MemoryStats().availMem()/(1024.)
if (MB_avail < required):
print("Insufficient memory available to run test! %g MB available, need %g MB." % (MB_avail, required))
sys.exit(TestRunner.SKIP_TEST)
def execute(self):
'''Run the defined number of iterations of this test'''
# Do we need to skip due to missing files?
self.__verifyRequiredFiles()
self.__verifyMemory()
# A custom check for skipping the tests for other reasons
if self.skipTests():
sys.exit(TestRunner.SKIP_TEST)
# A custom check for skipping tests that shouldn't be run with every PR
if self.excludeInPullRequests():
sys.exit(TestRunner.SKIP_TEST)
# Start timer
start = time.time()
countmax = self.maxIterations() + 1
for i in range(1, countmax):
istart = time.time()
self.runTest()
delta_t = time.time() - istart
self.reportResult('iteration time_taken', str(i) + ' %.2f' % delta_t)
delta_t = float(time.time() - start)
# Finish
self.reportResult('time_taken', '%.2f' % delta_t)
def __prepASCIIFile(self, filename):
"""Prepare an ascii file for comparison using difflib."""
with open(filename, mode='r') as handle:
stuff = handle.readlines()
if self.stripWhitespace:
stuff = [line.strip() for line in stuff]
return stuff
def validateASCII(self):
"""Validate ASCII files using difflib."""
(measured, expected) = self.validate()
if not os.path.isabs(measured):
measured = FileFinder.Instance().getFullPath(measured)
if not os.path.isabs(expected):
expected = FileFinder.Instance().getFullPath(expected)
measured = self.__prepASCIIFile(measured)
expected = self.__prepASCIIFile(expected)
# calculate the difference
diff = difflib.Differ().compare(measured, expected)
result = []
for line in diff:
if line.startswith('+') or line.startswith('-') or line.startswith('?'):
result.append(line)
# print the difference
if len(result) > 0:
if self.stripWhitespace:
msg = "(whitespace striped from ends)"
else:
msg = ""
print("******************* Difference in files", msg)
print("\n".join(result))
print("*******************")
return False
else:
return True
def validateWorkspaceToNeXus(self):
'''
Assumes the second item from self.validate() is a nexus file and loads it
to compare to the supplied workspace.
'''
valNames = list(self.validate())
numRezToCheck = len(valNames)
mismatchName = None
validationResult = True
# results are in pairs
for valname, refname in zip(valNames[::2], valNames[1::2]):
if refname.endswith('.nxs'):
Load(Filename=refname, OutputWorkspace="RefFile")
refname = "RefFile"
else:
raise RuntimeError("Should supply a NeXus file: %s" % refname)
valPair = (valname, "RefFile")
if numRezToCheck > 2:
mismatchName = valname
if not(self.validateWorkspaces(valPair, mismatchName)):
validationResult = False
print('Workspace {0} not equal to its reference file'.format(valname))
return validationResult
def validateWorkspaceToWorkspace(self):
'''
Assumes the second item from self.validate() is an existing workspace
to compare to the supplied workspace.
'''
valNames = list(self.validate())
return self.validateWorkspaces(valNames)
def validateWorkspaces(self, valNames=None, mismatchName=None):
'''
Performs a check that two workspaces are equal using the CompareWorkspaces
algorithm. Loads one workspace from a nexus file if appropriate.
Returns true if: the workspaces match
OR the validate method has not been overridden.
        Returns false if the workspaces do not match. The reason will be in the log.
'''
if valNames is None:
valNames = self.validate()
checker = AlgorithmManager.create("CompareWorkspaces")
checker.setLogging(True)
checker.setPropertyValue("Workspace1", valNames[0])
checker.setPropertyValue("Workspace2", valNames[1])
checker.setProperty("Tolerance", float(self.tolerance))
if hasattr(self, 'tolerance_is_rel_err') and self.tolerance_is_rel_err:
checker.setProperty("ToleranceRelErr", True)
for d in self.disableChecking:
checker.setProperty("Check"+d, False)
checker.execute()
if not checker.getProperty("Result").value:
print(self.__class__.__name__)
if mismatchName:
SaveNexus(InputWorkspace=valNames[0],
Filename=self.__class__.__name__+mismatchName+'-mismatch.nxs')
else:
SaveNexus(InputWorkspace=valNames[0],
Filename=self.__class__.__name__+'-mismatch.nxs')
return False
return True
def doValidation(self):
"""
Perform validation. This selects which validation method to use by the result
of validateMethod() and validate(). If validate() is not overridden this will
return True.
"""
# if no validation is specified then it must be ok
validation = self.validate()
if validation is None:
return True
# if a simple boolean then use this
if type(validation) == bool:
return validation
# or numpy boolean
if type(validation) == numpy.bool_:
return bool(validation)
# switch based on validation methods
method = self.validateMethod()
if method is None:
return True # don't validate
method = method.lower()
if "validateworkspacetonexus".endswith(method):
return self.validateWorkspaceToNeXus()
elif "validateworkspacetoworkspace".endswith(method):
return self.validateWorkspaceToWorkspace()
elif "validateascii".endswith(method):
return self.validateASCII()
else:
raise RuntimeError("invalid validation method '%s'" % self.validateMethod())
def returnValidationCode(self, code):
"""
Calls doValidation() and returns 0 in success and code if failed. This will be
used as return code from the calling python subprocess
"""
if self.doValidation():
retcode = 0
else:
retcode = code
if retcode == 0:
self._success = True
else:
self._success = False
# Now the validation is complete we can clear out all the stored data and check memory usage
FrameworkManager.clear()
# Get the resident memory again and work out how much it's gone up by (in MB)
memorySwallowed = MemoryStats().residentMem()/1024 - self.memory
# Store the result
self.reportResult('memory footprint increase', memorySwallowed)
return retcode
def succeeded(self):
"""Returns true if the test has been run and it succeeded, false otherwise"""
if hasattr(self, '_success'):
return self._success
else:
return False
def cleanup(self):
'''
This function is called after a test has completed and can be used to
clean up, i.e. remove workspaces etc
'''
pass
def assertDelta(self, value, expected, delta, msg=""):
"""Check that a value is within +- delta of the expected value"""
# Build the error message
if len(msg) > 0:
msg += " "
msg += "Expected %g == %g within +- %g." % (value, expected, delta)
if (value > expected+delta) or (value < expected-delta):
raise Exception(msg)
def assertLessThan(self, value, expected, msg=""):
"""
Check that a value is < expected.
"""
# Build the error message
if len(msg) > 0:
msg += " "
msg += "Expected %g < %g " % (value, expected)
if (value >= expected):
raise Exception(msg)
def assertGreaterThan(self, value, expected, msg=""):
"""
Check that a value is > expected.
"""
# Build the error message
if len(msg) > 0:
msg += " "
msg += "Expected %g > %g " % (value, expected)
if (value <= expected):
raise Exception(msg)
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
"""
Check that a callable raises an exception when called.
"""
was_raised = True
try:
callableObj(*args, **kwargs)
was_raised = False
except excClass:
pass
except Exception as e:
msg = 'Expected {0} but raised {1} instead.'
raise Exception(msg.format(excClass.__name__, e.__class__.__name__))
if not was_raised:
raise Exception('{} not raised'.format(excClass.__name__))
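#########################################################################
# Editor's illustrative sketch (not part of the framework): a minimal
# system test showing the overrides described above. The data file,
# workspace name and reference file are hypothetical placeholders.
#########################################################################
class _ExampleSystemTestSketch(MantidSystemTest):
    def requiredFiles(self):
        # Files the test needs; the framework skips the test if any is missing
        return ['HYPOTHETICAL_run_0001.nxs']
    def runTest(self):
        # Produce the workspace to be validated; a real test would run a
        # chain of algorithms from mantid.simpleapi here
        Load(Filename='HYPOTHETICAL_run_0001.nxs', OutputWorkspace='result_ws')
    def validate(self):
        # Compare the produced workspace to a reference NeXus file using the
        # default "WorkspaceToNeXus" validation method
        self.tolerance = 1e-6
        return 'result_ws', 'HYPOTHETICAL_reference.nxs'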
#########################################################################
# A class to store the results of a test
#########################################################################
class TestResult(object):
'''
Stores the results of each test so that they can be reported later.
'''
def __init__(self):
self._results = []
self.name = ''
self.filename = ''
self.date = ''
self.status = ''
self.time_taken = ''
self.total_time = ''
self.output = ''
self.err = ''
def __eq__(self, other):
return self.name == other.name
def __lt__(self, other):
return self.name < other.name
def addItem(self, item):
'''
Add an item to the store, this should be a list containing 2 entries: [Name, Value]
'''
self._results.append(item)
def resultLogs(self):
'''
        Get the list storing the results
'''
return self._results
#########################################################################
# A base class to support report results in an appropriate manner
#########################################################################
class ResultReporter(object):
'''
A base class for results reporting. In order to get the results in an
appropriate form, subclass this class and implement the dispatchResults
method.
'''
def __init__(self, total_number_of_tests=0, maximum_name_length=0):
'''Initialize a class instance, e.g. connect to a database'''
self._total_number_of_tests = total_number_of_tests
self._maximum_name_length = maximum_name_length
pass
def dispatchResults(self, result, number_of_completed_tests):
raise NotImplementedError('"dispatchResults(self, result)" should be overridden in a derived class')
def printResultsToConsole(self, result, number_of_completed_tests):
'''
Print the results to standard out
'''
if ((result.status == 'skipped') and (not self._show_skipped)):
pass
else:
console_output = ''
if self._quiet:
percentage = int(float(number_of_completed_tests)*100.0/float(self._total_number_of_tests))
if len(result._results) < 6:
time_taken = " -- "
else:
time_taken = result._results[6][1]
console_output += '[{:>3d}%] {:>3d}/{:>3d} : '.format(percentage, number_of_completed_tests,
self._total_number_of_tests)
console_output += '{:.<{}} ({}: {}s)'.format(result.name+" ", self._maximum_name_length+2,
result.status, time_taken)
if ((self._output_on_failure
and (result.status != 'success')
and (result.status != 'skipped'))
or (not self._quiet)):
nstars = 80
console_output += '\n' + ('*' * nstars) + '\n'
print_list = ['test_name', 'filename', 'test_date', 'host_name', 'environment',
'status', 'time_taken', 'memory footprint increase', 'output', 'err']
for key in print_list:
key_not_found = True
for i in range(len(result._results)):
if key == result._results[i][0]:
console_output += '{}: {}\n'.format(key, result._results[i][1])
key_not_found = False
if key_not_found:
try:
console_output += '{}: {}\n'.format(key, getattr(result, key))
except AttributeError:
pass
console_output += ('*' * nstars) + '\n'
print(console_output)
sys.stdout.flush()
return
#########################################################################
# A class to report results as formatted text output
#########################################################################
class TextResultReporter(ResultReporter):
'''
Report the results of a test using standard out
'''
def dispatchResults(self, result, number_of_completed_tests):
'''
The default text reporter prints to standard out
'''
self.printResultsToConsole(result, number_of_completed_tests)
return
# A class to report results as junit xml
# DO NOT MOVE
from xmlreporter import XmlResultReporter # noqa
#########################################################################
# A base class for a TestRunner
#########################################################################
class TestRunner(object):
'''
A base class to serve as a wrapper to actually run the tests in a specific
environment, i.e. console, gui
'''
SUCCESS_CODE = 0
GENERIC_FAIL_CODE = 1
SEGFAULT_CODE = 139
VALIDATION_FAIL_CODE = 99
NOT_A_TEST = 98
SKIP_TEST = 97
def __init__(self, executable, exec_args=None, escape_quotes=False, clean=False):
self._executable = executable
self._exec_args = exec_args
self._test_dir = ''
self._escape_quotes = escape_quotes
self._clean = clean
def getTestDir(self):
return self._test_dir
def setTestDir(self, test_dir):
self._test_dir = os.path.abspath(test_dir).replace('\\', '/')
def spawnSubProcess(self, cmd):
'''Spawn a new process and run the given command within it'''
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, bufsize=-1)
std_out, _ = proc.communicate()
return proc.returncode, std_out
def start(self, script):
'''Run the given test code in a new subprocess'''
exec_call = self._executable
if self._exec_args:
exec_call += ' ' + self._exec_args
# write script to temporary file and execute this file
tmp_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
tmp_file.write(script.asString(clean=self._clean))
tmp_file.close()
cmd = exec_call + ' ' + tmp_file.name
results = self.spawnSubProcess(cmd)
os.remove(tmp_file.name)
return results
#########################################################################
# Encapsulate the script for running a single test
#########################################################################
class TestScript(object):
def __init__(self, test_dir, module_name, test_cls_name, exclude_in_pr_builds):
self._test_dir = test_dir
self._modname = module_name
self._test_cls_name = test_cls_name
self._exclude_in_pr_builds = not exclude_in_pr_builds
def asString(self, clean=False):
code = """
# If any tests happen to hit a PyQt4 import make sure it uses version 2 of the api
# Remove this when everything is switched to qtpy
import sip
try:
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
sip.setapi('QDate', 2)
sip.setapi('QDateTime', 2)
sip.setapi('QTextStream', 2)
sip.setapi('QTime', 2)
sip.setapi('QUrl', 2)
except AttributeError:
# PyQt < v4.6
pass
import sys
for p in ('{test_framework}', '{test_dir}'):
sys.path.append(p)
from {test_modname} import {test_cls}
systest = {test_cls}()
if {exclude_in_pr}:
systest.excludeInPullRequests = lambda: False
""".format(test_framework=TESTING_FRAMEWORK_DIR, test_dir=self._test_dir, test_modname=self._modname,
test_cls=self._test_cls_name, exclude_in_pr=self._exclude_in_pr_builds)
if (not clean):
code += "systest.execute()\n" + \
"exitcode = systest.returnValidationCode({})\n".format(TestRunner.VALIDATION_FAIL_CODE)
else:
code += "exitcode = 0\n"
code += "systest.cleanup()\nsys.exit(exitcode)\n"
return code
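#########################################################################
# Editor's illustrative sketch (not part of the framework): how a
# TestScript and a TestRunner fit together. The executable path, test
# directory, module and class names are hypothetical placeholders.
#########################################################################
def _example_run_single_test_sketch():
    runner = TestRunner(executable='/usr/bin/python3', clean=False)
    runner.setTestDir('/path/to/Testing/SystemTests/tests/analysis')
    script = TestScript(runner.getTestDir(), 'SomeTestModule', 'SomeTestClass',
                        exclude_in_pr_builds=False)
    # start() writes the generated script to a temporary file, runs it in a
    # subprocess and returns (return code, captured stdout)
    retcode, output = runner.start(script)
    return retcode, output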
#########################################################################
# A class to tie together a test and its results
#########################################################################
class TestSuite(object):
'''
Tie together a test and its results.
'''
def __init__(self, test_dir, modname, testname, filename=None):
self._test_dir = test_dir
self._modname = modname
self._test_cls_name = testname
self._fqtestname = modname
# A None testname indicates the source did not load properly
# It has come this far so that it gets reported as a proper failure
# by the framework
if testname is not None:
self._fqtestname += '.' + testname
self._result = TestResult()
        # Add some results that are not linked to the actual test itself
self._result.name = self._fqtestname
if filename:
self._result.filename = filename
else:
self._result.filename = self._fqtestname
self._result.addItem(['test_name', self._fqtestname])
sysinfo = platform.uname()
self._result.addItem(['host_name', sysinfo[1]])
self._result.addItem(['environment', self.envAsString()])
self._result.status = 'skipped' # the test has been skipped until it has been executed
name = property(lambda self: self._fqtestname)
status = property(lambda self: self._result.status)
def envAsString(self):
if os.name == 'nt':
system = platform.system().lower()[:3]
arch = platform.architecture()[0][:2]
env = system + arch
elif os.name == 'mac':
env = platform.mac_ver()[0]
else:
env = platform.dist()[0]
return env
def markAsSkipped(self, reason):
self.setOutputMsg(reason)
self._result.status = 'skipped'
def execute(self, runner, exclude_in_pr_builds):
if self._test_cls_name is not None:
script = TestScript(self._test_dir, self._modname, self._test_cls_name, exclude_in_pr_builds)
# Start the new process and wait until it finishes
retcode, output = runner.start(script)
else:
retcode, output = TestRunner.SKIP_TEST, ""
self._result.date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self._result.addItem(['test_date', self._result.date])
if retcode == TestRunner.SUCCESS_CODE:
status = 'success'
elif retcode == TestRunner.GENERIC_FAIL_CODE:
# This is most likely an algorithm failure, but it's not certain
status = 'algorithm failure'
elif retcode == TestRunner.VALIDATION_FAIL_CODE:
status = 'failed validation'
elif retcode == TestRunner.SEGFAULT_CODE:
status = 'crashed'
elif retcode == TestRunner.SKIP_TEST:
status = 'skipped'
elif retcode < 0:
status = 'hung'
else:
status = 'unknown'
# Check return code and add result
self._result.status = status
self._result.addItem(['status', status])
# Dump std out so we know what happened
if PY3:
if isinstance(output, bytes):
output = output.decode()
self._result.output = '\n' + output
all_lines = output.split('\n')
# Find the test results
for line in all_lines:
entries = line.split(MantidSystemTest.DELIMITER)
if len(entries) == 3 and entries[0] == MantidSystemTest.PREFIX:
self._result.addItem([entries[1], entries[2]])
def setOutputMsg(self, msg=None):
if msg is not None:
self._result.output = msg
def reportResults(self, reporters, number_of_completed_tests):
for r in reporters:
r.dispatchResults(self._result, number_of_completed_tests)
#########################################################################
# The main API class
#########################################################################
class TestManager(object):
'''A manager class that is responsible for overseeing the testing process.
This is the main interaction point for the framework.
'''
def __init__(self, test_loc=None, runner=None, output=[TextResultReporter()],
quiet=False, testsInclude=None, testsExclude=None, showSkipped=False,
exclude_in_pr_builds=None, output_on_failure=False, clean=False,
process_number=0, ncores=1, list_of_tests=None):
'''Initialize a class instance'''
# Runners and reporters
self._runner = runner
self._reporters = output
for r in self._reporters:
r._quiet = quiet
r._output_on_failure = output_on_failure
self._clean = clean
self._showSkipped = showSkipped
self._testDir = test_loc
self._quiet = quiet
self._testsInclude=testsInclude
self._testsExclude=testsExclude
self._exclude_in_pr_builds=exclude_in_pr_builds
self._passedTests = 0
self._skippedTests = 0
self._failedTests = 0
self._lastTestRun = 0
self._tests = list_of_tests
def generateMasterTestList(self):
# If given option is a directory
        if os.path.isdir(self._testDir):
test_dir = os.path.abspath(self._testDir).replace('\\', '/')
sys.path.append(test_dir)
self._runner.setTestDir(test_dir)
full_test_list = self.loadTestsFromDir(test_dir)
else:
            if not os.path.exists(self._testDir):
print('Cannot find file ' + self._testDir + '.py. Please check the path.')
exit(2)
test_dir = os.path.abspath(os.path.dirname(self._testDir)).replace('\\', '/')
sys.path.append(test_dir)
self._runner.setTestDir(test_dir)
full_test_list = self.loadTestsFromModule(os.path.basename(self._testDir))
# Gather statistics on full test list
test_stats = [0, 0, 0]
test_stats[2] = len(full_test_list)
reduced_test_list = []
for t in full_test_list:
if self.__shouldTest(t) or self._showSkipped:
reduced_test_list.append(t)
if len(reduced_test_list) == 0:
print('No tests defined in ' + test_dir +
'. Please ensure all test classes sub class systemtesting.MantidSystemTest.')
exit(2)
test_stats[0] = len(reduced_test_list)
for t in reduced_test_list:
test_stats[1] = max(test_stats[1], len(t._fqtestname))
# When using multiprocessing, we have to split the list of tests among
# the processes into groups instead of test by test, to avoid issues
# with data being cleaned up before another process has finished.
#
# We create a list of test modules (= different python files in the
# 'Testing/SystemTests/tests/analysis' directory) and count how many
# tests are in each module. We also create on the fly a list of tests
# for each module.
modcounts = dict()
modtests = dict()
for t in reduced_test_list:
key = t._modname
if key in modcounts.keys():
modcounts[key] += 1
modtests[key].append(t)
else:
modcounts[key] = 1
modtests[key] = [t]
# Now we scan each test module (= python file) and list all the data files
# that are used by that module. The possible ways files are being specified
# are:
# 1. if the extension '.nxs' is present in the line
# 2. if there is a sequence of at least 4 digits inside a string
# In case number 2, we have to search for strings starting with 4 digits,
# i.e. "0123, or strings ending with 4 digits 0123".
# This might over-count, meaning some sequences of 4 digits might not be
# used for a file name specification, but it does not matter if it gets
# identified as a filename as the probability of the same sequence being
# present in another python file is small, and it would therefore not lock
# any other tests.
# Some dictionaries to store the info
files_required_by_test_module = dict()
data_file_lock_status = dict()
# The extension most commonly used
extensions = [".nxs", ".raw", ".RAW"]
# A regex check is used to iterate back from the position of '.nxs' and
# check that the current character is still part of a variable name. This
# is needed to find the start of the string, hence the total filename.
check = re.compile("[A-Za-z0-9_-]")
# In the case of looking for digits inside strings, the strings can start
# with either " or '
string_quotation_mark = ["'",'"']
# Now look through all the test modules and build the list of data files
for modkey in modtests.keys():
fname = modkey+".py"
files_required_by_test_module[modkey] = []
with open(os.path.join(os.path.dirname(self._testDir), "analysis", fname),"r") as pyfile:
for line in pyfile.readlines():
# Search for all instances of '.nxs' or '.raw'
for ext in extensions:
for indx in [m.start() for m in re.finditer(ext, line)]:
# When '.nxs' is found, iterate backwards to find the start
# of the filename.
for i in range(indx-1,1,-1):
# If the present character is not either a letter, digit,
# underscore, or hyphen then the beginning of the filename
# has been found
if not check.search(line[i]):
key = line[i+1:indx]+ext
if (key not in files_required_by_test_module[modkey]) and (key != ext):
files_required_by_test_module[modkey].append(key)
data_file_lock_status[key] = False
break
# Search for '0123 or "0123
for so in string_quotation_mark:
p = re.compile(so+r"\d{4}")
for m in p.finditer(line):
# Iterate forwards to find the closing quotation mark
for i in range(m.end(),len(line)):
if line[i] == so:
key = line[m.start()+1:i]
if key not in files_required_by_test_module[modkey]:
files_required_by_test_module[modkey].append(key)
data_file_lock_status[key] = False
break
# Search for 0123' or 0123"
for so in string_quotation_mark:
p = re.compile(r"\d{4}"+so)
for m in p.finditer(line):
# Iterate backwards to find the opening quotation mark
for i in range(m.start(),1,-1):
if line[i] == so:
key = line[i+1:m.end()-1]
if key not in files_required_by_test_module[modkey]:
files_required_by_test_module[modkey].append(key)
data_file_lock_status[key] = False
break
if (not self._quiet):
for key in files_required_by_test_module.keys():
print('=' * 45)
print(key)
for s in files_required_by_test_module[key]:
print(s)
return modcounts, modtests, test_stats, files_required_by_test_module, data_file_lock_status
def __shouldTest(self, suite):
if self._testsInclude is not None:
if self._testsInclude not in suite.name:
suite.markAsSkipped("NotIncludedTest")
return False
if self._testsExclude is not None:
if self._testsExclude in suite.name:
suite.markAsSkipped("ExcludedTest")
return False
return True
def executeTests(self, tests_done=None):
# Get the defined tests
for suite in self._tests:
if self.__shouldTest(suite):
suite.execute(self._runner, self._exclude_in_pr_builds)
if suite.status == "success":
self._passedTests += 1
elif suite.status == "skipped":
self._skippedTests += 1
else:
self._failedTests += 1
with tests_done.get_lock():
tests_done.value += 1
if not self._clean:
suite.reportResults(self._reporters, tests_done.value)
self._lastTestRun += 1
def markSkipped(self, reason=None, tests_done_value=0):
for suite in self._tests[self._lastTestRun:]:
suite.setOutputMsg(reason)
# Just let people know you were skipped
suite.reportResults(self._reporters, tests_done_value)
def loadTestsFromDir(self, test_dir):
''' Load all of the tests defined in the given directory'''
entries = os.listdir(test_dir)
tests = []
        regex = re.compile(r'^.*\.py$', re.IGNORECASE)
        for file in entries:
            if regex.match(file) is not None:
tests.extend(self.loadTestsFromModule(os.path.join(test_dir, file)))
return tests
def loadTestsFromModule(self, filename):
'''
        Load test classes from the given file by importing it as a module
        with imp.load_module
'''
modname = os.path.basename(filename)
modname = modname.split('.py')[0]
tests = []
try:
with open(filename, 'r') as pyfile:
mod = imp.load_module(modname, pyfile, filename, ("", "", imp.PY_SOURCE))
mod_attrs = dir(mod)
for key in mod_attrs:
value = getattr(mod, key)
if key is "MantidSystemTest" or not inspect.isclass(value):
continue
if self.isValidTestClass(value):
test_name = key
tests.append(TestSuite(self._runner.getTestDir(), modname, test_name, filename))
except Exception as exc:
print("Error importing module '%s': %s" % (modname, str(exc)))
# Error loading the source, add fake unnamed test so that an error
# will get generated when the tests are run and it will be counted properly
tests.append(TestSuite(self._runner.getTestDir(), modname, None, filename))
return tests
def isValidTestClass(self, class_obj):
"""Returns true if the test is a valid test class. It is valid
        if: the class subclasses MantidSystemTest and has no abstract methods
"""
if not issubclass(class_obj, MantidSystemTest):
return False
        # Check whether the class still defines any unimplemented abstract methods
if hasattr(class_obj, "__abstractmethods__"):
if len(class_obj.__abstractmethods__) == 0:
return True
else:
return False
else:
return True
#########################################################################
# Class to handle the environment
#########################################################################
class MantidFrameworkConfig:
def __init__(self, sourceDir=None,
data_dirs="", save_dir="",
loglevel='information', archivesearch=False):
self.__sourceDir = self.__locateSourceDir(sourceDir)
# add location of system tests
self.__testDir = self.__locateTestsDir()
# add location of the analysis tests
sys.path.insert(0, self.__locateTestsDir())
# setup the rest of the magic directories
self.__saveDir = save_dir
if not os.path.exists(save_dir):
print("Making directory %s to save results" % save_dir)
os.mkdir(save_dir)
else:
if not os.path.isdir(save_dir):
raise RuntimeError("%s is not a directory" % save_dir)
        # assume a string is already semicolon-separated
if type(data_dirs) == str:
self.__dataDirs = data_dirs
self.__dataDirs += ";%s" % self.__saveDir
else:
data_path = ""
data_dirs.append(self.__saveDir)
for direc in data_dirs:
if not os.path.exists(direc):
raise RuntimeError('Directory ' + direc + ' was not found.')
search_dir = direc.replace('\\', '/')
if not search_dir.endswith('/'):
search_dir += '/'
data_path += search_dir + ';'
self.__dataDirs = data_path
# set the log level
self.__loglevel = loglevel
self.__datasearch = archivesearch
def __locateSourceDir(self, suggestion):
if suggestion is None:
loc = os.path.abspath(__file__)
suggestion = os.path.split(loc)[0] # get the directory
loc = os.path.abspath(suggestion)
loc = os.path.normpath(loc)
if os.path.isdir(loc):
return loc
else:
raise RuntimeError("Failed to find source directory")
def __locateTestsDir(self):
loc = os.path.join(self.__sourceDir, "..", "..", "tests", "analysis")
loc = os.path.abspath(loc)
if os.path.isdir(loc):
return loc
else:
raise RuntimeError("Expected the analysis tests directory at '%s' but it is not a directory " % loc)
def __getDataDirsAsString(self):
        return self.__dataDirs
def __moveFile(self, src, dst):
if os.path.exists(src):
shutil.move(src, dst)
def __copyFile(self, src, dst):
if os.path.exists(src):
shutil.copyfile(src, dst)
saveDir = property(lambda self: self.__saveDir)
testDir = property(lambda self: self.__testDir)
dataDir = property(lambda self: self.__dataDirs)
def config(self):
# backup the existing user properties so we can step all over it
self.__userPropsFile = config.getUserFilename()
self.__userPropsFileBackup = self.__userPropsFile + ".bak"
self.__userPropsFileSystest = self.__userPropsFile + ".systest"
self.__moveFile(self.__userPropsFile, self.__userPropsFileBackup)
# Make sure we only save these keys here
config.reset()
# Up the log level so that failures can give useful information
config['logging.loggers.root.level'] = self.__loglevel
# Set the correct search path
config['datasearch.directories'] = self.__dataDirs
# Save path
config['defaultsave.directory'] = self.__saveDir
# Do not show paraview dialog
config['paraview.ignore'] = "1"
# Do not update instrument definitions
config['UpdateInstrumentDefinitions.OnStartup'] = "0"
# Do not perform a version check
config['CheckMantidVersion.OnStartup'] = "0"
# Disable usage reports
config['usagereports.enabled'] = "0"
# Case insensitive
config['filefinder.casesensitive'] = 'Off'
# Maximum number of threads
config['MultiThreaded.MaxCores'] = '2'
# datasearch
if self.__datasearch:
# turn on for 'all' facilities, 'on' is only for default facility
config["datasearch.searcharchive"] = 'all'
config['network.default.timeout'] = '5'
# Save this configuration
config.saveConfig(self.__userPropsFile)
def restoreconfig(self):
self.__moveFile(self.__userPropsFile, self.__userPropsFileSystest)
self.__moveFile(self.__userPropsFileBackup, self.__userPropsFile)
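#########################################################################
# Editor's illustrative sketch (not part of the framework): the intended
# config()/restoreconfig() lifecycle around a test run. The directories
# are hypothetical placeholders and must exist; the sketch also assumes
# it is executed from within the framework's source tree layout.
#########################################################################
def _example_config_lifecycle_sketch():
    mtd_config = MantidFrameworkConfig(save_dir='/tmp/systemtest-output',
                                       data_dirs=['/data/systemtest-data'])
    # Back up the user properties file and switch Mantid to the test settings
    mtd_config.config()
    try:
        pass  # ... execute the tests here ...
    finally:
        # Always restore the user's original properties file
        mtd_config.restoreconfig()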
#########################################################################
# Function to return a string describing the environment
# (platform) of this test.
#########################################################################
def envAsString():
"""Returns a string describing the environment
(platform) of this test."""
if os.name == 'nt':
system = platform.system().lower()[:3]
arch = platform.architecture()[0][:2]
env = system + arch
elif os.name == 'mac':
env = platform.mac_ver()[0]
else:
env = platform.dist()[0] + "-" + platform.dist()[1]
return env
#########################################################################
# Function to keep a pool of threads active in a loop to run the tests.
# Each thread starts a loop and gathers a first test module from the
# master test list which is stored in the tests_dict shared dictionary,
# starting with the number in the module list equal to the process id.
#
# Each process then checks if all the data files required by the current
# test module are available (i.e. have not been locked by another
# thread). If all files are unlocked, the thread proceeds with that test
# module. If not, it goes further down the list until it finds a module
# whose files are all available.
#
# Once it has completed the work in the current module, it checks if the
# number of modules that remain to be executed is greater than 0. If
# there is some work left to do, the thread finds the next module that
# still has not been executed (searches through the tests_lock array
# and finds the next element that has a 0 value). This aims to have all
# threads end calculation approximately at the same time.
#########################################################################
def testThreadsLoop(testDir, saveDir, dataDir, options, tests_dict,
tests_lock, tests_left, res_array, stat_dict,
total_number_of_tests, maximum_name_length,
tests_done, process_number, lock, required_files_dict,
locked_files_dict):
reporter = XmlResultReporter(showSkipped=options.showskipped,
total_number_of_tests=total_number_of_tests,
maximum_name_length=maximum_name_length)
runner = TestRunner(executable=options.executable, exec_args=options.execargs,
escape_quotes=True, clean=options.clean)
# Make sure the status is 1 to begin with as it will be replaced
res_array[process_number + 2*options.ncores] = 1
# Begin loop: as long as there are still some test modules that
# have not been run, keep looping
while (tests_left.value > 0):
# Empty test list
local_test_list = None
# Get the lock to inspect the global list of tests
lock.acquire()
# Run through the list of test modules, starting from the ith
# element where i is the process number.
for i in range(process_number,len(tests_lock)):
# If the lock for this particular module is 0, it means
# this module has not yet been run and it will be chosen
# for this particular loop
if tests_lock[i] == 0:
# Check for the lock status of the required files for this test module
modname = tests_dict[str(i)][0]._modname
no_files_are_locked = True
for f in required_files_dict[tests_dict[str(i)][0]._modname]:
if locked_files_dict[f]:
no_files_are_locked = False
break
                # If all files are available, we can proceed with this module
if no_files_are_locked:
# Lock the data files for this test module
for f in required_files_dict[modname]:
locked_files_dict[f] = True
# Set the current test list to the chosen module
local_test_list = tests_dict[str(i)]
tests_lock[i] = 1
imodule = i
tests_left.value -= 1
break
# Release the lock
lock.release()
# Check if local_test_list exists: if all data was locked,
# then there is no test list
if local_test_list:
if (not options.quiet):
print("##### Thread %2i will execute module: [%3i] %s (%i tests)" \
% (process_number, imodule, modname, len(local_test_list)))
sys.stdout.flush()
# Create a TestManager, giving it a pre-compiled list_of_tests
mgr = TestManager(test_loc=testDir,
runner=runner,
output=[reporter],
quiet=options.quiet,
testsInclude=options.testsInclude,
testsExclude=options.testsExclude,
exclude_in_pr_builds=options.exclude_in_pr_builds,
showSkipped=options.showskipped,
output_on_failure=options.output_on_failure,
process_number=process_number,
ncores=options.ncores,
clean=options.clean,
list_of_tests=local_test_list)
try:
mgr.executeTests(tests_done)
except KeyboardInterrupt:
mgr.markSkipped("KeyboardInterrupt", tests_done.value)
# Update the test results in the array shared across cores
res_array[process_number] += mgr._skippedTests
res_array[process_number + options.ncores] += mgr._failedTests
res_array[process_number + 2*options.ncores] = min(int(reporter.reportStatus()),\
res_array[process_number + 2*options.ncores])
# Delete the TestManager
del mgr
# Unlock the data files
lock.acquire()
for f in required_files_dict[modname]:
locked_files_dict[f] = False
lock.release()
# Report the errors
local_dict = dict()
with open(os.path.join(saveDir, "TEST-systemtests-%i.xml" % process_number),
'w') as xml_report:
xml_report.write(reporter.getResults(local_dict))
for key in local_dict.keys():
stat_dict[key] = local_dict[key]
return
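#########################################################################
# Editor's illustrative sketch (not part of the framework): a stripped
# down, single-process version of the module-selection logic described
# above. The real loop uses multiprocessing shared arrays and a
# cross-process lock; plain Python containers stand in for them here.
#########################################################################
def _pick_next_module_sketch(module_names, tests_lock, required_files,
                             locked_files, start_index=0):
    """Return the index of the first unclaimed module whose required data
    files are all free, claiming the module and its files; None otherwise."""
    for i in range(start_index, len(module_names)):
        if tests_lock[i] != 0:
            continue  # module already claimed by another worker
        modname = module_names[i]
        if any(locked_files.get(f, False) for f in required_files[modname]):
            continue  # a required data file is in use by another worker
        for f in required_files[modname]:
            locked_files[f] = True  # lock this module's data files
        tests_lock[i] = 1           # mark the module as claimed
        return i
    return None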
|
mganeva/mantid
|
Testing/SystemTests/lib/systemtests/systemtesting.py
|
Python
|
gpl-3.0
| 50,994
|
[
"ParaView"
] |
5756056410af62e3cdb69d829fa7f82beba35aaaa3ad57a749b63fd16207d88d
|
# -*- coding: utf-8 -*-
# md_format_converter.Mol2TrajOutput.py
#
# Copyright (C) 2012-2016 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Manages addition of mol2 output information to segments.
"""
################################### MODULES ###################################
from __future__ import absolute_import,division,print_function,unicode_literals
from .TrajOutput import TrajOutput
################################### CLASSES ###################################
class Mol2TrajOutput(TrajOutput):
"""
Manages addition of mol2 output information to segments.
"""
def __init__(self, manual_bonds=False, **kwargs):
"""
Initializes.
Arguments:
manual_bonds (bool): Write bonds to mol2 manually
kwargs (dict): additional keyword arguments
"""
import os
self.manual_bonds = manual_bonds
super(self.__class__, self).__init__(**kwargs)
def receive_segment(self, **kwargs):
"""
Receives a trajectory segment and sends it to each target.
Arguments:
kwargs (dict): Additional keyword arguments
"""
import os
while True:
segment = yield
segment_mol2 = "{0}/{1:04d}/{1:04d}{2}.mol2".format(self.outpath,
int(segment.number), self.suffix)
if not os.path.isfile(segment_mol2) or self.force:
segment.outputs.append(
dict(
format = "mol2",
filename = segment_mol2,
selection = self.selection,
first = 0,
last = 0))
if self.manual_bonds:
segment.outputs[-1]["format"] = "mol2_manual_bonds"
for target in self.targets:
target.send(segment)
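# Illustrative sketch (not part of the original file): receive_segment() is a
# generator-based coroutine, so a caller would prime it with next() before
# sending segments. The names used below (outpath, suffix, selection, force,
# targets, segment) are assumed attributes/objects supplied by the surrounding
# framework and are shown only to clarify the send/yield protocol:
#     out = Mol2TrajOutput(manual_bonds=True, ...)   # '...' = framework kwargs
#     co = out.receive_segment()
#     next(co)          # prime the coroutine up to the first 'yield'
#     co.send(segment)  # queues a mol2 output dict and forwards the segment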
@staticmethod
def add_subparser(level1_subparser, level2_subparsers, level3_classes):
"""
Adds subparser for this input format to nascent parser.
Arguments:
level1_subparser (Subparser): Level 1 subparser to which level
2 subparser will be added
level2_subparsers (Subparsers): Nascent collection of level 2
subparsers to which level 2 subparser will be added
level3_classes (list): Classes for which level 3 subparsers
will be added
Returns:
(*Subparser*, *Subparsers*): New level 2 subparser and
associated collection of level 3 subparsers
"""
level2_subparser = level2_subparsers.add_parser(
name = "mol2",
usage = "convert.py {0} mol2".format(level1_subparser.name),
help = "mol2 output")
setattr(level2_subparser, "name", "mol2")
level3_subparsers = level2_subparser.add_subparsers(
title = "Converter")
for level3_class in level3_classes:
level3_subparser = level3_class.add_subparser(level1_subparser,
level2_subparser, level3_subparsers)
arg_groups = {ag.title: ag
for ag in level3_subparser._action_groups}
if level3_subparser.name == "vmd":
arg_groups["action"].add_argument("--manual-bonds",
action = "store_true",
dest = "manual_bonds",
help = "Write bonds to mol2 manually; useful for "
"topologies in which atoms are not well-ordered")
Mol2TrajOutput.add_shared_args(level3_subparser)
level3_subparser.set_defaults(output_coroutine=Mol2TrajOutput)
return level2_subparser, level3_subparsers
|
KarlTDebiec/md_format_converter
|
Mol2TrajOutput.py
|
Python
|
bsd-3-clause
| 3,820
|
[
"VMD"
] |
84e206d637d1a2ab91cfbeffd559cb4339885156dbd2a800eed24f223cab439c
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from TestHarnessTestCase import TestHarnessTestCase
class TestHarnessTester(TestHarnessTestCase):
def testRequiredObjects(self):
"""
Test that the required_objects check works
"""
output = self.runTests('--no-color', '-i', 'required_objects')
self.assertRegex(output.decode('utf-8'), r'test_harness\.bad_object.*? \[DOESNOTEXIST NOT FOUND IN EXECUTABLE\] SKIP')
self.assertRegex(output.decode('utf-8'), r'test_harness\.good_objects.*? OK')
self.checkStatus(output.decode('utf-8'), passed=1, skipped=1)
|
harterj/moose
|
python/TestHarness/tests/test_RequiredObjects.py
|
Python
|
lgpl-2.1
| 872
|
[
"MOOSE"
] |
61b2761002fbc6bff022dd85e7efddc48271013b8fbeff4c64e06acd5ad0f67d
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
ParmEd topology parser --- :mod:`MDAnalysis.converters.ParmEdParser`
====================================================================
Converts a `ParmEd <https://parmed.github.io/ParmEd/html>`_
:class:`parmed.structure.Structure` into a :class:`MDAnalysis.core.Topology`.
Example
-------
If you want to use an MDAnalysis-written ParmEd structure for simulation
in ParmEd, you need to first read your files with ParmEd to include the
necessary topology parameters. ::
>>> import parmed as pmd
>>> import MDAnalysis as mda
>>> from MDAnalysis.tests.datafiles import PRM7_ala2, RST7_ala2
>>> prm = pmd.load_file(PRM7_ala2, RST7_ala2)
>>> prm
<AmberParm 3026 atoms; 1003 residues; 3025 bonds; PBC (orthogonal); parametrized>
We can then convert this to an MDAnalysis structure, select only the
protein atoms, and then convert it back to ParmEd. ::
>>> u = mda.Universe(prm)
>>> u
<Universe with 3026 atoms>
>>> prot = u.select_atoms('protein')
>>> prm_prot = prot.convert_to('PARMED')
>>> prm_prot
<Structure 23 atoms; 2 residues; 22 bonds; PBC (orthogonal); parametrized>
From here you can create an OpenMM simulation system and minimize the
energy. ::
>>> import openmm as mm
>>> import openmm.app as app
>>> from parmed import unit as u
>>> system = prm_prot.createSystem(nonbondedMethod=app.NoCutoff,
... constraints=app.HBonds,
... implicitSolvent=app.GBn2)
>>> integrator = mm.LangevinIntegrator(
... 300*u.kelvin, # Temperature of heat bath
... 1.0/u.picoseconds, # Friction coefficient
... 2.0*u.femtoseconds, # Time step
... )
>>> sim = app.Simulation(prm_prot.topology, system, integrator)
>>> sim.context.setPositions(prm_prot.positions)
>>> sim.minimizeEnergy(maxIterations=500)
Now you can continue on and run a simulation, if you wish.
Classes
-------
.. autoclass:: ParmEdParser
:members:
:inherited-members:
.. versionchanged:: 2.0.0
The ParmEdParser class was moved from :mod:`~MDAnalysis.topology` to
:mod:`~MDAnalysis.converters`
"""
import logging
import numpy as np
from ..topology.base import TopologyReaderBase, change_squash
from ..topology.tables import Z2SYMB
from ..core.topologyattrs import (
Atomids,
Atomnames,
AltLocs,
ChainIDs,
Atomtypes,
Occupancies,
Tempfactors,
Elements,
Masses,
Charges,
Resids,
Resnums,
Resnames,
Segids,
GBScreens,
SolventRadii,
NonbondedIndices,
RMins,
Epsilons,
RMin14s,
Epsilon14s,
Bonds,
UreyBradleys,
Angles,
Dihedrals,
Impropers,
CMaps
)
from ..core.topology import Topology
logger = logging.getLogger("MDAnalysis.converters.ParmEdParser")
def squash_identical(values):
if len(values) == 1:
return values[0]
else:
return tuple(values)
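# Illustrative note (not in the original source): squash_identical(['HC'])
# returns 'HC', while squash_identical(['HC', 'HA']) returns ('HC', 'HA');
# it is used below to collapse lists of duplicate bond/angle/dihedral types.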
class ParmEdParser(TopologyReaderBase):
"""
For ParmEd structures
"""
format = 'PARMED'
@staticmethod
def _format_hint(thing):
"""Can this Parser read object *thing*?
.. versionadded:: 1.0.0
"""
try:
import parmed as pmd
except ImportError: # if no parmed, probably not parmed
return False
else:
return isinstance(thing, pmd.Structure)
def parse(self, **kwargs):
"""Parse PARMED into Topology
Returns
-------
MDAnalysis *Topology* object
.. versionchanged:: 2.0.0
Elements are no longer guessed; if an element in the parmed object is
not recognised (usually indicated by an atomic mass of 0), it is
assigned an empty string.
"""
structure = self.filename
#### === ATOMS === ####
names = []
masses = []
charges = []
types = []
atomic_numbers = []
serials = []
resnames = []
resids = []
chainids = []
segids = []
altLocs = []
bfactors = []
occupancies = []
screens = []
solvent_radii = []
nonbonded_indices = []
rmins = []
epsilons = []
rmin14s = []
epsilon14s = []
for atom in structure.atoms:
names.append(atom.name)
masses.append(atom.mass)
charges.append(atom.charge)
types.append(atom.type)
atomic_numbers.append(atom.atomic_number)
serials.append(atom.number)
resnames.append(atom.residue.name)
resids.append(atom.residue.number)
chainids.append(atom.residue.chain)
segids.append(atom.residue.segid)
altLocs.append(atom.altloc)
bfactors.append(atom.bfactor)
occupancies.append(atom.occupancy)
screens.append(atom.screen)
solvent_radii.append(atom.solvent_radius)
nonbonded_indices.append(atom.nb_idx)
rmins.append(atom.rmin)
epsilons.append(atom.epsilon)
rmin14s.append(atom.rmin_14)
epsilon14s.append(atom.epsilon_14)
attrs = []
n_atoms = len(names)
elements = []
for z, name in zip(atomic_numbers, names):
try:
elements.append(Z2SYMB[z])
except KeyError:
elements.append('')
# Make Atom TopologyAttrs
for vals, Attr, dtype in (
(names, Atomnames, object),
(masses, Masses, np.float32),
(charges, Charges, np.float32),
(types, Atomtypes, object),
(elements, Elements, object),
(serials, Atomids, np.int32),
(chainids, ChainIDs, object),
(altLocs, AltLocs, object),
(bfactors, Tempfactors, np.float32),
(occupancies, Occupancies, np.float32),
(screens, GBScreens, np.float32),
(solvent_radii, SolventRadii, np.float32),
(nonbonded_indices, NonbondedIndices, np.int32),
(rmins, RMins, np.float32),
(epsilons, Epsilons, np.float32),
(rmin14s, RMin14s, np.float32),
(epsilon14s, Epsilon14s, np.float32),
):
attrs.append(Attr(np.array(vals, dtype=dtype)))
resids = np.array(resids, dtype=np.int32)
resnames = np.array(resnames, dtype=object)
chainids = np.array(chainids, dtype=object)
segids = np.array(segids, dtype=object)
residx, (resids, resnames, chainids, segids) = change_squash(
(resids, resnames, chainids, segids),
(resids, resnames, chainids, segids))
n_residues = len(resids)
attrs.append(Resids(resids))
attrs.append(Resnums(resids.copy()))
attrs.append(Resnames(resnames))
segidx, (segids,) = change_squash((segids,), (segids,))
n_segments = len(segids)
attrs.append(Segids(segids))
#### === OTHERS === ####
bond_values = {}
bond_types = []
bond_orders = []
ub_values = {}
ub_types = []
angle_values = {}
angle_types = []
dihedral_values = {}
dihedral_types = []
improper_values = {}
improper_types = []
cmap_values = {}
cmap_types = []
for bond in structure.bonds:
idx = (bond.atom1.idx, bond.atom2.idx)
if idx not in bond_values:
bond_values[idx] = ([bond], [bond.order])
else:
bond_values[idx][0].append(bond)
bond_values[idx][1].append(bond.order)
try:
bond_values, values = zip(*list(bond_values.items()))
except ValueError:
bond_values, bond_types, bond_orders = [], [], []
else:
bond_types, bond_orders = zip(*values)
bond_types = list(map(squash_identical, bond_types))
bond_orders = list(map(squash_identical, bond_orders))
attrs.append(Bonds(bond_values, types=bond_types, guessed=False,
order=bond_orders))
for pmdlist, na, values, types in (
(structure.urey_bradleys, 2, ub_values, ub_types),
(structure.angles, 3, angle_values, angle_types),
(structure.dihedrals, 4, dihedral_values, dihedral_types),
(structure.impropers, 4, improper_values, improper_types),
(structure.cmaps, 5, cmap_values, cmap_types),
):
for p in pmdlist:
atoms = ['atom{}'.format(i) for i in range(1, na+1)]
idx = tuple(getattr(p, a).idx for a in atoms)
if idx not in values:
values[idx] = [p]
else:
values[idx].append(p)
for dct, Attr in (
(ub_values, UreyBradleys),
(angle_values, Angles),
(dihedral_values, Dihedrals),
(improper_values, Impropers),
(cmap_values, CMaps),
):
try:
vals, types = zip(*list(dct.items()))
except ValueError:
vals, types = [], []
types = list(map(squash_identical, types))
attrs.append(Attr(vals, types=types, guessed=False, order=None))
top = Topology(n_atoms, n_residues, n_segments,
attrs=attrs,
atom_resindex=residx,
residue_segindex=segidx)
return top
|
MDAnalysis/mdanalysis
|
package/MDAnalysis/converters/ParmEdParser.py
|
Python
|
gpl-2.0
| 10,806
|
[
"MDAnalysis",
"OpenMM"
] |
1824f7a72591f59479457b049ee1c0d72a94c679a74bdc1994ab175a03406316
|
# pyenchant
#
# Copyright (C) 2004-2011, Ryan Kelly
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
# In addition, as a special exception, you are
# given permission to link the code of this program with
# non-LGPL Spelling Provider libraries (eg: a MSFT Office
# spell checker backend) and distribute linked combinations including
# the two. You must obey the GNU Lesser General Public License in all
# respects for all of the code used other than said providers. If you modify
# this file, you may extend this exception to your version of the
# file, but you are not obligated to do so. If you do not wish to
# do so, delete this exception statement from your version.
#
"""
enchant: Access to the enchant spellchecking library
=====================================================
This module provides several classes for performing spell checking
via the Enchant spellchecking library. For more details on Enchant,
visit the project website:
http://www.abisource.com/enchant/
Spellchecking is performed using 'Dict' objects, which represent
a language dictionary. Their use is best demonstrated by a quick
example::
>>> import enchant
>>> d = enchant.Dict("en_US") # create dictionary for US English
>>> d.check("enchant")
True
>>> d.check("enchnt")
False
>>> d.suggest("enchnt")
['enchant', 'enchants', 'enchanter', 'penchant', 'incant', 'enchain', 'enchanted']
Languages are identified by standard string tags such as "en" (English)
and "fr" (French). Specific language dialects can be specified by
including an additional code - for example, "en_AU" refers to Australian
English. The latter form is preferred as it is more widely supported.
To check whether a dictionary exists for a given language, the function
'dict_exists' is available. Dictionaries may also be created using the
function 'request_dict'.
A finer degree of control over the dictionaries and how they are created
can be obtained using one or more 'Broker' objects. These objects are
responsible for locating dictionaries for a specific language.
In Python 2.x, unicode strings are supported transparently in the
standard manner - if a unicode string is given as an argument, the
result will be a unicode string. Note that Enchant works in UTF-8
internally, so passing an ASCII string to a dictionary for a language
requiring Unicode may result in UTF-8 strings being returned.
In Python 3.x unicode strings are expected throughout. Bytestrings
should not be passed into any functions.
Errors that occur in this module are reported by raising subclasses
of 'Error'.
"""
_DOC_ERRORS = ['enchnt','enchnt','incant','fr']
# Make version info available
__ver_major__ = 1
__ver_minor__ = 6
__ver_patch__ = 6
__ver_sub__ = ""
__version__ = "%d.%d.%d%s" % (__ver_major__,__ver_minor__,
__ver_patch__,__ver_sub__)
import os
try:
from enchant import _enchant as _e
except ImportError:
if not os.environ.get("PYENCHANT_IGNORE_MISSING_LIB",False):
raise
_e = None
from enchant.errors import *
from enchant.utils import EnchantStr, get_default_language, UTF16EnchantStr
from enchant.pypwl import PyPWL
# Due to the unfortunate name collision between the enchant "tokenize" module
# and the stdlib "tokenize" module, certain values of sys.path can cause
# the former to override the latter and break the "warnings" module.
# This hacks around it by making a dummy "warnings" module.
try:
import warnings
except ImportError:
class warnings(object):
def warn(self,*args,**kwds):
pass
warnings = warnings()
class ProviderDesc(object):
"""Simple class describing an Enchant provider.
Each provider has the following information associated with it:
* name: Internal provider name (e.g. "aspell")
* desc: Human-readable description (e.g. "Aspell Provider")
* file: Location of the library containing the provider
"""
_DOC_ERRORS = ["desc"]
def __init__(self,name,desc,file):
self.name = name
self.desc = desc
self.file = file
def __str__(self):
return "<Enchant: %s>" % self.desc
def __repr__(self):
return str(self)
def __eq__(self,pd):
"""Equality operator on ProviderDesc objects."""
return (self.name == pd.name and \
self.desc == pd.desc and \
self.file == pd.file)
def __hash__(self):
"""Hash operator on ProviderDesc objects."""
return hash(self.name + self.desc + self.file)
class _EnchantObject(object):
"""Base class for enchant objects.
This class implements some general functionality for interfacing with
the '_enchant' C-library in a consistent way. All public objects
from the 'enchant' module are subclasses of this class.
All enchant objects have an attribute '_this' which contains the
pointer to the underlying C-library object. The method '_check_this'
can be called to ensure that this pointer is not None, raising an
exception if it is.
"""
def __init__(self):
"""_EnchantObject constructor."""
self._this = None
# To be importable when enchant C lib is missing, we need
# to create a dummy default broker.
if _e is not None:
self._init_this()
def _check_this(self,msg=None):
"""Check that self._this is set to a pointer, rather than None."""
if self._this is None:
if msg is None:
msg = "%s unusable: the underlying C-library object has been freed."
msg = msg % (self.__class__.__name__,)
raise Error(msg)
def _init_this(self):
"""Initialise the underlying C-library object pointer."""
raise NotImplementedError
def _raise_error(self,default="Unspecified Error",eclass=Error):
"""Raise an exception based on available error messages.
This method causes an Error to be raised. Subclasses should
override it to retrieve an error indication from the underlying
API if possible. If such a message cannot be retrieved, the
argument value <default> is used. The class of the exception
can be specified using the argument <eclass>
"""
raise eclass(default)
_raise_error._DOC_ERRORS = ["eclass"]
def __getstate__(self):
"""Customize pickling of PyEnchant objects.
Since it's not safe for multiple objects to share the same C-library
object, we make sure it's unset when pickling.
"""
state = self.__dict__.copy()
state["_this"] = None
return state
def __setstate__(self,state):
self.__dict__.update(state)
self._init_this()
class Broker(_EnchantObject):
"""Broker object for the Enchant spellchecker.
Broker objects are responsible for locating and managing dictionaries.
Unless custom functionality is required, there is no need to use Broker
objects directly. The 'enchant' module provides a default broker object
so that 'Dict' objects can be created directly.
The most important methods of this class include:
* dict_exists: check existence of a specific language dictionary
* request_dict: obtain a dictionary for specific language
* set_ordering: specify which dictionaries to try for a
given language.
"""
def __init__(self):
"""Broker object constructor.
This method is the constructor for the 'Broker' object. No
arguments are required.
"""
_EnchantObject.__init__(self)
def _init_this(self):
self._this = _e.broker_init()
if not self._this:
raise Error("Could not initialise an enchant broker.")
self._live_dicts = {}
def __del__(self):
"""Broker object destructor."""
if _e is not None:
self._free()
def __getstate__(self):
state = super(Broker,self).__getstate__()
state.pop("_live_dicts")
return state
def _raise_error(self,default="Unspecified Error",eclass=Error):
"""Overrides _EnchantObject._raise_error to check broker errors."""
err = _e.broker_get_error(self._this)
if err == "" or err is None:
raise eclass(default)
raise eclass(err)
def _free(self):
"""Free system resource associated with a Broker object.
This method can be called to free the underlying system resources
associated with a Broker object. It is called automatically when
the object is garbage collected. If called explicitly, the
Broker and any associated Dict objects must no longer be used.
"""
if self._this is not None:
# During shutdown, this finalizer may be called before
# some Dict finalizers. Ensure all pointers are freed.
for (dict,count) in list(self._live_dicts.items()):
while count:
self._free_dict_data(dict)
count -= 1
_e.broker_free(self._this)
self._this = None
def request_dict(self,tag=None):
"""Request a Dict object for the language specified by <tag>.
This method constructs and returns a Dict object for the
requested language. 'tag' should be a string of the appropriate
form for specifying a language, such as "fr" (French) or "en_AU"
(Australian English). The existence of a specific language can
be tested using the 'dict_exists' method.
If <tag> is not given or is None, an attempt is made to determine
the current language in use. If this cannot be determined, Error
is raised.
NOTE: this method is functionally equivalent to calling the Dict()
constructor and passing in the <broker> argument.
"""
return Dict(tag,self)
request_dict._DOC_ERRORS = ["fr"]
def _request_dict_data(self,tag):
"""Request raw C pointer data for a dictionary.
This method call passes on the call to the C library, and does
some internal bookkeeping.
"""
self._check_this()
tag = EnchantStr(tag)
new_dict = _e.broker_request_dict(self._this,tag.encode())
if new_dict is None:
eStr = "Dictionary for language '%s' could not be found"
self._raise_error(eStr % (tag,),DictNotFoundError)
if new_dict not in self._live_dicts:
self._live_dicts[new_dict] = 1
else:
self._live_dicts[new_dict] += 1
return new_dict
def request_pwl_dict(self,pwl):
"""Request a Dict object for a personal word list.
This method behaves as 'request_dict' but rather than returning
a dictionary for a specific language, it returns a dictionary
referencing a personal word list. A personal word list is a file
of custom dictionary entries, one word per line.
"""
self._check_this()
pwl = EnchantStr(pwl)
new_dict = _e.broker_request_pwl_dict(self._this,pwl.encode())
if new_dict is None:
eStr = "Personal Word List file '%s' could not be loaded"
self._raise_error(eStr % (pwl,))
if new_dict not in self._live_dicts:
self._live_dicts[new_dict] = 1
else:
self._live_dicts[new_dict] += 1
d = Dict(False)
d._switch_this(new_dict,self)
return d
def _free_dict(self,dict):
"""Free memory associated with a dictionary.
This method frees system resources associated with a Dict object.
It is equivalent to calling the object's 'free' method. Once this
method has been called on a dictionary, it must not be used again.
"""
self._free_dict_data(dict._this)
dict._this = None
dict._broker = None
def _free_dict_data(self,dict):
"""Free the underlying pointer for a dict."""
self._check_this()
_e.broker_free_dict(self._this,dict)
self._live_dicts[dict] -= 1
if self._live_dicts[dict] == 0:
del self._live_dicts[dict]
def dict_exists(self,tag):
"""Check availability of a dictionary.
This method checks whether there is a dictionary available for
the language specified by 'tag'. It returns True if a dictionary
is available, and False otherwise.
"""
self._check_this()
tag = EnchantStr(tag)
val = _e.broker_dict_exists(self._this,tag.encode())
return bool(val)
def set_ordering(self,tag,ordering):
"""Set dictionary preferences for a language.
The Enchant library supports the use of multiple dictionary programs
and multiple languages. This method specifies which dictionaries
the broker should prefer when dealing with a given language. 'tag'
must be an appropriate language specification and 'ordering' is a
string listing the dictionaries in order of preference. For example
a valid ordering might be "aspell,myspell,ispell".
The value of 'tag' can also be set to "*" to set a default ordering
for all languages for which one has not been set explicitly.
"""
self._check_this()
tag = EnchantStr(tag)
ordering = EnchantStr(ordering)
_e.broker_set_ordering(self._this,tag.encode(),ordering.encode())
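# Hedged example (illustrative only; which providers exist depends on the
# local Enchant installation):
#     b = Broker()
#     b.set_ordering("en_US", "aspell,myspell")  # prefer aspell for en_US
#     b.set_ordering("*", "myspell,aspell")      # default for other languages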
def describe(self):
"""Return list of provider descriptions.
This method returns a list of descriptions of each of the
dictionary providers available. Each entry in the list is a
ProviderDesc object.
"""
self._check_this()
self.__describe_result = []
_e.broker_describe(self._this,self.__describe_callback)
return [ ProviderDesc(*r) for r in self.__describe_result]
def __describe_callback(self,name,desc,file):
"""Collector callback for dictionary description.
This method is used as a callback into the _enchant function
'enchant_broker_describe'. It collects the given arguments in
a tuple and appends them to the list '__describe_result'.
"""
s = EnchantStr("")
name = s.decode(name)
desc = s.decode(desc)
file = s.decode(file)
self.__describe_result.append((name,desc,file))
def list_dicts(self):
"""Return list of available dictionaries.
This method returns a list of dictionaries available to the
broker. Each entry in the list is a two-tuple of the form:
(tag,provider)
where <tag> is the language tag for the dictionary and
<provider> is a ProviderDesc object describing the provider
through which that dictionary can be obtained.
"""
self._check_this()
self.__list_dicts_result = []
_e.broker_list_dicts(self._this,self.__list_dicts_callback)
return [ (r[0],ProviderDesc(*r[1])) for r in self.__list_dicts_result]
def __list_dicts_callback(self,tag,name,desc,file):
"""Collector callback for listing dictionaries.
This method is used as a callback into the _enchant function
'enchant_broker_list_dicts'. It collects the given arguments into
an appropriate tuple and appends them to '__list_dicts_result'.
"""
s = EnchantStr("")
tag = s.decode(tag)
name = s.decode(name)
desc = s.decode(desc)
file = s.decode(file)
self.__list_dicts_result.append((tag,(name,desc,file)))
def list_languages(self):
"""List languages for which dictionaries are available.
This function returns a list of language tags for which a
dictionary is available.
"""
langs = []
for (tag,prov) in self.list_dicts():
if tag not in langs:
langs.append(tag)
return langs
def __describe_dict(self,dict_data):
"""Get the description tuple for a dict data object.
<dict_data> must be a C-library pointer to an enchant dictionary.
The return value is a tuple of the form:
(<tag>,<name>,<desc>,<file>)
"""
# Define local callback function
cb_result = []
def cb_func(tag,name,desc,file):
s = EnchantStr("")
tag = s.decode(tag)
name = s.decode(name)
desc = s.decode(desc)
file = s.decode(file)
cb_result.append((tag,name,desc,file))
# Actually call the describer function
_e.dict_describe(dict_data,cb_func)
return cb_result[0]
__describe_dict._DOC_ERRORS = ["desc"]
def get_param(self,name):
"""Get the value of a named parameter on this broker.
Parameters are used to provide runtime information to individual
provider backends. See the method 'set_param' for more details.
"""
name = EnchantStr(name)
return name.decode(_e.broker_get_param(self._this,name.encode()))
get_param._DOC_ERRORS = ["param"]
def set_param(self,name,value):
"""Set the value of a named parameter on this broker.
Parameters are used to provide runtime information to individual
provider backends. For example, the myspell provider will search
any directories given in the "enchant.myspell.dictionary.path"
parameter when looking for its dictionary files.
"""
name = EnchantStr(name)
value = EnchantStr(value)
_e.broker_set_param(self._this,name.encode(),value.encode())
class Dict(_EnchantObject):
"""Dictionary object for the Enchant spellchecker.
Dictionary objects are responsible for checking the spelling of words
and suggesting possible corrections. Each dictionary is owned by a
Broker object, but unless a new Broker has explicitly been created
then this will be the 'enchant' module default Broker and is of little
interest.
The important methods of this class include:
* check(): check whether a word is spelled correctly
* suggest(): suggest correct spellings for a word
* add(): add a word to the user's personal dictionary
* remove(): add a word to the user's personal exclude list
* add_to_session(): add a word to the current spellcheck session
* store_replacement(): indicate a replacement for a given word
Information about the dictionary is available using the following
attributes:
* tag: the language tag of the dictionary
* provider: a ProviderDesc object for the dictionary provider
"""
def __init__(self,tag=None,broker=None):
"""Dict object constructor.
A dictionary belongs to a specific language, identified by the
string <tag>. If the tag is not given or is None, an attempt to
determine the language currently in use is made using the 'locale'
module. If the current language cannot be determined, Error is raised.
If <tag> is instead given the value of False, a 'dead' Dict object
is created without any reference to a language. This is typically
only useful within PyEnchant itself. Any other non-string value
for <tag> raises Error.
Each dictionary must also have an associated Broker object which
obtains the dictionary information from the underlying system. This
may be specified using <broker>. If not given, the default broker
is used.
"""
# Initialise misc object attributes to None
self.provider = None
# If no tag was given, use the default language
if tag is None:
tag = get_default_language()
if tag is None:
err = "No tag specified and default language could not "
err = err + "be determined."
raise Error(err)
self.tag = tag
# If no broker was given, use the default broker
if broker is None:
broker = _broker
self._broker = broker
# Now let the superclass initialise the C-library object
_EnchantObject.__init__(self)
def _init_this(self):
# Create dead object if False was given as the tag.
# Otherwise, use the broker to get C-library pointer data.
self._this = None
if self.tag:
this = self._broker._request_dict_data(self.tag)
self._switch_this(this,self._broker)
def __del__(self):
"""Dict object destructor."""
# Calling free() might fail if python is shutting down
try:
self._free()
except AttributeError:
pass
def _switch_this(self,this,broker):
"""Switch the underlying C-library pointer for this object.
As all useful state for a Dict is stored by the underlying C-library
pointer, it is very convenient to allow this to be switched at
run-time. Pass a new dict data object into this method to affect
the necessary changes. The creating Broker object (at the Python
level) must also be provided.
This should *never* *ever* be used by application code. It's
a convenience for developers only, replacing the clunkier <data>
parameter to __init__ from earlier versions.
"""
# Free old dict data
Dict._free(self)
# Hook in the new stuff
self._this = this
self._broker = broker
# Update object properties
desc = self.__describe(check_this=False)
self.tag = desc[0]
self.provider = ProviderDesc(*desc[1:])
if self.provider.name == "myspell":
self._StringClass = UTF16EnchantStr
else:
self._StringClass = EnchantStr
_switch_this._DOC_ERRORS = ["init"]
def _check_this(self,msg=None):
"""Extend _EnchantObject._check_this() to check Broker validity.
It is possible for the managing Broker object to be freed without
freeing the Dict. Thus validity checking must take into account
self._broker._this as well as self._this.
"""
if self._broker is None or self._broker._this is None:
self._this = None
_EnchantObject._check_this(self,msg)
def _raise_error(self,default="Unspecified Error",eclass=Error):
"""Overrides _EnchantObject._raise_error to check dict errors."""
err = _e.dict_get_error(self._this)
if err == "" or err is None:
raise eclass(default)
raise eclass(err)
def _free(self):
"""Free the system resources associated with a Dict object.
This method frees underlying system resources for a Dict object.
Once it has been called, the Dict object must no longer be used.
It is called automatically when the object is garbage collected.
"""
if self._this is not None:
# The broker may have been freed before the dict.
# It will have freed the underlying pointers already.
if self._broker is not None and self._broker._this is not None:
self._broker._free_dict(self)
def check(self,word):
"""Check spelling of a word.
This method takes a word in the dictionary language and returns
True if it is correctly spelled, and false otherwise.
"""
self._check_this()
word = self._StringClass(word)
# Enchant asserts that the word is non-empty.
# Check it up-front to avoid nasty warnings on stderr.
if len(word) == 0:
raise ValueError("can't check spelling of empty string")
val = _e.dict_check(self._this,word.encode())
if val == 0:
return True
if val > 0:
return False
self._raise_error()
def suggest(self,word):
"""Suggest possible spellings for a word.
This method tries to guess the correct spelling for a given
word, returning the possibilities in a list.
"""
self._check_this()
word = self._StringClass(word)
# Enchant asserts that the word is non-empty.
# Check it up-front to avoid nasty warnings on stderr.
if len(word) == 0:
raise ValueError("can't suggest spellings for empty string")
suggs = _e.dict_suggest(self._this,word.encode())
return [word.decode(w) for w in suggs]
def add(self,word):
"""Add a word to the user's personal word list."""
self._check_this()
word = self._StringClass(word)
_e.dict_add(self._this,word.encode())
def remove(self,word):
"""Add a word to the user's personal exclude list."""
self._check_this()
word = self._StringClass(word)
_e.dict_remove(self._this,word.encode())
def add_to_pwl(self,word):
"""Add a word to the user's personal word list."""
warnings.warn("Dict.add_to_pwl is deprecated, please use Dict.add",
category=DeprecationWarning,stacklevel=2)
self._check_this()
word = self._StringClass(word)
_e.dict_add_to_pwl(self._this,word.encode())
def add_to_session(self,word):
"""Add a word to the session personal list."""
self._check_this()
word = self._StringClass(word)
_e.dict_add_to_session(self._this,word.encode())
def remove_from_session(self,word):
"""Add a word to the session exclude list."""
self._check_this()
word = self._StringClass(word)
_e.dict_remove_from_session(self._this,word.encode())
def is_added(self,word):
"""Check whether a word is in the personal word list."""
self._check_this()
word = self._StringClass(word)
return _e.dict_is_added(self._this,word.encode())
def is_removed(self,word):
"""Check whether a word is in the personal exclude list."""
self._check_this()
word = self._StringClass(word)
return _e.dict_is_removed(self._this,word.encode())
def is_in_session(self,word):
"""Check whether a word is in the session list."""
warnings.warn("Dict.is_in_session is deprecated, "\
"please use Dict.is_added",
category=DeprecationWarning,stacklevel=2)
self._check_this()
word = self._StringClass(word)
return _e.dict_is_in_session(self._this,word.encode())
def store_replacement(self,mis,cor):
"""Store a replacement spelling for a miss-spelled word.
This method makes a suggestion to the spellchecking engine that the
misspelled word <mis> is in fact correctly spelled as <cor>. Such
a suggestion will typically mean that <cor> appears early in the
list of suggested spellings offered for later instances of <mis>.
"""
if not mis:
raise ValueError("can't store replacement for an empty string")
if not cor:
raise ValueError("can't store empty string as a replacement")
self._check_this()
mis = self._StringClass(mis)
cor = self._StringClass(cor)
_e.dict_store_replacement(self._this,mis.encode(),cor.encode())
store_replacement._DOC_ERRORS = ["mis","mis"]
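# Illustrative sketch (not part of the original docs): after storing a
# replacement, later suggestions for the same misspelling tend to rank the
# stored correction first. The word values here are arbitrary examples:
#     d = Dict("en_US")
#     d.store_replacement("teh", "the")
#     d.suggest("teh")   # 'the' is now likely to appear early in the list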
def __describe(self,check_this=True):
"""Return a tuple describing the dictionary.
This method returns a four-element tuple describing the underlying
spellchecker system providing the dictionary. It will contain the
following strings:
* language tag
* name of dictionary provider
* description of dictionary provider
* dictionary file
Direct use of this method is not recommended - instead, access this
information through the 'tag' and 'provider' attributes.
"""
if check_this:
self._check_this()
_e.dict_describe(self._this,self.__describe_callback)
return self.__describe_result
def __describe_callback(self,tag,name,desc,file):
"""Collector callback for dictionary description.
This method is used as a callback into the _enchant function
'enchant_dict_describe'. It collects the given arguments in
a tuple and stores them in the attribute '__describe_result'.
"""
s = EnchantStr("")
tag = s.decode(tag)
name = s.decode(name)
desc = s.decode(desc)
file = s.decode(file)
self.__describe_result = (tag,name,desc,file)
class DictWithPWL(Dict):
"""Dictionary with separately-managed personal word list.
NOTE: As of version 1.4.0, enchant manages a per-user pwl and
exclude list. This class is now only needed if you want
to explicitly maintain a separate word list in addition to
the default one.
This class behaves as the standard Dict class, but also manages a
personal word list stored in a separate file. The file must be
specified at creation time by the 'pwl' argument to the constructor.
Words added to the dictionary are automatically appended to the pwl file.
A personal exclude list can also be managed, by passing another filename
to the constructor in the optional 'pel' argument. If this is not given,
requests to exclude words are ignored.
If either 'pwl' or 'pel' are None, an in-memory word list is used.
This will prevent calls to add() and remove() from affecting the user's
default word lists.
The Dict object managing the PWL is available as the 'pwl' attribute.
The Dict object managing the PEL is available as the 'pel' attribute.
To create a DictWithPWL from the user's default language, use None
as the 'tag' argument.
"""
_DOC_ERRORS = ["pel","pel","PEL","pel"]
def __init__(self,tag,pwl=None,pel=None,broker=None):
"""DictWithPWL constructor.
The argument 'pwl', if not None, names a file containing the
personal word list. If this file does not exist, it is created
with default permissions.
The argument 'pel', if not None, names a file containing the personal
exclude list. If this file does not exist, it is created with
default permissions.
"""
Dict.__init__(self,tag,broker)
if pwl is not None:
if not os.path.exists(pwl):
f = open(pwl,"wt")
f.close()
del f
self.pwl = self._broker.request_pwl_dict(pwl)
else:
self.pwl = PyPWL()
if pel is not None:
if not os.path.exists(pel):
f = open(pel,"wt")
f.close()
del f
self.pel = self._broker.request_pwl_dict(pel)
else:
self.pel = PyPWL()
def _check_this(self,msg=None):
"""Extend Dict._check_this() to check PWL validity."""
if self.pwl is None:
self._free()
if self.pel is None:
self._free()
Dict._check_this(self,msg)
self.pwl._check_this(msg)
self.pel._check_this(msg)
def _free(self):
"""Extend Dict._free() to free the PWL as well."""
if self.pwl is not None:
self.pwl._free()
self.pwl = None
if self.pel is not None:
self.pel._free()
self.pel = None
Dict._free(self)
def check(self,word):
"""Check spelling of a word.
This method takes a word in the dictionary language and returns
True if it is correctly spelled, and false otherwise. It checks
both the dictionary and the personal word list.
"""
if self.pel.check(word):
return False
if self.pwl.check(word):
return True
if Dict.check(self,word):
return True
return False
def suggest(self,word):
"""Suggest possible spellings for a word.
This method tries to guess the correct spelling for a given
word, returning the possibilities in a list.
"""
suggs = Dict.suggest(self,word)
suggs.extend([w for w in self.pwl.suggest(word) if w not in suggs])
for i in range(len(suggs)-1,-1,-1):
if self.pel.check(suggs[i]):
del suggs[i]
return suggs
def add(self,word):
"""Add a word to the associated personal word list.
This method adds the given word to the personal word list, and
automatically saves the list to disk.
"""
self._check_this()
self.pwl.add(word)
self.pel.remove(word)
def remove(self,word):
"""Add a word to the associated exclude list."""
self._check_this()
self.pwl.remove(word)
self.pel.add(word)
def add_to_pwl(self,word):
"""Add a word to the associated personal word list.
This method adds the given word to the personal word list, and
automatically saves the list to disk.
"""
self._check_this()
self.pwl.add_to_pwl(word)
self.pel.remove(word)
def is_added(self,word):
"""Check whether a word is in the personal word list."""
self._check_this()
return self.pwl.is_added(word)
def is_removed(self,word):
"""Check whether a word is in the personal exclude list."""
self._check_this()
return self.pel.is_added(word)
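# Hedged usage sketch for DictWithPWL (the file names below are hypothetical
# and would be created with default permissions if missing):
#     d = DictWithPWL("en_US", pwl="mywords.txt", pel="excluded.txt")
#     d.add("pyenchant")      # appended to mywords.txt
#     d.check("pyenchant")    # True, found via the personal word list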
## Create a module-level default broker object, and make its important
## methods available at the module level.
_broker = Broker()
request_dict = _broker.request_dict
request_pwl_dict = _broker.request_pwl_dict
dict_exists = _broker.dict_exists
list_dicts = _broker.list_dicts
list_languages = _broker.list_languages
get_param = _broker.get_param
set_param = _broker.set_param
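# Minimal sketch of the module-level convenience API exposed above (results
# depend on which Enchant providers and dictionaries are installed):
#     import enchant
#     enchant.dict_exists("en_US")        # True if a dictionary is available
#     d = enchant.request_dict("en_US")   # same as enchant.Dict("en_US")
#     enchant.list_languages()            # e.g. ['en_US', 'en_GB', ...]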
# Expose the "get_version" function.
def get_enchant_version():
"""Get the version string for the underlying enchant library."""
return _e.get_version()
# Run unit tests when called from command-line
if __name__ == "__main__":
import sys
import enchant.tests
res = enchant.tests.runtestsuite()
if len(res.errors) > 0 or len(res.failures) > 0:
sys.exit(1)
sys.exit(0)
|
halfak/pyenchant
|
enchant/__init__.py
|
Python
|
lgpl-2.1
| 35,236
|
[
"VisIt"
] |
5309d5ca2cc87c5dcd4538f8d42a578fe2ea3d92d70af2faf9ba119cc93fee46
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Discogs album search support to the autotagger. Requires the
discogs-client library.
"""
from __future__ import division, absolute_import, print_function
import beets.ui
from beets import logging
from beets import config
from beets.autotag.hooks import AlbumInfo, TrackInfo, Distance
from beets.plugins import BeetsPlugin
from beets.util import confit
from discogs_client import Release, Client
from discogs_client.exceptions import DiscogsAPIError
from requests.exceptions import ConnectionError
from six.moves import http_client
import beets
import re
import time
import json
import socket
import os
# Silence spurious INFO log lines generated by urllib3.
urllib3_logger = logging.getLogger('requests.packages.urllib3')
urllib3_logger.setLevel(logging.CRITICAL)
USER_AGENT = u'beets/{0} +http://beets.io/'.format(beets.__version__)
# Exceptions that discogs_client should really handle but does not.
CONNECTION_ERRORS = (ConnectionError, socket.error, http_client.HTTPException,
ValueError, # JSON decoding raises a ValueError.
DiscogsAPIError)
class DiscogsPlugin(BeetsPlugin):
def __init__(self):
super(DiscogsPlugin, self).__init__()
self.config.add({
'apikey': 'rAzVUQYRaoFjeBjyWuWZ',
'apisecret': 'plxtUTqoCzwxZpqdPysCwGuBSmZNdZVy',
'tokenfile': 'discogs_token.json',
'source_weight': 0.5,
})
self.config['apikey'].redact = True
self.config['apisecret'].redact = True
self.discogs_client = None
self.register_listener('import_begin', self.setup)
def setup(self, session=None):
"""Create the `discogs_client` field. Authenticate if necessary.
"""
c_key = self.config['apikey'].as_str()
c_secret = self.config['apisecret'].as_str()
# Get the OAuth token from a file or log in.
try:
with open(self._tokenfile()) as f:
tokendata = json.load(f)
except IOError:
# No token yet. Generate one.
token, secret = self.authenticate(c_key, c_secret)
else:
token = tokendata['token']
secret = tokendata['secret']
self.discogs_client = Client(USER_AGENT, c_key, c_secret,
token, secret)
def reset_auth(self):
"""Delete toke file & redo the auth steps.
"""
os.remove(self._tokenfile())
self.setup()
def _tokenfile(self):
"""Get the path to the JSON file for storing the OAuth token.
"""
return self.config['tokenfile'].get(confit.Filename(in_app_dir=True))
def authenticate(self, c_key, c_secret):
# Get the link for the OAuth page.
auth_client = Client(USER_AGENT, c_key, c_secret)
try:
_, _, url = auth_client.get_authorize_url()
except CONNECTION_ERRORS as e:
self._log.debug(u'connection error: {0}', e)
raise beets.ui.UserError(u'communication with Discogs failed')
beets.ui.print_(u"To authenticate with Discogs, visit:")
beets.ui.print_(url)
# Ask for the code and validate it.
code = beets.ui.input_(u"Enter the code:")
try:
token, secret = auth_client.get_access_token(code)
except DiscogsAPIError:
raise beets.ui.UserError(u'Discogs authorization failed')
except CONNECTION_ERRORS as e:
self._log.debug(u'connection error: {0}', e)
raise beets.ui.UserError(u'Discogs token request failed')
# Save the token for later use.
self._log.debug(u'Discogs token {0}, secret {1}', token, secret)
with open(self._tokenfile(), 'w') as f:
json.dump({'token': token, 'secret': secret}, f)
return token, secret
def album_distance(self, items, album_info, mapping):
"""Returns the album distance.
"""
dist = Distance()
if album_info.data_source == 'Discogs':
dist.add('source', self.config['source_weight'].as_number())
return dist
def candidates(self, items, artist, album, va_likely):
"""Returns a list of AlbumInfo objects for discogs search results
matching an album and artist (if not various).
"""
if not self.discogs_client:
return
if va_likely:
query = album
else:
query = '%s %s' % (artist, album)
try:
return self.get_albums(query)
except DiscogsAPIError as e:
self._log.debug(u'API Error: {0} (query: {1})', e, query)
if e.status_code == 401:
self.reset_auth()
return self.candidates(items, artist, album, va_likely)
else:
return []
except CONNECTION_ERRORS:
self._log.debug(u'Connection error in album search', exc_info=True)
return []
def album_for_id(self, album_id):
"""Fetches an album by its Discogs ID and returns an AlbumInfo object
or None if the album is not found.
"""
if not self.discogs_client:
return
self._log.debug(u'Searching for release {0}', album_id)
# Discogs-IDs are simple integers. We only look for those at the end
of an input string so as to avoid confusion with other metadata plugins.
# An optional bracket can follow the integer, as this is how discogs
# displays the release ID on its webpage.
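# Illustrative matches for the pattern below (example inputs only, not taken
# from the original source):
#     '12345'                                      -> release id 12345
#     '[r12345]'                                   -> release id 12345
#     'discogs.com/some-artist-album/release/12345' -> release id 12345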
match = re.search(r'(^|\[*r|discogs\.com/.+/release/)(\d+)($|\])',
album_id)
if not match:
return None
result = Release(self.discogs_client, {'id': int(match.group(2))})
# Try to obtain title to verify that we indeed have a valid Release
try:
getattr(result, 'title')
except DiscogsAPIError as e:
if e.status_code != 404:
self._log.debug(u'API Error: {0} (query: {1})', e, result._uri)
if e.status_code == 401:
self.reset_auth()
return self.album_for_id(album_id)
return None
except CONNECTION_ERRORS:
self._log.debug(u'Connection error in album lookup', exc_info=True)
return None
return self.get_album_info(result)
def get_albums(self, query):
"""Returns a list of AlbumInfo objects for a discogs search query.
"""
# Strip non-word characters from query. Things like "!" and "-" can
# cause a query to return no results, even if they match the artist or
# album title. Use `re.UNICODE` flag to avoid stripping non-english
# word characters.
# FIXME: Encode as ASCII to work around a bug:
# https://github.com/beetbox/beets/issues/1051
# When the library is fixed, we should encode as UTF-8.
query = re.sub(r'(?u)\W+', ' ', query).encode('ascii', "replace")
# Strip medium information from the query. Things like "CD1" and "disk 1"
# can also negate an otherwise positive result.
query = re.sub(br'(?i)\b(CD|disc)\s*\d+', b'', query)
try:
releases = self.discogs_client.search(query,
type='release').page(1)
except CONNECTION_ERRORS:
self._log.debug(u"Communication error while searching for {0!r}",
query, exc_info=True)
return []
return [self.get_album_info(release) for release in releases[:5]]
def get_album_info(self, result):
"""Returns an AlbumInfo object for a discogs Release object.
"""
artist, artist_id = self.get_artist([a.data for a in result.artists])
album = re.sub(r' +', ' ', result.title)
album_id = result.data['id']
# Use `.data` to access the tracklist directly instead of the
# convenient `.tracklist` property, which will strip out useful artist
# information and leave us with skeleton `Artist` objects that will
# each make an API call just to get the same data back.
tracks = self.get_tracks(result.data['tracklist'])
albumtype = ', '.join(
result.data['formats'][0].get('descriptions', [])) or None
va = result.data['artists'][0]['name'].lower() == 'various'
if va:
artist = config['va_name'].as_str()
year = result.data['year']
label = result.data['labels'][0]['name']
mediums = len(set(t.medium for t in tracks))
catalogno = result.data['labels'][0]['catno']
if catalogno == 'none':
catalogno = None
country = result.data.get('country')
media = result.data['formats'][0]['name']
data_url = result.data['uri']
return AlbumInfo(album, album_id, artist, artist_id, tracks, asin=None,
albumtype=albumtype, va=va, year=year, month=None,
day=None, label=label, mediums=mediums,
artist_sort=None, releasegroup_id=None,
catalognum=catalogno, script=None, language=None,
country=country, albumstatus=None, media=media,
albumdisambig=None, artist_credit=None,
original_year=None, original_month=None,
original_day=None, data_source='Discogs',
data_url=data_url)
def get_artist(self, artists):
"""Returns an artist string (all artists) and an artist_id (the main
artist) for a list of discogs album or track artists.
"""
artist_id = None
bits = []
for i, artist in enumerate(artists):
if not artist_id:
artist_id = artist['id']
name = artist['name']
# Strip disambiguation number.
name = re.sub(r' \(\d+\)$', '', name)
# Move articles to the front.
name = re.sub(r'(?i)^(.*?), (a|an|the)$', r'\2 \1', name)
bits.append(name)
if artist['join'] and i < len(artists) - 1:
bits.append(artist['join'])
artist = ' '.join(bits).replace(' ,', ',') or None
return artist, artist_id
def get_tracks(self, tracklist):
"""Returns a list of TrackInfo objects for a discogs tracklist.
"""
tracks = []
index_tracks = {}
index = 0
for track in tracklist:
# Only real tracks have `position`. Otherwise, it's an index track.
if track['position']:
index += 1
tracks.append(self.get_track_info(track, index))
else:
index_tracks[index + 1] = track['title']
# Fix up medium and medium_index for each track. Discogs position is
# unreliable, but tracks are in order.
medium = None
medium_count, index_count = 0, 0
for track in tracks:
# Handle special case where a different medium does not indicate a
# new disc, when there is no medium_index and the ordinal of medium
# is not sequential. For example, I, II, III, IV, V. Assume these
# are the track index, not the medium.
medium_is_index = track.medium and not track.medium_index and (
len(track.medium) != 1 or
ord(track.medium) - 64 != medium_count + 1
)
if not medium_is_index and medium != track.medium:
# Increment medium_count and reset index_count when medium
# changes.
medium = track.medium
medium_count += 1
index_count = 0
index_count += 1
track.medium, track.medium_index = medium_count, index_count
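# Worked illustration (hypothetical positions, not from the original source):
# for positions ['A1', 'A2', 'B1', 'B2'] the loop above produces
# (medium, medium_index) pairs (1, 1), (1, 2), (2, 1), (2, 2); a run of
# Roman-numeral positions such as I, II, III is caught by medium_is_index
# and does not start a new medium.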
# Get `disctitle` from Discogs index tracks. Assume that an index track
# before the first track of each medium is a disc title.
for track in tracks:
if track.medium_index == 1:
if track.index in index_tracks:
disctitle = index_tracks[track.index]
else:
disctitle = None
track.disctitle = disctitle
return tracks
def get_track_info(self, track, index):
"""Returns a TrackInfo object for a discogs track.
"""
title = track['title']
track_id = None
medium, medium_index = self.get_track_index(track['position'])
artist, artist_id = self.get_artist(track.get('artists', []))
length = self.get_track_length(track['duration'])
return TrackInfo(title, track_id, artist, artist_id, length, index,
medium, medium_index, artist_sort=None,
disctitle=None, artist_credit=None)
def get_track_index(self, position):
"""Returns the medium and medium index for a discogs track position.
"""
# medium_index is a number at the end of position. medium is everything
# else. E.g. (A)(1), (Side A, Track )(1), (A)(), ()(1), etc.
match = re.match(r'^(.*?)(\d*)$', position.upper())
if match:
medium, index = match.groups()
else:
self._log.debug(u'Invalid position: {0}', position)
medium = index = None
return medium or None, index or None
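# Illustrative results of get_track_index (hypothetical Discogs position
# strings, shown only to clarify the regex above):
#     'A1'  -> ('A', '1')      # vinyl side A, track 1
#     '12'  -> (None, '12')    # plain numeric position, no medium
#     'A'   -> ('A', None)     # medium only, no index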
def get_track_length(self, duration):
"""Returns the track length in seconds for a discogs duration.
"""
try:
length = time.strptime(duration, '%M:%S')
except ValueError:
return None
return length.tm_min * 60 + length.tm_sec
|
jcoady9/beets
|
beetsplug/discogs.py
|
Python
|
mit
| 14,464
|
[
"VisIt"
] |
975636ce12c53541c91736b2e4513f71a07ec4c443bcc7eb5cf37a8ad2d3614d
|
########################################################################
# $HeadURL$
########################################################################
""" Legend encapsulates a graphical plot legend drawing tool
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
from matplotlib.patches import Rectangle
from matplotlib.text import Text
from DIRAC.Core.Utilities.Graphs.GraphUtilities import *
from DIRAC.Core.Utilities.Graphs.Palette import Palette
from DIRAC.Core.Utilities.Graphs.GraphData import GraphData
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
import types
class Legend( object ):
def __init__(self,data=None,axes=None,*aw,**kw):
self.text_size = 0
self.column_width = 0
self.labels = {}
if type(data) == types.DictType:
for label,ddict in data.items():
#self.labels[label] = pretty_float(max([ float(x) for x in ddict.values() if x ]) )
self.labels[label] = "%.1f" % max([ float(x) for x in ddict.values() if x ])
elif type(data) == types.InstanceType and data.__class__ == GraphData:
self.labels = data.getLabels()
else:
self.labels = data
#self.labels.reverse()
self.ax = axes
self.canvas = None
if self.ax:
self.canvas = self.ax.figure.canvas
self.ax.set_axis_off()
self.prefs = evalPrefs(*aw,**kw)
self.palette = Palette()
if self.labels and self.labels[0][0] != 'NoLabels':
percent_flag = self.prefs.get('legend_unit','')
if percent_flag == "%":
sum_value = sum(data.label_values)
if sum_value > 0.:
self.labels = [(l,v/sum_value*100.) for l,v in self.labels ]
self.__get_column_width()
def dumpPrefs(self):
for key in self.prefs:
print key.rjust(20),':',str(self.prefs[key]).ljust(40)
def setLabels(self,labels):
self.labels = labels
def setAxes(self,axes):
self.ax = axes
self.canvas = self.ax.figure.canvas
self.ax.set_axis_off()
def getLegendSize(self):
self.__get_column_width()
legend_position = self.prefs['legend_position']
legend_width = float(self.prefs['legend_width'])
legend_height = float(self.prefs['legend_height'])
legend_padding = float(self.prefs['legend_padding'])
legend_text_size = self.prefs.get('legend_text_size',self.prefs['text_size'])
legend_text_padding = self.prefs.get('legend_text_padding',self.prefs['text_padding'])
if legend_position in ['right','left']:
# One column in case of vertical legend
legend_width = self.column_width+legend_padding
nLabels = len(self.labels)
legend_max_height = nLabels*(legend_text_size+legend_text_padding)
elif legend_position == 'bottom':
nColumns = min(self.prefs['legend_max_columns'],int(legend_width/self.column_width))
nLabels = len(self.labels)
maxRows = self.prefs['legend_max_rows']
nRows_ax = int(legend_height/1.6/self.prefs['text_size'])
nRows_label = nLabels/nColumns + (nLabels%nColumns != 0)
nRows = max(1,min(min(nRows_label,maxRows),nRows_ax ))
text_padding = self.prefs['text_padding']
text_padding = pixelToPoint(text_padding,self.prefs['dpi'])
legend_height = min(legend_height,(nRows*(self.text_size+text_padding)+text_padding))
legend_max_height = nLabels*(self.text_size+text_padding)
return legend_width,legend_height,legend_max_height
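  # Rough sketch of the 'bottom' layout arithmetic above (illustrative numbers,
  # not from the original source): with legend_width=600, column_width=150 and
  # legend_max_columns=4, nColumns = min(4, int(600/150)) = 4; ten labels then
  # need nRows_label = 10/4 + (10 % 4 != 0) = 3 rows (Python 2 integer
  # division), clamped by legend_max_rows and by how many rows fit into
  # legend_height.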
def __get_legend_text_size(self):
dpi = self.prefs['dpi']
text_size = self.prefs['text_size']
text_padding = self.prefs['text_padding']
legend_text_size = self.prefs.get('legend_text_size',text_size)
legend_text_padding = self.prefs.get('legend_text_padding',text_padding)
return legend_text_size,legend_text_padding
def __get_column_width(self):
max_length = 0
max_column_text = ''
flag = self.prefs.get('legend_numbers',True)
unit = self.prefs.get('legend_unit',False)
for label,num in self.labels:
if not flag: num = None
if num is not None:
column_length = len(str(label)+str(num)) + 1
else:
column_length = len(str(label)) + 1
if column_length > max_length:
max_length = column_length
if flag:
if type(num) == types.IntType or type(num) == types.LongType:
numString = str(num)
else:
numString = "%.1f" % float(num)
max_column_text = '%s %s' % (str(label),numString)
if unit:
max_column_text += "%"
else:
max_column_text = '%s ' % str(label)
figure = Figure()
canvas = FigureCanvasAgg(figure)
dpi = self.prefs['dpi']
figure.set_dpi( dpi )
l_size, _ = self.__get_legend_text_size()
self.text_size = pixelToPoint(l_size,dpi)
text = Text(0.,0.,text=max_column_text,size=self.text_size)
text.set_figure(figure)
bbox = text.get_window_extent(canvas.get_renderer())
columnwidth = bbox.width+6*l_size
    self.column_width = columnwidth if columnwidth <= self.prefs['legend_width'] else self.prefs['legend_width'] - 6*l_size  # make sure the legend fits in the box
def draw(self):
dpi = self.prefs['dpi']
ax_xsize = self.ax.get_window_extent().width
ax_ysize = self.ax.get_window_extent().height
nLabels = len(self.labels)
nColumns = min(self.prefs['legend_max_columns'],int(ax_xsize/self.column_width))
maxRows = self.prefs['legend_max_rows']
nRows_ax = int(ax_ysize/1.6/self.prefs['text_size'])
nRows_label = nLabels/nColumns + (nLabels%nColumns != 0)
nRows = max(1,min(min(nRows_label,maxRows),nRows_ax ))
maxLabels = nColumns*nRows - 1
self.ax.set_xlim(0.,float(ax_xsize))
self.ax.set_ylim(-float(ax_ysize),0.)
legend_text_size,legend_text_padding = self.__get_legend_text_size()
legend_text_size_point = pixelToPoint(legend_text_size,dpi)
box_width = legend_text_size
legend_offset = (ax_xsize - nColumns*self.column_width)/2
nc = 0
#self.labels.reverse()
for label,num in self.labels:
num_flag = self.prefs.get('legend_numbers',True)
percent_flag = self.prefs.get('legend_unit','')
if num_flag:
if percent_flag == "%":
num = "%.1f" % num +'%'
else:
num = "%.1f" % num
else:
num = None
color = self.palette.getColor(label)
row = nc%nRows
column = nc/nRows
if row == nRows-1 and column == nColumns-1 and nc != nLabels-1:
last_text = '... plus %d more' % (nLabels-nc)
self.ax.text(float(column*self.column_width)+legend_offset,-float(row*1.6*box_width),
last_text,horizontalalignment='left',
verticalalignment='top',size=legend_text_size_point)
break
else:
self.ax.text(float(column*self.column_width)+2.*box_width+legend_offset,-row*1.6*box_width,
str(label),horizontalalignment='left',
verticalalignment='top',size=legend_text_size_point)
if num is not None:
self.ax.text(float((column+1)*self.column_width)-2*box_width+legend_offset,-float(row*1.6*box_width),
str(num),horizontalalignment='right',
verticalalignment='top',size=legend_text_size_point)
box = Rectangle((float(column*self.column_width)+legend_offset,-float(row*1.6*box_width)-box_width),
box_width,box_width)
box.set_ec('black')
box.set_linewidth(pixelToPoint(0.5,dpi))
box.set_fc(color)
self.ax.add_patch(box)
nc += 1
|
andresailer/DIRAC
|
Core/Utilities/Graphs/Legend.py
|
Python
|
gpl-3.0
| 7,924
|
[
"DIRAC"
] |
bef0dcfc8d8d91add9b94c84a6bb1ef253f2e92d93df3817aa01c1b494bb48b6
|
# encoding:utf-8
import re
import os
def is_md_file(filename):
    # Match a ".md" extension (case-insensitive); the original pattern left the
    # dot unescaped and unanchored, so names like "amd64.txt" also matched.
    pattern = re.compile(r'\.[Mm][Dd]$')
    return pattern.search(filename) is not None
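# Example usage (illustrative, not in the original script):
#   is_md_file('README.md')  -> True
#   is_md_file('readme.MD')  -> True
#   is_md_file('amd64.txt')  -> False (matched before the fix because the dot
#   was unescaped and the pattern was unanchored)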
def deal_with(filename):
with open(filename) as f:
content = f.read()
# print content
p_img = re.compile(r'(\[code=img\].+\[/code\])+?', re.IGNORECASE)
p_color = re.compile(r'(\[color=.+\])', re.IGNORECASE)
p_code_begin = re.compile(r'\[code=java\]')
p_code_end = re.compile(r'\[/code\]')
        # Strip the author's custom highlighting markup from the text
content = content.replace('#', '')
content = content.replace('[Comments]', '')
content = content.replace('[Fields]', '')
content = content.replace('[Keywords]', '')
content = re.sub(p_img, ' ', content)
content = re.sub(p_color, '#### ', content)
content = re.sub(p_code_begin, '``` ', content)
content = re.sub(p_code_end, '```', content)
print content
with open(filename, mode='w') as f:
f.write(content)
def visit(args, dirname, names):
if os.path.isdir(dirname):
for n in names:
if is_md_file(n):
deal_with(os.path.join(dirname, n))
def main():
# cwd =os.getcwd()
cwd = os.path.join(os.getcwd(), 'test')
os.path.walk(cwd, visit, 'walk')
if __name__ == '__main__':
main()
|
EManual/EManual.github.io
|
android-tmp/cleanup.py
|
Python
|
apache-2.0
| 1,333
|
[
"VisIt"
] |
11331a11514fe588059324a9418c76a1f74f1a2151ea78cbd771545b7afaad4b
|
import os, time
import numpy as np
from ase import Atoms
from ase.parallel import paropen
from ase.units import Hartree, Bohr
from ase.io.trajectory import PickleTrajectory
from ase.calculators.singlepoint import SinglePointCalculator
from gpaw import GPAW
from gpaw.mpi import world
from gpaw.tddft import TDDFT
from gpaw.tddft.ehrenfest import EhrenfestVelocityVerlet
# -------------------------------------------------------------------
name = 'h2_osc'
# Equilibrium distance in Ang cf. setups page for H dimer
d_bond = 0.754 # ~0.766 during oscillation
d_disp = 0.03
# Timestep and expected oscillatory period in attoseconds
timestep = 5.0
period = 7.58e3 # ~545.7 meV cf. CRC Handbook of Phys. & Chem. #09_08_91
ndiv = int(np.ceil(0.1e3 / timestep)) # update stats every 0.1 fs
niter = ndiv * int(np.ceil(2 * period / (ndiv * timestep)))
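# Worked example of the bookkeeping above (added for clarity): with
# timestep = 5.0 as and period = 7.58e3 as, ndiv = ceil(0.1e3 / 5.0) = 20 steps
# between log entries and niter = 20 * ceil(2 * 7580 / (20 * 5.0)) = 20 * 152
# = 3040 propagation steps, i.e. roughly two oscillation periods.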
if __name__ == '__main__':
if not os.path.isfile(name + '_gs.gpw'):
atoms = Atoms('H2', positions=[(0, 0, 0), (0, 0, d_bond + d_disp)])
atoms.set_pbc(False)
atoms.center(vacuum=4.0)
cell_c = np.sum(atoms.get_cell()**2, axis=1)**0.5
N_c = 8 * np.round(cell_c / (0.18 * 8))
calc = GPAW(gpts=N_c, nbands=1, basis='dzp', txt=name + '_gs.txt')
atoms.set_calculator(calc)
atoms.get_potential_energy()
calc.write(name + '_gs.gpw', mode='all')
del atoms, calc
time.sleep(10)
while not os.path.isfile(name + '_gs.gpw'):
print 'Node %d waiting for file...' % world.rank
time.sleep(10)
world.barrier()
tdcalc = TDDFT(name + '_gs.gpw', txt=name + '_td.txt', propagator='EFSICN')
ehrenfest = EhrenfestVelocityVerlet(tdcalc)
traj = PickleTrajectory(name + '_td.traj', 'w', tdcalc.get_atoms())
t0 = time.time()
f = paropen(name + '_td.log', 'w')
for i in range(1, niter+1):
ehrenfest.propagate(timestep)
if i % ndiv == 0:
rate = 60 * ndiv / (time.time()-t0)
ekin = tdcalc.atoms.get_kinetic_energy()
epot = tdcalc.get_td_energy() * Hartree
F_av = ehrenfest.F * Hartree / Bohr
print >>f, 'i=%06d (%6.2f min^-1), ekin=%13.9f, epot=%13.9f, etot=%13.9f' % (i, rate, ekin, epot, ekin+epot)
t0 = time.time()
# Hack to prevent calls to GPAW::get_potential_energy when saving
spa = tdcalc.get_atoms()
spc = SinglePointCalculator(epot, F_av, None, None, spa)
spa.set_calculator(spc)
traj.write(spa)
f.close()
traj.close()
|
robwarm/gpaw-symm
|
gpaw/test/big/ehrenfest/h2_osc.py
|
Python
|
gpl-3.0
| 2,549
|
[
"ASE",
"GPAW"
] |
36d917a505f7556fd33fd04b9fbcb284239a603409c6c38073b98bdb38ece5fa
|
from __future__ import print_function
from pybrain.structure import FeedForwardNetwork
from pybrain.tools.validation import ModuleValidator,Validator
from pybrain.utilities import percentError
from pybrain.tools.customxml import NetworkReader
from pybrain.datasets import SupervisedDataSet
import numpy
import pylab
import os
def myplot(trns,ctrns = None,tsts = None,ctsts = None,iter = 0):
plotdir = os.path.join(os.getcwd(),'plot')
pylab.clf()
    # Decide which optional datasets are available; the original used bare
    # try/assert/except blocks, which behave the same but hide real errors.
    tstsplot = tsts is not None and len(tsts) > 1
    ctstsplot = ctsts is not None and len(ctsts) > 1
    ctrnsplot = ctrns is not None and len(ctrns) > 1
if tstsplot:
pylab.plot(tsts['input'],tsts['target'],c='b')
pylab.scatter(trns['input'],trns['target'],c='r')
if ctrnsplot:
pylab.scatter(trns['input'],ctrns,c='y')
if tstsplot and ctstsplot:
pylab.plot(tsts['input'], ctsts,c='g')
pylab.xlabel('x')
pylab.ylabel('y')
pylab.title('Neuron Number:'+str(nneuron))
pylab.grid(True)
plotname = os.path.join(plotdir,('jpq2layers_plot'+ str(iter)))
pylab.savefig(plotname)
# set-up the neural network
nneuron = 5
mom = 0.98
netname="LSL-"+str(nneuron)+"-"+str(mom)
mv=ModuleValidator()
v = Validator()
#create the test DataSet
x = numpy.arange(0.0, 1.0+0.01, 0.01)
s = 0.5+0.4*numpy.sin(2*numpy.pi*x)
tsts = SupervisedDataSet(1,1)
tsts.setField('input',x.reshape(len(x),1))
tsts.setField('target',s.reshape(len(s),1))
#read the train DataSet from file
trndata = SupervisedDataSet.loadFromFile(os.path.join(os.getcwd(),'trndata'))
myneuralnet = os.path.join(os.getcwd(),'myneuralnet.xml')
if os.path.isfile(myneuralnet):
n = NetworkReader.readFrom(myneuralnet,name=netname)
#calculate the test DataSet based on the trained Neural Network
ctsts = mv.calculateModuleOutput(n,tsts)
tserr = v.MSE(ctsts,tsts['target'])
print('MSE error on TSTS:',tserr)
myplot(trndata,tsts = tsts,ctsts = ctsts)
pylab.show()
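    # Added note: tsts is a synthetic sine curve y = 0.5 + 0.4*sin(2*pi*x)
    # sampled on [0, 1], so the printed MSE compares the restored network's
    # output ctsts against that curve; smaller values mean the saved network
    # reproduces the target function more closely.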
|
affordablewindurbines/jarvisproject
|
pybrain/examples/supervised/test_network_read_write/jpq2layersReader.py
|
Python
|
gpl-3.0
| 2,034
|
[
"NEURON"
] |
342f8f1a2e491c942cdd1c8c5093fb5fa2c394b30437c0b3dff166bbdec4424d
|
import tensorflow as tf
def cat_id_column(train_x, test_x, unlabeled, col_name):
train_set = set(train_x[col_name])
test_set = set(test_x[col_name])
unlabeled_set = set(unlabeled[col_name])
id_max = max(train_set.union(test_set).union(unlabeled_set))
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_identity(
key=col_name, num_buckets=id_max + 1))
def cat_id_embedding(train_x, test_x, unlabeled, col_name, dimension):
train_set = set(train_x[col_name])
test_set = set(test_x[col_name])
unlabeled_set = set(unlabeled[col_name])
id_max = max(train_set.union(test_set).union(unlabeled_set))
return tf.feature_column.embedding_column(
tf.feature_column.categorical_column_with_identity(
key=col_name, num_buckets=id_max + 1), dimension)
def cat_id_column_fixed_buckets(col_name, buckets):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_identity(
key=col_name, num_buckets=buckets))
def numeric_column(k):
return tf.feature_column.numeric_column(key=k)
def bucketized(k, boundaries):
return tf.feature_column.indicator_column(
tf.feature_column.bucketized_column(
source_column=numeric_column(k),
boundaries=boundaries))
def cat_dict_column(train_x, test_x, unlabeled, col_name):
train_set = set(train_x[col_name])
test_set = set(test_x[col_name])
unlabeled_set = set(unlabeled[col_name])
dictionary_set = train_set.union(test_set).union(unlabeled_set)
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(
col_name, dictionary_set))
def cat_dict_embedding(train_x, test_x, unlabeled, col_name, dimension):
train_set = set(train_x[col_name])
test_set = set(test_x[col_name])
unlabeled_set = set(unlabeled[col_name])
dictionary_set = train_set.union(test_set).union(unlabeled_set)
return tf.feature_column.embedding_column(
tf.feature_column.categorical_column_with_vocabulary_list(
col_name, dictionary_set), dimension)
def story_model_columns(train_x, test_x, unlabeled):
return [
cat_dict_embedding(train_x, test_x, unlabeled, 'Geohash40', 2000),
cat_dict_column(train_x, test_x, unlabeled, 'Geohash35'),
cat_dict_column(train_x, test_x, unlabeled, 'Geohash30'),
cat_dict_column(train_x, test_x, unlabeled, 'Geohash25'),
cat_dict_column(train_x, test_x, unlabeled, 'Geohash20'),
cat_dict_column(train_x, test_x, unlabeled, 'Geohash15'),
cat_dict_embedding(train_x, test_x, unlabeled, 'Tags1', 1000),
cat_dict_column(train_x, test_x, unlabeled, 'Mentions1'),
cat_id_column_fixed_buckets('Hour', 24),
cat_id_column_fixed_buckets('HalfQuarterDay', 8),
cat_id_column(train_x, test_x, unlabeled, 'WeeksAgo'),
cat_id_column(train_x, test_x, unlabeled, 'DaysAgo'),
bucketized('Md', [1, 3, 5, 10, 20, 30, 50, 100, 1000, 10000]),
numeric_column('Visit'),
numeric_column('Screenshot'),
numeric_column('Starred'),
numeric_column('ImgFile'),
numeric_column('AudioFile'),
numeric_column('Task')
]
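# Hypothetical usage sketch (not part of this module; names below are
# assumptions): the column list returned by story_model_columns is intended to
# be handed to a canned estimator, e.g.
#   columns = story_model_columns(train_x, test_x, unlabeled)
#   model = tf.estimator.DNNClassifier(feature_columns=columns,
#                                      hidden_units=[128, 64])
# where train_x, test_x and unlabeled are pandas DataFrames containing the
# named feature columns.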
|
matthiasn/iWasWhere
|
src/tensorflow/feature_columns.py
|
Python
|
agpl-3.0
| 3,305
|
[
"VisIt"
] |
fd06b855940ad3193f4c90ca32e8dadcda55faba7b004c8f4d851bdd6454471e
|
"""Github two-factor authentication requirement was disabled for a user."""
from streamalert.shared.rule import rule
@rule(logs=['ghe:general'])
def github_disable_two_factor_requirement_user(rec):
"""
author: @mimeframe
description: Two-factor authentication requirement was disabled for a user.
repro_steps: (a) Visit /settings/two_factor_authentication/configure
reference: https://help.github.com/enterprise/2.11/admin/articles/audited-actions/
"""
return rec['action'] == 'two_factor_authentication.disabled'
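    # Illustrative record (added, not from the rule): a ghe:general audit entry
    # such as {'action': 'two_factor_authentication.disabled', 'actor': 'jdoe'}
    # makes the rule return True; any other 'action' value evaluates to False
    # and the event is ignored.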
|
airbnb/streamalert
|
rules/community/github/github_disable_two_factor_requirement_user.py
|
Python
|
apache-2.0
| 555
|
[
"VisIt"
] |
3279c0629714a4ec3a22e8c25a819e3251cfaaf4974366d18753658b5b0c29ac
|
# -*- coding: utf-8 -*-
from ....interfaces import utility as util # utility
from ....pipeline import engine as pe # pypeline engine
from ....interfaces import camino as camino
from ....interfaces import fsl as fsl
from ....interfaces import camino2trackvis as cam2trk
from ....algorithms import misc as misc
from ...misc.utils import get_affine, get_data_dims, get_vox_dims
def create_camino_dti_pipeline(name="dtiproc"):
"""Creates a pipeline that does the same diffusion processing as in the
:doc:`../../users/examples/dmri_camino_dti` example script. Given a diffusion-weighted image,
b-values, and b-vectors, the workflow will return the tractography
computed from diffusion tensors and from PICo probabilistic tractography.
Example
-------
>>> import os
>>> nipype_camino_dti = create_camino_dti_pipeline("nipype_camino_dti")
>>> nipype_camino_dti.inputs.inputnode.dwi = os.path.abspath('dwi.nii')
>>> nipype_camino_dti.inputs.inputnode.bvecs = os.path.abspath('bvecs')
>>> nipype_camino_dti.inputs.inputnode.bvals = os.path.abspath('bvals')
>>> nipype_camino_dti.run() # doctest: +SKIP
Inputs::
inputnode.dwi
inputnode.bvecs
inputnode.bvals
Outputs::
outputnode.fa
outputnode.trace
outputnode.tracts_pico
outputnode.tracts_dt
outputnode.tensors
"""
inputnode1 = pe.Node(interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), name="inputnode1")
"""
Setup for Diffusion Tensor Computation
--------------------------------------
In this section we create the nodes necessary for diffusion analysis.
First, the diffusion image is converted to voxel order.
"""
image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel")
fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme")
fsl2scheme.inputs.usegradmod = True
"""
Second, diffusion tensors are fit to the voxel-order data.
"""
dtifit = pe.Node(interface=camino.DTIFit(), name='dtifit')
"""
Next, a lookup table is generated from the schemefile and the
signal-to-noise ratio (SNR) of the unweighted (q=0) data.
"""
dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen")
dtlutgen.inputs.snr = 16.0
dtlutgen.inputs.inversion = 1
"""
In this tutorial we implement probabilistic tractography using the PICo algorithm.
PICo tractography requires an estimate of the fibre direction and a model of its
uncertainty in each voxel; this is produced using the following node.
"""
picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs")
picopdfs.inputs.inputmodel = 'dt'
"""
    An FSL BET node creates a brain mask from the diffusion image, which is used to seed the PICo tractography.
"""
bet = pe.Node(interface=fsl.BET(), name="bet")
bet.inputs.mask = True
"""
Finally, tractography is performed.
First DT streamline tractography.
"""
trackdt = pe.Node(interface=camino.TrackDT(), name="trackdt")
"""
    Now Camino's Probabilistic Index of Connectivity (PICo) algorithm.
In this tutorial, we will use only 1 iteration for time-saving purposes.
"""
trackpico = pe.Node(interface=camino.TrackPICo(), name="trackpico")
trackpico.inputs.iterations = 1
"""
Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform the reverse.
"""
cam2trk_dt = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_dt")
cam2trk_dt.inputs.min_length = 30
cam2trk_dt.inputs.voxel_order = 'LAS'
cam2trk_pico = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_pico")
cam2trk_pico.inputs.min_length = 30
cam2trk_pico.inputs.voxel_order = 'LAS'
"""
Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview, using the following two nodes.
"""
# vtkstreamlines = pe.Node(interface=camino.VtkStreamlines(), name="vtkstreamlines")
# procstreamlines = pe.Node(interface=camino.ProcStreamlines(), name="procstreamlines")
# procstreamlines.inputs.outputtracts = 'oogl'
"""
We can also produce a variety of scalar values from our fitted tensors. The following nodes generate the fractional anisotropy and diffusivity trace maps and their associated headers.
"""
fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(), name='fa')
# md = pe.Node(interface=camino.MD(),name='md')
trace = pe.Node(interface=camino.ComputeTensorTrace(), name='trace')
dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig')
analyzeheader_fa = pe.Node(interface=camino.AnalyzeHeader(), name="analyzeheader_fa")
analyzeheader_fa.inputs.datatype = "double"
analyzeheader_trace = analyzeheader_fa.clone('analyzeheader_trace')
# analyzeheader_md = pe.Node(interface= camino.AnalyzeHeader(), name = "analyzeheader_md")
# analyzeheader_md.inputs.datatype = "double"
# analyzeheader_trace = analyzeheader_md.clone('analyzeheader_trace')
fa2nii = pe.Node(interface=misc.CreateNifti(), name='fa2nii')
trace2nii = fa2nii.clone("trace2nii")
"""
Since we have now created all our nodes, we can now define our workflow and start making connections.
"""
tractography = pe.Workflow(name='tractography')
tractography.connect([(inputnode1, bet, [("dwi", "in_file")])])
"""
File format conversion
"""
tractography.connect([(inputnode1, image2voxel, [("dwi", "in_file")]),
(inputnode1, fsl2scheme, [("bvecs", "bvec_file"),
("bvals", "bval_file")])
])
"""
Tensor fitting
"""
tractography.connect([(image2voxel, dtifit, [['voxel_order', 'in_file']]),
(fsl2scheme, dtifit, [['scheme', 'scheme_file']])
])
"""
    Workflow for applying DT streamline tractography
"""
tractography.connect([(bet, trackdt, [("mask_file", "seed_file")])])
tractography.connect([(dtifit, trackdt, [("tensor_fitted", "in_file")])])
"""
Workflow for applying PICo
"""
tractography.connect([(bet, trackpico, [("mask_file", "seed_file")])])
tractography.connect([(fsl2scheme, dtlutgen, [("scheme", "scheme_file")])])
tractography.connect([(dtlutgen, picopdfs, [("dtLUT", "luts")])])
tractography.connect([(dtifit, picopdfs, [("tensor_fitted", "in_file")])])
tractography.connect([(picopdfs, trackpico, [("pdfs", "in_file")])])
# Mean diffusivity still appears broken
# tractography.connect([(dtifit, md,[("tensor_fitted","in_file")])])
# tractography.connect([(md, analyzeheader_md,[("md","in_file")])])
# tractography.connect([(inputnode, analyzeheader_md,[(('dwi', get_vox_dims), 'voxel_dims'),
# (('dwi', get_data_dims), 'data_dims')])])
# This line is commented out because the ProcStreamlines node keeps throwing memory errors
# tractography.connect([(track, procstreamlines,[("tracked","in_file")])])
"""
Connecting the Fractional Anisotropy and Trace nodes is simple, as they obtain their input from the
tensor fitting.
This is also where our voxel- and data-grabbing functions come in. We pass these functions, along with the original DWI image from the input node, to the header-generating nodes. This ensures that the files will be correct and readable.
"""
tractography.connect([(dtifit, fa, [("tensor_fitted", "in_file")])])
tractography.connect([(fa, analyzeheader_fa, [("fa", "in_file")])])
tractography.connect([(inputnode1, analyzeheader_fa, [(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(fa, fa2nii, [('fa', 'data_file')])])
tractography.connect([(inputnode1, fa2nii, [(('dwi', get_affine), 'affine')])])
tractography.connect([(analyzeheader_fa, fa2nii, [('header', 'header_file')])])
tractography.connect([(dtifit, trace, [("tensor_fitted", "in_file")])])
tractography.connect([(trace, analyzeheader_trace, [("trace", "in_file")])])
tractography.connect([(inputnode1, analyzeheader_trace, [(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(trace, trace2nii, [('trace', 'data_file')])])
tractography.connect([(inputnode1, trace2nii, [(('dwi', get_affine), 'affine')])])
tractography.connect([(analyzeheader_trace, trace2nii, [('header', 'header_file')])])
tractography.connect([(dtifit, dteig, [("tensor_fitted", "in_file")])])
tractography.connect([(trackpico, cam2trk_pico, [('tracked', 'in_file')])])
tractography.connect([(trackdt, cam2trk_dt, [('tracked', 'in_file')])])
tractography.connect([(inputnode1, cam2trk_pico, [(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(inputnode1, cam2trk_dt, [(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
inputnode = pe.Node(interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), name="inputnode")
outputnode = pe.Node(interface=util.IdentityInterface(fields=["fa",
"trace",
"tracts_pico",
"tracts_dt",
"tensors"]),
name="outputnode")
workflow = pe.Workflow(name=name)
workflow.base_output_dir = name
workflow.connect([(inputnode, tractography, [("dwi", "inputnode1.dwi"),
("bvals", "inputnode1.bvals"),
("bvecs", "inputnode1.bvecs")])])
workflow.connect([(tractography, outputnode, [("cam2trk_dt.trackvis", "tracts_dt"),
("cam2trk_pico.trackvis", "tracts_pico"),
("fa2nii.nifti_file", "fa"),
("trace2nii.nifti_file", "trace"),
("dtifit.tensor_fitted", "tensors")])
])
return workflow
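# Added note: the inner 'tractography' workflow is wrapped by the outer workflow
# so callers only deal with the exposed fields -- connect dwi, bvecs and bvals to
# inputnode and read fa, trace, tensors, tracts_dt and tracts_pico from
# outputnode, as in the doctest at the top of create_camino_dti_pipeline.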
|
mick-d/nipype
|
nipype/workflows/dmri/camino/diffusion.py
|
Python
|
bsd-3-clause
| 10,861
|
[
"ParaView",
"VTK"
] |
80a44cef35b6a11b356ea0ade3acd94988c8e82b6b4ef5f1cea4e1f456df8df6
|
#################################################################
# Code written by Edward Choi (mp2893@gatech.edu)
# For bug report, please contact author using the email address
#################################################################
import sys, random
import numpy as np
import cPickle as pickle
from collections import OrderedDict
import argparse
import theano
import theano.tensor as T
from theano import config
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
def unzip(zipped):
new_params = OrderedDict()
for key, value in zipped.iteritems():
new_params[key] = value.get_value()
return new_params
def numpy_floatX(data):
return np.asarray(data, dtype=config.floatX)
def load_embedding(infile):
Wemb = np.array(pickle.load(open(infile, 'rb'))).astype(config.floatX)
return Wemb
def init_params(options):
params = OrderedDict()
timeFile = options['timeFile']
embFile = options['embFile']
embSize = options['embSize']
inputDimSize = options['inputDimSize']
numClass = options['numClass']
if len(embFile) > 0:
print 'using external code embedding'
params['W_emb'] = load_embedding(embFile)
embSize = params['W_emb'].shape[1]
else:
print 'using randomly initialized code embedding'
params['W_emb'] = np.random.uniform(-0.01, 0.01, (inputDimSize, embSize)).astype(config.floatX)
params['b_emb'] = np.zeros(embSize).astype(config.floatX)
prevDimSize = embSize
if len(timeFile) > 0: prevDimSize += 1 #We need to consider an extra dimension for the duration information
for count, hiddenDimSize in enumerate(options['hiddenDimSize']):
params['W_'+str(count)] = np.random.uniform(-0.01, 0.01, (prevDimSize, hiddenDimSize)).astype(config.floatX)
params['W_r_'+str(count)] = np.random.uniform(-0.01, 0.01, (prevDimSize, hiddenDimSize)).astype(config.floatX)
params['W_z_'+str(count)] = np.random.uniform(-0.01, 0.01, (prevDimSize, hiddenDimSize)).astype(config.floatX)
params['U_'+str(count)] = np.random.uniform(-0.01, 0.01, (hiddenDimSize, hiddenDimSize)).astype(config.floatX)
params['U_r_'+str(count)] = np.random.uniform(-0.01, 0.01, (hiddenDimSize, hiddenDimSize)).astype(config.floatX)
params['U_z_'+str(count)] = np.random.uniform(-0.01, 0.01, (hiddenDimSize, hiddenDimSize)).astype(config.floatX)
params['b_'+str(count)] = np.zeros(hiddenDimSize).astype(config.floatX)
params['b_r_'+str(count)] = np.zeros(hiddenDimSize).astype(config.floatX)
params['b_z_'+str(count)] = np.zeros(hiddenDimSize).astype(config.floatX)
prevDimSize = hiddenDimSize
params['W_output'] = np.random.uniform(-0.01, 0.01, (prevDimSize, numClass)).astype(config.floatX)
params['b_output'] = np.zeros(numClass).astype(config.floatX)
if options['predictTime']:
params['W_time'] = np.random.uniform(-0.01, 0.01, (prevDimSize, 1)).astype(config.floatX)
params['b_time'] = np.zeros(1).astype(config.floatX)
return params
def init_tparams(params, options):
tparams = OrderedDict()
for key, value in params.iteritems():
if not options['embFineTune'] and key == 'W_emb': continue
tparams[key] = theano.shared(value, name=key)
return tparams
def dropout_layer(state_before, use_noise, trng, dropout_rate):
proj = T.switch(use_noise, (state_before * trng.binomial(state_before.shape, p=dropout_rate, n=1, dtype=state_before.dtype)), state_before * 0.5)
return proj
def gru_layer(tparams, emb, layerIndex, hiddenDimSize, mask=None):
timesteps = emb.shape[0]
if emb.ndim == 3: n_samples = emb.shape[1]
else: n_samples = 1
W_rx = T.dot(emb, tparams['W_r_'+layerIndex])
W_zx = T.dot(emb, tparams['W_z_'+layerIndex])
Wx = T.dot(emb, tparams['W_'+layerIndex])
def stepFn(stepMask, wrx, wzx, wx, h):
r = T.nnet.sigmoid(wrx + T.dot(h, tparams['U_r_'+layerIndex]) + tparams['b_r_'+layerIndex])
z = T.nnet.sigmoid(wzx + T.dot(h, tparams['U_z_'+layerIndex]) + tparams['b_z_'+layerIndex])
h_tilde = T.tanh(wx + T.dot(r*h, tparams['U_'+layerIndex]) + tparams['b_'+layerIndex])
h_new = z * h + ((1. - z) * h_tilde)
h_new = stepMask[:, None] * h_new + (1. - stepMask)[:, None] * h
return h_new
results, updates = theano.scan(fn=stepFn, sequences=[mask,W_rx,W_zx,Wx], outputs_info=T.alloc(numpy_floatX(0.0), n_samples, hiddenDimSize), name='gru_layer'+layerIndex, n_steps=timesteps)
return results
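# GRU update implemented by stepFn above (equations added for reference):
#   r_t  = sigmoid(W_r x_t + U_r h_{t-1} + b_r)
#   z_t  = sigmoid(W_z x_t + U_z h_{t-1} + b_z)
#   h~_t = tanh(W x_t + U (r_t * h_{t-1}) + b)
#   h_t  = z_t * h_{t-1} + (1 - z_t) * h~_t
# stepMask zeroes the update on padded timesteps so the previous hidden state is
# carried through unchanged.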
def build_model(tparams, options, W_emb=None):
trng = RandomStreams(123)
use_noise = theano.shared(numpy_floatX(0.))
if len(options['timeFile']) > 0: useTime = True
else: useTime = False
x = T.tensor3('x', dtype=config.floatX)
t = T.matrix('t', dtype=config.floatX)
y = T.tensor3('y', dtype=config.floatX)
t_label = T.matrix('t_label', dtype=config.floatX)
mask = T.matrix('mask', dtype=config.floatX)
lengths = T.vector('lengths', dtype=config.floatX)
n_timesteps = x.shape[0]
n_samples = x.shape[1]
if options['embFineTune']: emb = T.tanh(T.dot(x, tparams['W_emb']) + tparams['b_emb'])
else: emb = T.tanh(T.dot(x, W_emb) + tparams['b_emb'])
if useTime:
emb = T.concatenate([t.reshape([n_timesteps,n_samples,1]), emb], axis=2) #Adding the time element to the embedding
inputVector = emb
for i, hiddenDimSize in enumerate(options['hiddenDimSize']):
memories = gru_layer(tparams, inputVector, str(i), hiddenDimSize, mask=mask)
memories = dropout_layer(memories, use_noise, trng, options['dropout_rate'])
inputVector = memories
def softmaxStep(memory2d):
return T.nnet.softmax(T.dot(memory2d, tparams['W_output']) + tparams['b_output'])
logEps = options['logEps']
results, updates = theano.scan(fn=softmaxStep, sequences=[inputVector], outputs_info=None, name='softmax_layer', n_steps=n_timesteps)
results = results * mask[:,:,None]
cross_entropy = -(y * T.log(results + logEps) + (1. - y) * T.log(1. - results + logEps))
prediction_loss = cross_entropy.sum(axis=2).sum(axis=0) / lengths
if options['predictTime']:
duration = T.maximum(T.dot(inputVector, tparams['W_time']) + tparams['b_time'], 0) #ReLU
duration = duration.reshape([n_timesteps,n_samples]) * mask
duration_loss = 0.5 * ((duration - t_label) ** 2).sum(axis=0) / lengths
cost = T.mean(prediction_loss) + options['tradeoff'] * T.mean(duration_loss) + options['L2_output'] * (tparams['W_output'] ** 2).sum() + options['L2_time'] * (tparams['W_time'] ** 2).sum()
else:
cost = T.mean(prediction_loss) + options['L2_output'] * (tparams['W_output'] ** 2).sum()
if options['predictTime']: return use_noise, x, y, t, t_label, mask, lengths, cost
elif useTime: return use_noise, x, y, t, mask, lengths, cost
else: return use_noise, x, y, mask, lengths, cost
def adadelta(tparams, grads, x, y, mask, lengths, cost, options, t=None, t_label=None):
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.), name='%s_grad' % k) for k, p in tparams.iteritems()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.), name='%s_rup2' % k) for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.), name='%s_rgrad2' % k) for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2)) for rg2, g in zip(running_grads2, grads)]
if options['predictTime']:
f_grad_shared = theano.function([x, y, t, t_label, mask, lengths], cost, updates=zgup + rg2up, name='adadelta_f_grad_shared')
elif len(options['timeFile']) > 0:
f_grad_shared = theano.function([x, y, t, mask, lengths], cost, updates=zgup + rg2up, name='adadelta_f_grad_shared')
else:
f_grad_shared = theano.function([x, y, mask, lengths], cost, updates=zgup + rg2up, name='adadelta_f_grad_shared')
updir = [-T.sqrt(ru2 + 1e-6) / T.sqrt(rg2 + 1e-6) * zg for zg, ru2, rg2 in zip(zipped_grads, running_up2, running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2)) for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
f_update = theano.function([], [], updates=ru2up + param_up, on_unused_input='ignore', name='adadelta_f_update')
return f_grad_shared, f_update
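# Adadelta recap (Zeiler 2012; summary added here, not in the original code):
#   E[g^2]_t  = 0.95 * E[g^2]_{t-1}  + 0.05 * g_t^2
#   dx_t      = -sqrt(E[dx^2]_{t-1} + 1e-6) / sqrt(E[g^2]_t + 1e-6) * g_t
#   E[dx^2]_t = 0.95 * E[dx^2]_{t-1} + 0.05 * dx_t^2
# f_grad_shared accumulates the gradient statistics and returns the cost;
# f_update then applies the parameter step p <- p + dx.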
def padMatrixWithTimePrediction(seqs, labels, times, options):
lengths = np.array([len(seq) for seq in seqs]) - 1
n_samples = len(seqs)
maxlen = np.max(lengths)
inputDimSize = options['inputDimSize']
numClass = options['numClass']
x = np.zeros((maxlen, n_samples, inputDimSize)).astype(config.floatX)
y = np.zeros((maxlen, n_samples, numClass)).astype(config.floatX)
t = np.zeros((maxlen, n_samples)).astype(config.floatX)
t_label = np.zeros((maxlen, n_samples)).astype(config.floatX)
mask = np.zeros((maxlen, n_samples)).astype(config.floatX)
for idx, (seq,time,label) in enumerate(zip(seqs,times,labels)):
for xvec, subseq in zip(x[:,idx,:], seq[:-1]):
xvec[subseq] = 1.
for yvec, subseq in zip(y[:,idx,:], label[1:]):
yvec[subseq] = 1.
mask[:lengths[idx], idx] = 1.
t[:lengths[idx], idx] = time[:-1]
t_label[:lengths[idx], idx] = time[1:]
lengths = np.array(lengths, dtype=config.floatX)
if options['useLogTime']:
t = np.log(t + options['logEps'])
t_label = np.log(t_label + options['logEps'])
return x, y, t, t_label, mask, lengths
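# Shape convention shared by the pad* helpers in this file (note added for
# clarity): x and y are (maxlen, n_samples, n_codes) multi-hot arrays, mask is
# (maxlen, n_samples) with 1.0 on real timesteps, and each visit is labelled
# with the *next* visit's code set, which is why seq[:-1] fills x while
# label[1:] fills y and lengths = len(seq) - 1.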
def padMatrixWithTime(seqs, labels, times, options):
lengths = np.array([len(seq) for seq in seqs]) - 1
n_samples = len(seqs)
maxlen = np.max(lengths)
inputDimSize = options['inputDimSize']
numClass = options['numClass']
x = np.zeros((maxlen, n_samples, inputDimSize)).astype(config.floatX)
y = np.zeros((maxlen, n_samples, numClass)).astype(config.floatX)
t = np.zeros((maxlen, n_samples)).astype(config.floatX)
mask = np.zeros((maxlen, n_samples)).astype(config.floatX)
for idx, (seq,time,label) in enumerate(zip(seqs,times,labels)):
for xvec, subseq in zip(x[:,idx,:], seq[:-1]):
xvec[subseq] = 1.
for yvec, subseq in zip(y[:,idx,:], label[1:]):
yvec[subseq] = 1.
mask[:lengths[idx], idx] = 1.
t[:lengths[idx], idx] = time[:-1]
lengths = np.array(lengths, dtype=config.floatX)
if options['useLogTime']:
t = np.log(t + options['logEps'])
return x, y, t, mask, lengths
def padMatrixWithoutTime(seqs, labels, options):
lengths = np.array([len(seq) for seq in seqs]) - 1
n_samples = len(seqs)
maxlen = np.max(lengths)
inputDimSize = options['inputDimSize']
numClass = options['numClass']
x = np.zeros((maxlen, n_samples, inputDimSize)).astype(config.floatX)
y = np.zeros((maxlen, n_samples, numClass)).astype(config.floatX)
mask = np.zeros((maxlen, n_samples)).astype(config.floatX)
for idx, (seq,label) in enumerate(zip(seqs,labels)):
for xvec, subseq in zip(x[:,idx,:], seq[:-1]):
xvec[subseq] = 1.
for yvec, subseq in zip(y[:,idx,:], label[1:]):
yvec[subseq] = 1.
mask[:lengths[idx], idx] = 1.
lengths = np.array(lengths, dtype=config.floatX)
return x, y, mask, lengths
def load_data(seqFile, labelFile, timeFile):
train_set_x = pickle.load(open(seqFile+'.train', 'rb'))
valid_set_x = pickle.load(open(seqFile+'.valid', 'rb'))
test_set_x = pickle.load(open(seqFile+'.test', 'rb'))
train_set_y = pickle.load(open(labelFile+'.train', 'rb'))
valid_set_y = pickle.load(open(labelFile+'.valid', 'rb'))
test_set_y = pickle.load(open(labelFile+'.test', 'rb'))
train_set_t = None
valid_set_t = None
test_set_t = None
if len(timeFile) > 0:
train_set_t = pickle.load(open(timeFile+'.train', 'rb'))
valid_set_t = pickle.load(open(timeFile+'.valid', 'rb'))
test_set_t = pickle.load(open(timeFile+'.test', 'rb'))
'''For debugging purposes
sequences = np.array(pickle.load(open(seqFile, 'rb')))
labels = np.array(pickle.load(open(labelFile, 'rb')))
if len(timeFile) > 0:
times = np.array(pickle.load(open(timeFile, 'rb')))
dataSize = len(labels)
np.random.seed(0)
ind = np.random.permutation(dataSize)
nTest = int(0.15 * dataSize)
nValid = int(0.10 * dataSize)
test_indices = ind[:nTest]
valid_indices = ind[nTest:nTest+nValid]
train_indices = ind[nTest+nValid:]
train_set_x = sequences[train_indices]
train_set_y = labels[train_indices]
test_set_x = sequences[test_indices]
test_set_y = labels[test_indices]
valid_set_x = sequences[valid_indices]
valid_set_y = labels[valid_indices]
train_set_t = None
test_set_t = None
valid_set_t = None
if len(timeFile) > 0:
train_set_t = times[train_indices]
test_set_t = times[test_indices]
valid_set_t = times[valid_indices]
'''
def len_argsort(seq):
return sorted(range(len(seq)), key=lambda x: len(seq[x]))
train_sorted_index = len_argsort(train_set_x)
train_set_x = [train_set_x[i] for i in train_sorted_index]
train_set_y = [train_set_y[i] for i in train_sorted_index]
valid_sorted_index = len_argsort(valid_set_x)
valid_set_x = [valid_set_x[i] for i in valid_sorted_index]
valid_set_y = [valid_set_y[i] for i in valid_sorted_index]
test_sorted_index = len_argsort(test_set_x)
test_set_x = [test_set_x[i] for i in test_sorted_index]
test_set_y = [test_set_y[i] for i in test_sorted_index]
if len(timeFile) > 0:
train_set_t = [train_set_t[i] for i in train_sorted_index]
valid_set_t = [valid_set_t[i] for i in valid_sorted_index]
test_set_t = [test_set_t[i] for i in test_sorted_index]
train_set = (train_set_x, train_set_y, train_set_t)
valid_set = (valid_set_x, valid_set_y, valid_set_t)
test_set = (test_set_x, test_set_y, test_set_t)
return train_set, valid_set, test_set
def calculate_auc(test_model, dataset, options):
inputDimSize = options['inputDimSize']
numClass = options['numClass']
batchSize = options['batchSize']
useTime = options['useTime']
predictTime = options['predictTime']
n_batches = int(np.ceil(float(len(dataset[0])) / float(batchSize)))
aucSum = 0.0
dataCount = 0.0
for index in xrange(n_batches):
batchX = dataset[0][index*batchSize:(index+1)*batchSize]
batchY = dataset[1][index*batchSize:(index+1)*batchSize]
if predictTime:
batchT = dataset[2][index*batchSize:(index+1)*batchSize]
x, y, t, t_label, mask, lengths = padMatrixWithTimePrediction(batchX, batchY, batchT, options)
auc = test_model(x, y, t, t_label, mask, lengths)
elif useTime:
batchT = dataset[2][index*batchSize:(index+1)*batchSize]
x, y, t, mask, lengths = padMatrixWithTime(batchX, batchY, batchT, options)
auc = test_model(x, y, t, mask, lengths)
else:
x, y, mask, lengths = padMatrixWithoutTime(batchX, batchY, options)
auc = test_model(x, y, mask, lengths)
aucSum += auc * len(batchX)
dataCount += float(len(batchX))
return aucSum / dataCount
def train_doctorAI(
seqFile='seqFile.txt',
inputDimSize=20000,
labelFile='labelFile.txt',
numClass=500,
outFile='outFile.txt',
timeFile='timeFile.txt',
predictTime=False,
tradeoff=1.0,
useLogTime=True,
embFile='embFile.txt',
embSize=200,
embFineTune=True,
hiddenDimSize=[200,200],
batchSize=100,
max_epochs=10,
L2_output=0.001,
L2_time=0.001,
dropout_rate=0.5,
logEps=1e-8,
verbose=False
):
options = locals().copy()
if len(timeFile) > 0: useTime = True
else: useTime = False
options['useTime'] = useTime
print 'Initializing the parameters ... ',
params = init_params(options)
tparams = init_tparams(params, options)
print 'Building the model ... ',
f_grad_shared = None
f_update = None
if predictTime and embFineTune:
print 'predicting duration, fine-tuning code representations'
use_noise, x, y, t, t_label, mask, lengths, cost = build_model(tparams, options)
grads = T.grad(cost, wrt=tparams.values())
f_grad_shared, f_update = adadelta(tparams, grads, x, y, mask, lengths, cost, options, t, t_label)
elif predictTime and not embFineTune:
print 'predicting duration, not fine-tuning code representations'
W_emb = theano.shared(params['W_emb'], name='W_emb')
use_noise, x, y, t, t_label, mask, lengths, cost = build_model(tparams, options, W_emb)
grads = T.grad(cost, wrt=tparams.values())
f_grad_shared, f_update = adadelta(tparams, grads, x, y, mask, lengths, cost, options, t, t_label)
elif useTime and embFineTune:
print 'using duration information, fine-tuning code representations'
use_noise, x, y, t, mask, lengths, cost = build_model(tparams, options)
grads = T.grad(cost, wrt=tparams.values())
f_grad_shared, f_update = adadelta(tparams, grads, x, y, mask, lengths, cost, options, t)
elif useTime and not embFineTune:
print 'using duration information, not fine-tuning code representations'
W_emb = theano.shared(params['W_emb'], name='W_emb')
use_noise, x, y, t, mask, lengths, cost = build_model(tparams, options, W_emb)
grads = T.grad(cost, wrt=tparams.values())
f_grad_shared, f_update = adadelta(tparams, grads, x, y, mask, lengths, cost, options, t)
elif not useTime and embFineTune:
print 'not using duration information, fine-tuning code representations'
use_noise, x, y, mask, lengths, cost = build_model(tparams, options)
grads = T.grad(cost, wrt=tparams.values())
f_grad_shared, f_update = adadelta(tparams, grads, x, y, mask, lengths, cost, options)
elif not useTime and not embFineTune:
print 'not using duration information, not fine-tuning code representations'
W_emb = theano.shared(params['W_emb'], name='W_emb')
use_noise, x, y, mask, lengths, cost = build_model(tparams, options, W_emb)
grads = T.grad(cost, wrt=tparams.values())
f_grad_shared, f_update = adadelta(tparams, grads, x, y, mask, lengths, cost, options)
print 'Loading data ... ',
trainSet, validSet, testSet = load_data(seqFile, labelFile, timeFile)
n_batches = int(np.ceil(float(len(trainSet[0])) / float(batchSize)))
print 'done'
if predictTime: test_model = theano.function(inputs=[x, y, t, t_label, mask, lengths], outputs=cost, name='test_model')
elif useTime: test_model = theano.function(inputs=[x, y, t, mask, lengths], outputs=cost, name='test_model')
else: test_model = theano.function(inputs=[x, y, mask, lengths], outputs=cost, name='test_model')
bestValidCrossEntropy = 1e20
bestValidEpoch = 0
testCrossEntropy = 0.0
print 'Optimization start !!'
for epoch in xrange(max_epochs):
iteration = 0
costVector = []
for index in random.sample(range(n_batches), n_batches):
use_noise.set_value(1.)
batchX = trainSet[0][index*batchSize:(index+1)*batchSize]
batchY = trainSet[1][index*batchSize:(index+1)*batchSize]
if predictTime:
batchT = trainSet[2][index*batchSize:(index+1)*batchSize]
x, y, t, t_label, mask, lengths = padMatrixWithTimePrediction(batchX, batchY, batchT, options)
cost = f_grad_shared(x, y, t, t_label, mask, lengths)
elif useTime:
batchT = trainSet[2][index*batchSize:(index+1)*batchSize]
x, y, t, mask, lengths = padMatrixWithTime(batchX, batchY, batchT, options)
cost = f_grad_shared(x, y, t, mask, lengths)
else:
x, y, mask, lengths = padMatrixWithoutTime(batchX, batchY, options)
cost = f_grad_shared(x, y, mask, lengths)
costVector.append(cost)
f_update()
if (iteration % 10 == 0) and verbose: print 'epoch:%d, iteration:%d/%d, cost:%f' % (epoch, iteration, n_batches, cost)
iteration += 1
print 'epoch:%d, mean_cost:%f' % (epoch, np.mean(costVector))
use_noise.set_value(0.)
validAuc = calculate_auc(test_model, validSet, options)
print 'Validation cross entropy:%f at epoch:%d' % (validAuc, epoch)
if validAuc < bestValidCrossEntropy:
bestValidCrossEntropy = validAuc
bestValidEpoch = epoch
bestParams = unzip(tparams)
testCrossEntropy = calculate_auc(test_model, testSet, options)
print 'Test cross entropy:%f at epoch:%d' % (testCrossEntropy, epoch)
tempParams = unzip(tparams)
np.savez_compressed(outFile + '.' + str(epoch), **tempParams)
print 'The best valid cross entropy:%f at epoch:%d' % (bestValidCrossEntropy, bestValidEpoch)
print 'The test cross entropy: %f' % testCrossEntropy
def parse_arguments(parser):
parser.add_argument('seq_file', type=str, metavar='<visit_file>', help='The path to the Pickled file containing visit information of patients')
parser.add_argument('n_input_codes', type=int, metavar='<n_input_codes>', help='The number of unique input medical codes')
parser.add_argument('label_file', type=str, metavar='<label_file>', help='The path to the Pickled file containing label information of patients')
parser.add_argument('n_output_codes', type=int, metavar='<n_output_codes>', help='The number of unique label medical codes')
parser.add_argument('out_file', metavar='out_file', help='The path to the output models. The models will be saved after every epoch')
parser.add_argument('--time_file', type=str, default='', help='The path to the Pickled file containing durations between visits of patients. If you are not using duration information, do not use this option')
parser.add_argument('--predict_time', type=int, default=0, choices=[0,1], help='Use this option if you want the GRU to also predict the time duration until the next visit (0 for false, 1 for true) (default value: 0)')
parser.add_argument('--tradeoff', type=float, default=1.0, help='Tradeoff variable for balancing the two loss functions: code prediction function and duration prediction function (default value: 1.0)')
parser.add_argument('--use_log_time', type=int, default=1, choices=[0,1], help='Use logarithm of time duration to dampen the impact of the outliers (0 for false, 1 for true) (default value: 1)')
parser.add_argument('--embed_file', type=str, default='', help='The path to the Pickled file containing the representation vectors of medical codes. If you are not using medical code representations, do not use this option')
parser.add_argument('--embed_size', type=int, default=200, help='The size of the visit embedding before passing it to the GRU layers. If you are not providing your own medical code vectors, you must specify this value (default value: 200)')
parser.add_argument('--embed_finetune', type=int, default=1, choices=[0,1], help='If you are using randomly initialized code representations, always use this option. If you are using an external medical code representations, and you want to fine-tune them as you train the GRU, use this option as well. (0 for false, 1 for true) (default value: 1)')
parser.add_argument('--hidden_dim_size', type=str, default='[200,200]', help='The size of the hidden layers of the GRU. This is a string argument. For example, [500,400] means you are using a two-layer GRU where the lower layer uses a 500-dimensional hidden layer, and the upper layer uses a 400-dimensional hidden layer. (default value: [200,200])')
parser.add_argument('--batch_size', type=int, default=100, help='The size of a single mini-batch (default value: 100)')
parser.add_argument('--n_epochs', type=int, default=10, help='The number of training epochs (default value: 10)')
parser.add_argument('--L2_softmax', type=float, default=0.001, help='L2 regularization for the softmax function (default value: 0.001)')
parser.add_argument('--L2_time', type=float, default=0.001, help='L2 regularization for the linear regression (default value: 0.001)')
parser.add_argument('--dropout_rate', type=float, default=0.5, help='Dropout rate between GRU hidden layers, and between the final hidden layer and the softmax layer (default value: 0.5)')
parser.add_argument('--log_eps', type=float, default=1e-8, help='A small value to prevent log(0) (default value: 1e-8)')
parser.add_argument('--verbose', action='store_true', help='Print output after every 10 mini-batches (default false)')
args = parser.parse_args()
return args
if __name__ == '__main__':
parser = argparse.ArgumentParser()
args = parse_arguments(parser)
hiddenDimSize = [int(strDim) for strDim in args.hidden_dim_size[1:-1].split(',')]
if args.predict_time and args.time_file == '':
print 'Cannot predict time duration without time file'
sys.exit()
train_doctorAI(
seqFile=args.seq_file,
inputDimSize=args.n_input_codes,
labelFile=args.label_file,
numClass=args.n_output_codes,
outFile=args.out_file,
timeFile=args.time_file,
predictTime=args.predict_time,
tradeoff=args.tradeoff,
useLogTime=args.use_log_time,
embFile=args.embed_file,
embSize=args.embed_size,
embFineTune=args.embed_finetune,
hiddenDimSize=hiddenDimSize,
batchSize=args.batch_size,
max_epochs=args.n_epochs,
L2_output=args.L2_softmax,
L2_time=args.L2_time,
dropout_rate=args.dropout_rate,
logEps=args.log_eps,
verbose=args.verbose
)
|
mp2893/doctorai
|
doctorAI.py
|
Python
|
bsd-3-clause
| 24,190
|
[
"VisIt"
] |
05769fdf34b8f14f16ee003fad1e9a25f78a7df687aae0c6073af4d034cdf66f
|
from lib.typecheck import *
import lib.visit as v
import lib.const as C
from .. import util
from ..meta import class_lookup
from ..meta.program import Program
from ..meta.clazz import Clazz
from ..meta.method import Method
from ..meta.field import Field
from ..meta.statement import Statement
from ..meta.expression import Expression
"""
Replacing collections of interface types with actual classes
"""
class Collection(object):
__impl = { \
C.J.MAP: C.J.TMAP, \
C.J.LST: C.J.LNK, \
C.J.LNK: C.J.LNK, \
C.J.STK: C.J.STK, \
C.J.QUE: C.J.DEQ }
# autobox type parameters and/or replace interfaces with implementing classes
# e.g., List<T> x = new List<T>(); => new ArrayList<T>();
# this should *not* be recursive, e.g., Map<K, List<V>> => TreeMap<K, List<V>>
@staticmethod
def repl_itf(tname, init=True):
if not util.is_collection(tname): return tname
_ids = util.of_collection(tname)
ids = map(util.autoboxing, _ids)
collection = ids[0]
if init: collection = Collection.__impl[collection]
generics = ids[1:] # don't be recursive, like map(repl_itf, ids[1:])
return u"{}<{}>".format(collection, ','.join(generics))
@v.on("node")
def visit(self, node):
"""
This is the generic method to initialize the dynamic dispatcher
"""
@v.when(Program)
def visit(self, node): pass
@v.when(Clazz)
def visit(self, node): pass
@v.when(Field)
def visit(self, node):
node.typ = Collection.repl_itf(node.typ, False)
@v.when(Method)
def visit(self, node): pass
@v.when(Statement)
def visit(self, node): return [node]
@v.when(Expression)
def visit(self, node):
if node.kind == C.E.NEW:
if node.e.kind == C.E.CALL:
mid = unicode(node.e.f)
if util.is_class_name(mid):
node.e.f.id = Collection.repl_itf(mid)
return node
|
plum-umd/java-sketch
|
java_sk/decode/collection.py
|
Python
|
mit
| 1,855
|
[
"VisIt"
] |
a9f5d5acad63ab9ec46f9369d8b545254e0e979091eabbe9f83e02498329a41d
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
:Author: Joshua L. Adelman, University of Pittsburgh
:Contact: jla65@pitt.edu
Sample code to use the routine for fast RMSD & rotational matrix calculation.
For the example provided below, the minimum least-squares RMSD for the two
7-atom fragments should be 0.719106 A.
And the corresponding 3x3 rotation matrix is:
[[ 0.72216358 -0.52038257 -0.45572112]
[ 0.69118937 0.51700833 0.50493528]
[-0.0271479 -0.67963547 0.73304748]]
"""
import numpy as np
import MDAnalysis.lib.qcprot as qcp
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from nose.plugins.attrib import attr
import MDAnalysis.analysis.rms as rms
# Calculate rmsd after applying rotation
def rmsd(a, b):
"""Returns RMSD between two coordinate sets a and b."""
return np.sqrt(np.sum(np.power(a - b, 2)) / a.shape[1])
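# Formula implemented by the helper above (note added): for (3, N) coordinate
# arrays, RMSD(a, b) = sqrt( sum_i |a_i - b_i|^2 / N ), where a.shape[1] == N is
# the number of atoms because coordinates are stored with one column per atom.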
def test_CalcRMSDRotationalMatrix():
# Setup coordinates
frag_a = np.zeros((3, 7), dtype=np.float64)
frag_b = np.zeros((3, 7), dtype=np.float64)
N = 7
frag_a[0][0] = -2.803
frag_a[1][0] = -15.373
frag_a[2][0] = 24.556
frag_a[0][1] = 0.893
frag_a[1][1] = -16.062
frag_a[2][1] = 25.147
frag_a[0][2] = 1.368
frag_a[1][2] = -12.371
frag_a[2][2] = 25.885
frag_a[0][3] = -1.651
frag_a[1][3] = -12.153
frag_a[2][3] = 28.177
frag_a[0][4] = -0.440
frag_a[1][4] = -15.218
frag_a[2][4] = 30.068
frag_a[0][5] = 2.551
frag_a[1][5] = -13.273
frag_a[2][5] = 31.372
frag_a[0][6] = 0.105
frag_a[1][6] = -11.330
frag_a[2][6] = 33.567
frag_b[0][0] = -14.739
frag_b[1][0] = -18.673
frag_b[2][0] = 15.040
frag_b[0][1] = -12.473
frag_b[1][1] = -15.810
frag_b[2][1] = 16.074
frag_b[0][2] = -14.802
frag_b[1][2] = -13.307
frag_b[2][2] = 14.408
frag_b[0][3] = -17.782
frag_b[1][3] = -14.852
frag_b[2][3] = 16.171
frag_b[0][4] = -16.124
frag_b[1][4] = -14.617
frag_b[2][4] = 19.584
frag_b[0][5] = -15.029
frag_b[1][5] = -11.037
frag_b[2][5] = 18.902
frag_b[0][6] = -18.577
frag_b[1][6] = -10.001
frag_b[2][6] = 17.996
# Allocate rotation array
rot = np.zeros((9,), dtype=np.float64)
# Calculate center of geometry
comA = np.sum(frag_a, axis=1) / N
comB = np.sum(frag_b, axis=1) / N
# Center each fragment
frag_a = frag_a - comA.reshape(3, 1)
frag_b = frag_b - comB.reshape(3, 1)
# Calculate rmsd and rotation matrix
qcp_rmsd = qcp.CalcRMSDRotationalMatrix(frag_a.T, frag_b.T, N, rot, None)
#print 'qcp rmsd = ',rmsd
#print 'rotation matrix:'
#print rot.reshape((3,3))
# rotate frag_b to obtain optimal alignment
frag_br = frag_b.T * np.matrix(rot.reshape((3, 3)))
aligned_rmsd = rmsd(frag_br.T, frag_a)
#print 'rmsd after applying rotation: ',rmsd
    assert_almost_equal(aligned_rmsd, 0.719106, 6, "RMSD between fragments A and B does not match expected value.")
expected_rot = np.array([
[0.72216358, -0.52038257, -0.45572112],
[0.69118937, 0.51700833, 0.50493528],
[-0.0271479, -0.67963547, 0.73304748]])
assert_almost_equal(rot.reshape((3, 3)), expected_rot, 6,
"Rotation matrix for aliging B to A does not have expected values.")
def test_innerproduct():
a = 2450.0
b = np.array([430, 452, 474, 500, 526, 552, 570, 600, 630])
c = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]])
d = np.array([[13,14,15], [16,17,18], [19,20,21], [22,23,24]])
e = np.zeros(9,dtype = np.float64)
g = qcp.InnerProduct(e, c.astype(np.float64), d.astype(np.float64), 4, None)
assert_almost_equal(a, g)
assert_array_almost_equal(b, e)
def test_RMSDmatrix():
c = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]])
d = np.array([[13,14,15], [16,17,18], [19,20,21], [22,23,24]])
f = np.zeros(9,dtype = np.float64)
h = 20.73219522556076
i = np.array([0.9977195, 0.02926979, 0.06082009, -.0310942, 0.9990878, 0.02926979, -0.05990789, -.0310942, 0.9977195])
j = qcp.CalcRMSDRotationalMatrix(c.astype(np.float64), d.astype(np.float64), 4 , f, None)
assert_almost_equal(h, j)
assert_array_almost_equal(f, i, 6)
def test_rmsd():
c = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]])
d = np.array([[13,14,15], [16,17,18], [19,20,21], [22,23,24]])
k = np.array([[.9977195, .02926979, .06082009], [-.0310942, .9990878, .02926979], [-.05990789, -.0310942, .9977195]])
l = np.dot(d, k)
m = rms.rmsd(l, c)
h = 20.73219522556076
assert_almost_equal(m, h, 6)
def test_weights():
c = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]])
d = np.array([[13,14,15], [16,17,18], [19,20,21], [22,23,24]])
n = np.array([1,2,3,4]).astype(np.float64)
o = np.zeros(9,dtype=np.float64)
p = qcp.CalcRMSDRotationalMatrix(c.astype(np.float64), d.astype(np.float64), 4, o, n)
assert_almost_equal(p, 32.798779202159416)
q = np.array([0.99861395, .022982, .04735006, -.02409085, .99944556, .022982, -.04679564, -.02409085, .99861395])
np.testing.assert_almost_equal(q, o)
|
alejob/mdanalysis
|
testsuite/MDAnalysisTests/test_qcprot.py
|
Python
|
gpl-2.0
| 6,133
|
[
"MDAnalysis"
] |
fad52c4755916f0db3c7aa3834f48aee9d2732e4461f2eac9f9083da3a017a10
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from common import *
from terrain.steps import reload_the_page
from selenium.common.exceptions import InvalidElementStateException
from contentstore.utils import reverse_course_url
from nose.tools import assert_in, assert_not_in, assert_equal, assert_not_equal # pylint: disable=no-name-in-module
@step(u'I am viewing the grading settings')
def view_grading_settings(step):
world.click_course_settings()
link_css = 'li.nav-course-settings-grading a'
world.css_click(link_css)
@step(u'I add "([^"]*)" new grade')
def add_grade(step, many):
grade_css = '.new-grade-button'
for i in range(int(many)):
world.css_click(grade_css)
@step(u'I delete a grade')
def delete_grade(step):
#grade_css = 'li.grade-specific-bar > a.remove-button'
#range_css = '.grade-specific-bar'
#world.css_find(range_css)[1].mouseover()
#world.css_click(grade_css)
world.browser.execute_script('document.getElementsByClassName("remove-button")[0].click()')
@step(u'Grade list has "([^"]*)" grades$')
def check_grade_values(step, grade_list): # pylint: disable=unused-argument
visible_list = ''.join(
[grade.text for grade in world.css_find('.letter-grade')]
)
assert_equal(visible_list, grade_list, 'Grade lists should be equal')
@step(u'I see I now have "([^"]*)" grades$')
def view_grade_slider(step, how_many):
grade_slider_css = '.grade-specific-bar'
all_grades = world.css_find(grade_slider_css)
assert_equal(len(all_grades), int(how_many))
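# Added note: values captured by the @step regexes arrive as strings, which is
# why the steps above cast them with int(...) before looping or comparing counts.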
@step(u'I move a grading section')
def move_grade_slider(step):
moveable_css = '.ui-resizable-e'
f = world.css_find(moveable_css).first
f.action_chains.drag_and_drop_by_offset(f._element, 100, 0).perform()
@step(u'I see that the grade range has changed')
def confirm_change(step):
range_css = '.range'
all_ranges = world.css_find(range_css)
for i in range(len(all_ranges)):
assert_not_equal(world.css_html(range_css, index=i), '0-50')
@step(u'I change assignment type "([^"]*)" to "([^"]*)"$')
def change_assignment_name(step, old_name, new_name):
name_id = '#course-grading-assignment-name'
index = get_type_index(old_name)
f = world.css_find(name_id)[index]
assert_not_equal(index, -1)
for __ in xrange(len(old_name)):
f._element.send_keys(Keys.END, Keys.BACK_SPACE)
f._element.send_keys(new_name)
@step(u'I go back to the main course page')
def main_course_page(step):
main_page_link = reverse_course_url('course_handler', world.scenario_dict['COURSE'].id)
world.visit(main_page_link)
assert_in('Course Outline', world.css_text('h1.page-header'))
@step(u'I do( not)? see the assignment name "([^"]*)"$')
def see_assignment_name(step, do_not, name):
# TODO: rewrite this once grading has been added back to the course outline
pass
# assignment_menu_css = 'ul.menu > li > a'
    # # First assert that it is there, may take a bit to redraw
# assert_true(
# world.css_find(assignment_menu_css),
# msg="Could not find assignment menu"
# )
#
# assignment_menu = world.css_find(assignment_menu_css)
# allnames = [item.html for item in assignment_menu]
# if do_not:
# assert_not_in(name, allnames)
# else:
# assert_in(name, allnames)
@step(u'I delete the assignment type "([^"]*)"$')
def delete_assignment_type(step, to_delete):
delete_css = '.remove-grading-data'
world.css_click(delete_css, index=get_type_index(to_delete))
@step(u'I add a new assignment type "([^"]*)"$')
def add_assignment_type(step, new_name):
add_button_css = '.add-grading-data'
world.css_click(add_button_css)
name_id = '#course-grading-assignment-name'
new_assignment = world.css_find(name_id)[-1]
new_assignment._element.send_keys(new_name)
@step(u'I set the assignment weight to "([^"]*)"$')
def set_weight(step, weight):
weight_id = '#course-grading-assignment-gradeweight'
weight_field = world.css_find(weight_id)[-1]
old_weight = world.css_value(weight_id, -1)
for count in range(len(old_weight)):
weight_field._element.send_keys(Keys.END, Keys.BACK_SPACE)
weight_field._element.send_keys(weight)
@step(u'the assignment weight is displayed as "([^"]*)"$')
def verify_weight(step, weight):
weight_id = '#course-grading-assignment-gradeweight'
assert_equal(world.css_value(weight_id, -1), weight)
@step(u'I do not see the changes persisted on refresh$')
def changes_not_persisted(step):
reload_the_page(step)
name_id = '#course-grading-assignment-name'
assert_equal(world.css_value(name_id), 'Homework')
@step(u'I see the assignment type "(.*)"$')
def i_see_the_assignment_type(_step, name):
assignment_css = '#course-grading-assignment-name'
assignments = world.css_find(assignment_css)
types = [ele['value'] for ele in assignments]
assert_in(name, types)
@step(u'I change the highest grade range to "(.*)"$')
def change_grade_range(_step, range_name):
range_css = 'span.letter-grade'
grade = world.css_find(range_css).first
grade.value = range_name
@step(u'I see the highest grade range is "(.*)"$')
def i_see_highest_grade_range(_step, range_name):
range_css = 'span.letter-grade'
grade = world.css_find(range_css).first
assert_equal(grade.value, range_name)
@step(u'I cannot edit the "Fail" grade range$')
def cannot_edit_fail(_step):
range_css = 'span.letter-grade'
ranges = world.css_find(range_css)
assert_equal(len(ranges), 2)
assert_not_equal(ranges.last.value, 'Failure')
# try to change the grade range -- this should throw an exception
try:
ranges.last.value = 'Failure'
except (InvalidElementStateException):
pass # We should get this exception on failing to edit the element
# check to be sure that nothing has changed
ranges = world.css_find(range_css)
assert_equal(len(ranges), 2)
assert_not_equal(ranges.last.value, 'Failure')
@step(u'I change the grace period to "(.*)"$')
def i_change_grace_period(_step, grace_period):
grace_period_css = '#course-grading-graceperiod'
ele = world.css_find(grace_period_css).first
# Sometimes it takes a moment for the JavaScript
# to populate the field. If we don't wait for
# this to happen, then we can end up with
# an invalid value (e.g. "00:0048:00")
# which prevents us from saving.
assert_true(world.css_has_value(grace_period_css, "00:00"))
# Set the new grace period
ele.value = grace_period
@step(u'I see the grace period is "(.*)"$')
def the_grace_period_is(_step, grace_period):
grace_period_css = '#course-grading-graceperiod'
# The default value is 00:00
# so we need to wait for it to change
world.wait_for(
lambda _: world.css_has_value(grace_period_css, grace_period)
)
def get_type_index(name):
name_id = '#course-grading-assignment-name'
all_types = world.css_find(name_id)
for index in range(len(all_types)):
if world.css_value(name_id, index=index) == name:
return index
return -1
|
B-MOOC/edx-platform
|
cms/djangoapps/contentstore/features/grading.py
|
Python
|
agpl-3.0
| 7,217
|
[
"VisIt"
] |
1991d8e6f2d44bda59daec11d3b662d71c09bba2f809ffeab2ac62c9f84a36ed
|
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import unittest as ut
import numpy as np
import espressomd
from espressomd import electrostatics
import tests_common
@ut.skipIf(not espressomd.has_features(["ELECTROSTATICS"]),
"Features not available, skipping test!")
class ElectrostaticInteractionsTests(ut.TestCase):
# Handle to espresso system
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
def setUp(self):
self.system.box_l = [20, 20, 20]
self.system.time_step = 0.01
if not self.system.part.exists(0):
self.system.part.add(id=0, pos=(1.0, 2.0, 2.0), q=1)
if not self.system.part.exists(1):
self.system.part.add(
id=1, pos=(3.0, 2.0, 2.0), q=-1)
print("ut.TestCase setUp")
def calc_dh_potential(self, r, df_params):
kT = 1.0
q1 = self.system.part[0].q
q2 = self.system.part[1].q
u = np.zeros_like(r)
# r<r_cut
i = np.where(r < df_params['r_cut'])[0]
u[i] = df_params['prefactor'] * kT * q1 * \
q2 * np.exp(-df_params['kappa'] * r[i]) / r[i]
return u
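    # For reference, calc_dh_potential above evaluates the truncated
    # Debye-Hueckel pair energy (a restatement for readability, not an
    # additional check):
    #   U(r) = prefactor * kT * q1 * q2 * exp(-kappa * r) / r   for r < r_cut
    #   U(r) = 0                                                 otherwise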
@ut.skipIf(not espressomd.has_features(["P3M"]),
"Features not available, skipping test!")
def test_p3m(self):
self.system.part[0].pos = [1.0, 2.0, 2.0]
self.system.part[1].pos = [3.0, 2.0, 2.0]
        # reference values
p3m_energy = -0.501062398379
p3m_force = 2.48921612e-01
test_P3M = tests_common.generate_test_for_class(
self.system,
electrostatics.P3M,
dict(
accuracy=9.910945054074526e-08,
mesh=[22, 22, 22],
cao=7,
r_cut=8.906249999999998,
alpha=0.387611049779351,
tune=False))
p3m = espressomd.electrostatics.P3M(prefactor=1.0,
accuracy=9.910945054074526e-08,
mesh=[22, 22, 22],
cao=7,
r_cut=8.906249999999998,
alpha=0.387611049779351,
tune=False)
self.system.actors.add(p3m)
self.assertAlmostEqual(self.system.analysis.energy()['coulomb'],
p3m_energy)
# need to update forces
self.system.integrator.run(0)
np.testing.assert_allclose(np.copy(self.system.part[0].f),
[p3m_force, 0, 0], atol=1E-5)
np.testing.assert_allclose(np.copy(self.system.part[1].f),
[-p3m_force, 0, 0], atol=1E-10)
self.system.actors.remove(p3m)
def test_dh(self):
dh_params = dict(prefactor=1.0,
kappa=2.0,
r_cut=2.0)
test_DH = tests_common.generate_test_for_class(
self.system,
electrostatics.DH,
dh_params)
dh = espressomd.electrostatics.DH(
prefactor=dh_params[
'prefactor'],
kappa=dh_params['kappa'],
r_cut=dh_params['r_cut'])
self.system.actors.add(dh)
dr = 0.001
r = np.arange(.5, 1.01 * dh_params['r_cut'], dr)
u_dh = self.calc_dh_potential(r, dh_params)
f_dh = -np.gradient(u_dh, dr)
        # zero the discontinuity, and re-evaluate the derivative as a
        # backwards difference
i_cut = np.argmin((dh_params['r_cut'] - r)**2)
f_dh[i_cut] = 0
f_dh[i_cut - 1] = (u_dh[i_cut - 2] - u_dh[i_cut - 1]) / dr
u_dh_core = np.zeros_like(r)
f_dh_core = np.zeros_like(r)
# need to update forces
for i, ri in enumerate(r):
self.system.part[1].pos = self.system.part[0].pos + [ri, 0, 0]
self.system.integrator.run(0)
u_dh_core[i] = self.system.analysis.energy()['coulomb']
f_dh_core[i] = self.system.part[0].f[0]
np.testing.assert_allclose(u_dh_core,
u_dh,
atol=1e-7)
np.testing.assert_allclose(f_dh_core,
-f_dh,
atol=1e-2)
self.system.actors.remove(dh)
if __name__ == "__main__":
print("Features: ", espressomd.features())
ut.main()
|
hmenke/espresso
|
testsuite/python/electrostaticInteractions.py
|
Python
|
gpl-3.0
| 5,238
|
[
"ESPResSo"
] |
a1ace35d78809061f820fbac7b23093784589d0b6c4c0ad437b504f666a1c8be
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1453357629.056074
__CHEETAH_genTimestamp__ = 'Thu Jan 21 15:27:09 2016'
__CHEETAH_src__ = '/home/babel/Build/Test/OpenPLi5/openpli5.0/build/tmp/work/tmnanoseplus-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+186ea358f6-r0/git/plugin/controllers/views/web/getcurrlocation.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Jan 21 15:27:08 2016'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class getcurrlocation(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(getcurrlocation, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_30545535 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2locations>
\t<e2location>''')
_v = VFFSL(SL,"location",True) # u'$location' on line 4, col 14
if _v is not None: write(_filter(_v, rawExpr=u'$location')) # from line 4, col 14.
write(u'''</e2location>
</e2locations>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_30545535
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_getcurrlocation= 'respond'
## END CLASS DEFINITION
if not hasattr(getcurrlocation, '_initCheetahAttributes'):
templateAPIClass = getattr(getcurrlocation, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(getcurrlocation)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=getcurrlocation()).run()
|
MOA-2011/e2openplugin-OpenWebif
|
plugin/controllers/views/web/getcurrlocation.py
|
Python
|
gpl-2.0
| 5,020
|
[
"VisIt"
] |
63c70a5eb3b6bf08cdfca0a2f8b9884b2ee19bf1cbc67f48daa1e66215f779e9
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# Convolve MTSS rotamers with MD trajectory.
# Copyright (c) 2011-2017 Philip Fowler and AUTHORS
# Published under the GNU Public Licence, version 2 (or higher)
#
# Includes a rotamer library for MTSS at 298 K by Gunnar Jeschke,
# which is published under the same licence by permission.
from __future__ import absolute_import, division, print_function
import MDAnalysis
import MDAnalysis.analysis.distances
import numpy as np
import os.path
from . import library
from .convolve import RotamerDistancesBase
import logging
logger = logging.getLogger("MDAnalysis.app")
class RotamerDistances(RotamerDistancesBase):
"""Calculation of distance distributions between two spin labels."""
def __init__(self, *args, **kwargs):
"""RotamerDistances(universe, residue_list, **kwargs)
:Arguments:
*universe*
:class:`MDAnalysis.Universe`
*residue*
              residue number ``(r1)`` of the spin-labelled site
:Keywords:
*dcdFilenameAll*
name of the temporary files with rotamers fitted [``'trj'``]
*dcdFilenameNoClashes*
              name of the temporary files with the clash-free fitted rotamers [``'trj'``]
*outputFileRawDistances*
stem of the name of the file containing the raw distances
              (the final name will be ``<outputFileRawDistances>-<resid>-rawDistances.dat``)
[``'distances'``]
*libname*
library name; the library is loaded with
:class:`rotcon.library.RotamerLibrary` [``'MTSSL 298K'``]
*discardFrames*
skip initial frames < *discardFrames* [``0``]
*clashDistance*
discard rotamer if any distance between rotamer atoms
and protein atoms is < *clashDistance*. Values down to
1.5 Å are reasonable. The default is conservative. [``2.2`` Å]
          *useNOelectron*
              True = the distance is averaged over the N1 and O1 positions
                     (approximating the N-O midpoint) [default]
              False = only the N1 atom is used for the distance calculation
"""
proteinStructure = args[0]
residue = args[1]
outputFileRawDistances, ext = os.path.splitext(kwargs.pop('outputFileRawDistances', 'distances'))
ext = ext or ".dat"
self.outputFileRawDistances = "{0}-{1}-rawDistances{2}".format(outputFileRawDistances,
residue, ext)
dcdFilenameAll, ext = os.path.splitext(kwargs.pop('dcdFilenameAll', 'trj'))
ext = ext or ".dcd"
tmptrj = "{0}-{1}-all{2}".format(dcdFilenameAll, residue, ext)
dcdFilenameNoClashes, ext = os.path.splitext(kwargs.pop('dcdFilenameNoClashes', 'trj'))
ext = ext or ".dcd"
tmptrjNoClashes = "{0}-{1}-noClashes{2}".format(dcdFilenameNoClashes, residue, ext)
kwargs.setdefault('discardFrames', 0)
self.clashDistance = kwargs.pop('clashDistance', 2.2) # Ångström
useNOelectron = kwargs.pop('useNOelectron', True)
self.lib = library.RotamerLibrary(kwargs.get('libname', 'MTSSL 298K'))
# setup the main lists
distances = []
weights = []
logger.info("Starting rotamer distance analysis of trajectory "
"{0}...".format(proteinStructure.trajectory.filename))
logger.info("clashDistance = {0} A; rotamer library = '{1}'".format(self.clashDistance,
self.lib.name))
logger.debug("Temporary trajectories for rotamers 1 and 2 "
"(only last frame of MD trajectory): {0[0]} and {0[1]}".format(tmptrj))
progressmeter = MDAnalysis.log.ProgressMeter(proteinStructure.trajectory.n_frames, interval=1)
for protein in proteinStructure.trajectory:
progressmeter.echo(protein.frame)
if protein.frame < kwargs['discardFrames']:
continue
# define the atoms used to fit the rotamers. Note that an
# ordered list has to be created as the ordering of C CA N is
# different in both. Fit the rotamers onto the protein:
self.fit_rotamers(self.lib.rotamers, proteinStructure, residue, tmptrj)
rotamersSite1 = MDAnalysis.Universe(self.lib.rotamers.filename, tmptrj)
(rotamer1_clash, rotamer1_clash_total) = self.find_clashing_rotamers(rotamersSite1,
proteinStructure, residue)
proteinHN = proteinStructure.select_atoms("protein and name HN") # or HN
# define the atoms to measure the distances between
rotamer1nitrogen = rotamersSite1.select_atoms("name N1")
rotamer1oxygen = rotamersSite1.select_atoms("name O1")
# define the atoms to measure the distances between
rotamer1All = rotamersSite1.select_atoms("all")
with MDAnalysis.Writer("{}".format(tmptrjNoClashes), rotamer1All.n_atoms) as S1:
# loop over all the rotamers on the first site
for rotamer1 in rotamersSite1.trajectory:
if not rotamer1_clash[rotamer1.frame]:
S1.write(rotamersSite1.atoms)
for nh in proteinHN:
atom = proteinHN.select_atoms('resid {}'.format(nh.resnum))
(a, b, distance_nitrogen) = \
MDAnalysis.analysis.distances.dist(rotamer1nitrogen, atom)
if useNOelectron == True:
(a, b, distance_oxygen) = \
MDAnalysis.analysis.distances.dist(rotamer1oxygen, atom)
distance = np.mean([distance_nitrogen[0], distance_oxygen[0]])
elif useNOelectron == False:
distance = distance_nitrogen[0]
distances.append([nh.resnum, distance])
# check that at least two distances have been measured
if len(distances) < 2:
logger.critical("no distances found between the spin pair!")
raise RuntimeError("no distances found between the spin pair!")
# should this really be an exception?
with open(self.outputFileRawDistances, 'w') as OUTPUT:
for distance in distances:
OUTPUT.write("{0[0]}\t{0[1]}\n".format(distance))
def plot(self, **kwargs):
"""Load data file and plot"""
import matplotlib.pyplot as plt
filename = kwargs.pop('filename', None)
fig = kwargs.pop('fig', None)
if fig is None:
fig = plt.figure(figsize=(5, 5))
ax = kwargs.pop('ax', None)
if ax is None:
ax = fig.add_subplot(111)
data = np.loadtxt(self.outputFileRawDistances, unpack=True)
dataResidues = dict()
for j in range(0, len(data[0])):
if int(data[0][j]) in dataResidues:
dataResidues[int(data[0][j])].append(data[1][j])
else:
dataResidues[int(data[0][j])] = [data[1][j]]
for data in dataResidues:
ax.scatter(data, np.min(dataResidues[data]), color='blue')
ax.scatter(data, np.max(dataResidues[data]), color='red')
if filename:
ax.figure.savefig(filename)
logger.info("Plotted min and max distances to {0}".format(filename))
return ax
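# Hedged usage sketch (the file names and residue number are placeholders,
# not part of this module): a minimal driver showing how RotamerDistances
# might be invoked on a topology/trajectory pair, guarded so it never runs
# on import.
if __name__ == "__main__":
    import MDAnalysis as mda
    universe = mda.Universe("protein.pdb", "protein.dcd")  # placeholder inputs
    analysis = RotamerDistances(universe, 47,
                                outputFileRawDistances="distances",
                                clashDistance=2.2,
                                useNOelectron=True)
    analysis.plot(filename="distances-47.pdf")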
|
MDAnalysis/RotamerConvolveMD
|
rotcon/convolve_pre.py
|
Python
|
gpl-2.0
| 7,731
|
[
"MDAnalysis"
] |
09a937e4ba193038e4ca190949277f9d7e3eae79d41d56fdab1ee5a4d21fb1a6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Initialization of LFPy, a Python module for simulating
extracellular potentials.
Group of Computational Neuroscience,
Department of Mathematical Sciences and Technology,
Norwegian University of Life Sciences.
Copyright (C) 2012 Computational Neuroscience Group, NMBU.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
:Classes:
* ``Cell`` - object built on top of NEURON representing biological neuron
* ``TemplateCell`` - Similar to ``Cell``, but for models using cell templates
* ``NetworkCell`` - Similar to ``TemplateCell`` with some attributes and
methods for spike communication between parallel RANKs
* ``PointProcess`` - Parent class of ``Synapse`` and ``StimIntElectrode``
* ``Synapse`` - Convenience class for inserting synapses onto ``Cell``
objects
* ``StimIntElectrode`` - Convenience class for inserting stimulating
electrodes into ``Cell`` objects
* ``Network`` - Class for creating distributed populations of cells and
handling connections between cells in populations
* ``NetworkPopulation`` - Class representing group of ``Cell`` objects
distributed across MPI RANKs
* ``RecExtElectrode`` - Class for setup of simulations of extracellular
potentials
* ``RecMEAElectrode`` - Class for setup of simulations of in vitro (slice)
extracellular potentials
* ``PointSourcePotential`` - Base forward-model for extracellular potentials
assuming point current sources in conductive media
* ``LineSourcePotential`` - Base forward-model for extracellular potentials
assuming line current sources in conductive media
* ``OneSphereVolumeConductor`` - For computing extracellular potentials
within and outside a homogeneous sphere
* ``CurrentDipoleMoment`` - For computing the current dipole moment,
* ``FourSphereVolumeConductor`` - For computing extracellular potentials in
four-sphere head model (brain, CSF, skull, scalp)
* ``InfiniteVolumeConductor`` - To compute extracellular potentials with
current dipoles in infinite volume conductor
* ``MEG`` - Class for computing magnetic field from current dipole moment
:Modules:
* ``lfpcalc`` - Misc. functions used by RecExtElectrode class
* ``tools`` - Some convenient functions
* ``inputgenerators`` - Functions for synaptic input time generation
* ``eegmegcalc`` - Classes for calculating current dipole moment vector
P and P_tot from currents and distances.
* ``run_simulations`` - Functions to run NEURON simulations
"""
from .version import version as __version__
from .pointprocess import Synapse, PointProcess, StimIntElectrode
from lfpykit import RecExtElectrode, RecMEAElectrode, CurrentDipoleMoment, \
PointSourcePotential, LineSourcePotential, OneSphereVolumeConductor, \
LaminarCurrentSourceDensity, VolumetricCurrentSourceDensity
from .cell import Cell
from .templatecell import TemplateCell
from .network import NetworkCell, NetworkPopulation, Network
from .test import _test as run_tests
from .eegmegcalc import FourSphereVolumeConductor, InfiniteVolumeConductor, \
MEG, NYHeadModel
from lfpykit import lfpcalc
from . import tools
from . import inputgenerators
from . import run_simulation
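# Hedged usage sketch ('cell.hoc' is a placeholder morphology file, not part
# of the package): the typical workflow couples a Cell with a Synapse before
# computing extracellular signals, e.g.
#
#   import numpy as np
#   import LFPy
#   cell = LFPy.Cell(morphology='cell.hoc', tstop=100.)
#   synapse = LFPy.Synapse(cell, idx=0, syntype='ExpSyn', weight=0.01)
#   synapse.set_spike_times(np.array([20.]))
#   cell.simulate(rec_imem=True)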
|
espenhgn/LFPy
|
LFPy/__init__.py
|
Python
|
gpl-3.0
| 3,646
|
[
"NEURON"
] |
535349461bb4a969ae96de167aaf7b6c37ecbf1b24d6ecd3baf72439e551a97b
|
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from camelot.view.model_thread import post
from choiceseditor import ChoicesEditor
class OneToManyChoicesEditor(ChoicesEditor):
def __init__(self, parent, target=None, nullable=True, **kwargs):
super(OneToManyChoicesEditor, self).__init__(parent, **kwargs)
        assert target is not None
self._target = target
self._nullable = nullable
post(self.get_choices, self.set_choices)
def get_choices(self):
additional_choices = []
if self._nullable:
additional_choices = [(None, '')]
return additional_choices + [(o, unicode(o)) for o in self._target.query.all()]
def set_field_attributes(self, editable=True, **kwargs):
"""Makes sure choices are not reset when changing the
field attributes"""
self.setEnabled(editable!=False)
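# Hedged usage sketch (MyRelatedEntity is a placeholder Camelot/SQLAlchemy
# model, not part of this module): a delegate would typically construct the
# editor with the target class whose rows populate the combo box, e.g.
#
#   editor = OneToManyChoicesEditor(parent, target=MyRelatedEntity,
#                                   nullable=True)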
|
kurtraschke/camelot
|
camelot/view/controls/editors/onetomanychoiceseditor.py
|
Python
|
gpl-2.0
| 1,903
|
[
"VisIt"
] |
1808de1c3a7da4dbfb80a861ef8dad5cf61d39e6a2eb3c1626897384fd2d6b53
|
#!/usr/bin/env python
'''unit testing code for pysam.
Execute in the :file:`tests` directory as it requires the Makefile
and data files located there.
'''
import sys, os, shutil, gzip
import pysam
import unittest
import itertools
import subprocess
class TestVCFIterator( unittest.TestCase ):
filename = "example.vcf40.gz"
columns = ("contig", "pos", "id",
"ref", "alt", "qual",
"filter", "info", "format" )
def testRead( self ):
self.vcf = pysam.VCF()
self.vcf.connect( self.filename )
for x in self.vcf.fetch():
print str(x)
print x.pos
print x.alt
print x.id
print x.qual
print x.filter
print x.info
print x.format
for s in x.samples:
print s, x[s]
if __name__ == "__main__":
unittest.main()
def Test():
vcf33 = """##fileformat=VCFv3.3
##fileDate=20090805
##source=myImputationProgramV3.1
##reference=1000GenomesPilot-NCBI36
##phasing=partial
##INFO=NS,1,Integer,"Number of Samples With Data"
##INFO=DP,1,Integer,"Total Depth"
##INFO=AF,-1,Float,"Allele Frequency"
##INFO=AA,1,String,"Ancestral Allele"
##INFO=DB,0,Flag,"dbSNP membership, build 129"
##INFO=H2,0,Flag,"HapMap2 membership"
##FILTER=q10,"Quality below 10"
##FILTER=s50,"Less than 50% of samples have data"
##FORMAT=GT,1,String,"Genotype"
##FORMAT=GQ,1,Integer,"Genotype Quality"
##FORMAT=DP,1,Integer,"Read Depth"
##FORMAT=HQ,2,Integer,"Haplotype Quality"
#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tNA00001\tNA00002\tNA00003
20\t14370\trs6054257\tG\tA\t29\t0\tNS=3;DP=14;AF=0.5;DB;H2\tGT:GQ:DP:HQ\t0|0:48:1:51,51\t1|0:48:8:51,51\t1/1:43:5:-1,-1
17\t17330\t.\tT\tA\t3\tq10\tNS=3;DP=11;AF=0.017\tGT:GQ:DP:HQ\t0|0:49:3:58,50\t0|1:3:5:65,3\t0/0:41:3:-1,-1
20\t1110696\trs6040355\tA\tG,T\t67\t0\tNS=2;DP=10;AF=0.333,0.667;AA=T;DB\tGT:GQ:DP:HQ\t1|2:21:6:23,27\t2|1:2:0:18,2\t2/2:35:4:-1,-1
17\t1230237\t.\tT\t.\t47\t0\tNS=3;DP=13;AA=T\tGT:GQ:DP:HQ\t0|0:54:7:56,60\t0|0:48:4:51,51\t0/0:61:2:-1,-1
20\t1234567\tmicrosat1\tG\tD4,IGA\t50\t0\tNS=3;DP=9;AA=G\tGT:GQ:DP\t0/1:35:4\t0/2:17:2\t1/1:40:3"""
vcf40 = """##fileformat=VCFv4.0
##fileDate=20090805
##source=myImputationProgramV3.1
##reference=1000GenomesPilot-NCBI36
##phasing=partial
##INFO=<ID=NS,Number=1,Type=Integer,Description="Number of Samples With Data">
##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">
##INFO=<ID=AF,Number=.,Type=Float,Description="Allele Frequency">
##INFO=<ID=AA,Number=1,Type=String,Description="Ancestral Allele">
##INFO=<ID=DB,Number=0,Type=Flag,Description="dbSNP membership, build 129">
##INFO=<ID=H2,Number=0,Type=Flag,Description="HapMap2 membership">
##FILTER=<ID=q10,Description="Quality below 10">
##FILTER=<ID=s50,Description="Less than 50% of samples have data">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
##FORMAT=<ID=GQ,Number=1,Type=Integer,Description="Genotype Quality">
##FORMAT=<ID=DP,Number=1,Type=Integer,Description="Read Depth">
##FORMAT=<ID=HQ,Number=2,Type=Integer,Description="Haplotype Quality">
#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tNA00001\tNA00002\tNA00003
M\t1230237\t.\tT\t.\t47\tPASS\tNS=3;DP=13;AA=T\tGT:GQ:DP:HQ\t0|0:54:7:56,60\t0|0:48:4:51,51\t0/0:61:2
20\t1234567\tmicrosat1\tGTCT\tG,GTACT\t50\tPASS\tNS=3;DP=9;AA=G\tGT:GQ:DP\t0/1:35:4\t0/2:17:2\t1/1:40:3
17\t14370\trs6054257\tG\tA\t29\tPASS\tNS=3;DP=14;AF=0.5;DB;H2\tGT:GQ:DP:HQ\t0|0:48:1:51,51\t1|0:48:8:51,51\t1/1:43:5:.,.
20\t17330\t.\tT\tA\t3\tq10\tNS=3;DP=11;AF=0.017\tGT:GQ:DP:HQ\t0|0:49:3:58,50\t0|1:3:5:65,3\t0/0:41:3
20\t1110696\trs6040355\tA\tG,T\t67\tPASS\tNS=2;DP=10;AF=0.333,0.667;AA=T;DB\tGT:GQ:DP:HQ\t1|2:21:6:23,27\t2|1:2:0:18,2\t2/2:35:4"""
if False:
print "Parsing v3.3 file:"
print vcf33
vcf = VCFFile()
lines = [data for data in vcf.parse( (line+"\n" for line in vcf33.split('\n') ) )]
print "Writing v3.3 file:"
vcf.write( sys.stdout, lines )
if False:
print "Parsing v4.0 file:"
print vcf40
vcf = VCFFile()
lines = [data for data in vcf.parse( (line+"\n" for line in vcf40.split('\n') ) )]
print "Writing v4.0 file:"
vcf.write( sys.stdout, lines )
if True:
print "Parsing v3.3 file:"
print vcf33
vcf = sortedVCFFile()
lines = [data for data in vcf.parse( (line+"\n" for line in vcf33.split('\n') ) )]
print "Writing v3.3 file:"
vcf.write( sys.stdout, lines )
if True:
print "Parsing v4.0 file:"
print vcf40
vcf = sortedVCFFile()
lines = [data for data in vcf.parse( (line+"\n" for line in vcf40.split('\n') ) )]
print "Writing v4.0 file:"
vcf.write( sys.stdout, lines )
|
pkaleta/pysam
|
tests/vcf_test.py
|
Python
|
mit
| 4,827
|
[
"pysam"
] |
6b3005ce19f7ad3f86d9ae94cd6643eaee260660a476a00d65242fb192bde0e9
|
# -*- coding: utf-8 -*-
#
# MOOSE documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 1 19:05:47 2014.
# updated on Thr Jan 21 00:30:10 2016
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# Workaround to fix a bug where extensions weren't added
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from sphinx.util import compat
compat.make_admonition = BaseAdmonition
import subprocess
import os
import sys
import sphinx_rtd_theme
import mock
conf_dir_ = os.path.dirname( __file__ )
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('/home/harsha/MOOSE/FullMoose/moose-core/python'))
sys.path.append(os.path.abspath('/home/harsha/MOOSE/moose-examples-july/snippets'))
sys.path.append(os.path.abspath('/home/harsha/MOOSE/moose-examples-july/tutorials/ChemicalOscillators'))
sys.path.append(os.path.abspath('/home/harsha/MOOSE/moose-examples-july/tutorials/ChemicalBistables'))
sys.path.append(os.path.abspath('/home/harsha/MOOSE/moose-examples-july/tutorials/ExcInhNet'))
sys.path.append(os.path.abspath('/home/harsha/MOOSE/moose-examples-july/neuroml/lobster_pyloric'))
sys.path.append(os.path.abspath('/home/harsha/MOOSE/moose-examples-july/tutorials/ExcInhNetCaPlasticity'))
sys.path.append(os.path.join(conf_dir_, 'Extensions') )
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'hidden_code_block'
]
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MOOSE'
copyright = u'2018, Upinder Bhalla, Niraj Dudani, Subhasis Ray, ' + \
            'Aditya Gilra, Harsha Rani, Aviral Goel, Dilawar Singh, ' + \
            'Malav Shah, Dhruva Gowda Storz'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.2'
# The full version, including alpha/beta/rc tags.
release = 'chennapoda (3.2.rc)'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'English'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {'stickysidebar': 'true',
# 'sidebarwidth': '300'}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'images/moose_logo.png'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MOOSE'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '11pt',
# Additional stuff for the LaTeX preamble.
'preamble': r'''
\usepackage{libertine}
\usepackage{mathpazo}
\usepackage{epstopdf}
% Convert GIF to PNG in pdf.
\epstopdfDeclareGraphicsRule{.gif}{png}{.png}{convert gif:#1 png:\OutputFile}
\AppendGraphicsExtensions{.gif}
'''
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'MOOSE.tex', u'MOOSE User Manual',
r'Upinder Bhalla, Niraj Dudani, Subhasis Ray \\Aditya Gilra,Harsha Rani, Aviral Goel \\ Dilawar Singh, Malav Shah, Dhruva Gowda Storz'
, 'manual'
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = 'images/moose_logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
# If true, show page references after internal links.
latex_show_pagerefs = True
# If true, show URL addresses after external links.
latex_show_urls = True
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'moose', u'MOOSE User Manual',
[u'Upinder Bhalla, Niraj Dudani, Subhasis Ray, Aditya Gilra,Harsha Rani, Aviral Goel, Dilawar Singh, Malav Shah, Dhruva Gowda Storz'], 1)
]
# If true, show URL addresses after external links.
man_show_urls = True
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'MOOSE', u'MOOSE Documentation',
u'Upinder Bhalla, Niraj Dudani, Subhasis Ray, Aditya Gilra,Harsha Rani, Aviral Goel, Dilawar Singh, Malav Shah, Dhruva Gowda Storz'
, 'MOOSE'
, 'MOOSE is the Multiscale Object-Oriented Simulation Environment.',
'Science'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
#numpydoc option
numpydoc_show_class_members = False
# autodoc options to mock MOOSE module
autodoc_mock_imports = [ 'numpy' , 'moose.sbml' , 'moose.genesis' , 'moose.LIF'
, 'moogli.extensions.moose' , 'extensions.moose', 'moose' , 'moose.SBML'
, 'pylab' , 'moose.genesis' , 'datetime' , 'getpass' , 'h5py'
, 'matplotlib' , 'squid' , 'PyQt4' , 'moogli' , 'moose.utils'
, 'math' , 'SquidAxon' , '_moogli' , 'XRRRates' , 'neuroml.NeuroML'
,'neuroml' , 'rdesigneur' , 'pyplot' , 'gnuplot' , 'cm'
, 'matplotlib.pyplot' , 'matplotlib.image' , 'matplotlib.cm' , 'shapes'
, 'chemUtil.add_Delete_ChemicalSolver'
]
# exclude the auto-generated reference files (keep the build directory excluded as well)
exclude_patterns = ['_build', '/docs/source/user/py/references/*.rst']
#run the doxygen thingy
import subprocess, os
read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'
#if not read_the_docs_build:
# subprocess.call('cd doxygen; echo HELLO......................; doxygen Doxyfile', shell=True)
|
BhallaLab/moose
|
docs/source/conf.py
|
Python
|
gpl-3.0
| 10,678
|
[
"MOOSE"
] |
dfc8b8457143b23fa9152dd3093d9b7ebe039fb65e33adedad09ef5de2f77731
|
"""Monitoring Daemon."""
import logging
import time
from datetime import datetime
import requests
from daemonize import Daemonize
from lzproduction.utils import logging_utils
from lzproduction.sql.statuses import LOCALSTATUS, SERVICESTATUS
from lzproduction.sql.utils import db_session
from lzproduction.sql.tables import Requests, Services, create_all_tables
MINS = 60
class MonitoringDaemon(Daemonize):
"""Monitoring Daemon."""
def __init__(self, dburl, delay, cert, verify=False, **kwargs):
"""Initialisation."""
super(MonitoringDaemon, self).__init__(action=self.main, **kwargs)
self.dburl = dburl
self.delay = delay
self.cert = cert
self.verify = verify
def exit(self):
"""Update the monitoringd status on exit."""
with db_session(reraise=False) as session:
session.query(Services)\
.filter(Services.name == "monitoringd")\
.update({'status': SERVICESTATUS.Down})
super(MonitoringDaemon, self).exit()
@staticmethod
def reset_loggers():
"""Clear all non-root log handlers and set level to NOTSET."""
for _, log in logging_utils.loggers_not_at_level(logging.NOTSET):
log.setLevel(logging.NOTSET)
for _, log in logging_utils.loggers_with_handlers():
log.handlers = []
logging.getLogger("sqlalchemy").setLevel(logging.WARNING)
logging.getLogger("cherrypy").setLevel(logging.WARNING) # Why is cherrypy present?
logging.getLogger("git").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("stomp.py").setLevel(logging.WARNING)
def main(self):
"""Daemon main function."""
MonitoringDaemon.reset_loggers() # use only the root loggers handler.
# Setup tables within the daemon otherwise the file descriptor
# will be closed
create_all_tables(self.dburl)
try:
while True:
self.check_services()
self.monitor_requests()
time.sleep(self.delay * MINS)
except Exception:
self.logger.exception("Unhandled exception while running daemon.")
def check_services(self):
"""
Check the status of the services.
This function checks the status of the DIRAC status as well as updating the
timestamp for the current monitoringd service.
"""
with db_session() as session:
query = session.query(Services)
# DIRAC
query_dirac = query.filter(Services.name == "DIRAC")
status = SERVICESTATUS.Down
if requests.get("https://dirac.gridpp.ac.uk/DIRAC/",
cert=self.cert, verify=self.verify)\
.status_code == 200:
status = SERVICESTATUS.Up
if query_dirac.one_or_none() is None:
session.add(Services(name='DIRAC', status=status))
else:
query_dirac.update({'status': status})
# monitoringd
query_monitoringd = query.filter(Services.name == "monitoringd")
if query_monitoringd.one_or_none() is None:
session.add(Services(name='monitoringd', status=SERVICESTATUS.Up))
else:
query_monitoringd.update({'status': SERVICESTATUS.Up})
def monitor_requests(self):
"""
Monitor the DB requests.
Check the status of ongoing DB requests and either update them or
create new Ganga tasks for new requests.
"""
with db_session() as session:
monitored_requests = session.query(Requests)\
.filter(Requests.status.in_((LOCALSTATUS.Approved,
LOCALSTATUS.Submitted,
LOCALSTATUS.Running)))\
.all()
reschedule_requests = session.query(Requests)\
.filter_by(status=LOCALSTATUS.Failed)\
.join(Requests.parametricjobs)\
.filter_by(reschedule=True)\
.all()
monitored_requests.extend(reschedule_requests)
session.expunge_all()
for request in monitored_requests:
if request.status == LOCALSTATUS.Approved:
request.submit()
request.update_status()
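# Hedged usage sketch (the URL, paths and certificate are placeholders, not
# project defaults): the daemon would typically be started from a service
# entry point roughly like this; ``app`` and ``pid`` are forwarded to
# Daemonize through **kwargs.
#
#   daemon = MonitoringDaemon(dburl="sqlite:///requests.db",
#                             delay=5,  # minutes between polling cycles
#                             cert=("usercert.pem", "userkey.pem"),
#                             app="monitoringd",
#                             pid="/tmp/monitoringd.pid")
#   daemon.start()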
|
alexanderrichards/LZProduction
|
lzproduction/monitoring/MonitoringDaemon.py
|
Python
|
mit
| 4,656
|
[
"DIRAC"
] |
74277cefc0de52471d10957ae2dcf9a430a2f3b455ba2f69fb19952053cb5412
|
# -*- coding: utf-8 -*-
#
# pyrubberband documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 19 10:40:20 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'numpydoc',
]
from glob import glob
autosummary_generate = glob('*.rst')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyrubberband'
copyright = u'2015, Brian McFee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import sys
from mock import MagicMock as Mock
#class Mock(MagicMock):
# @classmethod
# def __getattr__(cls, name):
# return Mock()
MOCK_MODULES = ['numpy', 'soundfile']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
import imp
PYRB = imp.load_source('pyrubberband.version', '../pyrubberband/version.py')
# The short X.Y version.
version = PYRB.version
# The full version, including alpha/beta/rc tags.
release = PYRB.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyrubberbanddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pyrubberband.tex', u'pyrubberband Documentation',
u'Brian McFee', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyrubberband', u'pyrubberband Documentation',
[u'Brian McFee'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyrubberband', u'pyrubberband Documentation',
u'Brian McFee', 'pyrubberband', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
bmcfee/pyrubberband
|
docs/conf.py
|
Python
|
isc
| 8,734
|
[
"Brian"
] |
8afa1906cfbf5232d9cdf63e9a8ef7b64ba3fed9e07a3aeccda5a0a67b6d7a16
|
#!/usr/bin/env python
import argparse
import logging
import time
from bioblend.galaxy import GalaxyInstance
from bioblend.galaxy.client import ConnectionError
import datetime as dt
import yaml
# Omit (most of the) logging by external libraries
logging.getLogger('bioblend').setLevel(logging.ERROR)
logging.getLogger('requests').setLevel(logging.ERROR)
DEFAULT_GALAXY_URL = "http://localhost:8080/"
class JobFailedException(Exception):
pass
class ProgressConsoleHandler(logging.StreamHandler):
"""
A handler class which allows the cursor to stay on
one line for selected messages
"""
on_same_line = False
def emit(self, record):
try:
msg = self.format(record)
stream = self.stream
same_line = hasattr(record, 'same_line')
if self.on_same_line and not same_line:
stream.write('\r\n')
stream.write(msg)
if same_line:
stream.write('.')
self.on_same_line = True
else:
stream.write('\r\n')
self.on_same_line = False
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def load_indices_list(indices_file):
"""
Load YAML from the `indices_file` and return a dict with the content.
"""
with open(indices_file, 'r') as f:
        indices_list = yaml.safe_load(f)
return indices_list
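# A hedged sketch of the expected indices-file layout (key names inferred
# from how this script reads the YAML; genome_list.yaml.sample in the
# repository is authoritative):
#
#   galaxy_instance: http://localhost:8080/
#   api_key: <admin API key>
#   genomes:
#     - dbkey: hg38
#       data_managers:
#         - id: <data manager tool id>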
def wait_for_data_manager_jobs(galaxy_ref, jobs):
# Monitor the job(s)
log.debug("\tJob running", extra={'same_line': True})
for job in jobs:
job_id = job.get('id')
job_finished = False
while not job_finished:
job_state = galaxy_ref.jobs.show_job(job_id).get('state', '')
if job_state == 'ok':
job_finished = True
elif job_state == 'error':
raise JobFailedException("Job is in state error")
log.debug("", extra={'same_line': True})
time.sleep(10)
def run_data_manager(galaxy_ref, dbkey_name, dm_tool, tool_input):
response = galaxy_ref.tools.run_tool('', dm_tool, tool_input)
jobs = response.get('jobs', [])
# Check if a job is actually running
if len(jobs) == 0:
raise JobFailedException("\t(!) No '{0}' job found for '{1}'".format(dm_tool,
dbkey_name))
else:
wait_for_data_manager_jobs(galaxy_ref, jobs)
def install_genome(galaxy_ref, genome):
"""
Runs all data managers provided to setup and install the specified genome.
:type galaxy_ref: bioblend.galaxy.GalaxyInstance
:param galaxy_ref: The Galaxy instance on which to install the genome
    :type genome: dict -- an individual element of the ``genomes`` list from the indices file
:param genome: A dictionary containing information about an individual genome to install. Two attributes are
treated specially: dbkey and data_managers. The dbkey is used as the identifier for a genome and
the data_managers is a list of data_managers to run for this genome.
"""
errored_dms = []
dbkey_name = genome.get('dbkey')
for idx, dm in enumerate(genome.get('data_managers')):
dm_tool = dm.get('id')
        # Initiate tool installation
log.debug('[DM: {0}/{1}] Installing genome {2} with '
'Data Manager: {3}'.format(idx,
len(genome.get('data_managers')), dbkey_name, dm_tool))
tool_input = genome
start = dt.datetime.now()
try:
run_data_manager(galaxy_ref, dbkey_name, dm_tool, tool_input)
log.debug("\tDbkey '{0}' installed successfully in '{1}'".format(
genome.get('dbkey'), dt.datetime.now() - start))
except ConnectionError as e:
response = None
log.error("\t* Error installing genome {0} for DM {1} (after {2}): {3}"
.format(dbkey_name, dm_tool, dt.datetime.now() - start, e.body))
errored_dms.append({'dbkey': dbkey_name, 'DM': dm_tool})
return errored_dms
def install_genomes(galaxy_url, api_key, indices_file):
    istart = dt.datetime.now()
    # Load the indices list before building the Galaxy connection so the
    # YAML values can act as fallbacks for the command line arguments.
    indices_list = load_indices_list(indices_file)
    galaxy_ref = GalaxyInstance(
        galaxy_url or indices_list['galaxy_instance'],
        api_key or indices_list['api_key'])
    errored_dms = []
    for idx, genome in enumerate(indices_list['genomes']):
        log.debug('Processing {0} of {1} genomes - name: {2}'.format(idx,
                  len(indices_list['genomes']),
                  genome['dbkey']))
        errored_dms.extend(install_genome(galaxy_ref, genome))
log.info("All genomes & DMs listed in '{0}' have been processed.".format(indices_file))
log.info("Errored DMs: {0}".format(errored_dms))
log.info("Total run time: {0}".format(dt.datetime.now() - istart))
def _setup_logging():
formatter = logging.Formatter('%(asctime)s %(levelname)-5s - %(message)s')
progress = ProgressConsoleHandler()
file_handler = logging.FileHandler('/tmp/galaxy_genome_install.log')
console = logging.StreamHandler()
console.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(progress)
logger.addHandler(file_handler)
return logger
if __name__ == "__main__":
global log
log = _setup_logging()
parser = argparse.ArgumentParser(description="usage: python %prog [options]")
parser.add_argument(
"-g",
"--galaxy",
default=DEFAULT_GALAXY_URL,
help="URL of galaxy server to use. The default is %s" %
DEFAULT_GALAXY_URL)
parser.add_argument(
"-a",
"--api_key",
type=str,
help="Galaxy admin user API key",
required=True)
parser.add_argument(
"-i",
"--indices_file",
type=str,
help="Reference genomes to install (see genome_list.yaml.sample)",
required=True)
args = parser.parse_args()
install_genomes(args.galaxy, args.api_key, args.indices_file)
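# Hedged invocation example (the URL, key and file name are placeholders):
#   python install_genome_indices.py -g http://localhost:8080/ \
#       -a <admin_api_key> -i genome_list.yaml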
|
gvlproject/gvl.ansible.filesystem
|
files/scripts/install_genome_indices.py
|
Python
|
mit
| 6,317
|
[
"Galaxy"
] |
640e59c23ce287edb84a139ccf9ca96034a28e62adcdb4931cbf8705d80e84a4
|
#!/usr/bin/env python
#-------------------------------------------------------------------------------
# Name: sample_nansatBasemap
# Purpose:
#
# Author: asumak
#
# Created: 28.01.2013
# Copyright: (c) asumak 2013
# Licence: <your licence>
#-------------------------------------------------------------------------------
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.basemap import Basemap
import scipy.ndimage as ndimage
from nansat import Nansat, Domain
import math
def Map(lon, lat, dpi=300, projection='cyl', resolution='l',
continent=True, geoCoordinates=True, mapboundary = False):
m = Basemap(projection=projection, llcrnrlon=lon.min(),
llcrnrlat=lat.min(), urcrnrlon=lon.max(),
urcrnrlat=lat.max(), resolution=resolution)
m.lat = lat
m.lon = lon
m.x, m.y = m(lon, lat)
m.dpi = dpi
m.cs = None
m.continent = continent
m.geoCoordinates = geoCoordinates
m.colorbar = False
m.mapboundary = mapboundary
# create figure
plt.close()
    figureSize = (3, 3)
m.fig = plt.figure(num=1, figsize=figureSize, dpi=dpi)
return m
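# Typical flow, mirroring the example at the bottom of this file:
#   m = Map(lon, lat)
#   put_color(m, data)        # background pseudocolor layer
#   contour_plots(m, data)    # overlay contours
#   save_map(m, 'out.png')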
def contour_plots(m, data, style="line", level=8, linewidths=0.2, cmap=plt.cm.jet,
clabel=False, inline=1, fontsize=3, smoothing=False, mode="gaussian"):
if smoothing:
data = image_process(data, mode)
if style == "fill":
m.cs = m.contourf(m.x, m.y, data, level, cmap=cmap)
m.colorbar=True
else:
# draw contour lines
m.cs = m.contour(m.x, m.y, data, level,
linewidths=linewidths, cmap=cmap)
# add values for the contour lines
if clabel:
plt.clabel(m.cs, inline=inline, fontsize=fontsize)
if m.continent:
draw_continent(m)
if m.geoCoordinates:
draw_geoCoordinates(m)
def quiver_plots(m, dataX, dataY, quivectors=30):
# subsample for quiver plot
    # guard against a zero step when the grid is smaller than `quivectors`
    step0 = max(1, dataX.shape[0] // quivectors)
    step1 = max(1, dataX.shape[1] // quivectors)
dataX2 = dataX[::step0, ::step1]
dataY2 = dataY[::step0, ::step1]
lon2 = m.lon[::step0, ::step1]
lat2 = m.lat[::step0, ::step1]
x, y = m(lon2, lat2)
im2 = m.quiver(x, y, dataX2, dataY2)
if m.continent:
draw_continent(m)
if m.geoCoordinates:
draw_geoCoordinates(m)
def put_color(m, data, shading='flat',cmap=plt.cm.jet):
m.pcolormesh(m.x, m.y, data, shading=shading, cmap=cmap)
def image_process(data, mode="gaussian", sigma=2.5, order=0, weight=None,
                  weightMtxSize=7, convMode="constant", cval=0.0,
                  splineOrder=1):
if mode=="convolve":
# if weight is None, create a weight matrix
if weight is None:
weight = np.ones((weightMtxSize, weightMtxSize))
center = (weightMtxSize - 1) / 2
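            # Build a pyramid-shaped kernel: each cell is weighted by
            # 2 ** -max(|i|, |j|), so weights halve with every step of
            # Chebyshev distance from the centre of the matrix.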
for i in range(-(center), center+1, 1):
for j in range(-(center), center+1, 1):
weight[i][j] /= math.pow(2.0, max(abs(i),abs(j)))
return ndimage.convolve(data, weight, mode=convMode, cval=cval)
elif mode=="fourier_gaussian":
return ndimage.fourier_gaussian(data, sigma=sigma)
elif mode=="spline":
return ndimage.spline_filter1d(data, order=splineOrder)
else:
        if mode != "gaussian":
            print "unknown mode '%s'; applying Gaussian filter in image_process()" % mode
return ndimage.gaussian_filter(data, sigma=sigma, order=order)
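# Usage sketch (e.g. for the wind-speed grid 'w' loaded below):
#   smoothed = image_process(w)                    # Gaussian, sigma=2.5
#   smoothed = image_process(w, mode="convolve")   # pyramid-kernel convolution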
def add_legend(m, orientation='horizontal', pad=0.01,
tickFontSize=4,
title="", titleFontSize=5):
# add colorbar and reduce font size
if m.colorbar:
cbar = m.fig.colorbar(m.cs, orientation=orientation, pad=pad)
imaxes = plt.gca()
plt.axes(cbar.ax)
plt.xticks(fontsize=tickFontSize)
plt.axes(imaxes)
# add title
if title != "":
plt.title(title, fontsize=titleFontSize)
def draw_continent(m, continentColor='#cc9966', lakeColor='#99ffff'):
m.fillcontinents(color=continentColor, lake_color=lakeColor)
def draw_geoCoordinates(m,
latNum=5, latFontsize=4,
latLabels=[True, False, False, False],
lonNum=5, lonFontsize=4,
lonLabels=[False, False, True, False]):
# draw lat and lon
m.drawparallels(np.arange(m.lat.min(), m.lat.max(),
(m.lat.max()-m.lat.min())/latNum),
labels=latLabels, fontsize=latFontsize)
m.drawmeridians(np.arange(m.lon.min(), m.lon.max(),
(m.lon.max()-m.lon.min())/lonNum),
labels=lonLabels, fontsize=lonFontsize)
def draw_mapboundary(m, lineWidth=1, color="k", fillColor='0.3'):
m.drawmapboundary(linewidth=lineWidth, color=color, fill_color=fillColor)
def save_map(m, fileName):
m.fig.savefig(fileName, dpi=m.dpi)
#------------------------------------------------------------------------------#
# file with wind data
iFileName = 'c:/Users/asumak/Data/input/NCEP_GRIB/gfs.t06z.master.grbf03'
n = Nansat(iFileName)
##print n
# Norwegian and Barents Seas
d = Domain('+proj=longlat', '-te 0 60 30 80 -ts 300 300')
n.reproject(d)
lon,lat = n.get_geolocation_grids()
u = n[1]
v = n[2]
w = n[3]
# Create map
nMap = Map(lon, lat)
# add image
put_color(nMap, w)
# add contour1 (line)
contour_plots(nMap, v, level=8, clabel=True, smoothing=True)
# add contour2 (fill)
##contour_plots(nMap, w, style="fill", cmap=plt.cm.winter, smoothing=True, mode ='convolve')
# add quiver
##quiver_plots(nMap, u, v)
# add colorbar and title
add_legend(nMap, title='NCEP wind speed and direction')
# save to file
save_map(nMap, 'c:/Users/asumak/Data/output/basemap02.png')
|
yuxiaobu/nansat
|
sample_nansatBasemap.py
|
Python
|
gpl-3.0
| 5,942
|
[
"Gaussian"
] |
e387803bb7883c5743d9a37d94e70294c6626b61dd1e24993ddf9c0eb49ffe77
|
from datetime import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from markdown import markdown
import bleach
import json
from flask import Flask, current_app, request, url_for, jsonify, session
from flask_login import UserMixin, AnonymousUserMixin
from app.exceptions import ValidationError
from . import db
from . import login_manager
from sqlalchemy.inspection import inspect
from sqlalchemy import or_
from flask_sqlalchemy import SQLAlchemy, BaseQuery
class VersionQuery(BaseQuery):
def all_versions(self): #was overwriting the original all() function
return [s for s in self.filter_by(version_ok=1)]
def original(self):
return [s for s in self.filter_by(version_original=1)]
def latest(self):
return [s for s in self.filter_by(version_latest=1)]
def all_checked(self):
# This is slow
amber = Status.query.filter(Status.status_name=='Amber').first()
green = Status.query.filter(Status.status_name=='Green').first()
return [s for s in self.filter(or_(Version.statuses == amber, Version.statuses == green)).filter(Version.checked == True).order_by(Version.version_number.desc())]
def all_checked_unchecked(self):
# This is slow
amber = Status.query.filter(Status.status_name=='Amber').first()
green = Status.query.filter(Status.status_name=='Green').first()
return [s for s in self.filter(or_(Version.statuses == amber, Version.statuses == green)).order_by(Version.version_number.desc())]
def all_v(self):
return [s for s in self]
def version_number(self, id):
# This has potential to be slow too
return self.filter(Version.version_number == id).all()
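# Intended usage, assuming a model sets `query_class = VersionQuery` (as the
# commented-out line on the User model below suggests):
#   Version.query.latest()        # versions flagged version_latest=1
#   Version.query.all_checked()   # checked versions with Amber/Green status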
class Permission:
FOLLOW = 0x01
COMMENT = 0x02
WRITE_ARTICLES = 0x04
    MODERATE_COMMENTS = 0x08
    VALIDATION = 0x08  # shares the 0x08 bit with MODERATE_COMMENTS, so the two are granted together
ADMINISTER = 0x80
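    # Permissions combine as a bitmask; Role.permissions stores the OR of the
    # granted flags and User.can() tests them, e.g.:
    #   perms = Permission.FOLLOW | Permission.COMMENT    # 0x03
    #   perms & Permission.COMMENT == Permission.COMMENT  # True -> granted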
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = {
'User': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES, True),
'Developer': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES, False),
'Researcher': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES, False),
'Compadrino': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES, False),
'Committee': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES |
Permission.MODERATE_COMMENTS |
Permission.VALIDATION, False),
'Moderator': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES |
Permission.MODERATE_COMMENTS |
Permission.VALIDATION, False),
'Administrator': (0xff, False)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.default = roles[r][1]
db.session.add(role)
db.session.commit()
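    # Typically run once from a Flask shell (or a deploy script) to seed or
    # update the roles table:
    #   Role.insert_roles()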
def __repr__(self):
return self.name
class User(UserMixin, db.Model):
#query_class = VersionQuery
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(64), unique=True, index=True)
username = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
password_hash = db.Column(db.String(128))
confirmed = db.Column(db.Boolean, default=False)
name = db.Column(db.String(64))
location = db.Column(db.String(64))
about_me = db.Column(db.Text())
    member_since = db.Column(db.Date(), default=lambda: datetime.now().date())
    last_seen = db.Column(db.Date(), default=lambda: datetime.now().date())
avatar_hash = db.Column(db.String(32))
api_hash = db.Column(db.Text())
tokens = db.Column(db.Text)
institute_id = db.Column(db.Integer, db.ForeignKey('institutes.id'))
institute_confirmed = db.Column(db.Boolean, default=False)
#versions = db.relationship("Version", backref="user")
entered_by = db.relationship("Version", backref="user")
#checked_by = db.relationship("Version", backref="user")
changelogger = db.relationship("ChangeLogger", backref="user")
contacts = db.relationship("AuthorContact", backref="user")
@staticmethod
def migrate():
with open('app/data-migrate/users.json') as user_file:
data = json.load(user_file)
user = data["User"]
usern = user["User"]
for us in usern:
u = User.query.filter_by(email=us['email']).first()
if u is None:
u = User()
u.email = us['email']
u.name = us['name']
u.username = us['username']
u.role = Role.query.filter_by(id=us['role_id']).first()
            u.password_hash = us['password']  # assumes users.json stores password hashes; the `password` setter would re-hash
u.confirmed = us['confirmed']
u.institute_id = us['institute_id']
u.institute_confirmed = us['institute_confirmed']
db.session.add(u)
db.session.commit()
# @staticmethod
# def migrate():
# Institute.migrate()
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.role is None:
if self.email == current_app.config['FLASKY_ADMIN']:
self.role = Role.query.filter_by(permissions=0xff).first()
if self.role is None:
self.role = Role.query.filter_by(default=True).first()
if self.email is not None and self.avatar_hash is None:
self.avatar_hash = hashlib.md5(
self.email.encode('utf-8')).hexdigest()
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
username = self.username
        hash_ = hashlib.md5(username.encode('utf-8')).hexdigest()
self.api_hash = hash_
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
        except Exception:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
db.session.commit()
return True
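    # Confirmation flow sketch:
    #   token = user.generate_confirmation_token()  # emailed to the user
    #   user.confirm(token)                         # -> True once verified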
def generate_reset_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def reset_password(self, token, new_password):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
        except Exception:
return False
if data.get('reset') != self.id:
return False
self.password = new_password
db.session.add(self)
db.session.commit()
return True
def generate_email_change_token(self, new_email, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'change_email': self.id, 'new_email': new_email})
def change_email(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
        except Exception:
return False
if data.get('change_email') != self.id:
return False
new_email = data.get('new_email')
if new_email is None:
return False
if self.query.filter_by(email=new_email).first() is not None:
return False
self.email = new_email
self.avatar_hash = hashlib.md5(
self.email.encode('utf-8')).hexdigest()
db.session.add(self)
db.session.commit()
return True
def can(self, permissions):
return self.role is not None and \
(self.role.permissions & permissions) == permissions
def is_administrator(self):
return self.can(Permission.ADMINISTER)
def ping(self):
self.last_seen = datetime.now().date()
db.session.add(self)
def gravatar(self, size=100, default='identicon', rating='g'):
if request.is_secure:
url = 'https://secure.gravatar.com/avatar'
else:
url = 'http://www.gravatar.com/avatar'
hash = self.avatar_hash or hashlib.md5(
self.email.encode('utf-8')).hexdigest()
return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
url=url, hash=hash, size=size, default=default, rating=rating)
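    # e.g. user.gravatar(size=40) ->
    #   'https://secure.gravatar.com/avatar/<md5-of-email>?s=40&d=identicon&r=g'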
def to_json(self, key):
user = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='users', key=key,
_external=False),
'data' : {
'email' : self.email,
'username': self.username,
'role' : self.role.name,
'name' : self.name,
'location' : self.location,
'about_me' : self.about_me,
'member_since': self.member_since,
'last_seen': self.last_seen,
'institute' : self.institute.to_json_simple(key) if self.institute else None,
'institute_confirmed' : self.institute_confirmed ,
                'versions' : [version.to_json_simple(key) for version in self.entered_by]
}
}
return user
def to_json_simple(self, key):
user = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='users', key=key,
_external=False),
'data' : {
'email' : self.email,
'username': self.username,
'last_seen': self.last_seen,
'institute' : self.institute.to_json_simple(key) if self.institute else None,
                'versions_len' : len(self.entered_by)
}
}
return user
def generate_auth_token(self):
username = self.username
        hash_ = hashlib.md5(username.encode('utf-8')).hexdigest()
self.api_hash = hash_
db.session.add(self)
db.session.commit()
return {'id' : hash_}
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
        except Exception:
return None
return User.query.get(data['id'])
def __repr__(self):
return '<User %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
def can(self, permissions):
return False
def is_administrator(self):
return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
''' Start Demog Stuff '''
''' Meta tables '''
''' Meta Tables for Users '''
class Institute(db.Model):
__tablename__ = 'institutes'
id = db.Column(db.Integer, primary_key=True)
institution_name = db.Column(db.String(400))
institution_short = db.Column(db.String(100))
main_contact_email = db.Column(db.String(100))
main_contact_name = db.Column(db.String(300))
institution_address = db.Column(db.String(300))
research_group = db.Column(db.String(300))
    date_joined = db.Column(db.Date(), default=lambda: datetime.now().date())
department = db.Column(db.String(64))
country = db.Column(db.String(64))
website = db.Column(db.String(200))
head_compadrino = db.Column(db.String(64))
users = db.relationship("User", backref="institute")
populations = db.relationship("Population", backref="database_source")
@staticmethod
def migrate():
with open('app/data-migrate/users.json') as user_file:
data = json.load(user_file)
user = data["User"]
institute = user["Institute"]
for ins in institute:
i = Institute.query.filter_by(institution_name=ins['institution_name']).first()
if i is None:
i = Institute()
i.institution_name = ins['institution_name']
i.institution_short = ins['institution_short']
i.main_contact_email = ins['main_contact_email']
i.main_contact_name = ins['main_contact_name']
i.institution_address = ins['institution_address']
i.research_group = ins['research_group']
i.department = ins['department']
i.country = ins['country']
i.website = ins['website']
i.head_compadrino = ins['head_compadrino']
db.session.add(i)
db.session.commit()
def to_json(self, key):
institute = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='institutes', key=key,
_external=False),
'data' :
{
'institution_name': self.institution_name,
'institution_short' : self.institution_short,
'main_contact_email' : self.main_contact_email,
'main_contact_name' : self.main_contact_name,
'institution_address' : self.institution_address,
'research_group' : self.research_group,
'date_joined' : str(self.date_joined),
'department' : self.department,
'country' : self.country,
'website' : self.website,
'head_compadrino' : self.head_compadrino,
'users' : [user.to_json_simple(key) for user in self.users]}
}
return institute
def to_json_simple(self, key):
institute = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='institutes', key=key,
_external=False),
'data' : {
'institution_name': self.institution_name,
'institution_short' : self.institution_short,
'main_contact_email' : self.main_contact_email,
'main_contact_name' : self.main_contact_name,
'institution_address' : self.institution_address,
'research_group' : self.research_group,
'date_joined' : str(self.date_joined),
'department' : self.department,
'country' : self.country,
'website' : self.website,
'head_compadrino' : self.head_compadrino,
'users' : len(self.users)}
}
return institute
def __repr__(self):
return self.institution_name
''' End Meta Tables for Users '''
''' Meta Tables for Species '''
class IUCNStatus(db.Model):
__tablename__ = 'iucn_status'
id = db.Column(db.Integer, primary_key=True)
status_code = db.Column(db.String(64), index=True)
status_name = db.Column(db.String(64))
status_description = db.Column(db.Text())
species = db.relationship("Species", backref="iucn_status")
@staticmethod
def migrate():
with open('app/data-migrate/species.json') as species_file:
data = json.load(species_file)
species = data["Species"]
iucn = species["IUCNStatus"]
for iu in iucn:
i = IUCNStatus.query.filter_by(status_code=iu['status_code']).first()
if i is None:
i = IUCNStatus()
i.status_code = iu['status_code']
i.status_name = iu['status_name']
i.status_description = iu['status_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
iucn_status = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='iucn_status', key=key,
_external=False),
'data' : {
'status_code': self.status_code,
'status_name' : self.status_name,
'status_description' : self.status_description,
                'species' : [species.to_json_simple(key) for species in self.species]
}
}
return iucn_status
def to_json_simple(self, key):
iucn_status = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='iucn_status', key=key,
_external=False),
'data' : {
'status_code': self.status_code,
'status_name' : self.status_name
}
}
return iucn_status
def __repr__(self):
return self.status_code
#class ESAStatus(db.Model):
# __tablename__ = 'esa_statuses'
# id = db.Column(db.Integer, primary_key=True)
# status_code = db.Column(db.String(64), index=True, unique=True)
# status_name = db.Column(db.String(64))
# status_description = db.Column(db.Text())
#
# species = db.relationship("Species", backref="esa_status")
#
# @staticmethod
# def migrate():
# with open('app/data-migrate/species.json') as species_file:
# data = json.load(species_file)
# species = data["Species"]
# esa = species["ESAStatus"]
#
# for ea in esa:
# i = ESAStatus.query.filter_by(status_code=ea['status_code']).first()
# if i is None:
# i = ESAStatus()
#
# i.status_code = ea['status_code']
# i.status_name = ea['status_name']
#
# db.session.add(i)
# db.session.commit()
#
# def to_json(self, key):
# esa_status = {
# 'request_url' : url_for('api.get_one_entry', id=self.id, model='esa_statuses', key=key,
# _external=False),
# 'data' : {
# 'status_code': self.status_code,
# 'status_name' : self.status_name,
# 'status_description' : self.status_description,
# 'species' : [species.to_json_simple() for species in self.species]
# }
#
# }
# return esa_status
#
# def to_json_simple(self, key):
# esa_status = {
# 'request_url' : url_for('api.get_one_entry', id=self.id, model='esa_statuses', key=key,
# _external=False),
# 'data' : {
# 'status_code': self.status_code,
# 'status_name' : self.status_name
# }
# }
# return esa_status
#
# def __repr__(self):
# return self.status_code
''' End Meta Tables for Species '''
''' Meta Tables for Taxonomy '''
''' End Meta Tables for Taxonomy '''
''' Meta Tables for Traits '''
class OrganismType(db.Model):
__tablename__ = 'organism_types'
id = db.Column(db.Integer, primary_key=True)
type_name = db.Column(db.String(64), index=True)
traits = db.relationship("Trait", backref="organism_type")
@staticmethod
def migrate():
with open('app/data-migrate/traits.json') as taxonomy_file:
data = json.load(taxonomy_file)
species = data["Trait"]
growth_types = species["OrganismType"]
for types in growth_types:
i = OrganismType.query.filter_by(type_name=types['type_name']).first()
if i is None:
i = OrganismType()
i.type_name = types['type_name']
db.session.add(i)
db.session.commit()
def to_json(self, key):
organism_type = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='organism_types', key=key,
_external=False),
'data' : {
'type_name': self.type_name,
'traits' : [trait.to_json_simple(key) for trait in self.traits]
}
}
return organism_type
def to_json_simple(self, key):
organism_type = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='organism_types', key=key,
_external=False),
'data' : {
'type_name': self.type_name
}
}
return organism_type
def __repr__(self):
return self.type_name
class GrowthFormRaunkiaer(db.Model):
__tablename__ = 'growth_forms_raunkiaer'
id = db.Column(db.Integer, primary_key=True, index=True)
form_name = db.Column(db.Text())
traits = db.relationship("Trait", backref="growth_form_raunkiaer")
@staticmethod
def migrate():
with open('app/data-migrate/traits.json') as taxonomy_file:
data = json.load(taxonomy_file)
species = data["Trait"]
growth_forms = species["GrowthFormRaunkiaer"]
for form in growth_forms:
i = GrowthFormRaunkiaer.query.filter_by(form_name=form['form_name']).first()
if i is None:
i = GrowthFormRaunkiaer()
i.form_name = form['form_name']
db.session.add(i)
db.session.commit()
def to_json(self, key):
growth_form = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='growth_forms_raunkiaer', key=key,
_external=False),
'data' : {
'type_name': self.form_name,
'traits' : [trait.to_json_simple(key) for trait in self.traits]
}
}
return growth_form
def to_json_simple(self, key):
growth_form = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='growth_forms_raunkiaer', key=key,
_external=False),
'data' : {
'type_name': self.form_name
}
}
        return growth_form
def __repr__(self):
return self.form_name
class ReproductiveRepetition(db.Model):
__tablename__ = 'reproductive_repetition'
id = db.Column(db.Integer, primary_key=True, index=True)
repetition_name = db.Column(db.Text())
traits = db.relationship("Trait", backref="reproductive_repetition")
@staticmethod
def migrate():
with open('app/data-migrate/traits.json') as d_file:
data = json.load(d_file)
json_data = data["Trait"]
nodes = json_data["ReproductiveRepetition"]
for node in nodes:
i = ReproductiveRepetition.query.filter_by(repetition_name=node['repetition_name']).first()
if i is None:
i = ReproductiveRepetition()
i.repetition_name = node['repetition_name']
db.session.add(i)
db.session.commit()
def to_json(self, key):
reproductive_repetition = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='reproductive_repetition', key=key,
_external=False),
'data' : {
'repetition_name': self.repetition_name,
'traits' : [trait.to_json_simple(key) for trait in self.traits]
}
}
return reproductive_repetition
def to_json_simple(self, key):
reproductive_repetition = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='reproductive_repetition', key=key,
_external=False),
'data' : {
'repetition_name': self.repetition_name
}
}
return reproductive_repetition
def __repr__(self):
return self.repetition_name
class DicotMonoc(db.Model):
__tablename__ = 'dicot_monoc'
id = db.Column(db.Integer, primary_key=True)
dicot_monoc_name = db.Column(db.String(64), index=True)
traits = db.relationship("Trait", backref="dicot_monoc")
@staticmethod
def migrate():
with open('app/data-migrate/traits.json') as d_file:
data = json.load(d_file)
json_data = data["Trait"]
nodes = json_data["DicotMonoc"]
for node in nodes:
i = DicotMonoc.query.filter_by(dicot_monoc_name=node['dicot_monoc_name']).first()
if i is None:
i = DicotMonoc()
i.dicot_monoc_name = node['dicot_monoc_name']
db.session.add(i)
db.session.commit()
def to_json(self, key):
dicot_monoc = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='dicot_monoc', key=key,
_external=False),
'data' : {
'dicot_monoc_name': self.dicot_monoc_name,
'traits' : [trait.to_json_simple(key) for trait in self.traits]
}
}
return dicot_monoc
def to_json_simple(self, key):
dicot_monoc = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='dicot_monoc', key=key,
_external=False),
'data' : {
'dicot_monoc_name': self.dicot_monoc_name,
}
}
return dicot_monoc
def __repr__(self):
return self.dicot_monoc_name
class AngioGymno(db.Model):
__tablename__ = 'angio_gymno'
id = db.Column(db.Integer, primary_key=True)
angio_gymno_name = db.Column(db.String(64), index=True)
traits = db.relationship("Trait", backref="angio_gymno")
@staticmethod
def migrate():
with open('app/data-migrate/traits.json') as d_file:
data = json.load(d_file)
json_data = data["Trait"]
nodes = json_data["AngioGymno"]
for node in nodes:
i = AngioGymno.query.filter_by(angio_gymno_name=node['angio_gymno_name']).first()
if i is None:
i = AngioGymno()
i.angio_gymno_name = node['angio_gymno_name']
db.session.add(i)
db.session.commit()
def to_json(self, key):
angio_gymno = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='angio_gymno', key=key,
_external=False),
'data' : {
'angio_gymno_name': self.angio_gymno_name,
'traits' : [trait.to_json_simple(key) for trait in self.traits]
}
}
return angio_gymno
def to_json_simple(self, key):
angio_gymno = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='angio_gymno', key=key,
_external=False),
'data' : {
'angio_gymno_name': self.angio_gymno_name
}
}
return angio_gymno
def __repr__(self):
return self.angio_gymno_name
class SpandExGrowthType(db.Model):
__tablename__ = 'spand_ex_growth_types'
id = db.Column(db.Integer, primary_key=True)
type_name = db.Column(db.String(64), index=True)
type_description = db.Column(db.Text)
traits = db.relationship("Trait", backref="spand_ex_growth_types")
@staticmethod
def migrate():
with open('app/data-migrate/traits.json') as d_file:
data = json.load(d_file)
json_data = data["Trait"]
nodes = json_data["SpandExGrowthType"]
for node in nodes:
i = SpandExGrowthType.query.filter_by(type_name=node['growth_type_name']).first()
if i is None:
i = SpandExGrowthType()
i.type_name = node['growth_type_name']
i.type_description = node['growth_type_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
spand_ex_growth_type = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='spand_ex_growth_types', key=key,
_external=False),
'data' : {
'type_name': self.type_name,
'type_description': self.type_description,
'traits' : [trait.to_json_simple(key) for trait in self.traits]
}
}
return spand_ex_growth_type
def to_json_simple(self, key):
spand_ex_growth_type = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='spand_ex_growth_types', key=key,
_external=False),
'data' : {
'type_name': self.type_name,
'type_description': self.type_description
}
}
return spand_ex_growth_type
def __repr__(self):
return self.type_name
''' End Meta Tables for Traits '''
''' Meta Tables for Publication/Additional Source '''
class SourceType(db.Model):
__tablename__ = 'source_types'
id = db.Column(db.Integer, primary_key=True)
source_name = db.Column(db.String(64), index=True)
source_description = db.Column(db.Text())
publications = db.relationship("Publication", backref="source_type")
additional_sources = db.relationship("AdditionalSource", backref="source_type")
@staticmethod
def migrate():
with open('app/data-migrate/publications.json') as d_file:
data = json.load(d_file)
json_data = data["Publication"]
nodes = json_data["SourceType"]
for node in nodes:
i = SourceType.query.filter_by(source_name=node['source_name']).first()
if i is None:
i = SourceType()
i.source_name = node['source_name']
i.source_description = node['source_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
source_type = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='source_types', key=key,
_external=False),
'data' : {
'source_name': self.source_name,
'source_description': self.source_description,
'publications' : [publication.to_json_simple(key) for publication in self.publications]
}
}
return source_type
def to_json_simple(self, key):
source_type = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='source_types', key=key,
_external=False),
'data' : {
'source_name': self.source_name,
'source_description': self.source_description
}
}
return source_type
def __repr__(self):
return self.source_name
## Not sure what this is for, but it was commented out in the database model below?
#query_class = VersionQuery
class Database(db.Model):
__tablename__ = 'databases'
id = db.Column(db.Integer, primary_key=True)
database_name = db.Column(db.String(64))
database_description = db.Column(db.Text())
database_master_version = db.Column(db.String(64), index=True)
database_date_created = db.Column(db.Date())
database_number_species_accepted = db.Column(db.Integer())
database_number_matrices = db.Column(db.Integer())
database_agreement = db.Column(db.String(64))
# database_id = db.Column(db.Integer, db.ForeignKey('databases.id'))
populations = db.relationship("Population", backref="database")
# version = db.relationship("Version", backref="database", passive_deletes=True)
###Not sure what these are for....
#version = db.relationship("Version", backref="database")
#version_latest = db.Column(db.String(64))
#version_original = db.Column(db.Boolean())
#version_ok = db.Column(db.Boolean)
@staticmethod
def migrate():
with open('app/data-migrate/databases.json') as d_file:
data = json.load(d_file)
json_data = data["Databasess"]
nodes = json_data["Database"]
for node in nodes:
i = Database.query.filter_by(database_master_version=node['database_master_version']).first()
if i is None:
i = Database()
i.database_name = node['database_name']
i.database_description = node['database_description']
i.database_master_version = node['database_master_version']
i.database_date_created = node['database_date_created']
i.database_number_species_accepted = node['database_number_species_accepted']
i.database_number_matrices = node['database_number_matrices']
i.database_agreement = node['database_agreement']
db.session.add(i)
db.session.commit()
def to_json(self, key):
database = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='databases', key=key,
_external=False),
'data' : {
'database_name' : self.database_name,
'database_description' : self.database_description,
'database_master_version' : self.database_master_version,
'database_date_created' : self.database_date_created,
'database_number_species_accepted' : self.database_number_species_accepted,
'database_number_matrices' : self.database_number_matrices,
'database_agreement' : self.database_agreement,
'populations' : [population.to_json(key) for population in self.populations]
# 'versions' : [version.to_json(key) for version in self.versions]
}
}
return database
def to_json_simple(self, key):
database = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='databases', key=key,
_external=False),
'data' : {
'database_name' : self.database_name,
'database_description' : self.database_description,
'database_master_version' : self.database_master_version,
'database_date_created' : self.database_date_created,
'database_number_species_accepted' : self.database_number_species_accepted,
'database_number_matrices' : self.database_number_matrices
}
}
return database
def __repr__(self):
return self.database_name
class Purpose(db.Model):
__tablename__ = 'purposes'
id = db.Column(db.Integer, primary_key=True)
purpose_name = db.Column(db.String(64), index=True)
purpose_description = db.Column(db.Text())
@staticmethod
def migrate():
with open('app/data-migrate/publications.json') as d_file:
data = json.load(d_file)
json_data = data["Publication"]
nodes = json_data["Purpose"]
for node in nodes:
i = Purpose.query.filter_by(purpose_name=node['purpose_name']).first()
if i is None:
i = Purpose()
i.purpose_name = node['purpose_name']
i.purpose_description = node['purpose_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
purpose = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='purposes', key=key,
_external=False),
'data' : {
'purpose_name' : self.purpose_name,
'purpose_description' : self.purpose_description,
'publications' : [publication.to_json_simple(key) for publication in self.publications]
}
}
return purpose
def to_json_simple(self, key):
purpose = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='purposes', key=key,
_external=False),
'data' : {
'purpose_name' : self.purpose_name,
'purpose_description' : self.purpose_description
}
}
return purpose
def __repr__(self):
return self.purpose_name
publication_purposes = db.Table('publication_purposes', db.Model.metadata,
db.Column('id', db.Integer, primary_key=True),
db.Column('purpose_id', db.Integer, db.ForeignKey('purposes.id', ondelete='CASCADE')),
db.Column('publication_id', db.Integer, db.ForeignKey('publications.id', ondelete='CASCADE'))
)
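# Plain association table for the many-to-many link between publications and
# purposes; the Publication model (defined elsewhere) presumably references it
# via `secondary=publication_purposes`.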
class MissingData(db.Model):
__tablename__ = 'missing_data'
id = db.Column(db.Integer, primary_key=True)
missing_code = db.Column(db.String(5), index=True)
missing_description = db.Column(db.Text())
@staticmethod
def migrate():
with open('app/data-migrate/publications.json') as d_file:
data = json.load(d_file)
json_data = data["Publication"]
nodes = json_data["MissingData"]
for node in nodes:
i = MissingData.query.filter_by(missing_code=node['missing_code']).first()
if i is None:
i = MissingData()
i.missing_code = node['missing_code']
i.missing_description = node['missing_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
missing_data = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='missing_data', key=key,
_external=False),
'data' : {
'missing_code' : self.missing_code,
'missing_description' : self.missing_description,
'publications' : [publication.to_json_simple(key) for publication in self.publications]
}
}
return missing_data
def to_json_simple(self, key):
missing_data = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='missing_data', key=key,
_external=False),
'data' : {
'missing_code' : self.missing_code,
'missing_description' : self.missing_description
}
}
return missing_data
def __repr__(self):
return self.missing_code
publication_missing_data = db.Table('publication_missing_data', db.Model.metadata,
db.Column('id', db.Integer, primary_key=True),
db.Column('missing_data_id', db.Integer, db.ForeignKey('missing_data.id',ondelete='CASCADE')),
db.Column('publication_id', db.Integer, db.ForeignKey('publications.id', ondelete='CASCADE'))
)
class PublicationsProtocol (db.Model):
__tablename__ = 'publications_protocol'
id = db.Column(db.Integer, primary_key=True)
protocol_number = db.Column(db.Integer, index=True)
name = db.Column(db.String(200))
description = db.Column(db.Text())
publications = db.relationship("Publication", backref = "publications_protocol")
@staticmethod
def migrate():
with open('app/data-migrate/compadrino_protocol.json') as d_file:
data = json.load(d_file)
json_data = data["CompadrinoProtocol"]
nodes = json_data["PublicationsProtocol"]
for node in nodes:
i = PublicationsProtocol.query.filter_by(protocol_number=node['protocol_number']).first()
if i is None:
i = PublicationsProtocol()
i.protocol_number = node['protocol_number']
i.name = node['name']
i.description = node['description']
db.session.add(i)
db.session.commit()
def to_json(self,key):
        publications_protocol = {
'request_url' : url_for('api.get_one_entry', id=self.id, model = 'protocol_number', key=key,
_external=False),
'data' : {
'protocol_number' : self.protocol_number,
'name' : self.name,
'description' : self.description,
'publications' : [publication.to_json_simple(key) for publication in self.publications]
}
}
return publications_protocol
def to_json_simple(self, key):
        publications_protocol = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='protocol_number', key=key,
_external=False),
'data' : {
'protocol_number' : self.protocol_number,
'description' : self.description
}
}
return publications_protocol
def __repr__(self):
        return str(self.protocol_number)
''' End Meta Tables for Publication/Additional Source '''
''' Meta Tables for Protocol '''
#class DigitizationProtocol (db.Model):
# __tablename__ = 'digitization_protocol'
# id = db.Column(db.Integer, primary_key=True)
# field_name = db.Column(db.String(50))
# name_in_csv = db.Column(db.String(100))
# database_model = db.Column(db.String(50))
# field_description = db.Column(db.Text())
# field_short_description = db.Column(db.Text())
class Protocol (db.Model):
__tablename__ = 'protocol'
id = db.Column(db.Integer, primary_key=True)
digitization_protocol_id = db.Column(db.Integer, db.ForeignKey('digitization_protocol.id',ondelete='CASCADE'))
common_id = db.Column(db.Integer, db.ForeignKey('commonterm.id', ondelete='CASCADE'))
@staticmethod
def migrate():
DigitizationProtocol.migrate()
        CommonTerm.migrate()
def to_json(self,key):
        protocol = {
'request_url' : url_for('api.get_one_entry', id=self.id, model = 'field_name', key=key,
_external=False),
'data' : {
'digitization_protocol' : self.digitization_protocol.to_json_simple(key) if self.digitization_protocol else None,
                'commonly_used' : self.commonterm.to_json_simple(key) if self.commonterm else None,
}
}
return protocol
def __repr__(self):
return '<Protocol %r>' % self.id
class DigitizationProtocol (db.Model):
__tablename__ = 'digitization_protocol'
id = db.Column(db.Integer, primary_key=True)
field_name = db.Column(db.String(50), index=True)
name_in_csv = db.Column(db.String(50))
database_model = db.Column(db.String(50))
field_description = db.Column(db.Text())
field_short_description = db.Column(db.Text())
protocols = db.relationship("Protocol", backref="digitization_protocol")
@staticmethod
def migrate():
with open('app/data-migrate/protocol.json') as d_file:
data = json.load(d_file)
json_data = data["Protocolsssss"]
nodes = json_data["Protocolss"]
for node in nodes:
i = DigitizationProtocol.query.filter_by(field_name=node['field_name']).first()
if i is None:
i = DigitizationProtocol()
i.field_name = node['field_name']
i.name_in_csv = node['name_in_csv']
i.database_model = node['database_model']
i.field_description = node['field_description']
i.field_short_description = node['field_short_description']
db.session.add(i)
db.session.commit()
def to_json(self,key):
        digitization_protocol = {
'request_url' : url_for('api.get_one_entry', id=self.id, model = 'field_name', key=key,
_external=False),
'data' : {
'field_name' : self.field_name,
'name_in_csv' : self.name_in_csv,
'database_model' : self.database_model,
'field_description' : self.field_description,
                'field_short_description' : self.field_short_description,
'protocols' : [protocol.to_json_simple(key) for protocol in self.protocols]
}
}
return digitization_protocol
def to_json_simple(self, key):
        digitization_protocol = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='field_name', key=key,
_external=False),
'data' : {
'field_name' : self.field_name,
                'field_short_description' : self.field_short_description
}
}
return digitization_protocol
def __repr__(self):
return self.field_name
class CommonTerm(db.Model):
__tablename__='commonterm'
id = db.Column(db.Integer, primary_key=True)
common_value_name = db.Column(db.String(50), index=True)
common_value_data = db.Column(db.String(50))
common_value_description = db.Column(db.String(250))
protocols = db.relationship("Protocol", backref="commonterm")
@staticmethod
def migrate():
with open('app/data-migrate/protocol.json') as d_file:
data = json.load(d_file)
json_data = data["Protocolsssss"]
nodes = json_data["Common_values"]
for node in nodes:
i = CommonTerm.query.filter_by(common_value_name=node['common_value_name']).first()
if i is None:
i = CommonTerm()
i.common_value_name = node['common_value_name']
db.session.add(i)
db.session.commit()
def to_json(self, key):
commonterm = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='commonterms',key=key,
_external=False),
'data' : {
'common_value_name' : self.common_value_name,
'common_value_data' : self.common_value_data,
'common_value_description' : self.common_value_description,
'protocols' : [protocol.to_json_simple(key) for protocol in self.protocols]
}
}
return commonterm
def to_json_simple(self,key):
commonterm = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='commonterms',key=key,
_external=False),
'data' : {
'common_value_name' : self.common_value_name,
'common_value_description' : self.common_value_description,
}
}
return commonterm
def __repr__(self):
return self.common_value_name
''' End Meta Tables for Protocol '''
''' Upload files '''
#class UploadingFiles (db.Model):
# __tablename__ = 'uploading files'
# id = db.Column(db.Integer, primary_key=True)
# file_title = db.Column(db.String(100))
# file_description = db.Column(db.String(300))
# file_filename = db.Column(db.String, default=None, nullable=True)
# file_url = db.Column(db.String, default=None, nullable=True)
# is_public = db.Column(db.Boolean, nullable=False)
''' End files '''
''' Meta Tables for Author Contact '''
class ContentEmail(db.Model):
__tablename__ = 'content_email'
id = db.Column(db.Integer, primary_key=True)
content_code = db.Column(db.String(5), index=True)
content_description = db.Column(db.Text())
@staticmethod
def migrate():
with open('app/data-migrate/author_contacts.json') as d_file:
data = json.load(d_file)
json_data = data["AuthorContact"]
nodes = json_data["ContentEmail"]
for node in nodes:
i = ContentEmail.query.filter_by(content_code=node['content_code']).first()
if i is None:
i = ContentEmail()
i.content_code = node['content_code']
i.content_description = node['content_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
content_email = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='content_email', key=key,
_external=False),
'data' : {
'content_code' : self.content_code,
'content_description' : self.content_description,
'author_contacts' : [author_contacts.to_json_simple(key) for author_contacts in self.author_contacts]
}
}
return content_email
def to_json_simple(self, key):
content_email = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='content_email', key=key,
_external=False),
'data' : {
'content_code' : self.content_code,
'content_description' : self.content_description
}
}
return content_email
def __repr__(self):
return self.content_code
contact_contents = db.Table('contact_contents', db.Model.metadata,
db.Column('id', db.Integer, primary_key=True),
db.Column('content_email_id', db.Integer, db.ForeignKey('content_email.id', ondelete='CASCADE')),
db.Column('author_contact_id', db.Integer, db.ForeignKey('author_contacts.id', ondelete='CASCADE'))
)
''' End Meta Tables for Author Contact '''
''' Meta Tables for Population'''
class PurposeEndangered(db.Model):
__tablename__ = 'purposes_endangered'
id = db.Column(db.Integer, primary_key=True)
purpose_name = db.Column(db.String(64), index=True)
purpose_description = db.Column(db.Text())
populations = db.relationship("Population", backref="purpose_endangered")
@staticmethod
def migrate():
with open('app/data-migrate/populations.json') as d_file:
data = json.load(d_file)
json_data = data["Population"]
nodes = json_data["PurposeEndangered"]
for node in nodes:
i = PurposeEndangered.query.filter_by(purpose_name=node['purpose_name']).first()
if i is None:
i = PurposeEndangered()
i.purpose_name = node['purpose_name']
i.purpose_description = node['purpose_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
purpose_endangered = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='purposes_endangered', key=key,
_external=False),
'data' : {
'purpose_name' : self.purpose_name,
'purpose_description' : self.purpose_description,
'populations' : [population.to_json_simple(key) for population in self.populations]
}
}
return purpose_endangered
def to_json_simple(self, key):
purpose_endangered = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='purposes_endangered', key=key,
_external=False),
'data' : {
'purpose_name' : self.purpose_name,
'purpose_description' : self.purpose_description
}
}
return purpose_endangered
def __repr__(self):
return self.purpose_name
class PurposeWeed(db.Model):
__tablename__ = 'purposes_weed'
id = db.Column(db.Integer, primary_key=True)
purpose_name = db.Column(db.String(64), index=True)
purpose_description = db.Column(db.Text())
populations = db.relationship("Population", backref="purpose_weed")
@staticmethod
def migrate():
with open('app/data-migrate/populations.json') as d_file:
data = json.load(d_file)
json_data = data["Population"]
nodes = json_data["PurposeWeed"]
for node in nodes:
i = PurposeWeed.query.filter_by(purpose_name=node['purpose_name']).first()
if i is None:
i = PurposeWeed()
i.purpose_name = node['purpose_name']
i.purpose_description = node['purpose_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
purpose_weed = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='purposes_weed', key=key,
_external=False),
'data' : {
'purpose_name' : self.purpose_name,
'purpose_description' : self.purpose_description,
'populations' : [population.to_json_simple(key) for population in self.populations]
}
}
return purpose_weed
def to_json_simple(self, key):
purpose_weed = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='purposes_weed', key=key,
_external=False),
'data' : {
'purpose_name' : self.purpose_name,
'purpose_description' : self.purpose_description
}
}
return purpose_weed
def __repr__(self):
return self.purpose_name
''' Meta Tables for Population '''
class Ecoregion(db.Model):
__tablename__ = 'ecoregions'
id = db.Column(db.Integer, primary_key=True)
ecoregion_code = db.Column(db.String(5), index=True)
ecoregion_description = db.Column(db.Text())
populations = db.relationship("Population", backref="ecoregion")
@staticmethod
def migrate():
with open('app/data-migrate/populations.json') as d_file:
data = json.load(d_file)
json_data = data["Population"]
nodes = json_data["Ecoregion"]
for node in nodes:
i = Ecoregion.query.filter_by(ecoregion_code=node['ecoregion_code']).first()
if i is None:
i = Ecoregion()
i.ecoregion_code = node['ecoregion_code']
i.ecoregion_description = node['ecoregion_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
ecoregion = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='ecoregions', key=key,
_external=False),
'data' : {
'ecoregion_code' : self.ecoregion_code,
'ecoregion_description' : self.ecoregion_description,
'populations' : [population.to_json_simple(key) for population in self.populations]
}
}
return ecoregion
def to_json_simple(self, key):
ecoregion = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='ecoregions', key=key,
_external=False),
'data' : {
'ecoregion_code' : self.ecoregion_code,
'ecoregion_description' : self.ecoregion_description
}
}
return ecoregion
def __repr__(self):
return self.ecoregion_code
class Continent(db.Model):
__tablename__ = 'continents'
id = db.Column(db.Integer, primary_key=True)
continent_name = db.Column(db.String(64), index=True)
populations = db.relationship("Population", backref="continent")
@staticmethod
def migrate():
with open('app/data-migrate/populations.json') as d_file:
data = json.load(d_file)
json_data = data["Population"]
nodes = json_data["Continent"]
for node in nodes:
i = Continent.query.filter_by(continent_name=node['continent_name']).first()
if i is None:
i = Continent()
i.continent_name = node['continent_name']
db.session.add(i)
db.session.commit()
def to_json(self, key):
continent = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='continents', key=key,
_external=False),
'data' : {
'continent_name' : self.continent_name,
'populations' : [population.to_json_simple(key) for population in self.populations] if self.populations else []
}
}
return continent
def to_json_simple(self, key):
continent = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='continents', key=key,
_external=False),
'data' : {
'continent_name' : self.continent_name
}
}
return continent
def __repr__(self):
return self.continent_name
class InvasiveStatusStudy(db.Model):
__tablename__ = 'invasive_status_studies'
id = db.Column(db.Integer, primary_key=True)
status_name = db.Column(db.String(64), index=True)
status_description = db.Column(db.Text)
populations = db.relationship("Population", backref="invasive_status_studies")
@staticmethod
def migrate():
with open('app/data-migrate/populations.json') as d_file:
data = json.load(d_file)
json_data = data["Population"]
nodes = json_data["InvasiveStatusStudy"]
for node in nodes:
i = InvasiveStatusStudy.query.filter_by(status_name=node['status_name']).first()
if i is None:
i = InvasiveStatusStudy()
i.status_name = node['status_name']
i.status_description = node['status_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
invasive_status_study = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='invasive_status_studies', key=key,
_external=False),
'data' : {
'status_name' : self.status_name,
'status_description' : self.status_description,
'populations' : [population.to_json_simple(key) for population in self.populations]
}
}
return invasive_status_study
def to_json_simple(self, key):
invasive_status_study = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='invasive_status_studies', key=key,
_external=False),
'data' : {
'status_name' : self.status_name,
'status_description' : self.status_description
}
}
return invasive_status_study
def __repr__(self):
return self.status_name
class InvasiveStatusElsewhere(db.Model):
__tablename__ = 'invasive_status_elsewhere'
id = db.Column(db.Integer, primary_key=True)
status_name = db.Column(db.String(64), index=True)
status_description = db.Column(db.Text)
populations = db.relationship("Population", backref="invasive_status_elsewhere")
@staticmethod
def migrate():
with open('app/data-migrate/populations.json') as d_file:
data = json.load(d_file)
json_data = data["Population"]
nodes = json_data["InvasiveStatusElsewhere"]
for node in nodes:
i = InvasiveStatusElsewhere.query.filter_by(status_name=node['status_name']).first()
if i is None:
i = InvasiveStatusElsewhere()
i.status_name = node['status_name']
i.status_description = node['status_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
invasive_status_elsewhere = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='invasive_status_elsewhere', key=key,
_external=False),
'data' : {
'status_name' : self.status_name,
'status_description' : self.status_description,
'populations' : [population.to_json_simple(key) for population in self.populations]
}
}
return invasive_status_elsewhere
def to_json_simple(self, key):
invasive_status_elsewhere = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='invasive_status_elsewhere', key=key,
_external=False),
'data' : {
'status_name' : self.status_name,
'status_description' : self.status_description
}
}
return invasive_status_elsewhere
def __repr__(self):
return self.status_name
''' End Meta Tables for Population '''
''' Meta Tables for Stage Type '''
class StageTypeClass(db.Model):
__tablename__ = 'stage_type_classes'
id = db.Column(db.Integer, primary_key=True)
type_class = db.Column(db.String(64), index=True)
stage_types = db.relationship("StageType", backref="stage_type_class")
@staticmethod
def migrate():
with open('app/data-migrate/stage_types.json') as d_file:
data = json.load(d_file)
json_data = data["StageType"]
nodes = json_data["StageTypeClass"]
for node in nodes:
i = StageTypeClass.query.filter_by(type_class=node['type_class']).first()
if i is None:
i = StageTypeClass()
i.type_class = node['type_class']
db.session.add(i)
db.session.commit()
def to_json(self, key):
stage_type_classes = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='stage_type_classes', key=key,
_external=False),
'data' : {
'type_class' : self.type_class,
                'stage_types' : [stage_type.to_json_simple(key) for stage_type in self.stage_types] if self.stage_types else []
}
}
return stage_type_classes
def to_json_simple(self, key):
stage_type_classes = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='stage_type_classes', key=key,
_external=False),
'data' : {
'type_class' : self.type_class
}
}
return stage_type_classes
def __repr__(self):
return self.type_class
''' End Meta Tables for Stage Type '''
''' Meta Tables for MatrixValue '''
class TransitionType(db.Model):
__tablename__ = 'transition_types'
id = db.Column(db.Integer, primary_key=True)
trans_code = db.Column(db.String(64), index=True)
trans_description = db.Column(db.Text())
matrix_values = db.relationship("MatrixValue", backref="transition_type")
@staticmethod
def migrate():
with open('app/data-migrate/matrix_values.json') as d_file:
data = json.load(d_file)
json_data = data["MatrixValue"]
nodes = json_data["TransitionType"]
for node in nodes:
i = TransitionType.query.filter_by(trans_code=node['trans_code']).first()
if i is None:
i = TransitionType()
i.trans_code = node['trans_code']
i.trans_description = node['trans_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
transition_type = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='transition_types', key=key,
_external=False),
'data' : {
'trans_code' : self.trans_code,
'trans_description' : self.trans_description,
                'matrix_values' : [value.to_json_simple(key) for value in self.matrix_values] if self.matrix_values else []
}
}
return transition_type
def to_json_simple(self, key):
transition_type = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='transition_types', key=key,
_external=False),
'data' : {
                'trans_code' : self.trans_code,
'trans_description' : self.trans_description
}
}
return transition_type
def __repr__(self):
return self.trans_code
''' End Meta Tables for MatrixValue '''
''' Meta Tables for Matrix '''
class MatrixComposition(db.Model):
__tablename__ = 'matrix_compositions'
id = db.Column(db.Integer, primary_key=True)
comp_name = db.Column(db.String(64))
comp_description = db.Column(db.String(250), index=True)
matrices = db.relationship("Matrix", backref="matrix_composition")
@staticmethod
def migrate():
with open('app/data-migrate/matrices.json') as d_file:
data = json.load(d_file)
json_data = data["Matrix"]
nodes = json_data["MatrixComposition"]
for node in nodes:
i = MatrixComposition.query.filter_by(comp_name=node['comp_name']).first()
if i is None:
i = MatrixComposition()
i.comp_name = node['comp_name']
i.comp_description = node['comp_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
matrix_composition = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='matrix_compositions', key=key,
_external=False),
'data' : {
'comp_name' : self.comp_name,
'comp_description' : self.comp_description,
'matrices' : [matrix.to_json_simple(key) for matrix in self.matrices]
}
}
return matrix_composition
def to_json_simple(self, key):
matrix_composition = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='matrix_compositions', key=key,
_external=False),
'data' : {
'comp_name' : self.comp_name,
'comp_description' : self.comp_description,
}
}
return matrix_composition
def __repr__(self):
return self.comp_name
class StartSeason(db.Model):
__tablename__ = 'start_seasons'
id = db.Column(db.Integer, primary_key=True)
season_id = db.Column(db.Integer())
season_name = db.Column(db.String(64), index=True)
matrices = db.relationship("Matrix", backref="start_season", lazy="dynamic")
@staticmethod
def migrate():
with open('app/data-migrate/matrices.json') as d_file:
data = json.load(d_file)
json_data = data["Matrix"]
nodes = json_data["Season"]
for node in nodes:
i = StartSeason.query.filter_by(season_id=node['season_id']).first()
if i is None:
i = StartSeason()
i.season_id = node['season_id']
i.season_name = node['season_name']
db.session.add(i)
db.session.commit()
def to_json(self, key):
start_season = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='start_seasons', key=key,
_external=False),
'data' : {
'season_id' : self.season_id,
'season_name' : self.season_name,
'matrices' : [matrix.to_json_simple(key) for matrix in self.matrices]
}
}
return start_season
def to_json_simple(self, key):
start_season = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='start_seasons', key=key,
_external=False),
'data' : {
'season_id' : self.season_id,
'season_name' : self.season_name
}
}
return start_season
def __repr__(self):
return str(self.season_id)
class EndSeason(db.Model):
__tablename__ = 'end_seasons'
id = db.Column(db.Integer, primary_key=True)
season_id = db.Column(db.Integer())
season_name = db.Column(db.String(64), index=True)
matrices = db.relationship("Matrix", backref="end_season", lazy="dynamic")
@staticmethod
def migrate():
with open('app/data-migrate/matrices.json') as d_file:
data = json.load(d_file)
json_data = data["Matrix"]
nodes = json_data["Season"]
for node in nodes:
i = EndSeason.query.filter_by(season_id=node['season_id']).first()
if i is None:
i = EndSeason()
i.season_id = node['season_id']
i.season_name = node['season_name']
db.session.add(i)
db.session.commit()
def to_json(self, key):
end_season = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='end_seasons', key=key,
_external=False),
'data' : {
'season_id' : self.season_id,
'season_name' : self.season_name,
'matrices' : [matrix.to_json_simple(key) for matrix in self.matrices]
}
}
return end_season
def to_json_simple(self, key):
end_season = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='end_seasons', key=key,
_external=False),
'data' : {
'season_id' : self.season_id,
'season_name' : self.season_name
}
}
return end_season
def __repr__(self):
return str(self.season_id)
class StudiedSex(db.Model):
__tablename__ = 'studied_sex'
id = db.Column(db.Integer, primary_key=True)
sex_code = db.Column(db.String(5), index=True)
sex_description = db.Column(db.Text())
matrices = db.relationship("Matrix", backref="studied_sex")
@staticmethod
def migrate():
with open('app/data-migrate/matrices.json') as d_file:
data = json.load(d_file)
json_data = data["Matrix"]
nodes = json_data["StudiedSex"]
for node in nodes:
i = StudiedSex.query.filter_by(sex_code=node['sex_code']).first()
if i is None:
i = StudiedSex()
i.sex_code = node['sex_code']
i.sex_description = node['sex_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
studied_sex = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='studied_sex', key=key,
_external=False),
'data' : {
'sex_code' : self.sex_code,
'sex_description' : self.sex_description,
'matrices' : [matrix.to_json_simple(key) for matrix in self.matrices]
}
}
return studied_sex
def to_json_simple(self, key):
studied_sex = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='studied_sex', key=key,
_external=False),
'data' : {
'sex_code' : self.sex_code,
'sex_description' : self.sex_description
}
}
return studied_sex
def __repr__(self):
return self.sex_code
class Captivity(db.Model):
__tablename__ = 'captivities'
id = db.Column(db.Integer, primary_key=True)
cap_code = db.Column(db.String(5), index=True)
cap_description = db.Column(db.Text())
matrices = db.relationship("Matrix", backref="captivities")
@staticmethod
def migrate():
with open('app/data-migrate/matrices.json') as d_file:
data = json.load(d_file)
json_data = data["Matrix"]
nodes = json_data["Captivity"]
for node in nodes:
i = Captivity.query.filter_by(cap_code=node['cap_code']).first()
if i is None:
i = Captivity()
i.cap_code = node['cap_code']
i.cap_description = node['cap_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
captivity = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='captivities', key=key,
_external=False),
'data' : {
'cap_code' : self.cap_code,
'cap_description' : self.cap_description,
'matrices' : [matrix.to_json_simple(key) for matrix in self.matrices]
}
}
return captivity
def to_json_simple(self, key):
captivity = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='captivities', key=key,
_external=False),
'data' : {
'cap_code' : self.cap_code,
'cap_description' : self.cap_description
}
}
return captivity
def __repr__(self):
return self.cap_code
class Status(db.Model):
#query_class = VersionQuery
__tablename__ = 'statuses'
id = db.Column(db.Integer, primary_key=True)
status_name = db.Column(db.String(64), index=True)
status_description = db.Column(db.Text())
notes = db.Column(db.Text())
version = db.relationship("Version", backref="statuses")
version_latest = db.Column(db.String(64))
version_original = db.Column(db.Boolean())
version_ok = db.Column(db.Boolean)
@staticmethod
def migrate():
with open('app/data-migrate/versions.json') as d_file:
data = json.load(d_file)
json_data = data["Version"]
nodes = json_data["Status"]
for node in nodes:
i = Status.query.filter_by(status_name=node['status_name']).first()
if i is None:
i = Status()
i.status_name = node['status_name']
i.status_description = node['status_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
status = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='statuses', key=key,
_external=False),
'data' : {
'status_name' : self.status_name,
'status_description' : self.status_description,
'notes' : self.notes,
'versions' : [version.to_json_simple(key) for version in self.version]
}
}
return status
def to_json_simple(self, key):
status = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='statuses', key=key,
_external=False),
'data' : {
'status_name' : self.status_name,
'status_description' : self.status_description,
'notes' : self.notes
}
}
return status
def __repr__(self):
return self.status_name
''' End Meta Tables for Matrix '''
''' Meta Tables for Fixed '''
class Small(db.Model):
__tablename__ = 'smalls'
id = db.Column(db.Integer, primary_key=True)
small_name = db.Column(db.String(200), index=True)
small_description = db.Column(db.Text())
fixed = db.relationship("Fixed", backref="smalls")
@staticmethod
def migrate():
with open('app/data-migrate/fixed.json') as d_file:
data = json.load(d_file)
json_data = data["Fixed"]
nodes = json_data["Small"]
for node in nodes:
i = Small.query.filter_by(small_name=node['small_name']).first()
if i is None:
i = Small()
i.small_name = node['small_name']
i.small_description = node['small_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
small = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='smalls', key=key,
_external=False),
'data' : {
'small_name' : self.small_name,
'small_description' : self.small_description,
'fixed' : [fix.to_json_simple(key) for fix in self.fixed]
}
}
return small
def to_json_simple(self, key):
small = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='smalls', key=key,
_external=False),
'data' : {
'small_name' : self.small_name,
'small_description' : self.small_description
}
}
return small
def __repr__(self):
return self.small_name
class CensusTiming(db.Model):
__tablename__ = 'census_timings'
id = db.Column(db.Integer, primary_key=True)
census_name = db.Column(db.String(200), index=True)
census_description = db.Column(db.Text())
fixed = db.relationship("Fixed", backref="census_timings")
@staticmethod
def migrate():
with open('app/data-migrate/fixed.json') as d_file:
data = json.load(d_file)
json_data = data["Fixed"]
nodes = json_data["CensusTiming"]
for node in nodes:
i = CensusTiming.query.filter_by(census_name=node['census_name']).first()
if i is None:
i = CensusTiming()
i.census_name = node['census_name']
i.census_description = node['census_description']
db.session.add(i)
db.session.commit()
def to_json(self, key):
census_timing = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='census_timings', key=key,
_external=False),
'data' : {
'census_name' : self.census_name,
'census_description' : self.census_description,
'fixed' : [fix.to_json_simple(key) for fix in self.fixed]
}
}
return census_timing
def to_json_simple(self, key):
census_timing = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='census_timings', key=key,
_external=False),
'data' : {
'census_name' : self.census_name,
'census_description' : self.census_description
}
}
return census_timing
def __repr__(self):
return self.census_name
''' End Meta Tables for Fixed '''
''' End Meta Tables '''
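# Hedged sketch (an addition, not original code): the migrate() staticmethods
# above are idempotent fixture loaders, so they can be chained from one entry
# point. `create_app` and the config name are assumed application-factory
# conventions, not defined in this file.
#
# def run_meta_migrations():
#     from app import create_app
#     app = create_app('default')  # hypothetical config name
#     with app.app_context():
#         for model in (TransitionType, MatrixComposition, StartSeason,
#                       EndSeason, StudiedSex, Captivity, Status,
#                       Small, CensusTiming):
#             model.migrate()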
class ChangeLogger(db.Model):
#query_class = VersionQuery
__tablename__ = 'changelogger'
id = db.Column(db.Integer, primary_key=True)
new_edit_delete = db.Column(db.String(6), default = "edit")
time_of_edit = db.Column(db.DateTime, default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey('users.id',ondelete='CASCADE'))
object_type = db.Column(db.String(64))
field_name = db.Column(db.String(64))
object_id = db.Column(db.Integer)
content_before = db.Column(db.Text)
content_after = db.Column(db.Text)
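# Hedged usage sketch for ChangeLogger: the add_to_logger() helpers on the
# models below all write rows into this table. A hypothetical edit view
# (current_user from Flask-Login; `form` is illustrative, not from this file)
# might record a field change like this:
#
# old_value = species.species_common
# species.species_common = form.species_common.data
# species.add_to_logger(current_user, field_name='species_common',
#                       content_before=old_value,
#                       content_after=species.species_common,
#                       new_edit_delete='edit')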
class Species(db.Model):
#query_class = VersionQuery
__tablename__ = 'species'
id = db.Column(db.Integer, primary_key=True)
# subspecies = db.Column(db.String(64))
species_accepted = db.Column(db.String(64))
species_common = db.Column(db.String(200))
iucn_status_id = db.Column(db.Integer, db.ForeignKey('iucn_status.id',ondelete='CASCADE'))
#esa_status_id = db.Column(db.Integer, db.ForeignKey('esa_statuses.id',ondelete='CASCADE'))
#invasive_status = db.Column(db.Boolean())
gbif_taxon_key = db.Column(db.Integer)
species_iucn_taxonid = db.Column(db.Integer())
#species_iucn_population_assessed = db.Column(db.String(200))
image_path = db.Column(db.Text)
image_path2 = db.Column(db.Text)
taxonomy = db.relationship("Taxonomy", backref="species", passive_deletes=True)
trait = db.relationship("Trait", backref="species", passive_deletes=True)
populations = db.relationship("Population", backref="species", passive_deletes=True)
version = db.relationship("Version", backref="species", uselist=False, passive_deletes=True)
#version_latest = db.Column(db.String(64))
#version_original = db.Column(db.Boolean())
#version_ok = db.Column(db.Boolean)
# @staticmethod
# def find_version(original):
# original = original[0]
# last = Version.query.filter_by(original_version_id=original.id).order_by(Version.version_number.desc()).first()
# return last.version_number + 1
def add_to_logger(self,current_user,field_name,content_before,content_after,new_edit_delete):
changelogger = {
'user_id' : current_user.id,
'object_type' : "species",
'field_name' : field_name,
'object_id' : self.id,
'content_before' : content_before,
'content_after' : content_after,
'new_edit_delete' : new_edit_delete
}
cl = ChangeLogger(**changelogger)
if cl.content_before != cl.content_after:
if cl.content_before is None:
cl.new_edit_delete = "new"
db.session.add(cl)
db.session.commit()
# def save(self,current_user):
# species = {
# 'species_accepted' : self.species_accepted,
# 'species_common' : self.species_common,
# 'iucn_status_id' : self.iucn_status_id,
# 'esa_status_id' : self.esa_status_id,
# 'species_gisd_status' : self.species_gisd_status,
# 'invasive_status' : self.invasive_status,
# 'gbif_taxon_key' : self.gbif_taxon_key,
# 'species_iucn_taxonid' : self.species_iucn_taxonid,
# 'species_iucn_population_assessed' : self.species_iucn_population_assessed,
# 'image_path' : self.image_path,
# 'image_path2' : self.image_path2,
# 'version_latest' : 1,
# 'version_ok' : 0
# }
#
# s = Species(**species)
# db.session.add(s)
# db.session.commit() # in the forms this creates 2 copies of everything, is this right? who knows?
#
# status = Status.query.filter_by(status_name="Amber").first() #get an amber status from the metatable
#
# try:
# original_version = self.version.original_version
# version = {
# 'version_number' : self.find_version(original_version),
# 'original_version_id' : self.version.original_version[0].id,
# 'checked' : 0,
# 'status_id' : status.id,
# 'checked_count' : 0,
# 'version_user_id' : current_user.id, #needs to get user id
# 'database_id' : 1,
# 'species_id' : s.id
# }
# except (AttributeError, IntegrityError): # if a previous version doesn't exist, e.g. you're creating a new object
# version = {
# 'version_number' : 1,
# 'original_version_id' : 0, # s: this causes an error because when creating a new version object, this has to have the same original_version_id as its primary key, so when you commit it gets an int
# 'checked' : 0,
# 'status_id' : status.id,
# 'checked_count' : 0,
# 'version_user_id' : current_user.id,
# 'database_id' : 1, # what is this
# 'species_id' : s.id
# }
#
#
# v = Version(**version)
# db.session.add(v)
# db.session.flush()
# if v.original_version_id == 0:
# v.original_version_id = v.id
# db.session.commit()
#
# return s
#
# def save_admin(self, current_user):
# species = {
# 'species_accepted' : self.species_accepted,
# 'species_common' : self.species_common,
# 'iucn_status_id' : self.iucn_status_id,
# 'esa_status_id' : self.esa_status_id,
# 'species_gisd_status' : self.species_gisd_status,
# 'invasive_status' : self.invasive_status,
# 'gbif_taxon_key' : self.gbif_taxon_key,
# 'species_iucn_taxonid' : self.species_iucn_taxonid,
# 'species_iucn_population_assessed' : self.species_iucn_population_assessed,
# 'image_path' : self.image_path,
# 'image_path2' : self.image_path2,
# 'version_latest' : 1,
# 'version_ok' : 1
# }
#
# s = Species(**species)
# db.session.add(s)
# db.session.commit()
#
# status = Status.query.filter_by(status_name="Green").first()
# original_version = self.version.original_version
#
# version = {
# 'version_number' : self.find_version(original_version),
# 'original_version_id' : self.version.original_version[0].id,
# 'checked' : 1,
# 'status_id' : status.id,
# 'checked_count' : self.version.checked_count + 1 if self.version.checked_count else 1,
# 'version_user_id' : current_user.id,
# 'database_id' : 1,
# 'species_id' : s.id
# }
#
# v = Version(**version)
# db.session.add(v)
# db.session.commit()
#
# original_version_children = self.version.original_version[0].child_versions
#
# for child in original_version_children:
# child.version_latest = 0
# db.session.add(child)
# db.session.commit()
#
# return s
# def verify(self, status_colour, current_user):
# status = Status.query.filter_by(status_name=status_colour).first()
# self.version.status_id = status.id
# self.version.checked = True
# self.version.checked_count = self.version.checked_count + 1 if self.version.checked_count else 1
# self.version.version_user_id = current_user.id
# this_version = [self.version.version_number]
# if status.status_name == 'Green':
# self.version_latest = 1
# self.version_ok = 1
# original_version_children = self.version.original_version[0].child_versions
# for child in original_version_children:
# child.version_latest = 0
# db.session.add(child)
# db.session.commit()
# if status.status_name == 'Amber':
# self.version_latest = 0
# self.version_ok = 0
# original_version_children = self.version.original_version[0].child_versions
# original_species_children = [s.species for s in self.version.original_version[0].child_versions]
# green = Status.query.filter_by(status_name='Green').first()
# this_version = [v.version_number]
# green_statuses = [child.version_number for child in original_version_children if child.statuses is green and child.version_number not in this_version]
# closest_green_status = min(green_statuses, key=lambda x:abs(x-this_version[0]))
# closest_id = [child for child in original_version_children if child.version_number is closest_green_status][0].species_id
# species = Species.query.get(closest_id)
# species.version_latest = 1
@staticmethod
def migrate():
IUCNStatus.migrate()
def to_json(self, key):
species = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='species', key=key,
_external=False),
'data' : {
'species_accepted': self.species_accepted,
'species_common': self.species_common,
'taxonomy' : self.taxonomy[0].to_json_simple(key) if self.taxonomy else None,
'traits' : [trait.to_json_simple(key) for trait in self.trait],
'populations' : [population.to_json_simple(key) for population in self.populations],
'number_populations' : len(self.populations)
}
}
user = User.query.filter_by(api_hash=key).first()
if user is not None and user.institute.institution_name == "University of Exeter":
species['data']['gbif_taxon_key'] = self.gbif_taxon_key
species['data']['iucn_status'] = self.iucn_status.to_json_simple(key) if self.iucn_status else None
species['data']['esa_status'] = self.esa_status.to_json_simple(key) if getattr(self, 'esa_status', None) else None # esa_status relationship is commented out above
return species
def to_json_simple(self, key):
species = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='species', key=key,
_external=False),
'data' : {
'species_accepted': self.species_accepted,
'populations_len' : len(self.populations),
#'versions' : len(self.versions)
}
}
return species
def __repr__(self):
return '<Species %r>' % self.id
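# Hedged sketch: serialising a Species through the API helpers above. The key
# is the caller's User.api_hash; to_json() only attaches the extra fields when
# the key resolves to a University of Exeter account. The species name here is
# purely illustrative.
#
# species = Species.query.filter_by(species_accepted='Ursus maritimus').first()
# payload = species.to_json(key=user.api_hash) if species is not None else None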
class Taxonomy(db.Model):
#query_class = VersionQuery
__tablename__ = 'taxonomies'
id = db.Column(db.Integer, primary_key=True)
species_id = db.Column(db.Integer, db.ForeignKey('species.id',ondelete='CASCADE'))
authority = db.Column(db.Text())
tpl_version = db.Column(db.String(64)) # Currently at 1.0, which could be float, but sometimes releases are 1.0.1 etc, best as string for now?
infraspecies_accepted = db.Column(db.String(64))
species_epithet_accepted = db.Column(db.String(64))
genus_accepted = db.Column(db.String(64))
genus = db.Column(db.String(64))
family = db.Column(db.String(64))
tax_order = db.Column(db.String(64))
tax_class = db.Column(db.String(64))
phylum = db.Column(db.String(64))
kingdom = db.Column(db.String(64))
col_check_ok = db.Column(db.Boolean())
col_check_date = db.Column(db.Date())
version = db.relationship("Version", backref="taxonomy", passive_deletes=True)
#version_latest = db.Column(db.String(64))
#version_original = db.Column(db.Boolean())
#version_ok = db.Column(db.Boolean)
@staticmethod
def migrate():
pass
def to_json(self, key):
taxonomy = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='taxonomies', key=key,
_external=False),
'data' : {
'species' : self.species.to_json_simple(key),
'publication' : self.publication.to_json_simple(key) if getattr(self, 'publication', None) else None, # no publication relationship is declared on Taxonomy in this file
#'species_author' : self.species_author,
'authority' : self.authority,
'tpl_version' : self.tpl_version,
'infraspecies_accepted' : self.infraspecies_accepted,
'species_epithet_accepted' : self.species_epithet_accepted,
'genus_accepted' : self.genus_accepted,
'genus' : self.genus,
'family' : self.family,
'tax_order' : self.tax_order,
'tax_class' : self.tax_class,
'phylum' : self.phylum,
'kingdom' : self.kingdom,
'col_check_ok' : self.col_check_ok,
'col_check_date' : self.col_check_date,
'versions' : [version.to_json_simple(key) for version in self.version]
}
}
return taxonomy
def to_json_simple(self, key):
taxonomy = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='taxonomies', key=key,
_external=False),
'data' : {
'authority' : self.authority,
'genus' : self.genus,
'family' : self.family,
}
}
return taxonomy
def __repr__(self):
return '<Taxonomy %r>' % self.id
def add_to_logger(self,current_user,field_name,content_before,content_after,new_edit_delete):
changelogger = {
'user_id' : current_user.id,
'object_type' : "taxonomy",
'field_name' : field_name,
'object_id' : self.id,
'content_before' : content_before,
'content_after' : content_after,
'new_edit_delete' : new_edit_delete
}
cl = ChangeLogger(**changelogger)
if cl.content_before != cl.content_after:
if cl.content_before is None:
cl.new_edit_delete = "new"
db.session.add(cl)
db.session.commit()
class Trait(db.Model):
#query_class = VersionQuery
__tablename__ = 'traits'
id = db.Column(db.Integer, primary_key=True)
species_id = db.Column(db.Integer, db.ForeignKey('species.id',ondelete='CASCADE'))
#max_height = db.Column(db.Float()) #This should be a double, eventually
organism_type_id = db.Column(db.Integer, db.ForeignKey('organism_types.id',ondelete='CASCADE'))
growth_form_raunkiaer_id = db.Column(db.Integer, db.ForeignKey('growth_forms_raunkiaer.id',ondelete='CASCADE'))
reproductive_repetition_id = db.Column(db.Integer, db.ForeignKey('reproductive_repetition.id',ondelete='CASCADE'))
dicot_monoc_id = db.Column(db.Integer, db.ForeignKey('dicot_monoc.id',ondelete='CASCADE'))
angio_gymno_id = db.Column(db.Integer, db.ForeignKey('angio_gymno.id',ondelete='CASCADE'))
spand_ex_growth_type_id = db.Column(db.Integer, db.ForeignKey('spand_ex_growth_types.id',ondelete='CASCADE'))
species_seedbank = db.Column(db.Boolean())
species_clonality = db.Column(db.Boolean())
species_gisd_status = db.Column(db.Boolean())
species_seedbank_source = db.Column(db.Text())
species_clonality_source = db.Column(db.Text())
version = db.relationship("Version", backref="trait", passive_deletes=True)
#version_latest = db.Column(db.String(64))
#version_original = db.Column(db.Boolean())
#version_ok = db.Column(db.Boolean)
@staticmethod
def migrate():
OrganismType.migrate()
GrowthFormRaunkiaer.migrate()
ReproductiveRepetition.migrate()
DicotMonoc.migrate()
AngioGymno.migrate()
SpandExGrowthType.migrate()
def to_json(self, key):
trait = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='traits', key=key,
_external=False),
'data' : {
'species' : self.species.to_json_simple(key) if self.species else None,
'organism_type' : self.organism_type.to_json_simple(key) if self.organism_type else None,
'reproductive_repetition' : self.reproductive_repetition.to_json_simple(key) if self.reproductive_repetition else None,
'dicot_monoc' : self.dicot_monoc.to_json_simple(key) if self.dicot_monoc else None,
'angio_gymno' : self.angio_gymno.to_json_simple(key) if self.angio_gymno else None,
'species_seedbank': self.species_seedbank,
'species_clonality': self.species_clonality,
'species_gisd_status': self.species_gisd_status,
'versions' : [version.to_json_simple(key) for version in self.version]
}
}
user = User.query.filter_by(api_hash=key).first()
if user is not None and user.institute.institution_name == "University of Exeter":
trait['data']['growth_form_raunkiaer'] = self.growth_form_raunkiaer.to_json_simple(key) if self.growth_form_raunkiaer else None
trait['data']['spand_ex_growth_type'] = self.spand_ex_growth_types.to_json_simple(key) if self.spand_ex_growth_types else None
return trait
def to_json_simple(self, key):
trait = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='traits', key=key,
_external=False),
'data' : {
'species' : self.species.species_accepted if self.species else None,
'organism_type' : self.organism_type.to_json_simple(key) if self.organism_type else None,
'reproductive_repetition' : self.reproductive_repetition.to_json_simple(key) if self.reproductive_repetition else None,
'dicot_monoc' : self.dicot_monoc.to_json_simple(key) if self.dicot_monoc else None,
'angio_gymno' : self.angio_gymno.to_json_simple(key) if self.angio_gymno else None,
}
}
user = User.query.filter_by(api_hash=key).first()
if user is not None and user.institute.institution_name == "University of Exeter":
trait['data']['growth_form_raunkiaer'] = self.growth_form_raunkiaer.to_json_simple(key) if self.growth_form_raunkiaer else None
trait['data']['spand_ex_growth_type'] = self.spand_ex_growth_types.to_json_simple(key) if self.spand_ex_growth_types else None
return trait
def __repr__(self):
return '<Trait %r>' % self.id
def add_to_logger(self,current_user,field_name,content_before,content_after,new_edit_delete):
changelogger = {
'user_id' : current_user.id,
'object_type' : "trait",
'field_name' : field_name,
'object_id' : self.id,
'content_before' : content_before,
'content_after' : content_after,
'new_edit_delete' : new_edit_delete
}
cl = ChangeLogger(**changelogger)
if cl.content_before != cl.content_after:
if cl.content_before is None:
cl.new_edit_delete = "new"
db.session.add(cl)
db.session.commit()
class Publication(db.Model):
#query_class = VersionQuery
__tablename__ = 'publications'
id = db.Column(db.Integer, primary_key=True)
source_type_id = db.Column(db.Integer, db.ForeignKey('source_types.id',ondelete='CASCADE'))
authors = db.Column(db.Text()) # These appear as vectors in Judy's schema, trying to think of the best way to implement this within MySQL and Django/Flask
editors = db.Column(db.Text())
pub_title = db.Column(db.Text())
journal_book_conf = db.Column(db.Text())
year = db.Column(db.SmallInteger()) #proto
volume = db.Column(db.Text())
pages = db.Column(db.Text())
publisher = db.Column(db.Text())
city = db.Column(db.Text())
country = db.Column(db.Text())
institution = db.Column(db.Text())
DOI_ISBN = db.Column(db.Text())
journal_name = db.Column(db.Text()) #r-generated, needs more info, probably generated in method of this model
purposes = db.relationship("Purpose",
secondary=publication_purposes, backref="publications", passive_deletes=True)
date_digitised = db.Column(db.DateTime(), default=lambda: datetime.now().date()) # callable default, so the date is taken at insert time rather than frozen at import time
embargo = db.Column(db.Date()) #nullable
missing_data = db.relationship("MissingData",
secondary=publication_missing_data, backref="publications", passive_deletes=True)
additional_source_string = db.Column(db.Text())
colour = db.Column(db.String(7))
study_notes = db.Column(db.Text())
publications_protocol_id = db.Column(db.Integer, db.ForeignKey('publications_protocol.id',ondelete='CASCADE'))
# Establishing one to many relationships between tables
author_contacts = db.relationship("AuthorContact", backref="publication", passive_deletes=True)
additional_sources = db.relationship("AdditionalSource", backref="publication", passive_deletes=True)
populations = db.relationship("Population", backref="publication", passive_deletes=True)
version = db.relationship("Version", backref="publication", passive_deletes=True)
#version_latest = db.Column(db.String(64))
#version_original = db.Column(db.Boolean())
#version_ok = db.Column(db.Boolean)
@staticmethod
def migrate():
SourceType.migrate()
Purpose.migrate()
MissingData.migrate()
PublicationsProtocol.migrate()
def to_json(self, key):
publication = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='publications', key=key,
_external=False),
'data' : {
'source_type' : self.source_type.to_json_simple(key) if self.source_type else None,
'authors' : self.authors,
'editors' : self.editors,
'pub_title' : self.pub_title,
'journal_book_conf' : self.journal_book_conf,
'year' : self.year,
'volume' : self.volume,
'pages' : self.pages,
'publisher' : self.publisher,
'city' : self.city,
'country' : self.country,
'institution' : self.institution,
'DOI_ISBN' : self.DOI_ISBN,
'journal_name' : self.journal_name,
'purposes' : [purpose.to_json_simple(key) for purpose in self.purposes] if self.purposes else [],
'date_digitised' : self.date_digitised,
'embargo' : self.embargo,
'missing_data' : [missing.to_json_simple(key) for missing in self.missing_data] if self.missing_data else [],
'additional_source_string' : self.additional_source_string,
'author_contacts' : [author_contact.to_json_simple(key) for author_contact in self.author_contacts] if self.author_contacts else [],
'additional_sources' : [additional_source.to_json_simple(key) for additional_source in self.additional_sources] if self.additional_sources else [],
'populations' : [populations.to_json_simple(key) for populations in self.populations],
'stages' : [stage.to_json_simple(key) for stage in self.stages] if getattr(self, 'stages', None) else None,
'taxonomies' : [taxonomy.to_json_simple(key) for taxonomy in self.taxonomies] if getattr(self, 'taxonomies', None) else None,
'publications_protocol' : self.publications_protocol.to_json_simple(key) if self.publications_protocol else None,
# 'studies' : [studies.to_json_simple(key) for studies in self.studies] if self.studies else None,
'versions' : [version.to_json_simple(key) for version in self.version] if self.version else None
}
}
return publication
def to_json_simple(self, key):
publication = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='publications', key=key,
_external=False),
'data' : {
'source_type' : self.source_type.to_json_simple(key) if self.source_type else None,
'authors' : self.authors,
'pub_title' : self.pub_title,
'year' : self.year,
'DOI_ISBN' : self.DOI_ISBN,
'journal_name' : self.journal_name,
#'purposes' : self.purposes.to_json_simple(key) if self.purposes else None,
'date_digitised' : self.date_digitised,
'embargo' : self.embargo,
'additional_source_string' : self.additional_source_string,
'additional_sources_len' : len(self.additional_sources),
'populations_len' : len(self.populations)
#'versions_len' : len(self.versions)
}
}
return publication
def __repr__(self):
return '<Publication %r>' % self.id
def add_to_logger(self,current_user,field_name,content_before,content_after,new_edit_delete):
changelogger = {
'user_id' : current_user.id,
'object_type' : "publication",
'field_name' : field_name,
'object_id' : self.id,
'content_before' : content_before,
'content_after' : content_after,
'new_edit_delete' : new_edit_delete
}
cl = ChangeLogger(**changelogger)
if cl.content_before != cl.content_after:
if cl.content_before is None:
cl.new_edit_delete = "new"
db.session.add(cl)
db.session.commit()
class AuthorContact(db.Model):
#query_class = VersionQuery
__tablename__ = 'author_contacts'
id = db.Column(db.Integer, primary_key=True)
publication_id = db.Column(db.Integer, db.ForeignKey('publications.id',ondelete='CASCADE'))
corresponding_author = db.Column(db.Text())
corresponding_author_email = db.Column(db.Text())
date_contacted = db.Column(db.Date(), index=True)
date_contacted_again = db.Column(db.Date(), index=True)
contacting_user_id = db.Column(db.Integer, db.ForeignKey('users.id',ondelete='CASCADE'))
content_emails = db.relationship("ContentEmail",
secondary=contact_contents, backref="author_contacts", passive_deletes=True)
extra_content_email = db.Column(db.Text())
correspondence_email_content = db.Column(db.Text())
author_reply = db.Column(db.Text())
version = db.relationship("Version", backref="author_contact", passive_deletes=True)
#version_latest = db.Column(db.String(64))
#version_original = db.Column(db.Boolean())
#version_ok = db.Column(db.Boolean)
@staticmethod
def migrate():
ContentEmail.migrate()
def to_json(self, key):
author_contact = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='author_contacts', key=key,
_external=False),
'data' : {
'publication' : self.publication.to_json_simple(key) if self.publication else None,
'corresponding_author' : self.corresponding_author,
'corresponding_author_email' : self.corresponding_author_email,
'date_contacted' : self.date_contacted,
'contacting_user' : self.contacting_user.to_json_simple(key) if self.contacting_user else None,
'content_emails' : [content_email.to_json_simple(key) for content_email in self.content_emails] if self.content_emails else [],
'extra_content_email' : self.extra_content_email,
'author_reply' : self.author_reply,
}
}
return author_contact
def to_json_simple(self, key):
author_contact = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='author_contacts', key=key,
_external=False),
'data' : {
'publication_id' : self.publication_id,
'date_contacted' : self.date_contacted,
'author_reply' : self.author_reply,
}
}
return author_contact
def __repr__(self):
return str(self.publication_id)
class AdditionalSource(db.Model):
#query_class = VersionQuery
__tablename__ = 'additional_sources'
id = db.Column(db.Integer, primary_key=True)
publication_id = db.Column(db.Integer, db.ForeignKey('publications.id',ondelete='CASCADE'))
source_type_id = db.Column(db.Integer, db.ForeignKey('source_types.id',ondelete='CASCADE'))
authors = db.Column(db.Text())
editors = db.Column(db.Text())
pub_title = db.Column(db.Text())
journal_book_conf = db.Column(db.Text())
year = db.Column(db.SmallInteger()) #proto
volume = db.Column(db.Text())
pages = db.Column(db.Text())
publisher = db.Column(db.Text())
city = db.Column(db.Text())
country = db.Column(db.Text())
institution = db.Column(db.Text())
DOI_ISBN = db.Column(db.Text())
journal_name = db.Column(db.Text()) #r-generated, needs more info, probably to be generated in method of this model, first author in author list?
description = db.Column(db.Text())
version = db.relationship("Version", backref="additional_source", passive_deletes=True)
#version_latest = db.Column(db.String(64))
#version_original = db.Column(db.Boolean())
#version_ok = db.Column(db.Boolean)
def to_json(self, key):
additional_source = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='additional_sources', key=key,
_external=False),
'data' : {
'source_type' : self.source_type.to_json_simple(key) if self.source_type else None,
'authors' : self.authors,
'editors' : self.editors,
'pub_title' : self.pub_title,
'journal_book_conf' : self.journal_book_conf,
'year' : self.year,
'volume' : self.volume,
'pages' : self.pages,
'publisher' : self.publisher,
'city' : self.city,
'country' : self.country,
'institution' : self.institution,
'DOI_ISBN' : self.DOI_ISBN,
'journal_name' : self.journal_name,
'description' : self.description,
'versions' : [version.to_json_simple(key) for version in self.version]
}
}
return additional_source
def to_json_simple(self, key):
additional_source = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='additional_sources', key=key,
_external=False),
'data' : {
'authors' : self.authors,
'pub_title' : self.pub_title,
'year' : self.year,
'DOI_ISBN' : self.DOI_ISBN,
'journal_name' : self.journal_name,
'description' : self.description,
'versions_len' : len(self.version)
}
}
return additional_source
def __repr__(self):
return '<Additional Source %r>' % self.id
class Population(db.Model):
#query_class = VersionQuery
__tablename__ = 'populations'
id = db.Column(db.Integer, primary_key=True, index=True)
publication_id = db.Column(db.Integer, db.ForeignKey('publications.id',ondelete='CASCADE'))
species_id = db.Column(db.Integer, db.ForeignKey('species.id',ondelete='CASCADE'))
species_author = db.Column(db.String(64))
population_name = db.Column(db.Text())
ecoregion_id = db.Column(db.Integer, db.ForeignKey('ecoregions.id',ondelete='CASCADE'))
###Danny adding database stuff
database_id = db.Column(db.Integer, db.ForeignKey('databases.id',ondelete='CASCADE'))
invasive_status_study_id = db.Column(db.Integer, db.ForeignKey('invasive_status_studies.id',ondelete='CASCADE')) #
invasive_status_elsewhere_id = db.Column(db.Integer, db.ForeignKey('invasive_status_elsewhere.id',ondelete='CASCADE')) #
country = db.Column(db.Text())
population_nautical_miles = db.Column(db.Integer())
continent_id = db.Column(db.Integer, db.ForeignKey('continents.id',ondelete='CASCADE'))
latitude = db.Column(db.Float())
longitude = db.Column(db.Float())
lat_ns = db.Column(db.String(1))
lat_deg = db.Column(db.Integer())
lat_min = db.Column(db.Integer())
lat_sec = db.Column(db.Integer())
lon_ew = db.Column(db.String(1))
lon_deg = db.Column(db.Integer())
lon_min = db.Column(db.Integer())
lon_sec = db.Column(db.Integer())
altitude = db.Column(db.Float())
#pop_size = db.Column(db.Text())
within_site_replication = db.Column(db.Text())
#from study
study_duration = db.Column(db.Integer(), index=True)
study_start = db.Column(db.Integer())
study_end = db.Column(db.Integer())
purpose_endangered_id = db.Column(db.Integer, db.ForeignKey('purposes_endangered.id',ondelete='CASCADE'))
purpose_weed_id = db.Column(db.Integer, db.ForeignKey('purposes_weed.id',ondelete='CASCADE'))
database_source_id = db.Column(db.Integer, db.ForeignKey('institutes.id',ondelete='CASCADE'))
number_populations = db.Column(db.Integer())
#from study end
matrices = db.relationship("Matrix", backref="population", passive_deletes=True)
version = db.relationship("Version", backref="population", passive_deletes=True)
#version_latest = db.Column(db.String(64))
#version_original = db.Column(db.Boolean())
#version_ok = db.Column(db.Boolean)
# def geometries_dec(self):
# geo = json.loads(self.geometries)
#
# lat_min = geo['lat_min']
# lat_deg = geo['lat_deg']
# lat_sec = geo['lat_sec']
# lat_ns = geo['lat_ns']
# lon_min = geo['lon_min']
# lon_deg = geo['lon_deg']
# lon_sec = geo['lon_sec']
# lat_we = geo['lat_we']
# altitude = geo['altitude']
#
# if lat_we != 'NA':
# if lat_we == 'W':
# lon_deg = -float(lon_deg)
#
# try:
# decimal_lat = float(lat_deg)+float(lat_min)/60+float(lat_sec)/3600
# decimal_lon = float(lon_deg)+float(lon_min)/60+float(lon_sec)/3600
# altitude = float(altitude)
# except:
# decimal_lat = 'NA'
# decimal_lon = 'NA'
# altitude = 'NA'
#
# geometries = {"latitude" : decimal_lat, "longitude" : decimal_lon, "altitude" : altitude}
# return geometries
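# A runnable, hedged rewrite of the commented-out geometries_dec() sketch
# above (an addition, not original code): convert degree/minute/second fields
# to signed decimal degrees. Missing or non-numeric components yield None
# rather than the 'NA' strings used in the sketch, and the sign is applied to
# the whole value, which fixes the minutes/seconds sign bug in that sketch.
@staticmethod
def dms_to_decimal(deg, minute, sec, hemisphere):
    """Return signed decimal degrees, or None if any component is missing."""
    try:
        value = float(deg) + float(minute) / 60.0 + float(sec) / 3600.0
    except (TypeError, ValueError):
        return None
    # southern and western hemispheres are negative
    return -value if hemisphere in ('S', 'W') else value
# e.g. Population.dms_to_decimal(51, 30, 0, 'N') -> 51.5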
def to_json(self, key):
population = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='populations', key=key,
_external=False),
'data' : {
'species' : self.species.to_json_simple(key),
'publication' : self.publication.to_json_simple(key),
'species_author' : self.species_author,
'population_name' : self.population_name,
'ecoregion' : self.ecoregion.to_json_simple(key) if self.ecoregion else None,
'country' : self.country,
'database' : self.database.to_json_simple(key) if self.database else None,
'population_nautical_miles' : self.population_nautical_miles,
'continent' : self.continent.to_json_simple(key) if self.continent else None,
'longitude' : self.longitude,
'latitude' : self.latitude,
'altitude' : self.altitude,
'matrices' : [matrix.to_json_simple(key) for matrix in self.matrices] if self.matrices else None#,
#'versions' : [version.to_json_simple(key) for version in self.versions] if self.versions else None,
}
}
user = User.query.filter_by(api_hash=key).first()
if user is not None and user.institute.institution_name == "University of Exeter":
population['data']['invasive_status_study'] = self.invasive_status_studies.to_json_simple(key) if self.invasive_status_studies else None
population['data']['invasive_status_elsewhere'] = self.invasive_status_elsewhere.to_json_simple(key) if self.invasive_status_elsewhere else None
population['data']['population_size'] = getattr(self, 'pop_size', None) # pop_size column is commented out above
return population
def to_json_simple(self, key):
population = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='populations', key=key,
_external=False),
'data' : {
'population_name' : self.population_name,
'ecoregion' : self.ecoregion.to_json_simple(key) if self.ecoregion else None,
'country' : self.country,
'matrices_len' : len(self.matrices),
}
}
return population
def add_to_logger(self,current_user,field_name,content_before,content_after,new_edit_delete):
changelogger = {
'user_id' : current_user.id,
'object_type' : "population",
'field_name' : field_name,
'object_id' : self.id,
'content_before' : content_before,
'content_after' : content_after,
'new_edit_delete' : new_edit_delete
}
cl = ChangeLogger(**changelogger)
if cl.content_before != cl.content_after:
if cl.content_before is None:
cl.new_edit_delete = "new"
db.session.add(cl)
db.session.commit()
@staticmethod
def migrate():
Ecoregion.migrate()
Continent.migrate()
Database.migrate()
InvasiveStatusStudy.migrate()
InvasiveStatusElsewhere.migrate()
PurposeEndangered.migrate()
PurposeWeed.migrate()
def __repr__(self):
return '<Population %r>' % self.id
class Stage(db.Model):
#query_class = VersionQuery
__tablename__ = 'stages'
id = db.Column(db.Integer, primary_key=True, index=True)
publication_id = db.Column(db.Integer, db.ForeignKey('publications.id',ondelete='CASCADE'))
stage_type_id = db.Column(db.Integer, db.ForeignKey('stage_types.id',ondelete='CASCADE'))
name = db.Column(db.Text())
matrix_stages = db.relationship("MatrixStage", backref="stage", passive_deletes=True)
version = db.relationship("Version", backref="stage", passive_deletes=True)
#version_latest = db.Column(db.String(64))
#version_original = db.Column(db.Boolean())
#version_ok = db.Column(db.Boolean)
def to_json(self, key):
stage = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='stages', key=key,
_external=False),
'data' : {
'publication' : self.publication.to_json_simple(key) if getattr(self, 'publication', None) else None,
'stage_type': self.stage_types.to_json_simple(key) if self.stage_types else None, # backref from StageType.stages is named "stage_types"
'name' : self.name,
'matrix_stages' : [matrix_stage.to_json_simple(key) for matrix_stage in self.matrix_stages],
'versions' : [version.to_json_simple(key) for version in self.version]
}
}
return stage
def to_json_simple(self, key):
stage = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='stages', key=key,
_external=False),
'data' : {
'name' : self.name,
'matrix_stages_len' : len(self.matrix_stages),
'versions_len' : len(self.version)
}
}
return stage
def __repr__(self):
return '<Stage %r>' % self.id
class StageType(db.Model):
#query_class = VersionQuery
__tablename__ = 'stage_types'
id = db.Column(db.Integer, primary_key=True, index=True)
type_name = db.Column(db.Text())
type_class_id = db.Column(db.Integer, db.ForeignKey('stage_type_classes.id',ondelete='CASCADE'))
stages = db.relationship("Stage", backref="stage_types", passive_deletes=True)
version = db.relationship("Version", backref="stage_type", passive_deletes=True)
#version_latest = db.Column(db.String(64))
#version_original = db.Column(db.Boolean())
#version_ok = db.Column(db.Boolean)
def to_json(self, key):
stage_type = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='stage_types', key=key,
_external=False),
'data' : {
'type_name' : self.type_name,
'type_class' : self.type_class.to_json_simple(key),
'stages' : [stage.to_json_simple(key) for stage in self.stages],
'versions' : [version.to_json_simple(key) for version in self.version]
}
}
return stage_type
def to_json_simple(self, key):
stage_type = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='stage_types', key=key,
_external=False),
'data' : {
'type_name' : self.type_name,
'stages_len' : len(self.stages),
'versions_len' : len(self.version)
}
}
return stage_type
@staticmethod
def migrate():
StageTypeClass.migrate()
def __repr__(self):
return self.type_name
class Treatment(db.Model):
__tablename__ = 'treatments'
id = db.Column(db.Integer, primary_key=True, index=True)
treatment_name = db.Column(db.Text())
matrices = db.relationship("Matrix", backref="treatment", passive_deletes=True)
@staticmethod
def migrate():
with open('app/data-migrate/matrices.json') as d_file:
data = json.load(d_file)
json_data = data["Matrix"]
nodes = json_data["Treatment"]
for node in nodes:
i = Treatment.query.filter_by(treatment_name=node['treatment_name']).first()
if i is None:
i = Treatment()
i.treatment_name = node['treatment_name']
db.session.add(i)
db.session.commit()
def to_json(self, key):
treatment= {
'request_url' : url_for('api.get_one_entry', id=self.id, model='treatments', key=key,
_external=False),
'data' : {
'treatment_name' : self.treatment_name,
'matrices' : [matrix.to_json_simple(key) for matrix in self.matrices]
}
}
return treatment
def to_json_simple(self, key):
treatment= {
'request_url' : url_for('api.get_one_entry', id=self.id, model='treatments', key=key,
_external=False),
'data' : {
'treatment_name' : self.treatment_name,
'matrices_len' : len(self.matrices)
}
}
return treatment
def __repr__(self):
return self.treatment_name if self.treatment_name else ''
class MatrixStage(db.Model):
#query_class = VersionQuery
__tablename__ = 'matrix_stages'
id = db.Column(db.Integer, primary_key=True)
stage_order = db.Column(db.SmallInteger())
stage_id = db.Column(db.Integer, db.ForeignKey('stages.id',ondelete='CASCADE'))
matrix_id = db.Column(db.Integer, db.ForeignKey('matrices.id',ondelete='CASCADE'))
version = db.relationship("Version", backref="matrix_stage", passive_deletes=True)
#version_latest = db.Column(db.String(64))
#version_original = db.Column(db.Boolean())
#version_ok = db.Column(db.Boolean)
def to_json(self, key):
matrix_stage = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='matrix_stages', key=key,
_external=False),
'data' : {
'stage_order' : self.stage_order,
'stage' : self.stage.to_json_simple(key),
'matrix' : self.matrix.to_json_simple(key),
'versions' : [version.to_json_simple(key) for version in self.version]
}
}
return matrix_stage
def to_json_simple(self, key):
matrix_stage = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='matrix_stages', key=key,
_external=False),
'data' : {
'stage_order' : self.stage_order,
'versions_len' : len(self.version)
}
}
return matrix_stage
def __repr__(self):
return '<Matrix Stage %r>' % self.stage_order
class MatrixValue(db.Model):
#query_class = VersionQuery
__tablename__ = 'matrix_values'
id = db.Column(db.Integer, primary_key=True)
column_number = db.Column(db.SmallInteger())
row_number = db.Column(db.SmallInteger())
transition_type_id = db.Column(db.Integer, db.ForeignKey('transition_types.id',ondelete='CASCADE'))
value = db.Column(db.Float())
matrix_id = db.Column(db.Integer, db.ForeignKey('matrices.id',ondelete='CASCADE'))
version = db.relationship("Version", backref="matrix_value", passive_deletes=True)
#version_latest = db.Column(db.String(64))
#version_original = db.Column(db.Boolean())
#version_ok = db.Column(db.Boolean)
@staticmethod
def migrate():
TransitionType.migrate()
def to_json(self, key):
matrix_value = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='matrix_values', key=key,
_external=False),
'data' : {
'column_number' : self.column_number,
'row_number' : self.row_number,
'transition_type' : self.transition_type.to_json_simple(key),
'value' : self.value,
'matrix' : self.matrix.to_json_simple(key),
'versions' : [version.to_json_simple(key) for version in self.version]
}
}
return matrix_value
def to_json_simple(self, key):
matrix_value = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='matrix_values', key=key,
_external=False),
'data' : {
'column_number' : self.column_number,
'row_number' : self.row_number,
'transition_type' : self.transition_type.trans_code,
'value' : self.value,
'versions_len' : len(self.version)
}
}
return matrix_value
def __repr__(self):
return str(self.column_number)
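# Hedged helper (an addition, not original code): MatrixValue rows store one
# cell each, so a dense matrix can be reassembled from them. This assumes
# row_number and column_number are 1-based, which the schema does not state
# explicitly.
def values_to_matrix(matrix_values, dimension):
    """Assemble a dimension x dimension grid of floats from MatrixValue rows."""
    grid = [[0.0] * dimension for _ in range(dimension)]
    for mv in matrix_values:
        grid[mv.row_number - 1][mv.column_number - 1] = mv.value
    return grid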
class Matrix(db.Model):
#query_class = VersionQuery
__tablename__ = 'matrices'
id = db.Column(db.Integer, primary_key=True)
uid = db.Column(db.String(200), index=True, unique=True)
population_id = db.Column(db.Integer, db.ForeignKey('populations.id',ondelete='CASCADE'))
treatment_id = db.Column(db.Integer, db.ForeignKey('treatments.id',ondelete='CASCADE'))
matrix_split = db.Column(db.Boolean())
matrix_composition_id = db.Column(db.Integer, db.ForeignKey('matrix_compositions.id',ondelete='CASCADE'))
seasonal = db.Column(db.Boolean())
survival_issue = db.Column(db.Float())
matrix_irreducible = db.Column(db.Boolean())
matrix_primitive = db.Column(db.Boolean())
matrix_ergodic = db.Column(db.Boolean())
periodicity = db.Column(db.Float())
n_intervals = db.Column(db.SmallInteger())
matrix_criteria_size = db.Column(db.String(200))
matrix_criteria_ontogeny = db.Column(db.Boolean())
matrix_criteria_age = db.Column(db.Boolean())
matrix_start_year = db.Column(db.Integer)
matrix_start_month = db.Column(db.Integer())
matrix_end_year = db.Column(db.Integer)
matrix_end_month = db.Column(db.Integer())
matrix_start_season_id = db.Column(db.Integer, db.ForeignKey('start_seasons.id',ondelete='CASCADE')) # Proto says season used as described in manuscript, maybe not safe to derive this from latdeg, country, date
matrix_end_season_id = db.Column(db.Integer, db.ForeignKey('end_seasons.id',ondelete='CASCADE')) # Proto says season used as described in manuscript, maybe not safe to derive this from latdeg, country, date
matrix_fec = db.Column(db.Boolean())
matrix_a_string = db.Column(db.Text())
matrix_u_string = db.Column(db.Text())
matrix_f_string = db.Column(db.Text())
matrix_c_string = db.Column(db.Text())
matrix_difficulty = db.Column(db.String(64))
matrix_complete = db.Column(db.Boolean())
independence_origin = db.Column(db.Text())
#n_plots = db.Column(db.SmallInteger()) # Danny/Jenni/Dave, will need your help with plots too - not quite sure what they are.
#plot_size = db.Column(db.Float()) # Schema states, 'R convert to m^2'
#n_individuals = db.Column(db.Integer()) # Schema states, 'total number of individuals observed'
studied_sex_id = db.Column(db.Integer, db.ForeignKey('studied_sex.id',ondelete='CASCADE'))
captivity_id = db.Column(db.Integer, db.ForeignKey('captivities.id',ondelete='CASCADE'))
matrix_dimension = db.Column(db.Integer()) # dimension of matrix population A
observations = db.Column(db.Text())
class_organized = db.Column(db.Text())
class_author = db.Column(db.Text())
class_number = db.Column(db.Text())
#vectors_includes_na = db.Column(db.Text())
matrix_lambda = db.Column(db.Float())
independent = db.Column(db.Boolean())
non_independence = db.Column(db.Text())
non_independence_author = db.Column(db.Text())
intervals = db.relationship("Interval", backref="matrix",passive_deletes=True)
matrix_values = db.relationship("MatrixValue", backref="matrix", passive_deletes=True)
matrix_stages = db.relationship("MatrixStage", backref="matrix", passive_deletes=True)
fixed = db.relationship("Fixed", backref="matrix", passive_deletes=True)
seeds = db.relationship("Seed", backref="matrix", passive_deletes=True)
# Versioning
version = db.relationship("Version", backref="matrix", passive_deletes=True)
#version_latest = db.Column(db.String(64))
#version_original = db.Column(db.Boolean())
#version_ok = db.Column(db.Boolean)
@staticmethod
def migrate():
MatrixComposition.migrate()
StartSeason.migrate()
EndSeason.migrate()
StudiedSex.migrate()
Captivity.migrate()
Status.migrate()
# Generate UID for this Matrix
def create_uid(self):
import re
species_accepted = self.population.species.species_accepted
journal = self.population.publication.pub_title
year_pub = self.population.publication.year
authors = self.population.publication.authors[:15] # .encode() removed: under Python 3 it would embed b'...' in the uid
pop_name = self.population.population_name # the column is population_name; Population has no .name attribute
try:
composite = self.matrix_composition.comp_name
except AttributeError:
composite = ''
# treatment = self.treatment.treatment_name
# matrix_start_year is the closest existing column; the original self.matrix_start does not exist
if self.matrix_start_year:
start_year = str(self.matrix_start_year)[-4:]
else:
start_year = ''
# observation = self.observations.encode('utf-8')
# matrix_a_string = self.matrix_a_string
import time
timestamp = time.time()
uid_concat = '{}{}{}{}{}{}{}{}'.format(species_accepted, journal, year_pub, authors, pop_name, composite, start_year, timestamp)
uid_lower = uid_concat.lower()
uid = re.sub(r'[\W_]+', '', uid_lower)
self.uid = uid
if Matrix.query.filter_by(uid=uid).first() is None:
pass
# db.session.add(self)
# db.session.commit()
else:
return
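# Hedged usage sketch for create_uid(): it reads through the population and
# publication relationships, so it should be called after those are attached
# and before the final commit. The variable names here are illustrative.
#
# m = Matrix(population=pop, treatment=treatment)
# db.session.add(m)
# m.create_uid()  # derives a slug from species, publication, start year and a timestamp
# db.session.commit()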
def to_json(self, key):
matrix = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='matrices', key=key,
_external=False),
'data' : {
'population' : self.population.to_json_simple(key),
'treatment' : self.treatment.to_json_simple(key) if self.treatment else None,
'matrix_split' : self.matrix_split,
'matrix_composition' : self.matrix_composition.to_json_simple(key) if self.matrix_composition else None,
'survival_issue' : self.survival_issue,
'n_intervals' : self.n_intervals,
'periodicity' : self.periodicity,
'matrix_criteria_size' : self.matrix_criteria_size,
'matrix_criteria_ontogeny' : self.matrix_criteria_ontogeny,
'matrix_criteria_age' : self.matrix_criteria_age,
'matrix_start_season' : self.start_season.to_json_simple(key) if self.start_season else None,
'matrix_end_season' : self.end_season.to_json_simple(key) if self.end_season else None,
'matrix_fec' : self.matrix_fec,
'matrix_a_string' : self.matrix_a_string,
'matrix_u_string' : self.matrix_u_string,
'matrix_f_string' : self.matrix_f_string,
#'n_plots' : self.n_plots,
#'plot_size' : self.plot_size,
'studied_sex' : self.studied_sex.to_json_simple(key) if self.studied_sex else None,
'captivities' : self.captivities.to_json_simple(key) if self.captivities else None,
'matrix_dimension' : self.matrix_dimension,
'observations' : self.observations,
'uid' : self.uid,
'seasonal' : self.seasonal,
'matrix_start_year' : self.matrix_start_year,
'matrix_start_month' : self.matrix_start_month,
'matrix_end_year' : self.matrix_end_year,
'matrix_end_month' : self.matrix_end_month,
#'n_individuals' : self.n_individuals,
'class_organized' : self.class_organized,
'class_author' : self.class_author,
'class_number' : self.class_number,
'intervals' : [interval.to_json_simple(key) for interval in self.intervals] if self.intervals else [],
'matrix_values' : [matrix_value.to_json_simple(key) for matrix_value in self.matrix_values] if self.matrix_values else [],
'matrix_stages' : [matrix_stage.to_json_simple(key) for matrix_stage in self.matrix_stages] if self.matrix_stages else [],
'seeds' : [seeds.to_json_simple(key) for seeds in self.seeds] if self.seeds else []
#'versions' : [versions.to_json_simple(key) for versions in self.versions] if self.versions else []
}
}
user = User.query.filter_by(api_hash=key).first()
if user is not None and user.institute.institution_name == "University of Exeter":
matrix['data']['matrix_difficulty'] = self.matrix_difficulty #
matrix['data']['matrix_complete'] = self.matrix_complete #
matrix['data']['independence_origin'] = self.independence_origin #
#matrix['data']['vectors_includes_na'] = self.vectors_includes_na #
matrix['data']['independent'] = self.independent #
matrix['data']['non_independence'] = self.non_independence #
matrix['data']['non_independence_author'] = self.non_independence_author #
matrix['data']['fixed'] = [fixed.to_json_simple(key) for fixed in self.fixed] if self.fixed else []
return matrix
def to_json_simple(self, key):
matrix = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='matrices', key=key,
_external=False),
'data' : {
'treatment' : self.treatment.to_json_simple(key) if self.treatment else None,
'matrix_split' : self.matrix_split,
'matrix_composition' : self.matrix_composition.to_json_simple(key) if self.matrix_composition else None,
'survival_issue' : self.survival_issue,
'matrix_a_string' : self.matrix_a_string,
'matrix_dimension' : self.matrix_dimension,
'observations' : self.observations,
'uid' : self.uid,
'seasonal' : self.seasonal,
'matrix_start_year' : self.matrix_start_year,
'matrix_end_year' : self.matrix_end_year,
'intervals_count' : len(self.intervals),
'matrix_values_len' : len(self.matrix_values),
'matrix_stages_len' : len(self.matrix_stages),
'fixed_len' : len(self.fixed),
'seeds_len' : len(self.seeds),
#'versions_len' : len(self.versions)
}
}
user = User.query.filter_by(api_hash=key).first()
if user is not None and user.institute.institution_name == "University of Exeter":
matrix['data']['matrix_difficulty'] = self.matrix_difficulty
matrix['data']['matrix_complete'] = self.matrix_complete
matrix['data']['independence_origin'] = self.independence_origin
#matrix['data']['vectors_includes_na'] = self.vectors_includes_na
matrix['data']['independent'] = self.independent
matrix['data']['non_independence'] = self.non_independence
matrix['data']['non_independence_author'] = self.non_independence_author
matrix['data']['fixed_len'] = len(self.fixed)
return matrix
def __repr__(self):
return '<Matrix %r>' % self.id
def add_to_logger(self,current_user,field_name,content_before,content_after,new_edit_delete):
changelogger = {
'user_id' : current_user.id,
'object_type' : "matrix",
'field_name' : field_name,
'object_id' : self.id,
'content_before' : content_before,
'content_after' : content_after,
'new_edit_delete' : new_edit_delete
}
cl = ChangeLogger(**changelogger)
if cl.content_before != cl.content_after:
if cl.content_before is None:
cl.new_edit_delete = "new"
db.session.add(cl)
db.session.commit()
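# Hedged helper (an addition, not original code): matrix_a_string stores the
# A matrix as text. Assuming a MATLAB-style encoding such as "[0 1.3; 0.5 0]"
# (an assumption about the serialised format, which this file does not
# document), it can be unpacked into float rows like so:
def parse_matrix_string(matrix_string):
    """Parse a MATLAB-style matrix string into a list of float rows."""
    rows = matrix_string.strip().lstrip('[').rstrip(']').split(';')
    return [[float(x) for x in row.split()] for row in rows]
# e.g. parse_matrix_string('[0 1.3; 0.5 0]') == [[0.0, 1.3], [0.5, 0.0]]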
''' This table only applies to mean matrices, to identify the intervals that the mean values are derived from '''
class Interval(db.Model):
__tablename__ = 'intervals'
id = db.Column(db.Integer, primary_key=True)
matrix_id = db.Column(db.Integer, db.ForeignKey('matrices.id',ondelete='CASCADE'))
interval_order = db.Column(db.Integer())
interval_start = db.Column(db.Date())
interval_end = db.Column(db.Date())
def to_json(self, key):
interval = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='intervals', key=key,
_external=False),
'data' : {
'interval_order' : self.interval_order,
'interval_start' : self.interval_start,
'interval_end' : self.interval_end,
'matrix' : self.matrix.to_json_simple(key)
}
}
return interval
def to_json_simple(self, key):
interval = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='intervals', key=key,
_external=False),
'data' : {
'interval_order' : self.interval_order,
'interval_start' : self.interval_start,
'interval_end' : self.interval_end,
'matrix' : self.matrix.to_json_simple(key)
}
}
return interval
def __repr__(self):
return '<Interval %r>' % self.id
''' Fixed Stuff - not yet agreed by the CORE committee; to be released later '''
class Fixed(db.Model):
#query_class = VersionQuery
__tablename__ = 'fixed'
id = db.Column(db.Integer, primary_key=True)
matrix_id = db.Column(db.Integer, db.ForeignKey('matrices.id',ondelete='CASCADE'), index=True)
vector_str = db.Column(db.Text())
vector_present = db.Column(db.Boolean())
total_pop_no = db.Column(db.Integer())
small_id = db.Column(db.Integer, db.ForeignKey('smalls.id',ondelete='CASCADE'))
census_timing_id = db.Column(db.Integer, db.ForeignKey('census_timings.id',ondelete='CASCADE'))
seed_stage_error = db.Column(db.Boolean(), default=False)
private = db.Column(db.Boolean(), default=True)
vectors_includes_na = db.Column(db.Boolean(), default=False)
vectors_proportional = db.Column(db.Text())
vector_class_names = db.Column(db.Text())
version = db.relationship("Version", backref="fixed", passive_deletes=True)
@staticmethod
def migrate():
CensusTiming.migrate()
Small.migrate()
def to_json(self, key):
fixed = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='fixed', key=key,
_external=False),
'data' : {
'matrix' : self.matrix.to_json_simple(key),
'vector_str' : self.vector_str,
'vector_present' : self.vector_present,
'total_pop_no' : self.total_pop_no,
'small' : self.smalls.to_json_simple(key) if self.smalls else None,
'census' : self.census_timings.to_json_simple(key) if self.census_timings else None,
'seed_stage_error' : self.seed_stage_error,
'private' : self.private,
'versions' : [v.to_json_simple(key) for v in self.version] # the relationship is named 'version' above
}
}
user = User.query.filter_by(api_hash=key).first()
if user is not None and user.institute.institution_name == "University of Exeter":
return fixed
else:
from api_1_0.errors import unauthorized
return unauthorized("Invalid Permissions")
def to_json_simple(self, key):
fixed = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='fixed', key=key,
_external=False),
'data' : {
'vector_str' : self.vector_str,
'vector_present' : self.vector_present,
'total_pop_no' : self.total_pop_no,
'seed_stage_error' : self.seed_stage_error,
'private' : self.private,
'versions_len' : len(self.version)
}
}
return fixed
def __repr__(self):
return str(self.matrix_id)
def add_to_logger(self,current_user,field_name,content_before,content_after,new_edit_delete):
changelogger = {
'user_id' : current_user.id,
'object_type' : "fixed",
'field_name' : field_name,
'object_id' : self.id,
'content_before' : content_before,
'content_after' : content_after,
'new_edit_delete' : new_edit_delete
}
cl = ChangeLogger(**changelogger)
if cl.content_before != cl.content_after:
if cl.content_before is None:
cl.new_edit_delete = "new"
db.session.add(cl)
db.session.commit()
class Seed(db.Model):
__tablename__ = 'seeds'
id = db.Column(db.Integer, primary_key=True)
matrix_id = db.Column(db.Integer, db.ForeignKey('matrices.id'), index=True)
matrix_a = db.Column(db.Text())
def to_json(self, key):
seeds = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='seeds', key=key,
_external=False),
'data' : {
'matrix' : self.matrix.to_json_simple(key),
'matrix_a' : self.matrix_a,
}
}
return seeds
def to_json_simple(self, key):
seeds = {
'request_url' : url_for('api.get_one_entry', id=self.id, model='seeds', key=key,
_external=False),
'data' : {
'matrix_a' : self.matrix_a
}
}
return seeds
def __repr__(self):
return '<Seed %r>' % self.id
class Version(db.Model):
__tablename__ = 'versions'
id = db.Column(db.Integer, primary_key=True)
version_number = db.Column(db.Integer(), default=0)
#version of is the ID of the previous version, so each version can refer back to it
#original_version_id = db.Column(db.Integer, db.ForeignKey('versions.id', ondelete='CASCADE'))
#version_date_added = db.Column(db.Date())
#version_timestamp_created = db.Column(db.DateTime, default=datetime.utcnow)
# If this is the original version, it will have other versions
#original_version = db.relationship("Version", backref="child_versions", remote_side="Version.id", uselist=True)
checked = db.Column(db.Boolean())
status_id = db.Column(db.Integer, db.ForeignKey('statuses.id',ondelete='CASCADE'))
checked_count = db.Column(db.Integer(), default=0)
# Utility relationships
#version_user_id = db.Column(db.Integer, db.ForeignKey('users.id',ondelete='CASCADE'))
database_id = db.Column(db.Integer, db.ForeignKey('databases.id',ondelete='CASCADE'))
entered_by_id = db.Column(db.Integer, db.ForeignKey('users.id',ondelete='CASCADE'))
#checked_by_id = db.Column(db.Integer, db.ForeignKey('users.id',ondelete='CASCADE'))
# Demography relationships
species_id = db.Column(db.Integer, db.ForeignKey('species.id',ondelete='CASCADE'))
taxonomy_id = db.Column(db.Integer, db.ForeignKey('taxonomies.id',ondelete='CASCADE'))
trait_id = db.Column(db.Integer, db.ForeignKey('traits.id',ondelete='CASCADE'))
publication_id = db.Column(db.Integer, db.ForeignKey('publications.id',ondelete='CASCADE'))
population_id = db.Column(db.Integer, db.ForeignKey('populations.id',ondelete='CASCADE'))
matrix_id = db.Column(db.Integer, db.ForeignKey('matrices.id',ondelete='CASCADE'))
fixed_id = db.Column(db.Integer, db.ForeignKey('fixed.id',ondelete='CASCADE'))
#these are defunct
stage_id = db.Column(db.Integer, db.ForeignKey('stages.id',ondelete='CASCADE'))
stage_type_id = db.Column(db.Integer, db.ForeignKey('stage_types.id',ondelete='CASCADE'))
matrix_stage_id = db.Column(db.Integer, db.ForeignKey('matrix_stages.id',ondelete='CASCADE'))
matrix_value_id = db.Column(db.Integer, db.ForeignKey('matrix_values.id',ondelete='CASCADE'))
author_contact_id = db.Column(db.Integer, db.ForeignKey('author_contacts.id',ondelete='CASCADE'))
additional_source_id = db.Column(db.Integer, db.ForeignKey('additional_sources.id',ondelete='CASCADE'))
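# NOTE: to_json below references original_version, version_date_added and
# version_timestamp_created, whose column/relationship definitions are
# commented out above; restore them before calling this method.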
def to_json(self, key):
version = {
'created_by' : self.user.to_json_simple(key),
'version_number' : self.version_number,
'original_version' : url_for('api.get_one_entry', id=self.original_version[0].id, model="versions", key=key,
_external=False),
'version_date_added' : self.version_date_added,
'version_timestamp_created' : self.version_timestamp_created,
'checked_count' : self.checked_count,
'checked' : self.checked,
'status' : self.statuses.to_json_simple(key),
'versions' : [version.to_json_simple(key) for version in self.versions]
}
return version
def to_json_simple(self, key):
version = {
'created_by_email' : self.user.email,
'version_number' : self.version_number,
'version_timestamp_created' : self.version_timestamp_created,
'checked' : self.checked,
'status' : self.statuses.status_name,
'versions_len' : len(self.versions)
}
return version
def __getitem__(self, key):
return getattr(self, key)
def parent_table(self):
fk = {
'species_id' : self.species_id,
'taxonomy_id' : self.taxonomy_id,
'trait_id' : self.trait_id,
'publication_id' : self.publication_id,
'population_id' : self.population_id,
'matrix_id' : self.matrix_id,
'fixed_id' : self.fixed_id,
'stage_id' : self.stage_id,
'stage_type_id' : self.stage_type_id,
'matrix_stage_id' : self.matrix_stage_id,
'matrix_value_id' : self.matrix_value_id,
'author_contact_id': self.author_contact_id,
'additional_source_id' : self.additional_source_id,
'protocol_id' : self.protocol_id
}
for f, k in fk.items():
if k is not None:  # the first foreign key that is set identifies the parent table
kwargs = {f: k}
return kwargs
def all(self):
kwargs = self.parent_table()
kwargs['statuses'] = Status.query.filter_by(status_name="Green").first()
kwargs['checked'] = True
return Version.query.filter_by(**kwargs).all()
def original(self):
kwargs = self.parent_table()
kwargs['version_number'] = 0
kwargs['statuses'] = Status.query.filter_by(status_name="Green").first()
kwargs['checked'] = True
return Version.query.filter_by(**kwargs).first()
def latest(self):
kwargs = self.parent_table()
kwargs['statuses'] = Status.query.filter_by(status_name="Green").first()
kwargs['checked'] = True
latest = Version.query.filter_by(**kwargs).order_by(Version.version_number.desc()).first()
return latest
@staticmethod
def migrate():
###previously Database.migrate()
Status.migrate()
def __repr__(self):
return '<Version {} {} {}>'.format(str(self.id), self.statuses.status_name, self.checked)
'''Userlists'''
class UserList(db.Model):
__tablename__ = 'user_lists'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id',ondelete='CASCADE'))
name = db.Column(db.Text())
description = db.Column(db.Text())
public = db.Column(db.Boolean())
DOI_ISBN = db.Column(db.Text())
'''entries in userlists'''
class UserListEntry(db.Model):
__tablename__ = 'user_list_entries'
id = db.Column(db.Integer, primary_key=True)
matrix_id = db.Column(db.Integer, db.ForeignKey('matrices.id',ondelete='CASCADE'))
exclusion = db.Column(db.Boolean())
notes = db.Column(db.Text())
|
Spandex-at-Exeter/demography_database
|
app/models.py
|
Python
|
mit
| 151,567
|
[
"Amber"
] |
a738c739ecd9863af4a84df4b41491ca2c2a50e836e7d4626f8f5a8ebe82660f
|
#-*- coding=utf-8 -*-
#-----------------------
# Named: Price Models
# Created: 2016-08-11
# @Author: Qianfeng
#-----------------------
from random import random,randint
import math
import numpy as np
from pylab import *
def wineprice(rating, age):
peak_age = rating-50
# Compute the price based on the rating
price = rating/2
if age > peak_age:
# Past the "peak age", quality worsens over the following 5 years
price = price * (5-(age-peak_age))
else:
price = price * (5*((age+1)/peak_age))
# The price grows to up to 5 times its base value as the "peak age" approaches
if price < 0:
price = 0
return price
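# Worked example (float arguments matter under Python 2 division):
#   wineprice(95.0, 20.0) ~= 110.83 (20 years, before the peak age of 45)
#   wineprice(95.0, 50.0) == 0.0 (5 years past the peak, the price bottoms out)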
def wineset1():
rows = []
for i in range(300):
# Randomly generate an age and a rating
rating = random()*50 + 50
age = random()*50
# Get a reference price
price = wineprice(rating, age)
# Add "noise"
price *= (random()*0.4 + 0.8)
# Add to the data set
rows.append({'input':(rating,age),
'result':price})
return rows
def euclidian(v1, v2):
d = 0.0
for i in range(len(v1)):
d += (v1[i]-v2[i])**2
return math.sqrt(d)
def getdistance(data, vec1):
distancelist = []
for i in range(len(data)):
vec2 = data[i]['input']
distancelist.append((euclidian(vec1, vec2),i))
distancelist.sort()
return distancelist
def knnestimate(data, vec1, k=5):
# Get the sorted list of distances
dlist = getdistance(data, vec1)
avg = 0.0
# Average the results of the first k items
for i in range(k):
idx = dlist[i][1]
avg += data[idx]['result']
return avg/k
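# Example usage (illustrative values):
#   data = wineset1()
#   knnestimate(data, (95.0, 3.0)) # unweighted k-NN price estimate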
# Weight neighbors with an inverse function (drawback: decays too quickly)
def inverseweight(dist, num=1.0, const=0.1):
return num/(dist+const)
# Weight with a subtraction function (avoids the inverse function's over-weighting
# of very close neighbors; drawback: weights fall to 0, so no item may be close enough)
def subtraceweight(dist, const=1.0):
if dist > const:
return 0
else:
return const-dist
# Gaussian function (bell curve)
def gaussian(dist, sigma=1.0):
return math.exp(-dist**2/(2*sigma**2))
def weightedknn(data, vec1, k=5, weightf=gaussian):
# Get the distances
dlist = getdistance(data, vec1)
avg = 0.0
totalweight = 0.0
# Compute the weighted average
for i in range(k):
dist = dlist[i][0]
idx = dlist[i][1]
weight = weightf(dist)
avg += weight * data[idx]['result']
totalweight += weight
avg /= totalweight
return avg
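# Example usage: any of the weighting functions above can be passed in.
#   weightedknn(data, (95.0, 3.0), weightf=inverseweight)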
def dividedata(data, test=0.05):
trainset = []
testset = []
for row in data:
if random()<test:
testset.append(row)
else:
trainset.append(row)
return trainset, testset
def testalgorithm(algf, trainset, testset):
# Measure the mean squared error
error = 0.0
for row in testset:
guess = algf(trainset, row['input'])
error += (row['result']-guess)**2
return error/len(testset)
def crossvalidate(algf, data, trials=100, test=0.05):
error = 0.0
for i in range(trials):
trainset, testset = dividedata(data, test)
error += testalgorithm(algf, trainset, testset)
return error/trials
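# Example usage: compare estimators by cross-validated squared error
# (lower is better; results vary because the train/test split is random).
#   crossvalidate(knnestimate, data)
#   crossvalidate(weightedknn, data)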
#==========================================
# Improving the algorithm (applying it to heterogeneous feature variables)
def wineset2():
rows = []
for i in range(300):
rating = random()*50 + 50
age = random() * 50
aisle = float(randint(1,20))
bottlesize = [375.0, 750.0, 1500.0, 3000.0][randint(0,3)]
price = wineprice(rating, age)
price *= (bottlesize/750)
price *= (random()*0.9+0.2)
rows.append({'input':(rating,age,aisle,bottlesize),
'result':price})
return rows
def rescale(data, scale):
scaleddata = []
for row in data:
scaled = [scale[i] * row['input'][i] for i in range(len(scale))]
scaleddata.append({'input':scaled, 'result':row['result']})
return scaleddata
# Wrap cross-validation in a cost function and solve it as an optimization problem. The call returns a function (a closure).
def createcostfunction(algf, data):
def costf(scale):
sdata = rescale(data, scale)
return crossvalidate(algf, sdata, trials=10)
return costf
weightdomain = [(0,20)] * 4
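# Example (assumes an optimizer such as annealingoptimize from the
# optimization chapter is available; the names are illustrative):
#   costf = createcostfunction(knnestimate, wineset2())
#   annealingoptimize(weightdomain, costf, step=2)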
def wineset3():
rows = wineset1()
for row in rows:
if random()<0.5:
# The wine was bought from a discount store
row['result'] *= 0.5
return rows
# Handling asymmetric distributions
def probguess(data, vec1, low, high, k=5, weightf=gaussian):
dlist = getdistance(data, vec1)
nweight = 0.0
tweight = 0.0
for i in range(k):
dist = dlist[i][0]
idx = dlist[i][1]
weight = weightf(dist)
v = data[idx]['result']
# Is the current data point within the specified range?
if v>= low and v<=high:
nweight += weight
tweight += weight
if tweight == 0:
return 0
# The probability is the weight within the specified range divided by the total weight
return nweight/tweight
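# Example usage: the probability that a (99, 20) wine costs between 40 and 80.
#   probguess(wineset3(), [99.0, 20.0], 40, 80)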
# Plot the probability distribution
# Cumulative probability
def cumulativegraph(data, vec1, high, k=5, weightf=gaussian):
t1 = arange(0.0, high, 0.1)
cprob = array([probguess(data, vec1, 0, v, k, weightf) for v in t1])
plot(t1, cprob)
show()
# Combined probability graph
def probabilitygraph(data, vec1, high, k=5, weightf=gaussian, ss=5.0):
# Build a range of values representing price
t1 = arange(0.0, high, 0.1)
# Get the probabilities across the whole range
probs = [probguess(data, vec1, v, v+0.1, k, weightf) for v in t1]
# Smooth the probabilities by adding in Gaussian-weighted contributions from neighboring points
smoothed = []
for i in range(len(probs)):
sv = 0.0
for j in range(0, len(probs)):
dist = abs(i-j) * 0.1
weight = gaussian(dist, sigma=ss)
sv += weight * probs[j]
smoothed.append(sv)
smoothed = np.array(smoothed)
plot(t1, smoothed)
show()
|
qianfengzh/ML-source-code
|
algorithms/CollectiveIntelligence/CH08-Price Models/numpredict.py
|
Python
|
gpl-2.0
| 5,428
|
[
"Gaussian"
] |
5b4baa47e959e5148d0dce81a654d6715d61f5c7321b0f5239070c51df95664f
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles function calls, by generating compiled function names and calls.
Note: this transformer does not rename the top level object being converted;
that is the caller's responsibility.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.util import tf_inspect
class FunctionInfo(namedtuple('FunctionInfo', ('dtype',))):
pass
# TODO(mdan): Move this to a separate transformer.
KNOWN_NUMPY_FUNCTIONS = {
('numpy', 'random', 'binomial'): FunctionInfo(dtype='tf.int64'),
}
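# For example, in converted code a call like np.random.binomial(n, p) is
# wrapped (via _wrap_to_py_func_single_return below) roughly as
#   ag__.utils.wrap_py_func(np.random.binomial, tf.int64, (n, p), {}, False)
# so the NumPy call executes eagerly through py_func with a known dtype.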
# TODO(mdan): Get rid of these interfaces. Can now depend directly on Namer.
class FunctionNamer(object):
"""Describes the interface for CallTreeTransformer's namer."""
def compiled_function_name(self,
original_fqn,
live_entity=None,
owner_type=None):
"""Generate the name corresponding to the compiled version of a function.
Args:
original_fqn: string or tuple(string)
live_entity: Callable, the actual target function, if known.
owner_type: Optional object. If present, it indicates that the function is
a member of the given type.
Returns:
string, bool
"""
raise NotImplementedError()
def compiled_class_name(self, original_fqn, live_entity=None):
"""Generate the name corresponding to the compiled version of a class.
Args:
original_fqn: string or tuple(string)
live_entity: The actual target class, if known.
Returns:
string
"""
raise NotImplementedError()
# TODO(mdan): Rename to CallsTransformer.
class CallTreeTransformer(converter.Base):
"""Transforms the call tree by renaming transformed symbols."""
def _resolve_decorator_name(self, node):
"""Used to resolve decorator info."""
if isinstance(node, gast.Call):
return self._resolve_decorator_name(node.func)
if isinstance(node, gast.Name):
# TODO(mdan): Add test coverage for this branch.
return self.ctx.info.namespace.get(node.id)
if isinstance(node, gast.Attribute):
parent = self._resolve_decorator_name(node.value)
if parent is not None:
return getattr(parent, node.attr)
return None
raise ValueError(node)
def _try_resolve_target(self, node):
"""Works for methods of objects of known type."""
if anno.hasanno(node, 'live_val'):
return anno.getanno(node, 'live_val')
if isinstance(node, gast.Attribute) and anno.hasanno(node, 'type'):
owner_type = anno.getanno(node, 'type')
if hasattr(owner_type, node.attr):
return getattr(owner_type, node.attr)
else:
# TODO(mdan): We should probably return None here rather than an error.
raise ValueError('Type "%s" has no attribute "%s". Is it dynamic?' %
(owner_type, node.attr))
return None
def _function_is_compilable(self, target_entity):
"""Determines whether an entity can be compiled at all."""
# TODO(mdan): Expand.
if target_entity.__module__ is None:
# Functions like builtins and NumPy don't expose a module.
# Those in general should not be compiled.
return False
if inspect_utils.isbuiltin(target_entity):
return False
return True
def _should_compile(self, node, fqn):
"""Determines whether an entity should be compiled in the context."""
# TODO(mdan): Needs cleanup. We should remove the use of fqn altogether.
module_name = fqn[0]
for mod in self.ctx.program.uncompiled_modules:
if module_name.startswith(mod[0] + '.'):
return False
for i in range(1, len(fqn)):
if fqn[:i] in self.ctx.program.uncompiled_modules:
return False
target_entity = self._try_resolve_target(node.func)
if target_entity is not None:
# This may be reached when "calling" a callable attribute of an object.
# For example:
#
# self.fc = tf.keras.layers.Dense()
# self.fc()
#
for mod in self.ctx.program.uncompiled_modules:
if target_entity.__module__.startswith(mod[0] + '.'):
return False
# Inspect the target function decorators. If any include a @convert
# or @do_not_convert annotation, then they must be called as they are.
# TODO(mdan): This may be quite heavy. Perhaps always dynamically convert?
# To parse and re-analyze each function for every call site could be quite
# wasteful. Maybe we could cache the parsed AST?
try:
target_node, _ = parser.parse_entity(target_entity)
target_node = target_node.body[0]
except TypeError:
# Functions whose source we cannot access are compilable (e.g. wrapped
# to py_func).
return True
# This attribute is set when the decorator was applied before the
# function was parsed. See api.py.
if hasattr(target_entity, '__ag_compiled'):
return False
for dec in target_node.decorator_list:
decorator_fn = self._resolve_decorator_name(dec)
if (decorator_fn is not None and
decorator_fn in self.ctx.program.options.strip_decorators):
return False
return True
def _rename_compilable_function(self, node):
assert anno.hasanno(node.func, 'live_val')
assert anno.hasanno(node.func, 'fqn')
target_entity = anno.getanno(node.func, 'live_val')
target_fqn = anno.getanno(node.func, 'fqn')
if anno.hasanno(node, 'is_constructor'):
new_name = self.ctx.namer.compiled_class_name(
target_fqn, live_entity=target_entity)
do_rename = True
else:
if anno.hasanno(node.func, 'parent_type'):
owner_type = anno.getanno(node.func, 'parent_type')
else:
# Fallback - not reliable.
owner_type = inspect_utils.getmethodclass(target_entity)
new_name, do_rename = self.ctx.namer.compiled_function_name(
target_fqn, live_entity=target_entity, owner_type=owner_type)
if do_rename:
if target_entity is not None:
if tf_inspect.ismethod(target_entity):
# The renaming process will transform it into a regular function.
# TODO(mdan): Is this complete? How does it work with nested members?
node.args = [node.func.value] + node.args
node.func = templates.replace_as_expression(
'func_name', func_name=new_name)
return node
def _wrap_to_py_func_single_return(self, node, dtype):
# TODO(mdan): Properly handle varargs, etc.
template = """
ag__.utils.wrap_py_func(func, dtype, (args,), kwargs, False)
"""
return templates.replace_as_expression(
template,
func=node.func,
dtype=parser.parse_expression(dtype),
args=node.args,
kwargs=ast_util.keywords_to_dict(node.keywords))
def _insert_dynamic_conversion(self, node):
"""Inlines a dynamic conversion for a dynamic function."""
# TODO(mdan): Pass information on the statically compiled functions.
# Having access to the statically compiled functions can help avoid
# unnecessary compilation.
# For example, this would lead to function `a` being compiled twice:
#
# def a():
# v = b
# b()
# def b():
# a()
#
# This is really a problem with recursive calls, which currently can
# only be gated by a static condition, and should be rare.
# TODO(mdan): It probably makes sense to use dynamic conversion every time.
# Before we could convert all the time though, we'd need a reasonable
# caching mechanism.
template = """
ag__.converted_call(func, owner, options, args)
"""
if isinstance(node.func, gast.Attribute):
func = gast.Str(node.func.attr)
owner = node.func.value
else:
func = node.func
owner = parser.parse_expression('None')
new_call = templates.replace_as_expression(
template,
func=func,
owner=owner,
options=self.ctx.program.options.to_ast(
self.ctx.info.namespace,
internal_convert_user_code=self.ctx.program.options.recursive),
args=node.args)
# TODO(mdan): Improve the template mechanism to better support this.
new_call.keywords = node.keywords
return new_call
def _visit_decorators(self, decorator_list):
if not self.ctx.program.options.uses(converter.Feature.DECORATORS):
# When not processing decorators, strip everything that is encountered.
return []
return self.visit_block(decorator_list)
def visit_FunctionDef(self, node):
node.args = self.visit(node.args)
node.body = self.visit_block(node.body)
node.decorator_list = self._visit_decorators(node.decorator_list)
node.returns = self.visit_block(node.returns)
return node
def visit_Call(self, node):
if anno.hasanno(node.func, 'live_val'):
target_entity = anno.getanno(node.func, 'live_val')
if anno.hasanno(node.func, 'fqn'):
target_fqn = anno.getanno(node.func, 'fqn')
else:
target_fqn = None
if self._function_is_compilable(target_entity):
if self._should_compile(node, target_fqn):
node = self._rename_compilable_function(node)
else:
node = self.generic_visit(node)
return node
elif target_fqn and target_fqn in KNOWN_NUMPY_FUNCTIONS:
# TODO(mdan): Should we replace these with equivalent TF ops instead?
node = self._wrap_to_py_func_single_return(
node, KNOWN_NUMPY_FUNCTIONS[target_fqn].dtype)
elif inspect_utils.isbuiltin(target_entity):
# Note: Any builtin that passed the builtins converter is assumed to be
# safe for graph mode.
return node
else:
raise NotImplementedError(
'py_func with return values (unknown function)')
else:
# Special cases
# TODO(mdan): These need a systematic review - there may be more.
# 1. super() calls - these are preserved. The class conversion mechanism
# will ensure that they return the correct value.
if ast_util.matches(node, 'super(_)'):
return node
# 2. super().method calls - these are preserved as well, when the
# conversion processes the entire class.
if (ast_util.matches(node, 'super(_)._(_)') and
self.ctx.info.owner_type is not None):
return node
node = self._insert_dynamic_conversion(node)
return node
def transform(node, ctx):
"""Transform function call to the compiled counterparts.
Args:
node: AST
ctx: EntityContext
Returns:
A tuple (node, new_names):
node: The transformed AST
new_names: set(string), containing any newly-generated names
"""
return CallTreeTransformer(ctx).visit(node)
|
hehongliang/tensorflow
|
tensorflow/python/autograph/converters/call_trees.py
|
Python
|
apache-2.0
| 11,904
|
[
"VisIt"
] |
0b77c478b6d41bbf31efbdd6704790a677a2a3e2a2cd17345b242c6ce07db6e7
|
# TODO: Determine which tests are valid for GLSAR, and under what conditions
# TODO: Fix issue with constant and GLS
# TODO: GLS: add options Iterative GLS, for iterative fgls if sigma is None
# TODO: GLS: default if sigma is none should be two-step GLS
# TODO: Check nesting when performing model based tests, lr, wald, lm
"""
This module implements standard regression models:
Generalized Least Squares (GLS)
Ordinary Least Squares (OLS)
Weighted Least Squares (WLS)
Generalized Least Squares with autoregressive error terms GLSAR(p)
Models are specified with an endogenous response variable and an
exogenous design matrix and are fit using their `fit` method.
Subclasses that have more complicated covariance matrices
should override the 'whiten' method, as the fit method
prewhitens the response by calling 'whiten'.
General reference for regression models:
D. C. Montgomery and E.A. Peck. "Introduction to Linear Regression
Analysis." 2nd. Ed., Wiley, 1992.
Econometrics references for regression models:
R. Davidson and J.G. MacKinnon. "Econometric Theory and Methods," Oxford,
2004.
W. Greene. "Econometric Analysis," 5th ed., Pearson, 2003.
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, lzip, range
__docformat__ = 'restructuredtext en'
__all__ = ['GLS', 'WLS', 'OLS', 'GLSAR']
import numpy as np
from scipy.linalg import toeplitz
from scipy import stats
from scipy.stats.stats import ss
from scipy import optimize
from scipy.stats import chi2
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.tools.tools import add_constant, chain_dot, pinv_extended
from statsmodels.tools.decorators import (resettable_cache,
cache_readonly,
cache_writable)
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
from statsmodels.emplike.elregress import _ELRegOpts
import warnings
from statsmodels.tools.sm_exceptions import InvalidTestWarning
# need import in module instead of lazily to copy `__doc__`
from . import _prediction as pred
def _get_sigma(sigma, nobs):
"""
Returns sigma (matrix, nobs by nobs) for GLS and the inverse of its
Cholesky decomposition. Handles dimensions and checks integrity.
If sigma is None, returns None, None. Otherwise returns sigma,
cholsigmainv.
"""
if sigma is None:
return None, None
sigma = np.asarray(sigma).squeeze()
if sigma.ndim == 0:
sigma = np.repeat(sigma, nobs)
if sigma.ndim == 1:
if sigma.shape != (nobs,):
raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d "
"array of shape %s x %s" % (nobs, nobs, nobs))
cholsigmainv = 1/np.sqrt(sigma)
else:
if sigma.shape != (nobs, nobs):
raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d "
"array of shape %s x %s" % (nobs, nobs, nobs))
cholsigmainv = np.linalg.cholesky(np.linalg.pinv(sigma)).T
return sigma, cholsigmainv
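# Example (sketch): a scalar sigma is expanded to a constant-variance diagonal,
# and cholsigmainv is then the elementwise 1/sqrt(sigma), approximately:
#   _get_sigma(2.0, nobs=3) -> (array([2., 2., 2.]), array([0.7071, 0.7071, 0.7071]))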
class RegressionModel(base.LikelihoodModel):
"""
Base class for linear regression models. Should not be directly called.
Intended for subclassing.
"""
def __init__(self, endog, exog, **kwargs):
super(RegressionModel, self).__init__(endog, exog, **kwargs)
self._data_attr.extend(['pinv_wexog', 'wendog', 'wexog', 'weights'])
def initialize(self):
self.wexog = self.whiten(self.exog)
self.wendog = self.whiten(self.endog)
# overwrite nobs from class Model:
self.nobs = float(self.wexog.shape[0])
self._df_model = None
self._df_resid = None
self.rank = None
@property
def df_model(self):
"""
The model degree of freedom, defined as the rank of the regressor
matrix minus 1 if a constant is included.
"""
if self._df_model is None:
if self.rank is None:
self.rank = np_matrix_rank(self.exog)
self._df_model = float(self.rank - self.k_constant)
return self._df_model
@df_model.setter
def df_model(self, value):
self._df_model = value
@property
def df_resid(self):
"""
The residual degree of freedom, defined as the number of observations
minus the rank of the regressor matrix.
"""
if self._df_resid is None:
if self.rank is None:
self.rank = np_matrix_rank(self.exog)
self._df_resid = self.nobs - self.rank
return self._df_resid
@df_resid.setter
def df_resid(self, value):
self._df_resid = value
def whiten(self, X):
raise NotImplementedError("Subclasses should implement.")
def fit(self, method="pinv", cov_type='nonrobust', cov_kwds=None,
use_t=None, **kwargs):
"""
Full fit of the model.
The results include an estimate of covariance matrix, (whitened)
residuals and an estimate of scale.
Parameters
----------
method : str, optional
Can be "pinv", "qr". "pinv" uses the Moore-Penrose pseudoinverse
to solve the least squares problem. "qr" uses the QR
factorization.
cov_type : str, optional
See `regression.linear_model.RegressionResults` for a description
of the available covariance estimators
cov_kwds : list or None, optional
See `linear_model.RegressionResults.get_robustcov_results` for a
description required keywords for alternative covariance estimators
use_t : bool, optional
Flag indicating to use the Student's t distribution when computing
p-values. Default behavior depends on cov_type. See
`linear_model.RegressionResults.get_robustcov_results` for
implementation details.
Returns
-------
A RegressionResults class instance.
See Also
---------
regression.linear_model.RegressionResults
regression.linear_model.RegressionResults.get_robustcov_results
Notes
-----
The fit method uses the pseudoinverse of the design/exogenous variables
to solve the least squares minimization.
"""
if method == "pinv":
if ((not hasattr(self, 'pinv_wexog')) or
(not hasattr(self, 'normalized_cov_params')) or
(not hasattr(self, 'rank'))):
self.pinv_wexog, singular_values = pinv_extended(self.wexog)
self.normalized_cov_params = np.dot(self.pinv_wexog,
np.transpose(self.pinv_wexog))
# Cache these singular values for use later.
self.wexog_singular_values = singular_values
self.rank = np_matrix_rank(np.diag(singular_values))
beta = np.dot(self.pinv_wexog, self.wendog)
elif method == "qr":
if ((not hasattr(self, 'exog_Q')) or
(not hasattr(self, 'exog_R')) or
(not hasattr(self, 'normalized_cov_params')) or
(getattr(self, 'rank', None) is None)):
Q, R = np.linalg.qr(self.wexog)
self.exog_Q, self.exog_R = Q, R
self.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))
# Cache singular values from R.
self.wexog_singular_values = np.linalg.svd(R, 0, 0)
self.rank = np_matrix_rank(R)
else:
Q, R = self.exog_Q, self.exog_R
# used in ANOVA
self.effects = effects = np.dot(Q.T, self.wendog)
beta = np.linalg.solve(R, effects)
if self._df_model is None:
self._df_model = float(self.rank - self.k_constant)
if self._df_resid is None:
self.df_resid = self.nobs - self.rank
if isinstance(self, OLS):
lfit = OLSResults(self, beta,
normalized_cov_params=self.normalized_cov_params,
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t)
else:
lfit = RegressionResults(self, beta,
normalized_cov_params=self.normalized_cov_params,
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t)
return RegressionResultsWrapper(lfit)
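# Example (hypothetical usage sketch): an ordinary fit, and a fit with a
# heteroskedasticity-robust covariance estimator.
#   res = OLS(y, X).fit()
#   res_robust = OLS(y, X).fit(cov_type='HC1')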
def fit_regularized(self, method="coord_descent", maxiter=1000,
alpha=0., L1_wt=1., start_params=None,
cnvrg_tol=1e-8, zero_tol=1e-8, **kwargs):
"""
Return a regularized fit to a linear regression model.
Parameters
----------
method : string
Only the coordinate descent algorithm is implemented.
maxiter : integer
The maximum number of iteration cycles (an iteration cycle
involves running coordinate descent on all variables).
alpha : scalar or array-like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt : scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is
ridge regression. If 1, the fit is the lasso.
start_params : array-like
Starting values for ``params``.
cnvrg_tol : scalar
If ``params`` changes by less than this amount (in sup-norm)
in once iteration cycle, the algorithm terminates with
convergence.
zero_tol : scalar
Any estimated coefficient smaller than this value is
replaced with zero.
Returns
-------
A RegressionResults object, of the same type returned by
``fit``.
Notes
-----
The approach closely follows that implemented in the glmnet
package in R. The penalty is the "elastic net" penalty, which
is a convex combination of L1 and L2 penalties.
The function that is minimized is:
.. math::
0.5*RSS/n + alpha*((1-L1_wt)*|params|_2^2/2 + L1_wt*|params|_1)
where RSS is the usual regression sum of squares, n is the
sample size, and :math:`|*|_1` and :math:`|*|_2` are the L1 and L2
norms.
Post-estimation results are based on the same data used to
select variables, hence may be subject to overfitting biases.
References
----------
Friedman, Hastie, Tibshirani (2008). Regularization paths for
generalized linear models via coordinate descent. Journal of
Statistical Software 33(1), 1-22 Feb 2010.
"""
k_exog = self.wexog.shape[1]
if np.isscalar(alpha):
alpha = alpha * np.ones(k_exog, dtype=np.float64)
# Below we work with RSS + penalty, so we need to rescale.
alpha *= 2 * self.wexog.shape[0]
if start_params is None:
params = np.zeros(k_exog, dtype=np.float64)
else:
params = start_params.copy()
converged = False
xxprod = 2*(self.wexog**2).sum(0)
# Coordinate descent
for itr in range(maxiter):
params_save = params.copy()
for k in range(self.wexog.shape[1]):
params[k] = 0.
wendog_adj = self.wendog - np.dot(self.wexog, params)
xyprod = 2*np.dot(self.wexog[:,k], wendog_adj)
den = xxprod[k] + alpha[k] * (1 - L1_wt)
a = alpha[k] * L1_wt
if a >= np.abs(xyprod):
params[k] = 0.
elif xyprod > 0:
params[k] = (xyprod - a) / den
else:
params[k] = (xyprod + a) / den
# Check for convergence
pchange = np.max(np.abs(params - params_save))
if pchange < cnvrg_tol:
converged = True
break
# Set approximate zero coefficients to be exactly zero
params *= np.abs(params) >= zero_tol
# Fit the reduced model to get standard errors and other
# post-estimation results.
ii = np.flatnonzero(params)
cov = np.zeros((k_exog, k_exog), dtype=np.float64)
if len(ii) > 0:
model = self.__class__(self.wendog, self.wexog[:,ii])
rslt = model.fit()
cov[np.ix_(ii, ii)] = rslt.normalized_cov_params
lfit = RegressionResults(self, params,
normalized_cov_params=cov)
lfit.converged = converged
return RegressionResultsWrapper(lfit)
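# Example (hypothetical usage sketch): a lasso fit (L1_wt=1) with a modest
# penalty weight; the returned results are of the same type as fit().
#   res_lasso = OLS(y, X).fit_regularized(alpha=0.1, L1_wt=1.0)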
def predict(self, params, exog=None):
"""
Return linear predicted values from a design matrix.
Parameters
----------
params : array-like
Parameters of a linear model
exog : array-like, optional.
Design / exogenous data. Model exog is used if None.
Returns
-------
An array of fitted values
Notes
-----
If the model has not yet been fit, params is not optional.
"""
#JP: this doesn't look correct for GLMAR
#SS: it needs its own predict method
if exog is None:
exog = self.exog
return np.dot(exog, params)
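# Example (hypothetical usage sketch): in-sample predictions from fitted
# parameters; leaving exog=None reuses the model's own design matrix.
#   yhat = model.predict(res.params)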
class GLS(RegressionModel):
__doc__ = """
Generalized least squares model with a general covariance structure.
%(params)s
sigma : scalar or array
`sigma` is the weighting matrix of the covariance.
The default is None for no scaling. If `sigma` is a scalar, it is
assumed that `sigma` is an n x n diagonal matrix with the given
scalar, `sigma` as the value of each diagonal element. If `sigma`
is an n-length vector, then `sigma` is assumed to be a diagonal
matrix with the given `sigma` on the diagonal. This should be the
same as WLS.
%(extra_params)s
**Attributes**
pinv_wexog : array
`pinv_wexog` is the p x n Moore-Penrose pseudoinverse of `wexog`.
cholsigmainv : array
The transpose of the Cholesky decomposition of the pseudoinverse.
df_model : float
p - 1, where p is the number of regressors including the intercept,
i.e. the model degrees of freedom.
df_resid : float
Number of observations n less the number of parameters p.
llf : float
The value of the likelihood function of the fitted model.
nobs : float
The number of observations n.
normalized_cov_params : array
p x p array :math:`(X^{T}\Sigma^{-1}X)^{-1}`
results : RegressionResults instance
A property that returns the RegressionResults class if fit.
sigma : array
`sigma` is the n x n covariance structure of the error terms.
wexog : array
Design matrix whitened by `cholsigmainv`
wendog : array
Response variable whitened by `cholsigmainv`
Notes
-----
If sigma is a function of the data making one of the regressors
a constant, then the current postestimation statistics will not be correct.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> ols_resid = sm.OLS(data.endog, data.exog).fit().resid
>>> res_fit = sm.OLS(ols_resid[1:], ols_resid[:-1]).fit()
>>> rho = res_fit.params
`rho` is a consistent estimator of the correlation of the residuals from
an OLS fit of the longley data. It is assumed that this is the true rho
of the AR process data.
>>> from scipy.linalg import toeplitz
>>> order = toeplitz(np.arange(16))
>>> sigma = rho**order
`sigma` is an n x n matrix of the autocorrelation structure of the
data.
>>> gls_model = sm.GLS(data.endog, data.exog, sigma=sigma)
>>> gls_results = gls_model.fit()
>>> print(gls_results.summary())
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog, sigma=None, missing='none', hasconst=None,
**kwargs):
#TODO: add options igls, for iterative fgls if sigma is None
#TODO: default if sigma is none should be two-step GLS
sigma, cholsigmainv = _get_sigma(sigma, len(endog))
super(GLS, self).__init__(endog, exog, missing=missing,
hasconst=hasconst, sigma=sigma,
cholsigmainv=cholsigmainv, **kwargs)
#store attribute names for data arrays
self._data_attr.extend(['sigma', 'cholsigmainv'])
def whiten(self, X):
"""
GLS whiten method.
Parameters
-----------
X : array-like
Data to be whitened.
Returns
-------
np.dot(cholsigmainv,X)
See Also
--------
regression.GLS
"""
X = np.asarray(X)
if self.sigma is None or self.sigma.shape == ():
return X
elif self.sigma.ndim == 1:
if X.ndim == 1:
return X * self.cholsigmainv
else:
return X * self.cholsigmainv[:, None]
else:
return np.dot(self.cholsigmainv, X)
def loglike(self, params):
"""
Returns the value of the Gaussian log-likelihood function at params.
Given the whitened design matrix, the log-likelihood is evaluated
at the parameter vector `params` for the dependent variable `endog`.
Parameters
----------
params : array-like
The parameter estimates
Returns
-------
loglike : float
The value of the log-likelihood function for a GLS Model.
Notes
-----
The log-likelihood function for the normal distribution is
.. math:: -\\frac{n}{2}\\log\\left(\\left(Y-\\hat{Y}\\right)^{\\prime}\\left(Y-\\hat{Y}\\right)\\right)-\\frac{n}{2}\\left(1+\\log\\left(\\frac{2\\pi}{n}\\right)\\right)-\\frac{1}{2}\\log\\left(\\left|\\Sigma\\right|\\right)
Y and Y-hat are whitened.
"""
#TODO: combine this with OLS/WLS loglike and add _det_sigma argument
nobs2 = self.nobs / 2.0
SSR = ss(self.wendog - np.dot(self.wexog,params))
llf = -np.log(SSR) * nobs2 # concentrated likelihood
llf -= (1+np.log(np.pi/nobs2))*nobs2 # with likelihood constant
if np.any(self.sigma):
#FIXME: robust-enough check? unneeded if _det_sigma gets defined
if self.sigma.ndim==2:
det = np.linalg.slogdet(self.sigma)
llf -= .5*det[1]
else:
llf -= 0.5*np.sum(np.log(self.sigma))
# with error covariance matrix
return llf
class WLS(RegressionModel):
__doc__ = """
A regression model with diagonal but non-identity covariance structure.
The weights are presumed to be (proportional to) the inverse of the
variance of the observations. That is, if the variables are to be
transformed by 1/sqrt(W) you must supply weights = 1/W.
%(params)s
weights : array-like, optional
1d array of weights. If you supply 1/W then the variables are pre-
multiplied by 1/sqrt(W). If no weights are supplied the default value
is 1 and WLS results are the same as OLS.
%(extra_params)s
Attributes
----------
weights : array
The stored weights supplied as an argument.
See regression.GLS
Examples
---------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> Y = [1,3,4,5,2,3,4]
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>> wls_model = sm.WLS(Y,X, weights=list(range(1,8)))
>>> results = wls_model.fit()
>>> results.params
array([ 2.91666667, 0.0952381 ])
>>> results.tvalues
array([ 2.0652652 , 0.35684428])
>>> print(results.t_test([1, 0]))
<T test: effect=array([ 2.91666667]), sd=array([[ 1.41224801]]), t=array([[ 2.0652652]]), p=array([[ 0.04690139]]), df_denom=5>
>>> print(results.f_test([0, 1]))
<F test: F=array([[ 0.12733784]]), p=[[ 0.73577409]], df_denom=5, df_num=1>
Notes
-----
If the weights are a function of the data, then the post estimation
statistics such as fvalue and mse_model might not be correct, as the
package does not yet support no-constant regression.
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog, weights=1., missing='none', hasconst=None,
**kwargs):
weights = np.array(weights)
if weights.shape == ():
if (missing == 'drop' and 'missing_idx' in kwargs and
kwargs['missing_idx'] is not None):
# patsy may have truncated endog
weights = np.repeat(weights, len(kwargs['missing_idx']))
else:
weights = np.repeat(weights, len(endog))
# handle case that endog might be of len == 1
if len(weights) == 1:
weights = np.array([weights.squeeze()])
else:
weights = weights.squeeze()
super(WLS, self).__init__(endog, exog, missing=missing,
weights=weights, hasconst=hasconst, **kwargs)
nobs = self.exog.shape[0]
weights = self.weights
# Experimental normalization of weights
weights = weights / np.sum(weights) * nobs
if weights.size != nobs and weights.shape[0] != nobs:
raise ValueError('Weights must be scalar or same length as design')
def whiten(self, X):
"""
Whitener for WLS model, multiplies each column by sqrt(self.weights)
Parameters
----------
X : array-like
Data to be whitened
Returns
-------
sqrt(weights)*X
"""
#print(self.weights.var())
X = np.asarray(X)
if X.ndim == 1:
return X * np.sqrt(self.weights)
elif X.ndim == 2:
return np.sqrt(self.weights)[:, None]*X
def loglike(self, params):
"""
Returns the value of the gaussian log-likelihood function at params.
Given the whitened design matrix, the log-likelihood is evaluated
at the parameter vector `params` for the dependent variable `Y`.
Parameters
----------
params : array-like
The parameter estimates.
Returns
-------
llf : float
The value of the log-likelihood function for a WLS Model.
Notes
--------
.. math:: -\\frac{n}{2}\\log\\left(Y-\\hat{Y}\\right)-\\frac{n}{2}\\left(1+\\log\\left(\\frac{2\\pi}{n}\\right)\\right)-\\frac{1}{2}log\\left(\\left|W\\right|\\right)
where :math:`W` is a diagonal matrix
"""
nobs2 = self.nobs / 2.0
SSR = ss(self.wendog - np.dot(self.wexog,params))
llf = -np.log(SSR) * nobs2 # concentrated likelihood
llf -= (1+np.log(np.pi/nobs2))*nobs2 # with constant
llf += 0.5 * np.sum(np.log(self.weights))
return llf
class OLS(WLS):
__doc__ = """
A simple ordinary least squares model.
%(params)s
%(extra_params)s
Attributes
----------
weights : scalar
Has an attribute weights = array(1.0) due to inheritance from WLS.
See Also
--------
GLS
Examples
--------
>>> import numpy as np
>>>
>>> import statsmodels.api as sm
>>>
>>> Y = [1,3,4,5,2,3,4]
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>>
>>> model = sm.OLS(Y,X)
>>> results = model.fit()
>>> results.params
array([ 2.14285714, 0.25 ])
>>> results.tvalues
array([ 1.87867287, 0.98019606])
>>> print(results.t_test([1, 0]))
<T test: effect=array([ 2.14285714]), sd=array([[ 1.14062282]]), t=array([[ 1.87867287]]), p=array([[ 0.05953974]]), df_denom=5>
>>> print(results.f_test(np.identity(2)))
<F test: F=array([[ 19.46078431]]), p=[[ 0.00437251]], df_denom=5, df_num=2>
Notes
-----
No constant is added by the model unless you are using formulas.
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc + base._extra_param_doc}
#TODO: change example to use datasets. This was the point of datasets!
def __init__(self, endog, exog=None, missing='none', hasconst=None,
**kwargs):
super(OLS, self).__init__(endog, exog, missing=missing,
hasconst=hasconst, **kwargs)
if "weights" in self._init_keys:
self._init_keys.remove("weights")
def loglike(self, params):
"""
The likelihood function for the classical OLS model.
Parameters
----------
params : array-like
The coefficients with which to estimate the log-likelihood.
Returns
-------
The concentrated likelihood function evaluated at params.
"""
nobs2 = self.nobs / 2.0
return -nobs2*np.log(2*np.pi)-nobs2*np.log(1/(2*nobs2) *\
np.dot(np.transpose(self.endog -
np.dot(self.exog, params)),
(self.endog - np.dot(self.exog,params)))) -\
nobs2
def whiten(self, Y):
"""
OLS model whitener does nothing: returns Y.
"""
return Y
class GLSAR(GLS):
__doc__ = """
A regression model with an AR(p) covariance structure.
%(params)s
rho : int
Order of the autoregressive covariance
%(extra_params)s
Examples
--------
>>> import statsmodels.api as sm
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>> Y = [1,3,4,5,8,10,9]
>>> model = sm.GLSAR(Y, X, rho=2)
>>> for i in range(6):
... results = model.fit()
... print("AR coefficients: {0}".format(model.rho))
... rho, sigma = sm.regression.yule_walker(results.resid,
... order=model.order)
... model = sm.GLSAR(Y, X, rho)
...
AR coefficients: [ 0. 0.]
AR coefficients: [-0.52571491 -0.84496178]
AR coefficients: [-0.6104153 -0.86656458]
AR coefficients: [-0.60439494 -0.857867 ]
AR coefficients: [-0.6048218 -0.85846157]
AR coefficients: [-0.60479146 -0.85841922]
>>> results.params
array([-0.66661205, 1.60850853])
>>> results.tvalues
array([ -2.10304127, 21.8047269 ])
>>> print(results.t_test([1, 0]))
<T test: effect=array([-0.66661205]), sd=array([[ 0.31697526]]), t=array([[-2.10304127]]), p=array([[ 0.06309969]]), df_denom=3>
>>> print(results.f_test(np.identity(2)))
<F test: F=array([[ 1815.23061844]]), p=[[ 0.00002372]], df_denom=3, df_num=2>
Or, equivalently
>>> model2 = sm.GLSAR(Y, X, rho=2)
>>> res = model2.iterative_fit(maxiter=6)
>>> model2.rho
array([-0.60479146, -0.85841922])
Notes
-----
GLSAR is considered to be experimental.
The linear autoregressive process of order p--AR(p)--is defined as:
TODO
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog=None, rho=1, missing='none', **kwargs):
#this looks strange, interpreting rho as order if it is int
if isinstance(rho, np.int):
self.order = rho
self.rho = np.zeros(self.order, np.float64)
else:
self.rho = np.squeeze(np.asarray(rho))
if len(self.rho.shape) not in [0,1]:
raise ValueError("AR parameters must be a scalar or a vector")
if self.rho.shape == ():
self.rho.shape = (1,)
self.order = self.rho.shape[0]
if exog is None:
#JP this looks wrong, should be a regression on constant
#results for rho estimate now identical to yule-walker on y
#super(AR, self).__init__(endog, add_constant(endog))
super(GLSAR, self).__init__(endog, np.ones((endog.shape[0],1)),
missing=missing, **kwargs)
else:
super(GLSAR, self).__init__(endog, exog, missing=missing,
**kwargs)
def iterative_fit(self, maxiter=3):
"""
Perform an iterative two-stage procedure to estimate a GLS model.
The model is assumed to have AR(p) errors, AR(p) parameters and
regression coefficients are estimated iteratively.
Parameters
----------
maxiter : integer, optional
the number of iterations
"""
#TODO: update this after going through example.
for i in range(maxiter-1):
if hasattr(self, 'pinv_wexog'):
del self.pinv_wexog
self.initialize()
results = self.fit()
self.rho, _ = yule_walker(results.resid,
order=self.order, df=None)
#why not another call to self.initialize
if hasattr(self, 'pinv_wexog'):
del self.pinv_wexog
self.initialize()
results = self.fit() #final estimate
return results # add missing return
def whiten(self, X):
"""
Whiten a series of columns according to an AR(p)
covariance structure. This drops initial p observations.
Parameters
----------
X : array-like
The data to be whitened,
Returns
-------
whitened array
"""
#TODO: notation for AR process
X = np.asarray(X, np.float64)
_X = X.copy()
#the following loops over the first axis, works for 1d and nd
for i in range(self.order):
_X[(i+1):] = _X[(i+1):] - self.rho[i] * X[0:-(i+1)]
return _X[self.order:]
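# Worked example: with rho = [0.5] (order 1),
#   whiten([1., 2., 3.]) computes [2 - 0.5*1, 3 - 0.5*2] = [1.5, 2.0],
# dropping the first observation.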
def yule_walker(X, order=1, method="unbiased", df=None, inv=False, demean=True):
"""
Estimate AR(p) parameters from a sequence X using Yule-Walker equation.
Unbiased or maximum-likelihood estimator (mle)
See, for example:
http://en.wikipedia.org/wiki/Autoregressive_moving_average_model
Parameters
----------
X : array-like
1d array
order : integer, optional
The order of the autoregressive process. Default is 1.
method : string, optional
Method can be "unbiased" or "mle" and this determines denominator in
estimate of autocorrelation function (ACF) at lag k. If "mle", the
denominator is n=X.shape[0], if "unbiased" the denominator is n-k.
The default is unbiased.
df : integer, optional
Specifies the degrees of freedom. If `df` is supplied, then it is assumed
the X has `df` degrees of freedom rather than `n`. Default is None.
inv : bool
If inv is True the inverse of R is also returned. Default is False.
demean : bool
True, the mean is subtracted from `X` before estimation.
Returns
-------
rho
The autoregressive coefficients
sigma
TODO
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.datasets.sunspots import load
>>> data = load()
>>> rho, sigma = sm.regression.yule_walker(data.endog,
order=4, method="mle")
>>> rho
array([ 1.28310031, -0.45240924, -0.20770299, 0.04794365])
>>> sigma
16.808022730464351
"""
#TODO: define R better, look back at notes and technical notes on YW.
#First link here is useful
#http://www-stat.wharton.upenn.edu/~steele/Courses/956/ResourceDetails/YuleWalkerAndMore.htm
method = str(method).lower()
if method not in ["unbiased", "mle"]:
raise ValueError("ACF estimation method must be 'unbiased' or 'MLE'")
X = np.array(X, dtype=np.float64)
if demean:
X -= X.mean() # automatically demeans X
n = df or X.shape[0]
if method == "unbiased": # this is df_resid ie., n - p
denom = lambda k: n - k
else:
denom = lambda k: n
if X.ndim > 1 and X.shape[1] != 1:
raise ValueError("expecting a vector to estimate AR parameters")
r = np.zeros(order+1, np.float64)
r[0] = (X**2).sum() / denom(0)
for k in range(1,order+1):
r[k] = (X[0:-k]*X[k:]).sum() / denom(k)
R = toeplitz(r[:-1])
rho = np.linalg.solve(R, r[1:])
sigmasq = r[0] - (r[1:]*rho).sum()
if inv:
return rho, np.sqrt(sigmasq), np.linalg.inv(R)
else:
return rho, np.sqrt(sigmasq)
class RegressionResults(base.LikelihoodModelResults):
"""
This class summarizes the fit of a linear regression model.
It handles the output of contrasts, estimates of covariance, etc.
Returns
-------
**Attributes**
aic
Akaike's information criterion. For a model with a constant
:math:`-2llf + 2(df_model + 1)`. For a model without a constant
:math:`-2llf + 2(df_model)`.
bic
Bayes' information criterion. For a model with a constant
:math:`-2llf + \log(n)(df_model+1)`. For a model without a constant
:math:`-2llf + \log(n)(df_model)`
bse
The standard errors of the parameter estimates.
pinv_wexog
See specific model class docstring
centered_tss
The total (weighted) sum of squares centered about the mean.
cov_HC0
Heteroscedasticity robust covariance matrix. See HC0_se below.
cov_HC1
Heteroscedasticity robust covariance matrix. See HC1_se below.
cov_HC2
Heteroscedasticity robust covariance matrix. See HC2_se below.
cov_HC3
Heteroscedasticity robust covariance matrix. See HC3_se below.
cov_type
Parameter covariance estimator used for standard errors and t-stats
df_model
Model degrees of freedom. The number of regressors `p`. Does not
include the constant if one is present
df_resid
Residual degrees of freedom. `n - p - 1`, if a constant is present.
`n - p` if a constant is not included.
ess
Explained sum of squares. If a constant is present, the centered
total sum of squares minus the sum of squared residuals. If there is
no constant, the uncentered total sum of squares is used.
fvalue
F-statistic of the fully specified model. Calculated as the mean
squared error of the model divided by the mean squared error of the
residuals.
f_pvalue
p-value of the F-statistic
fittedvalues
The predicted values for the original (unwhitened) design.
het_scale
adjusted squared residuals for heteroscedasticity robust standard
errors. Is only available after `HC#_se` or `cov_HC#` is called.
See HC#_se for more information.
HC0_se
White's (1980) heteroskedasticity robust standard errors.
Defined as sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)) X (X.T X)^(-1)))
where e_i = resid[i]
HC0_se is a cached property.
When HC0_se or cov_HC0 is called the RegressionResults instance will
then have another attribute `het_scale`, which in this case is just
resid**2.
HC1_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as sqrt(diag(n/(n-p)*HC_0))
HC1_se is a cached property.
When HC1_se or cov_HC1 is called the RegressionResults instance will
then have another attribute `het_scale`, which in this case is
n/(n-p)*resid**2.
HC2_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)/(1-h_ii)) X (X.T X)^(-1)))
where h_ii = x_i(X.T X)^(-1)x_i.T
HC2_se is a cached property.
When HC2_se or cov_HC2 is called the RegressionResults instance will
then have another attribute `het_scale`, which in this case is
resid^(2)/(1-h_ii).
HC3_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)/(1-h_ii)^(2)) X (X.T X)^(-1)))
where h_ii = x_i(X.T X)^(-1)x_i.T
HC3_se is a cached property.
When HC3_se or cov_HC3 is called the RegressionResults instance will
then have another attribute `het_scale`, which in this case is
resid^(2)/(1-h_ii)^(2).
model
A pointer to the model instance that called fit() or results.
mse_model
Mean squared error of the model. This is the explained sum of squares
divided by the model degrees of freedom.
mse_resid
Mean squared error of the residuals. The sum of squared residuals
divided by the residual degrees of freedom.
mse_total
Total mean squared error. Defined as the uncentered total sum of
squares divided by n the number of observations.
nobs
Number of observations n.
normalized_cov_params
See specific model class docstring
params
The linear coefficients that minimize the least squares criterion. This
is usually called Beta for the classical linear model.
pvalues
The two-tailed p values for the t-stats of the params.
resid
The residuals of the model.
resid_pearson
`wresid` normalized to have unit variance.
rsquared
R-squared of a model with an intercept. This is defined here as
1 - `ssr`/`centered_tss` if the constant is included in the model and
1 - `ssr`/`uncentered_tss` if the constant is omitted.
rsquared_adj
Adjusted R-squared. This is defined here as
1 - (`nobs`-1)/`df_resid` * (1-`rsquared`) if a constant is included
and 1 - `nobs`/`df_resid` * (1-`rsquared`) if no constant is included.
scale
A scale factor for the covariance matrix.
Default value is ssr/(n-p). Note that the square root of `scale` is
often called the standard error of the regression.
ssr
Sum of squared (whitened) residuals.
uncentered_tss
Uncentered sum of squares. Sum of the squared values of the
(whitened) endogenous response variable.
wresid
The residuals of the transformed/whitened regressand and regressor(s)
"""
_cache = {} # needs to be a class attribute for scale setter?
def __init__(self, model, params, normalized_cov_params=None, scale=1.,
cov_type='nonrobust', cov_kwds=None, use_t=None):
super(RegressionResults, self).__init__(model, params,
normalized_cov_params,
scale)
self._cache = resettable_cache()
if hasattr(model, 'wexog_singular_values'):
self._wexog_singular_values = model.wexog_singular_values
else:
self._wexog_singular_values = None
self.df_model = model.df_model
self.df_resid = model.df_resid
if cov_type == 'nonrobust':
self.cov_type = 'nonrobust'
self.cov_kwds = {'description' : 'Standard Errors assume that the ' +
'covariance matrix of the errors is correctly ' +
'specified.'}
if use_t is None:
self.use_t = True # TODO: class default
else:
if cov_kwds is None:
cov_kwds = {}
if 'use_t' in cov_kwds:
# TODO: we want to get rid of 'use_t' in cov_kwds
use_t_2 = cov_kwds.pop('use_t')
if use_t is None:
use_t = use_t_2
# TODO: warn or not?
self.get_robustcov_results(cov_type=cov_type, use_self=True,
use_t=use_t, **cov_kwds)
def __str__(self):
return str(self.summary())
def conf_int(self, alpha=.05, cols=None):
"""
Returns the confidence interval of the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval.
i.e., the default `alpha` = .05 returns a 95% confidence interval.
cols : array-like, optional
`cols` specifies which confidence intervals to return
Notes
-----
The confidence interval is based on Student's t-distribution.
"""
# keep method for docstring for now
ci = super(RegressionResults, self).conf_int(alpha=alpha, cols=cols)
return ci
@cache_readonly
def nobs(self):
return float(self.model.wexog.shape[0])
@cache_readonly
def fittedvalues(self):
return self.model.predict(self.params, self.model.exog)
@cache_readonly
def wresid(self):
return self.model.wendog - self.model.predict(self.params,
self.model.wexog)
@cache_readonly
def resid(self):
return self.model.endog - self.model.predict(self.params,
self.model.exog)
#TODO: fix writable example
@cache_writable()
def scale(self):
wresid = self.wresid
return np.dot(wresid, wresid) / self.df_resid
@cache_readonly
def ssr(self):
wresid = self.wresid
return np.dot(wresid, wresid)
@cache_readonly
def centered_tss(self):
model = self.model
weights = getattr(model, 'weights', None)
if weights is not None:
return np.sum(weights*(model.endog - np.average(model.endog,
weights=weights))**2)
else: # this is probably broken for GLS
centered_endog = model.wendog - model.wendog.mean()
return np.dot(centered_endog, centered_endog)
@cache_readonly
def uncentered_tss(self):
wendog = self.model.wendog
return np.dot(wendog, wendog)
@cache_readonly
def ess(self):
if self.k_constant:
return self.centered_tss - self.ssr
else:
return self.uncentered_tss - self.ssr
@cache_readonly
def rsquared(self):
if self.k_constant:
return 1 - self.ssr/self.centered_tss
else:
return 1 - self.ssr/self.uncentered_tss
@cache_readonly
def rsquared_adj(self):
return 1 - np.divide(self.nobs - self.k_constant, self.df_resid) * (1 - self.rsquared)
@cache_readonly
def mse_model(self):
return self.ess/self.df_model
@cache_readonly
def mse_resid(self):
return self.ssr/self.df_resid
@cache_readonly
def mse_total(self):
if self.k_constant:
return self.centered_tss / (self.df_resid + self.df_model)
else:
return self.uncentered_tss / (self.df_resid + self.df_model)
@cache_readonly
def fvalue(self):
if hasattr(self, 'cov_type') and self.cov_type != 'nonrobust':
# with heteroscedasticity or correlation robustness
k_params = self.normalized_cov_params.shape[0]
mat = np.eye(k_params)
const_idx = self.model.data.const_idx
# TODO: What if model includes implicit constant, e.g. all dummies but no constant regressor?
# TODO: Restate as LM test by projecting out (orthogonalizing to) the constant?
if self.model.data.k_constant == 1:
# assume const_idx exists
idx = lrange(k_params)
idx.pop(const_idx)
mat = mat[idx] # remove constant
ft = self.f_test(mat)
# using backdoor to set another attribute that we already have
self._cache['f_pvalue'] = ft.pvalue
return ft.fvalue
else:
# for standard homoscedastic case
return self.mse_model/self.mse_resid
@cache_readonly
def f_pvalue(self):
return stats.f.sf(self.fvalue, self.df_model, self.df_resid)
@cache_readonly
def bse(self):
return np.sqrt(np.diag(self.cov_params()))
@cache_readonly
def aic(self):
return -2 * self.llf + 2 * (self.df_model + self.k_constant)
@cache_readonly
def bic(self):
return (-2 * self.llf + np.log(self.nobs) * (self.df_model +
self.k_constant))
@cache_readonly
def eigenvals(self):
"""
Return eigenvalues sorted in decreasing order.
"""
if self._wexog_singular_values is not None:
eigvals = self._wexog_singular_values ** 2
else:
eigvals = np.linalg.eigvalsh(np.dot(self.model.wexog.T, self.model.wexog))
return np.sort(eigvals)[::-1]
@cache_readonly
def condition_number(self):
"""
Return condition number of exogenous matrix.
Calculated as ratio of largest to smallest eigenvalue.
"""
eigvals = self.eigenvals
return np.sqrt(eigvals[0]/eigvals[-1])
#TODO: make these properties reset bse
def _HCCM(self, scale):
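# Sandwich estimator (X'X)^-1 X' diag(scale) X (X'X)^-1, computed here
# from the pseudo-inverse of the whitened design matrix.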
H = np.dot(self.model.pinv_wexog,
scale[:,None]*self.model.pinv_wexog.T)
return H
@cache_readonly
def cov_HC0(self):
"""
See statsmodels.RegressionResults
"""
self.het_scale = self.wresid**2
cov_HC0 = self._HCCM(self.het_scale)
return cov_HC0
@cache_readonly
def cov_HC1(self):
"""
See statsmodels.RegressionResults
"""
self.het_scale = self.nobs/(self.df_resid)*(self.wresid**2)
cov_HC1 = self._HCCM(self.het_scale)
return cov_HC1
@cache_readonly
def cov_HC2(self):
"""
See statsmodels.RegressionResults
"""
# probably could be optimized
h = np.diag(chain_dot(self.model.wexog,
self.normalized_cov_params,
self.model.wexog.T))
self.het_scale = self.wresid**2/(1-h)
cov_HC2 = self._HCCM(self.het_scale)
return cov_HC2
@cache_readonly
def cov_HC3(self):
"""
See statsmodels.RegressionResults
"""
h = np.diag(chain_dot(self.model.wexog,
self.normalized_cov_params,
self.model.wexog.T))
self.het_scale = (self.wresid / (1 - h)) ** 2
cov_HC3 = self._HCCM(self.het_scale)
return cov_HC3
@cache_readonly
def HC0_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC0))
@cache_readonly
def HC1_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC1))
@cache_readonly
def HC2_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC2))
@cache_readonly
def HC3_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC3))
@cache_readonly
def resid_pearson(self):
"""
Residuals, normalized to have unit variance.
Returns
-------
An array wresid/sqrt(scale)
"""
if not hasattr(self, 'resid'):
raise ValueError('Method requires residuals.')
eps = np.finfo(self.wresid.dtype).eps
if np.sqrt(self.scale) < 10 * eps * self.model.endog.mean():
# don't divide if scale is zero close to numerical precision
from warnings import warn
warn("All residuals are 0, cannot compute normed residuals.",
RuntimeWarning)
return self.wresid
else:
return self.wresid / np.sqrt(self.scale)
def _is_nested(self, restricted):
"""
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current
model. The result instance of the restricted model is required to
have two attributes, residual sum of squares, `ssr`, residual
degrees of freedom, `df_resid`.
Returns
-------
nested : bool
True if nested, otherwise false
Notes
-----
A model nests another model if the regressors in the smaller model are spanned
by the regressors in the larger model and the regressand is identical.
"""
if self.model.nobs != restricted.model.nobs:
return False
full_rank = self.model.rank
restricted_rank = restricted.model.rank
if full_rank <= restricted_rank:
return False
restricted_exog = restricted.model.wexog
full_wresid = self.wresid
scores = restricted_exog * full_wresid[:,None]
score_l2 = np.sqrt(np.mean(scores.mean(0) ** 2))
# TODO: Could be improved, and may fail depending on scale of regressors
return np.allclose(score_l2,0)
def compare_lm_test(self, restricted, demean=True, use_lr=False):
"""Use Lagrange Multiplier test to test whether restricted model is correct
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current
model. The result instance of the restricted model is required to
have two attributes, residual sum of squares, `ssr`, residual
degrees of freedom, `df_resid`.
demean : bool
Flag indicating whether to demean the scores based on the residuals
from the restricted model. If True, the covariance of the scores
is used and the LM test is identical to the large sample version
of the LR test.
Returns
-------
lm_value : float
test statistic, chi2 distributed
p_value : float
p-value of the test statistic
df_diff : int
degrees of freedom of the restriction, i.e. difference in df between
models
Notes
-----
TODO: explain LM test
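Examples
--------
A hedged sketch (assumes `res_full` and `res_restricted` are fitted
OLS results on the same data, with the restricted model nested in the
full one; the names are illustrative):
>>> lm_value, p_value, df_diff = res_full.compare_lm_test(res_restricted)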
"""
import statsmodels.stats.sandwich_covariance as sw
from numpy.linalg import inv
if not self._is_nested(restricted):
raise ValueError("Restricted model is not nested by full model.")
wresid = restricted.wresid
wexog = self.model.wexog
scores = wexog * wresid[:,None]
n = self.nobs
df_full = self.df_resid
df_restr = restricted.df_resid
df_diff = (df_restr - df_full)
s = scores.mean(axis=0)
if use_lr:
scores = wexog * self.wresid[:,None]
demean = False
if demean:
scores = scores - scores.mean(0)[None,:]
# Form matters here. If homoskedastic, S can be sigma^2 (X'X)^-1.
# If heteroskedastic, then the form below is fine.
# If HAC, then need to use HAC.
# If cluster, should use cluster.
cov_type = getattr(self, 'cov_type', 'nonrobust')
if cov_type == 'nonrobust':
sigma2 = np.mean(wresid**2)
XpX = np.dot(wexog.T,wexog) / n
Sinv = inv(sigma2 * XpX)
elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'):
Sinv = inv(np.dot(scores.T,scores) / n)
elif cov_type == 'HAC':
print("HAC")
maxlags = self.cov_kwds['maxlags']
use_correction = self.cov_kwds['use_correction']
Sinv = inv(sw.S_hac_simple(scores, maxlags) / n)
elif cov_type == 'cluster':
#cluster robust standard errors
groups = self.cov_kwds['groups']
# TODO: Might need demean option in S_crosssection by group?
Sinv = inv(sw.S_crosssection(scores, groups))
else:
raise ValueError('Only nonrobust, HC, HAC and cluster are ' +
'currently connected')
lm_value = n * chain_dot(s,Sinv,s.T)
p_value = stats.chi2.sf(lm_value, df_diff)
return lm_value, p_value, df_diff
def compare_f_test(self, restricted):
"""use F test to test whether restricted model is correct
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current
model. The result instance of the restricted model is required to
have two attributes, residual sum of squares, `ssr`, residual
degrees of freedom, `df_resid`.
Returns
-------
f_value : float
test statistic, F distributed
p_value : float
p-value of the test statistic
df_diff : int
degrees of freedom of the restriction, i.e. difference in df between
models
Notes
-----
See mailing list discussion October 17,
This test compares the residual sum of squares of the two models.
This is not a valid test, if there is unspecified heteroscedasticity
or correlation. This method will issue a warning if this is detected
but still return the results under the assumption of homoscedasticity
and no autocorrelation (sphericity).
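Examples
--------
A hedged sketch (same assumptions as for `compare_lm_test`; the names
are illustrative):
>>> f_value, p_value, df_diff = res_full.compare_f_test(res_restricted)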
"""
has_robust1 = getattr(self, 'cov_type', 'nonrobust') != 'nonrobust'
has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') !=
'nonrobust')
if has_robust1 or has_robust2:
import warnings
warnings.warn('F test for comparison is likely invalid with ' +
'robust covariance, proceeding anyway',
InvalidTestWarning)
ssr_full = self.ssr
ssr_restr = restricted.ssr
df_full = self.df_resid
df_restr = restricted.df_resid
df_diff = (df_restr - df_full)
f_value = (ssr_restr - ssr_full) / df_diff / ssr_full * df_full
p_value = stats.f.sf(f_value, df_diff, df_full)
return f_value, p_value, df_diff
def compare_lr_test(self, restricted, large_sample=False):
"""
Likelihood ratio test to test whether restricted model is correct
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current model.
The result instance of the restricted model is required to have two
attributes, residual sum of squares, `ssr`, residual degrees of
freedom, `df_resid`.
large_sample : bool
Flag indicating whether to use a heteroskedasticity robust version
of the LR test, which is a modified LM test.
Returns
-------
lr_stat : float
likelihood ratio, chisquare distributed with df_diff degrees of
freedom
p_value : float
p-value of the test statistic
df_diff : int
degrees of freedom of the restriction, i.e. difference in df between
models
Notes
-----
The exact likelihood ratio is valid for homoskedastic data, and is
defined as
.. math:: D=-2\\log\\left(\\frac{\\mathcal{L}_{null}}
{\\mathcal{L}_{alternative}}\\right)
where :math:`\mathcal{L}` is the likelihood of the model. With :math:`D`
distributed as chisquare with df equal to difference in number of
parameters or equivalently difference in residual degrees of freedom.
The large sample version of the likelihood ratio is defined as
.. math:: D=n s^{\\prime}S^{-1}s
where :math:`s=n^{-1}\\sum_{i=1}^{n} s_{i}`
.. math:: s_{i} = x_{i,alternative} \\epsilon_{i,null}
is the average score of the model evaluated using the residuals from
null model and the regressors from the alternative model and :math:`S`
is the covariance of the scores, :math:`s_{i}`. The covariance of the
scores is estimated using the same estimator as in the alternative model.
This test compares the loglikelihood of the two models.
This may not be a valid test, if there is unspecified heteroscedasticity
or correlation. This method will issue a warning if this is detected
but still return the results without taking unspecified
heteroscedasticity or correlation into account.
TODO: put into separate function, needs tests
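Examples
--------
A hedged sketch (same assumptions as for `compare_lm_test`; the names
are illustrative):
>>> lr_stat, p_value, df_diff = res_full.compare_lr_test(res_restricted)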
"""
# See mailing list discussion October 17,
if large_sample:
return self.compare_lm_test(restricted, use_lr=True)
has_robust1 = (getattr(self, 'cov_type', 'nonrobust') != 'nonrobust')
has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') !=
'nonrobust')
if has_robust1 or has_robust2:
import warnings
warnings.warn('Likelihood Ratio test is likely invalid with ' +
'robust covariance, proceeding anyway',
InvalidTestWarning)
llf_full = self.llf
llf_restr = restricted.llf
df_full = self.df_resid
df_restr = restricted.df_resid
lrdf = (df_restr - df_full)
lrstat = -2*(llf_restr - llf_full)
lr_pvalue = stats.chi2.sf(lrstat, lrdf)
return lrstat, lr_pvalue, lrdf
def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwds):
"""create new results instance with robust covariance as default
Parameters
----------
cov_type : string
the type of robust sandwich estimator to use. see Notes below
use_t : bool
If true, then the t distribution is used for inference.
If false, then the normal distribution is used.
If `use_t` is None, then an appropriate default is used, which is
`true` if the cov_type is nonrobust, and `false` in all other cases.
kwds : depends on cov_type
Required or optional arguments for robust covariance calculation.
see Notes below
Returns
-------
results : results instance
This method creates a new results instance with the requested
robust covariance as the default covariance of the parameters.
Inferential statistics like p-values and hypothesis tests will be
based on this covariance matrix.
Notes
-----
The following covariance types and required or optional arguments are
currently available:
- 'fixed scale' and optional keyword argument 'scale' which uses
a predefined scale estimate with default equal to one.
- 'HC0', 'HC1', 'HC2', 'HC3' and no keyword arguments:
heteroscedasticity robust covariance
- 'HAC' and keywords
- `maxlags` integer (required) : number of lags to use
- `kernel` string (optional) : kernel, default is Bartlett
- `use_correction` bool (optional) : If true, use small sample
correction
- 'cluster' and required keyword `groups`, integer group indicator
- `groups` array_like, integer (required) :
index of clusters or groups
- `use_correction` bool (optional) :
If True the sandwich covariance is calculated with a small
sample correction.
If False the sandwich covariance is calculated without
small sample correction.
- `df_correction` bool (optional)
If True (default), then the degrees of freedom for the
inferential statistics and hypothesis tests, such as
pvalues, f_pvalue, conf_int, and t_test and f_test, are
based on the number of groups minus one instead of the
total number of observations minus the number of explanatory
variables. `df_resid` of the results instance is adjusted.
If False, then `df_resid` of the results instance is not
adjusted.
- 'nw-groupsum' Driscoll and Kraay, heteroscedasticity and
autocorrelation robust standard errors in panel data
keywords
- `time` array_like (required) : index of time periods
- `maxlags` integer (required) : number of lags to use
- `kernel` string (optional) : kernel, default is Bartlett
- `use_correction` False or string in ['hac', 'cluster'] (optional) :
If False the sandwich covariance is calculated without
small sample correction.
If `use_correction = 'cluster'` (default), then the same
small sample correction as in the case of `cov_type='cluster'`
is used.
- `df_correction` bool (optional)
adjustment to df_resid, see cov_type 'cluster' above
#TODO: we need more options here
- 'nw-panel' heteroscedasticity and autocorrelation robust standard
errors in panel data.
The data needs to be sorted in this case, the time series for
each panel unit or cluster need to be stacked.
keywords
- `time` array_like (required) : index of time periods
- `maxlags` integer (required) : number of lags to use
- `kernel` string (optional) : kernel, default is Bartlett
- `use_correction` False or string in ['hac', 'cluster'] (optional) :
If False the sandwich covariance is calculated without
small sample correction.
- `df_correction` bool (optional)
adjustment to df_resid, see cov_type 'cluster' above
#TODO: we need more options here
Reminder:
`use_correction` in "nw-groupsum" and "nw-panel" is not bool,
needs to be in [False, 'hac', 'cluster']
TODO: Currently there is no check for extra or misspelled keywords,
except in the case of cov_type `HCx`
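Examples
--------
A hedged sketch (assuming `res` is a fitted OLS results instance and
`firm_ids` is an integer group indicator; the names are illustrative):
>>> res_hc3 = res.get_robustcov_results(cov_type='HC3')
>>> res_clu = res.get_robustcov_results(cov_type='cluster',
...                                     groups=firm_ids)
>>> res_clu.bse  # cluster robust standard errors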
"""
import statsmodels.stats.sandwich_covariance as sw
# TODO: make separate function that returns a robust cov plus info
use_self = kwds.pop('use_self', False)
if use_self:
res = self
else:
res = self.__class__(self.model, self.params,
normalized_cov_params=self.normalized_cov_params,
scale=self.scale)
res.cov_type = cov_type
# use_t might already be defined by the class, and already set
if use_t is None:
use_t = self.use_t
res.cov_kwds = {'use_t':use_t} # store for information
res.use_t = use_t
adjust_df = False
if cov_type in ['cluster', 'nw-panel', 'nw-groupsum']:
df_correction = kwds.get('df_correction', None)
# TODO: check also use_correction, do I need all combinations?
if df_correction is not False: # i.e. in [None, True]:
# user didn't explicitly set it to False
adjust_df = True
res.cov_kwds['adjust_df'] = adjust_df
# verify and set kwds, and calculate cov
# TODO: this should be outsourced in a function so we can reuse it in
# other models
# TODO: make it DRYer repeated code for checking kwds
if cov_type in ['fixed scale', 'fixed_scale']:
res.cov_kwds['description'] = ('Standard Errors are based on ' +
'fixed scale')
res.cov_kwds['scale'] = scale = kwds.get('scale', 1.)
res.cov_params_default = scale * res.normalized_cov_params
elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'):
if kwds:
raise ValueError('heteroscedasticity robust covariance ' +
'does not use keywords')
res.cov_kwds['description'] = ('Standard Errors are heteroscedasticity ' +
'robust ' + '(' + cov_type + ')')
# TODO cannot access cov without calling se first
getattr(self, cov_type.upper() + '_se')
res.cov_params_default = getattr(self, 'cov_' + cov_type.upper())
elif cov_type == 'HAC':
maxlags = kwds['maxlags'] # required?, default in cov_hac_simple
res.cov_kwds['maxlags'] = maxlags
use_correction = kwds.get('use_correction', False)
res.cov_kwds['use_correction'] = use_correction
res.cov_kwds['description'] = ('Standard Errors are heteroscedasticity ' +
'and autocorrelation robust (HAC) using %d lags and %s small ' +
'sample correction') % (maxlags, ['without', 'with'][use_correction])
res.cov_params_default = sw.cov_hac_simple(self, nlags=maxlags,
use_correction=use_correction)
elif cov_type == 'cluster':
#cluster robust standard errors, one- or two-way
groups = kwds['groups']
if not hasattr(groups, 'shape'):
groups = np.asarray(groups).T
if groups.ndim >= 2:
groups = groups.squeeze()
res.cov_kwds['groups'] = groups
use_correction = kwds.get('use_correction', True)
res.cov_kwds['use_correction'] = use_correction
if groups.ndim == 1:
if adjust_df:
# need to find number of groups
# duplicate work
self.n_groups = n_groups = len(np.unique(groups))
res.cov_params_default = sw.cov_cluster(self, groups,
use_correction=use_correction)
elif groups.ndim == 2:
if hasattr(groups, 'values'):
groups = groups.values
if adjust_df:
# need to find number of groups
# duplicate work
n_groups0 = len(np.unique(groups[:,0]))
n_groups1 = len(np.unique(groups[:, 1]))
self.n_groups = (n_groups0, n_groups1)
n_groups = min(n_groups0, n_groups1) # use for adjust_df
# Note: sw.cov_cluster_2groups has 3 returns
res.cov_params_default = sw.cov_cluster_2groups(self, groups,
use_correction=use_correction)[0]
else:
raise ValueError('only two groups are supported')
res.cov_kwds['description'] = ('Standard Errors are robust to ' +
'cluster correlation ' + '(' + cov_type + ')')
elif cov_type == 'nw-panel':
#cluster robust standard errors
res.cov_kwds['time'] = time = kwds['time']
#TODO: nlags is currently required
#nlags = kwds.get('nlags', True)
#res.cov_kwds['nlags'] = nlags
#TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
use_correction = kwds.get('use_correction', 'hac')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
# TODO: clumsy time index in cov_nw_panel
tt = (np.nonzero(np.diff(time) < 0)[0] + 1).tolist()
groupidx = lzip([0] + tt, tt + [len(time)])
self.n_groups = n_groups = len(groupidx)
res.cov_params_default = sw.cov_nw_panel(self, maxlags, groupidx,
weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = ('Standard Errors are robust to ' +
'cluster correlation ' + '(' + cov_type + ')')
elif cov_type == 'nw-groupsum':
# Driscoll-Kraay standard errors
res.cov_kwds['time'] = time = kwds['time']
#TODO: nlags is currently required
#nlags = kwds.get('nlags', True)
#res.cov_kwds['nlags'] = nlags
#TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
use_correction = kwds.get('use_correction', 'cluster')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if adjust_df:
# need to find number of groups
tt = (np.nonzero(np.diff(time) < 0)[0] + 1)
self.n_groups = n_groups = len(tt) + 1
res.cov_params_default = sw.cov_nw_groupsum(self, maxlags, time,
weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = (
'Driscoll and Kraay Standard Errors are robust to ' +
'cluster correlation ' + '(' + cov_type + ')')
else:
raise ValueError('cov_type not recognized. See docstring for ' +
'available options and spelling')
if adjust_df:
# Note: df_resid is used for scale and others, add new attribute
res.df_resid_inference = n_groups - 1
return res
def get_prediction(self, exog=None, transform=True, weights=None,
row_labels=None, **kwds):
return pred.get_prediction(self, exog=exog, transform=transform,
weights=weights, row_labels=row_labels, **kwds)
get_prediction.__doc__ = pred.get_prediction.__doc__
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""Summarize the Regression Results
Parameters
----------
yname : string, optional
Default is `y`
xname : list of strings, optional
Default is `var_##` for ## in 0, ..., p-1, where p is the number of regressors
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
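Examples
--------
A minimal sketch (assuming `res` is a fitted results instance):
>>> print(res.summary())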
"""
#TODO: import where we need it (for now), add as cached attributes
from statsmodels.stats.stattools import (jarque_bera,
omni_normtest, durbin_watson)
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
eigvals = self.eigenvals
condno = self.condition_number
self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,
omni=omni, omnipv=omnipv, condno=condno,
mineigval=eigvals[-1])
#TODO not used yet
#diagn_left_header = ['Models stats']
#diagn_right_header = ['Residual stats']
#TODO: requiring list/iterable is a bit annoying
#need more control over formatting
#TODO: default don't work if it's not identically spelled
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Least Squares']),
('Date:', None),
('Time:', None),
('No. Observations:', None),
('Df Residuals:', None), #[self.df_resid]), #TODO: spelling
('Df Model:', None), #[self.df_model])
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
top_right = [('R-squared:', ["%#8.3f" % self.rsquared]),
('Adj. R-squared:', ["%#8.3f" % self.rsquared_adj]),
('F-statistic:', ["%#8.4g" % self.fvalue] ),
('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
('Log-Likelihood:', None), #["%#6.4g" % self.llf]),
('AIC:', ["%#8.4g" % self.aic]),
('BIC:', ["%#8.4g" % self.bic])
]
diagn_left = [('Omnibus:', ["%#6.3f" % omni]),
('Prob(Omnibus):', ["%#6.3f" % omnipv]),
('Skew:', ["%#6.3f" % skew]),
('Kurtosis:', ["%#6.3f" % kurtosis])
]
diagn_right = [('Durbin-Watson:', ["%#8.3f" % durbin_watson(self.wresid)]),
('Jarque-Bera (JB):', ["%#8.3f" % jb]),
('Prob(JB):', ["%#8.3g" % jbpv]),
('Cond. No.', ["%#8.3g" % condno])
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
#create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
yname=yname, xname=xname,
title="")
#add warnings/notes, added to text format only
etext =[]
if hasattr(self, 'cov_type'):
etext.append(self.cov_kwds['description'])
if self.model.exog.shape[0] < self.model.exog.shape[1]:
wstr = "The input rank is higher than the number of observations."
etext.append(wstr)
if eigvals[-1] < 1e-10:
wstr = "The smallest eigenvalue is %6.3g. This might indicate "
wstr += "that there are\n"
wstr += "strong multicollinearity problems or that the design "
wstr += "matrix is singular."
wstr = wstr % eigvals[-1]
etext.append(wstr)
elif condno > 1000: #TODO: what is recommended
wstr = "The condition number is large, %6.3g. This might "
wstr += "indicate that there are\n"
wstr += "strong multicollinearity or other numerical "
wstr += "problems."
wstr = wstr % condno
etext.append(wstr)
if etext:
etext = ["[{0}] {1}".format(i + 1, text) for i, text in enumerate(etext)]
etext.insert(0, "Warnings:")
smry.add_extra_txt(etext)
return smry
#top = summary_top(self, gleft=topleft, gright=diagn_left, #[],
# yname=yname, xname=xname,
# title=self.model.__class__.__name__ + ' ' +
# "Regression Results")
#par = summary_params(self, yname=yname, xname=xname, alpha=.05,
# use_t=False)
#
#diagn = summary_top(self, gleft=diagn_left, gright=diagn_right,
# yname=yname, xname=xname,
# title="Linear Model")
#
#return summary_return([top, par, diagn], return_fmt=return_fmt)
def summary2(self, yname=None, xname=None, title=None, alpha=.05,
float_format="%.4f"):
"""Experimental summary function to summarize the regression results
Parameters
----------
xname : List of strings of length equal to the number of parameters
Names of the independent variables (optional)
yname : string
Name of the dependent variable (optional)
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format: string
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
# Diagnostics
from statsmodels.stats.stattools import (jarque_bera,
omni_normtest,
durbin_watson)
from statsmodels.compat.collections import OrderedDict
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
dw = durbin_watson(self.wresid)
eigvals = self.eigenvals
condno = self.condition_number
eigvals = np.sort(eigvals) #in increasing order
diagnostic = OrderedDict([
('Omnibus:', "%.3f" % omni),
('Prob(Omnibus):', "%.3f" % omnipv),
('Skew:', "%.3f" % skew),
('Kurtosis:', "%.3f" % kurtosis),
('Durbin-Watson:', "%.3f" % dw),
('Jarque-Bera (JB):', "%.3f" % jb),
('Prob(JB):', "%.3f" % jbpv),
('Condition No.:', "%.0f" % condno)
])
# Summary
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
smry.add_dict(diagnostic)
# Warnings
if eigvals[-1] < 1e-10:
warn = "The smallest eigenvalue is %6.3g. This might indicate that\
there are strong multicollinearity problems or that the design\
matrix is singular." % eigvals[-1]
smry.add_text(warn)
if condno > 1000:
warn = "* The condition number is large (%.g). This might indicate \
strong multicollinearity or other numerical problems." % condno
smry.add_text(warn)
return smry
class OLSResults(RegressionResults):
"""
Results class for an OLS model.
Most of the methods and attributes are inherited from RegressionResults.
The special methods that are only available for OLS are:
- get_influence
- outlier_test
- el_test
- conf_int_el
See Also
--------
RegressionResults
"""
def get_influence(self):
"""
get an instance of Influence with influence and outlier measures
Returns
-------
infl : Influence instance
the instance has methods to calculate the main influence and
outlier measures for the OLS regression
See also
--------
:class:`statsmodels.stats.outliers_influence.OLSInfluence`
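Examples
--------
A hedged sketch (assuming `res` is a fitted OLS results instance):
>>> infl = res.get_influence()
>>> cooks_d, cooks_pval = infl.cooks_distance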
"""
from statsmodels.stats.outliers_influence import OLSInfluence
return OLSInfluence(self)
def outlier_test(self, method='bonf', alpha=.05):
"""
Test observations for outliers according to method
Parameters
----------
method : str
- `bonferroni` : one-step correction
- `sidak` : one-step correction
- `holm-sidak` :
- `holm` :
- `simes-hochberg` :
- `hommel` :
- `fdr_bh` : Benjamini/Hochberg
- `fdr_by` : Benjamini/Yekutieli
See `statsmodels.stats.multitest.multipletests` for details.
alpha : float
familywise error rate
Returns
-------
table : ndarray or DataFrame
Returns either an ndarray or a DataFrame if labels is not None.
Will attempt to get labels from model_results if available. The
columns are the Studentized residuals, the unadjusted p-value,
and the corrected p-value according to method.
Notes
-----
The unadjusted p-value is stats.t.sf(abs(resid), df) where
df = df_resid - 1.
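Examples
--------
A hedged sketch (assuming `res` is a fitted OLS results instance):
>>> table = res.outlier_test(method='bonf', alpha=.05)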
"""
from statsmodels.stats.outliers_influence import outlier_test
return outlier_test(self, method, alpha)
def el_test(self, b0_vals, param_nums, return_weights=0,
ret_params=0, method='nm',
stochastic_exog=1, return_params=0):
"""
Tests single or joint hypotheses of the regression parameters using
Empirical Likelihood.
Parameters
----------
b0_vals : 1darray
The hypothesized value of the parameter to be tested
param_nums : 1darray
The parameter number to be tested
return_weights : bool
If true, returns the weights that optimize the likelihood
ratio at b0_vals. Default is False
ret_params : bool
If true, returns the parameter vector that maximizes the likelihood
ratio at b0_vals. Also returns the weights. Default is False
method : string
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
Default is 'nm'
stochastic_exog : bool
When True, the exogenous variables are assumed to be stochastic.
When the regressors are nonstochastic, moment conditions are
placed on the exogenous variables. Confidence intervals for
stochastic regressors are at least as large as for non-stochastic
regressors. Default is True
Returns
-------
res : tuple
The p-value and -2 times the log-likelihood ratio for the
hypothesized values.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.stackloss.load()
>>> endog = data.endog
>>> exog = sm.add_constant(data.exog)
>>> model = sm.OLS(endog, exog)
>>> fitted = model.fit()
>>> fitted.params
>>> array([-39.91967442, 0.7156402 , 1.29528612, -0.15212252])
>>> fitted.rsquared
>>> 0.91357690446068196
>>> # Test that the slope on the first variable is 0
>>> fitted.test_beta([0], [1])
>>> (1.7894660442330235e-07, 27.248146353709153)
"""
params = np.copy(self.params)
opt_fun_inst = _ELRegOpts() # to store weights
if len(param_nums) == len(params):
llr = opt_fun_inst._opt_nuis_regress([],
param_nums=param_nums,
endog=self.model.endog,
exog=self.model.exog,
nobs=self.model.nobs,
nvar=self.model.exog.shape[1],
params=params,
b0_vals=b0_vals,
stochastic_exog=stochastic_exog)
pval = 1 - chi2.cdf(llr, len(param_nums))
if return_weights:
return llr, pval, opt_fun_inst.new_weights
else:
return llr, pval
x0 = np.delete(params, param_nums)
args = (param_nums, self.model.endog, self.model.exog,
self.model.nobs, self.model.exog.shape[1], params,
b0_vals, stochastic_exog)
if method == 'nm':
llr = optimize.fmin(opt_fun_inst._opt_nuis_regress, x0, maxfun=10000,
maxiter=10000, full_output=1, disp=0,
args=args)[1]
if method == 'powell':
llr = optimize.fmin_powell(opt_fun_inst._opt_nuis_regress, x0,
full_output=1, disp=0,
args=args)[1]
pval = 1 - chi2.cdf(llr, len(param_nums))
if ret_params:
return llr, pval, opt_fun_inst.new_weights, opt_fun_inst.new_params
elif return_weights:
return llr, pval, opt_fun_inst.new_weights
else:
return llr, pval
def conf_int_el(self, param_num, sig=.05, upper_bound=None, lower_bound=None,
method='nm', stochastic_exog=1):
"""
Computes the confidence interval for the parameter given by param_num
using Empirical Likelihood
Parameters
----------
param_num : int
The parameter for which the confidence interval is desired
sig : float
The significance level. Default is .05
upper_bound : float
The maximum value the upper limit can be. Default is the
99.9% confidence value under OLS assumptions.
lower_bound : float
The minimum value the lower limit can be. Default is the 99.9%
confidence value under OLS assumptions.
method : string
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
Default is 'nm'
Returns
-------
ci : tuple
The confidence interval
See Also
--------
el_test
Notes
-----
This function uses brentq to find the value of beta where
el_test([beta], [param_num])[1] is equal to the critical
value.
The function returns the results of each iteration of brentq at
each value of beta.
The current function value of the last printed optimization
should be the critical value at the desired significance level.
For alpha=.05, the value is 3.841459.
To ensure optimization terminated successfully, it is suggested to
do el_test([lower_limit], [param_num])
If the optimization does not terminate successfully, consider switching
optimization algorithms.
If optimization is still not successful, try changing the values of
start_int_params. If the current function value repeatedly jumps
from a number between 0 and the critical value and a very large number
(>50), the starting parameters of the interior minimization need
to be changed.
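Examples
--------
A hedged sketch (assuming `res` is a fitted OLSResults instance):
>>> lower, upper = res.conf_int_el(param_num=1, sig=.05)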
"""
r0 = chi2.ppf(1 - sig, 1)
if upper_bound is None:
upper_bound = self.conf_int(.01)[param_num][1]
if lower_bound is None:
lower_bound = self.conf_int(.01)[param_num][0]
f = lambda b0: self.el_test(np.array([b0]), np.array([param_num]),
method=method,
stochastic_exog=stochastic_exog)[0]-r0
lowerl = optimize.brenth(f, lower_bound,
self.params[param_num])
upperl = optimize.brenth(f, self.params[param_num],
upper_bound)
# ^ Seems to be faster than brentq in most cases
return (lowerl, upperl)
class RegressionResultsWrapper(wrap.ResultsWrapper):
_attrs = {
'chisq' : 'columns',
'sresid' : 'rows',
'weights' : 'rows',
'wresid' : 'rows',
'bcov_unscaled' : 'cov',
'bcov_scaled' : 'cov',
'HC0_se' : 'columns',
'HC1_se' : 'columns',
'HC2_se' : 'columns',
'HC3_se' : 'columns',
'norm_resid' : 'rows',
}
_wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(
base.LikelihoodResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(RegressionResultsWrapper,
RegressionResults)
if __name__ == "__main__":
import statsmodels.api as sm
data = sm.datasets.longley.load()
data.exog = add_constant(data.exog, prepend=False)
ols_results = OLS(data.endog, data.exog).fit() #results
gls_results = GLS(data.endog, data.exog).fit() #results
print(ols_results.summary())
# `summary()` no longer takes a `returns` keyword; the Summary object
# exposes the tables and a CSV rendering directly:
tables = ols_results.summary().tables
csv = ols_results.summary().as_csv()
"""
Summary of Regression Results
=======================================
| Dependent Variable: ['y']|
| Model: OLS|
| Method: Least Squares|
| Date: Tue, 29 Jun 2010|
| Time: 22:32:21|
| # obs: 16.0|
| Df residuals: 9.0|
| Df model: 6.0|
===========================================================================
| coefficient std. error t-statistic prob.|
---------------------------------------------------------------------------
| x1 15.0619 84.9149 0.1774 0.8631|
| x2 -0.0358 0.0335 -1.0695 0.3127|
| x3 -2.0202 0.4884 -4.1364 0.002535|
| x4 -1.0332 0.2143 -4.8220 0.0009444|
| x5 -0.0511 0.2261 -0.2261 0.8262|
| x6 1829.1515 455.4785 4.0159 0.003037|
| const -3482258.6346 890420.3836 -3.9108 0.003560|
===========================================================================
| Models stats Residual stats |
---------------------------------------------------------------------------
| R-squared: 0.995479 Durbin-Watson: 2.55949 |
| Adjusted R-squared: 0.992465 Omnibus: 0.748615 |
| F-statistic: 330.285 Prob(Omnibus): 0.687765 |
| Prob (F-statistic): 4.98403e-10 JB: 0.352773 |
| Log likelihood: -109.617 Prob(JB): 0.838294 |
| AIC criterion: 233.235 Skew: 0.419984 |
| BIC criterion: 238.643 Kurtosis: 2.43373 |
---------------------------------------------------------------------------
"""
|
DonBeo/statsmodels
|
statsmodels/regression/linear_model.py
|
Python
|
bsd-3-clause
| 92,206
|
[
"Gaussian"
] |
1070d5350c612b44f38811879cafcc27f46b9c848f6f832947a3f8ac19afc5bb
|
import random
import string
from django.contrib.auth import get_user_model
from django.db import models
from django.utils import timezone
import base32_crockford
API_KEY_LENGTH = 32
def generate_key(length):
return ''.join(
random.choice(string.digits + string.ascii_lowercase)
for __ in xrange(length))
class ShortManager(models.Manager):
def get_for_key(self, key):
return self.get(key=key)
class Short(models.Model):
key = models.CharField(
blank=True, db_index=True, max_length=100, null=True)
destination = models.URLField(blank=True, null=True)
image = models.ImageField(blank=True, null=True, upload_to='%y/%m/%d/')
created_by = models.ForeignKey(get_user_model())
created_at = models.DateTimeField(default=timezone.now)
objects = ShortManager()
def __unicode__(self):
if self.destination:
return self.destination
return self.image.url
def save(self, *args, **kwargs):
super(Short, self).save(*args, **kwargs)
if not self.key:
self.key = base32_crockford.encode(self.pk)
self.save()
class APIKey(models.Model):
user = models.ForeignKey(get_user_model())
key = models.CharField(max_length=API_KEY_LENGTH, blank=True)
def save(self, *args, **kwargs):
if not self.key:
self.key = generate_key(API_KEY_LENGTH)
super(APIKey, self).save(*args, **kwargs)
class Visit(models.Model):
short = models.ForeignKey(Short)
remote_addr = models.CharField(max_length=15)
user_agent = models.TextField(blank=True, null=True)
referrer = models.TextField(blank=True, null=True)
created_at = models.DateTimeField(default=timezone.now)
class Meta:
ordering = ('-created_at', )
def __unicode__(self):
return u"Visit"
|
sneeu/little
|
little/models.py
|
Python
|
mit
| 1,848
|
[
"VisIt"
] |
20e1925da52ecd91b3af8a48cfddd8b0df8395cfd6e457288363f97cf291a81a
|
from traits.api import HasTraits, Instance, ListStr, Str, Dict, Property
from traitsui.api import View, Group, Item
from mayavi.core.trait_defs import DEnum
from simphony.cuds.abc_modeling_engine import ABCModelingEngine
class EngineManager(HasTraits):
"""A basic container of Simphony Engine that comes with a GUI.
Additional panel can be added to support more operations related
to the modeling engines"""
#: Mappings of Simphony Modeling Engines in this manager
engines = Dict(Str, Instance(ABCModelingEngine))
#: Names of engines in the Manager
_engine_names = ListStr
#: Selected engine
engine = Property(depends_on="engine_name")
#: Selected engine name
engine_name = DEnum(values_name="_engine_names")
# Traits view
traits_view = View(Group(Item("engine_name", label="Engine Wrapper")),
resizable=True)
# ----------------------------------------------------
# Traits Property
# ----------------------------------------------------
def _get_engine(self):
if self.engine_name in self.engines:
return self.engines[self.engine_name]
else:
return None
def _set_engine(self, value):
if value not in self.engines.values():
msg = "{} is not an engine in the manager. Use ``add_engine()``"
raise ValueError(msg.format(value))
for name, engine in self.engines.items():
if value is engine:
self.engine_name = name
break
# ------------------------------------------------------
# Public methods
# ------------------------------------------------------
def add_engine(self, name, modeling_engine):
''' Add a Simphony Engine to the manager
Parameters
----------
name : str
Name to be associated with the modeling engine
modeling_engine : ABCModelingEngine
Simphony Engine Wrapper
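Examples
--------
A hedged sketch (``MyEngine`` stands in for any concrete
ABCModelingEngine implementation; illustrative only):
>>> manager = EngineManager()
>>> manager.add_engine("engine1", MyEngine())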
'''
if name in self.engines:
raise ValueError("{} is already added".format(name))
self.engines[name] = modeling_engine
self._engine_names = self.engines.keys()
def remove_engine(self, name):
''' Remove a modeling engine from the manager.
If the modeling engine to be removed is currently selected,
one of the remaining engines is selected instead
Parameters
----------
name : str
Name associated with the engine to be removed
'''
if name not in self.engines:
msg = "{} is not an engine in this manager"
raise KeyError(msg.format(name))
if len(self.engines) == 1:
msg = ("There will be no more engine if {} is removed. "
"Not removing it.")
raise IndexError(msg.format(name))
self.engines.pop(name)
if self.engine_name == name:
self.engine_name = self.engines.keys()[0]
self._engine_names = self.engines.keys()
|
simphony/simphony-mayavi
|
simphony_mayavi/plugins/engine_manager.py
|
Python
|
bsd-2-clause
| 3,024
|
[
"Mayavi"
] |
16ca04cc0026b58369770a8aec737b9c7fc9c0c67343f47bd19fd767d30b24ce
|
# Copyright (c) 2007 The Regents of The University of Michigan
# Copyright (c) 2010 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import m5
from m5 import internal
from m5.internal.stats import schedStatEvent as schedEvent
from m5.objects import Root
from m5.util import attrdict, fatal
outputList = []
def initText(filename, desc=True):
output = internal.stats.initText(filename, desc)
outputList.append(output)
def initSimStats():
internal.stats.initSimStats()
names = []
stats_dict = {}
stats_list = []
raw_stats_list = []
def enable():
'''Enable the statistics package. Before the statistics package is
enabled, all statistics must be created and initialized and once
the package is enabled, no more statistics can be created.'''
__dynamic_cast = []
for k, v in internal.stats.__dict__.iteritems():
if k.startswith('dynamic_'):
__dynamic_cast.append(v)
for stat in internal.stats.statsList():
for cast in __dynamic_cast:
val = cast(stat)
if val is not None:
stats_list.append(val)
raw_stats_list.append(val)
break
else:
fatal("unknown stat type %s", stat)
for stat in stats_list:
if not stat.check() or not stat.baseCheck():
fatal("statistic '%s' (%d) was not properly initialized " \
"by a regStats() function\n", stat.name, stat.id)
if not (stat.flags & flags.display):
stat.name = "__Stat%06d" % stat.id
def less(stat1, stat2):
v1 = stat1.name.split('.')
v2 = stat2.name.split('.')
return v1 < v2
stats_list.sort(less)
for stat in stats_list:
stats_dict[stat.name] = stat
stat.enable()
internal.stats.enable();
def prepare():
'''Prepare all stats for data access. This must be done before
dumping and serialization.'''
for stat in stats_list:
stat.prepare()
lastDump = 0
def dump():
'''Dump all statistics data to the registered outputs'''
curTick = m5.curTick()
global lastDump
assert lastDump <= curTick
if lastDump == curTick:
return
lastDump = curTick
internal.stats.processDumpQueue()
prepare()
for output in outputList:
if output.valid():
output.begin()
for stat in stats_list:
output.visit(stat)
output.end()
def reset():
'''Reset all statistics to the base state'''
# call reset stats on all SimObjects
root = Root.getInstance()
if root:
for obj in root.descendants(): obj.resetStats()
# call any other registered stats reset callbacks
for stat in stats_list:
stat.reset()
internal.stats.processResetQueue()
flags = attrdict({
'none' : 0x0000,
'init' : 0x0001,
'display' : 0x0002,
'total' : 0x0010,
'pdf' : 0x0020,
'cdf' : 0x0040,
'dist' : 0x0080,
'nozero' : 0x0100,
'nonan' : 0x0200,
})
|
xiaoyuanW/gem5
|
src/python/m5/stats/__init__.py
|
Python
|
bsd-3-clause
| 4,535
|
[
"VisIt"
] |
a949f954de9d1d72ea3093df23f1ea23d5dc3aa485bdc3f20c63a176f49e9e75
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Eficent (<http://www.eficent.com/>)
# <contact@eficent.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Purchase Request Procurement with Operating Units",
"version": "1.0",
"author": "Eficent",
"website": "www.eficent.com",
"category": "Purchase Management",
"depends": ["purchase_request_procurement",
"purchase_request_operating_unit"],
"description": """
Purchase Request Procurement with Operating Unit
================================================
This module passes the Operating Unit from the Procurement to the Purchase
Request.
Credits
=======
Contributors
------------
* Jordi Ballester <jordi.ballester@eficent.com>
Maintainer
----------
.. image:: http://odoo-community.org/logo.png
:alt: Odoo Community Association
:target: http://odoo-community.org
This module is maintained by the OCA.
OCA, or the Odoo Community Association, is a nonprofit organization whose
mission is to support the collaborative development of Odoo features and
promote its widespread use.
To contribute to this module, please visit http://odoo-community.org.
""",
'installable': True,
'active': False,
}
|
Eficent/odoo-operating-unit
|
purchase_request_procurement_operating_unit/__openerp__.py
|
Python
|
agpl-3.0
| 2,045
|
[
"VisIt"
] |
f0af185fbb5fc06105e5e7975a0b7b003a185235500fe155113b6f4b9b3d9925
|
#!/usr/bin/env python
"""Spectral indicator definition and measurements."""
import sys
import numpy as N
from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
from snspin.spectrum import smoothing
from snspin.spectrum import covariance
class Craniometer(object):
"""Initialization function."""
def __init__(self, wavelength, flux, variance):
"""
Spectral indicator measurements.
Class to feel bumps on SN spectra and conclude about how they
work internaly
How to use it:
Create the craniometer:
cranio = spin.Craniometer(wavelength, flux, variance)
Smooth the craniometer:
cranio.smooth()
Generate simulated spectra:
cranio.cranio_generator()
Find all extrema:
cranio.find_extrema()
and compute spectral indicators:
EWSi4000 = cranio.EW(3830, 3963, 4034, 4150, 'SiII4000')
values will be saved in cranio.EWvalues for EWs computing
Spectrum initialization.
self.x = wavelength
self.y = flux
self.v = variance
self.s = []
self.smoother = None
self.maxima = None
self.minima = None
self.ewvalues = {}
"""
self.x = wavelength
self.y = flux
self.v = variance
self.s = []
self.maxima = None
self.minima = None
self.ewvalues = {}
self.velocityvalues = {}
self.p3590 = [3504, 3687]
self.p3930 = [3887, 3990]
self.p4075 = [4034, 4140]
self.p4510 = [4452, 4573]
self.p5165 = [5085, 5250]
self.p5603 = [5550, 5681]
self.p5930 = [5850, 6015]
self.p6312 = [6250, 6365]
self.init_only = False
# =========================================================================
# Analyse the spectrum
# Functions to smooth the spectrum, to find extrema of the spectrum
# =========================================================================
def smooth(self, smoother="sgfilter", rho=0.482, s=None,
hsize=None, order=2, lim=None, verbose=False):
"""
Create the smoother function and makes a smooth array out of spec.y.
Mode = 0 : number and position are fixed (smoother='spline_fix_knot')
interpolate.LSQUnivariateSpline(self.x,
self.y,
t=(self.x[::12])[1:],
w = 1/(N.sqrt(self.v)))
Mode = 1 : number and position of knots are not fixed
(smoother='spline_free_knot')
interpolate.UnivariateSpline(self.x, self.y, w = 1/(N.sqrt(self.v)), s=s)
Mode = 2: used a savitzky_golay filter to smooth the spectrum
(smoother='sgfilter')
For mode = 0 or mode = 1: The spline function is put in self.smoother
The smoothed array is in self.s
The smoother type is in self.smoother_type
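Example (a hedged sketch; the arrays are illustrative):
cranio = Craniometer(wavelength, flux, variance)
cranio.smooth(smoother='sgfilter', hsize=15, order=2)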
"""
self.smoother_type = smoother
self.lim = lim # Limit for savitzky parameter
if smoother == "spline_free_knot":
if verbose:
mess = "<spin.Craniometer> using spline with free"
mess += "knots to smooth ", smoother
print >> sys.stderr, mess
self.spline_spec(mode=1, s=s, rho=rho, verbose=verbose)
elif smoother == "spline_fix_knot":
if verbose:
mess = "<spin.Craniometer> using spline with fixed"
mess += " knots to smooth ", smoother
print >> sys.stderr, mess
self.spline_spec(mode=0, s=s, rho=rho, verbose=verbose)
elif smoother == 'sgfilter':
if verbose:
mess = "<spin.Craniometer> using savitzky_golay filter"
print >> sys.stderr, mess
self.sg_filter(hsize=hsize, order=order, rho=rho, verbose=verbose)
else:
warn = "<spin.Craniometer> WARNING: smoother not"
warn += "implemented yet. Smoother asked for:", smoother
print >> sys.stderr, warn
def spline_spec(self, mode=1, s=None, rho=0.482, verbose=True):
"""
Create a spline with interpolate.
Mode = 0 : number and position are fixed
interpolate.LSQUnivariateSpline(self.x,
self.y,
t=(self.x[::12])[1:],
w = 1/(N.sqrt(self.v)))
Mode = 1 : number and position of knots are not fixed
interpolate.UnivariateSpline(self.x, self.y, w = 1/(N.sqrt(self.v)), s=s)
"""
rc = 1. - 2. * (rho**2)
if s is None:
try:
s = smoothing.spline_find_s(self.x,
self.y,
self.v * rc,
corr=(rho**2) / rc)
except Exception, err:
print "Error in spline_find_s:", err
s = 0.492 * len(self.x)
s = s[0] if isinstance(s, list) else s
if verbose:
print >> sys.stderr, 'best_s=%i' % s
if s <= 1:
s *= len(self.x)
self.smooth_parameter = s
if len(self.x) == len(self.y):
if len(self.v) > 0:
if mode == 0:
self.spline = LSQUnivariateSpline(self.x,
self.y,
t=(self.x[::12])[1:],
w=1 / (N.sqrt(self.v)))
if mode == 1:
self.spline = UnivariateSpline(self.x,
self.y,
w=1 / (N.sqrt(self.v)),
s=s)
# Compute chi square for each point
self.spline.chi2i = []
for j in range(len(self.x)):
self.spline.chi2i.append(((self.y[j]
- self.spline(self.x[j])[0])**2)
/ (self.v[j]))
self.spline.chi2 = self.spline.get_residual()\
/ (len(self.x)
- (len(self.spline.get_coeffs()) - 4))
# Save smooth function
self.s = self.spline(self.x)
else:
if verbose:
print >> sys.stderr, "No variance informations"
self.spline = None
else:
if verbose:
print >> sys.stderr, "ERROR. len(Wavelenght) != len(flux)"
def sg_filter(self, hsize=None, order=2, rho=0.0, verbose=False):
"""
Use savitzky_golay() to apply a savitzky golay filter on the spectrum.
Input:
- hsize : half size of the window (default:15)
- order : order of the polynome used to smooth the spectrum (default:2)
Output:
- self.s : smoothing spectrum
"""
rc = 1. - 2. * (rho**2)
if hsize is None:
try:
hsize = int(smoothing.sg_find_num_points(self.x,
self.y,
self.v * rc,
corr=(rho**2) / rc))
except Exception, err:
if verbose:
print >> sys.stderr, 'ERROR in computing of best hsize', err
hsize = 15
if (hsize * 2) + 1 < (order + 2):
hsize = 10 # order/2.+1
if self.lim is not None and hsize < self.lim:
hsize = self.lim # for oxygen zone only
if verbose:
print >> sys.stderr, 'best_w=%i' % hsize
self.s = smoothing.savitzky_golay(self.y,
kernel=(int(hsize) * 2) + 1,
order=order,
derivative=0)
self.s_deriv = smoothing.savitzky_golay(self.y,
kernel=(int(hsize) * 2) + 1,
order=order,
derivative=1)
self.hsize = hsize
self.order = order
def smoother(self, lbd, verbose=False):
"""Smooth the spectrum."""
if len(self.s) != len(self.x):
# If no smoothing function
if verbose:
print >> sys.stderr, 'ERROR: len(self.s) != len(self.x)'
return None
elif self.smoother_type != 'sgfilter':
# If smoothing function is a spline
return self.spline(lbd)
else:
# If smoothing function is a sgfilter
if N.isscalar(lbd):
if verbose:
print >> sys.stderr, 'lbd is a scalar'
if lbd < self.x[0] or lbd > self.x[-1]:
if verbose:
mess = 'ERROR: lbd is not in the range, %.2f<lbd<%.2f' %\
(self.x[0], self.x[-1])
print >> sys.stderr, mess
return None
else:
flux = self.s[(self.x > lbd - 2) & (self.x < lbd + 2)]
return flux[0]
else:
if verbose:
print >> sys.stderr, 'lbd is an array'
for i in lbd:
if i < self.x[0] or i > self.x[-1]:
if verbose:
mess = 'ERROR: %.2f<lbd<%.2f' % (
self.x[0], self.x[-1])
print >> sys.stderr, mess
return None
flux = N.array([float(self.s[(self.x > (l - 1)) &
(self.x < (l + 1))])
for l in lbd])
return flux
def find_extrema(self, verbose=True):
"""
Function to find all extrema in a smoothed spectrum.
Return two arrays : maxima = {'x':maxima_x,
'y':maxima_y,
's':maxima_s,
'v':maxima_v}
minima = {'x':minima_x,
'y':minima_y,
's':minima_s,
'v':minima_v}
and save it in self.maxima and self.minima
"""
if not len(self.s):
self.maxima = None
self.minima = None
if verbose:
mess = "ERROR! Incompatible or non existent smoothing spectrum"
print >> sys.stderr, mess
print >> sys.stderr, "[try spec.smooth()]"
else:
# find extrema for the real spectrum
maxima, minima = self._extrema(self.x, self.y, self.v, self.s)
self.maxima = maxima
self.minima = minima
def cranio_generator(self, nsimu=1000, rho=0.482, correl=True,
factor=1, simus=None, verbose=True):
"""
Simulation generator.
Generate 'nsimu' simulated spectra from the smooth function applied
to the spectrum, make a smoother and find extrema on each of them,
and save them in new craniometer objects in self.simulations
Default: smoother='sgfilter'
Default parameters for spline fitting:
s=0.492, rho=0.23 (mean values)
Default parameters for savitzky golay filter:
hsize=15, order=2 (mean value)
"""
self.v *= factor
self.rho = rho
self.simulations = []
if simus is not None:
simulated_spectra = simus
nsimu = len(simus)
else:
# Create gaussian distribution and simulated spectra
if correl:
if verbose:
print >> sys.stderr, 'Simulations with correlated pixels'
simulated_spectra = self._correl_simulated_spectra(nsimu, rho=rho)
else:
normal_distribution = N.random.randn(nsimu, len(self.x))
simulated_spectra = normal_distribution * (N.sqrt((self.v))) + self.s
# Smooth and save simulated spectra
for simulated_spectrum, number in zip(simulated_spectra, range(nsimu)):
self.simulations.append(Craniometer(self.x,
simulated_spectrum,
self.v))
if self.smoother_type == 'spline_fix_knot':
self.simulations[number].smooth(smoother=self.smoother_type,
rho=rho,
verbose=False)
elif self.smoother_type == 'spline_free_knot':
self.simulations[number].smooth(smoother=self.smoother_type,
rho=rho,
s=self.smooth_parameter,
verbose=False)
elif self.smoother_type == 'sgfilter':
self.simulations[number].smooth(smoother=self.smoother_type,
hsize=self.hsize,
order=self.order,
verbose=False)
self.simulations[number].find_extrema(verbose=False)
try:
self.systematic_error()
except Exception, err:
self.syst = None
print >> sys.stderr, "ERROR in systematic_error (cranio_generator)", err
def _correl_simulated_spectra(self, nsimu, rho=0.482):
"""Correlate the noise of simulated sptectra."""
def comp_alpha(rho):
"""Compute alpha."""
return 0.5 * (1 + N.sqrt(1 - 4 * (rho**2)))
def comp_beta(rho):
"""Compute beta."""
return 0.5 * (1 - N.sqrt(1 - 4 * (rho**2)))
alpha = comp_alpha(rho)
beta = comp_beta(rho)
normal_distribution = N.random.randn(nsimu, len(self.x) + 1)
normal_distribution_correl = N.zeros((nsimu, len(self.x)))
for i in range(normal_distribution.shape[0]):
normal_distribution_correl[i] = alpha * normal_distribution[i][:-1] \
+ beta * normal_distribution[i][1:]
simulated_spectra = normal_distribution_correl * (N.sqrt((self.v))) \
+ self.s
return simulated_spectra
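# Editor's illustration (not part of the original source): why the alpha/beta
# mixing above produces pixel-to-pixel correlation. With alpha + beta = 1 and
# alpha * beta = rho**2, neighbouring pixels share one normal draw, so their
# covariance is alpha * beta = rho**2. Demo-only function and names.
def _demo_correlated_noise(rho=0.482, nsimu=200, npix=2000):
    import numpy as np
    alpha = 0.5 * (1 + np.sqrt(1 - 4 * rho**2))
    beta = 0.5 * (1 - np.sqrt(1 - 4 * rho**2))
    noise = np.random.randn(nsimu, npix + 1)
    correlated = alpha * noise[:, :-1] + beta * noise[:, 1:]
    # empirical lag-1 covariance, expected to be close to rho**2 ~ 0.232
    lag1_cov = np.mean(correlated[:, :-1] * correlated[:, 1:])
    return alpha, beta, lag1_cov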
def systematic_error(self):
"""
Comput ethe systematic error.
15% on hsize and 10% on s
create an un-pre-defined number of craniometer with the variation
of hsize or s and save it.
in each spectral indicators functions, compute this one for each new
craniometer and compute the standard deviation between those new ones
and the initial one
"""
self.syst = []
if self.smoother_type == 'sgfilter':
# For cases where hsize is large, one has to make sure that the
# window explored doesn't include cases where hsize is larger than
# the size of the data
hsizes = N.arange(self.hsize * 0.85,
min(len(self.x), self.hsize * 1.15),
dtype=int)
for hsize, number in zip(hsizes, range(len(hsizes))):
self.syst.append(Craniometer(self.x, self.y, self.v))
self.syst[number].smooth(smoother=self.smoother_type,
hsize=hsize,
order=self.order,
verbose=False)
self.syst[number].find_extrema(verbose=False)
else:
sparams = N.linspace(self.smooth_parameter * 0.9,
self.smooth_parameter * 1.1, 6)
for s, number in zip(sparams, range(len(sparams))):
self.syst.append(Craniometer(self.x, self.y, self.v))
self.syst[number].smooth(smoother=self.smoother_type,
rho=self.rho,
s=s,
verbose=False)
self.syst[number].find_extrema(verbose=False)
# =========================================================================
# Utilities to compute spectral indicators
# Functions to integrate, compute line ratios, variances...
# =========================================================================
def _extrema(self, x, y, v, s, w=12, StoN=0.80):
"""
Find all significant extrema of a spectrum or of a spectrum's zone.
Output:
maxima = {'x':N.array(maxima_x),
'y':N.array(maxima_y),
's':N.array(maxima_s),
'v':N.array(maxima_v)}
minima = {'x':N.array(minima_x),
'y':N.array(minima_y),
's':N.array(minima_s),
'v':N.array(minima_v)}
"""
# parameters for the window (w bins on each side of the extremum)
# 2 functions to keep only one extremum in a window
def minima(i, w):
"""Get the minimun."""
if (i > w) and (i < (len(x) - w)):
window = (x >= x[i - w]) & (x <= x[i + w])
lbdmin = (x[window])[N.argmin(s[window])]
return x[i] == lbdmin
else:
return False
def maxima(i, w):
"""Get the maximum."""
if (i > w) and (i < (len(x) - w)):
window = (x >= x[i - w]) & (x <= x[i + w])
lbdmax = (x[window])[N.argmax(s[window])]
return x[i] == lbdmax
else:
return False
def signaltonoise(i):
"""Get the signal to noise."""
good = (x > (x[i] - 20)) & (x < (x[i] + 20))
return (y[i] / N.sqrt(v[i])) \
/ N.mean(y[good] / N.sqrt(v[good])) >= StoN
# Define arrays: lambda, flux, smooth values and variance
# for maxima and minima
minima_x, minima_y, minima_s, minima_v = [], [], [], []
maxima_x, maxima_y, maxima_s, maxima_v = [], [], [], []
# parameter initialization
p = not s[0] < s[1]
# Find extrema
for i in range(len(x) - 1):
if not p:
if s[i] <= s[i + 1]:
continue
elif not maxima(i, w):
p = 1
continue
elif not signaltonoise(i):
p = 1
continue
else:
maxima_x.append(x[i])
maxima_y.append(y[i])
maxima_s.append(s[i])
maxima_v.append(v[i])
p = 1
else:
if s[i] >= s[i + 1]:
continue
elif not minima(i, w):
p = 0
continue
else:
minima_x.append(x[i])
minima_y.append(y[i])
minima_s.append(s[i])
minima_v.append(v[i])
p = 0
# Create output
maxima = {'x': N.array(maxima_x),
'y': N.array(maxima_y),
's': N.array(maxima_s),
'v': N.array(maxima_v)}
minima = {'x': N.array(minima_x),
'y': N.array(minima_y),
's': N.array(minima_s),
'v': N.array(minima_v)}
return maxima, minima
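# Editor's illustration (not part of the original source): the "one extremum
# per +/- w bins" rule used by the minima()/maxima() helpers above is the
# same criterion as scipy.signal.argrelextrema with order=w. Demo-only names.
def _demo_windowed_extrema(w=12):
    import numpy as np
    from scipy.signal import argrelextrema
    x = np.linspace(0., 10., 500)
    s = np.sin(3 * x) * np.exp(-0.1 * x)
    imax = argrelextrema(s, np.greater, order=w)[0]  # local maxima indices
    imin = argrelextrema(s, np.less, order=w)[0]     # local minima indices
    return x[imax], x[imin]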
def _integration(self, x, y, imin=None, imax=None, verbose=True):
"""Intergate over a area."""
if imin is None \
or imax is None \
or (imin >= imax) \
or (imin <= 0) \
or (imax <= 0):
if verbose:
print >> sys.stderr, "ERROR in the definition of extrema"
return N.nan
elif x[0] > imin or x[-1] < imax:
if verbose:
print >> sys.stderr, "ERROR. Extrema are not in the interval"
return N.nan
else:
return float(y[(N.array(x) >= imin) & (N.array(x) <= imax)].sum())
def _var_integration(self, x, v, imin=None, imax=None, verbose=True):
"""Compute variance of an intergration."""
if len(v):
var_int = v[(N.array(x) > imin) & (N.array(x) < imax)].sum()
else:
if verbose:
print >> sys.stderr, "No variance for this spectrum"
var_int = N.nan
return float(var_int)
def _var_rapport(self, a, b, var_a, var_b, verbose=True):
"""Compute variance for a/b."""
if a and b and var_a and var_b:
var = (1 / b**2) * var_a + (a**2) * (var_b / b**4)
else:
if verbose:
mess = "Incompatible values to compute the ratio variance"
print >> sys.stderr, mess
var = N.nan
return float(var)
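# Editor's illustration (not part of the original source): _var_rapport()
# above is first-order error propagation for a ratio,
# var(a/b) ~= var_a / b**2 + a**2 * var_b / b**4. A quick Monte-Carlo
# cross-check with demo-only numbers:
def _demo_ratio_variance(n=100000):
    import numpy as np
    a, b, var_a, var_b = 10.0, 5.0, 0.4, 0.1
    analytic = var_a / b**2 + (a**2) * var_b / b**4
    ra = a + np.sqrt(var_a) * np.random.randn(n)
    rb = b + np.sqrt(var_b) * np.random.randn(n)
    return analytic, np.var(ra / rb)  # both close to 0.032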
def _equivalentdepth(self, lbd1=None, lbd2=None, lbd3=None, flux1=None,
flux2=None, flux3=None, verbose=True):
"""Compute an equivalent depth."""
if lbd1 >= lbd2 or lbd2 >= lbd3 or lbd1 >= lbd3:
if verbose:
print >> sys.stderr, 'ERROR in the definition of wavelengths '\
'to compute equivalent depth (lbds: %.2f, %.2f, %.2f)' % \
(lbd1, lbd2, lbd3)
return N.nan
else:
p = N.polyfit([lbd1, lbd3], [flux1, flux3], 1) # y=p[0]*x+p[1]
return float(N.polyval(p, lbd2) - flux2)
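# Editor's illustration (not part of the original source): the equivalent
# depth is the distance between the linear pseudo-continuum through the two
# surrounding peaks and the flux at the central minimum. Demo-only numbers.
def _demo_equivalent_depth():
    import numpy as np
    lbd1, lbd2, lbd3 = 5600., 5750., 5900.
    flux1, flux2, flux3 = 1.0, 0.4, 0.8
    p = np.polyfit([lbd1, lbd3], [flux1, flux3], 1)  # continuum: y = p[0]*x + p[1]
    return float(np.polyval(p, lbd2) - flux2)        # 0.9 - 0.4 = 0.5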
def _equivalentwidth(self, x, y, lbd1=None, lbd2=None, flux1=None,
flux2=None, verbose=True):
"""Compute an equivalent width."""
if lbd1 >= lbd2:
if verbose:
print >> sys.stderr, 'ERROR in the definition of '\
'wavelengths to compute equivalent width'
return N.nan
else:
step = x[1] - x[0]
p = N.polyfit([lbd1, lbd2], [flux1, flux2], 1) # y=p[0]*x+p[1]
x_new = x[(x >= lbd1) & (x <= lbd2)]
y_new = y[(x >= lbd1) & (x <= lbd2)]
integration = N.sum((N.polyval(p, x_new) - y_new) /
N.polyval(p, x_new)) * step
return float(integration)
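# Editor's illustration (not part of the original source): the equivalent
# width integrates the relative deficit (continuum - flux) / continuum over
# [lbd1, lbd2]; for a Gaussian dip of amplitude A and width sigma on a flat
# continuum it tends to A * sigma * sqrt(2 * pi). Demo-only numbers.
def _demo_equivalent_width():
    import numpy as np
    x = np.arange(5600., 5901., 1.)  # step = 1 angstrom
    continuum = np.ones_like(x)
    flux = continuum - 0.5 * np.exp(-0.5 * ((x - 5750.) / 20.)**2)
    step = x[1] - x[0]
    ew = np.sum((continuum - flux) / continuum) * step
    return ew  # ~ 0.5 * 20 * sqrt(2 * pi) ~ 25 angstroms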
def _extrema_value_in_interval(self, imin, imax, lbd, var, smooth,
extrema=None, right=False, left=False):
"""
Find extrema.
Function to find extrema values (lambda, flux and variance) in a given
interval. Values are searched in self.minima and self.maxima.
Use extrema='minima' to find minima, and extrema='maxima' to find maxima.
The interval is defined by imin < lambda < imax.
"""
try:
filt = (N.array(lbd) > imin) & (N.array(lbd) < imax)
if not sum(filt):
return [None, None, None]
if extrema == 'maxima':
if right:
arg = N.argmax(lbd[filt])
elif left:
arg = N.argmin(lbd[filt])
else:
arg = N.argmax(smooth[filt])
elif extrema == 'minima':
if right:
arg = N.argmax(lbd[filt])
elif left:
arg = N.argmin(lbd[filt])
else:
arg = N.argmin(smooth[filt])
wavelength = (lbd[(lbd >= imin) & (lbd <= imax)])[arg]
flux = (smooth[(lbd >= imin) & (lbd <= imax)])[arg]
variance = (var[(lbd >= imin) & (lbd <= imax)])[arg]
return wavelength, flux, variance
except TypeError:
return [None, None, None]
def _find_special_peak(self, imin, imax, maxima=False,
minima=False, right=False, left=False):
"""Find peak when other method failed."""
if maxima is False and minima is False:
return None, None, None
limit = (self.x >= imin) & (self.x <= imax)
maxi, mini = self._extrema(self.x[limit], self.y[limit],
self.v[limit], self.s[limit], w=1)
if (maxima and not len(maxi['x'])) or (minima and not len(mini['x'])):
return None, None, None
if right:
if maxima:
arg = N.argmax(maxi['x'])
return maxi['x'][arg], maxi['s'][arg], maxi['v'][arg]
elif minima:
arg = N.argmax(mini['x'])
return mini['x'][arg], mini['s'][arg], mini['v'][arg]
else:
return None, None, None
elif left:
if maxima:
arg = N.argmin(maxi['x'])
return maxi['x'][arg], maxi['s'][arg], maxi['v'][arg]
elif minima:
arg = N.argmin(mini['x'])
return mini['x'][arg], mini['s'][arg], mini['v'][arg]
else:
return None, None, None
else:
if maxima:
arg = N.argmax(maxi['s'])
return maxi['x'][arg], maxi['s'][arg], maxi['v'][arg]
elif minima:
arg = N.argmin(mini['s'])
return mini['x'][arg], mini['s'][arg], mini['v'][arg]
else:
return None, None, None
def max_of_interval(self, imin, imax):
"""Find maximum value in an interval."""
la = self.x[(self.x > imin) & (self.x < imax)]
fa = self.smoother(la)
arg = N.argmax(fa)
l, f = la[arg], fa[arg]
v = 0.0
return l, f, v
def std2(self, x, x0):
"""
Compute the standard deviation of the distribution x with respect to x0.
x0 can be the mean or another value.
"""
return N.sqrt(N.mean(N.absolute(x - x0)**2))
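# Editor's illustration (not part of the original source): std2() is the RMS
# deviation about an arbitrary reference x0; it reduces to the population
# standard deviation when x0 is the sample mean. Demo-only names.
def _demo_std2():
    import numpy as np
    x = np.random.randn(10000) + 3.0
    rms_about_mean = np.sqrt(np.mean(np.abs(x - x.mean())**2))  # ~ 1.0
    rms_about_zero = np.sqrt(np.mean(np.abs(x - 0.0)**2))       # ~ sqrt(1 + 9)
    return rms_about_mean, rms_about_zero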
def _get_min(self, lbd):
"""
Get the minimum of the flux around a given bin.
It uses the derivative of the smoothed function.
A linear interpolation is made using the given
bin and the left and right bins.
"""
# now take the minimum and the bins around it
bin_c = N.argmin(N.abs(self.x - lbd))
xx = self.x[bin_c - 1:bin_c + 1]
yy = self.s_deriv[bin_c - 1:bin_c + 1]
# make a linear fit
pol = N.polyfit(xx, yy, 1)
# check if this is consistent with a ~1 bin shift
if N.abs(lbd + pol[1] / pol[0]) < 1.5 * (self.x[1] - self.x[0]):
return -(pol[1] / pol[0])
else:
return lbd
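# Editor's illustration (not part of the original source): _get_min() refines
# a minimum by fitting a line to the derivative around the bin and taking its
# zero crossing at -p[1] / p[0]. Demo-only numbers.
def _demo_derivative_zero_crossing():
    import numpy as np
    x = np.array([5747., 5748., 5749.])
    deriv = 2.0 * (x - 5748.3)       # derivative of (x - 5748.3)**2
    p = np.polyfit(x, deriv, 1)      # p[0] ~ 2, p[1] ~ -2 * 5748.3
    return -p[1] / p[0]              # ~ 5748.3, a sub-bin estimate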
# =========================================================================
# Compute spectral indicators on the spectrum
# Functions to compute several spectral indicators
# =========================================================================
def rca(self, verbose=True, simu=True, syst=True):
"""
Return the value and the error of rca.
[rca, rca_sigma]
"""
# Initialisation
self.rcavalues = {'rca': N.nan, 'rca.err': N.nan, 'rca.stat': N.nan,
'rca.syst': N.nan, 'rca.mean': N.nan,
'rca_lbd': [N.nan, N.nan], 'rca_flux': [N.nan, N.nan]}
if self.init_only:
return
lbd1, flux1, var1 = self._extrema_value_in_interval(self.p3590[0],
self.p3590[1],
self.maxima['x'],
self.maxima['v'],
self.maxima['s'],
extrema='maxima',
right=True)
if simu and lbd1 is None:
lbd1, flux1, var1 = self.max_of_interval(self.p3590[0], self.p3590[1])
lbd2, flux2, var2 = self._extrema_value_in_interval(self.p3930[0],
self.p3930[1],
self.maxima['x'],
self.maxima['v'],
self.maxima['s'],
extrema='maxima')
if simu and lbd2 is None:
lbd2, flux2, var2 = self.max_of_interval(self.p3930[0], self.p3930[1])
if flux1 != 0 and isinstance(flux1, float) and isinstance(flux2, float):
rca_value = flux2 / flux1
else:
if verbose:
print >> sys.stderr, "ERROR in computing rca"
rca_value = N.nan
if simu:
if not N.isfinite(rca_value):
return [float(N.nan), float(N.nan)]
rca_simu = []
for simu in self.simulations:
try:
rca_simu.append(simu.rca(simu=False, syst=False, verbose=False))
except TypeError:
continue
rca_sigma = self.std2(N.array(rca_simu)[N.isfinite(rca_simu)],
rca_value)
rca_mean = N.mean(N.array(rca_simu)[N.isfinite(rca_simu)])
if N.isfinite(rca_value):
self.rcavalues = {'rca': float(rca_value),
'rca.err': float(rca_sigma),
'rca.stat': float(rca_sigma),
'rca.mean': float(rca_mean),
'rca_lbd': [float(lbd1), float(lbd2)],
'rca_flux': [float(flux1), float(flux2)]}
if syst:
rca_syst = []
for system in self.syst:
try:
rca_syst.append(system.rca(syst=False, simu=False,
verbose=False))
except TypeError:
continue
rca_sigma_syst = self.std2(N.array(rca_syst)[N.isfinite(rca_syst)],
rca_value)
if N.isfinite(rca_sigma_syst):
rca_sigma = float(N.sqrt(rca_sigma**2 + rca_sigma_syst**2))
else:
rca_sigma *= 2
self.rcavalues['rca.syst'] = float(rca_sigma_syst)
self.rcavalues['rca.err'] = float(rca_sigma)
return [float(rca_value), float(rca_sigma)]
if simu is False and syst is False:
if N.isfinite(rca_value):
self.rcavalues = {'rca': float(rca_value),
'rca_lbd': [float(lbd1), float(lbd2)],
'rca_flux': [float(flux1), float(flux2)],
'rca.err': N.nan, 'rca.stat': N.nan,
'rca.mean': N.nan}
return rca_value
def rcas(self, verbose=True, simu=True, syst=True):
"""
Return the value and the error of rcas.
[rcas, rcas_sigma]
"""
# Initialisation
self.rcasvalues = {'rcas': N.nan, 'rcas.err': N.nan, 'rcas.stat': N.nan,
'rcas.syst': N.nan, 'rcas.mean': N.nan,
'rcas_lbd': [N.nan, N.nan, N.nan, N.nan]}
if self.init_only:
return
min_1 = 3620
max_1 = 3716
min_2 = 3887
max_2 = 4012
try:
rcas_value = (self._integration(self.x, self.y, imin=min_2, imax=max_2,
verbose=verbose)) / \
(self._integration(self.x, self.y,
imin=min_1,
imax=max_1,
verbose=verbose))
except TypeError:
if verbose:
print >> sys.stderr, 'ERROR in computing rcas'
rcas_value = float(N.nan)
if simu:
if not N.isfinite(rcas_value):
return [float(N.nan), float(N.nan)]
rcas_simu = []
for simu in self.simulations:
try:
rcas_simu.append(simu.rcas(simu=False, syst=False,
verbose=False))
except TypeError:
continue
rcas_sigma = self.std2(N.array(rcas_simu)[N.isfinite(rcas_simu)],
rcas_value)
rcas_mean = N.mean(N.array(rcas_simu)[N.isfinite(rcas_simu)])
if N.isfinite(rcas_value):
self.rcasvalues = {'rcas': float(rcas_value),
'rcas.err': float(rcas_sigma),
'rcas.stat': float(rcas_sigma),
'rcas.mean': float(rcas_mean),
'rcas_lbd': [float(min_1),
float(max_1),
float(min_2),
float(max_2)]}
if syst:
rcas_syst = []
for system in self.syst:
try:
rcas_syst.append(system.rcas(simu=False,
syst=False, verbose=False))
except TypeError:
continue
rcas_sigma_syst = self.std2(
N.array(rcas_syst)[N.isfinite(rcas_syst)], rcas_value)
if N.isfinite(rcas_sigma_syst):
rcas_sigma = float(N.sqrt(rcas_sigma**2 + rcas_sigma_syst**2))
else:
rcas_sigma *= 2
self.rcasvalues['rcas.syst'] = float(rcas_sigma_syst)
self.rcasvalues['rcas.err'] = float(rcas_sigma)
return [float(rcas_value), float(rcas_sigma)]
if simu is False and syst is False:
if N.isfinite(rcas_value):
self.rcasvalues = {'rcas': float(rcas_value),
'rcas_lbd': [min_1, max_1, min_2, max_2]}
return rcas_value
def rcas2(self, verbose=True, simu=True, syst=True):
"""
New rcas where the integration bounds follow the measured peaks.
Return the value and the error of rcas
[rcas, rcas_sigma]
"""
interval_1 = 48
interval_2 = 62.5
# Initialisation
self.rcas2values = {'rcas2': N.nan, 'rcas2.err': N.nan,
'rcas2.stat': N.nan,
'rcas2.syst': N.nan,
'rcas2.mean': N.nan,
'rcas2_lbd': [N.nan, N.nan, N.nan, N.nan]}
if self.init_only:
return
try:
lbd1, flux1, var1 = self._extrema_value_in_interval(self.p3590[0],
self.p3590[1],
self.maxima['x'],
self.maxima['v'],
self.maxima['s'],
extrema='maxima')
if simu and lbd1 is None:
lbd1, flux1, var1 = self.max_of_interval(self.p3590[0],
self.p3590[1])
lbd2, flux2, var2 = self._extrema_value_in_interval(self.p3930[0],
self.p3930[1],
self.maxima[
'x'],
self.maxima[
'v'],
self.maxima[
's'],
extrema='maxima')
if simu and lbd2 is None:
lbd2, flux2, var2 = self.max_of_interval(self.p3930[0],
self.p3930[1])
min_1 = lbd1 - interval_1
max_1 = lbd1 + interval_1
min_2 = lbd2 - interval_2
max_2 = lbd2 + interval_2
rcas2_value = (self._integration(self.x, self.y, imin=min_2,
imax=max_2, verbose=verbose)) / \
self._integration(self.x, self.y,
imin=min_1,
imax=max_1,
verbose=verbose)
except TypeError:
if verbose:
print >> sys.stderr, 'ERROR in computing rcas2'
rcas2_value = float(N.nan)
if simu:
if not N.isfinite(rcas2_value):
return [float(N.nan), float(N.nan)]
rcas2_simu = []
for simu in self.simulations:
try:
rcas2_simu.append(simu.rcas2(simu=False,
syst=False,
verbose=False))
except TypeError:
continue
rcas2_sigma = self.std2(N.array(rcas2_simu)[N.isfinite(rcas2_simu)],
rcas2_value)
rcas2_mean = N.mean(N.array(rcas2_simu)[N.isfinite(rcas2_simu)])
if N.isfinite(rcas2_value):
self.rcas2values = {'rcas2': float(rcas2_value),
'rcas2.err': float(rcas2_sigma),
'rcas2.stat': float(rcas2_sigma),
'rcas2.mean': float(rcas2_mean),
'rcas2_lbd': [float(min_1), float(max_1),
float(min_2), float(max_2)]}
if syst:
rcas2_syst = []
for system in self.syst:
try:
rcas2_syst.append(system.rcas2(simu=False,
syst=False,
verbose=False))
except TypeError:
continue
rcas2_sigma_syst = self.std2(
N.array(rcas2_syst)[N.isfinite(rcas2_syst)], rcas2_value)
if N.isfinite(rcas2_sigma_syst):
rcas2_sigma = float(N.sqrt(rcas2_sigma**2 +
rcas2_sigma_syst**2))
else:
rcas2_sigma *= 2
self.rcas2values['rcas2.syst'] = float(rcas2_sigma_syst)
self.rcas2values['rcas2.err'] = float(rcas2_sigma)
return [float(rcas2_value), float(rcas2_sigma)]
if simu is False and syst is False:
if N.isfinite(rcas2_value):
self.rcas2values = {'rcas2': float(rcas2_value),
'rcas2_lbd': [float(min_1),
float(max_1),
float(min_2),
float(max_2)],
'rcas2.err': N.nan,
'rcas2.stat': N.nan,
'rcas2.mean': N.nan}
return rcas2_value
def rsi(self, verbose=True, simu=True, syst=True):
"""
Return the value and the error of rsi.
[rsi, rsi_sigma]
"""
# initialisation
self.rsivalues = {'rsi': N.nan, 'rsi.err': N.nan, 'rsi.stat': N.nan,
'rsi.syst': N.nan, 'rsi.mean': N.nan, 'rsi_lbd': N.nan}
if self.init_only:
return
lbd1, flux1, var1 = self._extrema_value_in_interval(self.p5603[0],
self.p5603[1],
self.maxima['x'],
self.maxima['v'],
self.maxima['s'],
extrema='maxima')
if lbd1 is None:
lbd1, flux1, var1 = self.max_of_interval(self.p5603[0], self.p5603[1])
lbd2, flux2, var2 = self._extrema_value_in_interval(5700, 5849,
self.minima['x'],
self.minima['v'],
self.minima['s'],
extrema='minima')
if lbd2 is None:
try:
lbd2, flux2, var2 = self._find_special_peak(5700, 5849,
minima=True)
except TypeError:
lbd2, flux2, var2 = None, None, None
if simu and lbd2 is None:
lbd2, flux2, var2 = self.max_of_interval(5700, 5849)
lbd3, flux3, var3 = self._extrema_value_in_interval(5850, 6050,
self.maxima['x'],
self.maxima['v'],
self.maxima['s'],
extrema='maxima',
right=True)
if lbd3 is None:
try:
lbd3, flux3, var3 = self._find_special_peak(self.p5930[0],
self.p5930[1],
maxima=True,
right=True)
except TypeError:
lbd3, flux3, var3 = None, None, None
if simu and lbd3 is None:
lbd3, flux3, var3 = self.max_of_interval(self.p5930[0],
self.p5930[1])
lbd4, flux4, var4 = self._extrema_value_in_interval(6000, 6210,
self.minima['x'],
self.minima['v'],
self.minima['s'],
extrema='minima')
if simu and lbd4 is None:
lbd4, flux4, var4 = self.max_of_interval(6000, 6210)
lbd5, flux5, var5 = self._extrema_value_in_interval(self.p6312[0],
self.p6312[1],
self.maxima['x'],
self.maxima['v'],
self.maxima['s'],
extrema='maxima')
if simu and lbd5 is None:
lbd5, flux5, var5 = self.max_of_interval(self.p6312[0], self.p6312[1])
# Check if the straight line is under the smoothing function
x = N.polyval(N.polyfit([lbd3, lbd5], [flux3, flux5], 1),
self.x[(self.x > lbd3) & (self.x < lbd5)]) - \
self.s[(self.x > lbd3) & (self.x < lbd5)]
while len(x[x < 0]):
lbd3 = self.x[(self.x == lbd3).nonzero()[0][0] + 1]
flux3 = self.smoother(lbd3)
x = N.polyval(N.polyfit([lbd3, lbd5], [flux3, flux5], 1),
self.x[(self.x > lbd3) & (self.x < lbd5)]) - \
self.s[(self.x > lbd3) & (self.x < lbd5)]
if lbd2 is None and lbd1 is not None and lbd3 is not None:
try:
p = N.polyfit([lbd1, lbd3], [flux1, flux3], 1)
interval = (self.x >= lbd1) & (self.x <= lbd3)
lbd2 = (self.x[interval])[N.argmax(N.polyval(p,
self.x[interval])
- self.s[interval])]
flux2 = (self.s[interval])[N.argmax(N.polyval(p,
self.x[interval])
- self.s[interval])]
except TypeError:
lbd2, flux2, var2 = None, None, None
lbd = [float(lbd1), float(lbd2), float(lbd3), float(lbd4), float(lbd5)]
flux = [flux1, flux2, flux3, flux4, flux5]
try:
d_blue = self._equivalentdepth(lbd1=lbd[0], lbd2=lbd[1],
lbd3=lbd[2], flux1=flux[0],
flux2=flux[1], flux3=flux[2],
verbose=verbose)
d_red = self._equivalentdepth(lbd1=lbd[2], lbd2=lbd[3],
lbd3=lbd[4], flux1=flux[2],
flux2=flux[3], flux3=flux[4],
verbose=verbose)
rsi_value = d_blue / d_red
except TypeError:
if verbose:
print >> sys.stderr, 'ERROR in computing rsi: no '\
'wavelength to compute rsi, or maybe no extrema found; '\
'try self.find_extrema()'
rsi_value = N.nan
if simu:
if not N.isfinite(rsi_value):
return [float(N.nan), float(N.nan)]
rsi_simu = []
for simu in self.simulations:
try:
rsi_simu.append(simu.rsi(simu=False, syst=False,
verbose=False))
except TypeError:
continue
rsi_sigma = self.std2(N.array(rsi_simu)[N.isfinite(rsi_simu)],
rsi_value)
rsi_mean = N.mean(N.array(rsi_simu)[N.isfinite(rsi_simu)])
if N.isfinite(rsi_value):
self.rsivalues = {'rsi': float(rsi_value),
'rsi.err': float(rsi_sigma),
'rsi.stat': float(rsi_sigma),
'rsi.mean': float(rsi_mean),
'rsi_lbd': lbd}
if syst:
rsi_syst = []
for system in self.syst:
try:
rsi_syst.append(system.rsi(simu=False, syst=False,
verbose=False))
except TypeError:
continue
rsi_sigma_syst = self.std2(N.array(rsi_syst)[N.isfinite(rsi_syst)],
rsi_value)
if N.isfinite(rsi_sigma_syst):
rsi_sigma = float(N.sqrt(rsi_sigma**2 + rsi_sigma_syst**2))
else:
rsi_sigma *= 2
self.rsivalues['rsi.syst'] = float(rsi_sigma_syst)
self.rsivalues['rsi.err'] = float(rsi_sigma)
return [float(rsi_value), float(rsi_sigma)]
if simu is False and syst is False:
if N.isfinite(rsi_value):
self.rsivalues = {'rsi': float(rsi_value),
'rsi_lbd': lbd,
'rsi.err': N.nan,
'rsi.stat': N.nan,
'rsi.syst': N.nan,
'rsi.mean': N.nan}
return rsi_value
def rsis(self, verbose=True, simu=True, syst=True):
"""
Return the value and the error of rsis.
[rsis, rsis_sigma]
"""
# initialisation
self.rsisvalues = {'rsis': N.nan, 'rsis.err': N.nan, 'rsis.stat': N.nan,
'rsis.syst': N.nan, 'rsis.mean': N.nan,
'rsis_lbd': [N.nan, N.nan], 'rsis_flux': [N.nan, N.nan]}
if self.init_only:
return
lbd1, flux1, var1 = self._extrema_value_in_interval(self.p5603[0],
self.p5603[1],
self.maxima['x'],
self.maxima['v'],
self.maxima['s'],
extrema='maxima')
if simu and lbd1 is None:
lbd1, flux1, var1 = self.max_of_interval(self.p5603[0], self.p5603[1])
lbd2, flux2, var2 = self._extrema_value_in_interval(self.p6312[0],
self.p6312[1],
self.maxima['x'],
self.maxima['v'],
self.maxima['s'],
extrema='maxima')
if simu and lbd2 is None:
lbd2, flux2, var2 = self.max_of_interval(self.p6312[0], self.p6312[1])
try:
rsis_value = flux1 / flux2
except TypeError:
if verbose:
print >> sys.stderr, "ERROR in computing rsis"
rsis_value = N.nan
if simu:
if not N.isfinite(rsis_value):
return [float(N.nan), float(N.nan)]
rsis_simu = []
for simu in self.simulations:
try:
rsis_simu.append(simu.rsis(simu=False, syst=False,
verbose=False))
except TypeError:
continue
rsis_sigma = self.std2(N.array(rsis_simu)[N.isfinite(rsis_simu)],
rsis_value)
rsis_mean = N.mean(N.array(rsis_simu)[N.isfinite(rsis_simu)])
if N.isfinite(rsis_value):
self.rsisvalues = {'rsis': float(rsis_value),
'rsis.err': float(rsis_sigma),
'rsis.stat': float(rsis_sigma),
'rsis.mean': float(rsis_mean),
'rsis_lbd': [float(lbd1), float(lbd2)],
'rsis_flux': [float(flux1), float(flux2)]}
if syst:
rsis_syst = []
for system in self.syst:
try:
rsis_syst.append(system.rsis(simu=False, syst=False,
verbose=False))
except TypeError:
continue
rsis_sigma_syst = self.std2(
N.array(rsis_syst)[N.isfinite(rsis_syst)], rsis_value)
if N.isfinite(rsis_sigma_syst):
rsis_sigma = float(N.sqrt(rsis_sigma**2 + rsis_sigma_syst**2))
else:
rsis_sigma *= 2
self.rsisvalues['rsis.syst'] = float(rsis_sigma_syst)
self.rsisvalues['rsis.err'] = float(rsis_sigma)
return [float(rsis_value), float(rsis_sigma)]
if simu is False and syst is False:
if N.isfinite(rsis_value):
self.rsisvalues = {'rsis': float(rsis_value),
'rsis_lbd': [float(lbd1), float(lbd2)],
'rsis_flux': [float(flux1), float(flux2)],
'rsis.err': N.nan, 'rsis.stat': N.nan,
'rsis.syst': N.nan, 'rsis.mean': N.nan}
return rsis_value
def rsiss(self, verbose=True, simu=True):
"""
Return the value and the error of rsiss.
[rsiss, rsiss_sigma]
"""
min_1 = 5500
max_1 = 5700
min_2 = 6200
max_2 = 6450
try:
a = self._integration(self.x, self.y, imin=min_1, imax=max_1,
verbose=verbose)
b = self._integration(self.x, self.y, imin=min_2, imax=max_2,
verbose=verbose)
var_a = self._var_integration(self.x, self.v, imin=min_1, imax=max_1,
verbose=verbose)
var_b = self._var_integration(self.x, self.v, imin=min_2, imax=max_2,
verbose=verbose)
rsiss_value = a / b
rsiss_sigma = N.sqrt(self._var_rapport(a, b, var_a, var_b,
verbose=verbose))
except TypeError:
if verbose:
print >> sys.stderr, 'ERROR in computing rsiss'
rsiss_value = float(N.nan)
rsiss_sigma = float(N.nan)
self.rsissvalues = {'rsiss': float(rsiss_value),
'rsiss.err': float(rsiss_sigma),
'rsiss_lbd': [float(min_1),
float(max_1),
float(min_2),
float(max_2)]}
return [float(rsiss_value), float(rsiss_sigma)]
def ew(self, lambda_min_blue, lambda_max_blue, lambda_min_red,
lambda_max_red, sf, verbose=True, simu=True,
right1=False, left1=False, right2=False, left2=False,
sup=False, syst=True, check=True):
"""
Return the value and the error of an Equivalent Width.
[lambda_min_blue, lambda_max_blue] and [lambda_min_red, lambda_max_red]
are the intervals in which the two peaks are searched for.
'sf' is the name (a string) of the spectral feature associated with the ew.
[ew, ew_sigma]
"""
# shortcut
ewv = self.ewvalues
# Initialisation
ewv['ew%s' % sf] = N.nan
ewv['lbd_ew%s' % sf] = [N.nan, N.nan]
ewv['flux_ew%s' % sf] = [N.nan, N.nan]
ewv['R%s' % sf] = N.nan
ewv['flux_sum_norm_ew%s' % sf] = N.nan
ewv['depth_norm_ew%s' % sf] = N.nan
ewv['surf_ew%s' % sf] = N.nan
ewv['depth_ew%s' % sf] = N.nan
ewv['depth_ew%s.err' % sf] = N.nan
ewv['depth_ew%s.stat' % sf] = N.nan
ewv['depth_ew%s.syst' % sf] = N.nan
ewv['depth_ew%s.mean' % sf] = N.nan
ewv['surf_ew%s.err' % sf] = N.nan
ewv['surf_ew%s.stat' % sf] = N.nan
ewv['surf_ew%s.syst' % sf] = N.nan
ewv['surf_ew%s.mean' % sf] = N.nan
ewv['width_ew%s' % sf] = N.nan
ewv['ew%s.err' % sf] = N.nan
ewv['ew%s.stat' % sf] = N.nan
ewv['ew%s.syst' % sf] = N.nan
ewv['flux_sum_norm_ew%s.err' % sf] = N.nan
ewv['flux_sum_norm_ew%s.stat' % sf] = N.nan
ewv['flux_sum_norm_ew%s.syst' % sf] = N.nan
ewv['depth_norm_ew%s.err' % sf] = N.nan
ewv['depth_norm_ew%s.stat' % sf] = N.nan
ewv['depth_norm_ew%s.syst' % sf] = N.nan
ewv['width_ew%s.err' % sf] = N.nan
ewv['width_ew%s.stat' % sf] = N.nan
ewv['width_ew%s.syst' % sf] = N.nan
ewv['ew%s.mean' % sf] = N.nan
ewv['R%s.mean' % sf] = N.nan
ewv['R%s.stat' % sf] = N.nan
ewv['R%s.syst' % sf] = N.nan
ewv['R%s.err' % sf] = N.nan
ewv['flux_sum_norm_ew%s.mean' % sf] = N.nan
ewv['depth_norm_ew%s.mean' % sf] = N.nan
ewv['width_ew%s.mean' % sf] = N.nan
ewv['ew%s.med' % sf] = N.nan
ewv['fmean_ew%s' % sf] = N.nan
ewv['fmean_ew%s.err' % sf] = N.nan
ewv['fmean_ew%s.stat' % sf] = N.nan
ewv['fmean_ew%s.syst' % sf] = N.nan
ewv['fmean_ew%s.mean' % sf] = N.nan
if self.init_only:
return
# Function to compute the ew value and find its parameters ============
try:
lbd1, flux1, var1 = self._extrema_value_in_interval(lambda_min_blue,
lambda_max_blue,
self.maxima['x'],
self.maxima['v'],
self.maxima['s'],
extrema='maxima',
right=right1,
left=left1)
if lbd1 is None:
try:
lbd1, flux1, var1 = self._find_special_peak(lambda_min_blue,
lambda_max_blue,
maxima=True,
right=right1,
left=left1)
except TypeError:
lbd1, flux1, var1 = None, None, None
if simu and lbd1 is None:
lbd1, flux1, var1 = self.max_of_interval(lambda_min_blue,
lambda_max_blue)
check = False
lbd2, flux2, var2 = self._extrema_value_in_interval(lambda_min_red,
lambda_max_red,
self.maxima['x'],
self.maxima['v'],
self.maxima['s'],
extrema='maxima',
right=right2,
left=left2)
if lbd2 is None:
try:
lbd2, flux2, var2 = self._find_special_peak(lambda_min_red,
lambda_max_red,
maxima=True,
right=right2,
left=left2)
except TypeError:
lbd2, flux2, var2 = None, None, None
if simu and lbd2 is None:
lbd2, flux2, var2 = self.max_of_interval(lambda_min_red,
lambda_max_red)
check = False
# Check if the straight line is under the smoothing function
if check:
if sup is True and lbd2 is not None and lbd1 is not None:
x = N.polyval(N.polyfit([lbd1, lbd2],
[flux1, flux2],
1),
self.x[(self.x > lbd1)
& (self.x < lbd2)]) \
- self.s[(self.x > lbd1)
& (self.x < lbd2)]
lbd1_tmp, flux1_tmp = lbd1, flux1
while len(x[x < 0]) > 5 and lbd1_tmp <= lambda_max_blue:
lbd1_tmp = self.x[
(self.x == lbd1_tmp).nonzero()[0][0] + 1]
flux1_tmp = self.smoother(lbd1_tmp)
x = N.polyval(N.polyfit([lbd1_tmp, lbd2],
[flux1_tmp, flux2],
1),
self.x[(self.x > lbd1_tmp)
& (self.x < lbd2)]) \
- self.s[(self.x > lbd1_tmp)
& (self.x < lbd2)]
if len(x[x < 0]) > 5:
while len(x[x < 0]) > 5 and lbd2 >= lambda_min_red:
lbd2 = self.x[(self.x == lbd2).nonzero()[0][0] - 1]
flux2 = self.smoother(lbd2)
x = N.polyval(N.polyfit([lbd1, lbd2],
[flux1, flux2],
1),
self.x[(self.x > lbd1)
& (self.x < lbd2)]) \
- self.s[(self.x > lbd1)
& (self.x < lbd2)]
else:
lbd1, flux1 = lbd1_tmp, flux1_tmp
# if sup and len(x[x<0]) > 5: ew_value = N.nan
# else: ew_value = self._equivalentwidth(self.x, self.y,
# lbd1=lbd1, lbd2=lbd2, flux1=flux1, flux2=flux2, verbose=verbose)
ew_value = self._equivalentwidth(self.x,
self.y,
lbd1=lbd1,
lbd2=lbd2,
flux1=flux1,
flux2=flux2,
verbose=verbose)
except TypeError:
if verbose:
print >> sys.stderr, 'ERROR, no extrema found, '\
'try self.find_extrema()'
ew_value = N.nan
# ======================================================================
if N.isfinite(ew_value): # Additional informations
interval = (self.x > lbd1) & (self.x < lbd2)
arg = N.argmin(self.s[interval])
lbd3 = self.x[interval][arg]
surf = N.sum(N.polyval(N.polyfit([lbd1, lbd2],
[flux1, flux2], 1),
self.x[interval])
- self.y[interval])
depth = self._equivalentdepth(lbd1=lbd1,
lbd2=lbd3,
lbd3=lbd2,
flux1=self.smoother(lbd1),
flux2=self.smoother(lbd3),
flux3=self.smoother(lbd2),
verbose=True)
p = N.polyfit([lbd1, lbd2], [flux1, flux2], 1) # y=p[0]*x+p[1]
flux_norm = N.divide(N.sum(N.polyval(p, self.x[interval])),
float(N.mean(N.polyval(p, self.x[interval]))))
depth_n = depth / N.polyval(p, lbd3)
fmean = 2 * (flux2 - self.smoother(lbd3)) \
/ (flux2 + self.smoother(lbd3))
ewv['ew%s' % sf] = float(ew_value)
ewv['lbd_ew%s' % sf] = [float(lbd1), float(lbd2)]
ewv['flux_ew%s' % sf] = [float(flux1), float(flux2)]
ewv['R%s' % sf] = float(flux2 / flux1)
ewv['flux_sum_norm_ew%s' % sf] = float(flux_norm)
ewv['depth_norm_ew%s' % sf] = float(depth_n)
ewv['width_ew%s' % sf] = float(lbd2 - lbd1)
ewv['depth_ew%s' % sf] = float(depth)
ewv['surf_ew%s' % sf] = float(surf)
ewv['fmean_ew%s' % sf] = float(fmean)
# Compute the statistical error
if simu:
if not N.isfinite(ew_value):
return [float(N.nan), float(N.nan)]
ew_simu, r_simu, d_simu, w_simu, f_simu, fm_simu = [], [], [], [], [], []
dep_simu, sur_simu = [], []
for simu in self.simulations:
try:
ew_simu.append(simu.ew(lambda_min_blue, lambda_max_blue,
lambda_min_red, lambda_max_red,
sf, simu=False, syst=False, verbose=False))
r_simu.append(float(simu.ewvalues['R%s' % sf]))
f_simu.append(float(simu.ewvalues['flux_sum_norm_ew%s' % sf]))
d_simu.append(float(simu.ewvalues['depth_norm_ew%s' % sf]))
w_simu.append(float(simu.ewvalues['width_ew%s' % sf]))
dep_simu.append(float(simu.ewvalues['depth_ew%s' % sf]))
sur_simu.append(float(simu.ewvalues['surf_ew%s' % sf]))
fm_simu.append(float(simu.ewvalues['fmean_ew%s' % sf]))
except TypeError:
continue
ew_sigma = self.std2(
N.array(ew_simu)[N.isfinite(ew_simu)], ew_value)
r_sigma = self.std2(N.array(r_simu)[N.isfinite(r_simu)],
float(flux2 / flux1))
f_sigma = self.std2(N.array(f_simu)[N.isfinite(f_simu)], flux_norm)
d_sigma = self.std2(N.array(d_simu)[N.isfinite(d_simu)], depth_n)
w_sigma = self.std2(N.array(w_simu)[N.isfinite(w_simu)],
float(lbd2 - lbd1))
dep_sigma = self.std2(N.array(dep_simu)[N.isfinite(dep_simu)],
depth)
sur_sigma = self.std2(N.array(sur_simu)[N.isfinite(sur_simu)],
surf)
fmean_sigma = self.std2(N.array(fm_simu)[N.isfinite(fm_simu)],
fmean)
ew_mean = N.mean(N.array(ew_simu)[N.isfinite(ew_simu)])
r_mean = N.mean(N.array(r_simu)[N.isfinite(r_simu)])
f_mean = N.mean(N.array(f_simu)[N.isfinite(f_simu)])
d_mean = N.mean(N.array(d_simu)[N.isfinite(d_simu)])
w_mean = N.mean(N.array(w_simu)[N.isfinite(w_simu)])
dep_mean = N.mean(N.array(dep_simu)[N.isfinite(dep_simu)])
sur_mean = N.mean(N.array(sur_simu)[N.isfinite(sur_simu)])
fmean_mean = N.mean(N.array(fm_simu)[N.isfinite(fm_simu)])
ew_med = N.median(N.array(ew_simu)[N.isfinite(ew_simu)])
ewv['ew%s.err' % sf] = float(ew_sigma)
ewv['ew%s.stat' % sf] = float(ew_sigma)
ewv['R%s.err' % sf] = float(r_sigma)
ewv['R%s.stat' % sf] = float(r_sigma)
ewv['flux_sum_norm_ew%s.err' % sf] = float(f_sigma)
ewv['flux_sum_norm_ew%s.stat' % sf] = float(f_sigma)
ewv['depth_norm_ew%s.err' % sf] = float(d_sigma)
ewv['depth_norm_ew%s.stat' % sf] = float(d_sigma)
ewv['width_ew%s.err' % sf] = float(w_sigma)
ewv['width_ew%s.stat' % sf] = float(w_sigma)
ewv['depth_ew%s.err' % sf] = float(dep_sigma)
ewv['depth_ew%s.stat' % sf] = float(dep_sigma)
ewv['surf_ew%s.err' % sf] = float(sur_sigma)
ewv['surf_ew%s.stat' % sf] = float(sur_sigma)
ewv['fmean_ew%s.err' % sf] = float(fmean_sigma)
ewv['fmean_ew%s.stat' % sf] = float(fmean_sigma)
ewv['ew%s.mean' % sf] = float(ew_mean)
ewv['R%s.mean' % sf] = float(r_mean)
ewv['flux_sum_norm_ew%s.mean' % sf] = float(f_mean)
ewv['depth_norm_ew%s.mean' % sf] = float(d_mean)
ewv['width_ew%s.mean' % sf] = float(w_mean)
ewv['depth_ew%s.mean' % sf] = float(dep_mean)
ewv['surf_ew%s.mean' % sf] = float(sur_mean)
ewv['fmean_ew%s.mean' % sf] = float(fmean_mean)
ewv['ew%s.med' % sf] = float(ew_med)
# Compute systematic error
if syst:
if not N.isfinite(ew_value):
return [float(N.nan), float(N.nan)]
ew_syst, r_syst, d_syst, w_syst, f_syst, fm_syst = [
], [], [], [], [], []
dep_syst, sur_syst = [], []
for system in self.syst:
try:
ew_syst.append(system.ew(lambda_min_blue,
lambda_max_blue,
lambda_min_red,
lambda_max_red,
sf,
simu=False,
syst=False,
verbose=False))
r_syst.append(float(system.ewvalues['R%s' % sf]))
f_syst.append(float(system.ewvalues['flux_sum_norm_ew%s' %
sf]))
d_syst.append(
float(system.ewvalues['depth_norm_ew%s' % sf]))
w_syst.append(float(system.ewvalues['width_ew%s' % sf]))
dep_syst.append(float(system.ewvalues['depth_ew%s' % sf]))
sur_syst.append(float(system.ewvalues['surf_ew%s' % sf]))
fm_syst.append(float(system.ewvalues['fmean_ew%s' % sf]))
except TypeError:
continue
ew_sigma_syst = self.std2(N.array(ew_syst)[N.isfinite(ew_syst)],
ew_value)
r_sigma_syst = self.std2(N.array(r_syst)[N.isfinite(r_syst)],
float(flux2 / flux1))
f_sigma_syst = self.std2(N.array(f_syst)[N.isfinite(f_syst)],
flux_norm)
d_sigma_syst = self.std2(N.array(d_syst)[N.isfinite(d_syst)],
depth_n)
w_sigma_syst = self.std2(N.array(w_syst)[N.isfinite(w_syst)],
float(lbd2 - lbd1))
dep_sigma_syst = self.std2(N.array(dep_syst)[N.isfinite(dep_syst)],
depth)
sur_sigma_syst = self.std2(N.array(sur_syst)[N.isfinite(sur_syst)],
surf)
fm_sigma_syst = self.std2(N.array(fm_syst)[N.isfinite(fm_syst)],
fmean)
if not N.isfinite(ew_sigma_syst):
ew_sigma_syst = float(0.0)
if not N.isfinite(r_sigma_syst):
r_sigma_syst = float(0.0)
if not N.isfinite(f_sigma_syst):
f_sigma_syst = float(0.0)
if not N.isfinite(d_sigma_syst):
d_sigma_syst = float(0.0)
if not N.isfinite(w_sigma_syst):
w_sigma_syst = float(0.0)
if not N.isfinite(dep_sigma_syst):
dep_sigma_syst = float(0.0)
if not N.isfinite(sur_sigma_syst):
sur_sigma_syst = float(0.0)
if not N.isfinite(fm_sigma_syst):
fm_sigma_syst = float(0.0)
ew_sigma = N.sqrt(ew_sigma**2 + ew_sigma_syst**2)
ewv['ew%s.syst' % sf] = float(ew_sigma_syst)
ewv['ew%s.err' % sf] = float(N.sqrt(ewv['ew%s.err' % sf]**2 +
ew_sigma_syst**2))
ewv['R%s.syst' % sf] = float(r_sigma_syst)
ewv['R%s.err' % sf] = float(N.sqrt(ewv['R%s.err' % sf]**2 +
r_sigma_syst**2))
ewv['flux_sum_norm_ew%s.syst' % sf] = float(f_sigma_syst)
ewv['flux_sum_norm_ew%s.err' % sf] = float(
N.sqrt(ewv['flux_sum_norm_ew%s.err' % sf]**2 + f_sigma_syst**2))
ewv['depth_norm_ew%s.syst' % sf] = float(d_sigma_syst)
ewv['depth_norm_ew%s.err' % sf] = float(
N.sqrt(ewv['depth_norm_ew%s.err' % sf]**2 + d_sigma_syst**2))
ewv['width_ew%s.syst' % sf] = float(w_sigma_syst)
ewv['width_ew%s.err' % sf] = float(N.sqrt(ewv['width_ew%s.err' % sf]**2
+ w_sigma_syst**2))
ewv['depth_ew%s.syst' % sf] = float(dep_sigma_syst)
ewv['depth_ew%s.err' % sf] = float(N.sqrt(ewv['depth_ew%s.err' % sf]**2
+ dep_sigma_syst**2))
ewv['surf_ew%s.syst' % sf] = float(sur_sigma_syst)
ewv['surf_ew%s.err' % sf] = float(N.sqrt(ewv['surf_ew%s.err' % sf]**2 +
sur_sigma_syst**2))
ewv['fmean_ew%s.syst' % sf] = float(fm_sigma_syst)
ewv['fmean_ew%s.err' % sf] = float(N.sqrt(ewv['fmean_ew%s.err' % sf]**2
+ fm_sigma_syst**2))
return [float(ew_value), float(ew_sigma)]
if simu is False and syst is False:
return ew_value
def velocity(self, infodict, verbose=False, simu=True, syst=True,
left=False, right=False):
"""
Compute the value and the error of the velocity of an absorption feature.
infodict should have the following structure:
{'lmin' : minimum lambda for searching the dip,
'lmax' : maximum lambda for searching the dip,
'lrest' : rest-frame wavelength of the absorption feature,
'name' : name of the feature}
The error is stored as ['name'] + '.err'
and the wavelength as ['name'] + '_lbd'.
"""
# shortcut
velo = self.velocityvalues
c = 299792.458
# Initialisation
velo[infodict['name']] = N.nan
velo[infodict['name'] + '.err'] = N.nan
velo[infodict['name'] + '.stat'] = N.nan
velo[infodict['name'] + '.syst'] = N.nan
velo[infodict['name'] + '_lbd'] = N.nan
velo[infodict['name'] + '_lbd.stat'] = N.nan
velo[infodict['name'] + '_lbd.syst'] = N.nan
velo[infodict['name'] + '_lbd.err'] = N.nan
velo[infodict['name'] + '_lbd.mean'] = N.nan
velo[infodict['name'] + '_flux'] = N.nan
velo[infodict['name'] + '_flux.stat'] = N.nan
velo[infodict['name'] + '_flux.syst'] = N.nan
velo[infodict['name'] + '_flux.err'] = N.nan
velo[infodict['name'] + '_flux.mean'] = N.nan
velo[infodict['name'] + '.binsyst'] = N.nan
velo[infodict['name'] + '.bin'] = N.nan
velo[infodict['name'] + '.mean'] = N.nan
velo[infodict['name'] + '.med'] = N.nan
if self.init_only:
return
try:
lbd, flux, var = self._extrema_value_in_interval(infodict['lmin'],
infodict['lmax'],
self.minima['x'],
self.minima['v'],
self.minima['s'],
extrema='minima',
right=right,
left=left)
if lbd is None:
lbd, flux, var = self._find_special_peak(infodict['lmin'],
infodict['lmax'],
minima=True,
right=right, left=left)
if simu and lbd is None:
lbd, flux, var = self.max_of_interval(infodict['lmin'],
infodict['lmax'])
if lbd is not None:
lbd = self._get_min(lbd)
velocity = (infodict['lrest'] - lbd) / infodict['lrest'] * c
except TypeError:
velocity = N.nan
# check for the vSiII5972 velocity
# if < 8000 km/s, check the curvature of the spectral area
if N.isfinite(velocity) and velocity < 8000 and infodict['name'] == 'vSiII_5972':
filt = (self.x > 5650) & (self.x < 5950)
pol = N.polyfit(self.x[filt], self.s[filt], 2)
if pol[0] * 1e6 <= 0.55:
print "ERROR: Velocity is under 8000km/s, " \
"and curvature of the spectral zone is too small."
velocity = N.nan
if N.isfinite(velocity):
velo[infodict['name']] = float(velocity)
velo[infodict['name'] + '_lbd'] = float(lbd)
velo[infodict['name'] + '_flux'] = float(flux)
velocity_sigma = None
if simu:
# store only in case we are at top level.
if not N.isfinite(velocity):
return [float(N.nan), float(N.nan)]
velocity_simu, lbd_simu, flux_simu = [], [], []
for simul in self.simulations:
try:
velocity_simu.append(simul.velocity(infodict, simu=False,
syst=False,
verbose=False))
lbd_simu.append(simul.velocityvalues[infodict['name'] +
'_lbd'])
flux_simu.append(simul.velocityvalues[infodict['name'] +
'_flux'])
except TypeError:
continue
velocity_sigma = self.std2(
N.array(velocity_simu)[N.isfinite(velocity_simu)], velocity)
lbd_sigma = self.std2(N.array(lbd_simu)[N.isfinite(lbd_simu)], lbd)
flux_sigma = self.std2(
N.array(flux_simu)[N.isfinite(flux_simu)], flux)
velocity_mean = N.mean(
N.array(velocity_simu)[N.isfinite(velocity_simu)])
lbd_mean = N.mean(N.array(lbd_simu)[N.isfinite(lbd_simu)])
flux_mean = N.mean(N.array(flux_simu)[N.isfinite(flux_simu)])
velocity_med = N.median(
N.array(velocity_simu)[N.isfinite(velocity_simu)])
lbd_med = N.median(N.array(lbd_simu)[N.isfinite(lbd_simu)])
flux_med = N.median(N.array(flux_simu)[N.isfinite(flux_simu)])
velo[infodict['name'] + '.err'] = float(velocity_sigma)
velo[infodict['name'] + '.stat'] = float(velocity_sigma)
velo[infodict['name'] + '.mean'] = float(velocity_mean)
velo[infodict['name'] + '.med'] = float(velocity_med)
velo[infodict['name'] + '_lbd.err'] = float(lbd_sigma)
velo[infodict['name'] + '_lbd.stat'] = float(lbd_sigma)
velo[infodict['name'] + '_lbd.mean'] = float(lbd_mean)
velo[infodict['name'] + '_lbd.med'] = float(lbd_med)
velo[infodict['name'] + '_flux.err'] = float(flux_sigma)
velo[infodict['name'] + '_flux.stat'] = float(flux_sigma)
velo[infodict['name'] + '_flux.mean'] = float(flux_mean)
velo[infodict['name'] + '_flux.med'] = float(flux_med)
if syst:
velocity_syst, lbd_syst, flux_syst = [], [], []
for system in self.syst:
try:
velocity_syst.append(system.velocity(infodict,
simu=False,
syst=False,
verbose=False))
lbd_syst.append(system.velocityvalues[infodict['name'] +
'_lbd'])
flux_syst.append(system.velocityvalues[infodict['name'] +
'_flux'])
except TypeError:
continue
binning = self.x[1] - self.x[0]
velocity_syst_sigma = self.std2(
N.array(velocity_syst)[N.isfinite(velocity_syst)], velocity)
lbd_syst_sigma = self.std2(
N.array(lbd_syst)[N.isfinite(lbd_syst)], lbd)
flux_syst_sigma = self.std2(
N.array(flux_syst)[N.isfinite(flux_syst)], flux)
velocity_syst_bin = ((binning) * c) / (N.sqrt(12)
* infodict['lrest'])
velocity_sigma = N.sqrt(velocity_sigma ** 2
+ velocity_syst_sigma**2
+ velocity_syst_bin**2)
lbd_sigma = N.sqrt(lbd_sigma ** 2 + lbd_syst_sigma**2
+ (binning)**2)
flux_sigma = N.sqrt(flux_sigma ** 2 + flux_syst_sigma**2)
velo[infodict['name'] + '.syst'] = float(velocity_syst_sigma)
velo[infodict['name'] + '.err'] = float(velocity_sigma)
velo[infodict['name'] + '_lbd.syst'] = float(lbd_syst_sigma)
velo[infodict['name'] + '_lbd.err'] = float(lbd_sigma)
velo[infodict['name'] + '_flux.syst'] = float(flux_syst_sigma)
velo[infodict['name'] + '_flux.err'] = float(flux_sigma)
velo[infodict['name'] + '.binsyst'] = float(velocity_syst_bin)
velo[infodict['name'] + '.bin'] = float(binning)
if velocity_sigma is None:
return float(velocity)
else:
return [float(velocity), float(velocity_sigma)]
# ===============
# Get craniometer
# ===============
def get_cranio(x, y, v, smoother='spline_free_knot', nsimu=1000, verbose=False):
"""Get the craniometers."""
obj = covariance.SPCS(x, y, v)
if smoother == 'spline_free_knot':
smoothf = 'sp'
else:
smoothf = 'sg'
# obj.comp_rho_f()
obj.smooth(smoothing=smoothf)
obj.make_simu(nsimu=nsimu)
simus = N.array([s.data['y'] for s in obj.simus])
cr = Craniometer(x, y, v * obj.data['factor_used'])
cr.smooth(rho=obj.data['rho'], smoother=smoother, s=obj.s, hsize=obj.w, verbose=False)
cr.cranio_generator(rho=obj.data['rho'], simus=simus, verbose=verbose)
cr.find_extrema()
return cr
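# Editor's usage sketch (not part of the original source): the direct
# Craniometer path, bypassing the covariance.SPCS helper. The synthetic
# spectrum and parameter values below are demo-only assumptions.
def _demo_craniometer_pipeline():
    import numpy as N
    x = N.arange(3500., 7000., 2.)
    y = 1. + 0.01 * N.random.randn(len(x))
    v = N.full_like(x, 1e-4)
    cr = Craniometer(x, y, v)
    cr.smooth(smoother='sgfilter', hsize=15, order=2, verbose=False)
    cr.find_extrema(verbose=False)
    return cr.maxima, cr.minima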
|
nicolaschotard/snspin
|
snspin/spin.py
|
Python
|
mit
| 82,655
|
[
"Gaussian"
] |
290d550b67daa049f3b4833aee9f71d612e393db19e120707be28715290739fb
|
from sklearn.feature_extraction import DictVectorizer
from collections import defaultdict
import evaluateFile
import loading
import evaluation
import sys, os
import classification
import shutil
from utils import Stream
from sklearn.grid_search import GridSearchCV
import itertools
from sklearn.preprocessing.data import minmax_scale, MinMaxScaler
def clearKeys(proteins, keys):
for protId in proteins:
protein = proteins[protId]
for key in keys:
if key in protein:
del protein[key]
def getCombinations(items):
combinations = []
for i in xrange(1, len(items) + 1):
els = [list(x) for x in itertools.combinations(items, i)]
combinations.extend(els)
return combinations
def mean(numbers):
return float(sum(numbers)) / max(len(numbers), 1)
def combineConf(protein, labels, predKeys, combKey):
combConfKey = combKey + "_conf"
assert combConfKey not in protein
protein[combConfKey] = {}
combConfs = protein[combConfKey]
assert combKey + "_sources" not in protein
protein[combKey + "_sources"] = {}
combSources = protein[combKey + "_sources"]
for key in predKeys:
preds = protein.get(key, {})
predConfs = protein.get(key + "_conf", {})
for label in labels:
predConf = predConfs.get(label)
if predConf != None:
if label not in combConfs:
combConfs[label] = []
combConfs[label].append(predConf)
if label in preds:
if label not in combSources:
combSources[label] = []
combSources[label].append(key)
protein[combConfKey] = {x:mean(combConfs[x]) for x in combConfs}
def combinePred(proteins, predKeys, combKey, mode="AND", limitToSets=None):
assert mode in ("AND", "OR", "SINGLE")
if mode == "SINGLE":
assert len(predKeys) == 1
counts = defaultdict(int)
for protId in proteins:
protein = proteins[protId]
if limitToSets != None and not any(x in limitToSets for x in protein["sets"]):
counts["out-of-sets"] += 1
continue
counts["proteins"] += 1
assert combKey not in protein
protein[combKey] = {}
if mode == "OR" or mode == "SINGLE":
for key in predKeys:
if key in protein:
counts["predictions-mode-" + mode] += 1
protein[combKey].update(protein[key])
else:
counts["no-prediction-for-" + key] += 1
elif mode == "AND":
missing = False
predLabelSets = []
for key in predKeys:
if key not in protein:
missing = True
counts["no-prediction-for-" + key] += 1
else:
predLabelSets.append(set(protein[key].keys()))
if not missing:
counts["predictions-mode-" + mode] += 1
protein[combKey] = {x:1 for x in set.intersection(*predLabelSets)} #{x:1 for x in pred1.intersection(pred2)}
combineConf(protein, sorted(protein[combKey].keys()), predKeys, combKey)
print "Combined predictions, mode =", mode, "counts =", dict(counts)
def buildFeatures(protein, label, predKeys, predictions, confidences, counts):
features = {label:1}
for key in predKeys:
if key in predictions:
features["pos:" + key] = 1
if key in confidences:
if label in confidences[key]:
features["conf:" + key] = confidences[key].get(label)
else:
features["neg:" + key] = 1
return features
def buildExamples(proteins, predKeys, limitToSets=None, limitTerms=None, outDir=None):
counts = defaultdict(int)
empty = {}
examples = {"classes":[], "features":[], "sets":[], "proteins":[], "labels":[]}
for protId in proteins:
protein = proteins[protId]
if limitToSets != None and not any(x in limitToSets for x in protein["sets"]):
counts["out-of-sets"] += 1
continue
predictions = {}
confidences = {}
predLabels = []
for key in predKeys: # Collect predictions for each system (key)
if key not in protein:
counts["no-prediction-for-" + key] += 1
else:
counts["predictions-for-" + key] += 1
predLabels.extend(protein[key].keys())
predictions[key] = protein[key]
confidences[key] = protein.get(key + "_conf", empty)
predLabels = sorted(set(predLabels))
#predLabels = sorted(set.union(*[predictions[x].keys() for x in predictions])) # + goldLabels))
protSets = protein.get("sets")
goldLabels = protein["terms"].keys()
if limitTerms:
predLabels = [x for x in predLabels if x in limitTerms]
goldLabels = [x for x in goldLabels if x in limitTerms]
goldLabels = set(goldLabels)
for label in predLabels:
features = buildFeatures(protein, label, predKeys, predictions, confidences, counts)
cls = 1 if label in goldLabels else 0
counts["examples"] += 1
counts["pos" if cls == 1 else "neg"] += 1
examples["classes"].append(cls)
examples["features"].append(features)
examples["sets"].append(protSets)
examples["proteins"].append(protein)
examples["labels"].append(label)
print "Built examples,", dict(counts)
dv = DictVectorizer(sparse=True)
examples["features"] = dv.fit_transform(examples["features"])
examples["feature_names"] = dv.feature_names_
print "Vectorized the examples, unique features =", len(examples["feature_names"])
if outDir != None:
loading.saveFeatureNames(examples["feature_names"], os.path.join(outDir, "features.tsv"))
return examples
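# Editor's illustration (not part of the original source): how the feature
# dicts produced by buildFeatures() become a sparse matrix. The label and
# confidence values below are demo-only assumptions.
def _demo_feature_vectorization():
    dv = DictVectorizer(sparse=True)
    feats = [{"GO:0005575": 1, "pos:nn_pred": 1, "conf:nn_pred": 0.87},
             {"GO:0005575": 1, "neg:cls_pred": 1}]
    matrix = dv.fit_transform(feats)  # 2 x 4 scipy sparse matrix
    return matrix.shape, dv.feature_names_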
def getSubset(examples, setNames):
subset = {}
counts = {}
sets = examples["sets"]
indices = [i for i in range(len(sets)) if any(x in setNames for x in sets[i])]
subset["features"] = examples["features"][indices]
counts["features"] = subset["features"].shape[0]
for key in ("classes", "sets", "proteins", "labels"):
subset[key] = [examples[key][i] for i in indices]
counts[key] = len(subset[key])
print "Generated example subset for sets", setNames, "with", counts
return subset
def binaryToMultiLabel(examples, predictions, probabilities, predKey):
print "Converting binary predictions to labels"
predKeyConf = predKey + "_conf"
for prediction, probability, protein, label in zip(predictions, probabilities, examples["proteins"], examples["labels"]):
if predKey not in protein:
protein[predKey] = {}
protein[predKeyConf] = {}
if prediction == 1:
protein[predKey][label] = 1
protein[predKeyConf][label] = probability
def learn(examples, Classifier, classifierArgs, develFolds=10, verbose=3, n_jobs=1, predKey="ml_comb_pred", limitTerms=None):
print "Parameter grid search"
develExamples = getSubset(examples, ["devel"])
clf = GridSearchCV(Classifier(), classifierArgs, cv=develFolds, verbose=verbose, n_jobs=n_jobs, scoring="f1_micro")
clf.fit(develExamples["features"], develExamples["classes"])
print "Best params", (clf.best_params_, clf.best_score_)
print "Predicting all examples"
minMax = MinMaxScaler((0.03, 1.0))
allPredictions = clf.predict(examples["features"])
if hasattr(clf, "predict_proba"):
allProbabilities = clf.predict_proba(examples["features"])
else:
allProbabilities = clf.decision_function(examples["features"])
#import pdb; pdb.set_trace()
minMax.fit(allProbabilities) #minmax_scale(testProbabilities, (0.03, 1.0))
allProbabilities = minMax.transform(allProbabilities) #allProbabilities = minmax_scale(allProbabilities, (0.03, 1.0))
print "Predicting the test set"
testExamples = getSubset(examples, ["test"])
testPredictions = clf.predict(testExamples["features"])
if hasattr(clf, "predict_proba"):
testProbabilities = clf.predict_proba(testExamples["features"])
else:
testProbabilities = clf.decision_function(testExamples["features"])
testProbabilities = minMax.transform(testProbabilities)
binaryToMultiLabel(testExamples, testPredictions, testProbabilities, predKey)
print "Evaluating test set ensemble predictions"
testProteins = {x["id"]:x for x in testExamples["proteins"]}
multiLabelTestExamples = evaluateFile.makeExamples(testProteins, limitTerms=limitTerms, limitToSets=["test"], predKey=predKey)
loading.vectorizeExamples(multiLabelTestExamples, None, sparseLabels=True)
results = evaluation.evaluate(multiLabelTestExamples["labels"], multiLabelTestExamples["predictions"], multiLabelTestExamples, terms=None, averageOnly=True, noAUC=True)
print "Average for test set:", evaluation.metricsToString(results["average"])
binaryToMultiLabel(examples, allPredictions, allProbabilities, predKey)
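# Editor's illustration (not part of the original source): learn() rescales
# raw decision scores into the (0.03, 1.0) confidence range with a
# MinMaxScaler fitted on all examples. Demo-only scores below.
def _demo_confidence_rescaling():
    import numpy as np
    scores = np.array([[-2.1], [0.0], [3.5]])  # e.g. decision_function output
    scaler = MinMaxScaler((0.03, 1.0))
    return scaler.fit(scores).transform(scores)  # [[0.03], [0.394], [1.0]]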
def combine(dataPath, nnInput, clsInput, outDir=None, classifier=None, classifierArgs=None, develFolds=5, useCafa=False, useCombinations=True, useLearning=True, baselineCutoff=1, numTerms=5000, clear=False, useOutFiles=True, task="cafa3"):
if outDir != None:
if clear and os.path.exists(outDir):
print "Removing output directory", outDir
shutil.rmtree(outDir)
if not os.path.exists(outDir):
print "Making output directory", outDir
os.makedirs(outDir)
Stream.openLog(os.path.join(outDir, "log.txt"))
print "==========", "Ensemble", "=========="
proteins = {}
print "Loading Swissprot proteins"
if task == "cafapi":
loading.loadFASTA(os.path.join(options.dataPath, "CAFA_PI", "Swissprot", "CAFA_PI_Swissprot_sequence.tsv.gz"), proteins)
else:
loading.loadFASTA(os.path.join(options.dataPath, "Swiss_Prot", "Swissprot_sequence.tsv.gz"), proteins)
if useCafa:
print "Loading CAFA targets"
if task == "cafapi":
loading.loadFASTA(os.path.join(options.dataPath, "CAFA_PI", "Swissprot", "target.all.fasta.gz"), proteins, True)
else:
loading.loadFASTA(os.path.join(options.dataPath, "CAFA3_targets", "Target_files", "target.all.fasta"), proteins, True)
print "Proteins:", len(proteins)
if task == "cafa3hpo":
loading.removeNonHuman(proteins)
termCounts = loading.loadHPOAnnotations(os.path.join(options.dataPath, "HPO", "annotation", "all_cafa_annotation_propagated.tsv.gz"), proteins)
elif task == "cafa3":
termCounts = loading.loadAnnotations(os.path.join(options.dataPath, "data", "Swissprot_propagated.tsv.gz"), proteins)
else:
termCounts = loading.loadAnnotations(os.path.join(options.dataPath, "CAFA_PI", "Swissprot", "CAFA_PI_Swissprot_propagated.tsv.gz"), proteins)
print "Unique terms:", len(termCounts)
topTerms = loading.getTopTerms(termCounts, numTerms)
limitTerms=set([x[0] for x in topTerms])
print "Using", len(topTerms), "most common GO terms"
loading.loadSplit(os.path.join(dataPath, "data"), proteins)
loading.defineSets(proteins, "overlap" if useCafa else "skip")
predKeys = []
if nnInput != None:
print "Loading neural network predictions from", nnInput
for setName in (("devel", "test", "cafa") if useCafa else ("devel", "test")):
predKey = "nn_pred_cafa" if setName == "cafa" else "nn_pred"
evaluateFile.loadPredictions(proteins, os.path.join(nnInput, setName + ("_targets" if setName == "cafa" else "_pred")) + ".tsv.gz", limitToSets=None, readGold=False, predKey=predKey, confKey="nn_pred_conf", includeDuplicates=True)
predKeys += ["nn_pred"]
if clsInput != None:
print "Loading classifier predictions"
evaluateFile.loadPredictions(proteins, clsInput, limitToSets=["devel","test","cafa"] if useCafa else ["devel","test"], readGold=True, predKey="cls_pred", confKey="cls_pred_conf")
predKeys += ["cls_pred"]
if baselineCutoff > 0:
print "Loading baseline predictions"
loading.loadBaseline(dataPath, proteins, "baseline_pred", baselineCutoff, limitTerms, useCafa=useCafa)
predKeys += ["baseline_pred"]
if useCombinations:
print "===============", "Combining predictions", "==============="
combKey = "comb_pred"
combConfKey = "comb_pred_conf"
combinations = getCombinations(predKeys)
numCombinations = len(combinations)
print "Testing", numCombinations, "combinations"
for i in range(len(combinations)):
print "******", "Combination", str(i + 1) + "/" + str(numCombinations), combinations[i], "******"
for mode in (("AND", "OR") if len(combinations[i]) > 1 else ("SINGLE",)):
for setName in (("devel", "test", "cafa") if useCafa else ("devel", "test")):
combination = combinations[i][:]
if setName == "cafa" and "nn_pred" in combination:
combination[combination.index("nn_pred")] = "nn_pred_cafa"
print "***", "Evaluating", combination, "predictions for set '" + setName + "' using mode '" + mode + "'", "***"
combinePred(proteins, combination, combKey, mode, limitToSets=[setName])
#if setName != "cafa":
examples = evaluateFile.makeExamples(proteins, limitTerms=limitTerms, limitToSets=[setName], predKey=combKey)
loading.vectorizeExamples(examples, None, sparseLabels=True)
results = evaluation.evaluate(examples["labels"], examples["predictions"], examples, terms=None, averageOnly=True, noAUC=True)
print "Average for", str(combination) + "/" + setName + "/" + mode + ":", evaluation.metricsToString(results["average"])
#else:
# print "Skipping evaluation for set '" + setName + "'"
if useOutFiles:
combString = "-".join(combination)
outPath = os.path.join(outDir, "-".join([combString, setName, mode, "ensemble"]) + ".tsv.gz")
evaluation.saveProteins(proteins, outPath, limitTerms=limitTerms, limitToSets=[setName], predKey=combKey) #pass#evaluation.saveResults(data, outStem, label_names, negatives)
clearKeys(proteins, [combKey, combConfKey, combKey + "_sources"])
if useLearning:
print "===============", "Learning", "==============="
Classifier = classification.importNamed(classifier)
examples = buildExamples(proteins, predKeys, limitToSets=None, limitTerms=limitTerms, outDir=outDir)
learn(examples, Classifier, classifierArgs, develFolds=develFolds, limitTerms=limitTerms, predKey="ml_comb_pred")
if useOutFiles:
for setName in (("devel", "test", "cafa") if useCafa else ("devel", "test")):
outPath = os.path.join(outDir, "-".join([setName, "ML", "ensemble"]) + ".tsv.gz")
evaluation.saveProteins(proteins, outPath, limitTerms=limitTerms, limitToSets=[setName], predKey="ml_comb_pred") #pass#evaluation.saveResults(data, outStem, label_names, negatives)
if __name__=="__main__":
from optparse import OptionParser
optparser = OptionParser(description="Ensemble")
optparser.add_option("-p", "--dataPath", default=os.path.expanduser("~/data/CAFA3/data"), help="Data directory")
optparser.add_option("-a", "--nnInput", default=None, help="Neural network predictions tsv.gz file")
optparser.add_option("-b", "--clsInput", default=None, help="Classifier predictions tsv.gz file")
optparser.add_option("-o", "--outDir", default=None, help="Output directory")
optparser.add_option("-s", "--simple", default=False, action="store_true", help="Do simple ensembles (AND, OR and SINGLE)")
optparser.add_option("-l", "--learning", default=False, action="store_true", help="Do machine learning ensemble")
optparser.add_option("-f", "--baseline", default=-1, type=int, help="Add the BLAST baseline as the third set of predictions. Value in range 1-10, 1 for all values.")
optparser.add_option("-t", "--terms", default=5000, type=int, help="The number of top most common GO terms to use as labels")
optparser.add_option("-w", "--write", default=False, action="store_true", help="Write output files")
optparser.add_option("-n", "--develFolds", type=int, default=5, help="Cross-validation for parameter optimization")
optparser.add_option('-c','--classifier', default="ensemble.RandomForestClassifier", help="Scikit-learn classifier")
optparser.add_option('-r','--args', default="{'random_state':[1], 'n_estimators':[10], 'n_jobs':[1], 'verbose':[3]}", help="Classifier arguments")
optparser.add_option("--clear", default=False, action="store_true", help="Remove the output directory if it already exists")
optparser.add_option("--cafa", default=False, action="store_true", help="Process CAFA predictions")
(options, args) = optparser.parse_args()
options.args = eval(options.args)
combine(dataPath=options.dataPath, nnInput=options.nnInput, clsInput=options.clsInput, outDir=options.outDir,
classifier=options.classifier, classifierArgs=options.args, develFolds=options.develFolds,
useCafa=options.cafa,
useCombinations=options.simple, useLearning=options.learning, baselineCutoff=options.baseline,
numTerms=options.terms, clear=options.clear, useOutFiles=options.write)
|
TurkuNLP/CAFA3
|
learning/ensemble.py
|
Python
|
lgpl-3.0
| 17,859
|
[
"BLAST"
] |
37920eaefc59650a4fadeafa24e660f8f3c95dbcfb94042129447b7466332a18
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in control_flow_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.control_flow_ops import *
from tensorflow.python.ops.gen_control_flow_ops import *
# pylint: enable=wildcard-import
def _SwitchGrad(op, *grad):
"""Gradients for a Switch op is calculated using a Merge op.
If the switch is a loop switch, it will be visited twice. We create
the merge on the first visit, and update the other input of the merge
on the second visit. A next_iteration is also added on second visit.
"""
graph = ops.get_default_graph()
# pylint: disable=protected-access
op_ctxt = op._get_control_flow_context()
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if isinstance(op_ctxt, WhileContext):
merge_op = grad_ctxt.grad_state.switch_map.get(op)
if merge_op:
# This is the second time this Switch is visited. It comes from
# the non-exit branch of the Switch, so update the second input
# to the Merge.
# TODO: Perform shape inference with this new input.
# pylint: disable=protected-access
merge_op._update_input(1, control_flow_ops._NextIteration(grad[1]))
# pylint: enable=protected-access
return None, None
else:
# This is the first time this Switch is visited. It always comes
# from the Exit branch, which is grad[0]. grad[1] is empty at this point.
# Use grad[0] for both inputs to merge for now, but update the second
# input of merge when we see this Switch the second time.
merge_fn = control_flow_ops._Merge # pylint: disable=protected-access
merge_op = merge_fn([grad[0], grad[0]], name="b_switch")[0]
grad_ctxt.grad_state.switch_map[op] = merge_op.op
return merge_op, None
elif isinstance(op_ctxt, CondContext):
good_grad = grad[op_ctxt.branch]
zero_grad = grad[1 - op_ctxt.branch]
# If we are in a grad context, this switch is part of a cond within a
# loop. In this case, we have called ControlFlowState.ZeroLike() so grad
# is ready for merge. Otherwise, we need a switch to control zero_grad.
if not (grad_ctxt and grad_ctxt.grad_state):
dtype = good_grad.dtype
branch = op_ctxt.branch
zero_grad = switch(zero_grad, op_ctxt.pred, dtype=dtype)[1 - branch]
return merge([good_grad, zero_grad], name="cond_grad")[0], None
else:
false_grad = switch(grad[0], op.inputs[1])[0]
true_grad = switch(grad[1], op.inputs[1])[1]
return merge([false_grad, true_grad])[0], None
ops.RegisterGradient("Switch")(_SwitchGrad)
ops.RegisterGradient("RefSwitch")(_SwitchGrad)
@ops.RegisterGradient("Merge")
def _MergeGrad(op, grad, _):
"""Gradients for a Merge op are calculated using a Switch op."""
input_op = op.inputs[0].op
graph = ops.get_default_graph()
# pylint: disable=protected-access
op_ctxt = input_op._get_control_flow_context()
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if isinstance(op_ctxt, WhileContext):
# pylint: disable=protected-access
return control_flow_ops._SwitchRefOrTensor(grad, grad_ctxt.pivot)
# pylint: enable=protected-access
elif isinstance(op_ctxt, CondContext):
pred = op_ctxt.pred
if grad_ctxt and grad_ctxt.grad_state:
# This Merge node is part of a cond within a loop.
# The backprop needs to have the value of this predicate for every
# iteration. So we must have its values accumulated in the forward, and
# use the accumulated values as the predicate for this backprop switch.
grad_state = grad_ctxt.grad_state
real_pred = grad_state.history_map.get(pred.name)
if real_pred is None:
# Remember the value of pred for every iteration.
grad_ctxt = grad_state.grad_context
grad_ctxt.Exit()
history_pred = grad_state.AddForwardAccumulator(pred)
grad_ctxt.Enter()
# Add the stack pop op. If pred.op is in a (outer) CondContext,
# the stack pop will be guarded with a switch.
real_pred = grad_state.AddBackPropAccumulatedValue(history_pred, pred)
grad_state.history_map[pred.name] = real_pred
pred = real_pred
# pylint: disable=protected-access
return control_flow_ops._SwitchRefOrTensor(grad, pred, name="cond_grad")
# pylint: enable=protected-access
else:
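# This Merge is not inside a cond or a while loop. The Merge op's second
# output (op.outputs[1], the value_index) records which input was actually
# taken, so the incoming gradient is routed back to that input through a
# Switch; the other inputs receive no gradient.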
num_inputs = len(op.inputs)
cond = [math_ops.equal(op.outputs[1], i) for i in xrange(num_inputs)]
# pylint: disable=protected-access
return [control_flow_ops._SwitchRefOrTensor(grad, cond[i])[1]
for i in xrange(num_inputs)]
# pylint: enable=protected-access
@ops.RegisterGradient("RefMerge")
def _RefMergeGrad(op, grad, _):
return _MergeGrad(op, grad, _)
@ops.RegisterGradient("Exit")
def _ExitGrad(_, grad):
"""Gradients for an exit op are calculated using an Enter op."""
graph = ops.get_default_graph()
# pylint: disable=protected-access
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if not grad_ctxt.back_prop:
# The flag `back_prop` is set by users to suppress gradient
# computation for this loop. If the flag `back_prop` is false,
# no gradient is computed for this loop.
return None
grad_ctxt.AddName(grad.name)
enter_fn = control_flow_ops._Enter # pylint: disable=protected-access
grad_ctxt.Enter()
result = enter_fn(grad, grad_ctxt.name, is_constant=False,
parallel_iterations=grad_ctxt.parallel_iterations,
name="b_exit")
grad_ctxt.Exit()
return result
ops.RegisterGradient("RefExit")(_ExitGrad)
@ops.RegisterGradient("NextIteration")
def _NextIterationGrad(_, grad):
"""A forward next_iteration is translated into a backprop identity.
Note that the backprop next_iteration is added in switch grad.
"""
return grad
@ops.RegisterGradient("RefNextIteration")
def _RefNextIterationGrad(_, grad):
return _NextIterationGrad(_, grad)
@ops.RegisterGradient("Enter")
def _EnterGrad(op, grad):
"""Gradients for an Enter are calculated using an Exit op.
For loop variables, grad is the gradient so just add an exit.
For loop invariants, we need to add an accumulator loop.
"""
graph = ops.get_default_graph()
# pylint: disable=protected-access
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if not grad_ctxt.back_prop:
# If the flag `back_prop` is false, no gradient computation is done;
# just forward the incoming gradient unchanged.
return grad
if op.get_attr("is_constant"):
# Add a gradient accumulator for each loop invariant.
result = grad_ctxt.AddBackPropAccumulator(grad)
else:
result = exit(grad)
grad_ctxt.ExitResult([result])
return result
@ops.RegisterGradient("RefEnter")
def _RefEnterGrad(op, grad):
return _EnterGrad(op, grad)
@ops.RegisterGradient("LoopCond")
def _LoopCondGrad(_):
"""Stop backprop for the predicate of a while loop."""
return None
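# --- Editor's sketch (not part of the original module) ----------------------
# A hedged, minimal illustration of the Switch/Merge duality implemented
# above: differentiating through a conditional turns the forward Switch into
# a backward Merge (_SwitchGrad) and the forward Merge into a backward Switch
# (_MergeGrad). Assumes the public tf.cond / tf.gradients / tf.Session APIs
# are available in this TensorFlow version; guarded so importing the module
# is unaffected.
if __name__ == "__main__":
    import tensorflow as tf
    x = tf.constant(3.0)
    pred = tf.constant(True)
    # Forward graph: a Switch routes x into one branch, a Merge joins them.
    y = tf.cond(pred, lambda: x * x, lambda: x + 1.0)
    # Backward graph: built with the gradient functions registered above.
    dy_dx, = tf.gradients(y, [x])
    with tf.Session() as sess:
        print(sess.run(dy_dx))  # 6.0: d(x*x)/dx at x = 3 for the true branch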
|
martinbede/second-sight
|
tensorflow/python/ops/control_flow_grad.py
|
Python
|
apache-2.0
| 7,938
|
[
"VisIt"
] |
5b95e543129e1ff346b14495e8831ac419351eaa2be904033173fc60a941b274
|
# -*- coding: utf-8 -*-
"""
Regression tests for the Test Client, especially the customized assertions.
"""
from __future__ import unicode_literals
import datetime
import itertools
import os
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.core.urlresolvers import NoReverseMatch, reverse
from django.http import HttpResponse
from django.template import Context, TemplateSyntaxError, engines
from django.template.response import SimpleTemplateResponse
from django.test import (
Client, SimpleTestCase, TestCase, ignore_warnings, override_settings,
)
from django.test.client import RedirectCycleError, RequestFactory, encode_file
from django.test.utils import ContextList, str_prefix
from django.utils._os import upath
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from django.utils.translation import ugettext_lazy
from .models import CustomUser
from .views import CustomTestException
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
first_name='Test', last_name='Client', email='testclient@example.com', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u2 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='inactive',
first_name='Inactive', last_name='User', email='testclient@example.com', is_staff=False, is_active=False,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u3 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
first_name='Staff', last_name='Member', email='testclient@example.com', is_staff=True, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class AssertContainsTests(SimpleTestCase):
def test_contains(self):
"Responses can be inspected for content, including counting repeated substrings"
response = self.client.get('/no_template_view/')
self.assertNotContains(response, 'never')
self.assertContains(response, 'never', 0)
self.assertContains(response, 'once')
self.assertContains(response, 'once', 1)
self.assertContains(response, 'twice')
self.assertContains(response, 'twice', 2)
try:
self.assertContains(response, 'text', status_code=999)
except AssertionError as e:
self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'text', status_code=999)
except AssertionError as e:
self.assertIn("Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'text', status_code=999, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't retrieve content: Response code was 200 (expected 999)", str(e))
try:
self.assertNotContains(response, 'once')
except AssertionError as e:
self.assertIn("Response should not contain 'once'", str(e))
try:
self.assertNotContains(response, 'once', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response should not contain 'once'", str(e))
try:
self.assertContains(response, 'never', 1)
except AssertionError as e:
self.assertIn("Found 0 instances of 'never' in response (expected 1)", str(e))
try:
self.assertContains(response, 'never', 1, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 0 instances of 'never' in response (expected 1)", str(e))
try:
self.assertContains(response, 'once', 0)
except AssertionError as e:
self.assertIn("Found 1 instances of 'once' in response (expected 0)", str(e))
try:
self.assertContains(response, 'once', 0, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 1 instances of 'once' in response (expected 0)", str(e))
try:
self.assertContains(response, 'once', 2)
except AssertionError as e:
self.assertIn("Found 1 instances of 'once' in response (expected 2)", str(e))
try:
self.assertContains(response, 'once', 2, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 1 instances of 'once' in response (expected 2)", str(e))
try:
self.assertContains(response, 'twice', 1)
except AssertionError as e:
self.assertIn("Found 2 instances of 'twice' in response (expected 1)", str(e))
try:
self.assertContains(response, 'twice', 1, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 2 instances of 'twice' in response (expected 1)", str(e))
try:
self.assertContains(response, 'thrice')
except AssertionError as e:
self.assertIn("Couldn't find 'thrice' in response", str(e))
try:
self.assertContains(response, 'thrice', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't find 'thrice' in response", str(e))
try:
self.assertContains(response, 'thrice', 3)
except AssertionError as e:
self.assertIn("Found 0 instances of 'thrice' in response (expected 3)", str(e))
try:
self.assertContains(response, 'thrice', 3, msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Found 0 instances of 'thrice' in response (expected 3)", str(e))
def test_unicode_contains(self):
"Unicode characters can be found in template context"
# Regression test for #10183
r = self.client.get('/check_unicode/')
self.assertContains(r, 'さかき')
self.assertContains(r, b'\xe5\xb3\xa0'.decode('utf-8'))
def test_unicode_not_contains(self):
"Unicode characters can be searched for, and not found in template context"
# Regression test for #10183
r = self.client.get('/check_unicode/')
self.assertNotContains(r, 'はたけ')
self.assertNotContains(r, b'\xe3\x81\xaf\xe3\x81\x9f\xe3\x81\x91'.decode('utf-8'))
def test_binary_contains(self):
r = self.client.get('/check_binary/')
self.assertContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e')
with self.assertRaises(AssertionError):
self.assertContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e', count=2)
def test_binary_not_contains(self):
r = self.client.get('/check_binary/')
self.assertNotContains(r, b'%ODF-1.4\r\n%\x93\x8c\x8b\x9e')
with self.assertRaises(AssertionError):
self.assertNotContains(r, b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e')
def test_nontext_contains(self):
r = self.client.get('/no_template_view/')
self.assertContains(r, ugettext_lazy('once'))
def test_nontext_not_contains(self):
r = self.client.get('/no_template_view/')
self.assertNotContains(r, ugettext_lazy('never'))
def test_assert_contains_renders_template_response(self):
""" Test that we can pass in an unrendered SimpleTemplateReponse
without throwing an error.
Refs #15826.
"""
template = engines['django'].from_string('Hello')
response = SimpleTemplateResponse(template)
self.assertContains(response, 'Hello')
def test_assert_contains_using_non_template_response(self):
""" Test that auto-rendering does not affect responses that aren't
instances (or subclasses) of SimpleTemplateResponse.
Refs #15826.
"""
response = HttpResponse('Hello')
self.assertContains(response, 'Hello')
def test_assert_not_contains_renders_template_response(self):
""" Test that we can pass in an unrendered SimpleTemplateReponse
without throwing an error.
Refs #15826.
"""
template = engines['django'].from_string('Hello')
response = SimpleTemplateResponse(template)
self.assertNotContains(response, 'Bye')
def test_assert_not_contains_using_non_template_response(self):
""" Test that auto-rendering does not affect responses that aren't
instances (or subclasses) of SimpleTemplateResponse.
Refs #15826.
"""
response = HttpResponse('Hello')
self.assertNotContains(response, 'Bye')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='test_client_regress.urls',)
class AssertTemplateUsedTests(TestDataMixin, TestCase):
def test_no_context(self):
"Template usage assertions work then templates aren't in use"
response = self.client.get('/no_template_view/')
# Check that the no template case doesn't mess with the template assertions
self.assertTemplateNotUsed(response, 'GET Template')
try:
self.assertTemplateUsed(response, 'GET Template')
except AssertionError as e:
self.assertIn("No templates used to render the response", str(e))
try:
self.assertTemplateUsed(response, 'GET Template', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: No templates used to render the response", str(e))
with self.assertRaises(AssertionError) as context:
self.assertTemplateUsed(response, 'GET Template', count=2)
self.assertIn(
"No templates used to render the response",
str(context.exception))
def test_single_context(self):
"Template assertions work when there is a single context"
response = self.client.get('/post_view/', {})
try:
self.assertTemplateNotUsed(response, 'Empty GET Template')
except AssertionError as e:
self.assertIn("Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateNotUsed(response, 'Empty GET Template', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Template 'Empty GET Template' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateUsed(response, 'Empty POST Template')
except AssertionError as e:
self.assertIn("Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template", str(e))
try:
self.assertTemplateUsed(response, 'Empty POST Template', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template", str(e))
with self.assertRaises(AssertionError) as context:
self.assertTemplateUsed(response, 'Empty GET Template', count=2)
self.assertIn(
"Template 'Empty GET Template' was expected to be rendered 2 "
"time(s) but was actually rendered 1 time(s).",
str(context.exception))
with self.assertRaises(AssertionError) as context:
self.assertTemplateUsed(
response, 'Empty GET Template', msg_prefix='abc', count=2)
self.assertIn(
"abc: Template 'Empty GET Template' was expected to be rendered 2 "
"time(s) but was actually rendered 1 time(s).",
str(context.exception))
def test_multiple_context(self):
"Template assertions work when there are multiple contexts"
post_data = {
'text': 'Hello World',
'email': 'foo@example.com',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
try:
self.assertTemplateNotUsed(response, "form_view.html")
except AssertionError as e:
self.assertIn("Template 'form_view.html' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateNotUsed(response, 'base.html')
except AssertionError as e:
self.assertIn("Template 'base.html' was used unexpectedly in rendering the response", str(e))
try:
self.assertTemplateUsed(response, "Valid POST Template")
except AssertionError as e:
self.assertIn("Template 'Valid POST Template' was not a template used to render the response. Actual template(s) used: form_view.html, base.html", str(e))
with self.assertRaises(AssertionError) as context:
self.assertTemplateUsed(response, 'base.html', count=2)
self.assertIn(
"Template 'base.html' was expected to be rendered 2 "
"time(s) but was actually rendered 1 time(s).",
str(context.exception))
def test_template_rendered_multiple_times(self):
"""Template assertions work when a template is rendered multiple times."""
response = self.client.get('/render_template_multiple_times/')
self.assertTemplateUsed(response, 'base.html', count=2)
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class AssertRedirectsTests(SimpleTestCase):
def test_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/permanent_redirect_view/')
try:
self.assertRedirects(response, '/get_view/')
except AssertionError as e:
self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
try:
self.assertRedirects(response, '/get_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
def test_lost_query(self):
"An assertion is raised if the redirect location doesn't preserve GET parameters"
response = self.client.get('/redirect_view/', {'var': 'value'})
try:
self.assertRedirects(response, '/get_view/')
except AssertionError as e:
self.assertIn("Response redirected to '/get_view/?var=value', expected '/get_view/'", str(e))
try:
self.assertRedirects(response, '/get_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response redirected to '/get_view/?var=value', expected '/get_view/'", str(e))
def test_incorrect_target(self):
"An assertion is raised if the response redirects to another target"
response = self.client.get('/permanent_redirect_view/')
try:
# Should redirect to get_view
self.assertRedirects(response, '/some_view/')
except AssertionError as e:
self.assertIn("Response didn't redirect as expected: Response code was 301 (expected 302)", str(e))
def test_target_page(self):
"An assertion is raised if the response redirect target cannot be retrieved as expected"
response = self.client.get('/double_redirect_view/')
try:
# The redirect target responds with a 301 code, not 200
self.assertRedirects(response, 'http://testserver/permanent_redirect_view/')
except AssertionError as e:
self.assertIn("Couldn't retrieve redirection page '/permanent_redirect_view/': response code was 301 (expected 200)", str(e))
try:
# The redirect target responds with a 301 code, not 200
self.assertRedirects(response, 'http://testserver/permanent_redirect_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Couldn't retrieve redirection page '/permanent_redirect_view/': response code was 301 (expected 200)", str(e))
def test_redirect_chain(self):
"You can follow a redirect chain of multiple redirects"
response = self.client.get('/redirects/further/more/', {}, follow=True)
self.assertRedirects(response, '/no_template_view/',
status_code=302, target_status_code=200)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0], ('/no_template_view/', 302))
def test_multiple_redirect_chain(self):
"You can follow a redirect chain of multiple redirects"
response = self.client.get('/redirects/', {}, follow=True)
self.assertRedirects(response, '/no_template_view/',
status_code=302, target_status_code=200)
self.assertEqual(len(response.redirect_chain), 3)
self.assertEqual(response.redirect_chain[0], ('/redirects/further/', 302))
self.assertEqual(response.redirect_chain[1], ('/redirects/further/more/', 302))
self.assertEqual(response.redirect_chain[2], ('/no_template_view/', 302))
def test_redirect_chain_to_non_existent(self):
"You can follow a chain to a non-existent view"
response = self.client.get('/redirect_to_non_existent_view2/', {}, follow=True)
self.assertRedirects(response, '/non_existent_view/',
status_code=302, target_status_code=404)
def test_redirect_chain_to_self(self):
"Redirections to self are caught and escaped"
with self.assertRaises(RedirectCycleError) as context:
self.client.get('/redirect_to_self/', {}, follow=True)
response = context.exception.last_response
# The chain of redirects stops once the cycle is detected.
self.assertRedirects(response, '/redirect_to_self/',
status_code=302, target_status_code=302)
self.assertEqual(len(response.redirect_chain), 2)
def test_redirect_to_self_with_changing_query(self):
"Redirections don't loop forever even if query is changing"
with self.assertRaises(RedirectCycleError):
self.client.get('/redirect_to_self_with_changing_query_view/', {'counter': '0'}, follow=True)
def test_circular_redirect(self):
"Circular redirect chains are caught and escaped"
with self.assertRaises(RedirectCycleError) as context:
self.client.get('/circular_redirect_1/', {}, follow=True)
response = context.exception.last_response
# The chain of redirects will get back to the starting point, but stop there.
self.assertRedirects(response, '/circular_redirect_2/',
status_code=302, target_status_code=302)
self.assertEqual(len(response.redirect_chain), 4)
def test_redirect_chain_post(self):
"A redirect chain will be followed from an initial POST post"
response = self.client.post('/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/no_template_view/', 302, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_head(self):
"A redirect chain will be followed from an initial HEAD request"
response = self.client.head('/redirects/',
{'nothing': 'to_send'}, follow=True)
self.assertRedirects(response,
'/no_template_view/', 302, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_options(self):
"A redirect chain will be followed from an initial OPTIONS request"
response = self.client.options('/redirects/',
follow=True)
self.assertRedirects(response,
'/no_template_view/', 302, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_put(self):
"A redirect chain will be followed from an initial PUT request"
response = self.client.put('/redirects/',
follow=True)
self.assertRedirects(response,
'/no_template_view/', 302, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_chain_delete(self):
"A redirect chain will be followed from an initial DELETE request"
response = self.client.delete('/redirects/',
follow=True)
self.assertRedirects(response,
'/no_template_view/', 302, 200)
self.assertEqual(len(response.redirect_chain), 3)
def test_redirect_to_different_host(self):
"The test client will preserve scheme, host and port changes"
response = self.client.get('/redirect_other_host/', follow=True)
self.assertRedirects(response,
'https://otherserver:8443/no_template_view/',
status_code=302, target_status_code=200)
# We can't use is_secure() or get_host()
# because response.request is a dictionary, not an HttpRequest
self.assertEqual(response.request.get('wsgi.url_scheme'), 'https')
self.assertEqual(response.request.get('SERVER_NAME'), 'otherserver')
self.assertEqual(response.request.get('SERVER_PORT'), '8443')
def test_redirect_chain_on_non_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page does not redirect; it responds with code 200, not 302
response = self.client.get('/get_view/', follow=True)
try:
self.assertRedirects(response, '/get_view/')
except AssertionError as e:
self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
try:
self.assertRedirects(response, '/get_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
def test_redirect_on_non_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page does not redirect; it responds with code 200, not 302
response = self.client.get('/get_view/')
try:
self.assertRedirects(response, '/get_view/')
except AssertionError as e:
self.assertIn("Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
try:
self.assertRedirects(response, '/get_view/', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: Response didn't redirect as expected: Response code was 200 (expected 302)", str(e))
def test_redirect_scheme(self):
"An assertion is raised if the response doesn't have the scheme specified in expected_url"
# For all possible True/False combinations of follow and secure
for follow, secure in itertools.product([True, False], repeat=2):
# always redirects to https
response = self.client.get('/https_redirect_view/', follow=follow, secure=secure)
# the goal scheme is https
self.assertRedirects(response, 'https://testserver/secure_view/', status_code=302)
with self.assertRaises(AssertionError):
self.assertRedirects(response, 'http://testserver/secure_view/', status_code=302)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_full_path_in_expected_urls(self):
"""
Test that specifying a full URL as assertRedirects expected_url still
works as backwards-compatible behavior until Django 2.0.
"""
response = self.client.get('/redirect_view/')
self.assertRedirects(response, 'http://testserver/get_view/')
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class AssertFormErrorTests(SimpleTestCase):
def test_unknown_form(self):
"An assertion is raised if the form name is unknown"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.')
except AssertionError as e:
self.assertIn("The form 'wrong_form' was not used to render the response", str(e))
try:
self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: The form 'wrong_form' was not used to render the response", str(e))
def test_unknown_field(self):
"An assertion is raised if the field name is unknown"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'some_field', 'Some error.')
except AssertionError as e:
self.assertIn("The form 'form' in context 0 does not contain the field 'some_field'", str(e))
try:
self.assertFormError(response, 'form', 'some_field', 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: The form 'form' in context 0 does not contain the field 'some_field'", str(e))
def test_noerror_field(self):
"An assertion is raised if the field doesn't have any errors"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'value', 'Some error.')
except AssertionError as e:
self.assertIn("The field 'value' on form 'form' in context 0 contains no errors", str(e))
try:
self.assertFormError(response, 'form', 'value', 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: The field 'value' on form 'form' in context 0 contains no errors", str(e))
def test_unknown_error(self):
"An assertion is raised if the field doesn't contain the provided error"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'email', 'Some error.')
except AssertionError as e:
self.assertIn(str_prefix("The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [%(_)s'Enter a valid email address.'])"), str(e))
try:
self.assertFormError(response, 'form', 'email', 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn(str_prefix("abc: The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [%(_)s'Enter a valid email address.'])"), str(e))
def test_unknown_nonfield_error(self):
"""
Checks that an assertion is raised if the form's non-field errors
don't contain the provided error.
"""
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', None, 'Some error.')
except AssertionError as e:
self.assertIn("The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )", str(e))
try:
self.assertFormError(response, 'form', None, 'Some error.', msg_prefix='abc')
except AssertionError as e:
self.assertIn("abc: The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )", str(e))
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class AssertFormsetErrorTests(SimpleTestCase):
msg_prefixes = [("", {}), ("abc: ", {"msg_prefix": "abc"})]
def setUp(self):
"""Makes response object for testing field and non-field errors"""
# For testing field and non-field errors
self.response_form_errors = self.getResponse({
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '2',
'form-0-text': 'Raise non-field error',
'form-0-email': 'not an email address',
'form-0-value': 37,
'form-0-single': 'b',
'form-0-multi': ('b', 'c', 'e'),
'form-1-text': 'Hello World',
'form-1-email': 'email@domain.com',
'form-1-value': 37,
'form-1-single': 'b',
'form-1-multi': ('b', 'c', 'e'),
})
# For testing non-form errors
self.response_nonform_errors = self.getResponse({
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '2',
'form-0-text': 'Hello World',
'form-0-email': 'email@domain.com',
'form-0-value': 37,
'form-0-single': 'b',
'form-0-multi': ('b', 'c', 'e'),
'form-1-text': 'Hello World',
'form-1-email': 'email@domain.com',
'form-1-value': 37,
'form-1-single': 'b',
'form-1-multi': ('b', 'c', 'e'),
})
def getResponse(self, post_data):
response = self.client.post('/formset_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
return response
def test_unknown_formset(self):
"An assertion is raised if the formset name is unknown"
for prefix, kwargs in self.msg_prefixes:
with self.assertRaises(AssertionError) as cm:
self.assertFormsetError(self.response_form_errors,
'wrong_formset',
0,
'Some_field',
'Some error.',
**kwargs)
self.assertIn(prefix + "The formset 'wrong_formset' was not "
"used to render the response",
str(cm.exception))
def test_unknown_field(self):
"An assertion is raised if the field name is unknown"
for prefix, kwargs in self.msg_prefixes:
with self.assertRaises(AssertionError) as cm:
self.assertFormsetError(self.response_form_errors,
'my_formset',
0,
'Some_field',
'Some error.',
**kwargs)
self.assertIn(prefix + "The formset 'my_formset', "
"form 0 in context 0 "
"does not contain the field 'Some_field'",
str(cm.exception))
def test_no_error_field(self):
"An assertion is raised if the field doesn't have any errors"
for prefix, kwargs in self.msg_prefixes:
with self.assertRaises(AssertionError) as cm:
self.assertFormsetError(self.response_form_errors,
'my_formset',
1,
'value',
'Some error.',
**kwargs)
self.assertIn(prefix + "The field 'value' "
"on formset 'my_formset', form 1 "
"in context 0 contains no errors",
str(cm.exception))
def test_unknown_error(self):
"An assertion is raised if the field doesn't contain the specified error"
for prefix, kwargs in self.msg_prefixes:
with self.assertRaises(AssertionError) as cm:
self.assertFormsetError(self.response_form_errors,
'my_formset',
0,
'email',
'Some error.',
**kwargs)
self.assertIn(str_prefix(prefix + "The field 'email' "
"on formset 'my_formset', form 0 in context 0 does not "
"contain the error 'Some error.' (actual errors: "
"[%(_)s'Enter a valid email address.'])"),
str(cm.exception))
def test_field_error(self):
"No assertion is raised if the field contains the provided error"
for prefix, kwargs in self.msg_prefixes:
self.assertFormsetError(self.response_form_errors,
'my_formset',
0,
'email',
['Enter a valid email address.'],
**kwargs)
def test_no_nonfield_error(self):
"An assertion is raised if the formsets non-field errors doesn't contain any errors."
for prefix, kwargs in self.msg_prefixes:
with self.assertRaises(AssertionError) as cm:
self.assertFormsetError(self.response_form_errors,
'my_formset',
1,
None,
'Some error.',
**kwargs)
self.assertIn(prefix + "The formset 'my_formset', form 1 in "
"context 0 does not contain any "
"non-field errors.",
str(cm.exception))
def test_unknown_nonfield_error(self):
"An assertion is raised if the formsets non-field errors doesn't contain the provided error."
for prefix, kwargs in self.msg_prefixes:
with self.assertRaises(AssertionError) as cm:
self.assertFormsetError(self.response_form_errors,
'my_formset',
0,
None,
'Some error.',
**kwargs)
self.assertIn(str_prefix(prefix +
"The formset 'my_formset', form 0 in context 0 does not "
"contain the non-field error 'Some error.' (actual errors: "
"[%(_)s'Non-field error.'])"), str(cm.exception))
def test_nonfield_error(self):
"No assertion is raised if the formsets non-field errors contains the provided error."
for prefix, kwargs in self.msg_prefixes:
self.assertFormsetError(self.response_form_errors,
'my_formset',
0,
None,
'Non-field error.',
**kwargs)
def test_no_nonform_error(self):
"An assertion is raised if the formsets non-form errors doesn't contain any errors."
for prefix, kwargs in self.msg_prefixes:
with self.assertRaises(AssertionError) as cm:
self.assertFormsetError(self.response_form_errors,
'my_formset',
None,
None,
'Some error.',
**kwargs)
self.assertIn(prefix + "The formset 'my_formset' in context 0 "
"does not contain any non-form errors.",
str(cm.exception))
def test_unknown_nonform_error(self):
"An assertion is raised if the formsets non-form errors doesn't contain the provided error."
for prefix, kwargs in self.msg_prefixes:
with self.assertRaises(AssertionError) as cm:
self.assertFormsetError(self.response_nonform_errors,
'my_formset',
None,
None,
'Some error.',
**kwargs)
self.assertIn(str_prefix(prefix +
"The formset 'my_formset' in context 0 does not contain the "
"non-form error 'Some error.' (actual errors: [%(_)s'Forms "
"in a set must have distinct email addresses.'])"), str(cm.exception))
def test_nonform_error(self):
"No assertion is raised if the formsets non-form errors contains the provided error."
for prefix, kwargs in self.msg_prefixes:
self.assertFormsetError(self.response_nonform_errors,
'my_formset',
None,
None,
'Forms in a set must have distinct email '
'addresses.',
**kwargs)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='test_client_regress.urls',)
class LoginTests(TestDataMixin, TestCase):
def test_login_different_client(self):
"Check that using a different test client doesn't violate authentication"
# Create a second client, and log in.
c = Client()
login = c.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Get a redirection page with the second client.
response = c.get("/login_protected_redirect_view/")
# At this point, self.client isn't logged in.
# Check that assertRedirects uses the original client, not the
# default client.
self.assertRedirects(response, "/get_view/")
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
SESSION_ENGINE='test_client_regress.session',
ROOT_URLCONF='test_client_regress.urls',
)
class SessionEngineTests(TestDataMixin, TestCase):
def test_login(self):
"A session engine that modifies the session key can be used to log in"
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Try to access a login protected page.
response = self.client.get("/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
@override_settings(ROOT_URLCONF='test_client_regress.urls',)
class URLEscapingTests(SimpleTestCase):
def test_simple_argument_get(self):
"Get a view that has a simple string argument"
response = self.client.get(reverse('arg_view', args=['Slartibartfast']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Howdy, Slartibartfast')
def test_argument_with_space_get(self):
"Get a view that has a string argument that requires escaping"
response = self.client.get(reverse('arg_view', args=['Arthur Dent']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Hi, Arthur')
def test_simple_argument_post(self):
"Post for a view that has a simple string argument"
response = self.client.post(reverse('arg_view', args=['Slartibartfast']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Howdy, Slartibartfast')
def test_argument_with_space_post(self):
"Post for a view that has a string argument that requires escaping"
response = self.client.post(reverse('arg_view', args=['Arthur Dent']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Hi, Arthur')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='test_client_regress.urls',)
class ExceptionTests(TestDataMixin, TestCase):
def test_exception_cleared(self):
"#5836 - A stale user exception isn't re-raised by the test client."
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
try:
self.client.get("/staff_only/")
self.fail("General users should not be able to visit this page")
except CustomTestException:
pass
# At this point, an exception has been raised, and should be cleared.
# This next operation should be successful; if it isn't we have a problem.
login = self.client.login(username='staff', password='password')
self.assertTrue(login, 'Could not log in')
try:
self.client.get("/staff_only/")
except CustomTestException:
self.fail("Staff should be able to visit this page")
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class TemplateExceptionTests(SimpleTestCase):
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(upath(__file__)), 'bad_templates')],
}])
def test_bad_404_template(self):
"Errors found when rendering 404 error templates are re-raised"
try:
self.client.get("/no_such_view/")
except TemplateSyntaxError:
pass
else:
self.fail("Should get error about syntax error in template")
# We need two different tests to check URLconf substitution - one to check
# it was changed, and another one (without self.urls) to check it was reverted on
# teardown. This pair of tests relies upon the alphabetical ordering of test execution.
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class UrlconfSubstitutionTests(SimpleTestCase):
def test_urlconf_was_changed(self):
"TestCase can enforce a custom URLconf on a per-test basis"
url = reverse('arg_view', args=['somename'])
self.assertEqual(url, '/arg_view/somename/')
# This test needs to run *after* UrlconfSubstitutionTests; the zz prefix in the
# name is to ensure alphabetical ordering.
class zzUrlconfSubstitutionTests(SimpleTestCase):
def test_urlconf_was_reverted(self):
"""URLconf is reverted to original value after modification in a TestCase
This will not find a match as the default ROOT_URLCONF is empty.
"""
with self.assertRaises(NoReverseMatch):
reverse('arg_view', args=['somename'])
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='test_client_regress.urls',)
class ContextTests(TestDataMixin, TestCase):
def test_single_context(self):
"Context variables can be retrieved from a single context"
response = self.client.get("/request_data/", data={'foo': 'whiz'})
self.assertEqual(response.context.__class__, Context)
self.assertIn('get-foo', response.context)
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['data'], 'sausage')
try:
response.context['does-not-exist']
self.fail('Should not be able to retrieve non-existent key')
except KeyError as e:
self.assertEqual(e.args[0], 'does-not-exist')
def test_inherited_context(self):
"Context variables can be retrieved from a list of contexts"
response = self.client.get("/request_data_extended/", data={'foo': 'whiz'})
self.assertEqual(response.context.__class__, ContextList)
self.assertEqual(len(response.context), 2)
self.assertIn('get-foo', response.context)
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['data'], 'bacon')
try:
response.context['does-not-exist']
self.fail('Should not be able to retrieve non-existent key')
except KeyError as e:
self.assertEqual(e.args[0], 'does-not-exist')
def test_contextlist_keys(self):
c1 = Context()
c1.update({'hello': 'world', 'goodbye': 'john'})
c1.update({'hello': 'dolly', 'dolly': 'parton'})
c2 = Context()
c2.update({'goodbye': 'world', 'python': 'rocks'})
c2.update({'goodbye': 'dolly'})
l = ContextList([c1, c2])
# None, True and False are builtins of BaseContext, and present
# in every Context without needing to be added.
self.assertEqual({'None', 'True', 'False', 'hello', 'goodbye',
'python', 'dolly'},
l.keys())
@ignore_warnings(category=RemovedInDjango110Warning)
def test_15368(self):
# Need to insert a context processor that assumes certain things about
# the request instance. This triggers a bug caused by some ways of
# copying RequestContext.
with self.settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'test_client_regress.context_processors.special',
],
},
}]):
response = self.client.get("/request_context_view/")
self.assertContains(response, 'Path: /request_context_view/')
def test_nested_requests(self):
"""
response.context is not lost when a view calls another view.
"""
response = self.client.get("/nested_view/")
self.assertEqual(response.context.__class__, Context)
self.assertEqual(response.context['nested'], 'yes')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='test_client_regress.urls',)
class SessionTests(TestDataMixin, TestCase):
def test_session(self):
"The session isn't lost if a user logs in"
# The session doesn't exist to start.
response = self.client.get('/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'NO')
# This request sets a session variable.
response = self.client.get('/set_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'set_session')
# Check that the session has been modified
response = self.client.get('/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'YES')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Session should still contain the modified value
response = self.client.get('/check_session/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'YES')
def test_session_initiated(self):
session = self.client.session
session['session_var'] = 'foo'
session.save()
response = self.client.get('/check_session/')
self.assertEqual(response.content, b'foo')
def test_logout(self):
"""Logout should work whether the user is logged in or not (#9978)."""
self.client.logout()
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
self.client.logout()
self.client.logout()
def test_logout_with_user(self):
"""Logout should send user_logged_out signal if user was logged in."""
def listener(*args, **kwargs):
listener.executed = True
self.assertEqual(kwargs['sender'], User)
listener.executed = False
user_logged_out.connect(listener)
self.client.login(username='testclient', password='password')
self.client.logout()
user_logged_out.disconnect(listener)
self.assertTrue(listener.executed)
@override_settings(AUTH_USER_MODEL='test_client_regress.CustomUser')
def test_logout_with_custom_user(self):
"""Logout should send user_logged_out signal if custom user was logged in."""
def listener(*args, **kwargs):
self.assertEqual(kwargs['sender'], CustomUser)
listener.executed = True
listener.executed = False
u = CustomUser.custom_objects.create(email='test@test.com')
u.set_password('password')
u.save()
user_logged_out.connect(listener)
self.client.login(username='test@test.com', password='password')
self.client.logout()
user_logged_out.disconnect(listener)
self.assertTrue(listener.executed)
@override_settings(AUTHENTICATION_BACKENDS=(
'django.contrib.auth.backends.ModelBackend',
'test_client_regress.auth_backends.CustomUserBackend'))
def test_logout_with_custom_auth_backend(self):
"Request a logout after logging in with custom authentication backend"
def listener(*args, **kwargs):
self.assertEqual(kwargs['sender'], CustomUser)
listener.executed = True
listener.executed = False
u = CustomUser.custom_objects.create(email='test@test.com')
u.set_password('password')
u.save()
user_logged_out.connect(listener)
self.client.login(username='test@test.com', password='password')
self.client.logout()
user_logged_out.disconnect(listener)
self.assertTrue(listener.executed)
def test_logout_without_user(self):
"""Logout should send signal even if user not authenticated."""
def listener(user, *args, **kwargs):
listener.user = user
listener.executed = True
listener.executed = False
user_logged_out.connect(listener)
self.client.login(username='incorrect', password='password')
self.client.logout()
user_logged_out.disconnect(listener)
self.assertTrue(listener.executed)
self.assertIsNone(listener.user)
def test_login_with_user(self):
"""Login should send user_logged_in signal on successful login."""
def listener(*args, **kwargs):
listener.executed = True
listener.executed = False
user_logged_in.connect(listener)
self.client.login(username='testclient', password='password')
user_logged_in.disconnect(listener)
self.assertTrue(listener.executed)
def test_login_without_signal(self):
"""Login shouldn't send signal if user wasn't logged in"""
def listener(*args, **kwargs):
listener.executed = True
listener.executed = False
user_logged_in.connect(listener)
self.client.login(username='incorrect', password='password')
user_logged_in.disconnect(listener)
self.assertFalse(listener.executed)
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class RequestMethodTests(SimpleTestCase):
def test_get(self):
"Request a view via request method GET"
response = self.client.get('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: GET')
def test_post(self):
"Request a view via request method POST"
response = self.client.post('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: POST')
def test_head(self):
"Request a view via request method HEAD"
response = self.client.head('/request_methods/')
self.assertEqual(response.status_code, 200)
# A HEAD request doesn't return any content.
self.assertNotEqual(response.content, b'request method: HEAD')
self.assertEqual(response.content, b'')
def test_options(self):
"Request a view via request method OPTIONS"
response = self.client.options('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: OPTIONS')
def test_put(self):
"Request a view via request method PUT"
response = self.client.put('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: PUT')
def test_delete(self):
"Request a view via request method DELETE"
response = self.client.delete('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: DELETE')
def test_patch(self):
"Request a view via request method PATCH"
response = self.client.patch('/request_methods/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: PATCH')
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class RequestMethodStringDataTests(SimpleTestCase):
def test_post(self):
"Request a view with string data via request method POST"
# Regression test for #11371
data = '{"test": "json"}'
response = self.client.post('/request_methods/', data=data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: POST')
def test_put(self):
"Request a view with string data via request method PUT"
# Regression test for #11371
data = '{"test": "json"}'
response = self.client.put('/request_methods/', data=data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: PUT')
def test_patch(self):
"Request a view with string data via request method PATCH"
# Regression test for #17797
data = '{"test": "json"}'
response = self.client.patch('/request_methods/', data=data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'request method: PATCH')
def test_empty_string_data(self):
"Request a view with empty string data via request method GET/POST/HEAD"
# Regression test for #21740
response = self.client.get('/body/', data='', content_type='application/json')
self.assertEqual(response.content, b'')
response = self.client.post('/body/', data='', content_type='application/json')
self.assertEqual(response.content, b'')
response = self.client.head('/body/', data='', content_type='application/json')
self.assertEqual(response.content, b'')
def test_json(self):
response = self.client.get('/json_response/')
self.assertEqual(response.json(), {'key': 'value'})
def test_json_wrong_header(self):
response = self.client.get('/body/')
msg = 'Content-Type header is "text/html; charset=utf-8", not "application/json"'
with self.assertRaisesMessage(ValueError, msg):
self.assertEqual(response.json(), {'key': 'value'})
@override_settings(ROOT_URLCONF='test_client_regress.urls',)
class QueryStringTests(SimpleTestCase):
def test_get_like_requests(self):
# See: https://code.djangoproject.com/ticket/10571.
for method_name in ('get', 'head'):
# A GET-like request can pass a query string as data
method = getattr(self.client, method_name)
response = method("/request_data/", data={'foo': 'whiz'})
self.assertEqual(response.context['get-foo'], 'whiz')
# A GET-like request can pass a query string as part of the URL
response = method("/request_data/?foo=whiz")
self.assertEqual(response.context['get-foo'], 'whiz')
# Data provided in the URL to a GET-like request is overridden by actual form data
response = method("/request_data/?foo=whiz", data={'foo': 'bang'})
self.assertEqual(response.context['get-foo'], 'bang')
response = method("/request_data/?foo=whiz", data={'bar': 'bang'})
self.assertEqual(response.context['get-foo'], None)
self.assertEqual(response.context['get-bar'], 'bang')
def test_post_like_requests(self):
# A POST-like request can pass a query string as data
response = self.client.post("/request_data/", data={'foo': 'whiz'})
self.assertEqual(response.context['get-foo'], None)
self.assertEqual(response.context['post-foo'], 'whiz')
# A POST-like request can pass a query string as part of the URL
response = self.client.post("/request_data/?foo=whiz")
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['post-foo'], None)
# POST data provided in the URL augments actual form data
response = self.client.post("/request_data/?foo=whiz", data={'foo': 'bang'})
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['post-foo'], 'bang')
response = self.client.post("/request_data/?foo=whiz", data={'bar': 'bang'})
self.assertEqual(response.context['get-foo'], 'whiz')
self.assertEqual(response.context['get-bar'], None)
self.assertEqual(response.context['post-foo'], None)
self.assertEqual(response.context['post-bar'], 'bang')
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class UnicodePayloadTests(SimpleTestCase):
def test_simple_unicode_payload(self):
"A simple ASCII-only unicode JSON document can be POSTed"
# Regression test for #10571
json = '{"english": "mountain pass"}'
response = self.client.post("/parse_unicode_json/", json,
content_type="application/json")
self.assertEqual(response.content, json.encode())
def test_unicode_payload_utf8(self):
"A non-ASCII unicode data encoded as UTF-8 can be POSTed"
# Regression test for #10571
json = '{"dog": "собака"}'
response = self.client.post("/parse_unicode_json/", json,
content_type="application/json; charset=utf-8")
self.assertEqual(response.content, json.encode('utf-8'))
def test_unicode_payload_utf16(self):
"A non-ASCII unicode data encoded as UTF-16 can be POSTed"
# Regression test for #10571
json = '{"dog": "собака"}'
response = self.client.post("/parse_unicode_json/", json,
content_type="application/json; charset=utf-16")
self.assertEqual(response.content, json.encode('utf-16'))
def test_unicode_payload_non_utf(self):
"A non-ASCII unicode data as a non-UTF based encoding can be POSTed"
# Regression test for #10571
json = '{"dog": "собака"}'
response = self.client.post("/parse_unicode_json/", json,
content_type="application/json; charset=koi8-r")
self.assertEqual(response.content, json.encode('koi8-r'))
class DummyFile(object):
def __init__(self, filename):
self.name = filename
def read(self):
return b'TEST_FILE_CONTENT'
class UploadedFileEncodingTest(SimpleTestCase):
def test_file_encoding(self):
encoded_file = encode_file('TEST_BOUNDARY', 'TEST_KEY', DummyFile('test_name.bin'))
self.assertEqual(b'--TEST_BOUNDARY', encoded_file[0])
self.assertEqual(b'Content-Disposition: form-data; name="TEST_KEY"; filename="test_name.bin"', encoded_file[1])
self.assertEqual(b'TEST_FILE_CONTENT', encoded_file[-1])
def test_guesses_content_type_on_file_encoding(self):
self.assertEqual(b'Content-Type: application/octet-stream',
encode_file('IGNORE', 'IGNORE', DummyFile("file.bin"))[2])
self.assertEqual(b'Content-Type: text/plain',
encode_file('IGNORE', 'IGNORE', DummyFile("file.txt"))[2])
self.assertIn(encode_file('IGNORE', 'IGNORE', DummyFile("file.zip"))[2], (
b'Content-Type: application/x-compress',
b'Content-Type: application/x-zip',
b'Content-Type: application/x-zip-compressed',
b'Content-Type: application/zip',))
self.assertEqual(b'Content-Type: application/octet-stream',
encode_file('IGNORE', 'IGNORE', DummyFile("file.unknown"))[2])
@override_settings(ROOT_URLCONF='test_client_regress.urls',)
class RequestHeadersTest(SimpleTestCase):
def test_client_headers(self):
"A test client can receive custom headers"
response = self.client.get("/check_headers/", HTTP_X_ARG_CHECK='Testing 123')
self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123")
self.assertEqual(response.status_code, 200)
def test_client_headers_redirect(self):
"Test client headers are preserved through redirects"
response = self.client.get("/check_headers_redirect/", follow=True, HTTP_X_ARG_CHECK='Testing 123')
self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123")
self.assertRedirects(response, '/check_headers/',
status_code=302, target_status_code=200)
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class ReadLimitedStreamTest(SimpleTestCase):
"""
Tests that ensure that HttpRequest.body, HttpRequest.read() and
HttpRequest.read(BUFFER) have proper LimitedStream behavior.
Refs #14753, #15785
"""
def test_body_from_empty_request(self):
"""HttpRequest.body on a test client GET request should return
the empty string."""
self.assertEqual(self.client.get("/body/").content, b'')
def test_read_from_empty_request(self):
"""HttpRequest.read() on a test client GET request should return the
empty string."""
self.assertEqual(self.client.get("/read_all/").content, b'')
def test_read_numbytes_from_empty_request(self):
"""HttpRequest.read(LARGE_BUFFER) on a test client GET request should
return the empty string."""
self.assertEqual(self.client.get("/read_buffer/").content, b'')
def test_read_from_nonempty_request(self):
"""HttpRequest.read() on a test client PUT request with some payload
should return that payload."""
payload = b'foobar'
self.assertEqual(self.client.put(
"/read_all/",
data=payload,
content_type='text/plain').content, payload)
def test_read_numbytes_from_nonempty_request(self):
"""HttpRequest.read(LARGE_BUFFER) on a test client PUT request with
some payload should return that payload."""
payload = b'foobar'
self.assertEqual(
self.client.put("/read_buffer/",
data=payload,
content_type='text/plain').content, payload)
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class RequestFactoryStateTest(SimpleTestCase):
"""Regression tests for #15929."""
# These tests are checking that certain middleware don't change certain
# global state. Alternatively, from the point of view of a test, they are
# ensuring test isolation behavior. So, unusually, it doesn't make sense to
# run the tests individually, and if any are failing it is confusing to run
# them with any other set of tests.
def common_test_that_should_always_pass(self):
request = RequestFactory().get('/')
request.session = {}
self.assertFalse(hasattr(request, 'user'))
def test_request(self):
self.common_test_that_should_always_pass()
def test_request_after_client(self):
# apart from the next line the three tests are identical
self.client.get('/')
self.common_test_that_should_always_pass()
def test_request_after_client_2(self):
# This test is executed after the previous one
self.common_test_that_should_always_pass()
@override_settings(ROOT_URLCONF='test_client_regress.urls')
class RequestFactoryEnvironmentTests(SimpleTestCase):
"""
Regression tests for #8551 and #17067: ensure that environment variables
are set correctly in RequestFactory.
"""
def test_should_set_correct_env_variables(self):
request = RequestFactory().get('/path/')
self.assertEqual(request.META.get('REMOTE_ADDR'), '127.0.0.1')
self.assertEqual(request.META.get('SERVER_NAME'), 'testserver')
self.assertEqual(request.META.get('SERVER_PORT'), '80')
self.assertEqual(request.META.get('SERVER_PROTOCOL'), 'HTTP/1.1')
self.assertEqual(request.META.get('SCRIPT_NAME') +
request.META.get('PATH_INFO'), '/path/')
|
BMJHayward/django
|
tests/test_client_regress/tests.py
|
Python
|
bsd-3-clause
| 67,625
|
[
"VisIt"
] |
1cf2a781093673174492590f3c80c4930b433c8c35f575dc36158a25f8b9a8e0
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.structure import Structure
from pymatgen.core.units import Ha_to_eV, bohr_to_ang
from pymatgen.io.abinit.abiobjects import *
import warnings
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class LatticeFromAbivarsTest(PymatgenTest):
def test_rprim_acell(self):
l1 = lattice_from_abivars(acell=3*[10], rprim=np.eye(3))
self.assertAlmostEqual(l1.volume, bohr_to_ang**3 * 1000)
assert l1.angles == (90, 90, 90)
l2 = lattice_from_abivars(acell=3*[10], angdeg=(90, 90, 90))
assert l1 == l2
l2 = lattice_from_abivars(acell=3*[8], angdeg=(60, 60, 60))
abi_rprimd = np.reshape([4.6188022, 0.0000000, 6.5319726,
-2.3094011, 4.0000000, 6.5319726,
-2.3094011, -4.0000000, 6.5319726], (3, 3)) * bohr_to_ang
self.assertArrayAlmostEqual(l2.matrix, abi_rprimd)
l3 = lattice_from_abivars(acell=[3, 6, 9], angdeg=(30, 40, 50))
abi_rprimd = np.reshape([3.0000000, 0.0000000, 0.0000000,
3.8567257, 4.5962667, 0.0000000,
6.8944000, 4.3895544, 3.7681642], (3, 3)) * bohr_to_ang
self.assertArrayAlmostEqual(l3.matrix, abi_rprimd)
with self.assertRaises(ValueError):
lattice_from_abivars(acell=[1, 1, 1], angdeg=(90, 90, 90), rprim=np.eye(3))
with self.assertRaises(ValueError):
lattice_from_abivars(acell=[1, 1, 1], angdeg=(-90, 90, 90))
class SpinModeTest(PymatgenTest):
def test_base(self):
polarized = SpinMode.as_spinmode("polarized")
other_polarized = SpinMode.as_spinmode("polarized")
unpolarized = SpinMode.as_spinmode("unpolarized")
polarized.to_abivars()
self.assertTrue(polarized is other_polarized)
self.assertTrue(polarized == other_polarized)
self.assertTrue(polarized != unpolarized)
# Test pickle
self.serialize_with_pickle(polarized)
# Test dict methods
self.assertMSONable(polarized)
self.assertMSONable(unpolarized)
class SmearingTest(PymatgenTest):
def test_base(self):
fd1ev = Smearing.as_smearing("fermi_dirac:1 eV")
fd1ev.to_abivars()
self.assertTrue(fd1ev)
same_fd = Smearing.as_smearing("fermi_dirac:"+ str(1.0/Ha_to_eV))
self.assertTrue(same_fd == fd1ev)
nosmear = Smearing.nosmearing()
assert nosmear == Smearing.as_smearing("nosmearing")
self.assertFalse(nosmear)
self.assertTrue(nosmear != fd1ev)
self.assertMSONable(nosmear)
new_fd1ev = Smearing.from_dict(fd1ev.as_dict())
self.assertTrue(new_fd1ev == fd1ev)
# Test pickle
self.serialize_with_pickle(fd1ev)
# Test dict methods
self.assertMSONable(fd1ev)
class ElectronsAlgorithmTest(PymatgenTest):
def test_base(self):
algo = ElectronsAlgorithm(nstep=70)
abivars = algo.to_abivars()
# Test pickle
self.serialize_with_pickle(algo)
# Test dict methods
self.assertMSONable(algo)
class ElectronsTest(PymatgenTest):
def test_base(self):
default_electrons = Electrons()
self.assertTrue(default_electrons.nsppol==2)
self.assertTrue(default_electrons.nspinor==1)
self.assertTrue(default_electrons.nspden==2)
abivars = default_electrons.to_abivars()
#new = Electron.from_dict(default_electrons.as_dict())
# Test pickle
self.serialize_with_pickle(default_electrons, test_eq=False)
custom_electrons = Electrons(spin_mode="unpolarized", smearing="marzari4:0.2 eV",
algorithm=ElectronsAlgorithm(nstep=70), nband=10, charge=1.0, comment="Test comment")
# Test dict methods
self.assertMSONable(custom_electrons)
class KSamplingTest(PymatgenTest):
def test_base(self):
monkhorst = KSampling.monkhorst((3, 3, 3), (0.5, 0.5, 0.5), 0, False, False)
gamma_centered = KSampling.gamma_centered((3, 3, 3), False, False)
monkhorst.to_abivars()
# Test dict methods
self.assertMSONable(monkhorst)
self.assertMSONable(gamma_centered)
class RelaxationTest(PymatgenTest):
def test_base(self):
atoms_and_cell = RelaxationMethod.atoms_and_cell()
atoms_only = RelaxationMethod.atoms_only()
atoms_and_cell.to_abivars()
# Test dict methods
self.assertMSONable(atoms_and_cell)
self.assertMSONable(atoms_only)
class PPModelTest(PymatgenTest):
def test_base(self):
godby = PPModel.as_ppmodel("godby:12 eV")
# print(godby)
# print(repr(godby))
godby.to_abivars()
self.assertTrue(godby)
same_godby = PPModel.as_ppmodel("godby:"+ str(12.0/Ha_to_eV))
self.assertTrue(same_godby == godby)
noppm = PPModel.get_noppmodel()
self.assertFalse(noppm)
self.assertTrue(noppm != godby)
new_godby = PPModel.from_dict(godby.as_dict())
self.assertTrue(new_godby == godby)
# Test pickle
self.serialize_with_pickle(godby)
# Test dict methods
self.assertMSONable(godby)
if __name__ == '__main__':
import unittest
unittest.main()
|
dongsenfo/pymatgen
|
pymatgen/io/abinit/tests/test_abiobjects.py
|
Python
|
mit
| 5,537
|
[
"ABINIT",
"pymatgen"
] |
f614c5d8bd69b5926d492ee361d8c2d5a34029259407aec13b58fc5d1f435d5b
|
#!/usr/bin/python
# Use 1 or 2 arguments:
# 1 (read): generic input file
# 2 (write): Molcas input file (default: append ".mol" to generic input)
import sys
#=============================
# CHANGE THIS DATA AS NEEDED
Q_root = 1
seward_header = """\
*** Usa el directorio de bases (6-31G* con funciones esfericas)
!ln -sf $BASISDIR BASLIB
&SEWARD
Title
N-phenylpyrrole
>>> coord inline basis sph-31G*
NoSymm"""
main_block = """\
!ln -sf $SaveDir/$Project.JobIph JOBOLD
&RASSCF
JobIph
NActElectrons = 12 0 0
Inactive = 32
Ras2 = 11
CIRoot = 5 5 1
RlxRoot = %(root)i
OutOrbitals = Natural
5
End of input
!rm -f JOBOLD
!cp $Project.rasscf.%(root)i.molden $Project.esp.molden
!cp $Project.RasOrb%(root_b)s $Project.esp.RasOrb
&RASSI
NrOfJobIphs = 1 5
1 2 3 4 5
End of input
""" % {"root": Q_root, "root_b": Q_root if (Q_root > 1) else "" }
alaska = """\
&MCLR
SALa = %(root)i
Iterations = 200
End of input
&ALASKA
End of input
""" % {"root": Q_root}
mckinley = """\
&MCKINLEY
End of input
!cat $Project.UnSym
"""
potential = """\
!cp $Project.JobIph $Project.JobOld
&RASSCF
JobIph
CIRestart
CIOnly
NActElectrons = 12 0 0
Inactive = 32
Ras2 = 11
CIRoot = 1 %(root)i
%(root)i
End of input
!cp $Project.JobOld $Project.JobIph
""" % {"root": Q_root}
#=============================
# Get input files
try:
gen_input = sys.argv[1]
except IndexError:
sys.exit("Missing input file")
try:
mol_input = sys.argv[2]
except IndexError:
mol_input = gen_input + ".mol"
#=============================
# Read the data from the generic input file
angstrom = 1.88972613289
Q_derivative = 0
Q_natoms = 0
Q_molecule = []
Q_chargesfile = ""
Q_charges = 0
Q_charges_points = []
Q_potential = 0
Q_potential_points = []
file_gen = open(gen_input, "r")
for line in file_gen:
# Read the order of the energy derivative needed
if (line.rstrip() == "Derivative"):
Q_derivative = int(file_gen.next())
# Read the geometry
# atom name, atomic symbol, atomic number, coordinates in Angstrom
elif (line.rstrip() == "Geometry"):
Q_natoms = int(file_gen.next())
for i in range(Q_natoms):
tmp = dict(zip(("name","type","number","x","y","z"),file_gen.next().split()))
tmp["number"] = int(tmp["number"])
tmp["x"] = float(tmp["x"])
tmp["y"] = float(tmp["y"])
tmp["z"] = float(tmp["z"])
Q_molecule.append(tmp)
# Read the filename containing external charges, in Angstrom
elif (line.rstrip() == "External charge file"):
Q_chargesfile = file_gen.next().rstrip()
# ... or read the external charges positions and values, in Angstrom
elif (line.rstrip() == "External charges"):
Q_charges = int(file_gen.next())
for i in range(Q_charges):
tmp = dict(zip(("x","y","z","q"),file_gen.next().split()))
tmp["x"] = float(tmp["x"])
tmp["y"] = float(tmp["y"])
tmp["z"] = float(tmp["z"])
tmp["q"] = float(tmp["q"])
Q_charges_points.append(tmp)
# Read the points where the electrostatic potential is to be calculated, in Angstrom
elif (line.rstrip() == "Potential points"):
Q_potential = int(file_gen.next())
for i in range(Q_potential):
tmp = dict(zip(("x","y","z"),file_gen.next().split()))
tmp["x"] = float(tmp["x"])
tmp["y"] = float(tmp["y"])
tmp["z"] = float(tmp["z"])
Q_potential_points.append(tmp)
file_gen.close()
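# Illustrative sketch only (not part of the original script): a hypothetical
# generic input file matching the sections parsed above. Every value below is
# made up purely to show the expected layout.
#
#   Derivative
#   1
#   Geometry
#   2
#   H1 H 1    0.000000000000    0.000000000000    0.000000000000
#   H2 H 1    0.000000000000    0.000000000000    0.740000000000
#   External charges
#   1
#   1.500000000000    0.000000000000    0.000000000000   -0.500000000000
#   Potential points
#   1
#   0.000000000000    1.000000000000    0.000000000000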
#=============================
# Write the Molcas input file
file_mol = open(mol_input, "w")
# Write the SEWARD input
print >> file_mol, seward_header
# Write the molecular geometry, in Angstrom
print >> file_mol, " %i\n" % Q_natoms
for atom in Q_molecule:
print >> file_mol, "%16s %20.12f %20.12f %20.12f" % (atom["type"], atom["x"], atom["y"], atom["z"])
print >> file_mol, " End of input"
# Write the external charges, in Bohr, or the charge file
if (Q_charges):
print >> file_mol, " XField"
print >> file_mol, " %i 1" % Q_charges
for charge in Q_charges_points:
print >> file_mol, "%20.12f %20.12f %20.12f %20.12f %3.1f %3.1f %3.1f" % \
(charge["x"]*angstrom, charge["y"]*angstrom, charge["z"]*angstrom, charge["q"], 0, 0, 0)
elif (Q_chargesfile):
print >> file_mol, " XField"
print >> file_mol, "@$MOLCAS_SUBMIT_PWD/%s" % Q_chargesfile
# Write the potential points, in Bohr
# (Skip, to avoid printing the potential for every root)
# (use molden instead)
#if (Q_potential):
# print >> file_mol, " EPot"
# print >> file_mol, " %i" % Q_potential
# for point in Q_potential_points:
# print >> file_mol, "%20.12f %20.12f %20.12f" % (point["x"]*angstrom, point["y"]*angstrom, point["z"]*angstrom)
# End of SEWARD input
print >> file_mol, "End of input\n"
print >> file_mol, main_block
if (Q_derivative >= 1): print >> file_mol, alaska
if (Q_derivative == 2): print >> file_mol, mckinley
# Work-around to avoid useless printing of the potential for every root...
#print >> file_mol, seward_header
#print >> file_mol, " %i\n" % Q_natoms
#for atom in Q_molecule:
# print >> file_mol, "%16s %20.12f %20.12f %20.12f" % (atom["type"], atom["x"], atom["y"], atom["z"])
#print >> file_mol, " End of input"
#if (Q_charges):
# print >> file_mol, " XField"
# print >> file_mol, " %i 1" % Q_charges
# for charge in Q_charges_points:
# print >> file_mol, "%20.12f %20.12f %20.12f %20.12f %3.1f %3.1f %3.1f" % \
# (charge["x"]*angstrom, charge["y"]*angstrom, charge["z"]*angstrom, charge["q"], 0, 0, 0)
#elif (Q_chargesfile):
# print >> file_mol, " XField"
# print >> file_mol, "@$MOLCAS_SUBMIT_PWD/%s" % Q_chargesfile
#if (Q_potential):
# print >> file_mol, " EPot"
# print >> file_mol, " %i" % Q_potential
# for point in Q_potential_points:
# print >> file_mol, "%20.12f %20.12f %20.12f" % (point["x"]*angstrom, point["y"]*angstrom, point["z"]*angstrom)
#print >> file_mol, "End of input\n"
#print >> file_mol, potential
file_mol.close()
|
Jellby/ASEP-MD
|
Tests/scripts/gen2molcas6.py
|
Python
|
gpl-3.0
| 5,951
|
[
"MOLCAS"
] |
0b56bdd73820bff4d706ed9b841f4b72d5926de5bab9ea51e2854398ba5043ec
|
"""Utilities for handling PDBQT files."""
from typing import Dict, List, Optional, Set, Tuple
from deepchem.utils.typing import RDKitMol
def pdbqt_to_pdb(filename: Optional[str] = None,
pdbqt_data: Optional[List[str]] = None) -> str:
"""Extracts the PDB part of a pdbqt file as a string.
Either `filename` or `pdbqt_data` must be provided. This function
strips PDBQT charge information from the provided input.
Parameters
----------
filename: str, optional (default None)
Filename of PDBQT file
pdbqt_data: List[str], optional (default None)
Raw list of lines containing data from PDBQT file.
Returns
-------
pdb_block: str
String containing the PDB portion of pdbqt file.
"""
if filename is not None and pdbqt_data is not None:
raise ValueError("Only one of filename or pdbqt_data can be provided")
elif filename is None and pdbqt_data is None:
raise ValueError("Either filename or pdbqt_data must be provided")
elif filename is not None:
pdbqt_data = open(filename).readlines()
pdb_block = ""
# FIXME: Item "None" of "Optional[List[str]]" has no attribute "__iter__" (not iterable)
for line in pdbqt_data: # type: ignore
pdb_block += "%s\n" % line[:66]
return pdb_block
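# Illustrative usage sketch (not part of the original module; the file name is
# hypothetical). Exactly one of `filename` or `pdbqt_data` may be given:
#
#   pdb_block = pdbqt_to_pdb(filename="ligand.pdbqt")
#   pdb_block = pdbqt_to_pdb(pdbqt_data=open("ligand.pdbqt").readlines())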
def convert_protein_to_pdbqt(mol: RDKitMol, outfile: str) -> None:
"""Convert a protein PDB file into a pdbqt file.
Writes the extra PDBQT terms directly to `outfile`.
Parameters
----------
mol: RDKit Mol
Protein molecule
outfile: str
filename which already has a valid pdb representation of mol
"""
lines = [x.strip() for x in open(outfile).readlines()]
out_lines = []
for line in lines:
if "ROOT" in line or "ENDROOT" in line or "TORSDOF" in line:
out_lines.append("%s\n" % line)
continue
if not line.startswith("ATOM"):
continue
line = line[:66]
atom_index = int(line[6:11])
atom = mol.GetAtoms()[atom_index - 1]
line = "%s +0.000 %s\n" % (line, atom.GetSymbol().ljust(2))
out_lines.append(line)
with open(outfile, 'w') as fout:
for line in out_lines:
fout.write(line)
def mol_to_graph(mol: RDKitMol):
"""Convert RDKit Mol to NetworkX graph
Convert mol into a graph representation: atoms are nodes, and bonds
are edges stored in the graph.
Parameters
----------
mol: RDKit Mol
The molecule to convert into a graph.
Returns
-------
graph: networkx.Graph
Contains atoms indices as nodes, edges as bonds.
Note
----
This function requires NetworkX to be installed.
"""
try:
import networkx as nx
except ModuleNotFoundError:
raise ValueError("This function requires NetworkX to be installed.")
G = nx.Graph()
num_atoms = mol.GetNumAtoms()
G.add_nodes_from(range(num_atoms))
for i in range(mol.GetNumBonds()):
from_idx = mol.GetBonds()[i].GetBeginAtomIdx()
to_idx = mol.GetBonds()[i].GetEndAtomIdx()
G.add_edge(from_idx, to_idx)
return G
def get_rotatable_bonds(mol: RDKitMol) -> List[Tuple[int, int]]:
"""
https://github.com/rdkit/rdkit/blob/f4529c910e546af590c56eba01f96e9015c269a6/Code/GraphMol/Descriptors/Lipinski.cpp#L107
Taken from rdkit source to find which bonds are rotatable store
rotatable bonds in (from_atom, to_atom)
Parameters
----------
mol: RDKit Mol
Ligand molecule
Returns
-------
rotatable_bonds: List[Tuple[int, int]]
List of rotatable bonds in molecule
Note
----
This function requires RDKit to be installed.
"""
try:
from rdkit import Chem
from rdkit.Chem import rdmolops
except ModuleNotFoundError:
raise ValueError("This function requires RDKit to be installed.")
pattern = Chem.MolFromSmarts(
"[!$(*#*)&!D1&!$(C(F)(F)F)&!$(C(Cl)(Cl)Cl)&!$(C(Br)(Br)Br)&!$(C([CH3])("
"[CH3])[CH3])&!$([CD3](=[N,O,S])-!@[#7,O,S!D1])&!$([#7,O,S!D1]-!@[CD3]="
"[N,O,S])&!$([CD3](=[N+])-!@[#7!D1])&!$([#7!D1]-!@[CD3]=[N+])]-!@[!$(*#"
"*)&!D1&!$(C(F)(F)F)&!$(C(Cl)(Cl)Cl)&!$(C(Br)(Br)Br)&!$(C([CH3])([CH3])"
"[CH3])]")
rdmolops.FastFindRings(mol)
rotatable_bonds = mol.GetSubstructMatches(pattern)
return rotatable_bonds
def convert_mol_to_pdbqt(mol: RDKitMol, outfile: str) -> None:
"""Writes the provided ligand molecule to specified file in pdbqt format.
Creates a torsion tree and write to pdbqt file. The torsion tree
represents rotatable bonds in the molecule.
Parameters
----------
mol: RDKit Mol
The molecule whose value is stored in pdb format in outfile
outfile: str
Filename for a valid pdb file with the extension .pdbqt
Note
----
This function requires NetworkX to be installed.
"""
try:
import networkx as nx
except ModuleNotFoundError:
raise ValueError("This function requires NetworkX to be installed.")
# Walk through the original file and extract ATOM/HETATM lines and
# add PDBQT charge annotations.
pdb_map = _create_pdb_map(outfile)
graph = mol_to_graph(mol)
rotatable_bonds = get_rotatable_bonds(mol)
# Remove rotatable bonds from this molecule
for bond in rotatable_bonds:
graph.remove_edge(bond[0], bond[1])
# Get the connected components now that the rotatable bonds have
# been removed.
components = [x for x in nx.connected_components(graph)]
comp_map = _create_component_map(mol, components)
used_partitions = set()
lines = []
# The root is the largest connected component.
root = max(enumerate(components), key=lambda x: len(x[1]))[0]
# Write the root component
lines.append("ROOT\n")
for atom in components[root]:
lines.append(pdb_map[atom])
lines.append("ENDROOT\n")
# We've looked at the root, so take note of that
used_partitions.add(root)
for bond in rotatable_bonds:
valid, next_partition = _valid_bond(used_partitions, bond, root, comp_map)
if not valid:
continue
_dfs(used_partitions, next_partition, bond, components, rotatable_bonds,
lines, pdb_map, comp_map)
lines.append("TORSDOF %s" % len(rotatable_bonds))
with open(outfile, 'w') as fout:
for line in lines:
fout.write(line)
def _create_pdb_map(outfile: str) -> Dict[int, str]:
"""Create a mapping from atom numbers to lines to write to pdbqt
This is a map from rdkit atom number to its line in the pdb
file. We also add the two additional columns required for
pdbqt (charge, symbol).
Note rdkit atoms are 0 indexed and pdb files are 1 indexed
Parameters
----------
outfile: str
filename which already has a valid pdb representation of mol
Returns
-------
pdb_map: Dict[int, str]
Maps rdkit atom numbers to lines to be written to PDBQT file.
"""
lines = [x.strip() for x in open(outfile).readlines()]
lines = list(
filter(lambda x: x.startswith("HETATM") or x.startswith("ATOM"), lines))
lines = [x[:66] for x in lines]
pdb_map = {}
for line in lines:
my_values = line.split()
atom_number = int(my_values[1])
atom_symbol = my_values[2]
atom_symbol = ''.join([i for i in atom_symbol if not i.isdigit()])
line = line.replace("HETATM", "ATOM ")
line = "%s +0.000 %s\n" % (line, atom_symbol.ljust(2))
pdb_map[atom_number - 1] = line
return pdb_map
def _create_component_map(mol: RDKitMol,
components: List[List[int]]) -> Dict[int, int]:
"""Creates a map from atom ids to disconnected component id
For each atom in `mol`, maps it to the id of the component in the
molecule. The intent is that this is used on a molecule whose
rotatable bonds have been removed. `components` is a list of the
connected components after this surgery.
Parameters
----------
mol: RDKit Mol
molecule to find disconnected components in
components: List[List[int]]
List of connected components
Returns
-------
comp_map: Dict[int, int]
Maps atom ids to component ids
"""
comp_map = {}
for i in range(mol.GetNumAtoms()):
for j in range(len(components)):
if i in components[j]:
comp_map[i] = j
break
return comp_map
def _dfs(used_partitions: Set[int], current_partition: int,
bond: Tuple[int, int], components: List[List[int]],
rotatable_bonds: List[Tuple[int, int]], lines: List[str],
pdb_map: Dict[int, str], comp_map: Dict[int, int]) -> List[str]:
"""
This function does a depth first search through the torsion tree
Parameters
----------
used_partitions: Set[int]
Partitions which have already been used
current_partition: int
The current partition to expand
bond: Tuple[int, int]
the bond which goes from the previous partition into this partition
components: List[List[int]]
List of connected components
rotatable_bonds: List[Tuple[int, int]]
List of rotatable bonds. This tuple is (from_atom, to_atom).
lines: List[str]
List of lines to write
pdb_map: Dict[int, str]
Maps atom numbers to PDBQT lines to write
comp_map: Dict[int, int]
Maps atom numbers to component numbers
Returns
-------
lines: List[str]
List of lines to write. This has more appended lines.
"""
if comp_map[bond[1]] != current_partition:
bond = (bond[1], bond[0])
used_partitions.add(comp_map[bond[0]])
used_partitions.add(comp_map[bond[1]])
lines.append("BRANCH %4s %4s\n" % (bond[0] + 1, bond[1] + 1))
for atom in components[current_partition]:
lines.append(pdb_map[atom])
for b in rotatable_bonds:
valid, next_partition = \
_valid_bond(used_partitions, b, current_partition, comp_map)
if not valid:
continue
lines = _dfs(used_partitions, next_partition, b, components,
rotatable_bonds, lines, pdb_map, comp_map)
lines.append("ENDBRANCH %4s %4s\n" % (bond[0] + 1, bond[1] + 1))
return lines
def _valid_bond(used_partitions: Set[int], bond: Tuple[int, int],
current_partition: int,
comp_map: Dict[int, int]) -> Tuple[bool, int]:
"""Helper method to find next partition to explore.
Used to check if a bond goes from the current partition into a
partition that is not yet explored
Parameters
----------
used_partitions: Set[int]
Partitions which have already been used
bond: Tuple[int, int]
The bond to check if it goes to an unexplored partition.
This tuple is (from_atom, to_atom).
current_partition: int
The current partition of the DFS
comp_map: Dict[int, int]
Maps atom ids to component ids
Returns
-------
is_valid: bool
Whether to explore the next partition or not
next_partition: int
The next partition to explore
"""
part1 = comp_map[bond[0]]
part2 = comp_map[bond[1]]
if part1 != current_partition and part2 != current_partition:
return False, 0
if part1 == current_partition:
next_partition = part2
else:
next_partition = part1
return next_partition not in used_partitions, next_partition
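# Minimal end-to-end sketch (not part of the original module; "ligand.pdb" is a
# hypothetical input file and RDKit is assumed to be installed).
# convert_mol_to_pdbqt() expects `outfile` to already contain a valid PDB
# representation of the molecule, so the PDB block is written there first and
# then rewritten in place with the torsion tree.
if __name__ == "__main__":
    from rdkit import Chem
    mol = Chem.MolFromPDBFile("ligand.pdb", removeHs=False)
    Chem.MolToPDBFile(mol, "ligand.pdbqt")      # seed outfile with the plain PDB block
    convert_mol_to_pdbqt(mol, "ligand.pdbqt")   # rewrite it as PDBQT with a torsion tree
    print(open("ligand.pdbqt").read()[:200])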
|
miaecle/deepchem
|
deepchem/utils/pdbqt_utils.py
|
Python
|
mit
| 10,872
|
[
"RDKit"
] |
3c3b2d81d0df00fb78a81a12a81323e9cf92c1606b5b32735616206dcc56055e
|
import unittest
import os
import json
import scipy
from io import open
from pymatgen.phonon.dos import CompletePhononDos
from pymatgen.phonon.plotter import PhononDosPlotter, PhononBSPlotter, ThermoPlotter
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class PhononDosPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(test_dir, "NaCl_complete_ph_dos.json"), "r") as f:
self.dos = CompletePhononDos.from_dict(json.load(f))
self.plotter = PhononDosPlotter(sigma=0.2, stack=True)
self.plotter_nostack = PhononDosPlotter(sigma=0.2, stack=False)
def test_add_dos_dict(self):
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 0)
self.plotter.add_dos_dict(self.dos.get_element_dos(),
key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 2)
def test_get_dos_dict(self):
self.plotter.add_dos_dict(self.dos.get_element_dos(),
key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
for el in ["Na", "Cl"]:
self.assertIn(el, d)
def test_plot(self):
# Disabling latex for testing.
from matplotlib import rc
rc('text', usetex=False)
self.plotter.add_dos("Total", self.dos)
self.plotter.get_plot(units="mev")
self.plotter_nostack.add_dos("Total", self.dos)
self.plotter_nostack.get_plot(units="mev")
class PhononBSPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(test_dir, "NaCl_phonon_bandstructure.json"), "r") as f:
d = json.loads(f.read())
self.bs = PhononBandStructureSymmLine.from_dict(d)
self.plotter = PhononBSPlotter(self.bs)
def test_bs_plot_data(self):
self.assertEqual(len(self.plotter.bs_plot_data()['distances'][0]), 51,
"wrong number of distances in the first branch")
self.assertEqual(len(self.plotter.bs_plot_data()['distances']), 4,
"wrong number of branches")
self.assertEqual(
sum([len(e) for e in self.plotter.bs_plot_data()['distances']]),
204, "wrong number of distances")
self.assertEqual(self.plotter.bs_plot_data()['ticks']['label'][4], "Y",
"wrong tick label")
self.assertEqual(len(self.plotter.bs_plot_data()['ticks']['label']),
8, "wrong number of tick labels")
def test_plot(self):
# Disabling latex for testing.
from matplotlib import rc
rc('text', usetex=False)
self.plotter.get_plot(units="mev")
class ThermoPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(test_dir, "NaCl_complete_ph_dos.json"), "r") as f:
self.dos = CompletePhononDos.from_dict(json.load(f))
self.plotter = ThermoPlotter(self.dos, self.dos.structure)
def test_plot_functions(self):
# Disabling latex for testing.
from matplotlib import rc
rc('text', usetex=False)
self.plotter.plot_cv(5, 100, 5, show=False)
self.plotter.plot_entropy(5, 100, 5, show=False)
self.plotter.plot_internal_energy(5, 100, 5, show=False)
self.plotter.plot_helmholtz_free_energy(5, 100, 5, show=False)
self.plotter.plot_thermodynamic_properties(5, 100, 5, show=False)
if __name__ == "__main__":
unittest.main()
|
dongsenfo/pymatgen
|
pymatgen/phonon/tests/test_plotter.py
|
Python
|
mit
| 3,658
|
[
"pymatgen"
] |
97bfd777da2fa187851872ca4561e5b8312a5a3b9082c0abe0c7e14c8ea8eb3c
|
#!/usr/bin/python2
# -*- Mode: Python; py-indent-offset: 8 -*-
# (C) Copyright Zack Rusin 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Zack Rusin <zack@kde.org>
import license
import gl_XML
import sys, getopt
class PrintGlEnums(gl_XML.gl_print_base):
def __init__(self):
gl_XML.gl_print_base.__init__(self)
self.name = "gl_enums.py (from Mesa)"
self.license = license.bsd_license_template % ( \
"""Copyright (C) 1999-2005 Brian Paul All Rights Reserved.""", "BRIAN PAUL")
self.enum_table = {}
def printRealHeader(self):
print '#include "main/glheader.h"'
print '#include "main/mfeatures.h"'
print '#include "main/enums.h"'
print '#include "main/imports.h"'
print ''
print 'typedef struct {'
print ' size_t offset;'
print ' int n;'
print '} enum_elt;'
print ''
return
def print_code(self):
print """
typedef int (*cfunc)(const void *, const void *);
/**
* Compare a key name to an element in the \c all_enums array.
*
* \c bsearch always passes the key as the first parameter and the pointer
 * to the array element as the second parameter. We can eliminate some
* extra work by taking advantage of that fact.
*
* \param a Pointer to the desired enum name.
* \param b Pointer to an element of the \c all_enums array.
*/
static int compar_name( const char *a, const enum_elt *b )
{
return strcmp( a, & enum_string_table[ b->offset ] );
}
/**
* Compare a key enum value to an element in the \c all_enums array.
*
* \c bsearch always passes the key as the first parameter and the pointer
* to the array element as the second parameter. We can elimiate some
* extra work by taking advantage of that fact.
*
 * \param a Pointer to the desired enum value.
* \param b Pointer to an index into the \c all_enums array.
*/
static int compar_nr( const int *a, const unsigned *b )
{
return a[0] - all_enums[*b].n;
}
static char token_tmp[20];
const char *_mesa_lookup_enum_by_nr( int nr )
{
unsigned * i;
i = (unsigned *) _mesa_bsearch(& nr, reduced_enums,
Elements(reduced_enums),
sizeof(reduced_enums[0]),
(cfunc) compar_nr);
if ( i != NULL ) {
return & enum_string_table[ all_enums[ *i ].offset ];
}
else {
/* this is not re-entrant safe, no big deal here */
sprintf(token_tmp, "0x%x", nr);
return token_tmp;
}
}
/* Get the name of an enum given that it is a primitive type. Avoids
* GL_FALSE/GL_POINTS ambiguity and others.
*/
const char *_mesa_lookup_prim_by_nr( int nr )
{
switch (nr) {
case GL_POINTS: return "GL_POINTS";
case GL_LINES: return "GL_LINES";
case GL_LINE_STRIP: return "GL_LINE_STRIP";
case GL_LINE_LOOP: return "GL_LINE_LOOP";
case GL_TRIANGLES: return "GL_TRIANGLES";
case GL_TRIANGLE_STRIP: return "GL_TRIANGLE_STRIP";
case GL_TRIANGLE_FAN: return "GL_TRIANGLE_FAN";
case GL_QUADS: return "GL_QUADS";
case GL_QUAD_STRIP: return "GL_QUAD_STRIP";
case GL_POLYGON: return "GL_POLYGON";
case GL_POLYGON+1: return "OUTSIDE_BEGIN_END";
default: return "<invalid>";
}
}
int _mesa_lookup_enum_by_name( const char *symbol )
{
enum_elt * f = NULL;
if ( symbol != NULL ) {
f = (enum_elt *) _mesa_bsearch(symbol, all_enums,
Elements(all_enums),
sizeof( enum_elt ),
(cfunc) compar_name);
}
return (f != NULL) ? f->n : -1;
}
"""
return
def printBody(self, api):
self.process_enums( api )
keys = self.enum_table.keys()
keys.sort()
name_table = []
enum_table = {}
for enum in keys:
low_pri = 9
for [name, pri] in self.enum_table[ enum ]:
name_table.append( [name, enum] )
if pri < low_pri:
low_pri = pri
enum_table[enum] = name
name_table.sort()
string_offsets = {}
i = 0;
print 'LONGSTRING static const char enum_string_table[] = '
for [name, enum] in name_table:
print ' "%s\\0"' % (name)
string_offsets[ name ] = i
i += len(name) + 1
print ' ;'
print ''
print 'static const enum_elt all_enums[%u] =' % (len(name_table))
print '{'
for [name, enum] in name_table:
print ' { %5u, 0x%08X }, /* %s */' % (string_offsets[name], enum, name)
print '};'
print ''
print 'static const unsigned reduced_enums[%u] =' % (len(keys))
print '{'
for enum in keys:
name = enum_table[ enum ]
if [name, enum] not in name_table:
print ' /* Error! %s, 0x%04x */ 0,' % (name, enum)
else:
i = name_table.index( [name, enum] )
print ' %4u, /* %s */' % (i, name)
print '};'
self.print_code()
return
def process_enums(self, api):
self.enum_table = {}
for obj in api.enumIterateByName():
if obj.value not in self.enum_table:
self.enum_table[ obj.value ] = []
name = "GL_" + obj.name
priority = obj.priority()
self.enum_table[ obj.value ].append( [name, priority] )
def show_usage():
print "Usage: %s [-f input_file_name]" % sys.argv[0]
sys.exit(1)
if __name__ == '__main__':
file_name = "gl_API.xml"
try:
(args, trail) = getopt.getopt(sys.argv[1:], "f:")
except Exception,e:
show_usage()
for (arg,val) in args:
if arg == "-f":
file_name = val
api = gl_XML.parse_GL_API( file_name )
printer = PrintGlEnums()
printer.Print( api )
|
CPFDSoftware-Tony/gmv
|
utils/Mesa/Mesa-7.8.2/src/mesa/glapi/gen/gl_enums.py
|
Python
|
gpl-3.0
| 6,491
|
[
"Brian"
] |
c5b17fd853c9c1447eb66855da263dcb53a35ed03d2fa90d16c217174520fc02
|
'''
Created on Aug 5, 2014
@author: gearsad
'''
import vtk
from InteractorSuperclass import InteractorSuperclass
class Interactor3rdPerson(InteractorSuperclass):
'''
Inherit the VTK class vtkInteractorStyleUser and extend it to be a 3rd-person camera that tracks a specified SceneObject.
This class will only track the azimuth (heading) because we want to fix the up vector to make viewing easier (as was done in ARX)
Ref: http://www.vtk.org/doc/nightly/html/classvtkInteractorStyleUser.html#details
Important details about implementation: http://vtk.1045678.n5.nabble.com/vtkInteractorStyleUser-td2839763.html
Interactors: http://www.atamai.com/cgi-bin/viewvc.cgi/atamai/classes/PaneFrame.py?diff_format=u&pathrev=OCCIviewer-1-0-99&logsort=cvs&sortby=rev&view=diff&r1=1.25&r2=1.26
'''
# The tracked object.
__trackedObject = None
__camOffsetVec3 = [0, 0, 0]
def __init__(self, renderer, iren, trackedSceneObject, cameraOffsetVec3):
# Call the parent constructor
InteractorSuperclass.__init__(self, renderer, iren)
# Set the tracked object and the offset
self.__trackedObject = trackedSceneObject
self.__camOffsetVec3 = cameraOffsetVec3
camera = self.GetCurrentRenderer().GetActiveCamera()
camera.SetRoll(0)
camera.SetViewAngle(60) #Make it a little wider.
# Do a first pass call in case the object doesn't move
self.MouseMoveCallback(None, None)
#Ref: http://www.vtk.org/doc/nightly/html/classvtkObject.html
self.__trackingId = self.__trackedObject.vtkActor.AddObserver("ModifiedEvent", self.TrackedObjectMovedCallback)
# In the event that the object doesn't move, update the frame now
self.TrackedObjectMovedCallback(self.__trackedObject, "ModifiedEvent")
def SetCameraPosition(self, posVec3):
# Do nothing, this is a bound camera
return
def Disconnect(self):
# Call the parent Disconnect() method
super(Interactor3rdPerson,self).Disconnect()
# Remove the tracking
self.__trackedObject.vtkActor.RemoveObserver(self.__trackingId)
def TrackedObjectMovedCallback(self, obj, event):
# Get the interactor
iren = self.GetInteractor()
# Get the active camera
camera = self.GetCurrentRenderer().GetActiveCamera()
# Get the tracked object's orientation and position
objRot = self.__trackedObject.vtkActor.GetOrientation()
objPos = self.__trackedObject.vtkActor.GetPosition()
# Reset the focal point to the center of the object and the camera position to the absolute offset
camera.SetFocalPoint(objPos)
camera.SetPosition(objPos[0] + self.__camOffsetVec3[0], objPos[1] + self.__camOffsetVec3[1], objPos[2] + self.__camOffsetVec3[2])
# Final azimuthal (heading) rotation
# Set the camera's up to unit-y so that we rotate around that only,
# otherwise it will rotate around the orthogonal axes of the focal point->camera vector
camera.SetViewUp([0, 1, 0])
# Rotate the camera heading
camera.Azimuth(objRot[1])
# Update the clipping range of the camera
self.GetCurrentRenderer().ResetCameraClippingRange()
def MouseMoveCallback(self, obj, event):
return
def KeyupCallback(self, obj, event):
return
def KeydownCallback(self, obj, event):
return
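# Illustrative wiring sketch (not part of the original module). "scene_obj" is a
# hypothetical SceneObject exposing a vtkActor attribute, as assumed by the
# constructor above; InteractorSuperclass is assumed to derive from a VTK
# interactor style, so the instance can be handed to the interactor:
#
#   renderer = vtk.vtkRenderer()
#   iren = vtk.vtkRenderWindowInteractor()
#   style = Interactor3rdPerson(renderer, iren, scene_obj, [0.0, 2.0, -5.0])
#   iren.SetInteractorStyle(style)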
|
GearsAD/semisorted_arnerve
|
sandbox/bot_vis_platform_oculus/scene/Interactor3rdPerson.py
|
Python
|
mit
| 3,581
|
[
"VTK"
] |
d58787e9be0450f289c748291bff714fd5d81755f54a4c72063d25646df76226
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 15 15:24:39 2015
Functions for single-cell TCRseq analysis for paired-end fastq data
@author:David Redmond (email david.redmond@outlook.com)
"""
import sys, os, commands, csv, operator, tempfile, subprocess, numpy
from itertools import groupby, count
from collections import Counter
from Bio.Blast import NCBIXML
def analysis_seq_vidjil(outlabel,outdir,speciesVidjilRef,vidjildir):
vidjil_cline=vidjildir+"vidjil-2015.10.2_x86_64 -c clones -r 1 -g "+speciesVidjilRef+" "+outdir+"/"+outlabel+"/"+outlabel+".gapfilled.final.fa -o "+outdir
os.system(vidjil_cline)
def create_gapFiller_libraries(myFastq1,myFastq2,outName,label,insertSize):
f=open(outName+".gapfiller.libraries.txt", 'w+')
gfFastq1=os.path.abspath(myFastq1)
gfFastq2=os.path.abspath(myFastq2)
print >> f, label+" bwa "+gfFastq1+" "+gfFastq2+" "+str(insertSize)+" 0.5 FR"
f.close()
def run_gapFiller(label,minCov,gapFillerDir,outdir):
myPrevDir=os.getcwd()
os.chdir(outdir)
gapFiller_cline = "perl "+gapFillerDir+"GapFiller.pl -l "+label+".gapfiller.libraries.txt"+" -s "+label+".Gapped.Targets.fa"+" -m 20 -o "+str(minCov/2)+" -r 0.7 -n 5 -d 50 -t 0 -g 2 -T 1 -i 3 -b "+label
os.system(gapFiller_cline)
os.chdir(myPrevDir)
def print_tcr_summary_log(myGene,geneType):
if is_empty(myGene):
return "No "+geneType+" detected at coverage level"
if type(myGene) is tuple:
return geneType+" detected:\n"+myGene[0]+"\nSequence:\n"+myGene[1][0]+"\nCoverage:\n"+str(myGene[1][1])+"\n"
else:
myText=""
for i in range(0, len(myGene)):
myText+=geneType+" detected:\n"+list(myGene)[i][0]+"\nSequence:\n"+list(myGene)[i][1][0]+"\nCoverage:\n"+str(list(myGene)[i][1][1])+"\n"
return myText
def is_empty(any_structure):
if any_structure:
return False
else:
return True
def create_gap_fill_to_be(geneV,geneC,trimSize,gapSize):
junctions=[]
myGap="N"*gapSize
if type(geneV) is tuple:
vFrag=geneV[1][0][:-trimSize]
cFrag=geneC[1][0][trimSize:]
junctions.append((geneV[0]+"."+geneC[0],vFrag+myGap+cFrag))
else:
for i in range(0, len(geneV)):
vFrag=list(geneV)[i][1][0][:-trimSize]
cFrag=geneC[1][0][trimSize:]
junctions.append((list(geneV)[i][0]+"."+geneC[0],vFrag+myGap+cFrag))
return junctions
#choose region with longest seq then highest coverage
def choose_coverage_region(myRegion):
result=[]
maxLen=0
maxCov=0
if type(myRegion) is tuple:
return myRegion
for i in range(0, len(myRegion)):
if len(myRegion[i][1][0]) > maxLen:
maxLen=len(myRegion[i][1][0])
result=myRegion[i]
maxCov=myRegion[i][1][1]
if(len(myRegion[i][1][0])) == maxLen:
if myRegion[i][1][1] > maxCov:
result=myRegion[i]
maxCov=myRegion[i][1][1]
return result
#count reads in fastq files
def count_total_reads(myFastq1,myFastq2):
result=int(commands.getoutput("zcat %s | wc -l" % myFastq1))
result+=int(commands.getoutput("zcat %s | wc -l" % myFastq2))
return result/4
def return_fastq_counts(myFastq1,myFastq2,outfile):
f1=open(outfile, 'w+')
print >>f1, count_total_reads(myFastq1,myFastq2)
f1.close()
#return median read length of fastq file
def return_fastq_median_read_lengths(myFastq1,outfile,lengthScript):
cmd="zcat "+myFastq1+" | perl "+lengthScript+" - > "+outfile
os.system(cmd)
cmd="zcat "+myFastq1+" | perl "+lengthScript+" -"
return(int(os.popen(cmd).read()))
# gunzip fastq files
def gunzip_fastq(myFastq):
command="gunzip %s" % myFastq
os.system(command)
# gzip fastq files
def gzip_fastq(myFastq):
command="gzip -1 %s" % myFastq
os.system(command)
# Prepare fq files in format for blast mapping
def blast_fq_format(myFastq,outFastq):
#command="sed '3~4d;4~4d;s/@/>/g' "+myFastq+" > "+outFastq
command="zcat "+myFastq+" | sed '3~4d;4~4d;s/@/>/g' > "+outFastq
os.system(command)
#split fasta file into temporary files of 10k lines
def tempfile_split(filename, temp_dir, chunk=10**4):
fns={}
with open(filename, 'r') as datafile:
groups = groupby(datafile, key=lambda k, line=count(): next(line) // chunk)
for k, group in groups:
with tempfile.NamedTemporaryFile(delete=False,
dir=temp_dir,prefix='{}_'.format(str(k))) as outfile:
outfile.write(''.join(group))
fns[k]=outfile.name
return fns
#blast fastqs against ref tcr databases
def blastall_v_regions(myFastq1,myFastq2,myRef,outputfile,eVal,blastallDir):
fns={}
chunk=10**4
with open(myFastq1, 'r') as datafile1:
groups = groupby(datafile1, key=lambda k, line=count(): next(line) // chunk)
for k, group in groups:
with tempfile.NamedTemporaryFile(delete=False,
dir=tempfile.mkdtemp(),prefix='{}_'.format(str(k))) as outfile:
outfile.write(''.join(group))
fns[k]=outfile.name
blastn_cline = blastallDir+"blastall -p blastn -o "+str(outfile.name)+".blast.out -i "+str(outfile.name)+" -d "+myRef+" -e "+str(eVal)+" -m 8 -b 1"
os.system(blastn_cline+" > /dev/null 2>&1")
os.system("cat "+str(outfile.name)+".blast.out >> "+outputfile)
os.remove(str(outfile.name)+".blast.out")
os.remove(str(outfile.name))
testvar=commands.getstatusoutput("dirname "+str(outfile.name))
os.system("rm -r "+testvar[1])
fns={}
with open(myFastq2, 'r') as datafile2:
groups = groupby(datafile2, key=lambda k, line=count(): next(line) // chunk)
for k, group in groups:
with tempfile.NamedTemporaryFile(delete=False,
dir=tempfile.mkdtemp(),prefix='{}_'.format(str(k))) as outfile:
outfile.write(''.join(group))
fns[k]=outfile.name
blastn_cline = blastallDir+"blastall -p blastn -o "+str(outfile.name)+".blast.out -i "+str(outfile.name)+" -d "+myRef+" -e "+str(eVal)+" -m 8 -b 1"
os.system(blastn_cline+" > /dev/null 2>&1")
os.system("cat "+str(outfile.name)+".blast.out >> "+outputfile)
os.remove(str(outfile.name)+".blast.out")
os.remove(str(outfile.name))
testvar=commands.getstatusoutput("dirname "+str(outfile.name))
os.system("rm -r "+testvar[1])
def listToStringWithoutBrackets(list1):
return str(list1).replace('[','').replace(']','')
def run_seqtk(inputList,inputFastq1,inputFastq2,outputFastq,seqTkDir):
command1=seqTkDir+"seqtk subseq "+inputFastq1+" "+inputList+" >> "+outputFastq
command2=seqTkDir+"seqtk subseq "+inputFastq2+" "+inputList+" >> "+outputFastq
os.system(command1)
os.system(command2)
#main ftn to return V or C region counts and alignments for gapfiller processing
def return_counts_and_alignment(blastHitsFile,outName,fastq1,fastq2,seqTkDir,threshold,minBlastAlignedLength):
myHits=[]
with open(blastHitsFile) as f:
reader = csv.reader(f, delimiter="\t")
for row in reader:
if(int(row[3])>=minBlastAlignedLength):
myHits.append(row[1])
gene_table={}
gene_table=Counter(myHits)
perc_table={}
for gene in gene_table:
perc_table[gene]=float(gene_table[gene])/float(sum(gene_table.values()))
gene_table_output = { k: [ gene_table[k], perc_table[k] ] for k in gene_table }
sorted_gto = sorted(gene_table_output.items(), key=operator.itemgetter(1),reverse=True)
f1=open(outName+".counts.txt", 'w+')
for item in sorted_gto:
print >>f1, item[0],",",listToStringWithoutBrackets(item[1])
f1.close()
with open(blastHitsFile) as f:
reader = csv.reader(f, delimiter="\t")
d = list(reader)
myReads = [myReads[0:2] for myReads in d]
f2=open(blastHitsFile+".candidates.txt", 'w+')
for i in range(0,len(sorted_gto)):
if sorted_gto[i][1][1] > threshold:
for j in range(0,len(myReads)):
if myReads[j][1]==sorted_gto[i][0]:
print >> f2, myReads[j][0]
else:
next
f2.close()
run_seqtk(blastHitsFile+".candidates.txt",fastq1,fastq2,outName+".local.cands.fq",seqTkDir)
#blastall the major V and return XML output
def blast_single_v_region(myFastq,myRef,outfile,eVal,blastallDir):
tempFasta = tempfile_split(myFastq, tempfile.mkdtemp(), chunk=10**4)
for key, value in tempFasta.iteritems():
blastn_cline = blastallDir+"blastall -p blastn -o "+value+".blast.out -i "+value+" -d "+myRef+" -e "+str(eVal)+" -m 7 -b 1"
os.system(blastn_cline+" > /dev/null 2>&1")
os.system("cat "+value+".blast.out >> "+outfile)
os.remove(value+".blast.out")
os.remove(value)
def perform_targeted_alignment(candidateFile, outAlignment, candidateGene):
result=open(candidateFile,"r")
f=open(outAlignment, 'w+')
records=NCBIXML.parse(result)
for item in records:
for alignment in item.alignments:
if alignment.accession == candidateGene:
for hsp in alignment.hsps:
myAlignment="_"*(hsp.sbjct_start-1)
myAlignment=myAlignment+hsp.query[0:190]
myAlignment=myAlignment+"_"*(alignment.length-len(myAlignment))
print >> f, (myAlignment)
f.close()
result.close()
#create pileup of reads mapping to particular V or C
def return_consensus(alignmentFile,minCov):
with open(alignmentFile,"rt") as infile:
matrix = [list(line.strip()) for line in infile.readlines()]
transpose=[list(x) for x in zip(*matrix)]
vGeneA=[]
vGeneC=[]
vGeneG=[]
vGeneT=[]
for i in range(0,len(transpose)):
vGeneA.append(transpose[i].count("A"))
vGeneC.append(transpose[i].count("C"))
vGeneG.append(transpose[i].count("G"))
vGeneT.append(transpose[i].count("T"))
vGeneCoverage=[]
vGeneCoverage=[a + b + c + d for a, b, c, d in zip(vGeneA, vGeneC, vGeneG, vGeneT)]
vGeneAlignment={}
vGeneAlignment={"A":vGeneA,"C":vGeneC,"G":vGeneG,"T":vGeneT}
vGeneConsensus=[]
vGeneConsensusCoverage=[]
for i in range(0,len(vGeneAlignment["A"])):
consensusCount=0
consensusBase="N"
for base in vGeneAlignment:
if(vGeneAlignment[base][i] > consensusCount):
consensusCount = vGeneAlignment[base][i]
consensusBase = base
vGeneConsensus.append(consensusBase)
vGeneConsensusCoverage.append(consensusCount)
oldsubseq = []
newsubseq = []
for i in range(0,len(vGeneConsensus)):
if (vGeneConsensusCoverage[i] > minCov):
newsubseq.append(vGeneConsensus[i])
else:
if (len(newsubseq) > len(oldsubseq)):
oldsubseq = newsubseq
newsubseq = []
#vGeneTarget=max(oldsubseq,newsubseq)
if len(oldsubseq) >= len(newsubseq):
vGeneTarget=oldsubseq
else:
vGeneTarget=newsubseq
return ("".join(vGeneTarget),numpy.mean(vGeneConsensusCoverage))
def compare_alignments(candGene):
if len(candGene) == 2:
return(compare_agg(candGene))
else:
temp1=map(candGene.__getitem__, (0,1))
temp2=map(candGene.__getitem__, (0,2))
temp3=map(candGene.__getitem__, (1,2))
temp1=compare_agg(temp1)
temp2=compare_agg(temp2)
temp3=compare_agg(temp3)
###ADDED IN
if type(temp1) is tuple:
temp1=list("")
if type(temp2) is tuple:
temp2=list("")
if type(temp3) is tuple:
temp3=list("")
#END OF ADDIN
return(set(temp1+temp2+temp3))
def compare_agg(candGene):
if candGene[1][1][0] in candGene[0][1][0]:
return candGene[0]
elif candGene[0][1][0] in candGene[1][1][0]:
return candGene[1]
else:
return candGene
def get_variable_regions(myFastq1,myFastq2,myRef,outName,minCov,eVal,blastallDir,seqTkDir,threshold,minBlastAlignedLength):
blastall_v_regions(myFastq1,myFastq2,myRef,outName+".matches.txt",eVal,blastallDir)
return_counts_and_alignment(outName+".matches.txt",outName,myFastq1,myFastq2,seqTkDir,threshold,minBlastAlignedLength)
blast_single_v_region(outName+".local.cands.fq",myRef,outName+".matches.xml",eVal,blastallDir)
candGene=[]
with open(outName+".counts.txt", 'r') as csvfile:
reader = csv.reader(csvfile)
table = [[e for e in r] for r in reader]
for i in range(0, len(table)):
if float(table[i][2]) > threshold:
myCand=table[i][0]
candGene.append(myCand.strip())
for i in range(0, len(candGene)):
perform_targeted_alignment(outName+".matches.xml", outName+"."+str(candGene[i]).replace("/","")+".aln", candGene[i])
try:
candGene[i]=(candGene[i],return_consensus(outName+"."+str(candGene[i]).replace("/","")+".aln",minCov))
except Exception:
pass
for i in candGene[:]:
if len(i) != 2:
candGene.remove(i)
for i in candGene:
if numpy.isnan(float(i[1][1])):
candGene.remove(i)
if len(candGene) > 1:
candGene=compare_alignments(candGene)
return candGene
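# Illustrative call sketch (not part of the original module). Every path and
# parameter value below is hypothetical and depends on local installs of
# blastall and seqtk:
#
#   candV = get_variable_regions("cell_R1.fa", "cell_R2.fa",
#                                "TRBV_reference.fa", "sample1.TRBV",
#                                minCov=5, eVal=1e-5,
#                                blastallDir="/opt/blast/bin/",
#                                seqTkDir="/opt/seqtk/",
#                                threshold=0.1, minBlastAlignedLength=50)
#   print print_tcr_summary_log(candV, "V gene")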
|
ElementoLab/scTCRseq
|
sctcrfuncs.py
|
Python
|
agpl-3.0
| 13,655
|
[
"BLAST",
"BWA"
] |
e202b79a396398bc66afab0253cb8bf1092bbc20d14717a95e901e42d57e16e1
|
# Philip Bailey
# 2 October 2019
# Loops over all visits listed in the CHaMP Workbench and downloads any API files missing from the local system
import os
import sys, traceback
import argparse
import sqlite3
import json
from lib.sitkaAPI import APIGet
from lib import env
from lib.loghelper import Logger
from datetime import datetime
def champ_topo_checker(workbench, folder):
log = Logger('CHaMP Files')
log.setup(logPath=os.path.join(folder, datetime.now().strftime("%Y%m%d-%H%M%S") + '_champ_files.log'))
dbCon = sqlite3.connect(workbench)
dbCurs = dbCon.cursor()
dbCurs.execute('SELECT WatershedName, VisitYear, SiteName, VisitID' +
' FROM vwVisits WHERE ProgramID = 1 AND ProtocolID IN (2030, 416, 806, 1966, 2020, 1955, 1880, 10036, 9999)' +
' ORDER BY VisitYear, WatershedName')
for row in dbCurs.fetchall():
watershed = row[0]
visit_year = row[1]
site = row[2]
visitID = row[3]
visit_path = os.path.join(folder, str(visit_year), watershed.replace(' ', ''), site.replace(' ', ''), 'VISIT_{}'.format(visitID))
log.info('Processing {}'.format(visit_path))
if not os.path.isdir(visit_path):
os.makedirs(visit_path)
try:
visit_data = APIGet('visits/{}'.format(visitID))
# Write visit information to json file
with open(os.path.join(visit_path, 'visit_info.json'), 'w') as json_file:
json.dump(visit_data, json_file)
# Loop over the two lists of folders per visit: field folders and visit folders
for api_key, local_folder in {'fieldFolders': 'Field Folders', 'folders': 'Visit Folders'}.items():
if api_key in visit_data and isinstance(visit_data[api_key], list):
for folder_name in visit_data[api_key]:
field_folder_path = os.path.join(visit_path, local_folder, folder_name['name'])
field_folder_data = APIGet(folder_name['url'], True)
if isinstance(field_folder_data, dict) and 'files' in field_folder_data:
[download_file(file_dict, field_folder_path) for file_dict in field_folder_data['files']]
# Get all the miscellaneous files for the visit
[download_file(file_dict, os.path.join(visit_path, 'Files')) for file_dict in visit_data['files']]
except Exception as e:
log.error('Error for visit {}: {}'.format(visitID, e))
log.info('Process Complete')
def download_file(file_dict, folder):
log = Logger('Download')
if not file_dict['name']:
log.warning('Missing file name in folder {}'.format(folder))
return
if not file_dict['downloadUrl'] or file_dict['downloadUrl'].lower() == '?download':
log.warning('Missing download URL in folder {}'.format(folder))
return
file_path = os.path.join(folder, file_dict['name'])
if not os.path.isdir(folder):
os.makedirs(folder)
# Write file info as JSON
with open( os.path.splitext(file_path)[0] + '.json', 'w') as json_file:
json.dump(file_dict, json_file)
# Skip files that exist unless they are zero bytes in which case remove them
if os.path.isfile(file_path):
if os.stat(file_path).st_size == 0:
log.warning('Removing zero byte file {}'.format(file_path))
os.remove(file_path)
else:
return
# Download missing file
with open(file_path, 'w+b') as f:
response = APIGet(file_dict['downloadUrl'], absolute=True)
f.write(response.content)
log.info('Downloaded missing file {}'.format(file_path))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('workbench', help='CHaMP Workbench database', type=argparse.FileType('r'))
parser.add_argument('folder', help='Top level folder where API files exist.', type=str)
args = parser.parse_args()
try:
champ_topo_checker(args.workbench.name, args.folder)
except Exception as e:
traceback.print_exc(file=sys.stdout)
if __name__ == "__main__":
main()
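# Example invocation (hypothetical paths):
#   python download_champ_data.py /path/to/workbench.db /data/champ_api_files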
|
SouthForkResearch/CHaMP_Metrics
|
scripts/download_champ_data.py
|
Python
|
gpl-3.0
| 4,178
|
[
"VisIt"
] |
5fc100b8efc72b7d23bced560fd80f92659ebb68233f228c6a856560d788460c
|
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import pyglet
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
from cocos.particle_systems import *
class L(Layer):
def __init__(self):
super( L, self).__init__()
# p = Fireworks()
# p = Explosion()
p = Fire()
# p = Flower()
# p = Sun()
# p = Spiral()
# p = Meteor()
# p = Galaxy()
p.position = (320,100)
self.add( p )
if __name__ == "__main__":
director.init( resizable=True )
main_scene = cocos.scene.Scene()
main_scene.add( L() )
director.run( main_scene )
|
adamwiggins/cocos2d
|
test/test_particle_fire.py
|
Python
|
bsd-3-clause
| 784
|
[
"Galaxy"
] |
240e3e3a051907c9dc1fcd4458ebdd04a9afa725e7c6cf46c39e307db5dbebcf
|
"""
Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1.
AUTHORS
The original version of this software, called LMFIT, was written in FORTRAN
as part of the MINPACK-1 package by XXX.
Craig Markwardt converted the FORTRAN code to IDL. The information for the
IDL version is:
Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770
craigm@lheamail.gsfc.nasa.gov
UPDATED VERSIONs can be found on my WEB PAGE:
http://cow.physics.wisc.edu/~craigm/idl/idl.html
Mark Rivers created this Python version from Craig's IDL version.
Mark Rivers, University of Chicago
Building 434A, Argonne National Laboratory
9700 South Cass Avenue, Argonne, IL 60439
rivers@cars.uchicago.edu
Updated versions can be found at http://cars.uchicago.edu/software
Sergey Koposov converted Mark's Python version from Numeric to numpy
Sergey Koposov, University of Cambridge, Institute of Astronomy,
Madingley road, CB3 0HA, Cambridge, UK
koposov@ast.cam.ac.uk
Updated versions can be found at http://code.google.com/p/astrolibpy/source/browse/trunk/
DESCRIPTION
MPFIT uses the Levenberg-Marquardt technique to solve the
least-squares problem. In its typical use, MPFIT will be used to
fit a user-supplied function (the "model") to user-supplied data
points (the "data") by adjusting a set of parameters. MPFIT is
based upon MINPACK-1 (LMDIF.F) by More' and collaborators.
For example, a researcher may think that a set of observed data
points is best modelled with a Gaussian curve. A Gaussian curve is
parameterized by its mean, standard deviation and normalization.
MPFIT will, within certain constraints, find the set of parameters
which best fits the data. The fit is "best" in the least-squares
sense; that is, the sum of the weighted squared differences between
the model and data is minimized.
The Levenberg-Marquardt technique is a particular strategy for
iteratively searching for the best fit. This particular
implementation is drawn from MINPACK-1 (see NETLIB), and is much faster
and more accurate than the version provided in the Scientific Python package
in Scientific.Functions.LeastSquares.
This version allows upper and lower bounding constraints to be placed on each
parameter, or the parameter can be held fixed.
The user-supplied Python function should return an array of weighted
deviations between model and data. In a typical scientific problem
the residuals should be weighted so that each deviate has a
gaussian sigma of 1.0. If X represents values of the independent
variable, Y represents a measurement for each value of X, and ERR
represents the error in the measurements, then the deviates could
be calculated as follows:
DEVIATES = (Y - F(X)) / ERR
where F is the analytical function representing the model. You are
recommended to use the convenience functions MPFITFUN and
MPFITEXPR, which are driver functions that calculate the deviates
for you. If ERR are the 1-sigma uncertainties in Y, then
TOTAL( DEVIATES^2 )
will be the total chi-squared value. MPFIT will minimize the
chi-square value. The values of X, Y and ERR are passed through
MPFIT to the user-supplied function via the FUNCTKW keyword.
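As a concrete illustration (not part of MPFIT itself), a hypothetical
Gaussian model could compute its weighted deviates like this:

   def gauss_residuals(p, fjac=None, x=None, y=None, err=None):
       # p = [mean, sigma, normalization]; x, y and err arrive via FUNCTKW
       import numpy
       model = p[2] * numpy.exp(-0.5 * ((x - p[0]) / p[1])**2)
       status = 0
       return [status, (y - model) / err]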
Simple constraints can be placed on parameter values by using the
PARINFO keyword to MPFIT. See below for a description of this
keyword.
MPFIT does not perform more general optimization tasks. See TNMIN
instead. MPFIT is customized, based on MINPACK-1, to the
least-squares minimization problem.
USER FUNCTION
The user must define a function which returns the appropriate
values as specified above. The function should return the weighted
deviations between the model and the data. It should also return a status
flag and an optional partial derivative array. For applications which
use finite-difference derivatives -- the default -- the user
function should be declared in the following way:
def myfunct(p, fjac=None, x=None, y=None, err=None)
# Parameter values are passed in "p"
# If fjac==None then partial derivatives should not be
# computed. It will always be None if MPFIT is called with default
# flag.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
return [status, (y-model)/err]
See below for applications with analytical derivatives.
The keyword parameters X, Y, and ERR in the example above are
suggestive but not required. Any parameters can be passed to
MYFUNCT by using the functkw keyword to MPFIT. Use MPFITFUN and
MPFITEXPR if you need ideas on how to do that. The function *must*
accept a parameter list, P.
In general there are no restrictions on the number of dimensions in
X, Y or ERR. However the deviates *must* be returned in a
one-dimensional Numeric array of type Float.
User functions may also indicate a fatal error condition using the
status return described above. If status is set to a number between
-15 and -1 then MPFIT will stop the calculation and return to the caller.
ANALYTIC DERIVATIVES
In the search for the best-fit solution, MPFIT by default
calculates derivatives numerically via a finite difference
approximation. The user-supplied function need not calculate the
derivatives explicitly. However, if you desire to compute them
analytically, then the AUTODERIVATIVE=0 keyword must be passed to MPFIT.
As a practical matter, it is often sufficient and even faster to allow
MPFIT to calculate the derivatives numerically, and so
AUTODERIVATIVE=0 is not necessary.
If AUTODERIVATIVE=0 is used then the user function must check the parameter
FJAC, and if FJAC!=None then return the partial derivative array in the
return list.
def myfunct(p, fjac=None, x=None, y=None, err=None)
# Parameter values are passed in "p"
# If FJAC!=None then partial derivatives must be computed.
# FJAC contains an array of len(p), where each entry
# is 1 if that parameter is free and 0 if it is fixed.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
if fjac is not None:
pderiv = numpy.zeros([len(x), len(p)], dtype=float)
for j in range(len(p)):
pderiv[:,j] = FGRAD(x, p, j)
else:
pderiv = None
return [status, (y-model)/err, pderiv]
where FGRAD(x, p, i) is a user function which must compute the
derivative of the model with respect to parameter P[i] at X. When
finite differencing is used for computing derivatives (ie, when
AUTODERIVATIVE=1), or when MPFIT needs only the errors but not the
derivatives, the parameter FJAC=None.
Derivatives should be returned in the PDERIV array. PDERIV should be an m x
n array, where m is the number of data points and n is the number
of parameters. dp[i,j] is the derivative at the ith point with
respect to the jth parameter.
The derivatives with respect to fixed parameters are ignored; zero
is an appropriate value to insert for those derivatives. Upon
input to the user function, FJAC is set to a vector with the same
length as P, with a value of 1 for a parameter which is free, and a
value of zero for a parameter which is fixed (and hence no
derivative needs to be calculated).
If the data is higher than one dimensional, then the *last*
dimension should be the parameter dimension. Example: fitting a
50x50 image, "dp" should be 50x50xNPAR.
CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD
The behavior of MPFIT can be modified with respect to each
parameter to be fitted. A parameter value can be fixed; simple
boundary constraints can be imposed; limitations on the parameter
changes can be imposed; properties of the automatic derivative can
be modified; and parameters can be tied to one another.
These properties are governed by the PARINFO structure, which is
passed as a keyword parameter to MPFIT.
PARINFO should be a list of dictionaries, one list entry for each parameter.
Each parameter is associated with one element of the array, in
numerical order. The dictionary can have the following keys
(none are required, keys are case insensitive):
'value' - the starting parameter value (but see the START_PARAMS
parameter for more information).
'fixed' - a boolean value, whether the parameter is to be held
fixed or not. Fixed parameters are not varied by
MPFIT, but are passed on to MYFUNCT for evaluation.
'limited' - a two-element boolean array. If the first/second
element is set, then the parameter is bounded on the
lower/upper side. A parameter can be bounded on both
sides. Both LIMITED and LIMITS must be given
together.
'limits' - a two-element float array. Gives the
parameter limits on the lower and upper sides,
respectively. Zero, one or two of these values can be
set, depending on the values of LIMITED. Both LIMITED
and LIMITS must be given together.
'parname' - a string, giving the name of the parameter. The
fitting code of MPFIT does not use this tag in any
way. However, the default iterfunct will print the
parameter name if available.
'step' - the step size to be used in calculating the numerical
derivatives. If set to zero, then the step size is
computed automatically. Ignored when AUTODERIVATIVE=0.
'mpside' - the sidedness of the finite difference when computing
numerical derivatives. This field can take four
values:
0 - one-sided derivative computed automatically
1 - one-sided derivative (f(x+h) - f(x) )/h
-1 - one-sided derivative (f(x) - f(x-h))/h
2 - two-sided derivative (f(x+h) - f(x-h))/(2*h)
Where H is the STEP parameter described above. The
"automatic" one-sided derivative method will chose a
direction for the finite difference which does not
violate any constraints. The other methods do not
perform this check. The two-sided method is in
principle more precise, but requires twice as many
function evaluations. Default: 0.
'mpmaxstep' - the maximum change to be made in the parameter
value. During the fitting process, the parameter
will never be changed by more than this value in
one iteration.
A value of 0 indicates no maximum. Default: 0.
'tied' - a string expression which "ties" the parameter to other
free or fixed parameters. Any expression involving
constants and the parameter array P are permitted.
Example: if parameter 2 is always to be twice parameter
1 then use the following: parinfo[2]['tied'] = '2 * p[1]'.
Since they are totally constrained, tied parameters are
considered to be fixed; no errors are computed for them.
[ NOTE: the PARNAME can't be used in expressions. ]
'mpprint' - if set to 1, then the default iterfunct will print the
parameter value. If set to 0, the parameter value
will not be printed. This tag can be used to
selectively print only a few parameter values out of
many. Default: 1 (all parameters printed)
Future modifications to the PARINFO structure, if any, will involve
adding dictionary tags beginning with the two letters "MP".
Therefore programmers are urged to avoid using tags starting with
the same letters; otherwise they are free to include their own
fields within the PARINFO structure, and they will be ignored.
PARINFO Example:
parinfo = [{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
for i in range(5)]
parinfo[0]['fixed'] = 1
parinfo[4]['limited'][0] = 1
parinfo[4]['limits'][0] = 50.
values = [5.7, 2.2, 500., 1.5, 2000.]
for i in range(5): parinfo[i]['value']=values[i]
A total of 5 parameters, with starting values of 5.7,
2.2, 500, 1.5, and 2000 are given. The first parameter
is fixed at a value of 5.7, and the last parameter is
constrained to be above 50.
EXAMPLE
import mpfit
import numpy
x = numpy.arange(100, dtype=float)
p0 = [5.7, 2.2, 500., 1.5, 2000.]
y = ( p0[0] + p0[1]*x + p0[2]*x**2 + p0[3]*numpy.sqrt(x) +
      p0[4]*numpy.log(x))
fa = {'x':x, 'y':y, 'err':err}
m = mpfit('myfunct', p0, functkw=fa)
print('status = ', m.status)
if (m.status <= 0): print('error message = ', m.errmsg)
print('parameters = ', m.params)
Minimizes sum of squares of MYFUNCT. MYFUNCT is called with the X,
Y, and ERR keyword parameters that are given by FUNCTKW. The
results can be obtained from the returned object m.
THEORY OF OPERATION
There are many specific strategies for function minimization. One
very popular technique is to use function gradient information to
realize the local structure of the function. Near a local minimum
the function value can be Taylor expanded about x0 as follows:
f(x) = f(x0) + f'(x0) . (x-x0) + (1/2) (x-x0) . f''(x0) . (x-x0)
----- --------------- ------------------------------- (1)
Order 0th 1st 2nd
Here f'(x) is the gradient vector of f at x, and f''(x) is the
Hessian matrix of second derivatives of f at x. The vector x is
the set of function parameters, not the measured data vector. One
can find the minimum of f, f(xm) using Newton's method, and
arrives at the following linear equation:
f''(x0) . (xm-x0) = - f'(x0) (2)
If an inverse can be found for f''(x0) then one can solve for
(xm-x0), the step vector from the current position x0 to the new
projected minimum. Here the problem has been linearized (ie, the
gradient information is known to first order). f''(x0) is
symmetric n x n matrix, and should be positive definite.
The Levenberg - Marquardt technique is a variation on this theme.
It adds an additional diagonal term to the equation which may aid the
convergence properties:
(f''(x0) + nu I) . (xm-x0) = -f'(x0) (2a)
where I is the identity matrix. When nu is large, the overall
matrix is diagonally dominant, and the iterations follow steepest
descent. When nu is small, the iterations are quadratically
convergent.
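For illustration only, a minimal numpy sketch of one damped Newton step
from eqn (2a), assuming the gradient g = f'(x0) and Hessian H = f''(x0)
are already available as arrays (MPFIT never forms H explicitly):

   import numpy
   def lm_step(H, g, nu):
       # solve (H + nu*I) . dx = -g for the step dx (eqn 2a)
       return numpy.linalg.solve(H + nu * numpy.eye(len(g)), -g)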
In principle, if f''(x0) and f'(x0) are known then xm-x0 can be
determined. However the Hessian matrix is often difficult or
impossible to compute. The gradient f'(x0) may be easier to
compute, if even by finite difference techniques. So-called
quasi-Newton techniques attempt to successively estimate f''(x0)
by building up gradient information as the iterations proceed.
In the least squares problem there are further simplifications
which assist in solving eqn (2). The function to be minimized is
a sum of squares:
f = Sum(hi^2) (3)
where hi is the ith residual out of m residuals as described
above. This can be substituted back into eqn (2) after computing
the derivatives:
f' = 2 Sum(hi hi')
f'' = 2 Sum(hi' hj') + 2 Sum(hi hi'') (4)
If one assumes that the parameters are already close enough to a
minimum, then one typically finds that the second term in f'' is
negligible [or, in any case, is too difficult to compute]. Thus,
equation (2) can be solved, at least approximately, using only
gradient information.
In matrix notation, the combination of eqns (2) and (4) becomes:
hT' . h' . dx = - hT' . h (5)
Where h is the residual vector (length m), hT is its transpose, h'
is the Jacobian matrix (dimensions n x m), and dx is (xm-x0). The
user function supplies the residual vector h, and in some cases h'
when it is not found by finite differences (see MPFIT_FDJAC2,
which finds h and hT'). Even if dx is not the best absolute step
to take, it does provide a good estimate of the best *direction*,
so often a line minimization will occur along the dx vector
direction.
The method of solution employed by MINPACK is to form the Q . R
factorization of h', where Q is an orthogonal matrix such that QT .
Q = I, and R is upper right triangular. Using h' = Q . R and the
orthogonality of Q, eqn (5) becomes
(RT . QT) . (Q . R) . dx = - (RT . QT) . h
RT . R . dx = - RT . QT . h (6)
R . dx = - QT . h
where the last statement follows because R is upper triangular.
Here, R, QT and h are known so this is a matter of solving for dx.
The routine MPFIT_QRFAC provides the QR factorization of h, with
pivoting, and MPFIT_QRSOLV provides the solution for dx.
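For illustration only, a rough numpy equivalent of eqns (5)-(6), assuming
the residual vector h and its Jacobian J (one row per residual, one column
per parameter) are given; MPFIT itself relies on its own pivoted routines
MPFIT_QRFAC and MPFIT_QRSOLV rather than this sketch:

   import numpy
   def gauss_newton_step(J, h):
       # factor J = Q . R, then solve R . dx = - QT . h (eqn 6)
       Q, R = numpy.linalg.qr(J)
       return numpy.linalg.solve(R, -Q.T.dot(h))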
REFERENCES
MINPACK-1, Jorge More', available from netlib (www.netlib.org).
"Optimization Software Guide," Jorge More' and Stephen Wright,
SIAM, *Frontiers in Applied Mathematics*, Number 14.
More', Jorge J., "The Levenberg-Marquardt Algorithm:
Implementation and Theory," in *Numerical Analysis*, ed. Watson,
G. A., Lecture Notes in Mathematics 630, Springer-Verlag, 1977.
MODIFICATION HISTORY
Translated from MINPACK-1 in FORTRAN, Apr-Jul 1998, CM
Copyright (C) 1997-2002, Craig Markwardt
This software is provided as is without any warranty whatsoever.
Permission to use, copy, modify, and distribute modified or
unmodified copies is granted, provided this copyright and disclaimer
are included unchanged.
Translated from MPFIT (Craig Markwardt's IDL package) to Python,
August, 2002. Mark Rivers
Converted from Numeric to numpy (Sergey Koposov, July 2008)
"""
import numpy
import types
import scipy.linalg.blas
# Original FORTRAN documentation
# **********
#
# subroutine lmdif
#
# the purpose of lmdif is to minimize the sum of the squares of
# m nonlinear functions in n variables by a modification of
# the levenberg-marquardt algorithm. the user must provide a
# subroutine which calculates the functions. the jacobian is
# then calculated by a forward-difference approximation.
#
# the subroutine statement is
#
# subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,
# diag,mode,factor,nprint,info,nfev,fjac,
# ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4)
#
# where
#
# fcn is the name of the user-supplied subroutine which
# calculates the functions. fcn must be declared
# in an external statement in the user calling
# program, and should be written as follows.
#
# subroutine fcn(m,n,x,fvec,iflag)
# integer m,n,iflag
# double precision x(n),fvec(m)
# ----------
# calculate the functions at x and
# return this vector in fvec.
# ----------
# return
# end
#
# the value of iflag should not be changed by fcn unless
# the user wants to terminate execution of lmdif.
# in this case set iflag to a negative integer.
#
# m is a positive integer input variable set to the number
# of functions.
#
# n is a positive integer input variable set to the number
# of variables. n must not exceed m.
#
# x is an array of length n. on input x must contain
# an initial estimate of the solution vector. on output x
# contains the final estimate of the solution vector.
#
# fvec is an output array of length m which contains
# the functions evaluated at the output x.
#
# ftol is a nonnegative input variable. termination
# occurs when both the actual and predicted relative
# reductions in the sum of squares are at most ftol.
# therefore, ftol measures the relative error desired
# in the sum of squares.
#
# xtol is a nonnegative input variable. termination
# occurs when the relative error between two consecutive
# iterates is at most xtol. therefore, xtol measures the
# relative error desired in the approximate solution.
#
# gtol is a nonnegative input variable. termination
# occurs when the cosine of the angle between fvec and
# any column of the jacobian is at most gtol in absolute
# value. therefore, gtol measures the orthogonality
# desired between the function vector and the columns
# of the jacobian.
#
# maxfev is a positive integer input variable. termination
# occurs when the number of calls to fcn is at least
# maxfev by the end of an iteration.
#
# epsfcn is an input variable used in determining a suitable
# step length for the forward-difference approximation. this
# approximation assumes that the relative errors in the
# functions are of the order of epsfcn. if epsfcn is less
# than the machine precision, it is assumed that the relative
# errors in the functions are of the order of the machine
# precision.
#
# diag is an array of length n. if mode = 1 (see
# below), diag is internally set. if mode = 2, diag
# must contain positive entries that serve as
# multiplicative scale factors for the variables.
#
# mode is an integer input variable. if mode = 1, the
# variables will be scaled internally. if mode = 2,
# the scaling is specified by the input diag. other
# values of mode are equivalent to mode = 1.
#
# factor is a positive input variable used in determining the
# initial step bound. this bound is set to the product of
# factor and the euclidean norm of diag*x if nonzero, or else
# to factor itself. in most cases factor should lie in the
# interval (.1,100.). 100. is a generally recommended value.
#
# nprint is an integer input variable that enables controlled
# printing of iterates if it is positive. in this case,
# fcn is called with iflag = 0 at the beginning of the first
# iteration and every nprint iterations thereafter and
# immediately prior to return, with x and fvec available
# for printing. if nprint is not positive, no special calls
# of fcn with iflag = 0 are made.
#
# info is an integer output variable. if the user has
# terminated execution, info is set to the (negative)
# value of iflag. see description of fcn. otherwise,
# info is set as follows.
#
# info = 0 improper input parameters.
#
# info = 1 both actual and predicted relative reductions
# in the sum of squares are at most ftol.
#
# info = 2 relative error between two consecutive iterates
# is at most xtol.
#
# info = 3 conditions for info = 1 and info = 2 both hold.
#
# info = 4 the cosine of the angle between fvec and any
# column of the jacobian is at most gtol in
# absolute value.
#
# info = 5 number of calls to fcn has reached or
# exceeded maxfev.
#
# info = 6 ftol is too small. no further reduction in
# the sum of squares is possible.
#
# info = 7 xtol is too small. no further improvement in
# the approximate solution x is possible.
#
# info = 8 gtol is too small. fvec is orthogonal to the
# columns of the jacobian to machine precision.
#
# nfev is an integer output variable set to the number of
# calls to fcn.
#
# fjac is an output m by n array. the upper n by n submatrix
# of fjac contains an upper triangular matrix r with
# diagonal elements of nonincreasing magnitude such that
#
# t t t
# p *(jac *jac)*p = r *r,
#
# where p is a permutation matrix and jac is the final
# calculated jacobian. column j of p is column ipvt(j)
# (see below) of the identity matrix. the lower trapezoidal
# part of fjac contains information generated during
# the computation of r.
#
# ldfjac is a positive integer input variable not less than m
# which specifies the leading dimension of the array fjac.
#
# ipvt is an integer output array of length n. ipvt
# defines a permutation matrix p such that jac*p = q*r,
# where jac is the final calculated jacobian, q is
# orthogonal (not stored), and r is upper triangular
# with diagonal elements of nonincreasing magnitude.
# column j of p is column ipvt(j) of the identity matrix.
#
# qtf is an output array of length n which contains
# the first n elements of the vector (q transpose)*fvec.
#
# wa1, wa2, and wa3 are work arrays of length n.
#
# wa4 is a work array of length m.
#
# subprograms called
#
# user-supplied ...... fcn
#
# minpack-supplied ... dpmpar,enorm,fdjac2,qrfac
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
class mpfit:
blas_enorm32, = scipy.linalg.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float32))
blas_enorm64, = scipy.linalg.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float64))
def __init__(self, fcn, xall=None, functkw={}, parinfo=None,
ftol=1.e-10, xtol=1.e-10, gtol=1.e-10,
damp=0., maxiter=200, factor=100., nprint=1,
iterfunct='default', iterkw={}, nocovar=0,
rescale=0, autoderivative=1, quiet=0,
diag=None, epsfcn=None, debug=0):
"""
Inputs:
fcn:
The function to be minimized. The function should return the weighted
deviations between the model and the data, as described above.
xall:
An array of starting values for each of the parameters of the model.
The number of parameters should be fewer than the number of measurements.
This parameter is optional if the parinfo keyword is used (but see
parinfo). The parinfo keyword provides a mechanism to fix or constrain
individual parameters.
Keywords:
autoderivative:
If this is set, derivatives of the function will be computed
automatically via a finite differencing procedure. If not set, then
fcn must provide the (analytical) derivatives.
Default: set (=1)
NOTE: to supply your own analytical derivatives,
explicitly pass autoderivative=0
ftol:
A nonnegative input variable. Termination occurs when both the actual
and predicted relative reductions in the sum of squares are at most
ftol (and status is accordingly set to 1 or 3). Therefore, ftol
measures the relative error desired in the sum of squares.
Default: 1E-10
functkw:
A dictionary which contains the parameters to be passed to the
user-supplied function specified by fcn via the standard Python
keyword dictionary mechanism. This is the way you can pass additional
data to your user-supplied function without using global variables.
Consider the following example:
if functkw = {'xval':[1.,2.,3.], 'yval':[1.,4.,9.],
'errval':[1.,1.,1.] }
then the user supplied function should be declared like this:
def myfunct(p, fjac=None, xval=None, yval=None, errval=None):
Default: {} No extra parameters are passed to the user-supplied
function.
gtol:
A nonnegative input variable. Termination occurs when the cosine of
the angle between fvec and any column of the jacobian is at most gtol
in absolute value (and status is accordingly set to 4). Therefore,
gtol measures the orthogonality desired between the function vector
and the columns of the jacobian.
Default: 1e-10
iterkw:
The keyword arguments to be passed to iterfunct via the dictionary
keyword mechanism. This should be a dictionary and is similar in
operation to FUNCTKW.
Default: {} No arguments are passed.
iterfunct:
The name of a function to be called upon each NPRINT iteration of the
MPFIT routine. It should be declared in the following way:
def iterfunct(myfunct, p, iter, fnorm, functkw=None,
parinfo=None, quiet=0, dof=None, [iterkw keywords here])
# perform custom iteration update
iterfunct must accept all three keyword parameters (FUNCTKW, PARINFO
and QUIET).
myfunct: The user-supplied function to be minimized,
p: The current set of model parameters
iter: The iteration number
functkw: The arguments to be passed to myfunct.
fnorm: The chi-squared value.
quiet: Set when no textual output should be printed.
dof: The number of degrees of freedom, normally the number of points
less the number of free parameters.
See below for documentation of parinfo.
In implementation, iterfunct can perform updates to the terminal or
graphical user interface, to provide feedback while the fit proceeds.
If the fit is to be stopped for any reason, then iterfunct should return
a status value between -15 and -1. Otherwise it should return None
(e.g. no return statement) or 0.
In principle, iterfunct should probably not modify the parameter values,
because it may interfere with the algorithm's stability. In practice it
is allowed.
Default: an internal routine is used to print the parameter values.
Set iterfunct=None if there is no user-defined routine and you don't
want the internal default routine to be called.
maxiter:
The maximum number of iterations to perform. If the number is exceeded,
then the status value is set to 5 and MPFIT returns.
Default: 200 iterations
nocovar:
Set this keyword to prevent the calculation of the covariance matrix
before returning (see COVAR)
Default: clear (=0) The covariance matrix is returned
nprint:
The frequency with which iterfunct is called. A value of 1 indicates
that iterfunct is called with every iteration, while 2 indicates every
other iteration, etc. Note that several Levenberg-Marquardt attempts
can be made in a single iteration.
Default value: 1
parinfo
Provides a mechanism for more sophisticated constraints to be placed on
parameter values. When parinfo is not passed, then it is assumed that
all parameters are free and unconstrained. Values in parinfo are never
modified during a call to MPFIT.
See description above for the structure of PARINFO.
Default value: None All parameters are free and unconstrained.
quiet:
Set this keyword when no textual output should be printed by MPFIT
damp:
A scalar number, indicating the cut-off value of residuals where
"damping" will occur. Residuals with magnitudes greater than this
number will be replaced by their hyperbolic tangent. This partially
mitigates the so-called large residual problem inherent in
least-squares solvers (as for the test problem CURVI,
http://www.maxthis.com/curviex.htm).
A value of 0 indicates no damping.
Default: 0
Note: DAMP doesn't work with autoderivative=0
xtol:
A nonnegative input variable. Termination occurs when the relative error
between two consecutive iterates is at most xtol (and status is
accordingly set to 2 or 3). Therefore, xtol measures the relative error
desired in the approximate solution.
Default: 1E-10
Outputs:
Returns an object of type mpfit. The results are attributes of this class,
e.g. mpfit.status, mpfit.errmsg, mpfit.params, mpfit.niter, mpfit.covar.
.status
An integer status code is returned. All values greater than zero can
represent success (however .status == 5 may indicate failure to
converge). It can have one of the following values:
-16
A parameter or function value has become infinite or an undefined
number. This is usually a consequence of numerical overflow in the
user's model function, which must be avoided.
-15 to -1
These are error codes that either MYFUNCT or iterfunct may return to
terminate the fitting process. Values from -15 to -1 are reserved
for the user functions and will not clash with MPFIT.
0 Improper input parameters.
1 Both actual and predicted relative reductions in the sum of squares
are at most ftol.
2 Relative error between two consecutive iterates is at most xtol
3 Conditions for status = 1 and status = 2 both hold.
4 The cosine of the angle between fvec and any column of the jacobian
is at most gtol in absolute value.
5 The maximum number of iterations has been reached.
6 ftol is too small. No further reduction in the sum of squares is
possible.
7 xtol is too small. No further improvement in the approximate solution
x is possible.
8 gtol is too small. fvec is orthogonal to the columns of the jacobian
to machine precision.
.fnorm
The value of the summed squared residuals for the returned parameter
values.
.covar
The covariance matrix for the set of parameters returned by MPFIT.
The matrix is NxN where N is the number of parameters. The square root
of the diagonal elements gives the formal 1-sigma statistical errors on
the parameters if errors were treated "properly" in fcn.
Parameter errors are also returned in .perror.
To compute the correlation matrix, pcor, use this example:
cov = mpfit.covar
pcor = cov * 0.
for i in range(n):
for j in range(n):
pcor[i,j] = cov[i,j]/sqrt(cov[i,i]*cov[j,j])
If nocovar is set or MPFIT terminated abnormally, then .covar is set to
a scalar with value None.
.errmsg
A string error or warning message is returned.
.nfev
The number of calls to MYFUNCT performed.
.niter
The number of iterations completed.
.perror
The formal 1-sigma errors in each parameter, computed from the
covariance matrix. If a parameter is held fixed, or if it touches a
boundary, then the error is reported as zero.
If the fit is unweighted (i.e. no errors were given, or the weights
were uniformly set to unity), then .perror will probably not represent
the true parameter uncertainties.
*If* you can assume that the true reduced chi-squared value is unity --
meaning that the fit is implicitly assumed to be of good quality --
then the estimated parameter uncertainties can be computed by scaling
.perror by the measured chi-squared value.
dof = len(x) - len(mpfit.params) # deg of freedom
# scaled uncertainties
pcerror = mpfit.perror * sqrt(mpfit.fnorm / dof)
"""
self.niter = 0
self.params = None
self.covar = None
self.perror = None
self.status = 0 # Invalid input flag set while we check inputs
self.debug = debug
self.errmsg = ''
self.nfev = 0
self.damp = damp
self.dof=0
if fcn is None:
self.errmsg = "Usage: parms = mpfit('myfunct', ... )"
return
if iterfunct == 'default':
iterfunct = self.defiter
# Parameter damping doesn't work when user is providing their own
# gradients.
if (self.damp != 0) and (autoderivative == 0):
self.errmsg = 'ERROR: keywords DAMP and AUTODERIVATIVE are mutually exclusive'
return
# Parameters can either be stored in parinfo, or x. x takes precedence if it exists
if (xall is None) and (parinfo is None):
self.errmsg = 'ERROR: must pass parameters in P or PARINFO'
return
# Be sure that PARINFO is of the right type
if parinfo is not None:
if type(parinfo) is not list:
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
else:
if type(parinfo[0]) is not dict:
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
if ((xall is not None) and (len(xall) != len(parinfo))):
self.errmsg = 'ERROR: number of elements in PARINFO and P must agree'
return
# If the parameters were not specified at the command line, then
# extract them from PARINFO
if xall is None:
xall = self.parinfo(parinfo, 'value')
if xall is None:
self.errmsg = 'ERROR: either P or PARINFO(*)["value"] must be supplied.'
return
# Make sure parameters are numpy arrays
xall = numpy.asarray(xall)
# If xall is not a float array, or is a float array narrower than
# 64 bits, convert it to double precision
if xall.dtype.kind != 'f' or xall.dtype.itemsize<=4:
xall = xall.astype(numpy.float64)
npar = len(xall)
self.fnorm = -1.
fnorm1 = -1.
# TIED parameters?
ptied = self.parinfo(parinfo, 'tied', default='', n=npar)
self.qanytied = 0
for i in range(npar):
ptied[i] = ptied[i].strip()
if ptied[i] != '':
self.qanytied = 1
self.ptied = ptied
# FIXED parameters ?
pfixed = self.parinfo(parinfo, 'fixed', default=0, n=npar)
pfixed = (pfixed == 1)
for i in range(npar):
pfixed[i] = pfixed[i] or (ptied[i] != '') # Tied parameters are also effectively fixed
# Finite differencing step, absolute and relative, and sidedness of deriv.
step = self.parinfo(parinfo, 'step', default=0., n=npar)
dstep = self.parinfo(parinfo, 'relstep', default=0., n=npar)
dside = self.parinfo(parinfo, 'mpside', default=0, n=npar)
# Maximum and minimum steps allowed to be taken in one iteration
maxstep = self.parinfo(parinfo, 'mpmaxstep', default=0., n=npar)
minstep = self.parinfo(parinfo, 'mpminstep', default=0., n=npar)
qmin = minstep != 0
qmin[:] = False # Remove minstep for now!!
qmax = maxstep != 0
if numpy.any(qmin & qmax & (maxstep<minstep)):
self.errmsg = 'ERROR: MPMINSTEP is greater than MPMAXSTEP'
return
wh = (numpy.nonzero((qmin!=0.) | (qmax!=0.)))[0]
qminmax = len(wh) > 0
# Finish up the free parameters
ifree = (numpy.nonzero(pfixed != 1))[0]
nfree = len(ifree)
if nfree == 0:
self.errmsg = 'ERROR: no free parameters'
return
# Compose only VARYING parameters
self.params = xall.copy() # self.params is the set of parameters to be returned
x = self.params[ifree] # x is the set of free parameters
# LIMITED parameters ?
limited = self.parinfo(parinfo, 'limited', default=[0,0], n=npar)
limits = self.parinfo(parinfo, 'limits', default=[0.,0.], n=npar)
if (limited is not None) and (limits is not None):
# Error checking on limits in parinfo
if numpy.any((limited[:,0] & (xall < limits[:,0])) |
(limited[:,1] & (xall > limits[:,1]))):
self.errmsg = 'ERROR: parameters are not within PARINFO limits'
return
if numpy.any((limited[:,0] & limited[:,1]) &
(limits[:,0] >= limits[:,1]) &
(pfixed == 0)):
self.errmsg = 'ERROR: PARINFO parameter limits are not consistent'
return
# Transfer structure values to local variables
qulim = (limited[:,1])[ifree]
ulim = (limits [:,1])[ifree]
qllim = (limited[:,0])[ifree]
llim = (limits [:,0])[ifree]
if numpy.any((qulim!=0.) | (qllim!=0.)):
qanylim = 1
else:
qanylim = 0
else:
# Fill in local variables with dummy values
qulim = numpy.zeros(nfree)
ulim = x * 0.
qllim = qulim
llim = x * 0.
qanylim = 0
n = len(x)
# Check input parameters for errors
if (n < 0) or (ftol <= 0) or (xtol <= 0) or (gtol <= 0) \
or (maxiter < 0) or (factor <= 0):
self.errmsg = 'ERROR: input keywords are inconsistent'
return
if rescale != 0:
self.errmsg = 'ERROR: DIAG parameter scales are inconsistent'
if len(diag) < n:
return
if numpy.any(diag <= 0):
return
self.errmsg = ''
[self.status, fvec] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'ERROR: first call to "'+str(fcn)+'" failed'
return
# If the returned fvec has elements wider than four bytes I assume that we have
# double precision
# It is important that the machar is determined by the precision of
# the returned value, not by the precision of the input array
if numpy.array([fvec]).dtype.itemsize>4:
self.machar = machar(double=1)
self.blas_enorm = mpfit.blas_enorm64
else:
self.machar = machar(double=0)
self.blas_enorm = mpfit.blas_enorm32
machep = self.machar.machep
m = len(fvec)
if m < n:
self.errmsg = 'ERROR: number of parameters must not exceed data'
return
self.dof = m-nfree
self.fnorm = self.enorm(fvec)
# Initialize the Levenberg-Marquardt parameter and iteration counter
par = 0.
self.niter = 1
qtf = x * 0.
self.status = 0
# Beginning of the outer loop
while(1):
# If requested, call fcn to enable printing of iterates
self.params[ifree] = x
if self.qanytied:
self.params = self.tie(self.params, ptied)
if (nprint > 0) and (iterfunct is not None):
if ((self.niter-1) % nprint) == 0:
mperr = 0
xnew0 = self.params.copy()
dof = numpy.max([len(fvec) - len(x), 0])
status = iterfunct(fcn, self.params, self.niter, self.fnorm**2,
functkw=functkw, parinfo=parinfo, quiet=quiet,
dof=dof, **iterkw)
if status is not None:
self.status = status
# Check for user termination
if self.status < 0:
self.errmsg = 'WARNING: premature termination by ' + str(iterfunct)
return
# If parameters were changed (grrr..) then re-tie
if numpy.max(numpy.abs(xnew0-self.params)) > 0:
if self.qanytied:
self.params = self.tie(self.params, ptied)
x = self.params[ifree]
# Calculate the jacobian matrix
self.status = 2
catch_msg = 'calling MPFIT_FDJAC2'
fjac = self.fdjac2(fcn, x, fvec, step, qulim, ulim, dside,
epsfcn=epsfcn,
autoderivative=autoderivative, dstep=dstep,
functkw=functkw, ifree=ifree, xall=self.params)
if fjac is None:
self.errmsg = 'WARNING: premature termination by FDJAC2'
return
# Determine if any of the parameters are pegged at the limits
if qanylim:
catch_msg = 'zeroing derivatives of pegged parameters'
whlpeg = (numpy.nonzero(qllim & (x == llim)))[0]
nlpeg = len(whlpeg)
whupeg = (numpy.nonzero(qulim & (x == ulim)))[0]
nupeg = len(whupeg)
# See if any "pegged" values should keep their derivatives
if nlpeg > 0:
# Total derivative of sum wrt lower pegged parameters
for i in range(nlpeg):
sum0 = sum(fvec * fjac[:,whlpeg[i]])
if sum0 > 0:
fjac[:,whlpeg[i]] = 0
if nupeg > 0:
# Total derivative of sum wrt upper pegged parameters
for i in range(nupeg):
sum0 = sum(fvec * fjac[:,whupeg[i]])
if sum0 < 0:
fjac[:,whupeg[i]] = 0
# Compute the QR factorization of the jacobian
[fjac, ipvt, wa1, wa2] = self.qrfac(fjac, pivot=1)
# On the first iteration if "diag" is unspecified, scale
# according to the norms of the columns of the initial jacobian
catch_msg = 'rescaling diagonal elements'
if self.niter == 1:
if (rescale==0) or (len(diag) < n):
diag = wa2.copy()
diag[diag == 0] = 1.
# On the first iteration, calculate the norm of the scaled x
# and initialize the step bound delta
wa3 = diag * x
xnorm = self.enorm(wa3)
delta = factor*xnorm
if delta == 0.:
delta = factor
# Form (q transpose)*fvec and store the first n components in qtf
catch_msg = 'forming (q transpose)*fvec'
wa4 = fvec.copy()
for j in range(n):
lj = ipvt[j]
temp3 = fjac[j,lj]
if temp3 != 0:
fj = fjac[j:,lj]
wj = wa4[j:]
# *** optimization wa4(j:*)
wa4[j:] = wj - fj * sum(fj*wj) / temp3
fjac[j,lj] = wa1[j]
qtf[j] = wa4[j]
# From this point on, only the square matrix, consisting of the
# triangle of R, is needed.
fjac = fjac[0:n, 0:n]
fjac.shape = [n, n]
temp = fjac.copy()
for i in range(n):
temp[:,i] = fjac[:, ipvt[i]]
fjac = temp.copy()
# Check for overflow. This should be a cheap test here since FJAC
# has been reduced to a (small) square matrix, and the test is
# O(N^2).
#wh = where(finite(fjac) EQ 0, ct)
#if ct GT 0 then goto, FAIL_OVERFLOW
# Compute the norm of the scaled gradient
catch_msg = 'computing the scaled gradient'
gnorm = 0.
if self.fnorm != 0:
for j in range(n):
l = ipvt[j]
if wa2[l] != 0:
sum0 = sum(fjac[0:j+1,j]*qtf[0:j+1])/self.fnorm
gnorm = numpy.max([gnorm,numpy.abs(sum0/wa2[l])])
# Test for convergence of the gradient norm
if gnorm <= gtol:
self.status = 4
break
if maxiter == 0:
self.status = 5
break
# Rescale if necessary
if rescale == 0:
diag = numpy.choose(diag>wa2, (wa2, diag))
# Beginning of the inner loop
while(1):
# Determine the levenberg-marquardt parameter
catch_msg = 'calculating LM parameter (MPFIT_)'
[fjac, par, wa1, wa2] = self.lmpar(fjac, ipvt, diag, qtf,
delta, wa1, wa2, par=par)
# Store the direction p and x+p. Calculate the norm of p
wa1 = -wa1
if (qanylim == 0) and (qminmax == 0):
# No parameter limits, so just move to new position WA2
alpha = 1.
wa2 = x + wa1
else:
# Respect the limits. If a step were to go out of bounds, then
# we should take a step in the same direction but shorter distance.
# The step should take us right to the limit in that case.
alpha = 1.
if qanylim:
# Do not allow any steps out of bounds
catch_msg = 'checking for a step out of bounds'
if nlpeg > 0:
wa1[whlpeg] = numpy.clip( wa1[whlpeg], 0., numpy.max(wa1))
if nupeg > 0:
wa1[whupeg] = numpy.clip(wa1[whupeg], numpy.min(wa1), 0.)
dwa1 = numpy.abs(wa1) > machep
whl = (numpy.nonzero(((dwa1!=0.) & qllim) & ((x + wa1) < llim)))[0]
if len(whl) > 0:
t = ((llim[whl] - x[whl]) /
wa1[whl])
alpha = numpy.min([alpha, numpy.min(t)])
whu = (numpy.nonzero(((dwa1!=0.) & qulim) & ((x + wa1) > ulim)))[0]
if len(whu) > 0:
t = ((ulim[whu] - x[whu]) /
wa1[whu])
alpha = numpy.min([alpha, numpy.min(t)])
# Obey any max step values.
if qminmax:
nwa1 = wa1 * alpha
whmax = (numpy.nonzero((qmax[ifree] != 0.) & (maxstep[ifree] > 0)))[0]
if len(whmax) > 0:
mrat = numpy.max(numpy.abs(nwa1[whmax]) /
numpy.abs(maxstep[ifree[whmax]]))
if mrat > 1:
alpha = alpha / mrat
# Scale the resulting vector
wa1 = wa1 * alpha
wa2 = x + wa1
# Adjust the final output values. If the step put us exactly
# on a boundary, make sure it is exact.
sgnu = (ulim >= 0) * 2. - 1.
sgnl = (llim >= 0) * 2. - 1.
# Handles case of
# ... nonzero *LIM ... ...zero * LIM
ulim1 = ulim * (1 - sgnu * machep) - (ulim == 0) * machep
llim1 = llim * (1 + sgnl * machep) + (llim == 0) * machep
wh = (numpy.nonzero((qulim!=0) & (wa2 >= ulim1)))[0]
if len(wh) > 0:
wa2[wh] = ulim[wh]
wh = (numpy.nonzero((qllim!=0.) & (wa2 <= llim1)))[0]
if len(wh) > 0:
wa2[wh] = llim[wh]
# endelse
wa3 = diag * wa1
pnorm = self.enorm(wa3)
# On the first iteration, adjust the initial step bound
if self.niter == 1:
delta = numpy.min([delta,pnorm])
self.params[ifree] = wa2
# Evaluate the function at x+p and calculate its norm
mperr = 0
catch_msg = 'calling '+str(fcn)
[self.status, wa4] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'WARNING: premature termination by "'+fcn+'"'
return
fnorm1 = self.enorm(wa4)
# Compute the scaled actual reduction
catch_msg = 'computing convergence criteria'
actred = -1.
if (0.1 * fnorm1) < self.fnorm:
actred = - (fnorm1/self.fnorm)**2 + 1.
# Compute the scaled predicted reduction and the scaled directional
# derivative
for j in range(n):
wa3[j] = 0
wa3[0:j+1] = wa3[0:j+1] + fjac[0:j+1,j]*wa1[ipvt[j]]
# Remember, alpha is the fraction of the full LM step actually
# taken
temp1 = self.enorm(alpha*wa3)/self.fnorm
temp2 = (numpy.sqrt(alpha*par)*pnorm)/self.fnorm
prered = temp1*temp1 + (temp2*temp2)/0.5
dirder = -(temp1*temp1 + temp2*temp2)
# Compute the ratio of the actual to the predicted reduction.
ratio = 0.
if prered != 0:
ratio = actred/prered
# Update the step bound
if ratio <= 0.25:
if actred >= 0:
temp = .5
else:
temp = .5*dirder/(dirder + .5*actred)
if ((0.1*fnorm1) >= self.fnorm) or (temp < 0.1):
temp = 0.1
delta = temp*numpy.min([delta,pnorm/0.1])
par = par/temp
else:
if (par == 0) or (ratio >= 0.75):
delta = pnorm/.5
par = .5*par
# Test for successful iteration
if ratio >= 0.0001:
# Successful iteration. Update x, fvec, and their norms
x = wa2
wa2 = diag * x
fvec = wa4
xnorm = self.enorm(wa2)
self.fnorm = fnorm1
self.niter = self.niter + 1
# Tests for convergence
if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
and (0.5 * ratio <= 1):
self.status = 1
if delta <= xtol*xnorm:
self.status = 2
if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
and (0.5 * ratio <= 1) and (self.status == 2):
self.status = 3
if self.status != 0:
break
# Tests for termination and stringent tolerances
if self.niter >= maxiter:
self.status = 5
if (numpy.abs(actred) <= machep) and (prered <= machep) \
and (0.5*ratio <= 1):
self.status = 6
if delta <= machep*xnorm:
self.status = 7
if gnorm <= machep:
self.status = 8
if self.status != 0:
break
# End of inner loop. Repeat if iteration unsuccessful
if ratio >= 0.0001:
break
# Check for over/underflow
if ~numpy.all(numpy.isfinite(wa1) & numpy.isfinite(wa2) & \
numpy.isfinite(x)) or ~numpy.isfinite(ratio):
self.errmsg = ('ERROR: parameter or function value(s) have become '
'infinite; check model function for over- and underflow')
self.status = -16
break
#wh = where(finite(wa1) EQ 0 OR finite(wa2) EQ 0 OR finite(x) EQ 0, ct)
#if ct GT 0 OR finite(ratio) EQ 0 then begin
if self.status != 0:
break
# End of outer loop.
catch_msg = 'in the termination phase'
# Termination, either normal or user imposed.
if len(self.params) == 0:
return
if nfree == 0:
self.params = xall.copy()
else:
self.params[ifree] = x
if (nprint > 0) and (self.status > 0):
catch_msg = 'calling ' + str(fcn)
[status, fvec] = self.call(fcn, self.params, functkw)
catch_msg = 'in the termination phase'
self.fnorm = self.enorm(fvec)
if (self.fnorm is not None) and (fnorm1 is not None):
self.fnorm = numpy.max([self.fnorm, fnorm1])
self.fnorm = self.fnorm**2.
self.covar = None
self.perror = None
# (very carefully) set the covariance matrix COVAR
if (self.status > 0) and (nocovar==0) and (n is not None) \
and (fjac is not None) and (ipvt is not None):
sz = fjac.shape
if (n > 0) and (sz[0] >= n) and (sz[1] >= n) \
and (len(ipvt) >= n):
catch_msg = 'computing the covariance matrix'
cv = self.calc_covar(fjac[0:n,0:n], ipvt[0:n])
cv.shape = [n, n]
nn = len(xall)
# Fill in actual covariance matrix, accounting for fixed
# parameters.
self.covar = numpy.zeros([nn, nn], dtype=float)
for i in range(n):
self.covar[ifree,ifree[i]] = cv[:,i]
# Compute errors in parameters
catch_msg = 'computing parameter errors'
self.perror = numpy.zeros(nn, dtype=float)
d = numpy.diagonal(self.covar)
wh = (numpy.nonzero(d >= 0))[0]
if len(wh) > 0:
self.perror[wh] = numpy.sqrt(d[wh])
return
def __str__(self):
return {'params': self.params,
'niter': self.niter,
'covar': self.covar,
'perror': self.perror,
'status': self.status,
'debug': self.debug,
'errmsg': self.errmsg,
'nfev': self.nfev,
'damp': self.damp
#,'machar':self.machar
}.__str__()
# Default procedure to be called every iteration. It simply prints
# the parameter values.
def defiter(self, fcn, x, iter, fnorm=None, functkw=None,
quiet=0, iterstop=None, parinfo=None,
format=None, pformat='%.10g', dof=1):
if self.debug:
print ('Entering defiter...')
if quiet:
return
if fnorm is None:
[status, fvec] = self.call(fcn, x, functkw)
fnorm = self.enorm(fvec)**2
# Determine which parameters to print
nprint = len(x)
print ("Iter ", ('%6i' % iter)," CHI-SQUARE = ",('%.10g' % fnorm)," DOF = ", ('%i' % dof))
for i in range(nprint):
if (parinfo is not None) and ('parname' in parinfo[i]):
p = ' ' + parinfo[i]['parname'] + ' = '
else:
p = ' P' + str(i) + ' = '
if (parinfo is not None) and ('mpprint' in parinfo[i]):
iprint = parinfo[i]['mpprint']
else:
iprint = 1
if iprint:
print (p + (pformat % x[i]) + ' ')
return 0
# DO_ITERSTOP:
# if keyword_set(iterstop) then begin
# k = get_kbrd(0)
# if k EQ string(byte(7)) then begin
# message, 'WARNING: minimization not complete', /info
# print, 'Do you want to terminate this procedure? (y/n)', $
# format='(A,$)'
# k = ''
# read, k
# if strupcase(strmid(k,0,1)) EQ 'Y' then begin
# message, 'WARNING: Procedure is terminating.', /info
# mperr = -1
# endif
# endif
# endif
# Procedure to parse the parameter values in PARINFO, which is a list of dictionaries
def parinfo(self, parinfo=None, key='a', default=None, n=0):
if self.debug:
print ('Entering parinfo...')
if (n == 0) and (parinfo is not None):
n = len(parinfo)
if n == 0:
values = default
return values
values = []
for i in range(n):
if (parinfo is not None) and (key in parinfo[i]):
values.append(parinfo[i][key])
else:
values.append(default)
# Convert to numeric arrays if possible
test = default
if type(default) is list:
test=default[0]
values = numpy.asarray(values)
return values
# Call user function or procedure, with _EXTRA or not, with
# derivatives or not.
def call(self, fcn, x, functkw, fjac=None):
if self.debug:
print ('Entering call...')
if self.qanytied:
x = self.tie(x, self.ptied)
self.nfev = self.nfev + 1
if fjac is None:
[status, f] = fcn(x, fjac=fjac, **functkw)
if self.damp > 0:
# Apply the damping if requested. This replaces the residuals
# with their hyperbolic tangent. Thus residuals larger than
# DAMP are essentially clipped.
f = numpy.tanh(f/self.damp)
return [status, f]
else:
return fcn(x, fjac=fjac, **functkw)
def enorm(self, vec):
ans = self.blas_enorm(vec)
return ans
def fdjac2(self, fcn, x, fvec, step=None, ulimited=None, ulimit=None, dside=None,
epsfcn=None, autoderivative=1,
functkw=None, xall=None, ifree=None, dstep=None):
if self.debug:
print ('Entering fdjac2...')
machep = self.machar.machep
if epsfcn is None:
epsfcn = machep
if xall is None:
xall = x
if ifree is None:
ifree = numpy.arange(len(xall))
if step is None:
step = x * 0.
nall = len(xall)
eps = numpy.sqrt(numpy.max([epsfcn, machep]))
m = len(fvec)
n = len(x)
# Compute analytical derivative if requested
if autoderivative == 0:
mperr = 0
fjac = numpy.zeros(nall, dtype=float)
fjac[ifree] = 1.0 # Specify which parameters need derivatives
[status, fp] = self.call(fcn, xall, functkw, fjac=fjac)
if len(fjac) != m*nall:
print ('ERROR: Derivative matrix was not computed properly.')
return None
# This definition is consistent with CURVEFIT
# Sign error found (thanks Jesus Fernandez <fernande@irm.chu-caen.fr>)
fjac.shape = [m,nall]
fjac = -fjac
# Select only the free parameters
if len(ifree) < nall:
fjac = fjac[:,ifree]
fjac.shape = [m, n]
return fjac
fjac = numpy.zeros([m, n], dtype=float)
h = eps * numpy.abs(x)
# if STEP is given, use that
# STEP includes the fixed parameters
if step is not None:
stepi = step[ifree]
wh = (numpy.nonzero(stepi > 0))[0]
if len(wh) > 0:
h[wh] = stepi[wh]
# if relative step is given, use that
# DSTEP includes the fixed parameters
if len(dstep) > 0:
dstepi = dstep[ifree]
wh = (numpy.nonzero(dstepi > 0))[0]
if len(wh) > 0:
h[wh] = numpy.abs(dstepi[wh]*x[wh])
# In case any of the step values are zero
h[h == 0] = eps
# Reverse the sign of the step if we are up against the parameter
# limit, or if the user requested it.
# DSIDE includes the fixed parameters (ULIMITED/ULIMIT have only
# varying ones)
mask = dside[ifree] == -1
if len(ulimited) > 0 and len(ulimit) > 0:
mask = (mask | ((ulimited!=0) & (x > ulimit-h)))
wh = (numpy.nonzero(mask))[0]
if len(wh) > 0:
h[wh] = - h[wh]
# Loop through parameters, computing the derivative for each
for j in range(n):
xp = xall.copy()
xp[ifree[j]] = xp[ifree[j]] + h[j]
[status, fp] = self.call(fcn, xp, functkw)
if status < 0:
return None
if numpy.abs(dside[ifree[j]]) <= 1:
# COMPUTE THE ONE-SIDED DERIVATIVE
# Note optimization fjac(0:*,j)
fjac[0:,j] = (fp-fvec)/h[j]
else:
# COMPUTE THE TWO-SIDED DERIVATIVE
xp[ifree[j]] = xall[ifree[j]] - h[j]
mperr = 0
[status, fm] = self.call(fcn, xp, functkw)
if status < 0:
return None
# Note optimization fjac(0:*,j)
fjac[0:,j] = (fp-fm)/(2*h[j])
return fjac
# Original FORTRAN documentation
# **********
#
# subroutine qrfac
#
# this subroutine uses householder transformations with column
# pivoting (optional) to compute a qr factorization of the
# m by n matrix a. that is, qrfac determines an orthogonal
# matrix q, a permutation matrix p, and an upper trapezoidal
# matrix r with diagonal elements of nonincreasing magnitude,
# such that a*p = q*r. the householder transformation for
# column k, k = 1,2,...,min(m,n), is of the form
#
# t
# i - (1/u(k))*u*u
#
# where u has zeros in the first k-1 positions. the form of
# this transformation and the method of pivoting first
# appeared in the corresponding linpack subroutine.
#
# the subroutine statement is
#
# subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa)
#
# where
#
# m is a positive integer input variable set to the number
# of rows of a.
#
# n is a positive integer input variable set to the number
# of columns of a.
#
# a is an m by n array. on input a contains the matrix for
# which the qr factorization is to be computed. on output
# the strict upper trapezoidal part of a contains the strict
# upper trapezoidal part of r, and the lower trapezoidal
# part of a contains a factored form of q (the non-trivial
# elements of the u vectors described above).
#
# lda is a positive integer input variable not less than m
# which specifies the leading dimension of the array a.
#
# pivot is a logical input variable. if pivot is set true,
# then column pivoting is enforced. if pivot is set false,
# then no column pivoting is done.
#
# ipvt is an integer output array of length lipvt. ipvt
# defines the permutation matrix p such that a*p = q*r.
# column j of p is column ipvt(j) of the identity matrix.
# if pivot is false, ipvt is not referenced.
#
# lipvt is a positive integer input variable. if pivot is false,
# then lipvt may be as small as 1. if pivot is true, then
# lipvt must be at least n.
#
# rdiag is an output array of length n which contains the
# diagonal elements of r.
#
# acnorm is an output array of length n which contains the
# norms of the corresponding columns of the input matrix a.
# if this information is not needed, then acnorm can coincide
# with rdiag.
#
# wa is a work array of length n. if pivot is false, then wa
# can coincide with rdiag.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm
#
# fortran-supplied ... dmax1,dsqrt,min0
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
#
# PIVOTING / PERMUTING:
#
# Upon return, A(*,*) is in standard parameter order, A(*,IPVT) is in
# permuted order.
#
# RDIAG is in permuted order.
# ACNORM is in standard parameter order.
#
#
# NOTE: in IDL the factors appear slightly differently than described
# above. The matrix A is still m x n where m >= n.
#
# The "upper" triangular matrix R is actually stored in the strict
# lower left triangle of A under the standard notation of IDL.
#
# The reflectors that generate Q are in the upper trapezoid of A upon
# output.
#
# EXAMPLE: decompose the matrix [[9.,2.,6.],[4.,8.,7.]]
# aa = [[9.,2.,6.],[4.,8.,7.]]
# mpfit_qrfac, aa, aapvt, rdiag, aanorm
# IDL> print, aa
# 1.81818* 0.181818* 0.545455*
# -8.54545+ 1.90160* 0.432573*
# IDL> print, rdiag
# -11.0000+ -7.48166+
#
# The components marked with a * are the components of the
# reflectors, and those marked with a + are components of R.
#
# To reconstruct Q and R we proceed as follows. First R.
# r = fltarr(m, n)
# for i = 0, n-1 do r(0:i,i) = aa(0:i,i) # fill in lower diag
# r(lindgen(n)*(m+1)) = rdiag
#
# Next, Q, which are composed from the reflectors. Each reflector v
# is taken from the upper trapezoid of aa, and converted to a matrix
# via (I - 2 vT . v / (v . vT)).
#
# hh = ident # identity matrix
# for i = 0, n-1 do begin
# v = aa(*,i) & if i GT 0 then v(0:i-1) = 0 # extract reflector
# hh = hh # (ident - 2*(v # v)/total(v * v)) # generate matrix
# endfor
#
# Test the result:
# IDL> print, hh # transpose(r)
# 9.00000 4.00000
# 2.00000 8.00000
# 6.00000 7.00000
#
# Note that it is usually never necessary to form the Q matrix
# explicitly, and MPFIT does not.
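#
# For reference, an independent numpy check of the same decomposition
# (illustration only; MPFIT never forms Q explicitly and stores the
# reflectors in its own packed convention as described above):
#
#   import numpy
#   aa = numpy.array([[9., 4.], [2., 8.], [6., 7.]])  # the 3x2 example above
#   q, r = numpy.linalg.qr(aa)
#   # q.dot(r) reproduces aa; the signs of the individual factors may
#   # differ from the MINPACK/IDL output shown above.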
def qrfac(self, a, pivot=0):
if self.debug: print ('Entering qrfac...')
machep = self.machar.machep
sz = a.shape
m = sz[0]
n = sz[1]
# Compute the initial column norms and initialize arrays
acnorm = numpy.zeros(n, dtype=float)
for j in range(n):
acnorm[j] = self.enorm(a[:,j])
rdiag = acnorm.copy()
wa = rdiag.copy()
ipvt = numpy.arange(n)
# Reduce a to r with householder transformations
minmn = numpy.min([m,n])
for j in range(minmn):
if pivot != 0:
# Bring the column of largest norm into the pivot position
rmax = numpy.max(rdiag[j:])
kmax = (numpy.nonzero(rdiag[j:] == rmax))[0]
ct = len(kmax)
kmax = kmax + j
if ct > 0:
kmax = kmax[0]
# Exchange rows via the pivot only. Avoid actually exchanging
# the rows, in case there is lots of memory transfer. The
# exchange occurs later, within the body of MPFIT, after the
# extraneous columns of the matrix have been shed.
if kmax != j:
temp = ipvt[j] ; ipvt[j] = ipvt[kmax] ; ipvt[kmax] = temp
rdiag[kmax] = rdiag[j]
wa[kmax] = wa[j]
# Compute the householder transformation to reduce the jth
# column of A to a multiple of the jth unit vector
lj = ipvt[j]
ajj = a[j:,lj]
ajnorm = self.enorm(ajj)
if ajnorm == 0:
break
if a[j,lj] < 0:
ajnorm = -ajnorm
ajj = ajj / ajnorm
ajj[0] = ajj[0] + 1
# *** Note optimization a(j:*,j)
a[j:,lj] = ajj
# Apply the transformation to the remaining columns
# and update the norms
# NOTE to SELF: tried to optimize this by removing the loop,
# but it actually got slower. Reverted to "for" loop to keep
# it simple.
if j+1 < n:
for k in range(j+1, n):
lk = ipvt[k]
ajk = a[j:,lk]
# *** Note optimization a(j:*,lk)
# (corrected 20 Jul 2000)
if a[j,lj] != 0:
a[j:,lk] = ajk - ajj * sum(ajk*ajj)/a[j,lj]
if (pivot != 0) and (rdiag[k] != 0):
temp = a[j,lk]/rdiag[k]
rdiag[k] = rdiag[k] * numpy.sqrt(numpy.max([(1.-temp**2), 0.]))
temp = rdiag[k]/wa[k]
if (0.05*temp*temp) <= machep:
rdiag[k] = self.enorm(a[j+1:,lk])
wa[k] = rdiag[k]
rdiag[j] = -ajnorm
return [a, ipvt, rdiag, acnorm]
# Original FORTRAN documentation
# **********
#
# subroutine qrsolv
#
# given an m by n matrix a, an n by n diagonal matrix d,
# and an m-vector b, the problem is to determine an x which
# solves the system
#
# a*x = b , d*x = 0 ,
#
# in the least squares sense.
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then qrsolv expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. the system
# a*x = b, d*x = 0, is then equivalent to
#
# t t
# r*z = q *b , p *d*p*z = 0 ,
#
# where x = p*z. if this system does not have full rank,
# then a least squares solution is obtained. on output qrsolv
# also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + d*d)*p = s *s .
#
# s is computed within qrsolv and may be of separate interest.
#
# the subroutine statement is
#
# subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, d*x = 0.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
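# A hedged numpy sketch of the system being solved (useful only as a check
# on small problems; not part of MPFIT itself): a*x = b, d*x = 0 in the
# least-squares sense is the ordinary least-squares problem on the stacked
# matrix [a; diag(d)]:
#
#   import numpy
#   a = numpy.random.rand(5, 3); b = numpy.random.rand(5)
#   d = numpy.array([0.1, 0.2, 0.3])              # diagonal of D
#   big_a = numpy.vstack([a, numpy.diag(d)])
#   big_b = numpy.concatenate([b, numpy.zeros(3)])
#   x_ref = numpy.linalg.lstsq(big_a, big_b, rcond=None)[0]
#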
def qrsolv(self, r, ipvt, diag, qtb, sdiag):
if self.debug:
print ('Entering qrsolv...')
sz = r.shape
m = sz[0]
n = sz[1]
# copy r and (q transpose)*b to preserve input and initialize s.
# in particular, save the diagonal elements of r in x.
for j in range(n):
r[j:n,j] = r[j,j:n]
x = numpy.diagonal(r).copy()
wa = qtb.copy()
# Eliminate the diagonal matrix d using a givens rotation
for j in range(n):
l = ipvt[j]
if diag[l] == 0:
break
sdiag[j:] = 0
sdiag[j] = diag[l]
# The transformations to eliminate the row of d modify only a
# single element of (q transpose)*b beyond the first n, which
# is initially zero.
qtbpj = 0.
for k in range(j,n):
if sdiag[k] == 0:
break
if numpy.abs(r[k,k]) < numpy.abs(sdiag[k]):
cotan = r[k,k]/sdiag[k]
sine = 0.5/numpy.sqrt(.25 + .25*cotan*cotan)
cosine = sine*cotan
else:
tang = sdiag[k]/r[k,k]
cosine = 0.5/numpy.sqrt(.25 + .25*tang*tang)
sine = cosine*tang
# Compute the modified diagonal element of r and the
# modified element of ((q transpose)*b,0).
r[k,k] = cosine*r[k,k] + sine*sdiag[k]
temp = cosine*wa[k] + sine*qtbpj
qtbpj = -sine*wa[k] + cosine*qtbpj
wa[k] = temp
# Accumulate the transformation in the row of s
if n > k+1:
temp = cosine*r[k+1:n,k] + sine*sdiag[k+1:n]
sdiag[k+1:n] = -sine*r[k+1:n,k] + cosine*sdiag[k+1:n]
r[k+1:n,k] = temp
sdiag[j] = r[j,j]
r[j,j] = x[j]
# Solve the triangular system for z. If the system is singular
# then obtain a least squares solution
nsing = n
wh = (numpy.nonzero(sdiag == 0))[0]
if len(wh) > 0:
nsing = wh[0]
wa[nsing:] = 0
if nsing >= 1:
wa[nsing-1] = wa[nsing-1]/sdiag[nsing-1] # Degenerate case
# *** Reverse loop ***
for j in range(nsing-2,-1,-1):
sum0 = sum(r[j+1:nsing,j]*wa[j+1:nsing])
wa[j] = (wa[j]-sum0)/sdiag[j]
# Permute the components of z back to components of x
x[ipvt] = wa
return (r, x, sdiag)
# Original FORTRAN documentation
#
# subroutine lmpar
#
# given an m by n matrix a, an n by n nonsingular diagonal
# matrix d, an m-vector b, and a positive number delta,
# the problem is to determine a value for the parameter
# par such that if x solves the system
#
# a*x = b , sqrt(par)*d*x = 0 ,
#
# in the least squares sense, and dxnorm is the euclidean
# norm of d*x, then either par is zero and
#
# (dxnorm-delta) .le. 0.1*delta ,
#
# or par is positive and
#
# abs(dxnorm-delta) .le. 0.1*delta .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then lmpar expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. on output
# lmpar also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + par*d*d)*p = s *s .
#
# s is employed within lmpar and may be of separate interest.
#
# only a few iterations are generally needed for convergence
# of the algorithm. if, however, the limit of 10 iterations
# is reached, then the output par will contain the best
# value obtained so far.
#
# the subroutine statement is
#
# subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,
# wa1,wa2)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# delta is a positive input variable which specifies an upper
# bound on the euclidean norm of d*x.
#
# par is a nonnegative variable. on input par contains an
# initial estimate of the levenberg-marquardt parameter.
# on output par contains the final estimate.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, sqrt(par)*d*x = 0,
# for the output par.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa1 and wa2 are work arrays of length n.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm,qrsolv
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
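# Hedged numpy sketch of what "par" means (illustrative only): for a given
# par >= 0 the damped solution x(par) minimizes ||a*x - b||^2 + par*||d*x||^2,
# i.e. it solves the stacked least-squares problem with sqrt(par)*diag(d)
# appended, and lmpar searches for the par whose ||d*x(par)|| lies within
# 10% of the trust-region radius delta:
#
#   import numpy
#   def x_of_par(a, b, d, par):
#       big_a = numpy.vstack([a, numpy.sqrt(par) * numpy.diag(d)])
#       big_b = numpy.concatenate([b, numpy.zeros(len(d))])
#       return numpy.linalg.lstsq(big_a, big_b, rcond=None)[0]
#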
def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None):
if self.debug:
print ('Entering lmpar...')
dwarf = self.machar.minnum
machep = self.machar.machep
sz = r.shape
m = sz[0]
n = sz[1]
# Compute and store in x the gauss-newton direction. If the
# jacobian is rank-deficient, obtain a least-squares solution
nsing = n
wa1 = qtb.copy()
rthresh = numpy.max(numpy.abs(numpy.diagonal(r))) * machep
wh = (numpy.nonzero(numpy.abs(numpy.diagonal(r)) < rthresh))[0]
if len(wh) > 0:
nsing = wh[0]
wa1[wh[0]:] = 0
if nsing >= 1:
# *** Reverse loop ***
for j in range(nsing-1,-1,-1):
wa1[j] = wa1[j]/r[j,j]
if j-1 >= 0:
wa1[0:j] = wa1[0:j] - r[0:j,j]*wa1[j]
# Note: ipvt here is a permutation array
x[ipvt] = wa1
# Initialize the iteration counter. Evaluate the function at the
# origin, and test for acceptance of the gauss-newton direction
iter = 0
wa2 = diag * x
dxnorm = self.enorm(wa2)
fp = dxnorm - delta
if fp <= 0.1*delta:
return [r, 0., x, sdiag]
# If the jacobian is not rank deficient, the newton step provides a
# lower bound, parl, for the zero of the function. Otherwise set
# this bound to zero.
parl = 0.
if nsing >= n:
wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
wa1[0] = wa1[0] / r[0,0] # Degenerate case
for j in range(1,n): # Note "1" here, not zero
sum0 = sum(r[0:j,j]*wa1[0:j])
wa1[j] = (wa1[j] - sum0)/r[j,j]
temp = self.enorm(wa1)
parl = ((fp/delta)/temp)/temp
# Calculate an upper bound, paru, for the zero of the function
for j in range(n):
sum0 = sum(r[0:j+1,j]*qtb[0:j+1])
wa1[j] = sum0/diag[ipvt[j]]
gnorm = self.enorm(wa1)
paru = gnorm/delta
if paru == 0:
paru = dwarf/numpy.min([delta,0.1])
# If the input par lies outside of the interval (parl,paru), set
# par to the closer endpoint
par = numpy.max([par,parl])
par = numpy.min([par,paru])
if par == 0:
par = gnorm/dxnorm
        # Beginning of an iteration
while(1):
iter = iter + 1
# Evaluate the function at the current value of par
if par == 0:
par = numpy.max([dwarf, paru*0.001])
temp = numpy.sqrt(par)
wa1 = temp * diag
[r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag)
wa2 = diag*x
dxnorm = self.enorm(wa2)
temp = fp
fp = dxnorm - delta
if (numpy.abs(fp) <= 0.1*delta) or \
((parl == 0) and (fp <= temp) and (temp < 0)) or \
(iter == 10):
break;
# Compute the newton correction
wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
for j in range(n-1):
wa1[j] = wa1[j]/sdiag[j]
wa1[j+1:n] = wa1[j+1:n] - r[j+1:n,j]*wa1[j]
wa1[n-1] = wa1[n-1]/sdiag[n-1] # Degenerate case
temp = self.enorm(wa1)
parc = ((fp/delta)/temp)/temp
# Depending on the sign of the function, update parl or paru
if fp > 0:
parl = numpy.max([parl,par])
if fp < 0:
paru = numpy.min([paru,par])
# Compute an improved estimate for par
par = numpy.max([parl, par+parc])
# End of an iteration
# Termination
return [r, par, x, sdiag]
# Procedure to tie one parameter to another.
def tie(self, p, ptied=None):
if self.debug:
print ('Entering tie...')
if ptied is None:
return
for i in range(len(ptied)):
if ptied[i] == '':
continue
cmd = 'p[' + str(i) + '] = ' + ptied[i]
exec(cmd)
return p
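    # Illustrative example of the ptied convention handled above: each entry
    # of ptied is either an empty string (the parameter is free) or a Python
    # expression in terms of the current parameter vector p, evaluated in
    # order.  For instance
    #   ptied = ['', '2.0 * p[0]', '']
    # leaves p[0] and p[2] untouched and forces p[1] to track 2*p[0] on
    # every call to tie().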
# Original FORTRAN documentation
# **********
#
# subroutine covar
#
# given an m by n matrix a, the problem is to determine
# the covariance matrix corresponding to a, defined as
#
# t
# inverse(a *a) .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then covar expects
# the full upper triangle of r and the permutation matrix p.
# the covariance matrix is then computed as
#
# t t
# p*inverse(r *r)*p .
#
# if a is nearly rank deficient, it may be desirable to compute
# the covariance matrix corresponding to the linearly independent
# columns of a. to define the numerical rank of a, covar uses
# the tolerance tol. if l is the largest integer such that
#
# abs(r(l,l)) .gt. tol*abs(r(1,1)) ,
#
# then covar computes the covariance matrix corresponding to
# the first l columns of r. for k greater than l, column
# and row ipvt(k) of the covariance matrix are set to zero.
#
# the subroutine statement is
#
# subroutine covar(n,r,ldr,ipvt,tol,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle must
# contain the full upper triangle of the matrix r. on output
# r contains the square symmetric covariance matrix.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# tol is a nonnegative input variable used to define the
# numerical rank of a in the manner described above.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs
#
# argonne national laboratory. minpack project. august 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
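#
# Hedged numpy sketch of the quantity being computed (small, well-conditioned
# problems only; the names r, ipvt and n are assumptions):
#
#   import numpy
#   # given the n x n upper triangle r from the pivoted QR of a, and ipvt:
#   p = numpy.eye(n)[:, ipvt]             # column j of p is e_{ipvt[j]}
#   cov = p.dot(numpy.linalg.inv(r.T.dot(r))).dot(p.T)
#   # for a full-rank a this equals numpy.linalg.inv(a.T.dot(a))
#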
def calc_covar(self, rr, ipvt=None, tol=1.e-14):
if self.debug:
print ('Entering calc_covar...')
if numpy.ndim(rr) != 2:
print ('ERROR: r must be a two-dimensional matrix')
return -1
s = rr.shape
n = s[0]
if s[0] != s[1]:
print ('ERROR: r must be a square matrix')
return -1
if ipvt is None:
ipvt = numpy.arange(n)
r = rr.copy()
r.shape = [n,n]
        # Form the inverse of r in the full upper triangle of r
l = -1
tolr = tol * numpy.abs(r[0,0])
for k in range(n):
if numpy.abs(r[k,k]) <= tolr:
break
r[k,k] = 1./r[k,k]
for j in range(k):
temp = r[k,k] * r[j,k]
r[j,k] = 0.
r[0:j+1,k] = r[0:j+1,k] - temp*r[0:j+1,j]
l = k
# Form the full upper triangle of the inverse of (r transpose)*r
# in the full upper triangle of r
if l >= 0:
for k in range(l+1):
for j in range(k):
temp = r[j,k]
r[0:j+1,j] = r[0:j+1,j] + temp*r[0:j+1,k]
temp = r[k,k]
r[0:k+1,k] = temp * r[0:k+1,k]
        # Form the full lower triangle of the covariance matrix
        # in the strict lower triangle of r and in wa
wa = numpy.repeat([r[0,0]], n)
for j in range(n):
jj = ipvt[j]
sing = j > l
for i in range(j+1):
if sing:
r[i,j] = 0.
ii = ipvt[i]
if ii > jj:
r[ii,jj] = r[i,j]
if ii < jj:
r[jj,ii] = r[i,j]
wa[jj] = r[j,j]
# Symmetrize the covariance matrix in r
for j in range(n):
r[0:j+1,j] = r[j,0:j+1]
r[j,j] = wa[j]
return r
class machar:
def __init__(self, double=1):
if double == 0:
info = numpy.finfo(numpy.float32)
else:
info = numpy.finfo(numpy.float64)
self.machep = info.eps
self.maxnum = info.max
self.minnum = info.tiny
self.maxlog = numpy.log(self.maxnum)
self.minlog = numpy.log(self.minnum)
self.rdwarf = numpy.sqrt(self.minnum*1.5) * 10
self.rgiant = numpy.sqrt(self.maxnum) * 0.1
| segasai/astrolibpy | mpfit/mpfit.py | Python | gpl-3.0 | 78,287 | ["Gaussian"] | ad24d39d37524feff44fd8e1907cc2cc8e1c199dfa3b80fe2672b99194550171 |
##################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2015 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
##
## rxdSpineSize.py: Builds a cell with spines and a propagating reaction
## wave. Products diffuse into the spine and cause it to get bigger.
##################################################################
try:
from PyQt4 import QtGui
import moogli
import moogli.extensions.moose
except ImportError as e:
print( "[INFO ] moogli is not found. Quitting..." )
quit()
import math
import pylab
import numpy
import matplotlib.pyplot as plt
import moose
import sys
import rdesigneur as rd
import matplotlib
doMoo = True
PI = 3.141592653
ScalingForTesting = 10
RM = 1.0 / ScalingForTesting
RA = 1.0 * ScalingForTesting
CM = 0.01 * ScalingForTesting
runtime = 100.0
frameruntime = 1.0
diffConst = 5e-12
dendLen = 100e-6
diffLen = 1.0e-6
dendDia = 2e-6
somaDia = 5e-6
concInit = 0.001 # 1 millimolar
spineSpacing = 10e-6
spineSpacingDistrib = 1e-6
spineSize = 1.0
spineSizeDistrib = 0.5
spineAngle= numpy.pi / 2.0
spineAngleDistrib = 0.0
def makeCellProto( name ):
elec = moose.Neuron( '/library/' + name )
ecompt = []
soma = rd.buildCompt( elec, 'soma', dx=somaDia, dia=somaDia, x=-somaDia, RM=RM, RA=RA, CM=CM )
dend = rd.buildCompt( elec, 'dend', dx=dendLen, dia=dendDia, x=0, RM=RM, RA=RA, CM=CM )
moose.connect( soma, 'axial', dend, 'raxial' )
elec.buildSegmentTree()
def makeChemProto( name ):
chem = moose.Neutral( '/library/' + name )
comptVol = diffLen * dendDia * dendDia * PI / 4.0
for i in ( ['dend', comptVol], ['spine', 1e-19], ['psd', 1e-20] ):
print(('making ', i))
compt = moose.CubeMesh( chem.path + '/' + i[0] )
compt.volume = i[1]
z = moose.Pool( compt.path + '/z' )
z.concInit = 0.0
z.diffConst = diffConst
nInit = comptVol * 6e23 * concInit
nstr = str( 1/nInit)
x = moose.Pool( chem.path + '/dend/x' )
x.diffConst = diffConst
func = moose.Function( x.path + '/func' )
func.expr = "-x0 * (0.3 - " + nstr + " * x0) * ( 1 - " + nstr + " * x0)"
print((func.expr))
func.x.num = 1
moose.connect( x, 'nOut', func.x[0], 'input' )
moose.connect( func, 'valueOut', x, 'increment' )
z = moose.element( '/library/' + name + '/dend/z' )
reac = moose.Reac( '/library/' + name + '/dend/reac' )
reac.Kf = 1
reac.Kb = 10
moose.connect( reac, 'sub', x, 'reac' )
moose.connect( reac, 'prd', z, 'reac' )
def makeModel():
moose.Neutral( '/library' )
makeCellProto( 'cellProto' )
makeChemProto( 'cProto' )
rdes = rd.rdesigneur( useGssa = False,
turnOffElec = True,
chemDt = 0.1,
diffDt = 0.1,
combineSegments = False,
stealCellFromLibrary = True,
diffusionLength = 1e-6,
cellProto = [['cellProto', 'elec' ]] ,
spineProto = [['makePassiveSpine()', 'spine' ]] ,
chemProto = [['cProto', 'chem' ]] ,
spineDistrib = [
['spine', '#',
str( spineSpacing ), str( spineSpacingDistrib ),
str( spineSize ), str( spineSizeDistrib ),
str( spineAngle ), str( spineAngleDistrib )
]
],
chemDistrib = [[ "chem", "#dend#,#psd#", "install", "1" ]],
adaptorList = [
[ 'psd/z', 'n', 'spine', 'psdArea', 50.0e-15, 500e-15 ],
]
)
moose.seed(1234)
rdes.buildModel( '/model' )
print( 'built model' )
x = moose.vec( '/model/chem/dend/x' )
x.concInit = 0.0
for i in range( 0,20 ):
x[i].concInit = concInit
def makePlot( name, srcVec, field ):
tab = moose.Table2('/graphs/' + name + 'Tab', len( srcVec ) ).vec
for i in zip(srcVec, tab):
moose.connect(i[1], 'requestOut', i[0], field)
return tab
def displayPlots():
for x in moose.wildcardFind( '/graphs/#[0]' ):
tab = moose.vec( x )
for i in range( len( tab ) ):
pylab.plot( tab[i].vector, label=x.name[:-3] + " " + str( i ) )
pylab.legend()
pylab.figure()
def main():
"""
This illustrates the use of rdesigneur to build a simple dendrite with
spines, and then to resize them using spine fields. These are the
fields that would be changed dynamically in a simulation with reactions
that affect spine geometry.
In this simulation there is a propagating reaction wave using a
highly abstracted equation, whose product diffuses into the spines and
makes them bigger.
"""
makeModel()
elec = moose.element( '/model/elec' )
elec.setSpineAndPsdMesh( moose.element('/model/chem/spine'), moose.element('/model/chem/psd') )
eHead = moose.wildcardFind( '/model/elec/#head#' )
oldDia = [ i.diameter for i in eHead ]
graphs = moose.Neutral( '/graphs' )
#makePlot( 'psd_x', moose.vec( '/model/chem/psd/x' ), 'getN' )
#makePlot( 'head_x', moose.vec( '/model/chem/spine/x' ), 'getN' )
makePlot( 'dend_x', moose.vec( '/model/chem/dend/x' ), 'getN' )
makePlot( 'dend_z', moose.vec( '/model/chem/dend/z' ), 'getN' )
makePlot( 'head_z', moose.vec( '/model/chem/spine/z' ), 'getN' )
makePlot( 'psd_z', moose.vec( '/model/chem/psd/z' ), 'getN' )
makePlot( 'headDia', eHead, 'getDiameter' )
'''
debug = moose.PyRun( '/pyrun' )
debug.tick = 10
debug.runString = """print( "RUNNING: ", moose.element( '/model/chem/psd/z').n, moose.element( '/model/elec/head0' ).diameter)"""
'''
moose.reinit()
moose.start( runtime )
displayPlots()
pylab.plot( oldDia, label = 'old Diameter' )
pylab.plot( [ i.diameter for i in eHead ], label = 'new Diameter' )
pylab.legend()
pylab.show()
if doMoo:
app = QtGui.QApplication(sys.argv)
morphology = moogli.read_morphology_from_moose( name="", path = '/model/elec' )
widget = moogli.MorphologyViewerWidget( morphology )
widget.show()
return app.exec_()
quit()
# Run the 'main' if this script is executed standalone.
def showVisualization():
makeModel()
elec = moose.element( '/model/elec' )
elec.setSpineAndPsdMesh( moose.element('/model/chem/spine'), moose.element('/model/chem/psd') )
eHead = moose.wildcardFind( '/model/elec/#head#' )
oldDia = [ i.diameter for i in eHead ]
graphs = moose.Neutral( '/graphs' )
#makePlot( 'psd_x', moose.vec( '/model/chem/psd/x' ), 'getN' )
#makePlot( 'head_x', moose.vec( '/model/chem/spine/x' ), 'getN' )
makePlot( 'dend_x', moose.vec( '/model/chem/dend/x' ), 'getN' )
dendZ = makePlot( 'dend_z', moose.vec( '/model/chem/dend/z' ), 'getN' )
makePlot( 'head_z', moose.vec( '/model/chem/spine/z' ), 'getN' )
psdZ = makePlot( 'psd_z', moose.vec( '/model/chem/psd/z' ), 'getN' )
diaTab = makePlot( 'headDia', eHead, 'getDiameter' )
dendrite = moose.element("/model/elec/dend")
dendrites = [dendrite.path + "/" + str(i) for i in range(len(dendZ))]
moose.reinit()
spineHeads = moose.wildcardFind( '/model/elec/#head#')
if doMoo:
app = QtGui.QApplication(sys.argv)
viewer = create_viewer("/model/elec", dendrite, dendZ, diaTab, psdZ)
viewer.showMaximized()
viewer.start()
return app.exec_()
def create_viewer(path, moose_dendrite, dendZ, diaTab, psdZ):
network = moogli.extensions.moose.read(path=path,
vertices=15)
dendrite = network.groups["dendrite"].shapes[moose_dendrite.path]
chem_compt_group = dendrite.subdivide(len(dendZ))
normalizer = moogli.utilities.normalizer(0.0,
300.0,
clipleft=True,
clipright=True)
colormap = moogli.colors.MatplotlibColorMap(matplotlib.cm.rainbow)
mapper = moogli.utilities.mapper(colormap, normalizer)
def readValues(tables):
values = []
for i in range(len(tables)):
values.append(tables[i].vector[-1])
return values
def prelude(view):
view.home()
view.pitch(math.pi / 3.0)
view.zoom(0.3)
network.groups["soma"].set("color", moogli.colors.RED)
network.groups["spine"].groups["shaft"].set("color",
moogli.colors.RED)
def interlude(view):
moose.start(frameruntime)
network.groups["spine"].groups["head"].set("radius",
readValues(diaTab),
lambda x: x * 0.5e6)
network.groups["spine"].groups["head"].set("color",
readValues(psdZ),
mapper)
chem_compt_group.set("color",
readValues(dendZ),
mapper)
if moose.element("/clock").currentTime >= runtime:
view.stop()
viewer = moogli.Viewer("Viewer")
viewer.attach_shapes(list(network.shapes.values()))
viewer.detach_shape(dendrite)
viewer.attach_shapes(list(chem_compt_group.shapes.values()))
view = moogli.View("main-view",
prelude=prelude,
interlude=interlude)
cb = moogli.widgets.ColorBar(id="cb",
title="Molecule #",
text_color=moogli.colors.BLACK,
position=moogli.geometry.Vec3f(0.975, 0.5, 0.0),
size=moogli.geometry.Vec3f(0.30, 0.05, 0.0),
text_font="/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-R.ttf",
orientation=math.pi / 2.0,
text_character_size=20,
label_formatting_precision=0,
colormap=moogli.colors.MatplotlibColorMap(matplotlib.cm.rainbow),
color_resolution=100,
scalar_range=moogli.geometry.Vec2f(0.0,
300.0))
view.attach_color_bar(cb)
viewer.attach_view(view)
return viewer
if __name__ == '__main__':
if doMoo:
showVisualization()
else:
main()
| BhallaLab/moose-examples | snippets/rxdSpineSize.py | Python | gpl-2.0 | 10,748 | ["MOOSE", "NEURON"] | d39be95b57f77875c026e5e917737155041ac30463ab0972ba93b4b7f03a2b73 |
import tempfile
from copy import deepcopy
import numpy as np
from numpy import cos, sin, pi
import gmin_ as GMIN
from pele.potentials import GMINPotential
from pele.potentials import LJ
from pele.angleaxis import RBTopology, RBSystem, RigidFragment
from pele.utils.xyz import write_xyz
class OTPPotential(LJ):
def __init__(self, aatopology):
self.aatopology = aatopology
LJ.__init__(self)
def getEnergy(self, coords):
atom_coords = self.aatopology.to_atomistic(coords)
return LJ.getEnergy(self, atom_coords.flatten())
def getEnergyGradient(self, coords):
atom_coords = self.aatopology.to_atomistic(coords)
e, atom_grad = LJ.getEnergyGradient(self, atom_coords.flatten())
grad = self.aatopology.transform_gradient(coords, atom_grad)
return e, grad
class OTPSystem(RBSystem):
def __init__(self, nmol, boxl=None):
# super(OTPSystem, self)
self.nmol = nmol
if boxl is None:
self.periodic = False
self.boxl = 1e100
else:
self.periodic = True
self.boxl = boxl
super(OTPSystem, self).__init__()
self.setup_params(self.params)
def get_rigid_fragment(self):
return RigidFragment()
def make_otp(self):
otp = self.get_rigid_fragment()
otp.add_atom("O", np.array([0.0, -2./3 * np.sin( 7.*pi/24.), 0.0]), 1.)
otp.add_atom("O", np.array([cos( 7.*pi/24.), 1./3. * sin( 7.* pi/24.), 0.0]), 1.)
otp.add_atom("O", np.array([-cos( 7.* pi/24.), 1./3. * sin( 7.*pi/24), 0.0]), 1.)
otp.finalize_setup()
return otp
def setup_params(self, params):
params.gui["basinhopping_nsteps"] = 1000
nebparams = params.double_ended_connect.local_connect_params.NEBparams
nebparams.max_images = 50
nebparams.image_density = 5
nebparams.iter_density = 10.
nebparams.k = 5.
nebparams.reinterpolate = 50
nebparams.NEBquenchParams["iprint"] = 10
tssearch = params.double_ended_connect.local_connect_params.tsSearchParams
tssearch.nsteps_tangent1 = 10
tssearch.nsteps_tangent2 = 30
tssearch.lowestEigenvectorQuenchParams["nsteps"] = 50
tssearch.iprint=1
tssearch.nfail_max = 100
def get_random_coordinates(self):
coords = np.zeros([self.nmol*2, 3])
coords[:self.nmol,:] = np.random.uniform(-1,1,[self.nmol,3]) * (self.nmol*3)**(-1./3) * 1.5
from pele.utils.rotations import random_aa
for i in range(self.nmol, self.nmol*2):
coords[i,:] = random_aa()
return coords.flatten()
def write_coords_data(self):
coords = self.get_random_coordinates()
coords = coords.reshape(-1,3)
with open("coords", "w") as fout:
for xyz in coords:
fout.write( "%f %f %f\n" % tuple(xyz) )
data = "LWOTP 2.614\n"
if self.periodic:
data += "periodic %f %f %f\n" % (self.boxl, self.boxl, self.boxl)
with open("data", "w") as fout:
fout.write(data)
def setup_aatopology(self):
self.write_coords_data()
GMIN.initialize()
self.pot = GMINPotential(GMIN)
coords = self.pot.getCoords()
self.nrigid = coords.size/6
assert(self.nrigid == self.nmol)
#self.nrigid = self.nmol
otp = self.make_otp()
topology = RBTopology()
topology.add_sites([deepcopy(otp) for i in xrange(self.nrigid)])
self.render_scale = 0.2
self.atom_types = topology.get_atomtypes()
self.draw_bonds = []
for i in xrange(self.nrigid):
self.draw_bonds.append((3*i, 3*i+1))
self.draw_bonds.append((3*i, 3*i+2))
self.params.double_ended_connect.local_connect_params.tsSearchParams.iprint = 10
return topology
def get_potential(self):
#return OTPPotential(self.aasystem)
return self.pot
def __call__(self):
return self
def load_coords_pymol(self, coordslist, oname, index=1):
import pymol
super(OTPSystem, self).load_coords_pymol(coordslist, oname, index=index)
pymol.cmd.set("sphere_scale", value=0.2, selection=oname)
def rungui(system, db=None):
import pele.gui.run as gr
from pele.storage import Database
gr.run_gui(system, db=db)
if __name__ == "__main__":
nmol = 7
periodic = False
if periodic:
boxl = 10.
dbname = "otp_N%d_boxl%.2f.db"
else:
dbname = "otp_N%d.db" % nmol
boxl = None
system = OTPSystem(nmol, boxl=boxl)
# bh = system.get_basinhopping()
# bh.run(10)
rungui(system, db=dbname)
| js850/pele | playground/otp/otp.py | Python | gpl-3.0 | 4,872 | ["PyMOL"] | 12c307114f029f9a82c7b8f8f63b7c1ee3b1c0bd29a2d43aafde4f83c009e9f3 |
# import modules
import mcpi.minecraft as minecraft
from time import sleep
# connect python to minecraft
mc = minecraft.Minecraft.create()
# create CONSTANTS for block and light colours
AIR = 0
STONE = 1
WOOL = 35
BLACK = 15
RED = 14
AMBER = 4
GREEN = 5
# clear area in middle of map and move player there
mc.setBlocks(-60, 0, -60, 60, 50, 60, AIR)
mc.setBlocks(-60, -1, -60, 60, -1, 60, STONE)
mc.player.setPos(5, 0, 0)
# create initial light stack
for i in range(1, 7):
mc.setBlock(10, 0 +i, 0, WOOL, 8)
mc.setBlock(9, 6, 0, WOOL, BLACK)
mc.setBlock(9, 5, 0, WOOL, BLACK)
mc.setBlock(9, 4, 0, WOOL, BLACK)
# wait three seconds before starting sequence
sleep(3)
# traffic light sequence
while True:
# turn on red
mc.setBlock(9, 6, 0, WOOL, RED)
# wait three seconds
sleep(3)
# turn on amber
mc.setBlock(9, 5, 0, WOOL, AMBER)
# wait one second
sleep(1)
# turn off red & amber, turn on green
mc.setBlock(9, 6, 0, WOOL, BLACK)
mc.setBlock(9, 5, 0, WOOL, BLACK)
mc.setBlock(9, 4, 0, WOOL, GREEN)
# wait three seconds
sleep(3)
# turn off green
mc.setBlock(9, 4, 0, WOOL, BLACK)
# turn on amber
mc.setBlock(9, 5, 0, WOOL, AMBER)
# wait one second
sleep(1)
# turn off amber
mc.setBlock(9, 5, 0, WOOL, BLACK)
| NeilCFord/CPD4T_Minecraft_Workshop | trafficlights.py | Python | cc0-1.0 | 1,305 | ["Amber"] | 8b19b1693ff02872aa3ae9e832118c37217f6d0c19923073f89bf99897d8ef13 |
from flow_workflow.perl_action import actions
from flow_workflow.perl_action import adapter_base
from flow_workflow.perl_action import future_nets
from lxml import etree
import mock
import unittest
PARALLEL_BY_XML = '''
<operation name="test_op_name" parallelBy="foo">
<operationtype commandClass="NullCommand"
typeClass="Workflow::OperationType::Command" />
</operation>
'''
NORMAL_XML = '''
<operation name="test_op_name">
<operationtype commandClass="NullCommand"
typeClass="Workflow::OperationType::Command" />
</operation>
'''
class FakeAdapter(adapter_base.PerlActionAdapterBase):
operation_class = 'fake'
def single_future_net(self, **kwargs):
return adapter_base.PerlActionAdapterBase.single_future_net(self,
**kwargs)
@property
def action_type(self):
return 'foo'
@property
def action_id(self):
return 12345
class NormalPerlActionAdapterBaseTest(unittest.TestCase):
def setUp(self):
self.log_dir = '/exciting/log/dir'
self.operation_id = 12345
self.parent = mock.Mock()
self.adapter = FakeAdapter(xml=etree.XML(NORMAL_XML),
operation_id=self.operation_id,
log_dir=self.log_dir, parent=self.parent)
def test_parallel_by(self):
self.assertEqual(None, self.adapter.parallel_by)
def test_shortcut_action_class(self):
self.assertEqual(actions.ForkAction,
self.adapter.shortcut_action_class)
def test_execute_action_class_remote(self):
self.assertEqual(actions.LSFAction,
self.adapter.execute_action_class)
def test_execute_action_class_local(self):
self.adapter.local_workflow = True
self.assertEqual(actions.ForkAction,
self.adapter.execute_action_class)
def test_future_net(self):
net = self.adapter.future_net(resources={})
self.assertIsInstance(net, future_nets.PerlActionNet)
if __name__ == '__main__':
unittest.main()
| genome/flow-workflow | unit_tests/perl_action/test_adapter_base.py | Python | agpl-3.0 | 2,054 | ["exciting"] | 052925a4d5ab2e3d1cfd6269e3def53d52c0f3892ab754af11d72df4018310d4 |
from Sire.IO import *
from Sire.Mol import *
from Sire.Base import *
import Sire.Config
import subprocess
import shlex
import sys
import os
from nose.tools import assert_equal
sire_python = getBinDir() + "/sire_python"
gromacs_path = StringProperty("../io/gromacs")
def _test_broken_pdb():
try:
verbose = os.getenv("VERBOSE_TEST")
except:
verbose = False
if verbose:
print("Loading the original PDB...")
s = MoleculeParser.read("../io/aladip.pdb")
m = s[MolIdx(0)]
if verbose:
print("Writing the PDB using locale '%s'" % os.getenv("LC_ALL"))
# write this PDB using the environment locale
PDB().write(m, "test_broken_pdb.pdb")
if verbose:
print("Re-reading the PDB...")
# read this back to see if we are using commas or numbers
s = MoleculeParser.read("test_broken_pdb.pdb")
m2 = s[MolIdx(0)]
assert_equal( m.nAtoms(), m2.nAtoms() )
for i in range(0, m.nAtoms()):
if verbose:
print("%s vs %s" % (m.atoms()[i].property("coordinates"),
m2.atoms()[i].property("coordinates")))
assert_equal( m.atoms()[i].property("coordinates"),
m2.atoms()[i].property("coordinates") )
# now try the same with PDB2
if verbose:
print("Checking PDB2. Writing to file...")
p = PDB2(s)
p.writeToFile("test_broken_pdb.pdb")
if verbose:
print("Re-reading from the file...")
s = MoleculeParser.read("test_broken_pdb.pdb")
m2 = s[MolIdx(0)]
assert_equal( m.nAtoms(), m2.nAtoms() )
for i in range(0, m.nAtoms()):
if verbose:
print("%s vs %s" % (m.atoms()[i].property("coordinates"),
m2.atoms()[i].property("coordinates")))
assert_equal( m.atoms()[i].property("coordinates"),
m2.atoms()[i].property("coordinates") )
def _test_broken_rst7():
try:
verbose = os.getenv("VERBOSE_TEST")
except:
verbose = False
if verbose:
print("Loading the original RST7...")
s = MoleculeParser.read("../io/ala.top", "../io/ala.crd")
if verbose:
print("Writing out again...")
a = AmberRst7(s)
a.writeToFile("test.rst7")
if verbose:
print("Re-reading...")
s2 = MoleculeParser.read("../io/ala.top", "test.rst7")
if verbose:
print("Validating...")
for i in range(0,s.nMolecules()):
m1 = s[MolIdx(i)]
        m2 = s2[MolIdx(i)]
for j in range(0,m1.nAtoms()):
v1 = m1.atoms()[j].property("coordinates")
v2 = m2.atoms()[j].property("coordinates")
assert_equal(v1, v2)
if verbose:
print("All ok :-)")
def _test_broken_gro():
try:
verbose = os.getenv("VERBOSE_TEST")
except:
verbose = False
if verbose:
print("Loading the original GRO...")
s = MoleculeParser.read("../io/urea.top", "../io/urea.gro",
{"GROMACS_PATH":gromacs_path})
if verbose:
print("Writing to a new file...")
g = Gro87(s)
g.writeToFile("test.gro")
if verbose:
print("Re-reading...")
s = MoleculeParser.read("../io/urea.top", "test.gro",
{"GROMACS_PATH":gromacs_path})
if verbose:
print("Validating...")
for i in range(0,s.nMolecules()):
m1 = s[MolIdx(i)]
        m2 = s2[MolIdx(i)]
for j in range(0,m1.nAtoms()):
v1 = m1.atoms()[j].property("coordinates")
v2 = m2.atoms()[j].property("coordinates")
assert_equal(v1, v2)
if verbose:
print("All ok :-)")
def _test_broken_function(verbose, function):
cmd = "%s %s %s" % (sire_python, "test_locale.py", function)
if verbose:
print(cmd)
env = dict(os.environ, LC_ALL="it_IT.UTF-8")
if verbose:
env["VERBOSE_TEST"] = "1"
p = subprocess.Popen(shlex.split(cmd),
env=env,
stdout=subprocess.PIPE)
p.wait()
if verbose:
for line in p.stdout.readlines():
print(str(line))
if p.returncode != 0:
assert(False)
def test_broken_pdb(verbose=False):
_test_broken_function(verbose, "--test_broken_pdb")
def test_broken_rst7(verbose=False):
_test_broken_function(verbose, "--test_broken_rst7")
def test_broken_gro(verbose=False):
_test_broken_function(verbose, "--test_broken_gro")
funcs = {}
funcs["--test_broken_pdb"] = _test_broken_pdb
funcs["--test_broken_rst7"] = _test_broken_rst7
funcs["--test_broken_gro"] = _test_broken_gro
if __name__ == "__main__":
if len(sys.argv) > 1:
funcs[sys.argv[1]]()
else:
test_broken_pdb(True)
test_broken_rst7(True)
test_broken_gro(True)
#print("DISABLING LOCALE TEST AS INFINITE LOOP")
| michellab/SireUnitTests | unittests/SireIO/test_locale.py | Python | gpl-3.0 | 4,886 | ["Gromacs"] | d17d211cfdb69ac2413436d9aacdc0f4116f35c494c1ebeb6074081af12b48e4 |
import numpy as np
from scipy.ndimage.interpolation import shift as scipy_shift
from skued import register_time_shift, register_time_shifts
import pytest
np.random.seed(23)
def test_time_shift_trivial():
"""Test that the time-shift between two identical traces is zero."""
trace1 = np.sin(2 * np.pi * np.linspace(0, 10, 64))
trace2 = np.array(trace1, copy=True)
shift = register_time_shift(trace1, trace2)
assert shift == 0
trace1 = np.sin(2 * np.pi * np.linspace(0, 10, 65))
trace2 = np.array(trace1, copy=True)
shift = register_time_shift(trace1, trace2)
assert shift == 0
def test_time_shift_no_noise():
"""Test measuring the time-shift between traces shifted from one another, without added noise"""
trace1 = np.sin(2 * np.pi * np.linspace(0, 10, 64))
trace2 = np.roll(trace1, 5)
shift = register_time_shift(trace1, trace2)
assert shift == -5
def test_time_shift_with_noise():
"""Test measuring the time-shift between traces shifted from one another, with added 6% gaussian noise"""
trace1 = np.sin(2 * np.pi * np.linspace(0, 10, 64))
trace2 = scipy_shift(trace1, 5)
trace1 = trace1[6:-6]
trace2 = trace2[6:-6]
trace1 += 0.03 * np.random.random(size=trace1.shape)
trace2 += 0.03 * np.random.random(size=trace2.shape)
shift = register_time_shift(trace1, trace2)
assert shift == -5
def test_time_shift_different_lengths():
"""Test that register_time_shift() raises an exception if the reference and trace do not have the same shape"""
with pytest.raises(ValueError):
trace1 = np.empty((16,))
trace2 = np.empty((8,))
register_time_shift(trace1, trace2)
def test_time_shift_not1d():
"""Test that register_time_shift() raises an exception if the reference or trace are not 1D"""
with pytest.raises(ValueError):
trace1 = np.empty((16, 45))
trace2 = np.empty((8,))
register_time_shift(trace1, trace2)
with pytest.raises(ValueError):
trace1 = np.empty((16,))
trace2 = np.empty((8, 2))
register_time_shift(trace1, trace2)
def test_time_shifts_trivial():
"""Test that the time-shifts between identical time traces"""
traces = [np.sin(2 * np.pi * np.linspace(0, 10, 64)) for _ in range(10)]
shifts = register_time_shifts(traces)
assert np.allclose(shifts, np.zeros_like(shifts))
traces = [np.sin(2 * np.pi * np.linspace(0, 10, 31)) for _ in range(10)]
shifts = register_time_shifts(traces)
assert np.allclose(shifts, np.zeros_like(shifts))
def test_time_shifts_output_shape():
"""Test the output shape"""
traces = [np.sin(2 * np.pi * np.linspace(0, 10, 64) + i) for i in range(10)]
shifts = register_time_shifts(traces)
assert shifts.shape == (len(traces),)
# The first shift should then be zero
# because it is the shift between the reference and it
assert shifts[0] == 0
traces = [np.sin(2 * np.pi * np.linspace(0, 10, 64) + i) for i in range(10)]
shifts = register_time_shifts(traces, reference=np.array(traces[0], copy=True))
assert shifts.shape == (len(traces),)
| LaurentRDC/scikit-ued | skued/time_series/tests/test_time_zero.py | Python | gpl-3.0 | 3,135 | ["Gaussian"] | bef5b6132a0c11f577029fd459218f36af31b5cfd0f59b92dae4dbe31e3cd5d8 |
# -*- coding: utf-8 -*-
"""
A VTK RenderWindowInteractor widget for wxPython.
Find wxPython info at http://wxPython.org
Created by Prabhu Ramachandran, April 2002
Based on wxVTKRenderWindow.py
Fixes and updates by Charl P. Botha 2003-2008
Updated to new wx namespace and some cleaning up by Andrea Gavana,
December 2006
"""
"""
Please see the example at the end of this file.
----------------------------------------
Creation:
wxVTKRenderWindowInteractor(parent, ID, stereo=0, [wx keywords]):
You should create a wx.PySimpleApp() or some other wx**App before
creating the window.
Behaviour:
Uses __getattr__ to make the wxVTKRenderWindowInteractor behave just
like a vtkGenericRenderWindowInteractor.
----------------------------------------
"""
# import usual libraries
import math
import sys
import os
baseClass = object
_useCapture = None
try:
import wx
# a few configuration items, see what works best on your system
# Use GLCanvas as base class instead of wx.Window.
# This is sometimes necessary under wxGTK or the image is blank.
# (in wxWindows 2.3.1 and earlier, the GLCanvas had scroll bars)
if wx.Platform == "__WXGTK__":
import wx.glcanvas
baseClass = wx.glcanvas.GLCanvas
# Keep capturing mouse after mouse is dragged out of window
# (in wxGTK 2.3.2 there is a bug that keeps this from working,
# but it is only relevant in wxGTK if there are multiple windows)
_useCapture = (wx.Platform == "__WXMSW__")
except ImportError as e:
import traceback
# traceback.print_exc(file=sys.stdout)
sys.stderr.write("No proper wx installed'.\n")
try:
import vtk
except Exception as e:
sys.stderr.write("No proper vtk installed'.\n")
# end of configuration items
class EventTimer(wx.Timer):
"""Simple wx.Timer class."""
def __init__(self, iren):
"""
Default class constructor.
@param iren: current render window
"""
wx.Timer.__init__(self)
self.iren = iren
def Notify(self):
"""The timer has expired."""
self.iren.TimerEvent()
class wxVTKRenderWindowInteractor(baseClass):
"""
A wxRenderWindow for wxPython.
Use GetRenderWindow() to get the vtkRenderWindow.
Create with the keyword stereo=1 in order to
generate a stereo-capable window.
"""
# class variable that can also be used to request instances that use
# stereo; this is overridden by the stereo=1/0 parameter. If you set
# it to True, the NEXT instantiated object will attempt to allocate a
# stereo visual. E.g.:
# wxVTKRenderWindowInteractor.USE_STEREO = True
# myRWI = wxVTKRenderWindowInteractor(parent, -1)
USE_STEREO = False
def __init__(self, parent, ID, *args, **kw):
"""
Default class constructor.
@param parent: parent window
@param ID: window id
@param **kw: wxPython keywords (position, size, style) plus the
'stereo' keyword
"""
# private attributes
self.__RenderWhenDisabled = 0
# First do special handling of some keywords:
# stereo, position, size, style
stereo = 0
if 'stereo' in kw:
if kw['stereo']:
stereo = 1
del kw['stereo']
elif self.USE_STEREO:
stereo = 1
position, size = wx.DefaultPosition, wx.DefaultSize
if 'position' in kw:
position = kw['position']
del kw['position']
if 'size' in kw:
size = kw['size']
del kw['size']
# wx.WANTS_CHARS says to give us e.g. TAB
# wx.NO_FULL_REPAINT_ON_RESIZE cuts down resize flicker under GTK
style = wx.WANTS_CHARS | wx.NO_FULL_REPAINT_ON_RESIZE
if 'style' in kw:
style = style | kw['style']
del kw['style']
# the enclosing frame must be shown under GTK or the windows
# don't connect together properly
if wx.Platform != '__WXMSW__':
l = []
p = parent
while p: # make a list of all parents
l.append(p)
p = p.GetParent()
l.reverse() # sort list into descending order
for p in l:
p.Show(1)
if baseClass.__name__ == 'GLCanvas':
# code added by cpbotha to enable stereo and double
# buffering correctly where the user requests this; remember
# that the glXContext in this case is NOT allocated by VTK,
# but by WX, hence all of this.
# Initialize GLCanvas with correct attriblist
attribList = [wx.glcanvas.WX_GL_RGBA,
wx.glcanvas.WX_GL_MIN_RED, 1,
wx.glcanvas.WX_GL_MIN_GREEN, 1,
wx.glcanvas.WX_GL_MIN_BLUE, 1,
wx.glcanvas.WX_GL_DEPTH_SIZE, 16,
wx.glcanvas.WX_GL_DOUBLEBUFFER]
if stereo:
attribList.append(wx.glcanvas.WX_GL_STEREO)
try:
baseClass.__init__(self, parent, id=ID, pos=position, size=size, style=style,
attribList=attribList)
except wx.PyAssertionError:
# visual couldn't be allocated, so we go back to default
baseClass.__init__(self, parent, ID, position, size, style)
if stereo:
# and make sure everyone knows that the stereo
# visual wasn't set.
stereo = 0
else:
baseClass.__init__(self, parent, ID, position, size, style)
# create the RenderWindow and initialize it
self._Iren = vtk.vtkGenericRenderWindowInteractor()
self._Iren.SetRenderWindow(vtk.vtkRenderWindow())
self._Iren.AddObserver('CreateTimerEvent', self.CreateTimer)
self._Iren.AddObserver('DestroyTimerEvent', self.DestroyTimer)
self._Iren.GetRenderWindow().AddObserver('CursorChangedEvent',
self.CursorChangedEvent)
try:
self._Iren.GetRenderWindow().SetSize(size.width, size.height)
except AttributeError:
self._Iren.GetRenderWindow().SetSize(size[0], size[1])
if stereo:
self._Iren.GetRenderWindow().StereoCapableWindowOn()
self._Iren.GetRenderWindow().SetStereoTypeToCrystalEyes()
self.__handle = None
self.BindEvents()
# with this, we can make sure that the reparenting logic in
# Render() isn't called before the first OnPaint() has
# successfully been run (and set up the VTK/WX display links)
self.__has_painted = False
# set when we have captured the mouse.
self._own_mouse = False
# used to store WHICH mouse button led to mouse capture
self._mouse_capture_button = 0
# A mapping for cursor changes.
self._cursor_map = {0: wx.CURSOR_ARROW, # VTK_CURSOR_DEFAULT
1: wx.CURSOR_ARROW, # VTK_CURSOR_ARROW
2: wx.CURSOR_SIZENESW, # VTK_CURSOR_SIZENE
3: wx.CURSOR_SIZENWSE, # VTK_CURSOR_SIZENWSE
4: wx.CURSOR_SIZENESW, # VTK_CURSOR_SIZESW
5: wx.CURSOR_SIZENWSE, # VTK_CURSOR_SIZESE
6: wx.CURSOR_SIZENS, # VTK_CURSOR_SIZENS
7: wx.CURSOR_SIZEWE, # VTK_CURSOR_SIZEWE
8: wx.CURSOR_SIZING, # VTK_CURSOR_SIZEALL
9: wx.CURSOR_HAND, # VTK_CURSOR_HAND
10: wx.CURSOR_CROSS, # VTK_CURSOR_CROSSHAIR
}
def BindEvents(self):
"""Binds all the necessary events for navigation, sizing, drawing."""
# refresh window by doing a Render
self.Bind(wx.EVT_PAINT, self.OnPaint)
# turn off background erase to reduce flicker
self.Bind(wx.EVT_ERASE_BACKGROUND, lambda e: None)
# Bind the events to the event converters
self.Bind(wx.EVT_RIGHT_DOWN, self.OnButtonDown)
self.Bind(wx.EVT_LEFT_DOWN, self.OnButtonDown)
self.Bind(wx.EVT_MIDDLE_DOWN, self.OnButtonDown)
self.Bind(wx.EVT_RIGHT_UP, self.OnButtonUp)
self.Bind(wx.EVT_LEFT_UP, self.OnButtonUp)
self.Bind(wx.EVT_MIDDLE_UP, self.OnButtonUp)
self.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel)
self.Bind(wx.EVT_MOTION, self.OnMotion)
self.Bind(wx.EVT_ENTER_WINDOW, self.OnEnter)
self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave)
# If we use EVT_KEY_DOWN instead of EVT_CHAR, capital versions
# of all characters are always returned. EVT_CHAR also performs
# other necessary keyboard-dependent translations.
self.Bind(wx.EVT_CHAR, self.OnKeyDown)
self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.Bind(wx.EVT_SIZE, self.OnSize)
# the wx 2.8.7.1 documentation states that you HAVE to handle
# this event if you make use of CaptureMouse, which we do.
if _useCapture and hasattr(wx, 'EVT_MOUSE_CAPTURE_LOST'):
self.Bind(wx.EVT_MOUSE_CAPTURE_LOST,
self.OnMouseCaptureLost)
def __getattr__(self, attr):
"""Makes the object behave like a vtkGenericRenderWindowInteractor."""
if attr == '__vtk__':
return lambda t=self._Iren: t
elif hasattr(self._Iren, attr):
return getattr(self._Iren, attr)
else:
raise AttributeError(self.__class__.__name__ +
" has no attribute named " + attr)
def CreateTimer(self, obj, evt):
"""Creates a timer."""
self._timer = EventTimer(self)
self._timer.Start(10, True)
def DestroyTimer(self, obj, evt):
"""The timer is a one shot timer so will expire automatically."""
return 1
def _CursorChangedEvent(self, obj, evt):
"""Change the wx cursor if the renderwindow's cursor was changed."""
cur = self._cursor_map[obj.GetCurrentCursor()]
c = wx.StockCursor(cur)
self.SetCursor(c)
def CursorChangedEvent(self, obj, evt):
"""Called when the CursorChangedEvent fires on the render window."""
# This indirection is needed since when the event fires, the
# current cursor is not yet set so we defer this by which time
# the current cursor should have been set.
wx.CallAfter(self._CursorChangedEvent, obj, evt)
def HideCursor(self):
"""Hides the cursor."""
c = wx.StockCursor(wx.CURSOR_BLANK)
self.SetCursor(c)
def ShowCursor(self):
"""Shows the cursor."""
rw = self._Iren.GetRenderWindow()
cur = self._cursor_map[rw.GetCurrentCursor()]
c = wx.StockCursor(cur)
self.SetCursor(c)
def GetDisplayId(self):
"""
Function to get X11 Display ID from WX and return it in a format that
can be used by VTK Python.
We query the X11 Display with a new call that was added in wxPython
2.6.0.1. The call returns a SWIG object which we can query for the
address and subsequently turn into an old-style SWIG-mangled string
representation to pass to VTK.
"""
d = None
try:
d = wx.GetXDisplay()
except NameError:
# wx.GetXDisplay was added by Robin Dunn in wxPython 2.6.0.1
# if it's not available, we can't pass it. In general,
# things will still work; on some setups, it'll break.
pass
else:
# wx returns None on platforms where wx.GetXDisplay is not relevant
if d:
d = hex(d)
# On wxPython-2.6.3.2 and above there is no leading '0x'.
if not d.startswith('0x'):
d = '0x' + d
# we now have 0xdeadbeef
# VTK wants it as: _deadbeef_void_p (pre-SWIG-1.3 style)
d = '_%s_%s' % (d[2:], 'void_p')
return d
def OnMouseCaptureLost(self, event):
"""
This is signalled when we lose mouse capture due to an external event,
such as when a dialog box is shown.
See the wx documentation.
"""
# the documentation seems to imply that by this time we've
# already lost capture. I have to assume that we don't need
# to call ReleaseMouse ourselves.
if _useCapture and self._own_mouse:
self._own_mouse = False
def OnPaint(self, event):
"""Handles the wx.EVT_PAINT event for wxVTKRenderWindowInteractor."""
# wx should continue event processing after this handler.
# We call this BEFORE Render(), so that if Render() raises
# an exception, wx doesn't re-call OnPaint repeatedly.
event.Skip()
dc = wx.PaintDC(self)
# make sure the RenderWindow is sized correctly
self._Iren.GetRenderWindow().SetSize(self.GetSizeTuple())
# Tell the RenderWindow to render inside the wx.Window.
if not self.__handle:
# on relevant platforms, set the X11 Display ID
d = self.GetDisplayId()
if d:
self._Iren.GetRenderWindow().SetDisplayId(d)
# store the handle
self.__handle = self.GetHandle()
# and give it to VTK
self._Iren.GetRenderWindow().SetWindowInfo(str(self.__handle))
# now that we've painted once, the Render() reparenting logic
# is safe
self.__has_painted = True
self.Render()
def OnSize(self, event):
"""Handles the wx.EVT_SIZE event for wxVTKRenderWindowInteractor."""
# event processing should continue (we call this before the
# Render(), in case it raises an exception)
event.Skip()
try:
width, height = event.GetSize()
except:
width = event.GetSize().width
height = event.GetSize().height
self._Iren.SetSize(width, height)
self._Iren.ConfigureEvent()
# this will check for __handle
self.Render()
def OnMotion(self, event):
"""Handles the wx.EVT_MOTION event for wxVTKRenderWindowInteractor."""
# event processing should continue
# we call this early in case any of the VTK code raises an
# exception.
event.Skip()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
event.ControlDown(),
event.ShiftDown(),
chr(0), 0, None)
self._Iren.MouseMoveEvent()
def OnEnter(self, event):
"""Handles the wx.EVT_ENTER_WINDOW event for
wxVTKRenderWindowInteractor."""
# event processing should continue
event.Skip()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
event.ControlDown(),
event.ShiftDown(),
chr(0), 0, None)
self._Iren.EnterEvent()
def OnLeave(self, event):
"""Handles the wx.EVT_LEAVE_WINDOW event for
wxVTKRenderWindowInteractor."""
# event processing should continue
event.Skip()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
event.ControlDown(),
event.ShiftDown(),
chr(0), 0, None)
self._Iren.LeaveEvent()
def OnButtonDown(self, event):
"""Handles the wx.EVT_LEFT/RIGHT/MIDDLE_DOWN events for
wxVTKRenderWindowInteractor."""
# allow wx event processing to continue
# on wxPython 2.6.0.1, omitting this will cause problems with
# the initial focus, resulting in the wxVTKRWI ignoring keypresses
# until we focus elsewhere and then refocus the wxVTKRWI frame
# we do it this early in case any of the following VTK code
# raises an exception.
event.Skip()
ctrl, shift = event.ControlDown(), event.ShiftDown()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
ctrl, shift, chr(0), 0, None)
button = 0
if event.RightDown():
self._Iren.RightButtonPressEvent()
button = 'Right'
elif event.LeftDown():
self._Iren.LeftButtonPressEvent()
button = 'Left'
elif event.MiddleDown():
self._Iren.MiddleButtonPressEvent()
button = 'Middle'
# save the button and capture mouse until the button is released
# we only capture the mouse if it hasn't already been captured
if _useCapture and not self._own_mouse:
self._own_mouse = True
self._mouse_capture_button = button
self.CaptureMouse()
def OnButtonUp(self, event):
"""Handles the wx.EVT_LEFT/RIGHT/MIDDLE_UP events for
wxVTKRenderWindowInteractor."""
# event processing should continue
event.Skip()
button = 0
if event.RightUp():
button = 'Right'
elif event.LeftUp():
button = 'Left'
elif event.MiddleUp():
button = 'Middle'
# if the same button is released that captured the mouse, and
# we have the mouse, release it.
# (we need to get rid of this as soon as possible; if we don't
# and one of the event handlers raises an exception, mouse
# is never released.)
if _useCapture and self._own_mouse and \
button == self._mouse_capture_button:
self.ReleaseMouse()
self._own_mouse = False
ctrl, shift = event.ControlDown(), event.ShiftDown()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
ctrl, shift, chr(0), 0, None)
if button == 'Right':
self._Iren.RightButtonReleaseEvent()
elif button == 'Left':
self._Iren.LeftButtonReleaseEvent()
elif button == 'Middle':
self._Iren.MiddleButtonReleaseEvent()
def OnMouseWheel(self, event):
"""Handles the wx.EVT_MOUSEWHEEL event for
wxVTKRenderWindowInteractor."""
# event processing should continue
event.Skip()
ctrl, shift = event.ControlDown(), event.ShiftDown()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
ctrl, shift, chr(0), 0, None)
if event.GetWheelRotation() > 0:
self._Iren.MouseWheelForwardEvent()
else:
self._Iren.MouseWheelBackwardEvent()
def OnKeyDown(self, event):
"""Handles the wx.EVT_KEY_DOWN event for
wxVTKRenderWindowInteractor."""
# event processing should continue
event.Skip()
ctrl, shift = event.ControlDown(), event.ShiftDown()
keycode, keysym = event.GetKeyCode(), None
key = chr(0)
if keycode < 256:
key = chr(keycode)
# wxPython 2.6.0.1 does not return a valid event.Get{X,Y}()
# for this event, so we use the cached position.
(x, y) = self._Iren.GetEventPosition()
self._Iren.SetEventInformation(x, y,
ctrl, shift, key, 0,
keysym)
self._Iren.KeyPressEvent()
self._Iren.CharEvent()
def OnKeyUp(self, event):
"""Handles the wx.EVT_KEY_UP event for wxVTKRenderWindowInteractor."""
# event processing should continue
event.Skip()
ctrl, shift = event.ControlDown(), event.ShiftDown()
keycode, keysym = event.GetKeyCode(), None
key = chr(0)
if keycode < 256:
key = chr(keycode)
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
ctrl, shift, key, 0,
keysym)
self._Iren.KeyReleaseEvent()
def GetRenderWindow(self):
"""Returns the render window (vtkRenderWindow)."""
return self._Iren.GetRenderWindow()
def Render(self):
"""Actually renders the VTK scene on screen."""
RenderAllowed = 1
if not self.__RenderWhenDisabled:
# the user doesn't want us to render when the toplevel frame
# is disabled - first find the top level parent
topParent = wx.GetTopLevelParent(self)
if topParent:
# if it exists, check whether it's enabled
                # if it's not enabled, RenderAllowed will be false
RenderAllowed = topParent.IsEnabled()
if RenderAllowed:
if self.__handle and self.__handle == self.GetHandle():
self._Iren.GetRenderWindow().Render()
elif self.GetHandle() and self.__has_painted:
# this means the user has reparented us; let's adapt to the
# new situation by doing the WindowRemap dance
self._Iren.GetRenderWindow().SetNextWindowInfo(
str(self.GetHandle()))
# make sure the DisplayId is also set correctly
d = self.GetDisplayId()
if d:
self._Iren.GetRenderWindow().SetDisplayId(d)
# do the actual remap with the new parent information
self._Iren.GetRenderWindow().WindowRemap()
# store the new situation
self.__handle = self.GetHandle()
self._Iren.GetRenderWindow().Render()
def SetRenderWhenDisabled(self, newValue):
"""
Change value of __RenderWhenDisabled ivar.
If __RenderWhenDisabled is false (the default), this widget will not
call Render() on the RenderWindow if the top level frame (i.e. the
containing frame) has been disabled.
This prevents recursive rendering during wx.SafeYield() calls.
wx.SafeYield() can be called during the ProgressMethod() callback of
a VTK object to have progress bars and other GUI elements updated -
it does this by disabling all windows (disallowing user-input to
prevent re-entrancy of code) and then handling all outstanding
GUI events.
However, this often triggers an OnPaint() method for wxVTKRWIs,
resulting in a Render(), resulting in Update() being called whilst
still in progress.
"""
self.__RenderWhenDisabled = bool(newValue)
#--------------------------------------------------------------------
def wxVTKRenderWindowInteractorConeExample():
"""Like it says, just a simple example."""
# every wx app needs an app
app = wx.PySimpleApp()
# create the top-level frame, sizer and wxVTKRWI
frame = wx.Frame(None, -1, "wxVTKRenderWindowInteractor", size=(400, 400))
widget = wxVTKRenderWindowInteractor(frame, -1)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(widget, 1, wx.EXPAND)
frame.SetSizer(sizer)
frame.Layout()
# It would be more correct (API-wise) to call widget.Initialize() and
# widget.Start() here, but Initialize() calls RenderWindow.Render().
# That Render() call will get through before we can setup the
# RenderWindow() to render via the wxWidgets-created context; this
# causes flashing on some platforms and downright breaks things on
# other platforms. Instead, we call widget.Enable(). This means
# that the RWI::Initialized ivar is not set, but in THIS SPECIFIC CASE,
# that doesn't matter.
widget.Enable(1)
widget.AddObserver("ExitEvent", lambda o, e, f=frame: f.Close())
ren = vtk.vtkRenderer()
widget.GetRenderWindow().AddRenderer(ren)
cone = vtk.vtkConeSource()
cone.SetResolution(8)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInput(cone.GetOutput())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
ren.AddActor(coneActor)
# show the window
frame.Show()
app.MainLoop()
if __name__ == "__main__":
wxVTKRenderWindowInteractorConeExample()
|
KristoferHellman/gimli
|
python/pygimli/gui/vtk/wxVTKRenderWindowInteractor.py
|
Python
|
gpl-3.0
| 24,578
|
[
"VTK"
] |
f25866a7d4d3bb08b10733598f6e1a1b08cb2260f71564f52f921b2a8c662c94
|
#Author : Lewis Mervin lhm30@cam.ac.uk
#Supervisor : Dr. A. Bender
#All rights reserved 2014
#Protein Target Prediction Tool trained on SARs from PubChem (Mined 08/04/14) and ChEMBL18
#Molecular Descriptors : 2048bit Morgan Binary Fingerprints (Rdkit) - ECFP4
#Dependencies : rdkit, sklearn, numpy
#libraries
from rdkit import Chem
from rdkit.Chem import AllChem
from sklearn.naive_bayes import BernoulliNB
import cPickle
import glob
import os
import sys
import numpy as np
def introMessage():
print '=============================================================================================='
print ' Author: Lewis Mervin\n Email: lhm30@cam.ac.uk\n Supervisor: Dr. A. Bender'
print ' Address: Centre For Molecular Informatics, Dept. Chemistry, Lensfield Road, Cambridge CB2 1EW'
print '==============================================================================================\n'
return
#import user query
def importQuery():
query = open(file_name).read().splitlines()
matrix = []
for q in query:
matrix.append(calcFingerprints(q))
matrix = np.array(matrix, dtype=np.uint8)
return matrix
#calculate 2048bit morgan fingerprints, radius 2
def calcFingerprints(smiles):
m1 = Chem.MolFromSmiles(smiles)
fp = AllChem.GetMorganFingerprintAsBitVect(m1,2, nBits=2048)
binary = fp.ToBitString()
return list(binary)
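#a minimal sketch of how the helpers above compose (hypothetical SMILES string;
#calcFingerprints returns a list of 2048 '0'/'1' characters, which importQuery
#then packs into a uint8 matrix):
#	bits = calcFingerprints('c1ccccc1O')
#	row = np.array([bits], dtype=np.uint8)	#shape (1, 2048)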
#get names of uniprots
def getName():
global u_name
t_file = open('classes_in_model.txt').read().splitlines()
t_file.pop(0)
for t in t_file:
t = t.split('\t')
u_name[t[1]] = t[0]
return
#import thresholds as specified by user
def importThresholds():
global thresholds
global metric
if metric == 'p':
m = 1
if metric == 'f':
m = 2
if metric == 'r':
m = 3
if metric == 'a':
m = 4
if metric == '0.5':
m = 5
t_file = open('thresholds.txt').read().splitlines()
for t in t_file:
t = t.split('\t')
thresholds[t[0]] = float(t[m])
return
#main
introMessage()
metric = sys.argv[1]
file_name = sys.argv[2]
print ' Using Class Specific Cut-off Thresholds of : ' + metric
t_count = len(glob.glob('models/*.pkl'))
print ' Total Number of Classes : ' + str(t_count)
output_name = 'out_results_binary.txt'
file = open(output_name, 'w')
thresholds = dict()
importThresholds()
u_name = dict()
getName()
querymatrix = importQuery()
print ' Total Number of Query Molecules : ' + str(len(querymatrix))
count=0
#for each model
for filename in glob.glob('models/*.pkl'):
count +=1
#unpickle model
with open(filename, 'rb') as fid:
row = [u_name[filename[7:-4]],filename[7:-4]]
bnb = cPickle.load(fid)
probs = bnb.predict_proba(querymatrix)
for prob in probs:
#if the probability of activity is above threshold then active
if prob[1] >= thresholds[filename[7:-4]]:
row.append('1')
else:
row.append('0')
file.write('\t'.join(map(str,row)) + '\n')
	#update percent finished
percent = (float(count)/float(t_count))*100
sys.stdout.write(' Performing Classification on Query Molecules: %3d%%\r' % percent)
sys.stdout.flush()
print '\n Wrote Results to: ' + output_name
file.close()
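#example invocation (hypothetical input file with one SMILES per line; the first
#argument selects the threshold column, one of p/f/r/a/0.5 as parsed above):
#	python predict_binary.py p input_smiles.smi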
|
lhm30/PIDGIN
|
predict_binary.py
|
Python
|
mit
| 3,324
|
[
"RDKit"
] |
3a7f593b57f1ad7dbec6bddaa2d9f0a947fda63b599cd0e4df606002ab3adfff
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import reduce
import numpy
from pyscf import gto
from pyscf import scf
from pyscf.lo import nao
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom = '''
O 0. 0. 0
1 0. -0.757 0.587
1 0. 0.757 0.587'''
mol.basis = 'cc-pvdz'
mol.build()
mf = scf.RHF(mol)
mf.conv_tol = 1e-14
mf.scf()
mol1 = mol.copy()
mol1.cart = True
mf1 = scf.RHF(mol1).set(conv_tol=1e-14).run()
class KnowValues(unittest.TestCase):
def test_pre_nao(self):
c = nao.prenao(mol, mf.make_rdm1())
self.assertAlmostEqual(numpy.linalg.norm(c), 5.7742626195362039, 9)
self.assertAlmostEqual(abs(c).sum(), 33.214804163888289, 6)
c = nao.prenao(mol1, mf1.make_rdm1())
self.assertAlmostEqual(numpy.linalg.norm(c), 5.5434134741828105, 9)
self.assertAlmostEqual(abs(c).sum(), 31.999905597187052, 6)
def test_nao(self):
c = nao.nao(mol, mf)
s = mf.get_ovlp()
self.assertTrue(numpy.allclose(reduce(numpy.dot, (c.T, s, c)),
numpy.eye(s.shape[0])))
self.assertAlmostEqual(numpy.linalg.norm(c), 8.982385484322208, 9)
self.assertAlmostEqual(abs(c).sum(), 90.443872916389637, 6)
c = nao.nao(mol1, mf1)
s = mf1.get_ovlp()
self.assertTrue(numpy.allclose(reduce(numpy.dot, (c.T, s, c)),
numpy.eye(s.shape[0])))
self.assertAlmostEqual(numpy.linalg.norm(c), 9.4629575662640129, 9)
self.assertAlmostEqual(abs(c).sum(), 100.24554485355642, 6)
if __name__ == "__main__":
print("Test orth")
unittest.main()
|
gkc1000/pyscf
|
pyscf/lo/test/test_nao.py
|
Python
|
apache-2.0
| 2,294
|
[
"PySCF"
] |
86792f46fc0534a1b10c2f86ab8cdb7bd9b0d044463e1fd3b8d16be0f9b41d2f
|
"""This module contains functional tests for the Home Page."""
import datetime
from django.contrib.auth import get_user_model
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
from mezzanine.blog.models import BlogPost
from communities.models import Community
from fec.utils import SeleniumTestCase
from homepage.models import HomepageContent
class HomePageTests(SeleniumTestCase):
"""Test Expectations for the Home Page."""
def setUp(self):
"""Create a User, the Homepage Content & visit the home page."""
user = get_user_model()
self.user = user.objects.create_superuser(
'test', 'test@test.com', 'test')
self.content = HomepageContent.objects.create(
intro_text='The intro text should be large',
content_title='Unboxed sections heading',
content='Unique content string',
)
self.selenium.get('{}{}'.format(self.live_server_url, '/'))
def test_intro_text_is_correct(self):
"""The page should include large introduction text."""
intro_text = self.selenium.find_element_by_css_selector(
"h3#lead-text").text
self.assertEqual(
intro_text, self.content.intro_text,
"The Intro Text is Incorrect.")
def test_content_header_is_correct(self):
"""The content's header should be set by the HomepageContent."""
header = self.selenium.find_element_by_css_selector(
"h4.homepage-heading").text
self.assertEqual(
header, self.content.content_title,
"The Custom Content Section's Header is Incorrect.")
def test_custom_content_is_correct(self):
"""The content should be set by the HomepageContent."""
content = self.selenium.find_element_by_css_selector(
'#homepage-custom-content').text
self.assertIn(self.content.content, content,
'Custom content not found in section')
def test_new_communities_are_shown(self):
"""The 3 newest communities are shown."""
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
community1 = Community.objects.create(
title='Kalamazoo', date_joined=today)
community2 = Community.objects.create(
title='Kirtan', date_joined=today)
community3 = Community.objects.create(
title='Oslard', date_joined=today)
hidden = Community.objects.create(
title='Jugulum', date_joined=yesterday)
also_hidden = Community.objects.create(
title='Shai\'hulud')
self.selenium.get('{}{}'.format(self.live_server_url, '/'))
html_excluding_footer = self.selenium.find_element_by_css_selector(
'*:not(footer) > .container > .row').text
self.assertNotIn(hidden.title, html_excluding_footer,
'Old community found on page.')
self.assertNotIn(also_hidden.title, html_excluding_footer,
'Old community found on page.')
communities = self.selenium.find_element_by_css_selector(
'div#newest-communities').text
self.assertIn(community1.title, communities, 'Community not in block.')
self.assertIn(community2.title, communities, 'Community not in block.')
self.assertIn(community3.title, communities, 'Community not in block.')
def test_latest_news_is_shown(self):
"""The latest blog post is shown."""
blog_post = BlogPost.objects.create(
title='Hank Aaron the 24th', status=CONTENT_STATUS_PUBLISHED,
user=self.user)
self.selenium.refresh()
html = self.selenium.find_element_by_css_selector('html').text
self.assertIn(blog_post.title, html)
def test_can_hide_communities_block(self):
"""The Newest Communities block can be hidden."""
community = Community.objects.create(title='Kalamazoo')
self.content.show_newest_communities = False
self.content.save()
self.selenium.refresh()
html_excluding_footer = self.selenium.find_element_by_css_selector(
'*:not(footer) > .container > .row').text
self.assertNotIn(community.title, html_excluding_footer,
'Newest communities not hidden.')
def test_can_hide_news_block(self):
"""The Latest News block can be hidden."""
blog_post = BlogPost.objects.create(
title='Hank Aaron the 24th', status=CONTENT_STATUS_PUBLISHED,
user=self.user)
self.content.show_news = False
self.content.save()
self.selenium.refresh()
html = self.selenium.find_element_by_css_selector('html').text
self.assertNotIn(blog_post.title, html, 'News was not hidden.')
def test_no_blocks_widens_content(self):
"""Hiding the News & Communiits Blocks widens the Custom Content."""
self.content.show_news = False
self.content.show_newest_communities = False
self.content.save()
self.selenium.refresh()
self.assertTrue(self.selenium.find_element_by_css_selector(
'#homepage-custom-content.col-md-12'
), 'Custom content block not widened.')
|
FederationOfEgalitarianCommunities/FECWebsite
|
fec/functional_tests/home_page_tests.py
|
Python
|
gpl-3.0
| 5,254
|
[
"VisIt"
] |
0f5d3184bf394f95a971f671c39a5bdfc220185ae10862fbb70cd32874cc1090
|
import tensorflow as tf
import numpy as np
import time
def get_batch(X, Xn, size):
a = np.random.choice(len(X), size, replace=False)
return X[a], Xn[a]
class Denoiser:
def __init__(self, input_dim, hidden_dim, epoch=10000, batch_size=50, learning_rate=0.001):
self.epoch = epoch
self.batch_size = batch_size
self.learning_rate = learning_rate
self.x = tf.placeholder(dtype=tf.float32, shape=[None, input_dim], name='x')
self.x_noised = tf.placeholder(dtype=tf.float32, shape=[None, input_dim], name='x_noised')
with tf.name_scope('encode'):
self.weights1 = tf.Variable(tf.random_normal([input_dim, hidden_dim], dtype=tf.float32), name='weights')
self.biases1 = tf.Variable(tf.zeros([hidden_dim]), name='biases')
self.encoded = tf.nn.sigmoid(tf.matmul(self.x_noised, self.weights1) + self.biases1, name='encoded')
with tf.name_scope('decode'):
weights = tf.Variable(tf.random_normal([hidden_dim, input_dim], dtype=tf.float32), name='weights')
biases = tf.Variable(tf.zeros([input_dim]), name='biases')
self.decoded = tf.matmul(self.encoded, weights) + biases
self.loss = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(self.x, self.decoded))))
self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
self.saver = tf.train.Saver()
def add_noise(self, data):
noise_type = 'mask-0.2'
if noise_type == 'gaussian':
n = np.random.normal(0, 0.1, np.shape(data))
return data + n
if 'mask' in noise_type:
frac = float(noise_type.split('-')[1])
temp = np.copy(data)
for i in temp:
n = np.random.choice(len(i), round(frac * len(i)), replace=False)
i[n] = 0
return temp
def train(self, data):
data_noised = self.add_noise(data)
with open('log.csv', 'w') as writer:
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for i in range(self.epoch):
for j in range(50):
batch_data, batch_data_noised = get_batch(data, data_noised, self.batch_size)
l, _ = sess.run([self.loss, self.train_op], feed_dict={self.x: batch_data, self.x_noised: batch_data_noised})
if i % 10 == 0:
print('epoch {0}: loss = {1}'.format(i, l))
self.saver.save(sess, './model.ckpt')
epoch_time = int(time.time())
row_str = str(epoch_time) + ',' + str(i) + ',' + str(l) + '\n'
writer.write(row_str)
writer.flush()
self.saver.save(sess, './model.ckpt')
def test(self, data):
with tf.Session() as sess:
self.saver.restore(sess, './model.ckpt')
hidden, reconstructed = sess.run([self.encoded, self.decoded], feed_dict={self.x: data})
print('input', data)
print('compressed', hidden)
print('reconstructed', reconstructed)
return reconstructed
def get_params(self):
with tf.Session() as sess:
self.saver.restore(sess, './model.ckpt')
weights, biases = sess.run([self.weights1, self.biases1])
return weights, biases
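# A minimal usage sketch (hypothetical data; assumes the old TensorFlow API used
# above, e.g. tf.sub and tf.initialize_all_variables, is available):
#   data = np.random.rand(256, 10).astype(np.float32)
#   ae = Denoiser(input_dim=10, hidden_dim=5, epoch=100)
#   ae.train(data)            # writes ./model.ckpt and log.csv
#   recon = ae.test(data[:3])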
|
BinRoot/TensorFlow-Book
|
ch07_autoencoder/denoiser.py
|
Python
|
mit
| 3,422
|
[
"Gaussian"
] |
5c0ccb6f3d2914b32c8c091503de60621f11561a71ac5d3a66a077ef9293bd23
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Set up a DPD fluid and calculate pressure as a function of the
varying density. The fluid is thermalized using a DPD thermostat.
"""
import espressomd
required_features = ["DPD", "HAT"]
espressomd.assert_features(required_features)
import numpy as np
# Set up the box and time step
system = espressomd.System(box_l=3 * [10])
system.time_step = 0.01
system.cell_system.skin = 0.4
# DPD parameters
n_part = 200
kT = 1.
gamma = 1.5
r_cut = 1.
# Repulsive parameter
F_max = 1.
# Activate the thermostat
system.thermostat.set_dpd(kT=kT, seed=123)
np.random.seed(seed=42)
# Set up the DPD friction interaction
system.non_bonded_inter[0, 0].dpd.set_params(
weight_function=0, gamma=gamma, r_cut=r_cut,
trans_weight_function=0, trans_gamma=gamma, trans_r_cut=r_cut)
# Set up the repulsive interaction
system.non_bonded_inter[0, 0].hat.set_params(F_max=F_max, cutoff=r_cut)
# Add particles that are randomly distributed over the box
system.part.add(pos=system.box_l * np.random.random((n_part, 3)))
# As a usage example, we calculate the pressure at several
# particle densities.
sample_size = 100
int_steps = 1000
for V in range(100, 1000, 100):
# Rescale the system to the new volume
system.change_volume_and_rescale_particles(V**0.3333)
# List of samples
p_samples = []
for i in range(sample_size):
system.integrator.run(int_steps)
p_samples.append(system.analysis.pressure()['total'])
# Average pressure
p_avg = np.mean(p_samples)
# Standard deviation of pressure
p_std = np.std(p_samples)
print('rho {:.2f} p {:.2f} ({:.2f})'
.format(float(n_part) / V, p_avg, p_std))
|
KaiSzuttor/espresso
|
samples/dpd.py
|
Python
|
gpl-3.0
| 2,365
|
[
"ESPResSo"
] |
a1ab0a9ab633ca356f60ac3fedc81341941645f7b6765f556d014778a8c9dec5
|
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.simpleapi import logger
import AbinsModules
class AbinsLoadCRYSTALTest(unittest.TestCase, AbinsModules.GeneralLoadAbInitioTester):
def tearDown(self):
AbinsModules.AbinsTestHelpers.remove_output_files(list_of_names=["LoadCRYSTAL"])
# *************************** USE CASES ********************************************
# ===================================================================================
# | Use cases: Gamma point calculation for CRYSTAL |
# ===================================================================================
_gamma_crystal = "crystalB3LYP_LoadCRYSTAL"
_set_crystal = "crystal_set_key_LoadCRYSTAL"
# ===================================================================================
# | Use case: Molecular calculation for CRYSTAL |
# ===================================================================================
_molecule = "toluene_molecule_LoadCRYSTAL"
# ===================================================================================
# | Use cases: Phonon dispersion calculation for CRYSTAL |
# ===================================================================================
_phonon_dispersion_v1 = "mgo-GX_LoadCRYSTAL"
_phonon_dispersion_v2 = "MgO-222-DISP_LoadCRYSTAL"
def test_gamma_crystal(self):
self.check(name=self._gamma_crystal, loader=AbinsModules.LoadCRYSTAL)
self.check(name=self._set_crystal, loader=AbinsModules.LoadCRYSTAL)
def test_molecule(self):
self.check(name=self._molecule, loader=AbinsModules.LoadCRYSTAL)
def test_phonon_dispersion_crystal(self):
self.check(name=self._phonon_dispersion_v1, loader=AbinsModules.LoadCRYSTAL)
self.check(name=self._phonon_dispersion_v2, loader=AbinsModules.LoadCRYSTAL)
if __name__ == '__main__':
unittest.main()
|
ScreamingUdder/mantid
|
scripts/test/AbinsLoadCRYSTALTest.py
|
Python
|
gpl-3.0
| 2,066
|
[
"CRYSTAL"
] |
abe5d41be0391fef6731211c01d4254b978ad8834c68b09aaadca12f797f47d2
|
import truetypetracer as ttt # https://github.com/aewallin/truetype-tracer
import openvoronoi as ovd # https://github.com/aewallin/openvoronoi
#import ovdvtk
import time
#import vtk
import random
import string
import sys
def translate(segs,x,y):
out = []
for seg in segs:
seg2 = []
for p in seg:
p2 = []
p2.append(p[0] + x)
p2.append(p[1] + y)
seg2.append(p2)
#seg2.append(seg[3] + y)
out.append(seg2)
return out
def insert_polygon_points(vd, polygon):
pts=[]
for p in polygon:
pts.append( ovd.Point( p[0], p[1] ) )
id_list = []
print "inserting ",len(pts)," point-sites:"
m=0
for p in pts:
id_list.append( vd.addVertexSite( p ) )
print " ",m," added vertex ", id_list[ len(id_list) -1 ]
m=m+1
print vd.numFaces()," faces after all points inserted"
return id_list
def insert_polygon_segments(vd,id_list):
j=0
jmax=9999999 # for debugging, set jmax to the problematic case to stop algorithm in the middle
print "inserting ",len(id_list)," line-segments:"
for n in range(len(id_list)):
n_nxt = n+1
if n==(len(id_list)-1):
n_nxt=0
if (j<jmax):
#vd.debug_on()
print " ",j,"inserting segment ",id_list[n]," - ",id_list[n_nxt]
if id_list[n] == 115869: # 51456: 115869
vd.debug_on()
vd.addLineSite( id_list[n], id_list[n_nxt],6)
vod.setVDText2([1,1])
vod.setAll()
#verts=[92555, 51680,92624,52559,51474,92620,52805]
#for v in verts:
#print "drawing ",v
#print vod
#print dir(vod)
# vod.drawVertexIdx(v)
print "PYTHON All DONE."
myscreen.render()
myscreen.iren.Start()
else:
#pass
vd.addLineSite( id_list[n], id_list[n_nxt])
j=j+1
def modify_segments(segs):
segs_mod =[]
for seg in segs:
first = seg[0]
last = seg[ len(seg)-1 ]
assert( first[0]==last[0] and first[1]==last[1] )
seg.pop()
seg.reverse()
segs_mod.append(seg)
#drawSegment(myscreen, seg)
return segs_mod
def insert_many_polygons(vd,segs):
polygon_ids =[]
t_before = time.time()
for poly in segs:
poly_id = insert_polygon_points(vd,poly)
polygon_ids.append(poly_id)
t_after = time.time()
pt_time = t_after-t_before
t_before = time.time()
for ids in polygon_ids:
insert_polygon_segments(vd,ids)
t_after = time.time()
seg_time = t_after-t_before
return [pt_time, seg_time]
def ttt_segments(text,scale):
wr = ttt.SEG_Writer()
# wr.scale = 3
wr.arc = False
wr.conic = False
wr.cubic = False
wr.scale = float(1)/float(scale)
# "L" has 36 points by default
wr.conic_biarc_subdivision = 10 # this has no effect?
    wr.conic_line_subdivision = 200 # =10 increases nr of points to 366, = 5 gives 729 pts
wr.cubic_biarc_subdivision = 10 # no effect?
wr.cubic_line_subdivision = 10 # no effect?
s3 = ttt.ttt(text,wr)
segs = wr.get_segments()
ext = wr.extents
return [ext, segs]
def scale_segs(segs, current_length, desired_length):
out=[]
scale = float(desired_length) / float(current_length)
for seg in segs:
seg2 = []
for p in seg:
p2 = []
p2.append(p[0] * scale)
p2.append(p[1] * scale)
seg2.append(p2)
#seg2.append(seg[3] + y)
out.append(seg2)
return [out,scale]
def get_random_row(row_length):
# construct some random strings
chars = ""
for n in range(row_length):
c = random.choice(string.ascii_lowercase) # http://stackoverflow.com/questions/2823316/generate-a-random-letter-in-python
chars+=c
chars+=" "
return chars
def get_scaled_segs( chars, length):
# generate segs with scale 1
ret = ttt_segments( chars , 1)
extents = ret[0]
segs = ret[1]
# translate so lower left corner is at (0,0)
segs = translate(segs, -extents.minx, -extents.miny )
# scale to desired length
current_length = extents.maxx-extents.minx
current_height = extents.maxy-extents.miny
[segs,scale] = scale_segs(segs, current_length, length)
# remove duplicate points
segs = modify_segments(segs)
return [segs, extents,scale]
if __name__ == "__main__":
print ttt.version()
conic_subdiv = 200
seed = 42
if len(sys.argv) == 2:
seed = int(sys.argv[1]) # if seed was specified on command-line, take it
random.seed( seed )
row_length = 15
n_rows = 10
length = 1
dx = -0.5
start_y = -0.5
current_y = start_y
segs=[]
for n in range(n_rows):
chars = get_random_row(row_length)
[rowsegs, extents, scale] = get_scaled_segs( chars, length)
rowsegs_t = translate(rowsegs, dx, current_y )
current_y = current_y + 1.1*(extents.maxy-extents.miny)*scale
segs+=rowsegs_t
vd = ovd.VoronoiDiagram(1,120)
print ovd.version()
times = insert_many_polygons(vd,segs)
assert( vd.check() )
print "PYTHON All DONE."
|
aewallin/openvoronoi
|
src/test/pytest_ttt_random_chars/ttt_random_chars.py
|
Python
|
lgpl-2.1
| 5,444
|
[
"VTK"
] |
09ac2c918dee198b4c50c8eb63cd7395d279a7e337639130a44ef262d993def8
|
import flask
from views import *
from lookups import *
import rest as annotation
import requests
from config import config
if config.IMPORT_PYSAM_PRIMER3:
import pysam
import primer3
import myvariant
import re
from utils import *
import itertools
import csv
#hpo lookup
import phizz
import random
import orm
import vcf
import subprocess
import os
@app.route('/variant/<variant_str>')
@requires_auth
def variant_page(variant_str):
try:
variant=orm.Variant(variant_id=variant_str,db=get_db())
except:
return 'Variant does not exist'
if not variant: return 'Variant does not exist'
variant=variant.__dict__
if session['user'] == 'demo':
del variant['wt_samples']
del variant['het_samples']
del variant['hom_samples']
return render_template(
'variant.html',
title=variant_str,
variant=variant
)
#@app.route('/variant_json/<variant_str>')
#def variant_json(variant_str): return jsonify(result=vcf.vcf_query(variant_str=variant_str))
@app.route('/variant_json/<variant_str>')
def variant_json(variant_str):
variant=orm.Variant(variant_id=variant_str,db=get_db())
if session['user'] == 'demo':
variant.__dict__['wt_samples']=[]
variant.__dict__['het_samples']=[]
variant.__dict__['hom_samples']=[]
return jsonify(result=variant.__dict__)
@app.route('/variant_json_db_new/<variant_str>')
def variant_json_db_new(variant_str):
if session['user'] == 'demo': return ''
variant=orm.Variant(variant_id=variant_str,db=get_db())
return jsonify(result=variant.__dict__)
@app.route('/set_variant_causal/<individual>/<variant_str>')
def set_variant_causal(individual, variant_str):
print individual, variant_str
db=get_db()
#get_db().patients.update({'patient_id':individual},{'$addToSet':{'causal_variants':variant_str}})
var=db.variants.find_one({'variant_id':variant_str})
gene_id=var['genes'][0]
gene_name=db.genes.find_one({'gene_id':gene_id})['gene_name_upper']
print 'GENE_NAME', gene_name
p=get_db('DB_NAME_PATIENTS').patients.find_one({'external_id':individual})
get_db('DB_NAME_PATIENTS').patients.update_one({'external_id':individual},{'$set':{'genes': p.get('genes',[])+[{'gene':gene_name}]}})
print get_db(app.config['DB_NAME_PATIENTS']).patients.update({'external_id':individual},{'$set':p},w=0)
p=db.patients.find_one({'external_id':individual})
p['causal_variants']=list(frozenset(p.get('causal_variants',[])+[variant_str]))
db.patients.update({'external_id':individual},{'$set':{'causal_variants':p['causal_variants']}},w=0)
if request.referrer:
referrer=request.referrer
u = urlparse(referrer)
referrer='%s://%s' % (u.scheme,u.hostname,)
if u.port: referrer='%s:%s' % (referrer,u.port,)
return redirect(referrer+'/individual/'+individual)
@app.route('/unset_variant_causal/<individual>/<variant_str>')
def unset_variant_causal(individual, variant_str):
print individual, variant_str
db=get_db()
p=db.patients.find_one({'external_id':individual})
if 'causal_variants' in p and not p['causal_variants']: p['causal_variants']=[]
if variant_str in p.get('causal_variants',[]):
p['causal_variants']=p['causal_variants'].remove(variant_str)
db.patients.update({'external_id':individual},{'$set':{'causal_variants':p['causal_variants']}},w=0)
p2=get_db('DB_NAME_PATIENTS').patients.find_one({'external_id':individual})
p2['genes']=[]
for var in p['causal_variants']:
var=db.variants.find_one({'variant_id':var})
gene_id=var['genes'][0]
gene_name=db.genes.find_one({'gene_id':gene_id})['gene_name_upper']
print 'GENE_NAME', gene_name
p2['genes']=list(frozenset(p2.get('genes',[])+[{'gene':gene_name}]))
print get_db(app.config['DB_NAME_PATIENTS']).patients.update({'external_id':individual},{'$set':p2},w=0)
if request.referrer:
referrer=request.referrer
u = urlparse(referrer)
referrer='%s://%s' % (u.scheme,u.hostname,)
if u.port: referrer='%s:%s' % (referrer,u.port,)
return redirect(referrer+'/individual/'+individual)
@app.route('/set_variant_status/<individual>/<variant_str>/<status>')
def set_variant_status(individual, variant_str, status):
print individual, variant_str, status
db=get_db()
#print get_db().patients.update({'patient_id':individual},{'$addToSet':{'variant_status':{variant_str:status}}})
rare_variants=db.patients.find_one({'external_id':individual},{'rare_variants':1})['rare_variants']
for rv in rare_variants:
if rv['variant_id']==variant_str:
rv['status']=status
print db.patients.update({'external_id':individual},{'$set':{'rare_variants':rare_variants}})
return status
@app.route('/private_variants/<individual>')
def private_variants(individual):
pv=[]
cmd="bgt view -s,"+individual+" -s 'name!=\""+individual+"\"' -f 'AC1>0&&AC2==0' -G "+ "/slms/gee/research/vyplab/UCLex/mainset_July2016/bgt/mainset_July2016.bgt"
print(cmd)
s=subprocess.check_output([cmd],shell=True)
for l in s.split('\n'):
if len(l)<5: continue
if l.startswith('##'): continue
if l.startswith('#'):
headers=l.split('\t')
continue
d=dict(zip(headers,l.split('\t')))
d.update(dict([x.split('=') for x in d['INFO'].split(';')]))
del d['INFO']
d['variant_id']='-'.join([d['#CHROM'],d['POS'],d['REF'],d['ALT']])
pv.append(d)
return jsonify(result=pv)
@app.route('/rare_variants/<individual>/<AC>')
def rare_variants(individual,AC=10):
pv=[]
cmd="bgt view -s,"+individual+" -s 'name!=\""+individual+"\"' -f 'AC1>0&&AC2<%s' "%str(AC)+ "-G /slms/gee/research/vyplab/UCLex/mainset_July2016/bgt/mainset_July2016.bgt"
print(cmd)
proc=subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=True)
def generate():
for l in iter(proc.stdout.readline,''):
l=l.strip()
print(l)
if len(l)<5: continue
if l.startswith('##'): continue
if l.startswith('#'):
headers=l.split('\t')
continue
d=dict(zip(headers,l.split('\t')))
d.update(dict([x.split('=') for x in d['INFO'].split(';')]))
del d['INFO']
if ',' in d['ALT']: d['ALT']=d['ALT'].split(',')[0]
d['variant_id']='-'.join([d['#CHROM'],d['POS'],d['REF'],d['ALT']])
try:
var=orm.Variant(variant_id=d['variant_id'],db=get_db())
except Exception, e:
print(e)
print(d)
continue
yield flask.json.dumps(var.__dict__)+'\n'
#yield l+'\n'
#return Response(stream_with_context(generate()),mimetype='application/json')
return Response(stream_with_context(generate()),mimetype='text/plain')
@app.route('/common_private_variants/<individual>/<individual2>')
def common_private_variants(individual,individual2):
pv=[]
s=subprocess.check_output(["bgt view -s,"+individual+" -s,"+individual2+" -s 'name!=\""+individual+"\"&&name!=\""+individual2+"\"' -f 'AC1>0&&AC2>0&&AC3==0' -G /slms/gee/research/vyplab/UCLex/mainset_July2016/bgt/mainset_July2016.bgt" ],shell=True)
#bgt view -s,IRDC_batch6_LON_2055 -s,WebsterURMD_Sample_06G02870 -s 'name!="IRDC_batch6_LON_2055"&&name!="WebsterURMD_Sample_06G02870"' -f 'AC1>0&&AC2>0&&AC3==0' -G mainset_July2016_chr1.bgt
for l in s.split('\n'):
if len(l)<5: continue
if l.startswith('##'): continue
if l.startswith('#'):
headers=l.split('\t')
continue
d=dict(zip(headers,l.split('\t')))
d.update(dict([x.split('=') for x in d['INFO'].split(';')]))
del d['INFO']
d['variant_id']='-'.join([d['#CHROM'],d['POS'],d['REF'],d['ALT']])
pv.append(d)
return jsonify(result=pv)
@app.route('/common_rare_variants/<individual>/<individual2>/<AC>')
def common_rare_variants(individual,individual2,AC=1):
pv=[]
s=subprocess.check_output(["bgt view -s,"+individual+" -s,"+individual2+" -s 'name!=\""+individual+"\"&&name!=\""+individual2+"\"' -f 'AC1>0&&AC2>0&&AC3<%s' "%AC+ "-G /slms/gee/research/vyplab/UCLex/mainset_July2016/bgt/mainset_July2016.bgt" ],shell=True)
#bgt view -s,IRDC_batch6_LON_2055 -s,WebsterURMD_Sample_06G02870 -s 'name!="IRDC_batch6_LON_2055"&&name!="WebsterURMD_Sample_06G02870"' -f 'AC1>0&&AC2>0&&AC3==0' -G mainset_July2016_chr1.bgt
for l in s.split('\n'):
if len(l)<5: continue
if l.startswith('##'): continue
if l.startswith('#'):
headers=l.split('\t')
continue
d=dict(zip(headers,l.split('\t')))
d.update(dict([x.split('=') for x in d['INFO'].split(';')]))
del d['INFO']
#d['variant_id']='-'.join([d['#CHROM'],d['POS'],d['REF'],d['ALT']])
#pv.append(d)
d['variant_id']='-'.join([d['#CHROM'],d['POS'],d['REF'],d['ALT']])
try:
var=orm.Variant(variant_id=d['variant_id'],db=get_db())
except Exception, e:
print(e)
print(d)
continue
pv.append(var.__dict__)
return jsonify(result=pv)
|
Withington/phenopolis
|
views/variant.py
|
Python
|
mit
| 9,280
|
[
"pysam"
] |
a163dc2c0f8d7fa1ca602f20520f68c27149cd6d6b1f2924a602c2c49f4d45b0
|
# Copyright 2004 by James Casbon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Code to deal with COMPASS output, a program for profile/profile comparison.
Compass is described in:
Sadreyev R, Grishin N. COMPASS: a tool for comparison of multiple protein
alignments with assessment of statistical significance. J Mol Biol. 2003 Feb
7;326(1):317-36.
Tested with COMPASS 1.24.
Functions:
read Reads a COMPASS file containing one COMPASS record
parse Iterates over records in a COMPASS file.
Classes:
Record One result of a COMPASS file
DEPRECATED CLASSES:
_Scanner Scan compass results
_Consumer Consume scanner events
RecordParser Parse one compass record
Iterator Iterate through a number of compass records
"""
import re
def read(handle):
record = None
try:
line = handle.next()
record = Record()
__read_names(record, line)
line = handle.next()
__read_threshold(record, line)
line = handle.next()
__read_lengths(record, line)
line = handle.next()
__read_profilewidth(record, line)
line = handle.next()
__read_scores(record, line)
except StopIteration:
if not record:
raise ValueError("No record found in handle")
else:
raise ValueError("Unexpected end of stream.")
for line in handle:
if is_blank_line(line):
continue
__read_query_alignment(record, line)
try:
line = handle.next()
__read_positive_alignment(record, line)
line = handle.next()
__read_hit_alignment(record, line)
except StopIteration:
raise ValueError("Unexpected end of stream.")
return record
def parse(handle):
record = None
try:
line = handle.next()
except StopIteration:
return
while True:
try:
record = Record()
__read_names(record, line)
line = handle.next()
__read_threshold(record, line)
line = handle.next()
__read_lengths(record, line)
line = handle.next()
__read_profilewidth(record, line)
line = handle.next()
__read_scores(record, line)
except StopIteration:
raise ValueError("Unexpected end of stream.")
for line in handle:
if not line.strip():
continue
if "Ali1:" in line:
yield record
break
__read_query_alignment(record, line)
try:
line = handle.next()
__read_positive_alignment(record, line)
line = handle.next()
__read_hit_alignment(record, line)
except StopIteration:
raise ValueError("Unexpected end of stream.")
else:
yield record
break
class Record:
"""
Hold information from one compass hit.
    Ali1 is the query, Ali2 the hit.
"""
def __init__(self):
self.query=''
self.hit=''
self.gap_threshold=0
self.query_length=0
self.query_filtered_length=0
self.query_nseqs=0
self.query_neffseqs=0
self.hit_length=0
self.hit_filtered_length=0
self.hit_nseqs=0
self.hit_neffseqs=0
self.sw_score=0
self.evalue=-1
self.query_start=-1
self.hit_start=-1
self.query_aln=''
self.hit_aln=''
self.positives=''
def query_coverage(self):
"""Return the length of the query covered in alignment"""
s = self.query_aln.replace("=", "")
return len(s)
def hit_coverage(self):
"""Return the length of the hit covered in the alignment"""
s = self.hit_aln.replace("=", "")
return len(s)
# Everything below is private
__regex = {"names": re.compile("Ali1:\s+(\S+)\s+Ali2:\s+(\S+)\s+"),
"threshold": re.compile("Threshold of effective gap content in columns: (\S+)"),
"lengths": re.compile("length1=(\S+)\s+filtered_length1=(\S+)\s+length2=(\S+)\s+filtered_length2=(\S+)"),
"profilewidth": re.compile("Nseqs1=(\S+)\s+Neff1=(\S+)\s+Nseqs2=(\S+)\s+Neff2=(\S+)"),
"scores": re.compile("Smith-Waterman score = (\S+)\s+Evalue = (\S+)"),
"start": re.compile("(\d+)"),
"align": re.compile("^.{15}(\S+)"),
"positive_alignment": re.compile("^.{15}(.+)"),
}
def __read_names(record, line):
"""
Ali1: 60456.blo.gz.aln Ali2: allscop//14984.blo.gz.aln
------query----- -------hit-------------
"""
if not "Ali1:" in line:
raise ValueError("Line does not contain 'Ali1:':\n%s" % line)
m = __regex["names"].search(line)
record.query = m.group(1)
record.hit = m.group(2)
def __read_threshold(record,line):
if not line.startswith("Threshold"):
raise ValueError("Line does not start with 'Threshold':\n%s" % line)
m = __regex["threshold"].search(line)
record.gap_threshold = float(m.group(1))
def __read_lengths(record, line):
if not line.startswith("length1="):
raise ValueError("Line does not start with 'length1=':\n%s" % line)
m = __regex["lengths"].search(line)
record.query_length = int(m.group(1))
record.query_filtered_length = float(m.group(2))
record.hit_length = int(m.group(3))
record.hit_filtered_length = float(m.group(4))
def __read_profilewidth(record, line):
if not "Nseqs1" in line:
raise ValueError("Line does not contain 'Nseqs1':\n%s" % line)
m = __regex["profilewidth"].search(line)
record.query_nseqs = int(m.group(1))
record.query_neffseqs = float(m.group(2))
record.hit_nseqs = int(m.group(3))
record.hit_neffseqs = float(m.group(4))
def __read_scores(record, line):
if not line.startswith("Smith-Waterman"):
raise ValueError("Line does not start with 'Smith-Waterman':\n%s" % line)
m = __regex["scores"].search(line)
if m:
record.sw_score = int(m.group(1))
record.evalue = float(m.group(2))
else:
record.sw_score = 0
record.evalue = -1.0
def __read_query_alignment(record, line):
m = __regex["start"].search(line)
if m:
record.query_start = int(m.group(1))
m = __regex["align"].match(line)
assert m!=None, "invalid match"
record.query_aln += m.group(1)
def __read_positive_alignment(record, line):
m = __regex["positive_alignment"].match(line)
assert m!=None, "invalid match"
record.positives += m.group(1)
def __read_hit_alignment(record, line):
m = __regex["start"].search(line)
if m:
record.hit_start = int(m.group(1))
m = __regex["align"].match(line)
assert m!=None, "invalid match"
record.hit_aln += m.group(1)
# Everything below is deprecated
from Bio import File
from Bio.ParserSupport import *
import Bio
class _Scanner:
"""Reads compass output and generate events (DEPRECATED)"""
def __init__(self):
import warnings
warnings.warn("Bio.Compass._Scanner is deprecated; please use the read() and parse() functions in this module instead", Bio.BiopythonDeprecationWarning)
def feed(self, handle, consumer):
"""Feed in COMPASS ouput"""
if isinstance(handle, File.UndoHandle):
pass
else:
handle = File.UndoHandle(handle)
assert isinstance(handle, File.UndoHandle), \
"handle must be an UndoHandle"
if handle.peekline():
self._scan_record(handle, consumer)
def _scan_record(self,handle,consumer):
self._scan_names(handle, consumer)
self._scan_threshold(handle, consumer)
self._scan_lengths(handle,consumer)
self._scan_profilewidth(handle, consumer)
self._scan_scores(handle,consumer)
self._scan_alignment(handle,consumer)
def _scan_names(self,handle,consumer):
"""
Ali1: 60456.blo.gz.aln Ali2: allscop//14984.blo.gz.aln
"""
read_and_call(handle, consumer.names, contains="Ali1:")
def _scan_threshold(self,handle, consumer):
"""
Threshold of effective gap content in columns: 0.5
"""
read_and_call(handle, consumer.threshold, start="Threshold")
def _scan_lengths(self,handle, consumer):
"""
length1=388 filtered_length1=386 length2=145 filtered_length2=137
"""
read_and_call(handle, consumer.lengths, start="length1=")
def _scan_profilewidth(self,handle,consumer):
"""
Nseqs1=399 Neff1=12.972 Nseqs2=1 Neff2=6.099
"""
read_and_call(handle, consumer.profilewidth, contains="Nseqs1")
def _scan_scores(self,handle, consumer):
"""
Smith-Waterman score = 37 Evalue = 5.75e+02
"""
read_and_call(handle, consumer.scores, start="Smith-Waterman")
def _scan_alignment(self,handle, consumer):
"""
QUERY 2 LSDRLELVSASEIRKLFDIAAGMKDVISLGIGEPDFDTPQHIKEYAKEALDKGLTHYGPN
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
QUERY 2 LSDRLELVSASEIRKLFDIAAGMKDVISLGIGEPDFDTPQHIKEYAKEALDKGLTHYGPN
QUERY IGLLELREAIAEKLKKQNGIEADPKTEIMVLLGANQAFLMGLSAFLKDGEEVLIPTPAFV
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
QUERY IGLLELREAIAEKLKKQNGIEADPKTEIMVLLGANQAFLMGLSAFLKDGEEVLIPTPAFV
"""
while 1:
line = handle.readline()
if not line:
break
if is_blank_line(line):
continue
else:
consumer.query_alignment(line)
read_and_call(handle, consumer.positive_alignment)
read_and_call(handle, consumer.hit_alignment)
class _Consumer:
# all regular expressions used -- compile only once
_re_names = re.compile("Ali1:\s+(\S+)\s+Ali2:\s+(\S+)\s+")
_re_threshold = \
re.compile("Threshold of effective gap content in columns: (\S+)")
_re_lengths = \
re.compile("length1=(\S+)\s+filtered_length1=(\S+)\s+length2=(\S+)"
+ "\s+filtered_length2=(\S+)")
_re_profilewidth = \
re.compile("Nseqs1=(\S+)\s+Neff1=(\S+)\s+Nseqs2=(\S+)\s+Neff2=(\S+)")
_re_scores = re.compile("Smith-Waterman score = (\S+)\s+Evalue = (\S+)")
_re_start = re.compile("(\d+)")
_re_align = re.compile("^.{15}(\S+)")
_re_positive_alignment = re.compile("^.{15}(.+)")
def __init__(self):
import warnings
warnings.warn("Bio.Compass._Consumer is deprecated; please use the read() and parse() functions in this module instead", Bio.BiopythonDeprecationWarning)
self.data = None
def names(self, line):
"""
Ali1: 60456.blo.gz.aln Ali2: allscop//14984.blo.gz.aln
------query----- -------hit-------------
"""
self.data = Record()
m = self.__class__._re_names.search(line)
self.data.query = m.group(1)
self.data.hit = m.group(2)
def threshold(self,line):
m = self.__class__._re_threshold.search(line)
self.data.gap_threshold = float(m.group(1))
def lengths(self,line):
m = self.__class__._re_lengths.search(line)
self.data.query_length = int(m.group(1))
self.data.query_filtered_length = float(m.group(2))
self.data.hit_length = int(m.group(3))
self.data.hit_filtered_length = float(m.group(4))
def profilewidth(self,line):
m = self.__class__._re_profilewidth.search(line)
self.data.query_nseqs = int(m.group(1))
self.data.query_neffseqs = float(m.group(2))
self.data.hit_nseqs = int(m.group(3))
self.data.hit_neffseqs = float(m.group(4))
def scores(self, line):
m = self.__class__._re_scores.search(line)
if m:
self.data.sw_score = int(m.group(1))
self.data.evalue = float(m.group(2))
else:
self.data.sw_score = 0
self.data.evalue = -1.0
def query_alignment(self, line):
m = self.__class__._re_start.search(line)
if m:
self.data.query_start = int(m.group(1))
m = self.__class__._re_align.match(line)
assert m!=None, "invalid match"
self.data.query_aln = self.data.query_aln + m.group(1)
def positive_alignment(self,line):
m = self.__class__._re_positive_alignment.match(line)
assert m!=None, "invalid match"
self.data.positives = self.data.positives + m.group(1)
def hit_alignment(self,line):
m = self.__class__._re_start.search(line)
if m:
self.data.hit_start = int(m.group(1))
m = self.__class__._re_align.match(line)
assert m!=None, "invalid match"
self.data.hit_aln = self.data.hit_aln + m.group(1)
class RecordParser(AbstractParser):
"""Parses compass results into a Record object (DEPRECATED).
"""
def __init__(self):
import warnings
warnings.warn("Bio.Compass._RecordParser is deprecated; please use the read() and parse() functions in this module instead", Bio.BiopythonDeprecationWarning)
self._scanner = _Scanner()
self._consumer = _Consumer()
def parse(self, handle):
if isinstance(handle, File.UndoHandle):
uhandle = handle
else:
uhandle = File.UndoHandle(handle)
self._scanner.feed(uhandle, self._consumer)
return self._consumer.data
class Iterator:
"""Iterate through a file of compass results (DEPRECATED)."""
def __init__(self, handle):
import warnings
warnings.warn("Bio.Compass.Iterator is deprecated; please use the parse() function in this module instead", Bio.BiopythonDeprecationWarning)
self._uhandle = File.UndoHandle(handle)
self._parser = RecordParser()
def next(self):
lines = []
while 1:
line = self._uhandle.readline()
if not line:
break
if line[0:4] == "Ali1" and lines:
self._uhandle.saveline(line)
break
lines.append(line)
if not lines:
return None
data = ''.join(lines)
return self._parser.parse(File.StringHandle(data))
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/Compass/__init__.py
|
Python
|
gpl-2.0
| 14,941
|
[
"Biopython"
] |
f42ca4d62116bf12852f8655a50aa72fbe8520b0e2630d960e79e4a1aa018712
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#For future compatibility with Python 3
"""
Contains the general class for DataObjects who may have mixed scalars, vectors, and higher dimensional
needs, depending on any combination of "index" dimensions (time, space, etc).
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import copy
import itertools
import pickle as pk
import numpy as np
import pandas as pd
import xarray as xr
# relative import for RAVEN, local import for unit tests
try:
from .DataObject import DataObject
except ValueError:
from DataObject import DataObject
import CsvLoader
from utils import utils, cached_ndarray, xmlUtils, mathUtils
#
#
#
#
class DataSet(DataObject):
"""
This class outlines the behavior for the basic in-memory DataObject, including support
for ND and ragged input/output variable data shapes. Other in-memory DataObjects are
specialized implementations of this class.
DataObject developed Oct 2017 with the intent to obtain linear performance from data objects when appending, over
thousands of variables and millions of samples. Wraps np.ndarray for collecting and uses xarray.Dataset
for final form. Subclasses are shortcuts (recipes) for this most general case.
The interface for these data objects is specific. The methods under "EXTERNAL API", "INITIALIZATION",
and "BUILTINS" are the only methods that should be called to interact with the object.
"""
### INITIALIZATION ###
# These are the necessary functions to construct and initialize this data object
def __init__(self):#, in_vars, out_vars, meta_vars=None, dynamic=False, var_dims=None,cacheSize=100,prealloc=False):
"""
Constructor.
@ In, None
@ Out, None
"""
DataObject.__init__(self)
self.name = 'DataSet'
self.type = 'DataSet'
self.types = None # list of type objects, for each realization entry
self.printTag = self.name
self.defaultDtype = object
self._scaleFactors = {} # mean, sigma for data for matching purposes
self._alignedIndexes = {} # dict {index:values} of indexes with aligned coordinates (so they are not in the collector, but here instead)
self._neededForReload = [self.sampleTag] # metavariables required to reload this data object.
def _readMoreXML(self,xmlNode):
"""
Initializes data object based on XML input
@ In, xmlNode, xml.etree.ElementTree.Element, input information
@ Out, None
"""
inp = DataSet.getInputSpecification()()
inp.parseNode(xmlNode)
# let parent read first
DataObject._readMoreXML(self,inp)
# any additional custom reading below
### EXTERNAL API ###
# These are the methods that RAVEN entities should call to interact with the data object
def addExpectedMeta(self, keys, params={}, overwrite=False):
"""
Registers meta to look for in realizations.
@ In, keys, set(str), keys to register
@ In, params, dict, optional, {key:[indexes]}, keys of the dictionary are the variable names,
values of the dictionary are lists of the corresponding indexes/coordinates of given variable
@ In, overwrite, bool, optional, if True then allow existing data while changing keys
@ Out, keys, list(str), extra keys that has been registered
"""
# TODO add option to skip parts of meta if user wants to
# remove already existing keys
keys = list(key for key in keys if key not in self.getVars()+self.indexes)
# if no new meta, move along
if len(keys) == 0:
return keys
# CANNOT add expected meta after samples are started
if not overwrite:
assert(self._data is None)
assert(self._collector is None or len(self._collector) == 0)
self._metavars.extend(keys)
self._orderedVars.extend(keys)
self.setPivotParams(params)
return keys
def addMeta(self, tag, xmlDict = None, node = None):
"""
Adds general (not pointwise) metadata to this data object. Can add several values at once, collected
as a dict keyed by target variables. Alternatively, a node can be added directly.
Data ends up being written as follows (see docstrings below for dict structure)
- A good default for 'target' is 'general' if there's not a specific target
<tag>
<target>
<scalarMetric>value</scalarMetric>
<scalarMetric>value</scalarMetric>
<vectorMetric>
<wrt>value</wrt>
<wrt>value</wrt>
</vectorMetric>
</target>
<target>
<scalarMetric>value</scalarMetric>
<vectorMetric>
<wrt>value</wrt>
</vectorMetric>
</target>
</tag>
@ In, tag, str, section to add metadata to, usually the data submitter (BasicStatistics, DataObject, etc)
@ In, xmlDict, dict, optional, data to change, of the form {target:{scalarMetric:value,scalarMetric:value,vectorMetric:{wrt:value,wrt:value}}}
@ In, node, xml.etree.ElementTree.Element, optional, already-filled node to add directly
@ Out, None
"""
# check either xmlDict OR node
## this should not be user-facing, so check through assertion
assert(not (xmlDict is None and node is None))
assert(not (xmlDict is not None and node is not None))
# if an xmlDict was provided ....
if xmlDict is not None:
# check if tag already exists
## TODO potentially slow if MANY top level tags
if tag not in self._meta.keys():
new = xmlUtils.StaticXmlElement(tag)
self._meta[tag] = new
destination = self._meta[tag]
for target in sorted(list(xmlDict.keys())):
for metric,value in sorted(list(xmlDict[target].items())):
# Two options: if a dict is given, means vectorMetric case
if isinstance(value,dict):
destination.addVector(target,metric,value)
# Otherwise, scalarMetric
else:
# sanity check to make sure suitable values are passed in
assert(mathUtils.isSingleValued(value))
destination.addScalar(target,metric,value)
# otherwise if a node was provided directly ...
else:
## TODO check replacement?
## TODO check structure?
self._meta[tag] = node
def addRealization(self, rlz):
"""
Adds a "row" (or "sample") to this data object.
This is the method to add data to this data object.
Note that rlz can include many more variables than this data object actually wants.
Before actually adding the realization, data is formatted for this data object.
@ In, rlz, dict, {var:val} format where
"var" is the variable name as a string,
"val" is a np.ndarray of values.
@ Out, None
"""
#########
# A note about what we're expecting in "rlz":
#
# Each entry in "rlz" should be a variable name, with exceptions described below.
# For each entry, the contents should be a numpy nd array:
# - For a scalar, it's a length-one array with a single value as {'pi': np.array([3.14])}
# - For a history, it's a single-dimensional array with the history values {'fib': np.array([0,1,1,2,3,5])}
# - For a high-dimensional object, it's a numpy ndarray with each dimension depending on a different index
# for example {'fibgrow': np.array([[0,1,1,2,3,5], [0,2,2,4,6,10]])}
# IN THIS CASE (for high-dimensional objects), a unique special node needs to be passed with the
# realization "_indexMap" that describes what order the numpy array dimensions show up in.
#
# For example, if my realization has one high-dimensional variable H(X, Y) depending on
# indices X and Y, and say that X has 3 values (0, 1, 2) and Y has 2 values (0.5, 1.5), then
# "rlz" should be as follows (with all [] as np.array([])):
# rlz = {'H': [[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]],
# 'X': [0, 1, 2],
# 'Y': [0.5, 1.5],
# '_indexMap': ['Y', 'X']}
# Note the order, H has shape (2, 3) so the first index is Y and the second is X.
    # A sanity check is that H.shape == tuple(rlz[index].size for index in rlz['_indexMap'])
#
# Yours truly, talbpw, May 2019
#########
# protect against back-changing realization
rlz = copy.deepcopy(rlz)
# if index map was included, remove that now before checking variables
indexMap = rlz.pop('_indexMap', None)
if indexMap is not None:
# keep only those parts of the indexMap that correspond to variables we care about.
indexMap = dict((key, val) for key, val in indexMap[0].items() if key in self.getVars()) # [0] because everything is nested in a list by now, it seems
# clean out entries that aren't desired
try:
rlz = dict((var, rlz[var]) for var in self.getVars() + self.indexes)
except KeyError as e:
self.raiseAWarning('Variables provided:',rlz.keys())
self.raiseAnError(KeyError,'Provided realization does not have all requisite values for object "{}": "{}"'.format(self.name,e.args[0]))
# check consistency, but make it an assertion so it can be passed over
if not self._checkRealizationFormat(rlz, indexMap=indexMap):
self.raiseAnError(SyntaxError,'Realization was not formatted correctly for "{}"! See warnings above.'.format(self.name))
# format the data
rlz = self._formatRealization(rlz)
## establish types if not done yet
self._setDataTypes(rlz)
# perform selective collapsing/picking of data
rlz = self._selectiveRealization(rlz)
## check alignment of indexes
self._checkAlignedIndexes(rlz)
# NB If no scalar entry is made, this construction fails. In that case,
    # instead of treating each dataarray as an object, numpy.asarray calls their asarray methods,
# unfolding them and making a full numpy array with more dimensions, instead of effectively
# a list of realizations, where each realization is effectively a list of xr.DataArray objects.
#
# To mitigate this behavior, we forcibly add a [0.0] entry to each realization, then exclude
    # it once the realizations are constructed. This seems like an inefficient option; others
# should be explored. - talbpaul, 12/2017
# newData is a numpy array of realizations,
# each of which is a numpy array of some combination of scalar values and/or xr.DataArrays.
    # This is because the cNDarray collector expects a LIST of realizations, not a single realization.
# Maybe the "append" method should be renamed to "extend" or changed to append one at a time.
## set realizations as a list of realizations (which are ordered lists)
newData = np.array(list(rlz[var] for var in self._orderedVars)+[0.0], dtype=object)
newData = newData[:-1]
# if data storage isn't set up, set it up
if self._collector is None:
self._collector = self._newCollector(width=len(rlz))
# append
self._collector.append(newData)
# if hierarchical, clear the parent as an ending
self._clearParentEndingStatus(rlz)
# reset scaling factors, kd tree
self._resetScaling()
def addVariable(self,varName,values,classify='meta',indices=None):
"""
Adds a variable/column to the data. "values" needs to be as long as self.size.
@ In, varName, str, name of new variable
@ In, values, np.array, new values (floats/str for scalars, xr.DataArray for hists)
@ In, classify, str, optional, either 'input', 'output', or 'meta'
@ In, indices, list, optional, list of indexes this variable depends on
@ Out, None
"""
if indices is None:
indices = []
# TODO might be removable
assert(isinstance(values,np.ndarray))
assert(len(values) == self.size), 'Expected {} entries in new variable but got {}!'.format(self.size, len(values))
assert(classify in ['input','output','meta'])
# if we're currently empty of data, then no new data to store (IMPORTANT: don't remove the assertion that len(values) == self.size above!)
if self.size != 0:
# first, collapse existing entries
self.asDataset()
labels = self._data[self.sampleTag]
column = self._collapseNDtoDataArray(values, varName, labels=labels)
# add to the dataset
self._data = self._data.assign(**{varName:column})
if classify == 'input':
self._inputs.append(varName)
elif classify == 'output':
self._outputs.append(varName)
else:
self._metavars.append(varName)
    # moved out of the "elif classify == 'output'" branch, since the metavars can also contain the
# time-dependent meta data.
if len(values) and type(values[0]) == xr.DataArray:
indexes = values[0].sizes.keys()
for index in indexes:
if index in self._pivotParams.keys():
self._pivotParams[index].append(varName)
else:
self._pivotParams[index]=[varName]
# if provided, set the indices for this variable
for index in indices:
if index in self._pivotParams:
self._pivotParams[index].append(varName)
else:
self._pivotParams[index] = [varName]
# register variable in order
self._orderedVars.append(varName)
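    # e.g., a minimal sketch (hypothetical metadata column; one value per sample):
    #   self.addVariable('ProbabilityWeight',
    #                    np.array([1.0] * self.size), classify='meta')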
def asDataset(self, outType='xrDataset'):
"""
Casts this dataObject as dictionary or an xr.Dataset depending on outType.
@ In, outType, str, optional, type of output object (xr.Dataset or dictionary).
@ Out, data, xr.Dataset or dictionary. If dictionary, a copy is returned; if dataset, then a reference is returned.
"""
data = None
if outType == 'xrDataset':
# return reference to the xArray
data = self._convertToXrDataset()
elif outType=='dict':
# return a dict (copy of data, no link to original)
data = self._convertToDict()
else:
# raise an error
self.raiseAnError(ValueError, 'DataObject method "asDataset" has been called with wrong '
'type: ' +str(outType) + '. Allowed values are: xrDataset, dict.')
return data
def checkIndexAlignment(self,indexesToCheck=None):
"""
Checks that all realizations share common coordinates along given indexes.
That is, assures data is not sparse, but full (no NaN entries).
@ In, indexesToCheck, list(str) or str or None, optional, indexes to check (or single index if string, or if None will check ALL indexes)
@ Out, same, bool, if True then alignment is good
"""
# format request so that indexesToCheck is always a list
if mathUtils.isAString(indexesToCheck):
indexesToCheck = [indexesToCheck]
elif indexesToCheck is None:
indexesToCheck = self.indexes[:]
else:
try:
indexesToCheck = list(indexesToCheck) # TODO what if this errs?
except TypeError:
self.raiseAnError('Unrecognized input to checkIndexAlignment! Expected list, string, or None, but got "{}"'.format(type(indexesToCheck)))
# check the alignment of each index by checking for NaN values in each slice
data = self.asDataset()
if data is None:
self.raiseAnError(ValueError, 'DataObject named "{}" is empty!'.format(self.name))
for index in indexesToCheck:
# check that index is indeed an index
assert(index in self.indexes)
# get a typical variable from set to look at
## NB we can do this because each variable within one realization must be aligned with the rest
## of the variables in that same realization, so checking one variable that depends on "index"
## is as good as checking all of them.
##TODO: This approach only works for our current data structure; for the ND case, this should be
## improved.
data = data[self._pivotParams[index][-1]]
# if any nulls exist in this data, this suggests missing data, therefore misalignment.
if data.isnull().sum() > 0:
self.raiseADebug('Found misalignment in index variable "{}".'.format(index))
return False
# if you haven't returned False by now, you must be aligned
return True
def constructNDSample(self, vals, dims, coords, name=None):
"""
Constructs a single realization instance (for one variable) from a realization entry.
@ In, vals, np.ndarray, should have shape of (len(coords[d]) for d in dims)
@ In, dims, list(str), names of dependent dimensions IN ORDER of appearance in vals, e.g. ['time','x','y']
@ In, coords, dict, {dimension:list(float)}, values for each dimension at which 'val' was obtained, e.g. {'time': [0.0, 0.5, 1.0]}
@ Out, obj, xr.DataArray, completed realization instance suitable for sending to "addRealization"
"""
# while simple, this API will allow easier extensibility in the future.
obj = xr.DataArray(vals, dims=dims, coords=coords)
# DataArray.rename is not in-place; reassign so the provided name is actually applied
obj = obj.rename(name)
return obj
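# Illustrative usage sketch only, with hypothetical values: building a single time-dependent entry
# suitable for inclusion in a realization passed to "addRealization".
#   t = np.array([0.0, 0.5, 1.0])
#   p = ds.constructNDSample(np.array([1.0, 1.5, 2.0]), dims=['time'], coords={'time': t}, name='pressure')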
def getDimensions(self,var=None):
"""
Provides the independent dimensions that this variable depends on.
To get all dimensions at once, use self.indexes property.
@ In, var, str, optional, name of variable (or None, or 'input', or 'output')
@ Out, dims, dict, {name:values} of independent dimensions
"""
# TODO add unit tests
# TODO allow several variables requested at once?
if var is None:
var = self.getVars()
elif var in ['input','output']:
var = self.getVars(var)
else:
var = [var]
dims = dict((v,list(key for key in self._pivotParams.keys() if v in self._pivotParams[key])) for v in var)
return dims
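# Illustrative usage sketch only: for a hypothetical history variable "pressure" depending on "time",
# this returns {'pressure': ['time']}; scalar variables map to an empty list.
#   dims = ds.getDimensions('pressure')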
def getMeta(self,keys=None,pointwise=False,general=False):
"""
Method to obtain entries in the metadata. If neither pointwise nor general, then returns an empty dict.
@ In, keys, list(str), optional, the keys (or main tag) to search for. If None, return all.
@ In, pointwise, bool, optional, if True then matches will be searched in the pointwise metadata
@ In, general, bool, optional, if True then matches will be searched in the general metadata
@ Out, meta, dict, key variables/xpaths to data object entries (column if pointwise, XML if general)
"""
# if keys is None, keys is all of them
if keys is None:
keys = []
if pointwise:
keys += self._metavars
if general:
keys += self._meta.keys()
gKeys = set([]) if not general else set(self._meta.keys()).intersection(set(keys))
pKeys = set([]) if not pointwise else set(self._metavars).intersection(set(keys))
# get any left overs
missing = list(set(keys).difference(gKeys.union(pKeys)))
if len(missing)>0:
missing = ', '.join(missing)
self.raiseAnError(KeyError,'Some requested keys could not be found in the requested metadata: ({})'.format(missing))
meta = {}
if pointwise:
# TODO slow key crawl
for var in self._metavars:
if var in pKeys:
# TODO if still collecting, an option to NOT call asDataset
meta[var] = self.asDataset()[var]
if general:
meta.update(dict((key,self._meta[key]) for key in gKeys))
return meta
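# Illustrative usage sketch only (hypothetical key names): pointwise metadata comes back as columns
# (xr.DataArray), while general metadata comes back as the stored XML-backed entries.
#   pointMeta = ds.getMeta(keys=['ProbabilityWeight'], pointwise=True)
#   allGeneral = ds.getMeta(general=True)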
def getData(self):
"""
Acquire the data for this dataset, as might go into an on-file database.
@ In, None
@ Out, data, xr.Dataset, sample data
@ Out, meta, dict, dictionary of xmlUtils.StaticXmlElement elements with meta information
"""
self.asDataset()
return self._data, self._meta
def getVars(self,subset=None):
"""
Gives list of variables that are part of this dataset.
@ In, subset, str, optional, if given can return 'input','output','meta' subset types
@ Out, getVars, list(str), list of variable names requested
"""
if subset is None:
return self.vars
subset = subset.strip().lower()
if subset == 'input':
return self._inputs[:]
elif subset == 'output':
return self._outputs[:]
elif subset == 'meta':
return self._metavars[:]
elif subset == 'indexes':
return self.indexes[:]
else:
self.raiseAnError(KeyError,'Unrecognized subset choice: "{}"'.format(subset))
def getVarValues(self,var):
"""
Returns the sampled values of "var"
@ In, var, str or list(str), name(s) of variable(s)
@ Out, res, xr.DataArray, samples (or dict of {var:xr.DataArray} if multiple variables requested)
"""
## NOTE TO DEVELOPER:
# This method will make a COPY of all the data into dictionaries.
# This is necessarily fairly cumbersome and slow.
# For faster access, consider using data.asDataset()['varName'] for one variable, or
# data.asDataset()[ ('var1','var2','var3') ] for multiple.
self.asDataset()
if self.isEmpty:
self.raiseAnError(ValueError, 'DataObject named "{}" is empty!'.format(self.name))
if mathUtils.isAString(var):
val = self._data[var]
#format as scalar
if len(val.dims) == 0:
res = self._data[var].item(0)
#format as dataarray
else:
res = self._data[var]
elif isinstance(var,list):
res = dict((v,self.getVarValues(v)) for v in var)
else:
self.raiseAnError(RuntimeError,'Unrecognized request type:',type(var))
return res
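# Illustrative usage sketch only (hypothetical variable names): a single name returns one
# xr.DataArray (or a scalar), while a list of names returns a dict keyed by variable.
#   heights = ds.getVarValues('height')
#   several = ds.getVarValues(['height', 'weight'])   # {'height': ..., 'weight': ...}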
def load(self, dataIn, style='netCDF', **kwargs):
"""
Reads this dataset from disk based on the format.
@ In, dataIn, str, path and name of file to read
@ In, style, str, optional, options are enumerated below
@ In, kwargs, dict, optional, additional arguments to pass to reading function
@ Out, None
"""
style = style.lower()
# if 'fileToLoad' is in kwargs, the file to read is actually taken from that File object
if 'fileToLoad' in kwargs.keys():
dataIn = kwargs['fileToLoad'].getAbsFile()
# load based on style for loading
if style == 'netcdf':
self._fromNetCDF(dataIn, **kwargs)
elif style == 'csv':
# make sure we don't include the "csv"
if dataIn.endswith('.csv'):
dataIn = dataIn[:-4]
self._fromCSV(dataIn,**kwargs)
elif style == 'dict':
self._fromDict(dataIn,**kwargs)
elif style == 'dataset':
self._fromXarrayDataset(dataIn)
# TODO dask
else:
self.raiseAnError(NotImplementedError,'Unrecognized read style: "{}"'.format(style))
# after loading, set or reset scaling factors
self._setScalingFactors()
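# Illustrative usage sketch only, with hypothetical file names: "style" selects the reader, and for
# CSV the trailing ".csv" may be included or omitted.
#   ds.load('results.csv', style='csv')
#   ds.load('results.nc', style='netCDF')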
# @profile
def realization(self, index=None, matchDict=None, noMatchDict=None, tol=1e-15, unpackXArray=False, asDataSet = False, first = True):
"""
Method to obtain a realization from the data, either by index or matching value.
Either "index" or one of ("matchDict", "noMatchDict") must be supplied.
If matchDict and no match is found, will return (len(self),None) after the pattern of numpy, scipy
@ In, index, int, optional, number of row to retrieve (by index, not be "sample")
@ In, matchDict, dict, optional, {key:val} to search for matches
@ In, noMatchDict, dict, optional, {key:val} to search for antimatches (vars should NOT match vals within tolerance)
@ In, tol, float, optional, tolerance to which match should be made
@ In, unpackXArray, bool, optional, True if the coordinates of the xarray variables must be exposed in the dict (e.g. if P(t) => {P:ndarray, t:ndarray}) (valid only for dataset)
@ In, asDataSet, bool, optional, return realization from the data as a DataSet
@ In, first, bool, optional, return the first matching realization only?
If False, it returns a list of all matching realizations. Default: True
@ Out, (index, rlz), tuple ( (int, dict) or (list(int),list(dict)) ), where:
first element:
if first: int, index where match was found OR size of data if not found
else : list, list of indices where matches were found OR size of data if not found
second element:
if first:
if asDataSet: xarray.Dataset, first matching realization as xarray.Dataset OR None if not found
else : dict, first matching realization as {var:value} OR None if not found
else :
if asDataSet: xarray.Dataset, all matching realizations as xarray.Dataset OR None if not found
else : list, list of matching realizations as [{var:value1}, {var:value2}, ...]
"""
# TODO convert input space to KD tree for faster searching -> XArray.DataArray has this built in?
## first, check that some direction was given, either an index or a match to find
if (index is None and (matchDict is None and noMatchDict is None)) or (index is not None and (matchDict is not None or noMatchDict is not None)):
self.raiseAnError(TypeError,'Either "index" OR ("matchDict" and/or "noMatchDict") (not both) must be specified to use "realization!"')
numInData = len(self._data[self.sampleTag]) if self._data is not None else 0
numInCollector = len(self._collector) if self._collector is not None else 0
## next, depends on if we're doing an index lookup or a realization match
if index is not None:
# traditional request: nonnegative integers
if index >= 0:
## if index past the data, try the collector
if index > numInData-1:
## if past the data AND the collector, we don't have that entry
if index > numInData + numInCollector - 1:
self.raiseAnError(IndexError,'{}: Requested index "{}" but only have {} entries (zero-indexed)!'.format(self.name,index,numInData+numInCollector))
## otherwise, take from the collector
else:
rlz = self._getRealizationFromCollectorByIndex(index - numInData)
## otherwise, take from the data
else:
rlz = self._getRealizationFromDataByIndex(index, unpackXArray)
# handle "-" requests (counting from the end): first end of collector, or if not then end of data
else:
# caller is requesting so many "from the end", so work backwards
## if going back further than what's in the collector ...
if abs(index) > numInCollector:
## if going back further than what's in the data too, then we don't have that entry
if abs(index) > numInData + numInCollector:
self.raiseAnError(IndexError,'Requested index "{}" but only have {} entries!'.format(index,numInData+numInCollector))
## otherwise, grab the requested index from the data
else:
rlz = self._getRealizationFromDataByIndex(index + numInCollector, unpackXArray)
## otherwise, grab the entry from the collector
else:
rlz = self._getRealizationFromCollectorByIndex(index)
# add index map where necessary
rlz = self._addIndexMapToRlz(rlz)
return rlz
## END select by index
## START collect by matching realization
else: # matchDict must not be None
# if nothing in data, try collector
if numInData == 0:
# if nothing in data OR collector, we can't have a match
if numInCollector == 0:
return 0, None
# otherwise, get it from the collector
else:
index, rlz = self._getRealizationFromCollectorByValue(matchDict, noMatchDict, tol=tol, first=first)
# otherwise, first try to find it in the data
else:
index, rlz = self._getRealizationFromDataByValue(matchDict, noMatchDict, tol=tol, first=first, unpackXArray=unpackXArray)# should we add options=options to this one as well?
# if no match found in data, try in the collector (if there's anything in it)
if rlz is None:
if numInCollector > 0:
index, rlz = self._getRealizationFromCollectorByValue(matchDict, noMatchDict, tol=tol, first=first)
# if as Dataset convert it
if asDataSet:
if not isinstance(rlz, xr.Dataset):
rlzs = rlz if type(rlz).__name__ == "list" else [rlz]
rlzs = [self._addIndexMapToRlz(rl) for rl in rlzs]
dims = self.getDimensions()
for i, rl in enumerate(rlzs):
d = {k: {'dims': tuple(dims[k]), 'data': v} for (k, v) in rl.items()}
rlzs[i] = xr.Dataset.from_dict(d)
if len(rlzs) > 1:
# concatenate just in case there are multiple realizations
rlz = xr.concat(rlzs, dim=self.sampleTag)
else:
# the following ".copy(deep=True)" is required because of a bug in expand_dims
# see https://github.com/pydata/xarray/issues/2891
# FIXME: remove ".copy(deep=True)" once xarray mainstream fixes it
rlz = rlzs[0].expand_dims(self.sampleTag).copy(deep=True)
return index, rlz
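# Illustrative usage sketch only, with hypothetical variables: look up either by position or by
# matching values within a tolerance (matching returns (len(self), None) when nothing is found).
#   idx, rlz = ds.realization(index=0)
#   idx, rlz = ds.realization(matchDict={'x': 1.0}, tol=1e-8)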
def remove(self,variable):
"""
Used to remove either a realization or a variable from this data object.
@ In, variable, str, name of "column" to remove
@ Out, None
"""
if self.size == 0:
self.raiseAWarning('Called "remove" on DataObject, but it is empty!')
return
noData = self._data is None or len(self._data) == 0
noColl = self._collector is None or len(self._collector) == 0
# remove from self._data
if not noData:
self._data = self._data.drop(variable)
# remove from self._collector
if not noColl:
varIndex = self._orderedVars.index(variable)
self._collector.removeEntity(varIndex)
# remove references to variable in lists
self._orderedVars.remove(variable)
# TODO potentially slow lookups
for varlist in [self._inputs,self._outputs,self._metavars]:
if variable in varlist:
varlist.remove(variable)
# remove from pivotParams, and remove any indexes without keys
for pivot in self.indexes:
if variable in self._pivotParams[pivot]:
self._pivotParams[pivot].remove(variable)
if len(self._pivotParams[pivot]) == 0:
del self._pivotParams[pivot]
# if in self._data, clear the index
if not noData and pivot in self._data.dims:
del self._data[pivot]
# if in aligned indexes, remove it there as well
if pivot in self._alignedIndexes:
del self._alignedIndexes[pivot]
# TODO remove references from general metadata?
if self._scaleFactors is not None:
self._scaleFactors.pop(variable,None)
#either way reset kdtree
self.inputKDTree = None
def renameVariable(self,old,new):
"""
Changes the name of a variable from "old" to "new".
@ In, old, str, old name
@ In, new, str, new name
@ Out, None
"""
# determine where the old variable was
isInput = old in self._inputs
isOutput = old in self._outputs
isMeta = old in self._metavars
isIndex = old in self.indexes
# make the changes to the variable listings
if isInput:
self._inputs = list(a if (a != old) else new for a in self._inputs)
if isOutput:
self._outputs = list(a if (a != old) else new for a in self._outputs)
if isMeta:
self._metavars = list(a if (a != old) else new for a in self._metavars)
if isIndex:
# change the pivotParameters listing, as well as the sync/unsynced listings
self._pivotParams[new] = self._pivotParams.pop(old)
if old in self._alignedIndexes.keys():
self._alignedIndexes[new] = self._alignedIndexes.pop(old)
else:
self._orderedVars = list(a if a != old else new for a in self._orderedVars)
# if in/out/meta, change allvars (TODO wastefully already done if an unaligned index)
if isInput or isOutput or isMeta:
self._orderedVars = list(a if a != old else new for a in self._orderedVars)
# change scaling factor entry
if old in self._scaleFactors:
self._scaleFactors[new] = self._scaleFactors.pop(old)
if self._data is not None:
self._data = self._data.rename({old:new})
def reset(self):
"""
Sets this object back to its initial state, keeping only the lists of the variables but removing
all of the variable values.
@ In, None
@ Out, None
"""
self._data = None
self._collector = None
self._meta = {}
self._alignedIndexes = {}
self._scaleFactors = {}
def setData(self, data, meta):
"""
Directly set the data for this data object, such as from an on-file database.
@ In, data, xr.Dataset, structured data set including the sampleID with realizations
@ In, meta, dict, dictionary of xmlUtils.StaticXmlElement elements with meta information
@ Out, None
"""
assert isinstance(data, xr.Dataset)
self._collector = None
self._data = data
self._meta = meta
# if we have meta information, we can reconstruct the IO space for this DO
if 'DataSet' in meta:
self._setStructureFromMetaXML(meta['DataSet'])
# otherwise, we don't know where anything goes, so dump it all in output
else:
self._pivotParams = {}
# index map
for var in data:
indices = list(data[var].coords.keys())
for idx in indices:
if idx == self.sampleTag:
continue
if idx not in self._pivotParams:
self._pivotParams[idx] = []
self._pivotParams[idx].append(var)
self._outputs.append(var)
self._metavars = self._outputs[:]
def sliceByIndex(self,index):
"""
Returns list of realizations at "snapshots" along dimension "index".
For example, if 'index' is 'time', then returns cross-sectional slices of the dataobject at each recorded 'time' index value.
@ In, index, str, name of index along which to obtain slices
@ Out, slices, list, list of xr.Dataset slices.
"""
data = self.asDataset()
# if empty, nothing to do
if self._data is None or len(self._data) == 0:
self.raiseAWarning('Tried to return sliced data, but DataObject is empty!')
return []
# assert that index is indeed an index
if index not in self.indexes + [self.sampleTag]:
self.raiseAnError(IOError,'Requested slices along "{}" but that variable is not an index! Options are: {}'.format(index,self.indexes))
numIndexCoords = len(data[index])
slices = list(data.isel(**{index:i}) for i in range(numIndexCoords))
# NOTE: The slice may include NaN if a variable does not have a value along a different index for this snapshot along "index"
return slices
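# Illustrative usage sketch only: slicing along a hypothetical "time" index yields one xr.Dataset
# per recorded time value; slicing along self.sampleTag yields one xr.Dataset per realization.
#   snapshots = ds.sliceByIndex('time')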
def write(self, fileName, style='netCDF', **kwargs):
"""
Writes this dataset to disk based on the format.
@ In, fileName, str, path and name of file to write
@ In, style, str, optional, options are enumerated below
@ In, kwargs, dict, optional, additional arguments to pass to writing function
Includes: firstIndex, int, optional, if included then is the realization index that writing should start from (implies appending instead of rewriting)
@ Out, index, int, index of latest rlz to be written, for tracking purposes
"""
self.asDataset() #just in case there is stuff left in the collector
if style.lower() == 'netcdf':
self._toNetCDF(fileName, **kwargs)
elif style.lower() == 'csv':
if len(self) == 0:
self.raiseAWarning('Nothing to write to CSV! Checking metadata ...')
else:
#first write the CSV
firstIndex = kwargs.get('firstIndex',0)
self._toCSV(fileName, start=firstIndex, **kwargs)
# then the metaxml
if len(self._meta):
self._toCSVXML(fileName,**kwargs)
# TODO dask?
else:
self.raiseAnError(NotImplementedError,'Unrecognized write style: "{}"'.format(style))
if not self.hierarchical and 'RAVEN_isEnding' in self.getVars():
return len(self._data.where(self._data['RAVEN_isEnding']==True,drop=True)['RAVEN_isEnding'])
else:
return len(self) # so that other entities can track which realization we've written
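# Illustrative usage sketch only, with a hypothetical file name: CSV writing also produces a
# companion XML with the metadata, and the returned index can be used to append on later writes.
#   lastWritten = ds.write('results', style='csv')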
### BUILTINS AND PROPERTIES ###
# These are special commands that RAVEN entities can use to interact with the data object
def __len__(self):
"""
Overloads the len() operator.
@ In, None
@ Out, int, number of samples in this dataset
"""
return self.size
@property
def isEmpty(self):
"""
@ In, None
@ Out, boolean, True if the dataset is empty otherwise False
"""
return self.size == 0
@property
def vars(self):
"""
Property to access all the pointwise variables being controlled by this data object.
As opposed to "self._orderedVars", returns the variables clustered by subset (inp, out, meta) instead of order added
@ In, None
@ Out, vars, list(str), variable names list
"""
return self._inputs + self._outputs + self._metavars
@property
def size(self):
"""
Property to access the amount of data in this data object.
@ In, None
@ Out, size, int, number of samples
"""
s = 0 # counter for size
# from collector
s += self._collector.size if self._collector is not None else 0
# from data
try:
s += len(self._data[self.sampleTag]) if self._data is not None else 0
except KeyError: #sampleTag not found, so it _should_ be empty ...
s += 0
return s
@property
def indexes(self):
"""
Property to access the independent axes in this problem
@ In, None
@ Out, indexes, list(str), independent index names (e.g. ['time'])
"""
return list(self._pivotParams.keys())
### INTERNAL USE FUNCTIONS ###
def _addIndexMapToRlz(self, rlz):
"""
Adds the special key _indexMap along with index mapping
for any N-dimensional entries in "rlz", if N > 1.
@ In, rlz, dict, single-sample realization
@ Out, rlz, dict, same dict with _indexMap added if necessary
"""
if rlz is None:
return rlz
# an index map is needed if you're not a scalar; i.e. you depend on at least 1 non-null index
need = any(val.size > 1 for val in rlz.values() if hasattr(val, 'size'))
if need:
rlz['_indexMap'] = self.getDimensions()
return rlz
def _changeVariableValue(self,index,var,value):
"""
Changes the value of a variable for a particular realization in the data object, in collector or data.
Should only rarely be called! Adding or removing data is recommended.
For now, only works for scalar variables.
@ In, index, int, index of realization to be modified
@ In, var, str, name of variable to change
@ In, value, float or int or str, new value for entry
@ Out, None
"""
assert(var in self._orderedVars)
assert(mathUtils.isSingleValued(value)) #['float','str','int','unicode','bool'])
lenColl = len(self._collector) if self._collector is not None else 0
lenData = len(self._data[self.sampleTag]) if self._data is not None else 0
# if it's in the data ...
if index < lenData:
self._data[var].values[index] = value
# if it's in the collector ...
elif index < lenColl + lenData:
self._collector[index][self._orderedVars.index(var)] = value
else:
self.raiseAnError(IndexError,'Requested value change for realization "{}", which is past the end of the data object!'.format(index))
def _checkAlignedIndexes(self,rlz,tol=1e-15):
"""
Checks to see if indexes should be stored as "aligned" or if they need to be stored distinctly.
If store distinctly for the first time, adds a variable to the collector columns instead of storing it as
an aligned index.
@ In, rlz, dict, formatted realization with either singular or np arrays as values
@ In, tol, float, optional, matching tolerance
@ Out, None
"""
for index in self.indexes:
# if it's aligned so far, check if it still is
if index in self._alignedIndexes.keys():
# first, if lengths don't match, they're not aligned.
# TODO there are concerns this check may slow down runs; it should be profiled along with other bottlenecks to optimize our efforts.
if len(rlz[index]) != len(self._alignedIndexes[index]):
closeEnough = False
else:
# "close enough" if float/int, otherwise require exactness
if mathUtils.isAFloatOrInt(rlz[index][0]):
closeEnough = all(np.isclose(rlz[index],self._alignedIndexes[index],rtol=tol))
else:
closeEnough = all(rlz[index] == self._alignedIndexes[index])
# if close enough, then keep the aligned values; otherwise, take action
if not closeEnough:
dtype = rlz[index].dtype
# TODO add new column to collector, propagate values up to (not incl) current rlz
self.raiseAWarning('A mismatch in time scales has been found between consecutive realizations. Consider synchronizing before doing any postprocessing!')
# TODO if self._data is not none!
if self._collector is not None:
aligned = self._alignedIndexes.pop(index)
values = [aligned] * len(self._collector)
self._collector.addEntity(values)
self._orderedVars.append(index)
# otherwise, they are close enough, so no action needs to be taken
# if index is not among the aligned, check if it is already in the collector/data
else:
# if we don't have any samples in the collector, congratulations, you're aligned with yourself
if self._collector is None or len(self._collector) == 0:
try:
self._alignedIndexes[index] = rlz.pop(index)
except KeyError:
# it's already gone; this can happen if this pivot parameter is only being used to collapse data (like in PointSet case)
pass
# otherwise, you're misaligned, and have been since before this realization, no action.
return
def _checkRealizationFormat(self, rlz, indexMap=None):
"""
Checks that a passed-in realization has a format acceptable to data objects.
Data objects require a CSV-like result with either float or np.ndarray instances.
@ In, rlz, dict, realization with {key:value} pairs.
@ Out, okay, bool, True if acceptable or False if not
"""
# check that indexMap and expected indexes line up
## This check can be changed when we can automatically collapse dimensions intelligently
## NOTE that if this dataset is non-indexed, don't check index alignment
if indexMap is not None and self.indexes:
okay = True # track if the indexes provided are okay
mapIndices = set() # these are the indices provided by the realization
mapIndices.update(*list(indexMap.values()))
# see if the provided indices match the required indices for this data object
if mapIndices != set(self.indexes):
extra = mapIndices-set(self.indexes) # extra indices not expected
missing = set(self.indexes) - mapIndices # indices expected but not provided
if extra:
# perhaps someday we can collapse dimensions intelligently, but for now, this is an error state
okay = False # if there are extra indices listed that aren't part of the DataObject, we don't handle this yet
elif missing:
# if the variables depending on an index ONLY depend on that one index, we can infer the
# structure and allow the contributing source to not explicitly provide the index map.
# Should we be, though? Should this be an action of the CSV loader? a Realization class?
for missed in missing:
# the missing index has to be in the provided realization for us to infer the structure and values
if missed in rlz:
# update the mapping for each variable that is meant to depend on this index
for var in self._pivotParams[missed]:
if var in indexMap:
# this variable already has dependencies, but not the missed dependency, so we
# cannot infer the structure without help from the data source
okay = False
break
else:
indexMap[var] = [missed]
if not okay:
break
else:
okay = False
break
if not okay:
# update extra/missing in case there have been changes
extra = mapIndices-set(self.indexes)
missing = set(self.indexes) - mapIndices
self.raiseAWarning('Realization indexes do not match expected indexes!\n',
f'Extra from realization: {extra}\n',
f'Missing from realization: {missing}')
return False
if not isinstance(rlz, dict):
self.raiseAWarning('Realization is not a "dict" instance!')
return False
for var, value in rlz.items():
#if not isinstance(value,(float,int,unicode,str,np.ndarray)): TODO someday be more flexible with entries?
if not isinstance(value, (np.ndarray, xr.DataArray)):
self.raiseAWarning('Variable "{}" is not an acceptable type: "{}"'.format(var, type(value)))
return False
# check if index-dependent variables have matching shapes
# FIXME: this check will not work in case of variables depending on multiple indexes.
# When this need comes, we will change this check(alfoa)
if self.indexes:
dims = self.getDimensions(var)[var] # NOTE we assume this is always in the same order, which should be true.
# if this variable depends on no dimensions, no check needed
# if this variable depends on one dimension, check length trivially
if len(dims) == 1:
dim = dims[0]
correctShape = rlz[dim].shape
if rlz[var].shape != correctShape:
self.raiseAWarning(f'Variable "{var}" with shape {rlz[var].shape} ' +
f'is not consistent with its index "{dim}" with shape {correctShape} for DataSet "{self.name}"!')
return False
# if this variable depends on multiple dimensions, check shape
elif len(dims) > 1:
# the model should have provided an index map for the shaping of the variables
if indexMap is None:
self.raiseAWarning('No variable index map "_indexMap" was provided in model realization, but ' +
'a multidimensional variable ("{}") is expected!'.format(var))
return False
try:
rlzDimOrder = list(indexMap[var]) # want a list for the equality comparison below
# if the realization order wasn't provided, return a useful error describing the problem
except KeyError:
self.raiseAWarning('Variable "{}" is multidimensional, but no entry '.format(var) +
'was given in the "_indexMap" for "{}" in the model realization!'.format(var))
self.raiseAWarning('Received entries for: {}'.format(list(indexMap.keys())))
return False
# check that the realization is consistent
## TODO assumes indexes are single-dimensional, seems like a safe assumption for now
correctShape = tuple(rlz[idx].size for idx in rlzDimOrder)
if rlz[var].shape != correctShape:
self.raiseAWarning(('Variable "{}" with shape {} '+
'is not consistent with its indices "{}" with shapes {}!')
.format(var,
rlz[var].shape,
rlzDimOrder,
correctShape))
return False
# re-order the provided realization to fit the expected dims order, if needed
if rlzDimOrder != dims:
sourceOrder = range(len(dims))
destOrder = list(dims.index(dim) for dim in rlzDimOrder)
rlz[var] = np.moveaxis(rlz[var], sourceOrder, destOrder)
assert rlz[var].shape == tuple(rlz[idx].size for idx in dims)
# all conditions for failing formatting were not met, so formatting is fine
return True
def _clearAlignment(self):
"""
Clears the alignment tracking for the collector, and removes columns from it if necessary
@ In, None
@ Out, None
"""
# get list of indexes that need to be removed since we're starting over with alignment
toRemove = list(self._orderedVars.index(var) for var in self.indexes if (var not in self._alignedIndexes.keys()
and var in self._orderedVars))
# sort them in reverse order so we don't screw up indexing while removing
toRemove.sort(reverse=True)
for index in toRemove:
self._orderedVars.pop(index)
self._collector.removeEntity(index)
self._alignedIndexes = {}
def _clearParentEndingStatus(self,rlz):
"""
If self is hierarchical, then set the parent of the given realization "rlz" to False.
@ In, rlz, dict, realization (from addRealization, already formatted)
@ Out, None
"""
# TODO set global status of 'parentID' instead of check every time
idVar = 'RAVEN_parentID'
endVar = 'RAVEN_isEnding'
if idVar in self.getVars():
# get the parent ID
parentID = rlz[idVar]
# if root or parentless, nothing to do
if parentID == "None":
return
# otherwise, find the index of the match
idx,match = self.realization(matchDict={'prefix':parentID})
self._changeVariableValue(idx,endVar,False)
def _collapseNDtoDataArray(self,data,var,labels=None,dtype=None):
"""
Converts a row of numpy samples (float or xr.DataArray) into a single DataArray suitable for a xr.Dataset.
@ In, data, np.ndarray, array of either float or xr.DataArray; array must be single-dimension
@ In, var, str, name of the variable being acted on
@ In, labels, list, optional, list of labels to use for collapsed array under self.sampleTag title
@ In, dtype, type, optional, type from _getCompatibleType to cast data as
@ Out, DataArray, xr.DataArray, single dataarray object
"""
assert(isinstance(data,np.ndarray))
assert(len(data.shape) == 1)
# set up sampleTag values
if labels is None:
labels = range(len(data))
else:
assert(len(labels) == len(data))
# find first non-None entry, and get its type if needed
dataType = type(None)
i = -1
while dataType is type(None):
i += 1
try:
dataType = type(data[i])
except IndexError:
self.raiseADebug('Could not find a type for "{}"; using None.'.format(var))
dataType = type(None)
i = 0
break
# if "type" predetermined, override it (but we still needed "i" so always do the loop above)
# TODO this can be sped up probably, by checking the "type" directly with dtype; but we ALSO need to know if
# it's a history or not, so we need to check the first non-NaN entry....
if dtype is not None:
dataType = dtype
# method = 'once' # see below, parallelization is possible but not implemented
# first case: single entry per node: floats, strings, ints, etc
if mathUtils.isSingleValued(data[i]):
data = np.array(data,dtype=dataType)
array = xr.DataArray(data,
dims=[self.sampleTag],
coords={self.sampleTag:labels},
name=var) # THIS is very fast
# second case: ND set (history set or higher dimension) --> CURRENTLY should be unused
elif type(data[i]) == xr.DataArray:
# two methods: all at "once" or "split" into multiple parts. "once" is faster, but not parallelizable.
# ONCE #
#if method == 'once':
val = xr.concat(data, self.sampleTag)
val.coords[self.sampleTag] = labels
# SPLIT # currently unused, but could be for parallel performance
#elif method == 'split':
# chunk = 150
# start = 0
# N = len(data)
# vals = []
# # TODO can be parallelized
# while start < N-1:
# stop = min(start+chunk+1,N)
# ival = dict((i,data[i,v]) for i in range(start,stop))
# ival = xr.Dataset(data_vars=ival)
# ival = ival.to_array(dim=self.sampleTag) # TODO does this end up indexed correctly?
# vals.append(ival)
# start = stop
# val = xr.concat(vals,dim=self.sampleTag)
# END #
array = val
else:
self.raiseAnError(TypeError,'Unrecognized data type for var "{}": "{}"'.format(var, type(data[i])))
# DataArray.rename is not in-place; reassign so the variable name is actually applied
array = array.rename(var)
return array
@staticmethod
def _clearDuplicates(toClear):
"""
Clears out duplicate coordinates from a xr.DataArray
@ In, toClear, xr.DataArray, data array to remove duplicate coordinates
@ Out, toClear, xr.DataArray, new data array with no duplicates
"""
for dim in toClear.coords.dims:
toClear = toClear.isel({dim:np.unique(toClear[dim], return_index=True)[1]})
return toClear
def _convertArrayListToDataset(self,array,action='return'):
"""
Converts a 1-D array of xr.DataArrays into a xr.Dataset, then takes action on self._data:
action=='replace': replace self._data with the new dataset
action=='extend' : add new dataset to self._data using merge
action=='return' : (default) return new dataset
@ In, array, list(xr.DataArray), list of variables as samples to turn into dataset
@ In, action, str, optional, can be used to specify the action to take with the new dataset
@ Out, new, xr.Dataset, single data entity
"""
try:
new = xr.Dataset(array)
except ValueError as e:
self.raiseAnError(RuntimeError,'While trying to create a new Dataset, a variable has itself as an index!'+\
' Error: ' +str(e))
# if "action" is "extend" but self._data is None, then we really want to "replace".
if action == 'extend' and self._data is None:
action = 'replace'
if action == 'return':
return new
elif action == 'replace':
self._data = new
# general metadata included if first time
# determine dimensions for each variable
dimsMeta = {}
for name, var in new.variables.items():
if name not in self._inputs + self._outputs + self._metavars:
continue
dims = list(var.dims)
# don't list if only entry is sampleTag
if dims == [self.sampleTag]:
continue
# even then, don't list sampleTag
try:
dims.remove(self.sampleTag)
except ValueError:
pass #not there, so didn't need to remove
dimsMeta[name] = ','.join(dims)
# store sample tag, IO information, coordinates
self.addMeta('DataSet',{'dims':dimsMeta})
self.addMeta('DataSet',{'general':{'sampleTag':self.sampleTag,
'inputs':','.join(self._inputs),
'outputs':','.join(self._outputs),
'pointwise_meta':','.join(sorted(self._metavars)),
'datasetName':self.name
}})
self._data.attrs = self._meta
elif action == 'extend':
# TODO compatibility check!
# TODO Metadata update?
# merge can change dtypes b/c no NaN int type: self._data.merge(new,inplace=True)
self._data = xr.concat([self._data,new],dim=self.sampleTag)
else:
self.raiseAnError(RuntimeError,'action "{}" was not an expected value for converting array list to dataset!'
.format(action))
# regardless if "replace" or "return", set up scaling factors
self._setScalingFactors()
return new
def _convertFinalizedDataRealizationToDict(self,rlz, unpackXarray=False):
"""
After collapsing into xr.Dataset, all entries are stored as xr.DataArrays.
This converts them into a dictionary like the realization sent in.
@ In, rlz, dict(varname:xr.DataArray), "row" from self._data
@ In, unpackXarray, bool, unpack XArray coordinates in numpy arrays (it assumes that the coordinates are consistent among the data)
@ Out, new, dict(varname:value), where "value" could be singular (float,str) or xr.DataArray
"""
# TODO this has a lot of looping and might be slow for many variables. Bypass or rewrite where possible.
new = {}
for k,v in rlz.items():
# if singular, eliminate dataarray container
if len(v.dims)==0:
new[k] = v.item(0)
# otherwise, trim NaN entries before returning
else:
for dim in v.dims:
v = v.dropna(dim)
if unpackXarray:
new[dim] = v.coords[dim].values
new[k] = v if not unpackXarray else v.values
return new
def _convertToDict(self):
"""
Casts this dataObject as dictionary.
@ In, None
@ Out, asDataset, xr.Dataset or dict, data in requested format
"""
if self.isEmpty:
self.raiseAnError(ValueError, 'DataObject named "{}" is empty!'.format(self.name))
self.raiseAWarning('DataObject._convertToDict can be a slow operation and should be avoided where possible!')
# container for all necessary information
dataDict = {}
# supporting data
dataDict['dims'] = self.getDimensions()
dataDict['metadata'] = self.getMeta(general=True)
dataDict['type'] = self.type
dataDict['inpVars'] = self.getVars('input')
dataDict['outVars'] = self.getVars('output')
dataDict['numberRealizations'] = self.size
dataDict['name'] = self.name
dataDict['metaKeys'] = self.getVars('meta')
# main data
if self.type == "PointSet":
## initialize with np arrays of objects
dataDict['data'] = dict((var,np.zeros(self.size,dtype=object)) for var in self.vars)
for var in self.vars:
dataDict['data'][var] = self.asDataset()[var].values
else:
dataDict['data'] = dict((var,np.zeros(self.size,dtype=object)) for var in self.vars+self.indexes)
# need to remove NaNs, so loop over slices
for s,rlz in enumerate(self.sliceByIndex(self.sampleTag)):
## loop over realizations to get distinct values without NaNs
for var in self.vars:
# how we get and store variables depends on the dimensionality of the variable
dims=self.getDimensions(var)[var]
# if scalar (no dims and not an index), just grab the values
if len(dims)==0 and var not in self.indexes:
dataDict['data'][var] = self.asDataset()[var].values
continue
# get data specific to this var for this realization (slice)
data = rlz[var]
# need to drop indexes for which no values are present
for index in dims:
data = data.dropna(index)
dataDict['data'][index][s] = data[index].values
dataDict['data'][var][s] = data.values
return dataDict
def _convertToXrDataset(self):
"""
Casts this dataobject as an xr.Dataset and returns a REFERENCE to the underlying data structure.
Functionally, typically collects the data from self._collector and places it in self._data.
Efficiency note: this is the slowest part of typical data collection.
@ In, None
@ Out, xarray.Dataset, all the data from this data object.
"""
# TODO make into a protected method? Should it be called from outside?
# if we have collected data, collapse it
if self._collector is not None and len(self._collector) > 0:
# keep track of the first sampling index, if we already have some samples (otherwise 0)
firstSample = int(self._data[self.sampleTag][-1])+1 if self._data is not None else 0
# storage array for each variable's xr.DataArray with all rlz data from every rlz
arrays = {}
# loop over variables IN ORDER of collector storage to collapse data into nice xr.DataArray of realization data
for v, var in enumerate(self._orderedVars):
# only converting variables, so ignore indexes (they'll be used by the variables)
if var in self.indexes:
continue
# gather the data type from first realization: if np.array, it's ND; otherwise singular
dtype = self.types[v]
if isinstance(self._collector[0,v],np.ndarray):
# for each index, determine if all aligned; make data arrays as required
dims = self.getDimensions(var)[var]
# make sure "dims" isn't polluted
assert(self.sampleTag not in dims)
# loop over indexes (just one for now?) and create data
## SPECIAL CASE: if only histories/scalars, and histories are aligned, we can shortcut this
if len(dims) == 1 and dims[0] in self._alignedIndexes:
# since aligned, grab the data into one large chunk and make a datarray with all rlzs
data = np.vstack(self._collector[:,v]).astype(dtype)
coords = {dims[0]: self._alignedIndexes[dims[0]]}
#coords[self.sampleTag] = np.arange(len(self._collector))
arrays[var] = self.constructNDSample(data, dims=[self.sampleTag]+dims, coords=coords)
else:
for r in range(len(self._collector)):
values = self._collector[r, v]
dtype = self._getCompatibleType(values[0])
values = np.array(values,dtype=dtype)
coords = {}
for idx in dims:
val = self._alignedIndexes.get(idx, None)
if val is None:
val = self._collector[r, self._orderedVars.index(idx)]
coords[idx] = val
self._collector[r][v] = self.constructNDSample(values, dims, coords, name=str(r))
# then collapse these entries into a single datarray
arrays[var] = self._collapseNDtoDataArray(self._collector[:,v], var, dtype=dtype)
# if it's a dataarray, then that's old-style histories, no-can do right now
elif isinstance(self._collector[0,v],xr.DataArray):
self.raiseAnError(NotImplementedError,'History entries should be numpy arrays, not data arrays!')
# if not ND, then it's a simple data array construction
else:
try:
varData = np.array(self._collector[:,v],dtype=dtype)
except ValueError as e:
# infinite/missing data can't be cast to anything but floats or objects, as far as I can tell
if dtype != float and pd.isnull(self._collector[:,v]).sum() != 0:
self.raiseAWarning('NaN detected, but no safe casting NaN to "{}" so switching to "object" type. '.format(dtype) \
+ ' This may cause problems with other entities in RAVEN.')
varData = self._collector[:,v][:]
dtype = object
# otherwise, let error be raised.
else:
raise e
# create single dataarrays
arrays[var] = self._collapseNDtoDataArray(varData,var,dtype=dtype)
# END if for variable data type (ndarray, xarray, or scalar)
# re-index samples
#was arrays[var][self.sampleTag] += firstSample
#This line works because arrays[var][self.sampleTag] is [0],
# or because firstSample is 0
arrays[var] = arrays[var].assign_coords({self.sampleTag:arrays[var][self.sampleTag]+firstSample})
# collect all data into dataset, and update self._data
self._convertArrayListToDataset(arrays,action='extend')
# reset collector
self._collector = self._newCollector(width=self._collector.width)
# write hierarchical data to general meta, if any
paths = self._generateHierPaths()
for p in paths:
self.addMeta('DataSet',{'Hierarchical':{'path':','.join(p)}})
# clear alignment tracking for indexes
self._clearAlignment()
return self._data
def _formatRealization(self,rlz):
"""
Formats realization without truncating data
Namely, assures indexes are correctly typed and length-1 variable arrays become floats
@ In, rlz, dict, {var:val} format (see addRealization)
@ Out, rlz, dict, {var:val} modified
"""
# TODO this could be much more efficient on the parallel (finalizeCodeOutput) than on serial
# TODO costly for loop
# do indexes first to assure correct typing on first realization collection
# - note, the other variables are set in _setDataTypes which is called after _formatRealization in addRealization
if self._collector is None or len(self._collector) == 0:
for var in self._pivotParams.keys():
dtype = self._getCompatibleType(rlz[var][0])
# Note, I don't like this action happening here, but I don't have an alternative way to assure
# indexes have the correct dtype. In the first pass, they aren't going into the collector, but into alignedIndexes.
rlz[var] = np.array(rlz[var],dtype=dtype)
# for now, leave them as the arrays they are, except single entries need converting
for var, val in rlz.items():
# if an index variable, skip it
if var in self._pivotParams:
continue
dims = self.getDimensions(var)[var]
## change dimensionless to floats -> TODO use operator to collapse!
if dims in [[self.sampleTag], []]:
if len(val) == 1:
rlz[var] = val[0]
return rlz
def _fromCSV(self,fileName,**kwargs):
"""
Loads a dataset from CSV (preferably one this object wrote itself, though other compatible CSVs may work).
@ In, fileName, str, filename to load from (not including .csv or .xml)
@ In, kwargs, dict, optional arguments
@ Out, None
"""
# first, try to read from csv
df = self._readPandasCSV(fileName+'.csv')
# load in metadata
dims = self._loadCsvMeta(fileName)
# find distinct number of samples
try:
samples = list(set(df[self.sampleTag]))
except KeyError:
# sample ID wasn't given, so assume each row is a sample
samples = range(len(df.index))
df[self.sampleTag] = samples
# create arrays from which to create the data set
arrays = {}
for var in self.getVars():
if var in dims.keys():
varDims = dims[var]
data = df[[var, self.sampleTag] + dims[var]]
data.set_index(self.sampleTag, inplace=True)
ndat = np.zeros(len(samples),dtype=object)
for s, sample in enumerate(samples):
# set dtype on first pass
if s == 0:
dtype = self._getCompatibleType(sample)
places = data.index.get_loc(sample)
vals = data[places].dropna().set_index(dims[var])
if len(varDims) > 1:
useVals = vals.values.reshape(vals.index.levshape) #[:, 0], -> why :, 0??
coords = dict((dim, vals.index.levels[vals.index.names.index(dim)].values) for dim in dims[var])
else:
useVals = vals.values[:, 0]
coords = dict((var, vals.index.values) for var in varDims)
# slower, but more clear:
#coords = {}
#for dim in dims[var]:
# pdIndexLevel = vals.index.names.index(dim)
# coords[dim] = vals.index.levels[pdIndexLevel].values
# I think this is fixed now. - talbpw -> TODO this needs to be improved before ND will work; we need the individual sub-indices (time, space, etc)
ndat[s] = xr.DataArray(useVals,
dims=varDims,
coords=coords) #dict((var,vals.index.values) for var in dims[var]))
# END for sample in samples
arrays[var] = self._collapseNDtoDataArray(ndat,var,labels=samples,dtype=dtype)
else:
# scalar example
data = df[[var,self.sampleTag]].groupby(self.sampleTag).first().values[:,0]
dtype = self._getCompatibleType(data.item(0))
arrays[var] = self._collapseNDtoDataArray(data,var,labels=samples,dtype=dtype)
self._convertArrayListToDataset(arrays,action='extend')
def _fromCSVXML(self,fileName):
"""
Loads in the XML portion of a CSV if it exists. Returns information found.
@ In, fileName, str, filename to read as filename.xml
@ Out, metadata, dict, metadata discovered
"""
metadata = {}
# check if we have anything from which to read
try:
meta,_ = xmlUtils.loadToTree(fileName+'.xml')
self.raiseADebug('Reading metadata from "{}.xml"'.format(fileName))
haveMeta = True
except IOError:
haveMeta = False
# if nothing to load, return nothing
if not haveMeta:
return metadata
tagNode = xmlUtils.findPath(meta,'DataSet/general/sampleTag')
# read samplerTag
if tagNode is not None:
metadata['sampleTag'] = tagNode.text
# read dimensional data
dimsNode = xmlUtils.findPath(meta,'DataSet/dims')
if dimsNode is not None:
metadata['pivotParams'] = dict((child.tag,child.text.split(',')) for child in dimsNode)
inputsNode = xmlUtils.findPath(meta,'DataSet/general/inputs')
if inputsNode is not None:
metadata['inputs'] = inputsNode.text.split(',')
outputsNode = xmlUtils.findPath(meta,'DataSet/general/outputs')
if outputsNode is not None:
metadata['outputs'] = outputsNode.text.split(',')
# these DO have to be read from meta if present
metavarsNode = xmlUtils.findPath(meta,'DataSet/general/pointwise_meta')
if metavarsNode is not None:
metadata['metavars'] = metavarsNode.text.split(',')
# return
return metadata
def _fromDict(self,source,dims=None,**kwargs):
"""
Loads data from a dictionary with variables as keys and values as np.arrays of realization values
Format for entries in "source":
- scalars: source['a'] = np.array([1, 2, 3, 4]) -> each entry is a realization
- vectors: source['b'] = np.array([ np.array([1, 2]), np.array([3,4,5]) ]) -> each entry is a realization
- indexes: same as "vectors"
@ In, source, dict, as {var:values} with types {str:np.array}
@ In, dims, dict, optional, ordered list of dimensions that each var depends on as {var:[list]}
@ In, kwargs, dict, optional, additional arguments
@ Out, None
"""
# if anything is in the collector, collapse it first
if self._collector is not None:
self.asDataset()
# not safe to default to dict, so if "dims" not specified set it here
if dims is None:
dims = {}
# data sent in is as follows:
# single-entry (scalars) - np.array([val, val, val])
# histories - np.array([np.array(vals), np.array(vals), np.array(vals)])
# etc
## check that all inputs, outputs required are provided
providedVars = set(source.keys())
requiredVars = set(self.getVars())
## figure out who's missing from the IO space
missing = requiredVars - providedVars
if len(missing) > 0:
self.raiseAnError(KeyError,'Variables are missing from "source" that are required for data object "',
self.name.strip(),'":',",".join(missing))
# set orderedVars to all vars, for now don't be fancy with alignedIndexes
self._orderedVars = self.vars + self.indexes
# make a collector from scratch
rows = len(utils.first(source.values()))
cols = len(self._orderedVars)
# can this for-loop be done in a comprehension? The dtype seems to be a bit of an issue.
data = np.zeros([rows,cols],dtype=object)
for v,var in enumerate(self._orderedVars):
if len(source[var].shape) > 1:
# we can't set all at once, because the user gave us an ND array instead of a np.array(dtype=object) of np.array.
# if we try -> ValueError: "could not broadcast input array from shape (#rlz,#time) into shape (#rlz)
for i in range(len(data)):
data[i,v] = source[var][i]
else:
# we can set it at once, the fast way.
data[:,v] = source[var]
# set up collector as cached nd array of values -> TODO might be some wasteful copying here
self._collector = cached_ndarray.cNDarray(values=data,dtype=object)
# set datatypes for each variable
rlz = self.realization(index=0)
self._setDataTypes(rlz)
# collapse into xr.Dataset
self.asDataset()
def _fromXarrayDataset(self,dataset):
"""
Loads data from an xarray dataset
@ In, dataset, xarray.Dataset, the data set containing the data
@ Out, None
"""
if not self.isEmpty:
self.raiseAnError(IOError, 'DataObject', self.name.strip(),'is not empty!')
#select data from dataset
providedVars = set(dataset.data_vars.keys())
requiredVars = set(self.getVars())
## figure out who's missing from the IO space
missing = requiredVars - providedVars
if len(missing) > 0:
self.raiseAnError(KeyError,'Variables are missing from "source" that are required for data object "',
self.name.strip(),'":',",".join(missing))
# remove self.sampleTag since it is an internal used dimension
providedDims = set(dataset.sizes.keys()) - set([self.sampleTag])
requiredDims = set(self.indexes)
missing = requiredDims - providedDims
if len(missing) > 0:
self.raiseAnError(KeyError,'Dimensions are missing from "source" that are required for data object "',
self.name.strip(),'":',",".join(missing))
# select the required data from given dataset
datasetSub = dataset[list(requiredVars)]
# check the dimensions
for var in self.vars:
requiredDims = set(self.getDimensions(var)[var])
# make sure "dims" isn't polluted
assert(self.sampleTag not in requiredDims)
providedDims = set(datasetSub[var].sizes.keys()) - set([self.sampleTag])
if requiredDims != providedDims:
self.raiseAnError(KeyError,'Dimensions of variable',var,'from "source"', ",".join(providedDims),
'is not consistent with the required dimensions for data object "',
self.name.strip(),'":',",".join(requiredDims))
self._orderedVars = self.vars
self._data = datasetSub
for key, val in self._data.attrs.items():
self._meta[key] = val
def _getCompatibleType(self,val):
"""
Determines the data type for "val" that is compatible with the rest of the data object.
@ In, val, object, item whose type should be determined.
@ Out, _type, type instance, type to use
"""
# ND uses first entry as example type
if isinstance(val,(xr.DataArray,np.ndarray)):
val = val.item(0)
# identify other scalars by instance
if mathUtils.isAFloat(val):
_type = float
elif mathUtils.isABoolean(val):
_type = bool
elif mathUtils.isAnInteger(val):
_type = int
# strings and unicode have to be stored as objects to prevent string sizing in numpy
elif mathUtils.isAString(val):
_type = object
# catchall
else:
_type = object
return _type
def _getRealizationFromCollectorByIndex(self,index):
"""
Obtains a realization from the collector storage using the provided index.
@ In, index, int, index to return
@ Out, rlz, dict, realization as {var:value}
"""
assert(self._collector is not None)
assert(index < len(self._collector))
rlz = dict(zip(self._orderedVars, self._collector[index]))
# don't forget the aligned indices! If indexes stored there instead of in collector, retrieve them
for var,vals in self._alignedIndexes.items():
rlz[var] = vals
return rlz
def _getRealizationFromCollectorByValue(self, toMatch, noMatch, tol=1e-15, first=True):
"""
Obtains a realization from the collector storage matching the provided index
@ In, toMatch, dict, elements to match
@ In, noMatch, dict, elements to AVOID matching (should not match within tolerance)
@ In, tol, float, optional, tolerance to which match should be made
@ In, first, bool, optional, return the first matching realization only?
If False, it returns a list of all matching realizations. Default: True
@ Out, (r, rlz) or (rr, rlzs), tuple ( (int, dict) or (list(int),list(dict)) ), where:
first element:
if first: r, int, index where match was found OR size of data if not found
else : rr, list, list of indices where matches were found OR size of data if not found
second element:
if first: rlz, dict, first matching realization as {var:value} OR None if not found
else : rlzs, list, list of matching realizations as [{var:value1}, {var:value2}, ...]
"""
if toMatch is None:
toMatch = {}
assert(self._collector is not None)
# TODO KD Tree for faster values -> still want in collector?
# TODO slow double loop
matchVars, matchVals = zip(*toMatch.items()) if toMatch else ([], [])
avoidVars, avoidVals = zip(*noMatch.items()) if noMatch else ([], [])
matchIndices = tuple(self._orderedVars.index(var) for var in matchVars) # collector column indices of the match variables
if not first:
rr, rlz = [], []
for r, row in enumerate(self._collector[:]): # TODO: could "r" start from the last matched index?
match = True
# find matches first
if toMatch:
possibleMatch = self._collector[r, matchIndices]
for e, element in enumerate(np.atleast_1d(possibleMatch)):
if mathUtils.isAFloatOrInt(element):
match &= mathUtils.compareFloats(matchVals[e], element, tol=tol)
else:
match &= matchVals[e] == element
if not match:
break
# avoid antimatches if we match so far
if match and noMatch:
# NOTE there may be multiple entries per var in noMatch
possibleAvoid = self._collector[r, tuple(self._orderedVars.index(var) for var in avoidVars)]
for e, element in enumerate(np.atleast_1d(possibleAvoid)):
var = avoidVars[e]
if mathUtils.isAFloatOrInt(element):
for avoid in np.atleast_1d(avoidVals[e]):
match &= not mathUtils.compareFloats(avoid, element, tol=tol)
if not match:
break
else:
match &= element not in np.atleast_1d(avoidVals[e]) # TODO histories?
if not match:
break
if match:
if first:
break
else:
rr.append(r)
rlz.append(self._getRealizationFromCollectorByIndex(r))
# "match" only reflects the last row checked; when first is False, rely on the collected matches instead
if (first and match) or (not first and len(rr) > 0):
if first:
return r, self._getRealizationFromCollectorByIndex(r)
else:
return rr, rlz
else:
return len(self), None
def _getRealizationFromDataByIndex(self,index, unpackXArray=False):
"""
Obtains a realization from the data storage using the provided index.
@ In, index, int, index to return
@ In, unpackXArray, bool, optional, True if the coordinates of the xarray variables must be exposed in the dict (e.g. if P(t) => {P:ndarray, t:ndarray})
@ Out, rlz, dict, realization as {var:value} where value is a DataArray with only coordinate dimensions
"""
assert(self._data is not None)
rlz = self._data[{self.sampleTag:index}].drop(self.sampleTag).data_vars
rlz = self._convertFinalizedDataRealizationToDict(rlz, unpackXArray)
return rlz
# @profile
def _getRealizationFromDataByValue(self, match, noMatch, tol=1e-15, unpackXArray=False, first=True):
"""
Obtains a realization from the data storage using the provided matching (or antimatching) dictionaries.
For "match", valid entries must be within tol of the provided value for each variable
For "noMatch", valid entries must NOT be within tol of the provided value for each variable
@ In, match, dict, elements to match
@ In, noMatch, dict, elements to AVOID matching (should not match within tolerance)
@ In, tol, float, optional, tolerance to which match should be made
@ In, unpackXArray, bool, optional, True if the coordinates of the xarray variables must
be exposed in the dict (e.g. if P(t) => {P:ndarray, t:ndarray}).
This can be used only if "first" ==> True. Otherwise we return a Dataset directly
@ In, first, bool, optional, return the first matching realization only?
If False, it returns a list of all matching realizations. Default: True
@ Out, (rr, rlz), tuple ( (int, dict) or (list(int), Dataset) ), where:
first element:
if first: rr, int, index where match was found OR size of data if not found
else : rr, list, list of indices where matches were found OR size of data if not found
second element:
if first: rlz, dict, first matching realization as {var:value} OR None if not found
else : rlz, Dataset, Dataset of matching realizations
"""
assert(self._data is not None)
if unpackXArray:
assert (first)
if match is None:
match = {}
if noMatch is None:
noMatch = {}
matchVars = list(match.keys())
avoidVars = list(noMatch.keys())
# TODO what if a variable is in both??
# TODO this could be slow, should do KD tree instead
mask = 1.0
for var in matchVars: #, val in match.items():
val = match[var]
# float instances are relative, others are absolute
if mathUtils.isAFloatOrInt(val):
# scale if we know how
loc, scale = self._getScalingFactors(var)
scaleVal = (val-loc) / scale
# create mask of where the dataarray matches the desired value
mask *= abs((self._data[var]-loc)/scale - scaleVal) < tol
else:
mask *= self._data[var] == val
# if all potential matches eliminated, stop looking
if not np.any(mask):
break
# continue checking for avoidance variables
## NOTE that there may be multiple entries per avoidance variable
if np.any(mask) and avoidVars:
for var in avoidVars:
vals = np.atleast_1d(noMatch[var]) # values to AVOID matching # TODO what about histories?
# float instances are relative, others are absolute
if mathUtils.isAFloatOrInt(vals[0]):
# scale if we know how
loc, scale = self._getScalingFactors(var)
# create mask of where the dataarray matches the desired value
dataVal = (self._data[var] - loc) / scale
for val in vals:
scaleVal = (val-loc) / scale
mask *= np.logical_not(abs(dataVal - scaleVal) < tol)
else:
for val in vals:
mask *= np.logical_not(self._data[var] == val)
# if all potential matches eliminated, stop looking
if sum(mask) == 0:
break
rlz = self._data.where(mask,drop=True)
try:
rr = rlz[self.sampleTag].item(0) if first else rlz[self.sampleTag].data.tolist()
except IndexError:
return len(self),None
return (rr, self._getRealizationFromDataByIndex(rr, unpackXArray)) if first else (rr, rlz)
def _getRequestedElements(self, options):
"""
Obtains a list of the elements to be written, based on defaults and options[what]
@ In, options, dict, general list of options for writing output files
@ Out, keep, list(str), list of variables that will be written to file
"""
if 'what' in options.keys():
elements = options['what']
keep = []
for entry in elements:
small = entry.strip().lower()
if small == 'input':
keep += self._inputs
continue
elif small == 'output':
keep += self._outputs
continue
elif small == 'metadata':
keep += self._metavars
continue
else:
keep.append(entry.split('|')[-1].strip())
else:
# need the sampleTag meta to load histories
# BY DEFAULT keep everything needed to reload this entity. Inheritors can define _neededForReload to specify what that is.
keep = set(self._inputs + self._outputs + self._metavars + self._neededForReload)
return keep
def _getScalingFactors(self, var):
"""
Returns (or defaults) scaling factors.
@ In, var, str, name of variable for which factors should be obtained
@ Out, loc, float, translation scalar
@ Out, scale, float, scaling scalar
"""
try:
loc, scale = self._scaleFactors[var]
except KeyError:
loc = 0.0
scale = 1.0
if scale == 0:
scale = 1.0
return loc, scale
def _getVariableIndex(self,var):
"""
Obtains the index in the list of variables for the requested var.
@ In, var, str, variable name (input, output, or pointwise metadata)
@ Out, index, int, column corresponding to the variable
"""
return self._orderedVars.index(var)
def _identifyVariablesInCSV(self,fileName):
"""
Gets the list of available variables from the file "fileName.csv". A method is necessary because HistorySets
don't store all the data in one csv.
@ In, fileName, str, name of file without extension
@ Out, varList, list(str), list of variables
"""
# utf-8-sig is commonly used by Excel when writing CSV files as of this writing (2021).
# the BOM for this (first character in file) is \ufeff, causing the first var to be unrecognized
# if the encoding isn't right.
with open(fileName+'.csv', 'r', encoding='utf-8-sig') as f:
provided = list(s.strip() for s in f.readline().split(','))
return provided
def _loadCsvMeta(self,fileName):
"""
Attempts to load metadata from an associated XML file.
If found, update stateful parameters.
If not available, check the CSV itself for the available variables.
@ In, fileName, str, filename (without extension) of the CSV/XML combination
@ Out, dims, dict, dimensionality dictionary with {var:[indices]} structure
"""
meta = self._fromCSVXML(fileName)
# if we have meta, use it to load data, as it will be efficient to read from
if len(meta) > 0:
# TODO shouldn't we be respecting user wishes more carefully? TODO
self._samplerTag = meta.get('sampleTag',self.sampleTag)
dims = meta.get('pivotParams',{})
if len(dims)>0:
self.setPivotParams(dims)
# vector metavars is also stored in 'DataSet/dims' node
metavars = meta.get('metavars',[])
# get dict of vector metavars
params = {key:val for key, val in dims.items() if key in metavars}
# add metadata, so we get probability weights and etc
self.addExpectedMeta(metavars,params)
# check all variables desired are available
provided = set(meta.get('inputs',[])+meta.get('outputs',[])+meta.get('metavars',[]))
# otherwise, if we have no meta XML to load from, infer what we can from the CSV, which is only the available variables.
else:
# we can infer dimensionality from the user-specified settings
provided = set(self._identifyVariablesInCSV(fileName))
dims = dict((v, i) for v, i in self.getDimensions().items() if len(i)>0)
# check provided match needed
needed = set(self._orderedVars)
missing = needed - provided
if len(missing) > 0:
extra = provided - needed
self.raiseAnError(IOError, f'Not all variables requested for data object "{self.name}" were found in csv "{fileName}.csv"!' +
f'\nNeeded: {needed}; \nUnused: {extra}; \nMissing: {missing}')
# otherwise, return happily and continue loading the CSV
return dims
def _newCollector(self,width=1,length=100,dtype=None):
"""
Creates a new collector object and returns it.
@ In, width, int, optional, width of collector
@ In, length, int, optional, initial length of (allocated) collector
@ In, dtype, type, optional, type of entries (float if all float, usually should be object)
"""
if dtype is None:
dtype = self.defaultDtype # set in subclasses if different
return cached_ndarray.cNDarray(width=width,length=length,dtype=dtype)
def _readPandasCSV(self, fname, nullOK=None):
"""
Reads in a CSV and does some simple checking.
@ In, fname, str, name of file to read in (WITH the .csv extension)
@ In, nullOK, bool, optional, if provided then determines whether to error on nulls or not
@ Out, df, pd.DataFrame, contents of file
"""
# if nullOK not provided, infer from type: points and histories can't have them
if self.type in ['PointSet','HistorySet']:
nullOK = False
# datasets can have them because we don't have a 2d+ CSV storage strategy yet
else:
nullOK = True
loader = CsvLoader.CsvLoader()
df = loader.loadCsvFile(fname, nullOK=nullOK)
return df
def _resetScaling(self):
"""
Removes the KDTree and scaling factors, usually because the data changed in some way
@ In, None
@ Out, None
"""
self._scaleFactors = {}
self._inputKDTree = None
def _selectiveRealization(self,rlz):
"""
Used for selecting a subset of the given data. Not implemented for ND.
@ In, rlz, dict, {var:val} format (see addRealization)
@ Out, rlz, dict, {var:val} modified
"""
return rlz
def _setDataTypes(self,rlz):
"""
Set the data types according to the given realization.
@ In, rlz, dict, standardized and formatted realization
@ Out, None
"""
if self.types is None:
self.types = [None]*len(self.getVars())
for v, name in enumerate(self.getVars()):
val = rlz[name]
self.types[v] = self._getCompatibleType(val)
def _setScalingFactors(self,var=None):
"""
Sets the scaling factors for the data (mean, scale).
@ In, var, str, optional, if given then will only set factors for "var"
@ Out, None
"""
if var is None:
# clear existing factors and set list to "all"
self._scaleFactors = {}
varList = self.getVars()
else:
# clear existing factor and reset variable scale, if existing
varList = [var]
try:
del self._scaleFactors[var]
except KeyError:
pass
# TODO someday make KDTree too!
assert(self._data is not None) # TODO check against collector entries?
ds = self._data[varList] if var is not None else self._data
mean = ds.mean().variables
scale = ds.std().variables
for name in varList:
try:
m = mean[name].values[()]
s = scale[name].values[()]
self._scaleFactors[name] = (m,s)
except Exception:
self.raiseADebug('Had an issue with setting scaling factors for variable "{}". No big deal.'.format(name))
def _setStructureFromMetaXML(self, meta):
"""
Sets this DataSet's structure based on structured meta XML
@ In, meta, xmlUtils.StaticXmlElement, xml structure
@ Out, None
"""
root = meta.getRoot()
# locate index map
dims = root.find('dims')
varToDim = {}
if dims is not None:
for child in dims:
varToDim[child.tag] = list(x.strip() for x in child.text.split(','))
# inputs, outputs, indices meta
inputs = root.find('general/inputs')
if inputs is not None:
if inputs.text is None:
self._inputs = []
else:
self._inputs = list(x.strip() for x in inputs.text.split(','))
outputs = root.find('general/outputs')
if outputs is not None:
if outputs.text is None:
self._outputs = []
else:
self._outputs = list(x.strip() for x in outputs.text.split(','))
self._orderedVars = self._inputs + self._outputs
self._pivotParams = {}
for var in self._orderedVars:
if var in varToDim:
indices = varToDim[var]
for idx in indices:
if idx not in self._pivotParams:
self._pivotParams[idx] = []
self._pivotParams[idx].append(var)
pointwiseMeta = root.find('general/pointwise_meta').text.split(',')
if pointwiseMeta:
self.addExpectedMeta(pointwiseMeta, overwrite=True)
def _toCSV(self, fileName, start=0, **kwargs):
"""
Writes this data object to CSV file (except the general metadata, see _toCSVXML)
@ In, fileName, str, path/name to write file
@ In, start, int, optional, first realization to start printing from (if > 0, implies append mode)
@ In, kwargs, dict, optional, keywords for options
Possibly includes:
'clusterLabel': name of variable to cluster printing by. If included then triggers history-like printing.
@ Out, None
"""
filenameLocal = fileName # TODO path?
keep = self._getRequestedElements(kwargs)
toDrop = list(var for var in self.getVars() if var not in keep)
# if printing by cluster, divert now
if 'clusterLabel' in kwargs:
clusterLabel = kwargs.pop('clusterLabel')
self._toCSVCluster(fileName,start,clusterLabel,**kwargs)
return
# set up data to write
if start > 0:
# slice data starting from "start"
sl = slice(start,None,None)
data = self._data.isel(**{self.sampleTag:sl})
mode = 'a'
else:
data = self._data
mode = 'w'
data = data.drop(toDrop)
self.raiseADebug('Printing data from "{}" to CSV: "{}"'.format(self.name,filenameLocal+'.csv'))
# get the list of elements the user requested to write
# order data according to user specs # TODO might be time-inefficient, allow user to skip?
ordered = list(i for i in self._inputs if i in keep)
ordered += list(o for o in self._outputs if o in keep)
ordered += list(m for m in self._metavars if m in keep)
self._usePandasWriteCSV(filenameLocal,data,ordered,keepSampleTag = self.sampleTag in keep,mode=mode)
def _toCSVCluster(self,fileName,start,clusterLabel,**kwargs):
"""
Writes this data object as a chain of CSVs, grouped by the cluster
@ In, fileName, str, path/name to write file
@ In, start, int, optional, TODO UNUSED first realization to start printing from (if > 0, implies append mode)
@ In, clusterLabel, str, variable by which to cluster printing
@ In, kwargs, dict, optional, keywords for options
@ Out, None
"""
# get list of variables to print
keep = self._getRequestedElements(kwargs)
# get unique cluster labels
clusterIDs = set(self._data[clusterLabel].values)
# write main CSV pointing to other files
with open(fileName+'.csv','w') as writeFile: # TODO append mode if printing each step
writeFile.writelines('{},filename\n'.format(clusterLabel))
for ID in clusterIDs:
writeFile.writelines('{},{}_{}.csv\n'.format(ID,fileName,ID))
self.raiseADebug('Wrote master cluster file to "{}.csv"'.format(fileName))
# write sub files as point sets
ordered = list(var for var in itertools.chain(self._inputs,self._outputs,self._metavars) if (var != clusterLabel and var in keep))
for ID in clusterIDs:
data = self._data.where(self._data[clusterLabel] == ID, drop = True).drop(clusterLabel)
subName = '{}_{}'.format(fileName,ID)
self._usePandasWriteCSV(subName, data, ordered, keepSampleTag=self.sampleTag in keep, mode='w') # TODO append mode
self.raiseADebug('Wrote sub-cluster file to "{}.csv"'.format(subName))
def _toCSVXML(self,fileName,**kwargs):
"""
Writes the general metadata of this data object to XML file
@ In, fileName, str, path/name to write file
@ In, kwargs, dict, additional options
@ Out, None
"""
# make copy of XML and modify it
meta = copy.deepcopy(self._meta)
# remove variables that aren't being "kept" from the meta record
keep = self._getRequestedElements(kwargs)
if 'DataSet' in meta.keys():
## remove from "dims"
dimsNode = xmlUtils.findPath(meta['DataSet'].getRoot(),'dims')
if dimsNode is not None:
toRemove = []
for child in dimsNode:
if child.tag not in keep:
toRemove.append(child)
for r in toRemove:
dimsNode.remove(r)
## remove from "inputs, outputs, pointwise"
toRemove = []
## TODO doesn't work for time-dependent requests!
genNode = xmlUtils.findPath(meta['DataSet'].getRoot(),'general')
if genNode is not None:
for child in genNode:
if child.tag in ['inputs','outputs','pointwise_meta']:
vs = []
for var in child.text.split(','):
if var.strip() in keep:
vs.append(var)
if len(vs) == 0:
toRemove.append(child)
else:
child.text = ','.join(vs)
for r in toRemove:
genNode.remove(r)
self.raiseADebug('Printing metadata XML: "{}"'.format(fileName+'.xml'))
with open(fileName+'.xml','w') as ofile:
#header
ofile.writelines('<DataObjectMetadata name="{}">\n'.format(self.name))
for name in sorted(list(meta.keys())):
target = meta[name]
xml = xmlUtils.prettify(target.getRoot(),startingTabs=1,addRavenNewlines=False)
ofile.writelines(' {}\n'.format(xml))
ofile.writelines('</DataObjectMetadata>\n')
def _usePandasWriteCSV(self,fileName,data,ordered,keepSampleTag=False,keepIndex=False,mode='w'):
"""
Uses Pandas to write a CSV.
@ In, fileName, str, path/name to write file
@ In, data, xr.Dataset, data to write (with only "keep" vars included, plus self.sampleTag)
@ In, ordered, list(str), ordered list of headers
@ In, keepSampleTag, bool, optional, if True then keep the sampleTag in the CSV
@ In, keepIndex, bool, optional, if True then keep indices in the CSV even if not multiindex
@ In, mode, str, optional, mode to write CSV in (write, append as 'w','a')
@ Out, None
"""
# TODO asserts
# make a pandas dataframe, they write to CSV very well
data = data.to_dataframe()
# order entries
data = data[ordered]
# set up writing mode; if append, don't write headers
if mode == 'a':
header = False
else:
header = True
# write, depending on whether to keep sampleTag in index or not
if keepSampleTag:
data.to_csv(fileName+'.csv',mode=mode,header=header)
else:
# if other multiindices included, don't omit them #for ND DataSets only
if isinstance(data.index,pd.MultiIndex):
# if the only index is self.sampleTag we cannot drop it (pandas would fail), so we use index=False instead (a.a.)
localIndex = True
if len(data.index.names) == 1:
localIndex = self.sampleTag not in data.index.names
else:
if self.sampleTag in data.index.names:
data.index = data.index.droplevel(self.sampleTag)
if not localIndex:
data.to_csv(fileName+'.csv',mode=mode,header=header, index=localIndex)
else:
data.to_csv(fileName+'.csv',mode=mode,header=header)
## START garbled index fix ##
## At one point we were seeing "garbled" indexes printed from Pandas: a,b,(RAVEN_sample_ID,),c
## Here, commented is a workaround that @alfoa set up to prevent that problem.
## However, it is painfully slow, so if garbled data shows up again, we can
## revisit this fix.
## When using this fix, comment out the data.to_csv line above.
#dataString = data.to_string()
# find headers
#splitted = [",".join(elm.split())+"\n" for elm in data.to_string().split("\n")]
#header, stringData = splitted[0:2], splitted[2:]
#header.reverse()
#toPrint = [",".join(header).replace("\n","")+"\n"]+stringData
#with open(fileName+'.csv', mode='w+') as fileObject:
# fileObject.writelines(toPrint)
## END garbled index fix ##
# if keepIndex, then print as is
elif keepIndex:
data.to_csv(fileName+'.csv',mode=mode,header=header)
# if only index was sampleTag and we don't want it, index = False takes care of that
else:
data.to_csv(fileName+'.csv',index=False,mode=mode,header=header)
# DEBUGG tool for incremental writing, keep for future use
# raw_input('Just wrote to CSV "{}.csv", press enter to continue ...'.format(fileName))
# _useNumpyWriteCSV (below) is a secondary method to write out POINT SET CSVs. When benchmarked with Pandas, I tested using
# different numbers of variables (M=5,25,100) and different numbers of realizations (R=4,100,1000).
# For each test, I did a unit check just on _usePandasWriteCSV versus _useNumpyWriteCSV, and took the average time
# to run a trial over 1000 trials (in seconds). The results are as follows:
# R M pandas numpy ratio per float p per float n per float ratio
# 4 5 0.001748 0.001004 1.741035857 0.00008740 0.00005020 1.741035857
# 4 25 0.002855 0.001378 2.071843251 0.00002855 0.00001378 2.071843251
# 4 100 0.007006 0.002633 2.660843145 0.00001752 6.5825E-06 2.660843145
# 100 5 0.001982 0.001819 1.089609676 0.00000396 0.00000364 1.089609676
# 100 25 0.003922 0.003898 1.006182658 1.5688E-06 1.5592E-06 1.006182658
# 100 100 0.011124 0.011386 0.976989285 1.1124E-06 1.1386E-06 0.976989285
# 1000 5 0.004108 0.008688 0.472859116 8.2164E-07 1.7376E-06 0.472859116
# 1000 25 0.013367 0.027660 0.483261027 5.3468E-07 1.1064E-06 0.483261027
# 1000 100 0.048791 0.095213 0.512442602 4.8791E-07 9.5213E-07 0.512442602
# The per-float columns divide the time taken by (R*M) to give a fair comparison. The summary of the # var versus # realizations per float is:
# ---------- R ----------------
# M 4 100 1000
# 5 1.741035857 1.089609676 0.472859116
# 25 2.071843251 1.006182658 0.483261027
# 100 2.660843145 0.976989285 0.512442602
# When the value is > 1, numpy is better (so when < 1, pandas is better). It seems that "R" is a better
# indicator of which method is better, and R < 100 is a fairly simple case that is pretty fast anyway,
# so for now we just keep everything using Pandas. - talbpaul and alfoa, January 2018
#
#def _useNumpyWriteCSV(self,fileName,data,ordered,keepSampleTag=False,keepIndex=False,mode='w'):
# # TODO docstrings
# # TODO assert point set -> does not work right for ND (use Pandas)
# # TODO the "mode" should be changed for python 3: mode has to be 'ba' if appending, not 'a' when using numpy.savetxt
# with open(fileName+'.csv',mode) as outFile:
# if mode == 'w':
# #write header
# header = ','.join(ordered)
# else:
# header = ''
# data = data[ordered].to_array()
# if not keepSampleTag:
# data = data.drop(self.sampleTag)
# data = data.values.transpose()
# # set up formatting for types
# # TODO potentially slow loop
# types = list('%.18e' if self._getCompatibleType(data[0][i]) == float else '%s' for i in range(len(ordered)))
# np.savetxt(outFile,data,header=header,fmt=types)
# # format data?
### HIERARCHICAL STUFF ###
def _constructHierPaths(self):
"""
Construct a list of xr.Datasets, each of which is the samples taken along one hierarchical path
@ In, None
@ Out, results, list(xr.Dataset), dataset containing only the path information
"""
# TODO can we do this without collapsing? Should we?
if self.isEmpty:
self.raiseAnError(ValueError, 'DataObject named "{}" is empty!'.format(self.name))
data = self.asDataset()
paths = self._generateHierPaths()
results = [None] * len(paths)
for p,path in enumerate(paths):
rlzs = list(self._data.where(data['prefix']==ID,drop=True) for ID in path)
results[p] = xr.concat(rlzs,dim=self.sampleTag)
return results
def _generateHierPaths(self):
"""
Returns paths followed to obtain endings
@ In, None
@ Out, paths, list(list(str)), list of paths (which are lists of prefixes)
"""
# get the ending realizations
endings = self._getPathEndings()
paths = [None]*len(endings)
for e,ending in enumerate(endings):
# reconstruct path that leads to this ending
path = [ending['prefix']]
while ending['RAVEN_parentID'] != "None" and not pd.isnull(ending['RAVEN_parentID']):
_,ending = self.realization(matchDict={'prefix':ending['RAVEN_parentID']})
if ending is None:
break
path.append(ending['prefix'])
# sort it in order by progression
path.reverse()
# add it to the path list
paths[e] = path
return paths
def _getPathEndings(self):
"""
Finds all those nodes who are the end of the line.
@ In, None
@ Out, endings, list({var:float/str or xr.DataArray}, ...), realizations
"""
# TODO returning dicts means copying the data! Do more efficiently by masking and creating xr.Dataset instances!
# check if hierarchal data exists, by checking for the isEnding tag
if not 'RAVEN_isEnding' in self.getVars():
return []
# get realization slices for each realization that is an ending
# get from the collector first
if self._collector is not None and len(self._collector) > 0:
# first get rows from collector
fromColl = self._collector[np.where(self._collector[:,self._orderedVars.index('RAVEN_isEnding')])]
# then turn them into realization-like
fromColl = list( dict(zip(self._orderedVars,c)) for c in fromColl )
else:
fromColl = []
# then get from data
if self._data is not None and len(self._data[self.sampleTag].values) > 0:
# first get indexes of realizations
indexes = self._data.where(self._data['RAVEN_isEnding'],drop=True)[self.sampleTag].values
# then collect them into a list
fromData = list(self._getRealizationFromDataByIndex(i) for i in indexes)
else:
fromData = []
endings = fromColl + fromData
return endings
|
idaholab/raven
|
framework/DataObjects/DataSet.py
|
Python
|
apache-2.0
| 107,450
|
[
"NetCDF"
] |
f69a1c67c3285ef68d55b76a6dd180e57c976726ac2309fa4daa786dcccf1d14
|
from collections import OrderedDict
class GenInput:
def __init__(self):
self.rtRange = self.setRtRange()
self.cycles=[] # number of iterations in each for loop
self.rtvar=[] # variable rt parameter [ {"TCLOUD":[0,10,20]}, {"SZA":[0,30,60]}, {"ALBCON":[0,0.5,1.0]}]
self.rtcons={} # constant rt parameters { "NRE": 10}
self.IOUTformat = 10
def CycleSequence(self):
"""
given rtvar=[ {"TCLOUD":[0,10,20]}, {"SZA":[0,30,60]}, {"ALBCON":[0,0.5,1.0]}]
:return: if iout==10: ["SZA","ALBCON"] , else: ["TCLOUD", "SZA", "ALBCON"]
"""
seq = [h.keys()[0] for h in self.rtvar]
return seq[1:] if self.IOUTformat == 10 else seq
def GetParmMenuItems(self):
"""
Get the sequence of individual variant values for each nesting cycle directly from command text
Given a cmd text such as
A=0;1
B=2;3
C=4;5
D=6
:return: [["A=0","A=1"]["B=2","B=3"],["C=4","C=5"],["D=6"]]
"""
tags=[]
ncycles = len(self.rtvar)
if ncycles == 0: return tags
i=-1
for cdict in self.rtvar:
i+=1
k=cdict.keys()[0]
tags.append([])
for v in cdict[k]:
tags[i].append("{}={}".format(k,v))
return tags
def CycleSetup(self, cmd):
"""
cycle through Runrt command buffer
:param:cmd: -- RunRT command sequence
:returns:xvariable: list of values for the first variant if IOUT=10,
:returns:xlabel: the parameter name used to label x-axis
"""
lines = cmd.split('\n')
self.rtvar=[] # a list of dictionaries, a dictionary for each nested loop
self.rtcons={}
self.cycles=[]
nesting = -1
niter = 0
xvariable = []
xlabel = ""
prevcycles = 0 # check that lock-step variables have same number of iterations
self.IOUTformat = 10 # this is the default
for line in lines:
line = line.split('#')[0].strip() # strip off comments
if len(line) == 0:
continue
if not line.endswith("&"):
nesting+=1
if "=" in line:
parm, rhs = line.split('=')
if parm.upper() == "IOUT":
if rhs.count(';') > 0:
rhs=rhs.split(';')[0] # only one output format allowed
self.IOUTformat = int(rhs)
if ";" in rhs:
covariant = rhs.endswith("&")
if covariant:
rhs=rhs[0:-2]
values=[vv.strip().replace(' ','_') for vv in rhs.split(';')]
if covariant and not prevcycles == len(values) :
xvariable = values
xlabel = "Error: Number of elements in covariant variable, {}, is incorrect.".format(parm)
return xvariable, xlabel
prevcycles = len(values)
#values=[vv.strip() for vv in rhs.split(';')]
if not covariant:
self.cycles.append(len(values))
self.rtvar.append(OrderedDict())
self.rtvar[nesting][parm]=values
if nesting == 0:
# note that xvariable is set here for IOUT=10,
# it is reset in Plot01 and Plot11 to wavelength and altitude, respectively
xvariable = values[:]
xlabel = str(parm)
else:
self.rtcons[parm]=rhs
return xvariable, xlabel
def Niter(self):
"""
:return: total number of iterations required to cycle through command file
"""
niter = 1
for n in self.cycles:
niter*=n
return niter
def CycleInput(self, iteration):
"""
generates INPUT for a given iteration
:param
iteration - iteration number between 0 and Niter()-1
:returns:
rtinp - SBDART input for this iteration
rtlist - rtinp broken into list of leading variants in each
cycle. E.g., if cmd is
TCLOUD=0;10;100
WLINF=0.5;0.8
WLSUP=0.5;0.8 &
ALBCON=0.5;1.0
rtinp=TCLOUD=0\nWLINF=0.8\nWLSUP=0.8\nALBCON=0.5
rtlist=["TCLOUD=0","WLINF=0.8","ALBCON=0.5"] for iteration 3
"""
rtinp=""
rtlist=[]
niter = self.Niter()
if iteration >= niter:
return None
else:
iter = iteration
for nest in range(0,len(self.rtvar)):
if nest > 0:
iter=iter/self.cycles[nest-1]
i = iter % self.cycles[nest]
first = True
for p,vlist in self.rtvar[nest].iteritems():
v=vlist[i]
if p[0].isalpha(): # don't write output RT parm unless it's a legit fortran var
rtinp+="{}={}\n".format(p,v)
if first:
rtlist.append("{}={}".format(p,v))
first = False
for p,v in self.rtcons.iteritems():
rtinp+="{}={}\n".format(p,v.split('#')[0])
return rtinp,rtlist
def DocString(self, key):
docstr = {"IOUT=1" : "Spectral output between WLINF and WLSUP",
"IOUT=2" : "Spectral profile of lowtran optical depth",
"IOUT=10" : "Radiant flux at top and bottom of atmosphere",
"IOUT=11" : "Radiant flux at each atmospheric layer",
"IOUT=20" : "Radiance at top of atmosphere",
"IOUT=21" : "Radiance at bottom of atmosphere",
"IDATM=0" : "User specified atmospheric profile",
"IDATM=1" : "Tropical atmospheric profile",
"IDATM=2" : "Mid-latitude summer atmospheric profile",
"IDATM=3" : "Mid-latitude winter atmospheric profile",
"IDATM=4" : "Sub-arctic summer atmospheric profile",
"IDATM=5" : "Sub-arctice winter atmospheric profile",
"IDATM=6" : "US-62 atmospheric profile",
"IAER=0" : "No boundary layer aerosol",
"IAER=1" : "Rural aerosol",
"IAER=2" : "Urban aerosol",
"IAER=3" : "Oceanic aerosol",
"IAER=4" : "Tropospheric aerosol",
"IAER=5" : "User defined (see WLBAER, QBAER, TBAER, WBAER, GBAER)",
"JAER=0" : "No stratospheric aerosol",
"JAER=1" : "Background stratospheric aerosol",
"JAER=2" : "Aged volcanic stratospheric aerosol",
"JAER=3" : "Fresh volcanic stratospheric aerosol",
"JAER=4" : "Meteor dust stratospheric aerosol",
"ISALB=-1" : "Read surface albedo from albedo.dat",
"ISALB=0" : "Constant albedo set by ALBCON",
"ISALB=1" : "Snow",
"ISALB=2" : "Clear water",
"ISALB=3" : "Lake water",
"ISALB=4" : "Sea water",
"ISALB=5" : "Sand",
"ISALB=6" : "Vegetation",
"ISALB=7" : "Ocean water BRDF, requires SC parameters",
"ISALB=8" : "Hapke BRDF model, requires SC parameters",
"ISALB=9" : "Ross-thick Li-sparse BRDF, requires SC parameters",
"ISALB=10" : "Snow, seawater, sand and vegetation, SC sets partitions",
"ISAT=-4" : "Gaussian filter, WLINF-2*WLSUP to WLINF+2*WLSUP",
"ISAT=-3" : "Triangular filter, WLINF-WLSUP to WLINF+WLSUP",
"ISAT=-2" : "Flat filter, WLINF-0.5*WLSUP to WLINF+0.5*WLSUP",
"ISAT=-1" : "User defined, read from filter.dat",
"ISAT=0" : "WLINF to WLSUP with filter function = 1 (default)",
"ISAT=1" : "METEO",
"ISAT=2" : "GOES(EAST)",
"ISAT=3" : "GOES(WEST)",
"ISAT=4" : "AVHRR1(NOAA8)",
"ISAT=5" : "AVHRR2(NOAA8)",
"ISAT=6" : "AVHRR1(NOAA9)",
"ISAT=7" : "AVHRR2(NOAA9)",
"ISAT=8" : "AVHRR1(NOAA10)",
"ISAT=9" : "AVHRR2(NOAA10)",
"ISAT=10" : "AVHRR1(NOAA11)",
"ISAT=11" : "AVHRR2(NOAA11)",
"ISAT=12" : "GTR-100 ch1",
"ISAT=13" : "GTR-100 ch2",
"ISAT=14" : "GTR-100 410nm channel",
"ISAT=15" : "GTR-100 936nm channel",
"ISAT=16" : "MFRSR 415nm channel",
"ISAT=17" : "MFRSR 500nm channel",
"ISAT=18" : "MFRSR 610nm channel",
"ISAT=19" : "MFRSR 665nm channel",
"ISAT=20" : "MFRSR 862nm channel",
"ISAT=21" : "MFRSR 940nm channel",
"ISAT=22" : "AVHRR3 (nominal)",
"ISAT=23" : "AVHRR4 (nominal)",
"ISAT=24" : "AVHRR5 (nominal)",
"ISAT=25" : "Biological action spectra for DNA damage by UVB radiation",
"ISAT=26" : "AIRS1 380-460nm",
"ISAT=27" : "AIRS2 520-700nm",
"ISAT=28" : "AIRS3 670-975nm",
"ISAT=29" : "AIRS4 415-1110nm",
"NOTHRM=-1": "Thermal emission turned on when lambda > 2um",
"NOTHRM=0" : "Thermal emission turned on for all wavelengths",
"NOTHRM=1" : "No thermal emission"
}
if key in docstr:
return "{:10s}# {}".format(key, docstr[key])
else:
return key
def ParmMatch(self, pattern):
'''
get the parm that starts with the characters in pattern
for example, 'alb' returns ALBCON
:param pattern:
:return:
'''
pat = pattern.upper()
for p in self.setRtRange().keys():
if p.startswith(pat):
return p
return ''
def setRtRange(self):
'''
rt parameter range dictionary. key is rt parameter name, value is description and suggested range and skew
:return:
'''
return {"SZA": "Solar zenith angle (degrees) $ 0.0:90:1",
"CSZA": "Cosine of solar zenith $ 0:1:1",
"WLINF": "Wavelength lower limit (um) $ 0.250:100:1",
"WLSUP": "Wavelength upper limit (um) $ 0.250:100:1",
"WLINC": "Wavelength/wavenumber increment $ -.01;0;20",
"CSZA": "cosine of solar zenith $ 0:1:1",
"IDATM": "Model atmosphere selector $ 0;1;2;3;4;5;6",
"ZPRES": "Effective surface altitude (km) $ 0:5:1",
"PBAR": "Surface pressure (mb) $ 500:2000:1",
"SCLH2O": "Water vapor scale height (km) $ 0.5:5:1",
"UW": "Integrated water vapor (g/cm2) $ 0:8:0.3",
"UO3": "Integrated ozone amount (atm-cm) $ 0.050:0.200:0.3",
"O3TRP": "Tropospheric ozone amount (atm-cm) $ 0.00:0.02:0.3",
"ZTRP": "Tropospheric altitude (km) $ 5:12:1",
"XRSC": "Rayleigh scattering sensitivity factor $ 0.5:2:1",
"XN2": "N2 volume mixing ratio (ppm) $ 5e5:1e6:1",
"XO2": "O2 volume mixing ratio (ppm) $ 1e5:4e5:1",
"XCO2": "CO2 volume mixing ratio (ppm) $ 0:800:1",
"XCH4": "CH4 volume mixing ratio (ppm) $ 0:3.48:1",
"XN2O": "N2O volume mixing ratio (ppm) $ 0:0.64:1",
"XCO": "CO volume mixing ratio (ppm) $ 0:30:1",
"XNO2": "NO2 volume mixing ratio (ppm) $ 0:4.6e-5:1",
"XSO2": "SO2 volume mixing ratio (ppm) $ 0:6e-4:1",
"XNH3": "NH3 volume mixing ratio (ppm) $ 0:1.0e-3:1",
"XNO": "NO volume mixing ratio (ppm) $ 0:6.0e-4:1",
"XHNO3": "HNO3 volume mixing ratio (ppm) $ 0:1e-6:1",
"XO4": "O4 density sensitivity factor $ 0.5:2.0:1",
"ALBCON": "Surface albedo $ 0:1:1",
"ISALB": "Surface albedo model $ -1;0;1;2;3;4;5;6;7;8;9;10",
"ISAT": "Filter function $ -4;-3;-2;-1;0;1;2;3;4;5;6;7;8;9;10;11;12;13;14;15;16;17;18;19;20;21;22;23;24;25;26;27;28;29",
"ZCLOUD": "Cloud height (km,km,km,km,km) $ 0:10:1",
"TCLOUD": "Cloud optical depth (,,,,,) $ 0:100:0.3",
"LWP": "Liquid water path (g/m2) $ 0:1000:0.3",
"NRE": "Cloud drop radius (um) $ 2:128:0",
"RHCLD": "Cloud relative humidity $ 0.0:1.0:1",
"JAER": "Stratospheric Aerosol type $ 0;1;2;3;4",
"ZAER": "SA layer altitudes (km,km,km,km,km) $ 0:10:1",
"TAERST": "SA Optical depths (,,,,,) $ 0:10:0.3",
"IAER": "Bounday Layer aerosol type $ 0;1;2;3;4",
"RHAER": "Relative humidity for BL aerosols $ 0:1:1",
"TBAER": "Optical depth of BL aerosols $ 0:10:0.3",
"BTEMP": "Surface termperature (K) $ 180:330:1",
"NOTHRM": "Thermal calculation selector $ -1;0;1",
"IOUT": "Output format selector $ 1;2;10;11;20;21",
"ZGRID1": "Minimum vertical step size (km) $ 0.1:1:1",
"ZGRID2": "Maximum vertical step size (km) $ 2:50:1",
"NGRID": "Number of cells in re-grided atmosphere $ -1:200:1",
"ZOUT": "Output altitude (km,km) $ 0.0:10.0:1"}
|
paulricchiazzi/SBDART
|
RunRT/GenInput.py
|
Python
|
gpl-3.0
| 14,678
|
[
"Gaussian"
] |
7e552c6a9c618b013911874ee7d948bbfb7ec5d381ca52becd48b5e4683b9e13
|
""" This module loads all the classes from the VTK Charts library into
its namespace. This is an optional module."""
from vtkChartsPython import *
|
b3c/VTK-5.8
|
Wrapping/Python/vtk/charts.py
|
Python
|
bsd-3-clause
| 149
|
[
"VTK"
] |
e88e3142f387f5b11a889e4b5a2933dd7d07a768ed2ddd54aef863f0e7cfd689
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates .h and .rc files for strings extracted from a .grd file.
This script generates an rc file and header (NAME.{rc,h}) to be included in
a build target. The rc file includes translations for strings pulled from the
given .grd file(s) and their corresponding localized .xtb files.
To specify strings that will be extracted, the script pointed to by the
argument "extract-datafile" should contain one or both of the following global
variables:
STRING_IDS is a list of string IDs we want to import from the .grd files and
include in the generated RC file. These strings are universal for all brands.
MODE_SPECIFIC_STRINGS is a dictionary of strings for which there are brand
specific values. This mapping provides brand- and mode-specific string ids for a
given input id as described here:
{
resource_id_1: { # A resource ID for use with GetLocalizedString.
brand_1: [ # 'google_chrome', for example.
string_id_1, # Strings listed in order of the brand's modes, as
string_id_2, # specified in install_static::InstallConstantIndex.
...
string_id_N,
],
brand_2: [ # 'chromium', for example.
...
],
},
resource_id_2: ...
}
Note: MODE_SPECIFIC_STRINGS cannot be specified if STRING_IDS is not specified.
"""
# The generated header file includes IDs for each string, but also has values to
# allow getting a string based on a language offset. For example, the header
# file looks like this:
#
# #define IDS_L10N_OFFSET_AR 0
# #define IDS_L10N_OFFSET_BG 1
# #define IDS_L10N_OFFSET_CA 2
# ...
# #define IDS_L10N_OFFSET_ZH_TW 41
#
# #define IDS_MY_STRING_AR 1600
# #define IDS_MY_STRING_BG 1601
# ...
# #define IDS_MY_STRING_BASE IDS_MY_STRING_AR
#
# This allows us to look up an ID for a string by adding IDS_MY_STRING_BASE
# and IDS_L10N_OFFSET_* for the language we are interested in.
#
from __future__ import print_function
import argparse
import glob
import io
import os
import sys
from xml import sax
BASEDIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(1, os.path.join(BASEDIR, '../../../tools/grit'))
sys.path.insert(2, os.path.join(BASEDIR, '../../../tools/python'))
from grit.extern import tclib
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "app", "resources"))
import vivaldi_resources as vivaldi
# Treat the list of expected translation input files as a filter and ignore any
# translation not on the list.
vivaldi_filter_translation_files = True
class GrdHandler(sax.handler.ContentHandler):
"""Extracts selected strings from a .grd file.
Attributes:
messages: A dict mapping string identifiers to their corresponding messages.
referenced_xtb_files: A list of all xtb files referenced inside the .grd
file.
"""
def __init__(self, string_id_set, dir):
"""Constructs a handler that reads selected strings from a .grd file.
The dict attribute |messages| is populated with the strings that are read.
Args:
string_id_set: An optional set of message identifiers to extract; all
messages are extracted if empty.
"""
sax.handler.ContentHandler.__init__(self)
self.messages = {}
self.referenced_xtb_files = []
self.__id_set = string_id_set
self.__message_name = None
self.__element_stack = []
self.__text_scraps = []
self.__characters_callback = None
self.__dir = dir
# Map a message name to its translation id
self.translation_ids = {}
# Map a translation id to a map of its placeholders
self.translation_placeholders = {}
# An array of message parts that are used to calculate a hash to get the
# translation id. This is similar to __text_scraps but uses the placeholder
# name, not the placeholder text for placeholders.
self.__hash_scraps = []
# Map a placeholder name to its text for the current message
self.__placeholders = {}
# The current placeholder name if any
self.__placeholder_name = ''
self.__placeholder_text = ''
def startElement(self, name, attrs):
self.__element_stack.append(name)
if name == 'message':
self.__OnOpenMessage(attrs.getValue('name'))
elif name == 'ph':
self.__placeholder_name = attrs.getValue('name')
elif name == 'file':
parent = self.__element_stack[-2]
if parent == 'translations':
self.__OnAddXtbFile(attrs.getValue('path'))
elif name == 'part':
part_path = os.path.join(self.__dir, attrs.getValue('file'))
saved_dir = self.__dir
self.__dir = os.path.dirname(part_path)
sax.parse(part_path, self)
self.__dir = saved_dir
def endElement(self, name):
popped = self.__element_stack.pop()
assert popped == name
if name == 'message':
self.__OnCloseMessage()
elif name == 'ph':
if self.__message_name:
self.__text_scraps.append(self.__placeholder_text)
self.__hash_scraps.append(self.__placeholder_name)
self.__placeholders[self.__placeholder_name] = self.__placeholder_text
self.__placeholder_name = ''
self.__placeholder_text = ''
def characters(self, content):
if self.__characters_callback:
self.__characters_callback(self.__element_stack[-1], content)
def __IsExtractingMessage(self):
"""Returns True if a message is currently being extracted."""
return self.__message_name is not None
def __OnOpenMessage(self, message_name):
"""Invoked at the start of a <message> with message's name."""
assert not self.__IsExtractingMessage()
self.__message_name = (message_name if (not (self.__id_set) or
message_name in self.__id_set)
else None)
if self.__message_name:
self.__characters_callback = self.__OnMessageText
def __OnMessageText(self, containing_element, message_text):
"""Invoked to handle a block of text for a message."""
if message_text and containing_element == 'message':
self.__text_scraps.append(message_text)
self.__hash_scraps.append(message_text)
if message_text and containing_element == 'ph':
self.__placeholder_text += message_text
def __OnCloseMessage(self):
"""Invoked at the end of a message."""
if self.__IsExtractingMessage():
message_text = ''.join(self.__text_scraps).strip()
if self.__message_name not in vivaldi.REPLACE_GOOGLE_EXCEPTIONS:
message_text = vivaldi.ReplaceGoogleInString(message_text)
self.messages[self.__message_name] = message_text
hash_text = ''.join(self.__hash_scraps).strip()
translation_id = tclib.GenerateMessageId(hash_text)
self.translation_ids[self.__message_name] = translation_id
self.translation_placeholders[translation_id] = self.__placeholders
self.__message_name = None
self.__text_scraps = []
self.__hash_scraps = []
self.__placeholders = {}
self.__characters_callback = None
def __OnAddXtbFile(self, xtb_file_path):
"""Adds the xtb file path of a 'file'."""
if os.path.splitext(xtb_file_path)[1].lower() == '.xtb':
self.referenced_xtb_files.append(xtb_file_path)
class XtbHandler(sax.handler.ContentHandler):
"""Extracts selected translations from an .xtd file.
Populates the |lang| and |translations| attributes with the language and
selected strings of an .xtb file. Instances may be re-used to read the same
set of translations from multiple .xtb files.
Attributes:
translations: A mapping of translation ids to strings.
lang: The language parsed from the .xtb file.
"""
def __init__(self, translation_ids, translation_placeholders):
"""Constructs an instance to parse the given strings from an .xtb file.
Args:
translation_ids: a mapping of translation ids to their string
identifiers list for the translations to be extracted.
translation_placeholders: a mapping of translation ids to their
mapping of placeholder names to the corresponding placeholder texts.
"""
sax.handler.ContentHandler.__init__(self)
self.lang = None
self.translations = None
self.__translation_ids = translation_ids
self.__translation_placeholders = translation_placeholders
self.__element_stack = []
self.__string_ids = None
self.__text_scraps = []
self.__placeholders = {}
self.__characters_callback = None
def startDocument(self):
# Clear the lang and translations since a new document is being parsed.
self.lang = ''
self.translations = {}
def startElement(self, name, attrs):
self.__element_stack.append(name)
# translationbundle is the document element, and hosts the lang id.
if len(self.__element_stack) == 1:
assert name == 'translationbundle'
self.__OnLanguage(attrs.getValue('lang'))
if name == 'translation':
self.__OnOpenTranslation(attrs.getValue('id'))
elif name == 'ph':
# Modify __text_scraps only if <ph> is inside <translation> that is
# included into the output.
if self.__string_ids:
placeholder_text = self.__placeholders.get(attrs.getValue('name'), '')
if placeholder_text:
self.__text_scraps.append(placeholder_text)
def endElement(self, name):
popped = self.__element_stack.pop()
assert popped == name
if name == 'translation':
self.__OnCloseTranslation()
def characters(self, content):
if self.__characters_callback:
self.__characters_callback(self.__element_stack[-1], content)
def __OnLanguage(self, lang):
self.lang = lang.replace('-', '_').upper()
def __OnOpenTranslation(self, translation_id):
assert self.__string_ids is None
self.__string_ids = self.__translation_ids.get(translation_id)
if self.__string_ids:
self.__characters_callback = self.__OnTranslationText
self.__placeholders = self.__translation_placeholders.get(translation_id, {})
def __OnTranslationText(self, containing_element, message_text):
if message_text and containing_element == 'translation':
self.__text_scraps.append(message_text)
def __OnCloseTranslation(self):
if self.__string_ids:
translated_string = ''.join(self.__text_scraps).strip()
translated_string = vivaldi.ReplaceGoogleInString(translated_string)
for string_id in self.__string_ids:
self.translations[string_id] = translated_string
self.__string_ids = None
self.__text_scraps = []
self.__characters_callback = None
class StringRcMaker(object):
"""Makes .h and .rc files containing strings and translations."""
def __init__(self, inputs, expected_xtb_input_files, header_file, rc_file,
brand, first_resource_id, string_ids_to_extract, mode_specific_strings):
"""Constructs a maker.
Args:
inputs: A list of (grd_file, xtb_dir) pairs containing the source data.
expected_xtb_input_files: A list of xtb files that are expected to exist
in the inputs folders. If there is a discrepancy between what exists
and what is expected the script will fail.
header_file: The location of the header file to write containing all the
defined string IDs.
rc_file: The location of the rc file to write containing all the string
resources.
brand: The brand to check against when extracting mode-specific strings.
first_resource_id: The starting ID for the generated string resources.
string_ids_to_extract: The IDs of strings we want to import from the .grd
files and include in the generated RC file. These strings are universal
for all brands.
mode_specific_strings: A dictionary of strings that have conditional
values based on the brand's install mode. Refer to the documentation at
the top of this file for more information on the format of the
dictionary.
"""
self.inputs = inputs
self.expected_xtb_input_files = expected_xtb_input_files
self.expected_xtb_input_files.sort()
self.header_file = header_file
self.rc_file = rc_file
self.brand = brand
self.first_resource_id = first_resource_id
self.string_id_set = set(string_ids_to_extract)
self.mode_specific_strings = mode_specific_strings
self.__AddModeSpecificStringIds()
def MakeFiles(self):
translated_strings = self.__ReadSourceAndTranslatedStrings()
self.__WriteRCFile(translated_strings)
self.__WriteHeaderFile(translated_strings)
class __TranslationData(object):
"""A container of information about a single translation."""
def __init__(self, resource_id_str, language, translation):
self.resource_id_str = resource_id_str
self.language = language
self.translation = translation
def __lt__(self, other):
"""Allow __TranslationDatas to be sorted by id then by language."""
return (self.resource_id_str, self.language) < (other.resource_id_str,
other.language)
def __AddModeSpecificStringIds(self):
"""Adds the mode-specific strings for all of the current brand's install
modes to self.string_id_set."""
for string_id, brands in self.mode_specific_strings.items():
brand_strings = brands.get(self.brand)
if not brand_strings:
raise RuntimeError(
'No strings declared for brand \'%s\' in MODE_SPECIFIC_STRINGS for '
'message %s' % (self.brand, string_id))
self.string_id_set.update(brand_strings)
def __ReadSourceAndTranslatedStrings(self):
"""Reads the source strings and translations from all inputs."""
translated_strings = {}
all_xtb_files = []
for grd_file, xtb_dir in self.inputs:
# Get the name of the grd file sans extension.
source_name = os.path.splitext(os.path.basename(grd_file))[0]
# Compute a glob for the translation files.
xtb_pattern = os.path.join(os.path.dirname(grd_file), xtb_dir,
'%s*.xtb' % source_name)
local_xtb_files = [x.replace('\\', '/') for x in glob.glob(xtb_pattern)]
all_xtb_files.extend(local_xtb_files)
translated_strings.update(dict([((x.resource_id_str, x.language), x) for x in
self.__ReadSourceAndTranslationsFrom(grd_file, local_xtb_files)]))
msg_ids = set([x[0] for x in translated_strings.keys()])
locales = set([x[1] for x in translated_strings.keys()])
for msg_id in msg_ids:
for locale in locales:
if (msg_id, locale) not in translated_strings:
translated_strings[(msg_id, locale)] = self.__TranslationData(msg_id, locale,
translated_strings[(msg_id, "EN_US")].translation)
translated_strings = list(translated_strings.values())
translated_strings.sort()
all_xtb_files.sort()
# When filtering all_xtb_files must include all files from the filter.
if vivaldi_filter_translation_files and \
len(set(self.expected_xtb_input_files) - set(all_xtb_files)) == 0:
return translated_strings
if self.expected_xtb_input_files != all_xtb_files:
extra = list(set(all_xtb_files) - set(self.expected_xtb_input_files))
missing = list(set(self.expected_xtb_input_files) - set(all_xtb_files))
error = '''Asserted file list does not match.
Expected input files:
{}
Actual input files:
{}
Missing input files:
{}
Extra input files:
{}
'''
print(error.format('\n'.join(self.expected_xtb_input_files),
'\n'.join(all_xtb_files), '\n'.join(missing),
'\n'.join(extra)))
sys.exit(1)
return translated_strings
def __ReadSourceAndTranslationsFrom(self, grd_file, xtb_files):
"""Reads source strings and translations for a .grd file.
Reads the source strings and all available translations for the messages
identified by self.string_id_set (or all the messages if self.string_id_set
is empty). The source string is used where translations are missing.
Args:
grd_file: Path to a .grd file.
xtb_files: List of paths to .xtb files.
Returns:
An unsorted list of __TranslationData instances.
"""
sax_parser = sax.make_parser()
# Read the source (en-US) string from the .grd file.
grd_handler = GrdHandler(self.string_id_set, os.path.dirname(grd_file))
sax_parser.setContentHandler(grd_handler)
sax_parser.parse(grd_file)
source_strings = grd_handler.messages
grd_file_path = os.path.dirname(grd_file)
source_xtb_files = []
for xtb_file in grd_handler.referenced_xtb_files:
relative_xtb_file_path = (
os.path.join(grd_file_path, xtb_file).replace('\\', '/'))
source_xtb_files.append(relative_xtb_file_path)
missing_xtb_files = list(set(source_xtb_files) - set(xtb_files))
# Manually put the source strings as en-US in the list of translated
# strings.
translated_strings = []
for string_id, message_text in source_strings.items():
translated_strings.append(self.__TranslationData(string_id,
'EN_US',
message_text))
# Generate the message ID for each source string to correlate it with its
# translations in the .xtb files. Multiple source strings may have the same
# message text; hence the message id is mapped to a list of string ids
# instead of a single value.
translation_ids = {}
for (string_id, message_text) in source_strings.items():
translation_id = grd_handler.translation_ids[string_id]
translation_ids.setdefault(translation_id, []).append(string_id)
# Track any xtb files that appear in the xtb folder but are not present in
# the grd file.
extra_xtb_files = []
# Gather the translated strings from the .xtb files. Use the en-US string
# for any message lacking a translation.
xtb_handler = XtbHandler(translation_ids, grd_handler.translation_placeholders)
sax_parser.setContentHandler(xtb_handler)
for xtb_filename in xtb_files:
if not xtb_filename in source_xtb_files:
extra_xtb_files.append(xtb_filename)
if vivaldi_filter_translation_files:
if xtb_filename not in self.expected_xtb_input_files:
continue
sax_parser.parse(xtb_filename)
for string_id, message_text in source_strings.items():
translated_string = xtb_handler.translations.get(string_id,
message_text)
translated_strings.append(self.__TranslationData(string_id,
xtb_handler.lang,
translated_string))
if missing_xtb_files or extra_xtb_files:
if missing_xtb_files:
missing_error = ("There were files that were found in the .grd file "
"'{}' but do not exist on disk:\n{}")
print(missing_error.format(grd_file, '\n'.join(missing_xtb_files)))
if extra_xtb_files:
extra_error = ("There were files that exist on disk but were not found "
"in the .grd file '{}':\n{}")
print(extra_error.format(grd_file, '\n'.join(extra_xtb_files)))
sys.exit(1)
return translated_strings
def __WriteRCFile(self, translated_strings):
"""Writes a resource file with the strings provided in |translated_strings|.
"""
HEADER_TEXT = (
u'#include "%s"\n\n'
u'STRINGTABLE\n'
u'BEGIN\n'
) % os.path.basename(self.header_file)
FOOTER_TEXT = (
u'END\n'
)
with io.open(self.rc_file,
mode='w',
encoding='utf-16',
newline='\n') as outfile:
outfile.write(HEADER_TEXT)
for translation in translated_strings:
# Escape special characters for the rc file.
escaped_text = (translation.translation.replace('"', '""')
.replace('\t', '\\t')
.replace('\n', '\\n'))
outfile.write(u' %s "%s"\n' %
(translation.resource_id_str + '_' + translation.language,
escaped_text))
outfile.write(FOOTER_TEXT)
def __WriteHeaderFile(self, translated_strings):
"""Writes a .h file with resource ids."""
# TODO(grt): Stream the lines to the file rather than building this giant
# list of lines first.
lines = []
do_languages_lines = ['\n#define DO_LANGUAGES']
installer_string_mapping_lines = ['\n#define DO_STRING_MAPPING']
do_mode_strings_lines = ['\n#define DO_MODE_STRINGS']
# Write the values for how the languages ids are offset.
seen_languages = set()
offset_id = 0
for translation_data in translated_strings:
lang = translation_data.language
if lang not in seen_languages:
seen_languages.add(lang)
lines.append('#define IDS_L10N_OFFSET_%s %s' % (lang, offset_id))
do_languages_lines.append(' HANDLE_LANGUAGE(%s, IDS_L10N_OFFSET_%s)'
% (lang.replace('_', '-').lower(), lang))
offset_id += 1
else:
break
# Write the resource ids themselves.
resource_id = self.first_resource_id
for translation_data in translated_strings:
lines.append('#define %s %s' % (translation_data.resource_id_str + '_' +
translation_data.language,
resource_id))
resource_id += 1
# Handle mode-specific strings.
for string_id, brands in self.mode_specific_strings.items():
# Populate the DO_MODE_STRINGS macro.
brand_strings = brands.get(self.brand)
if not brand_strings:
raise RuntimeError(
'No strings declared for brand \'%s\' in MODE_SPECIFIC_STRINGS for '
'message %s' % (self.brand, string_id))
do_mode_strings_lines.append(
' HANDLE_MODE_STRING(%s_BASE, %s)'
% (string_id, ', '.join([ ('%s_BASE' % s) for s in brand_strings])))
# Generate defines for the specific strings to extract or take all of the
# strings found in the translations.
if self.string_id_set:
string_ids_to_write = self.string_id_set
else:
string_ids_to_write = {t.resource_id_str for t in translated_strings}
# Write out base ID values.
for string_id in sorted(string_ids_to_write):
lines.append('#define %s_BASE %s_%s' % (string_id,
string_id,
translated_strings[0].language))
installer_string_mapping_lines.append(' HANDLE_STRING(%s_BASE, %s)'
% (string_id, string_id))
with open(self.header_file, 'w') as outfile:
outfile.write('\n'.join(lines))
outfile.write('\n#ifndef RC_INVOKED')
outfile.write(' \\\n'.join(do_languages_lines))
outfile.write(' \\\n'.join(installer_string_mapping_lines))
outfile.write(' \\\n'.join(do_mode_strings_lines if len(do_mode_strings_lines) >1 else []))
# .rc files must end in a new line
outfile.write('\n#endif // ndef RC_INVOKED\n')
def BuildArgumentParser():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-b',
help='identifier of the browser brand (e.g., chromium). '
'This argument is mandatory if the module file included '
'by --extract-datafile contains MODE_SPECIFIC_STRINGS',
dest='brand')
parser.add_argument('-i', action='append',
required=True,
help='path to .grd file',
dest='input_grd_files')
parser.add_argument('-r', action='append',
required=True,
help='relative path to .xtb dir for each .grd file',
dest='input_xtb_relative_paths')
parser.add_argument('-x', action='append',
required=True,
help='expected xtb input files to read',
dest='expected_xtb_input_files')
parser.add_argument('--header-file',
required=True,
help='path to generated .h file to write',
dest='header_file')
parser.add_argument('--rc-file',
required=True,
help='path to generated .rc file to write',
dest='rc_file')
parser.add_argument('--first-resource-id',
type=int,
required=True,
help='first id for the generated string resources',
dest='first_resource_id')
parser.add_argument('--extract-datafile',
help='the python file to execute that will define the '
'specific strings to extract from the source .grd file. '
'The module should contain a global array STRING_IDS '
'that specifies which string IDs need to be extracted '
'(if no global member by that name exists, then all the '
'strings are extracted). It may also optionally contain '
'a dictionary MODE_SPECIFIC_STRINGS which defines the '
'mode-specific strings to use for a given brand that is '
'extracted.',
dest='extract_datafile')
return parser
def main():
parser = BuildArgumentParser()
args = parser.parse_args()
# Extract all the strings from the given grd by default.
string_ids_to_extract = []
mode_specific_strings = {}
# Check to see if an external module containing string extraction information
# was specified.
extract_datafile = args.extract_datafile
if extract_datafile:
datafile_locals = dict()
exec(open(extract_datafile).read(), globals(), datafile_locals)
if 'STRING_IDS' in datafile_locals:
string_ids_to_extract = datafile_locals['STRING_IDS']
if 'MODE_SPECIFIC_STRINGS' in datafile_locals:
if not string_ids_to_extract:
parser.error('MODE_SPECIFIC_STRINGS was specified in file ' +
extract_datafile + ' but there were no specific STRING_IDS '
'specified for extraction')
mode_specific_strings = datafile_locals['MODE_SPECIFIC_STRINGS']
brand = args.brand
if brand:
if not mode_specific_strings:
parser.error('A brand was specified (' + brand + ') but no mode '
'specific strings were given.')
valid_brands = [b for b in
next(iter(mode_specific_strings.values())).keys()]
if not brand in valid_brands:
parser.error('A brand was specified (' + brand + ') but it is not '
'a valid brand [' + ', '.join(valid_brands) + '].')
elif mode_specific_strings:
parser.error('MODE_SPECIFIC_STRINGS were specified but no brand was '
'given.')
grd_files = args.input_grd_files
xtb_relative_paths = args.input_xtb_relative_paths
if len(grd_files) != len(xtb_relative_paths):
parser.error('Mismatch in number of grd files ({}) and xtb relative '
'paths ({})'.format(len(grd_files), len(xtb_relative_paths)))
inputs = zip(grd_files, xtb_relative_paths)
StringRcMaker(inputs, args.expected_xtb_input_files, args.header_file,
args.rc_file, brand, args.first_resource_id, string_ids_to_extract,
mode_specific_strings).MakeFiles()
return 0
if '__main__' == __name__:
sys.exit(main())
|
ric2b/Vivaldi-browser
|
chromium/base/win/embedded_i18n/create_string_rc.py
|
Python
|
bsd-3-clause
| 27,808
|
[
"xTB"
] |
887cad6273faca31a4cd1108912115b377478535ca8e63661a433eb0b58b4eb8
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from camelot.core.orm import Entity, Session
from sqlalchemy.schema import Column
from sqlalchemy.types import Unicode, Integer
"""Classes to support the loading and updating of required datasets into the
database. The use of these classes is documented in the reference
documentation : :ref:`doc-fixtures`"""
class Fixture( Entity ):
"""Keep track of static data loaded into the database. This class keeps
track of data inserted into the database by storing the primary key and
the class name of the inserted data, and associating this with a `fixture
key` specified by the developer.
The developer can then use the `fixture key` to find out if this data has
been stored in the database, or to update it in future versions of the
application.
Only classes which have an integer field as their primary key can be
tracked."""
__tablename__ = 'fixture'
model = Column( Unicode( 256 ), index = True, nullable=False )
primary_key = Column( Integer, index = True, nullable=False )
fixture_key = Column( Unicode( 256 ), index = True, nullable=False )
fixture_class = Column( Unicode( 256 ), index = True, nullable=True )
@classmethod
def find_fixture_reference( cls,
entity,
fixture_key,
fixture_class = None ):
"""Find the :class:`Fixture` instance that refers to the data
stored for a fixture key.
:param entity: the class of the stored data
:param fixture_key: a string used to refer to the stored data
:param fixture_class: a string used to refer to a group of stored data
:return: a :class:`Fixture` instance referring to the stored data, or
None if no data was found.
"""
entity_name = unicode( entity.__name__ )
return Session().query( cls ).filter_by( model = unicode( entity_name ),
fixture_key = fixture_key,
fixture_class = fixture_class ).first()
@classmethod
def find_fixture( cls, entity, fixture_key, fixture_class = None ):
"""Find data that has been stored for a fixture key.
:param entity: the class of the stored data
:param fixture_key: a string used to refer to the stored data
:param fixture_class: a string used to refer to a group of stored data
:return: an instance of type entity, or None if no fixture is found"""
reference = cls.find_fixture_reference( entity,
fixture_key,
fixture_class )
if reference:
return entity.get( reference.primary_key )
@classmethod
def find_fixture_key( cls, entity, primary_key ):
"""Find the fixture key for an object of type entity with primary key
:param entity: the class of the stored data
:param primary_key: the integer primary key of the stored data
:return: a string with the fixture_key that refers to this data, None
if no such data is found
"""
entity_name = unicode( entity.__name__ )
fixture = Session().query( cls ).filter_by( model = entity_name,
primary_key = primary_key ).first()
if fixture:
return fixture.fixture_key
else:
return None
@classmethod
def find_fixture_key_and_class( cls, obj ):
"""Find out if an object was stored in the database through the fixture
mechanism and return its `fixture_key` and `fixture_class`
:param obj: the object stored in the database
:return: (fixture_key, fixture_class) if the object was registered
through the fixture mechanism, (None, None) otherwise
"""
entity_name = unicode( obj.__class__.__name__ )
fixture = Session().query( cls ).filter_by( model = entity_name,
primary_key = obj.id ).first()
if fixture:
return ( fixture.fixture_key, fixture.fixture_class )
else:
return ( None, None )
@classmethod
def find_fixture_keys_and_classes( cls, entity ):
"""Load all fixture keys of a certain entity class in batch.
:param entity: the class of the stored data
:return: a dictionary mapping the primary key of an object of type
entity to a tuple of type (fixture key, fixture class)
"""
entity_name = unicode( entity.__name__ )
fixtures = Session().query( cls ).filter_by( model = entity_name ).all()
return dict( ( f.primary_key, (f.fixture_key,
f.fixture_class) ) for f in fixtures )
@classmethod
def insert_or_update_fixture( cls,
entity,
fixture_key,
values,
fixture_class = None ):
"""Store data in the database through the fixture mechanism, to be
able to keep track of it later.
:param entity: the class of the stored data
:param fixture_key: a string used to refer to the stored data
:param values: a dictionary with the data that should be inserted or
updated in the database
:param fixture_class: a string used to refer to a group of stored data
:return: an object of type entity, either created or modified
"""
from sqlalchemy.orm.session import Session
obj = cls.find_fixture( entity, fixture_key, fixture_class )
store_fixture = False
if not obj:
obj = entity()
store_fixture = True
obj.from_dict( values )
Session.object_session( obj ).flush()
if store_fixture:
#
# The fixture data itself might have been deleted while its reference
# is still present, so update the existing reference instead of
# creating a new one
#
reference = cls.find_fixture_reference( entity,
fixture_key,
fixture_class )
if not reference:
reference = cls( model = unicode( entity.__name__ ),
primary_key = obj.id,
fixture_key = fixture_key,
fixture_class = fixture_class )
else:
reference.primary_key = obj.id
Session.object_session( reference ).flush()
return obj
@classmethod
def remove_all_fixtures( cls, entity ):
"""
Remove all data of a certain class from the database, if it was stored
through the fixture mechanism.
:param entity: the class of the stored data
"""
keys_and_classes = cls.find_fixture_keys_and_classes( entity ).values()
for fixture_key, fixture_class in keys_and_classes:
cls.remove_fixture( entity, fixture_key, fixture_class )
@classmethod
def remove_fixture( cls, entity, fixture_key, fixture_class ):
"""
Remove data from the database, if it was stored through the fixture
mechanism.
:param entity: the class of the stored data
:param fixture_key: a string used to refer to the stored data
:param fixture_class: a string used to refer to a group of stored data
"""
# remove the object itself
from sqlalchemy.orm.session import Session
obj = cls.find_fixture( entity, fixture_key, fixture_class)
obj.delete()
Session.object_session( obj ).flush()
# if this succeeds, remove the reference
reference = cls.find_fixture_reference( entity,
fixture_key,
fixture_class )
reference.delete()
Session.object_session( reference ).flush()
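A minimal usage sketch of the Fixture mechanism above, placed here for illustration only: it assumes a configured Camelot session and an entity with an integer primary key and a from_dict method, and the Category entity, fixture key and values are invented.
# Hypothetical usage sketch (Category, keys and values are illustrative).
from sqlalchemy.schema import Column
from sqlalchemy.types import Unicode
from camelot.core.orm import Entity
class Category( Entity ):
    __tablename__ = 'illustration_category'
    name = Column( Unicode( 256 ) )
def load_default_category():
    # Creates the row on first run and updates it on later runs, keyed by the
    # fixture key; find_fixture can retrieve the same row again later.
    return Fixture.insert_or_update_fixture(
        Category,
        fixture_key = u'default-category',
        values = dict( name = u'Default' ),
        fixture_class = u'demo' )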
class FixtureVersion( Entity ):
"""Keep track of the version the fixtures have in the current database, the
subversion revision number is a good candidate to be used as a fixture
version.
:return: an integer representing the current version, 0 if no version found
"""
__tablename__ = 'fixture_version'
fixture_version = Column( Integer, index = True, nullable=False, default=0 )
fixture_class = Column( Unicode( 256 ), index = True, nullable=True,
unique=True )
@classmethod
def get_current_version( cls, fixture_class = None ):
"""Get the current version of the fixtures in the database for a certain
fixture class.
:param fixture_class: the fixture class for which to get the version
:return: an integer representing the current version, 0 if no version found
"""
obj = Session().query( cls ).filter_by( fixture_class = fixture_class ).first()
if obj:
return obj.fixture_version
return 0
@classmethod
def set_current_version( cls, fixture_class = None, fixture_version = 0 ):
"""Set the current version of the fixtures in the database for a certain
fixture class.
:param fixture_class: the fixture class for which to set the version
:param fixture_version: the version number to which to set the fixture
version
"""
from sqlalchemy.orm.session import Session
obj = Session().query( cls ).filter_by( fixture_class = fixture_class ).first()
if not obj:
obj = FixtureVersion( fixture_class = fixture_class )
obj.fixture_version = fixture_version
Session.object_session( obj ).flush()
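A minimal sketch of how get_current_version and set_current_version could gate a fixture update, following the docstring's suggestion of using a revision number as the version; the fixture class name and version numbers below are illustrative.
# Hypothetical version-gated loader (fixture class and version are illustrative).
def update_demo_fixtures():
    if FixtureVersion.get_current_version( fixture_class = u'demo' ) < 2:
        # ... call Fixture.insert_or_update_fixture for the new data here ...
        FixtureVersion.set_current_version( fixture_class = u'demo',
                                            fixture_version = 2 )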
|
jeroendierckx/Camelot
|
camelot/model/fixture.py
|
Python
|
gpl-2.0
| 11,034
|
[
"VisIt"
] |
c77fd9a664b26d952960fc76f6610453af065ff5b1f534d9e2bb4593b2e64f40
|
from django.shortcuts import render_to_response, get_object_or_404
from django.db import models
from django.http import Http404, HttpResponseRedirect
from django.core.urlresolvers import resolve, reverse
from pprint import pprint
from django.template.context import Context, RequestContext
from forms import TellAFriendForm
from django.template.loader import select_template
from email_utils import send_link
def tellafriend(request, theme=None, form_class=TellAFriendForm):
"""
Takes a site URL and an optional theme (a template sub-directory) and emails the URL to a friend.
On first visit, the URL is tested for validity and a form is rendered for extra info.
On valid form POST, the URL in question is internally processed, and we get the context used in that URL - the view needs a slight mod to be able to do this.
The context is passed to the email template as 'extra_context', so we can use extra info in the email template.
TODO: rate limiting, HTML cleansing in the message, a simpler version for telling people (and us - eg reporting, voting) about model instances.
"""
# Is it a valid and working URL on this site?
try:
url = request.REQUEST['url']  # check POST then GET
except KeyError:
raise Http404("No URL to send")
view, args, kwargs = resolve(url)
"""
I haven't found a non-hacky way to derive the context from a view's response. I tried with django test Client, but that seems to operate on globals which interrupt the view surrounding this code.
To substitute, every view pointed to by tellafriend should accept 'return_context_dict = True' and return just the context.
Alternative strategies - how does debugtoolbar do it? Can we do this with a decorator?
"""
kwargs['return_context_dict'] = True
kwargs['request'] = request
extra_context = view(*args, **kwargs) #may raise Http404
TEMPLATE_DIR = "tellafriend"
email_html_template = []
email_txt_template = []
subject_template = []
form_template = []
success_template = []
if theme:
SUB_TEMPLATE_DIR = TEMPLATE_DIR + "/%s" % theme
email_html_template += ["%s/email.html" % SUB_TEMPLATE_DIR]
email_txt_template += ["%s/email.txt" % SUB_TEMPLATE_DIR]
subject_template += ["%s/subject.txt" % SUB_TEMPLATE_DIR]
form_template += ["%s/form_page.html" % SUB_TEMPLATE_DIR]
success_template += ["%s/success.html" % SUB_TEMPLATE_DIR]
email_html_template += ["%s/email.html" % TEMPLATE_DIR]
email_txt_template += ["%s/email.txt" % TEMPLATE_DIR]
subject_template += ["%s/subject.txt" % TEMPLATE_DIR]
form_template += ["%s/form_page.html" % TEMPLATE_DIR]
success_template += ["%s/success.html" % TEMPLATE_DIR]
if request.method == 'POST':
form = form_class(request.POST)
if form.is_valid():
#SEND THE EMAIL
email_context = {
'sender_name': form.cleaned_data['sender_name'],
'sender_email': form.cleaned_data['sender_email'],
'personal_message': form.cleaned_data['personal_message'],
'recipient_name': form.cleaned_data.get('recipient_name', form.cleaned_data['recipient_email']),
'recipient_email': form.cleaned_data['recipient_email'],
'url': url,
'extra_context': extra_context,
}
email_html_template = select_template(email_html_template)
email_txt_template = select_template(email_txt_template)
subject_template = select_template(subject_template)
send_link(
sender_name = form.cleaned_data['sender_name'],
sender_email = form.cleaned_data['sender_email'],
recipient_name = form.cleaned_data.get('recipient_name', form.cleaned_data['recipient_email']),
recipient_email = form.cleaned_data['recipient_email'],
email_html_template = email_html_template,
email_txt_template = email_txt_template,
subject_template = subject_template,
context = RequestContext(request, email_context),
)
# trying to make it redirect on success
request.session['success_template'] = success_template
# there's no need to make a copy really as we've finished with email_context, but copy it anyway in case that's not always true
reduced_email_context = email_context.copy()
# extra_context can't be pickled so can't save it in the session
del reduced_email_context['extra_context']
request.session['email_context'] = reduced_email_context
return HttpResponseRedirect(reverse('message_sent'))
else: #GET
form = form_class(initial={'url': url })
return render_to_response(form_template, {
'form': form,
'url': url,
'extra_context': extra_context,
}, context_instance=RequestContext(request))
def message_sent(request):
success_template = request.session.get('success_template', None)
email_context = request.session.get('email_context', None)
if success_template and email_context:
del request.session['success_template']
del request.session['email_context']
return render_to_response(success_template, email_context, context_instance=RequestContext(request))
return HttpResponseRedirect('/')
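As a companion to the module docstring above, a hedged sketch of the 'return_context_dict' convention a target view would need to support; the Article model, import path, URL pattern and template name are assumptions for illustration and follow the same Django 1.x style as this module.
# Hypothetical target view honouring the return_context_dict convention.
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from articles.models import Article  # assumed model, for illustration only
def article_detail(request, slug, return_context_dict=False):
    article = get_object_or_404(Article, slug=slug)
    context = {'article': article}
    if return_context_dict:
        # tellafriend calls the view with return_context_dict=True and only
        # wants the context dictionary, not a rendered response.
        return context
    return render_to_response('articles/detail.html', context,
                              context_instance=RequestContext(request))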
|
gregplaysguitar/glamkit
|
glamkit/incubated/tellafriend/views.py
|
Python
|
bsd-3-clause
| 5,531
|
[
"VisIt"
] |
e055725d9608325943c95fd3d463a4e04073daef00fc86febdd193b02231b388
|
# Copyright 1999 by Jeffrey Chang. All rights reserved.
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# Revisions Copyright 2007 by Peter Cock. All rights reserved.
# Revisions Copyright 2009 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module provides code to work with the prosite dat file from
Prosite.
http://www.expasy.ch/prosite/
Tested with:
Release 20.43, 10-Feb-2009
Functions:
read Reads a Prosite file containing one Prosite record
parse Iterates over records in a Prosite file.
Classes:
Record Holds Prosite data.
"""
def parse(handle):
"""Parse Prosite records.
This function is for parsing Prosite files containing multiple
records.
handle - handle to the file."""
while True:
record = __read(handle)
if not record:
break
yield record
def read(handle):
"""Read one Prosite record.
This function is for parsing Prosite files containing
exactly one record.
handle - handle to the file."""
record = __read(handle)
# We should have reached the end of the record by now
remainder = handle.read()
if remainder:
raise ValueError("More than one Prosite record found")
return record
class Record(object):
"""Holds information from a Prosite record.
Members:
name ID of the record. e.g. ADH_ZINC
type Type of entry. e.g. PATTERN, MATRIX, or RULE
accession e.g. PS00387
created Date the entry was created. (MMM-YYYY)
data_update Date the 'primary' data was last updated.
info_update Date data other than 'primary' data was last updated.
pdoc ID of the PROSITE DOCumentation.
description Free-format description.
pattern The PROSITE pattern. See docs.
matrix List of strings that describes a matrix entry.
rules List of rule definitions (from RU lines). (strings)
prorules List of prorules (from PR lines). (strings)
NUMERICAL RESULTS
nr_sp_release SwissProt release.
nr_sp_seqs Number of seqs in that release of Swiss-Prot. (int)
nr_total Number of hits in Swiss-Prot. tuple of (hits, seqs)
nr_positive True positives. tuple of (hits, seqs)
nr_unknown Could be positives. tuple of (hits, seqs)
nr_false_pos False positives. tuple of (hits, seqs)
nr_false_neg False negatives. (int)
nr_partial False negatives, because they are fragments. (int)
COMMENTS
cc_taxo_range Taxonomic range. See docs for format
cc_max_repeat Maximum number of repetitions in a protein
cc_site Interesting site. list of tuples (pattern pos, desc.)
cc_skip_flag Can this entry be ignored?
cc_matrix_type
cc_scaling_db
cc_author
cc_ft_key
cc_ft_desc
cc_version version number (introduced in release 19.0)
DATA BANK REFERENCES - The following are all
lists of tuples (swiss-prot accession,
swiss-prot name)
dr_positive
dr_false_neg
dr_false_pos
dr_potential Potential hits, but fingerprint region not yet available.
dr_unknown Could possibly belong
pdb_structs List of PDB entries.
"""
def __init__(self):
self.name = ''
self.type = ''
self.accession = ''
self.created = ''
self.data_update = ''
self.info_update = ''
self.pdoc = ''
self.description = ''
self.pattern = ''
self.matrix = []
self.rules = []
self.prorules = []
self.postprocessing = []
self.nr_sp_release = ''
self.nr_sp_seqs = ''
self.nr_total = (None, None)
self.nr_positive = (None, None)
self.nr_unknown = (None, None)
self.nr_false_pos = (None, None)
self.nr_false_neg = None
self.nr_partial = None
self.cc_taxo_range = ''
self.cc_max_repeat = ''
self.cc_site = []
self.cc_skip_flag = ''
self.dr_positive = []
self.dr_false_neg = []
self.dr_false_pos = []
self.dr_potential = []
self.dr_unknown = []
self.pdb_structs = []
# Everything below are private functions
def __read(handle):
import re
record = None
for line in handle:
keyword, value = line[:2], line[5:].rstrip()
if keyword=='ID':
record = Record()
cols = value.split("; ")
if len(cols) != 2:
raise ValueError("I don't understand identification line\n%s" \
% line)
record.name = cols[0]
record.type = cols[1].rstrip('.') # don't want '.'
elif keyword=='AC':
record.accession = value.rstrip(';')
elif keyword=='DT':
dates = value.rstrip('.').split("; ")
if (not dates[0].endswith('(CREATED)')) or \
(not dates[1].endswith('(DATA UPDATE)')) or \
(not dates[2].endswith('(INFO UPDATE)')):
raise ValueError("I don't understand date line\n%s" % line)
record.created = dates[0].rstrip(' (CREATED)')
record.data_update = dates[1].rstrip(' (DATA UPDATE)')
record.info_update = dates[2].rstrip(' (INFO UPDATE)')
elif keyword=='DE':
record.description = value
elif keyword=='PA':
record.pattern += value
elif keyword=='MA':
record.matrix.append(value)
elif keyword=='PP':
record.postprocessing.extend(value.split(";"))
elif keyword=='RU':
record.rules.append(value)
elif keyword=='NR':
cols = value.split(";")
for col in cols:
if not col:
continue
qual, data = [word.lstrip() for word in col.split("=")]
if qual == '/RELEASE':
release, seqs = data.split(",")
record.nr_sp_release = release
record.nr_sp_seqs = int(seqs)
elif qual == '/FALSE_NEG':
record.nr_false_neg = int(data)
elif qual == '/PARTIAL':
record.nr_partial = int(data)
elif qual in ['/TOTAL', '/POSITIVE', '/UNKNOWN', '/FALSE_POS']:
m = re.match(r'(\d+)\((\d+)\)', data)
if not m:
raise Exception("Broken data %s in comment line\n%s" \
% (repr(data), line))
hits = tuple(map(int, m.groups()))
if(qual == "/TOTAL"):
record.nr_total = hits
elif(qual == "/POSITIVE"):
record.nr_positive = hits
elif(qual == "/UNKNOWN"):
record.nr_unknown = hits
elif(qual == "/FALSE_POS"):
record.nr_false_pos = hits
else:
raise ValueError("Unknown qual %s in comment line\n%s" \
% (repr(qual), line))
elif keyword=='CC':
#Expect CC lines like this:
#CC /TAXO-RANGE=??EPV; /MAX-REPEAT=2;
#Can (normally) split on ";" and then on "="
cols = value.split(";")
for col in cols:
if not col or col[:17] == 'Automatic scaling':
# DNAJ_2 in Release 15 has a non-standard comment line:
# CC Automatic scaling using reversed database
# Throw it away. (Should I keep it?)
continue
if col.count("=") == 0:
#Missing qualifier! Can we recover gracefully?
#For example, from Bug 2403, in PS50293 have:
#CC /AUTHOR=K_Hofmann; N_Hulo
continue
qual, data = [word.lstrip() for word in col.split("=")]
if qual == '/TAXO-RANGE':
record.cc_taxo_range = data
elif qual == '/MAX-REPEAT':
record.cc_max_repeat = data
elif qual == '/SITE':
pos, desc = data.split(",")
record.cc_site.append((int(pos), desc))
elif qual == '/SKIP-FLAG':
record.cc_skip_flag = data
elif qual == '/MATRIX_TYPE':
record.cc_matrix_type = data
elif qual == '/SCALING_DB':
record.cc_scaling_db = data
elif qual == '/AUTHOR':
record.cc_author = data
elif qual == '/FT_KEY':
record.cc_ft_key = data
elif qual == '/FT_DESC':
record.cc_ft_desc = data
elif qual == '/VERSION':
record.cc_version = data
else:
raise ValueError("Unknown qual %s in comment line\n%s" \
% (repr(qual), line))
elif keyword=='DR':
refs = value.split(";")
for ref in refs:
if not ref:
continue
acc, name, type = [word.strip() for word in ref.split(",")]
if type == 'T':
record.dr_positive.append((acc, name))
elif type == 'F':
record.dr_false_pos.append((acc, name))
elif type == 'N':
record.dr_false_neg.append((acc, name))
elif type == 'P':
record.dr_potential.append((acc, name))
elif type == '?':
record.dr_unknown.append((acc, name))
else:
raise ValueError("I don't understand type flag %s" % type)
elif keyword=='3D':
cols = value.split()
for id in cols:
record.pdb_structs.append(id.rstrip(';'))
elif keyword=='PR':
rules = value.split(";")
record.prorules.extend(rules)
elif keyword=='DO':
record.pdoc = value.rstrip(';')
elif keyword=='CC':
continue
elif keyword=='//':
if not record:
# Then this was the copyright statement
continue
break
else:
raise ValueError("Unknown keyword %s found" % keyword)
else:
return
if not record:
raise ValueError("Unexpected end of stream.")
return record
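A minimal usage sketch for the parse and read functions above, assuming the Bio.ExPASy package layout shown in the path below; the data file name is an assumption and the attributes come from the Record docstring.
# Hypothetical usage (path to the Prosite data file is illustrative).
from Bio.ExPASy import Prosite
handle = open("prosite.dat")
for record in Prosite.parse(handle):
    print("%s %s %s" % (record.accession, record.name, record.type))
handle.close()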
|
bryback/quickseq
|
genescript/Bio/ExPASy/Prosite.py
|
Python
|
mit
| 10,917
|
[
"Biopython"
] |
bdddc23ac6aedf0e55b7579507d9e90c8570a965967b52157f8d0be956d80049
|
# Copyright 2008 Brian Boyer, Ryan Mark, Angela Nitzke, Joshua Pollock,
# Stuart Tiffen, Kayla Webley and the Medill School of Journalism, Northwestern
# University.
#
# This file is part of Crunchberry Pie.
#
# Crunchberry Pie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Crunchberry Pie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Crunchberry Pie. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from bartender.models import Article
from django.contrib.auth.models import User
from django import forms
from django.forms import ModelForm
# Create your models here.
class Question(models.Model):
article = models.ForeignKey(Article)
block = models.IntegerField(blank=True,default=-1)
user = models.ForeignKey(User)
text = models.TextField("Ask a question.")
notify = models.BooleanField()
offensive = models.BooleanField()
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
if self.offensive:
return "Question #"+str(self.id)+" on '"+self.article.headline+"' (offensive)"
else:
return "Question #"+str(self.id)+" on '"+self.article.headline+"'"
def get_absolute_url(self):
return "%s#question-%s-%s" % (self.article.get_absolute_url(),self.block,self.id)
class QuestionForm(ModelForm):
text = forms.CharField(widget=forms.Textarea(attrs={'rows':5}))
class Meta:
model = Question
exclude = ('article', 'block', 'user', 'offensive')
class Answer(models.Model):
question = models.ForeignKey(Question)
user = models.ForeignKey(User)
text = models.TextField("Answer the question.")
reference = models.URLField(blank=True)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "Answer #"+str(self.id)+" to question #"+str(self.question.id)+" on '"+self.question.article.headline+"'"
def get_absolute_url(self):
return "%s#answer-%s-%s" % (self.question.article.get_absolute_url(),self.question.block,self.id)
class AnswerForm(ModelForm):
text = forms.CharField(widget=forms.Textarea(attrs={'rows':3}))
class Meta:
model = Answer
exclude = ('question', 'user', 'offensive')
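A hedged sketch of a view that could use the QuestionForm above, written in the same Django 1.x idiom as the rest of this app; the import paths, URL handling and template name are assumptions for illustration, and an authenticated user is assumed for request.user.
# Hypothetical view using QuestionForm (template and import paths illustrative).
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect
from django.template.context import RequestContext
from bartender.models import Article
from questions.models import QuestionForm
def ask_question(request, article_id):
    article = get_object_or_404(Article, pk=article_id)
    if request.method == 'POST':
        form = QuestionForm(request.POST)
        if form.is_valid():
            question = form.save(commit=False)
            question.article = article
            question.user = request.user  # assumes an authenticated user
            question.offensive = False    # excluded from the form, set explicitly
            question.save()
            return HttpResponseRedirect(question.get_absolute_url())
    else:
        form = QuestionForm()
    return render_to_response('questions/ask.html',
                              {'form': form, 'article': article},
                              context_instance=RequestContext(request))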
|
brianboyer/newsmixer
|
social/questions/models.py
|
Python
|
gpl-3.0
| 2,701
|
[
"Brian"
] |
f3f97162fb8f2720e191dfca4f67f407c67e5c21c5ca92582ebd955777ca3a8e
|