| text (stringlengths 12-1.05M) | repo_name (stringlengths 5-86) | path (stringlengths 4-191) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 12-1.05M) | keyword (listlengths 1-23) | text_hash (stringlengths 64-64) |
|---|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=80 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2014 Raoul Snyman #
# Portions copyright (c) 2008-2014 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`error_codes` module provides the error codes and messages
"""
__version__ = '0.0.2'
__v = __version__.split('.')
__version_hex__ = int(__v[0]) << 24 | \
int(__v[1]) << 16 | \
int(__v[2]) << 8
__module = 'projectors'
import logging
log = logging.getLogger(__name__)
log.info('error_codes loaded')
import errno
import os
import socket
"""
This module provides the error and status codes used for the projector
control modules.
"""
__all__ = ['S_OK','E_GENERAL','E_NOTCONNECTED','E_NETWORK','E_FAN','E_LAMP',
'E_TEMP','E_COVER','E_FILTER','E_AUTHENTICATION','E_UNDEFINED',
'E_PARAMETER','E_UNAVAILABLE','E_PROJECTOR','E_INVALID_DATA',
'E_WARN','E_ERROR','S_NOT_CONNECTED','S_CONNECTING','S_STATUS',
'S_OFF','S_INITIALIZE','S_STANDBY','S_WARMUP','S_ON','S_COOLDOWN',
'S_INFO','err_code','err_msg',
'ErrorCodes',
'ProjectorError',
'ProjectorAuthenticationError',
'ProjectorCommandError',
'ProjectorFailedError',
           'ProjectorFanFailure',
           'ProjectorLampError',
'ProjectorNetworkError',
'ProjectorNotAvailableError',
'ProjectorNotConnectedError',
'ProjectorParameterError'
]
S_OK = E_OK = 0
# Error codes. Start at 200 so we don't duplicate system error codes.
E_GENERAL = 200 # Unknown error
E_NOTCONNECTED = 201
E_NETWORK = 202
E_FAN = 203
E_LAMP = 204
E_TEMP = 205
E_COVER = 206
E_FILTER = 207
E_AUTHENTICATION = 208 # ERRA
E_UNDEFINED = 209 # ERR1
E_PARAMETER = 210 # ERR2
E_UNAVAILABLE = 211 # ERR3
E_PROJECTOR = 212 # ERR4
E_INVALID_DATA = 213
E_WARN = 214
E_ERROR = 215
# Status codes start at 300
S_NOT_CONNECTED = 300
S_CONNECTING = 301
S_STATUS = 302
S_OFF = 303
S_INITIALIZE = 304
S_STANDBY = 305
S_WARMUP = 306
S_ON = 307
S_COOLDOWN = 308
S_INFO = 309
# Map error/status codes to string
err_code = { 0: 'S_OK', 200: 'E_GENERAL', 201: 'E_NOTCONNECTED',
202: 'E_NETWORK', 203: 'E_FAN', 204: 'E_LAMP', 205: 'E_TEMP',
206: 'E_COVER', 207: 'E_FILTER', 208: 'E_AUTHENTICATION',
209: 'E_UNDEFINED', 210: 'E_PARAMETER', 211: 'E_UNAVAILABLE',
212: 'E_PROJECTOR', 213: 'E_INVALID_DATA', 214: 'E_WARN',
215: 'E_ERROR', 300: 'S_NOT_CONNECTED', 301: 'S_CONNECTING',
302: 'S_STATUS', 303: 'S_OFF', 304: 'S_INITIALIZE', 305: 'S_STANDBY',
306: 'S_WARMUP', 307: 'S_ON', 308: 'S_COOLDOWN', 309: 'S_INFO'
}
# Map error/status codes to message strings
err_msg = { S_OK: 'OK',
E_GENERAL: "General projector error", # Error codes
E_NOTCONNECTED: "Not connected error",
E_NETWORK: "Network error",
E_LAMP: "Lamp error",
E_FAN: "Fan error",
E_TEMP: "High temperature detected",
E_COVER: "Cover open detected",
E_FILTER: "Check filter",
E_AUTHENTICATION: "Authentication Error",
E_UNDEFINED: "Undefined Command",
E_PARAMETER: "Invalid Parameter",
E_UNAVAILABLE: "Projector Busy",
E_PROJECTOR: "Projector/Display Error",
E_INVALID_DATA: "Invald packet received",
E_WARN: "Warning condition detected",
E_ERROR: "Error condition detected",
S_NOT_CONNECTED: "Not connected", # Status codes
S_CONNECTING: "Connecting",
S_STATUS: "Getting status",
S_OFF: "Off",
S_INITIALIZE: "Initialize in progress",
S_STANDBY: "Power standby",
S_WARMUP: "Warmup in progress",
S_ON: "Power on",
S_COOLDOWN: "Cooldown in progress",
S_INFO: "Projector Information availble"
}
# Error classes
class ProjectorError(Exception):
"""
Base projector error class
"""
def __init__(self, errno=None, msg=None):
self.errno = errno if errno is not None else E_GENERAL
self.msg = msg if msg is not None else u'Unknown projector error'
    def __repr__(self):
        return '(%s, %s)' % (self.errno, self.msg)
def __str__(self):
return self.msg
class ProjectorNotConnectedError(ProjectorError):
"""
Projector not connected
"""
def __init__(self, errno=E_NOTCONNECTED,
msg=u'Projector Not Connected'):
super(ProjectorNotConnectedError, self).__init__(errno=errno, msg=msg)
class ProjectorParameterError(ProjectorError):
"""
Invalid parameter for command
"""
def __init__(self, errno=E_PARAMETER,
msg=u'Projector invalid option'):
super(ProjectorParameterError, self).__init__(errno=errno, msg=msg)
class ProjectorCommandError(ProjectorError):
"""
Invalid command
"""
def __init__(self, errno=E_UNDEFINED,
msg=u'Projector Undefined command'):
super(ProjectorCommandError, self).__init__(errno=errno, msg=msg)
class ProjectorNotAvailableError(ProjectorError):
"""
    Projector not available (busy)
"""
def __init__(self, errno=E_UNAVAILABLE,
msg=u'Projector Unavailable (busy)'):
super(ProjectorNotAvailableError, self).__init__(errno=errno, msg=msg)
class ProjectorLampError(ProjectorError):
"""
Lamp failure
"""
def __init__(self, errno=E_LAMP,
msg=u'Lamp Failure'):
super(ProjectorLampError, self).__init__(errno=errno, msg=msg)
class ProjectorFailedError(ProjectorError):
"""
General projector failure
"""
def __init__(self, errno=E_GENERAL,
msg=u'Unknown projector failure'):
super(ProjectorFailedError, self).__init__(errno=errno, msg=msg)
class ProjectorAuthenticationError(ProjectorError):
"""
    Authentication error
    """
    def __init__(self, errno=E_AUTHENTICATION,
msg=u'Authentication error'):
super(ProjectorAuthenticationError, self).__init__(errno=errno, msg=msg)
class ProjectorNetworkError(ProjectorError):
"""
Network failure
"""
def __init__(self, errno=E_NETWORK,
msg=u'Projector network error'):
super(ProjectorNetworkError, self).__init__(errno=errno, msg=msg)
class ProjectorFanFailure(ProjectorError):
"""
Fan failure
"""
def __init__(self, errno=E_FAN,
msg=u'Projector fan failure'):
        super(ProjectorFanFailure, self).__init__(errno=errno, msg=msg)
class ErrorCodes(object):
'''
Basic class to map system or local errors and error messages.
'''
def __init__(self, err):
self.errno = err
if err in err_msg:
self.msg = err_msg[err]
elif err in errno.errorcode:
self.msg = os.strerror(err)
else:
self.msg = ''
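# --- Illustrative usage sketch (editor's addition, not part of the original
# module): ErrorCodes resolves the local projector codes defined above and
# falls back to the system errno table for anything else.
if __name__ == '__main__':
    status = ErrorCodes(E_LAMP)
    # Local code: resolved via err_msg -> "Lamp error"
    log.info('%s (%s): %s' % (err_code[status.errno], status.errno, status.msg))
    # System code: resolved via errno.errorcode / os.strerror
    refused = ErrorCodes(errno.ECONNREFUSED)
    log.info('%s: %s' % (refused.errno, refused.msg))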
repo_name: alisonken1/openlp-projector-2.0 | path: openlp/projectors/error_codes.py | language: Python | license: gpl-2.0 | size: 8,670 | keyword: ["Brian"] | text_hash: fcdb77ca4fef53905af42681142f78058c5f9dc121daebbbe2a0d58476d065a1
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import copy, sys
class Visitor:
def defaultVisit(self, node):
raise Exception, "INTERNAL ERROR: no visitor for node type `%s'"% (
node.__class__.__name__)
def visitWhitespace(self, ws):
pass
def visitFile(self, f):
for thing in f.stuff:
thing.accept(self)
def visitCppDirective(self, ppd):
pass
def visitBlock(self, block):
for stmt in block.stmts:
stmt.accept(self)
def visitNamespace(self, ns):
self.visitBlock(ns)
def visitType(self, type):
pass
def visitTypeArray(self, ta):
ta.basetype.accept(self)
ta.nmemb.accept(self)
def visitTypeEnum(self, enum):
pass
def visitTypeUnion(self, union):
for t, name in union.components:
t.accept(self)
def visitTypedef(self, tdef):
tdef.fromtype.accept(self)
def visitUsing(self, us):
us.type.accept(self)
def visitForwardDecl(self, fd):
pass
def visitDecl(self, decl):
decl.type.accept(self)
def visitParam(self, param):
self.visitDecl(param)
if param.default is not None:
param.default.accept(self)
def visitClass(self, cls):
for inherit in cls.inherits:
inherit.accept(self)
self.visitBlock(cls)
def visitInherit(self, inh):
pass
def visitFriendClassDecl(self, fcd):
pass
def visitMethodDecl(self, meth):
for param in meth.params:
param.accept(self)
if meth.ret is not None:
meth.ret.accept(self)
if meth.typeop is not None:
meth.typeop.accept(self)
if meth.T is not None:
meth.T.accept(self)
def visitMethodDefn(self, meth):
meth.decl.accept(self)
self.visitBlock(meth)
def visitFunctionDecl(self, fun):
self.visitMethodDecl(fun)
def visitFunctionDefn(self, fd):
self.visitMethodDefn(fd)
def visitConstructorDecl(self, ctor):
self.visitMethodDecl(ctor)
def visitConstructorDefn(self, cd):
cd.decl.accept(self)
for init in cd.memberinits:
init.accept(self)
self.visitBlock(cd)
def visitDestructorDecl(self, dtor):
self.visitMethodDecl(dtor)
def visitDestructorDefn(self, dd):
dd.decl.accept(self)
self.visitBlock(dd)
def visitExprLiteral(self, l):
pass
def visitExprVar(self, v):
pass
def visitExprPrefixUnop(self, e):
e.expr.accept(self)
def visitExprBinary(self, e):
e.left.accept(self)
e.right.accept(self)
def visitExprConditional(self, c):
c.cond.accept(self)
c.ife.accept(self)
c.elsee.accept(self)
def visitExprAddrOf(self, eao):
self.visitExprPrefixUnop(eao)
def visitExprDeref(self, ed):
self.visitExprPrefixUnop(ed)
def visitExprNot(self, en):
self.visitExprPrefixUnop(en)
def visitExprCast(self, ec):
ec.expr.accept(self)
def visitExprIndex(self, ei):
ei.arr.accept(self)
ei.idx.accept(self)
def visitExprSelect(self, es):
es.obj.accept(self)
def visitExprAssn(self, ea):
ea.lhs.accept(self)
ea.rhs.accept(self)
def visitExprCall(self, ec):
ec.func.accept(self)
for arg in ec.args:
arg.accept(self)
def visitExprNew(self, en):
en.ctype.accept(self)
if en.newargs is not None:
for arg in en.newargs:
arg.accept(self)
if en.args is not None:
for arg in en.args:
arg.accept(self)
def visitExprDelete(self, ed):
ed.obj.accept(self)
def visitExprMemberInit(self, minit):
self.visitExprCall(minit)
def visitExprSizeof(self, es):
self.visitExprCall(es)
def visitStmtBlock(self, sb):
self.visitBlock(sb)
def visitStmtDecl(self, sd):
sd.decl.accept(self)
if sd.init is not None:
sd.init.accept(self)
def visitLabel(self, label):
pass
def visitCaseLabel(self, case):
pass
def visitDefaultLabel(self, dl):
pass
def visitStmtIf(self, si):
si.cond.accept(self)
si.ifb.accept(self)
if si.elseb is not None:
si.elseb.accept(self)
def visitStmtFor(self, sf):
if sf.init is not None:
sf.init.accept(self)
if sf.cond is not None:
sf.cond.accept(self)
if sf.update is not None:
sf.update.accept(self)
def visitStmtSwitch(self, ss):
ss.expr.accept(self)
self.visitBlock(ss)
def visitStmtBreak(self, sb):
pass
def visitStmtExpr(self, se):
se.expr.accept(self)
def visitStmtReturn(self, sr):
if sr.expr is not None:
sr.expr.accept(self)
##------------------------------
class Node:
def __init__(self):
pass
def accept(self, visitor):
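        # Double dispatch: look up a visitor method named after this node's
        # concrete class (e.g. visitExprCall for an ExprCall node) and fall
        # back to defaultVisit() when the visitor does not define one.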
visit = getattr(visitor, 'visit'+ self.__class__.__name__, None)
if visit is None:
return getattr(visitor, 'defaultVisit')(self)
return visit(self)
class Whitespace(Node):
# yes, this is silly. but we need to stick comments in the
# generated code without resorting to more serious hacks
def __init__(self, ws, indent=0):
Node.__init__(self)
self.ws = ws
self.indent = indent
Whitespace.NL = Whitespace('\n')
class File(Node):
def __init__(self, filename):
Node.__init__(self)
self.name = filename
# array of stuff in the file --- stmts and preprocessor thingies
self.stuff = [ ]
def addthing(self, thing):
assert thing is not None
assert not isinstance(thing, list)
self.stuff.append(thing)
def addthings(self, things):
for t in things: self.addthing(t)
# "look like" a Block so code doesn't have to care whether they're
# in global scope or not
def addstmt(self, stmt):
assert stmt is not None
assert not isinstance(stmt, list)
self.stuff.append(stmt)
def addstmts(self, stmts):
for s in stmts: self.addstmt(s)
class CppDirective(Node):
'''represents |#[directive] [rest]|, where |rest| is any string'''
def __init__(self, directive, rest=None):
Node.__init__(self)
self.directive = directive
self.rest = rest
class Block(Node):
def __init__(self):
Node.__init__(self)
self.stmts = [ ]
def addstmt(self, stmt):
assert stmt is not None
assert not isinstance(stmt, tuple)
self.stmts.append(stmt)
def addstmts(self, stmts):
for s in stmts: self.addstmt(s)
##------------------------------
# type and decl thingies
class Namespace(Block):
def __init__(self, name):
assert isinstance(name, str)
Block.__init__(self)
self.name = name
class Type(Node):
def __init__(self, name, const=0,
ptr=0, ptrconst=0, ptrptr=0, ptrconstptr=0,
ref=0,
hasimplicitcopyctor=True,
T=None):
"""
To avoid getting fancy with recursive types, we limit the kinds
of pointer types that can be be constructed.
ptr => T*
ptrconst => T* const
ptrptr => T**
ptrconstptr => T* const*
Any type, naked or pointer, can be const (const T) or ref (T&).
"""
assert isinstance(name, str)
assert not isinstance(const, str)
assert not isinstance(T, str)
Node.__init__(self)
self.name = name
self.const = const
self.ptr = ptr
self.ptrconst = ptrconst
self.ptrptr = ptrptr
self.ptrconstptr = ptrconstptr
self.ref = ref
self.hasimplicitcopyctor = hasimplicitcopyctor
self.T = T
# XXX could get serious here with recursive types, but shouldn't
# need that for this codegen
def __deepcopy__(self, memo):
return Type(self.name,
const=self.const,
ptr=self.ptr, ptrconst=self.ptrconst,
ptrptr=self.ptrptr, ptrconstptr=self.ptrconstptr,
ref=self.ref,
T=copy.deepcopy(self.T, memo))
Type.BOOL = Type('bool')
Type.INT = Type('int')
Type.INT32 = Type('int32_t')
Type.INTPTR = Type('intptr_t')
Type.NSRESULT = Type('nsresult')
Type.UINT32 = Type('uint32_t')
Type.UINT32PTR = Type('uint32_t', ptr=1)
Type.SIZE = Type('size_t')
Type.VOID = Type('void')
Type.VOIDPTR = Type('void', ptr=1)
Type.AUTO = Type('auto')
class TypeArray(Node):
def __init__(self, basetype, nmemb):
'''the type |basetype DECLNAME[nmemb]|. |nmemb| is an Expr'''
self.basetype = basetype
self.nmemb = nmemb
    def __deepcopy__(self, memo):
        return TypeArray(copy.deepcopy(self.basetype, memo),
                         copy.deepcopy(self.nmemb, memo))
class TypeEnum(Node):
def __init__(self, name=None):
'''name can be None'''
Node.__init__(self)
self.name = name
self.idnums = [ ] # pairs of ('Foo', [num]) or ('Foo', None)
def addId(self, id, num=None):
self.idnums.append((id, num))
class TypeUnion(Node):
def __init__(self, name=None):
Node.__init__(self)
self.name = name
self.components = [ ] # [ Decl ]
def addComponent(self, type, name):
self.components.append(Decl(type, name))
class Typedef(Node):
def __init__(self, fromtype, totypename, templateargs=[]):
assert isinstance(totypename, str)
Node.__init__(self)
self.fromtype = fromtype
self.totypename = totypename
self.templateargs = templateargs
def __cmp__(self, o):
return cmp(self.totypename, o.totypename)
def __eq__(self, o):
return (self.__class__ == o.__class__
and 0 == cmp(self, o))
def __hash__(self):
return hash(self.totypename)
class Using(Node):
def __init__(self, type):
Node.__init__(self)
self.type = type
class ForwardDecl(Node):
def __init__(self, pqname, cls=0, struct=0):
assert (not cls and struct) or (cls and not struct)
self.pqname = pqname
self.cls = cls
self.struct = struct
class Decl(Node):
'''represents |Foo bar|, e.g. in a function signature'''
def __init__(self, type, name):
assert type is not None
assert not isinstance(type, str)
assert isinstance(name, str)
Node.__init__(self)
self.type = type
self.name = name
def __deepcopy__(self, memo):
return Decl(copy.deepcopy(self.type, memo), self.name)
class Param(Decl):
def __init__(self, type, name, default=None):
Decl.__init__(self, type, name)
self.default = default
def __deepcopy__(self, memo):
return Param(copy.deepcopy(self.type, memo), self.name,
copy.deepcopy(self.default, memo))
##------------------------------
# class stuff
class Class(Block):
def __init__(self, name, inherits=[ ],
interface=0, abstract=0, final=0,
specializes=None, struct=0):
assert not (interface and abstract)
assert not (abstract and final)
assert not (interface and final)
assert not (inherits and specializes)
Block.__init__(self)
self.name = name
self.inherits = inherits # [ Type ]
self.interface = interface # bool
self.abstract = abstract # bool
self.final = final # bool
self.specializes = specializes # Type or None
self.struct = struct # bool
class Inherit(Node):
def __init__(self, type, viz='public'):
assert isinstance(viz, str)
Node.__init__(self)
self.type = type
self.viz = viz
class FriendClassDecl(Node):
def __init__(self, friend):
Node.__init__(self)
self.friend = friend
class MethodDecl(Node):
def __init__(self, name, params=[ ], ret=Type('void'),
virtual=0, const=0, pure=0, static=0, warn_unused=0,
inline=0, force_inline=0, never_inline=0,
typeop=None,
T=None):
assert not (virtual and static)
assert not pure or virtual # pure => virtual
assert not (static and typeop)
assert not (name and typeop)
assert name is None or isinstance(name, str)
assert not isinstance(ret, list)
for decl in params: assert not isinstance(decl, str)
assert not isinstance(T, int)
assert not (inline and never_inline)
assert not (force_inline and never_inline)
if typeop is not None:
ret = None
Node.__init__(self)
self.name = name
self.params = params # [ Param ]
self.ret = ret # Type or None
self.virtual = virtual # bool
self.const = const # bool
self.pure = pure # bool
self.static = static # bool
self.warn_unused = warn_unused # bool
self.force_inline = (force_inline or T) # bool
self.inline = inline # bool
self.never_inline = never_inline # bool
self.typeop = typeop # Type or None
self.T = T # Type or None
self.only_for_definition = False
def __deepcopy__(self, memo):
return MethodDecl(
self.name,
params=copy.deepcopy(self.params, memo),
ret=copy.deepcopy(self.ret, memo),
virtual=self.virtual,
const=self.const,
pure=self.pure,
static=self.static,
warn_unused=self.warn_unused,
inline=self.inline,
force_inline=self.force_inline,
never_inline=self.never_inline,
typeop=copy.deepcopy(self.typeop, memo),
T=copy.deepcopy(self.T, memo))
class MethodDefn(Block):
def __init__(self, decl):
Block.__init__(self)
self.decl = decl
class FunctionDecl(MethodDecl):
def __init__(self, name, params=[ ], ret=Type('void'),
static=0, warn_unused=0,
inline=0, force_inline=0,
T=None):
MethodDecl.__init__(self, name, params=params, ret=ret,
static=static, warn_unused=warn_unused,
inline=inline, force_inline=force_inline,
T=T)
class FunctionDefn(MethodDefn):
def __init__(self, decl):
MethodDefn.__init__(self, decl)
class ConstructorDecl(MethodDecl):
def __init__(self, name, params=[ ], explicit=0, force_inline=0):
MethodDecl.__init__(self, name, params=params, ret=None,
force_inline=force_inline)
self.explicit = explicit
def __deepcopy__(self, memo):
return ConstructorDecl(self.name,
copy.deepcopy(self.params, memo),
self.explicit)
class ConstructorDefn(MethodDefn):
def __init__(self, decl, memberinits=[ ]):
MethodDefn.__init__(self, decl)
self.memberinits = memberinits
class DestructorDecl(MethodDecl):
def __init__(self, name, virtual=0, force_inline=0, inline=0):
MethodDecl.__init__(self, name, params=[ ], ret=None,
virtual=virtual,
force_inline=force_inline, inline=inline)
def __deepcopy__(self, memo):
return DestructorDecl(self.name,
virtual=self.virtual,
force_inline=self.force_inline,
inline=self.inline)
class DestructorDefn(MethodDefn):
def __init__(self, decl): MethodDefn.__init__(self, decl)
##------------------------------
# expressions
class ExprVar(Node):
def __init__(self, name):
assert isinstance(name, str)
Node.__init__(self)
self.name = name
ExprVar.THIS = ExprVar('this')
class ExprLiteral(Node):
def __init__(self, value, type):
'''|type| is a Python format specifier; 'd' for example'''
Node.__init__(self)
self.value = value
self.type = type
@staticmethod
def Int(i): return ExprLiteral(i, 'd')
@staticmethod
def String(s): return ExprLiteral('"'+ s +'"', 's')
@staticmethod
def WString(s): return ExprLiteral('L"'+ s +'"', 's')
def __str__(self):
return ('%'+ self.type)% (self.value)
ExprLiteral.ZERO = ExprLiteral.Int(0)
ExprLiteral.ONE = ExprLiteral.Int(1)
ExprLiteral.NULL = ExprVar('nullptr')
ExprLiteral.TRUE = ExprVar('true')
ExprLiteral.FALSE = ExprVar('false')
class ExprPrefixUnop(Node):
def __init__(self, expr, op):
assert not isinstance(expr, tuple)
self.expr = expr
self.op = op
class ExprNot(ExprPrefixUnop):
def __init__(self, expr):
ExprPrefixUnop.__init__(self, expr, '!')
class ExprAddrOf(ExprPrefixUnop):
def __init__(self, expr):
ExprPrefixUnop.__init__(self, expr, '&')
class ExprDeref(ExprPrefixUnop):
def __init__(self, expr):
ExprPrefixUnop.__init__(self, expr, '*')
class ExprCast(Node):
def __init__(self, expr, type,
dynamic=0, static=0, reinterpret=0, const=0, C=0):
assert 1 == reduce(lambda a, x: a+x, [ dynamic, static, reinterpret, const, C ])
Node.__init__(self)
self.expr = expr
self.type = type
self.dynamic = dynamic
self.static = static
self.reinterpret = reinterpret
self.const = const
self.C = C
class ExprBinary(Node):
def __init__(self, left, op, right):
Node.__init__(self)
self.left = left
self.op = op
self.right = right
class ExprConditional(Node):
def __init__(self, cond, ife, elsee):
Node.__init__(self)
self.cond = cond
self.ife = ife
self.elsee = elsee
class ExprIndex(Node):
def __init__(self, arr, idx):
Node.__init__(self)
self.arr = arr
self.idx = idx
class ExprSelect(Node):
def __init__(self, obj, op, field):
assert obj and op and field
assert not isinstance(obj, str)
assert isinstance(field, str)
Node.__init__(self)
self.obj = obj
self.op = op
self.field = field
class ExprAssn(Node):
def __init__(self, lhs, rhs, op='='):
Node.__init__(self)
self.lhs = lhs
self.op = op
self.rhs = rhs
class ExprCall(Node):
def __init__(self, func, args=[ ]):
assert hasattr(func, 'accept')
assert isinstance(args, list)
for arg in args: assert arg and not isinstance(arg, str)
Node.__init__(self)
self.func = func
self.args = args
class ExprMove(ExprCall):
def __init__(self, arg):
ExprCall.__init__(self, ExprVar("mozilla::Move"), args=[arg])
class ExprNew(Node):
# XXX taking some poetic license ...
def __init__(self, ctype, args=[ ], newargs=None):
assert not (ctype.const or ctype.ref)
Node.__init__(self)
self.ctype = ctype
self.args = args
self.newargs = newargs
class ExprDelete(Node):
def __init__(self, obj):
Node.__init__(self)
self.obj = obj
class ExprMemberInit(ExprCall):
def __init__(self, member, args=[ ]):
ExprCall.__init__(self, member, args)
class ExprSizeof(ExprCall):
def __init__(self, t):
ExprCall.__init__(self, ExprVar('sizeof'), [ t ])
##------------------------------
# statements etc.
class StmtBlock(Block):
def __init__(self, stmts=[ ]):
Block.__init__(self)
self.addstmts(stmts)
class StmtDecl(Node):
def __init__(self, decl, init=None, initargs=None):
assert not (init and initargs)
assert not isinstance(init, str) # easy to confuse with Decl
assert not isinstance(init, list)
assert not isinstance(decl, tuple)
Node.__init__(self)
self.decl = decl
self.init = init
self.initargs = initargs
class Label(Node):
def __init__(self, name):
Node.__init__(self)
self.name = name
Label.PUBLIC = Label('public')
Label.PROTECTED = Label('protected')
Label.PRIVATE = Label('private')
class CaseLabel(Node):
def __init__(self, name):
Node.__init__(self)
self.name = name
class DefaultLabel(Node):
def __init__(self):
Node.__init__(self)
class StmtIf(Node):
def __init__(self, cond):
Node.__init__(self)
self.cond = cond
self.ifb = Block()
self.elseb = None
def addifstmt(self, stmt):
self.ifb.addstmt(stmt)
def addifstmts(self, stmts):
self.ifb.addstmts(stmts)
def addelsestmt(self, stmt):
if self.elseb is None: self.elseb = Block()
self.elseb.addstmt(stmt)
def addelsestmts(self, stmts):
if self.elseb is None: self.elseb = Block()
self.elseb.addstmts(stmts)
class StmtFor(Block):
def __init__(self, init=None, cond=None, update=None):
Block.__init__(self)
self.init = init
self.cond = cond
self.update = update
class StmtRangedFor(Block):
def __init__(self, var, iteree):
assert isinstance(var, ExprVar)
assert iteree
Block.__init__(self)
self.var = var
self.iteree = iteree
class StmtSwitch(Block):
def __init__(self, expr):
Block.__init__(self)
self.expr = expr
self.nr_cases = 0
def addcase(self, case, block):
'''NOTE: |case| is not checked for uniqueness'''
assert not isinstance(case, str)
assert (isinstance(block, StmtBreak)
or isinstance(block, StmtReturn)
or isinstance(block, StmtSwitch)
or (hasattr(block, 'stmts')
and (isinstance(block.stmts[-1], StmtBreak)
or isinstance(block.stmts[-1], StmtReturn))))
self.addstmt(case)
self.addstmt(block)
self.nr_cases += 1
def addfallthrough(self, case):
self.addstmt(case)
self.nr_cases += 1
class StmtBreak(Node):
def __init__(self):
Node.__init__(self)
class StmtExpr(Node):
def __init__(self, expr):
assert expr is not None
Node.__init__(self)
self.expr = expr
class StmtReturn(Node):
def __init__(self, expr=None):
Node.__init__(self)
self.expr = expr
StmtReturn.TRUE = StmtReturn(ExprLiteral.TRUE)
StmtReturn.FALSE = StmtReturn(ExprLiteral.FALSE)
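# --- Illustrative sketch (editor's addition, not part of the original file):
# build a tiny function definition with the classes above and walk it with a
# Visitor subclass that records every variable reference it sees.
class _VarCollector(Visitor):
    def __init__(self):
        self.names = []
    def visitExprVar(self, v):
        self.names.append(v.name)

def _demoVisit():
    decl = FunctionDecl('add',
                        params=[ Param(Type.INT, 'a'), Param(Type.INT, 'b') ],
                        ret=Type.INT)
    defn = FunctionDefn(decl)
    defn.addstmt(StmtReturn(ExprBinary(ExprVar('a'), '+', ExprVar('b'))))
    collector = _VarCollector()
    defn.accept(collector)
    return collector.names          # ['a', 'b']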
repo_name: Yukarumya/Yukarum-Redfoxes | path: ipc/ipdl/ipdl/cxx/ast.py | language: Python | license: mpl-2.0 | size: 23,105 | keyword: ["VisIt"] | text_hash: bf4bc6bf0dddb3168bc0978d76ae379a12ef78c9b37ea41890927dcd5e211f4f
from jinja2 import Environment, FileSystemLoader
import subprocess
import os.path
import shutil
from PIL import Image
import win32gui
import glob
import time
import uuid
import os
from subprocess import *
#TODO: Create new Zero template for sprite-based playback
#they said it couldn't be done
#and they were wrong!
#well, they said it couldn't be done *correctly*...
#so i guess not
original_movie = "Countdown.wmv"
#original_movie = "The Mysterious Floating Orb.mp4"
#original_movie = "London Brawling.mp4"
print "Located video file at " + original_movie
print "Deleting old frames..."
for old_frame in glob.glob("*.jpg"):
os.remove(old_frame)
for old_frame in glob.glob("*.png"):
os.remove(old_frame)
for old_audio in glob.glob("*.mp3"):
os.remove(old_audio)
for old_video in glob.glob("*.avi"):
os.remove(old_video)
for old_frame in glob.glob(os.path.expanduser("~\Documents\GitHub\Nada\LosslessVideoTest\Content\\") + "*.jpg"):
os.remove(old_frame)
for old_meta in glob.glob(os.path.expanduser("~\Documents\GitHub\Nada\LosslessVideoTest\Content\\") + "*.jpg.meta"):
os.remove(old_meta)
for old_frame in glob.glob(os.path.expanduser("~\Documents\GitHub\Nada\LosslessVideoTest\Content\\") + "*.png"):
os.remove(old_frame)
for old_meta in glob.glob(os.path.expanduser("~\Documents\GitHub\Nada\LosslessVideoTest\Content\\") + "*.png.meta"):
os.remove(old_meta)
for old_audio in glob.glob(os.path.expanduser("~\Documents\GitHub\Nada\LosslessVideoTest\Content\\") + "*.mp3"):
os.remove(old_audio)
print "Extracting audio..."
subprocess.call("ffmpeg -i \"" + original_movie + "\" -vn -ar 44100 -ac 2 -ab 96k -f mp3 vid_sound.mp3")
print "Splitting into frames..."
subprocess.call("ffmpeg -i \"" + original_movie + "\" -r 24 frame%08d.png")
print "Optimizing frames..."
idx = 0
tasks = []
#judging by my experiments baseline is always better for this
#print "Generating progressive samples..."
#
#while True:
# idx += 1
# curpath = "frame" + str(idx).zfill(8) + ".png"
# if not os.path.isfile(curpath):
# break
# print curpath.replace(".png", "_progressive")
# tasks.append(subprocess.Popen("\"C:\Program Files\ImageMagick-6.8.9-Q16\convert.exe\" -strip -resize 50% -gaussian-blur 0.08 -sampling-factor 4:2:0 -quality 80% -interlace Plane " + curpath + " " + curpath.replace(".png", "_progressive.jpg"))) # -define:extent=50000
#
#while len(tasks) > 0:
# for task in tasks:
# if not task.poll() == None:
# tasks.remove(task)
#
#idx = 0
#
#print "Generating baseline samples..."
#
#while True:
# idx += 1
# curpath = "frame" + str(idx).zfill(8) + ".png"
# if not os.path.isfile(curpath):
# break
# print curpath.replace(".png", "_baseline")
# tasks.append(subprocess.Popen("\"C:\Program Files\ImageMagick-6.8.9-Q16\convert.exe\" -strip -resize 50% -gaussian-blur 0.08 -sampling-factor 4:2:0 -quality 80% " + curpath + " " + curpath.replace(".png", "_baseline.jpg")))
#
#while len(tasks) > 0:
# for task in tasks:
# if not task.poll() == None:
# tasks.remove(task)
#
#print "Selecting best versions..."
#
#idx = 0
#
#while True:
# idx += 1
# standard = "frame" + str(idx).zfill(8) + ".jpg"
# baseline = "frame" + str(idx).zfill(8) + "_baseline.jpg"
# progressive = "frame" + str(idx).zfill(8) + "_progressive.jpg"
# print baseline
# if not os.path.isfile(baseline):
# break
# b_size = os.path.getsize(baseline)
# p_size = os.path.getsize(progressive)
# if b_size > p_size:
# print "b-" + str(b_size)
# print "p-" + str(p_size)
# shutil.copyfile(baseline, standard)
# else:
# print "b-" + str(b_size)
# print "p-" + str(p_size)
# shutil.copyfile(progressive, standard)
print "Calculating square sizes..."
resize = 0.5
side = str((Image.open("frame00000001.png").size[0] - Image.open("frame00000001.png").size[1]) * resize / 2)
pct_size = str(Image.open("frame00000001.png").size[0]) + "x" + str(Image.open("frame00000001.png").size[1])
print "Generating diff masks..."
#if only i had /dev/null as a service
#http://devnull-as-a-service.com/home/
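#editor's note (added comments, not in the original script): every 12th frame
#(idx % 12 == 1) is a keyframe and gets an all-white mask, so the whole frame
#is kept; every other frame is diffed against its predecessor with ImageMagick
#"compare -fuzz 3%", the mask is dilated in the next pass (blur + threshold),
#and finally applied as an alpha channel so only the changed pixels survive.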
idx = 0
with open(os.devnull, 'w') as tempf:
while True:
idx += 1
curpath = "frame" + str(idx).zfill(8) + ".png"
if not os.path.isfile(curpath):
break
if idx % 12 == 1: #you can replace 12 with 24 for slight file savings
print curpath.replace(".png", "") + " - keyframe"
nsp = subprocess.Popen("\"C:\Program Files\ImageMagick-6.8.9-Q16\convert.exe\" -size " + pct_size + "x" + pct_size + " xc:white " + "mask" + str(idx).zfill(8) + ".png", stdout=tempf, stderr=tempf)
#nsp.communicate()
tasks.append(nsp)
elif os.path.isfile("frame" + str(idx + 1).zfill(8) + ".png"):
print curpath.replace(".png", "")
nsp = subprocess.Popen("\"C:\Program Files\ImageMagick-6.8.9-Q16\compare.exe\" -fuzz 3% frame" + str(idx - 1).zfill(8) + ".png frame" + str(idx).zfill(8) + ".png -compose Src -highlight-color White -lowlight-color Black omask" + str(idx).zfill(8) + ".png", stdout=tempf, stderr=tempf)
#nsp.communicate()
tasks.append(nsp)
while len(tasks) > 50:
for task in tasks:
if not task.poll() == None:
tasks.remove(task)
while len(tasks) > 0:
for task in tasks:
if not task.poll() == None:
tasks.remove(task)
print "Dilating diff masks..."
idx = 0
with open(os.devnull, 'w') as tempf:
while True:
idx += 1
curpath = "omask" + str(idx).zfill(8) + ".png"
if not os.path.isfile(curpath):
if os.path.isfile("omask" + str(idx + 1).zfill(8) + ".png"):
continue
else:
break
print curpath.replace(".png", "")
nsp = subprocess.Popen("\"C:\Program Files\ImageMagick-6.8.9-Q16\convert.exe\" omask" + str(idx).zfill(8) + ".png -blur 5x65000 -threshold 0 -fill white -opaque white mask" + str(idx).zfill(8) + ".png", stdout=tempf, stderr=tempf)
tasks.append(nsp)
while len(tasks) > 50:
for task in tasks:
if not task.poll() == None:
tasks.remove(task)
while len(tasks) > 0:
for task in tasks:
if not task.poll() == None:
tasks.remove(task)
print "Generating colored diffs..."
idx = 0
with open(os.devnull, 'w') as tempf:
while True:
idx += 1
curpath = "frame" + str(idx).zfill(8) + ".png"
if not os.path.isfile(curpath):
break
print curpath.replace(".png", "")
nsp = subprocess.Popen("\"C:\Program Files\ImageMagick-6.8.9-Q16\convert.exe\" frame" + str(idx).zfill(8) + ".png mask" + str(idx).zfill(8) + ".png -alpha Off -compose CopyOpacity -strip -resize " + str(round(resize * 100)) + "% -quality 00 -composite final" + str(idx).zfill(8) + ".png", stdout=tempf, stderr=tempf)
#nsp.communicate()
tasks.append(nsp)
while len(tasks) > 50:
for task in tasks:
if not task.poll() == None:
tasks.remove(task)
while len(tasks) > 0:
for task in tasks:
if not task.poll() == None:
tasks.remove(task)
print "Optimizing colored diffs..."
#no borders this time
idx = 0
with open(os.devnull, 'w') as tempf:
while True:
idx += 1
curpath = "final" + str(idx).zfill(8) + ".png"
if not os.path.isfile(curpath):
break
print curpath.replace(".png", "")
nsp = subprocess.Popen("optipng.exe " + curpath, stdout=tempf, stderr=tempf)
tasks.append(nsp)
while len(tasks) > 50:
for task in tasks:
if not task.poll() == None:
tasks.remove(task)
while len(tasks) > 0:
for task in tasks:
if not task.poll() == None:
tasks.remove(task)
#no preview for this version
print "Calculating file size..."
idx = 0
size = 0
for frame in glob.glob("final*.png"):
size += os.path.getsize(frame)
for audio in glob.glob("*.mp3"):
size += os.path.getsize(audio)
print str(size / 1048576) + " megabytes"
print "Importing into test project..."
env = Environment(loader=FileSystemLoader('templates'))
vpresenter = env.get_template('VideoPresenter.z')
framemeta = env.get_template('FrameMetadata.png.meta')
with open(os.path.expanduser("~\Documents\GitHub\Nada\LosslessVideoTest\Content\VideoPresenter.z"), "w") as zilchscript:
result = vpresenter.render(framenum=str(len(glob.glob("final*.png"))))
print result
zilchscript.write(result)
zilchscript.close()
size_x = str(Image.open("final00000001.png").size[0])
size_y = str(Image.open("final00000001.png").size[1])
origin_x = str(Image.open("final00000001.png").size[0] / 2)
origin_y = str(Image.open("final00000001.png").size[1] / 2)
while True:
idx += 1
curpath = "final" + str(idx).zfill(8) + ".png"
if not os.path.isfile(curpath):
break
print curpath.replace(".png", "")
shutil.copyfile(curpath.replace("frame", "final"), os.path.expanduser("~\Documents\GitHub\Nada\LosslessVideoTest\Content\\" + curpath))
with open(os.path.expanduser("~\Documents\GitHub\Nada\LosslessVideoTest\Content\\" + curpath + ".meta"), "w") as metadata:
result = framemeta.render(frameid=str(idx),resid=str(uuid.uuid4().get_hex().lower()[0:16]),framesizex=size_x,framesizey=size_y,frameoriginx=origin_x,frameoriginy=origin_y)
print result
metadata.write(result)
metadata.close()
print "vid_sound.mp3"
shutil.copyfile("vid_sound.mp3", os.path.expanduser("~\Documents\GitHub\Nada\LosslessVideoTest\Content\\vid_sound.mp3"))
print "Done!"
etc = size / 30922 / 60
#Test results:
#17749228 bytes
#574 seconds
#30922 bytes/second
print "Cleaning up..."
for old_frame in glob.glob("*.jpg"):
os.remove(old_frame)
for old_frame in glob.glob("*.png"):
os.remove(old_frame)
for old_audio in glob.glob("*.mp3"):
os.remove(old_audio)
for old_video in glob.glob("*.avi"):
os.remove(old_video)
time.sleep(2)
os.startfile(os.path.expanduser("~\Documents\GitHub\Nada\LosslessVideoTest\LosslessVideoTest.zeroproj"))
#time.sleep(2)
#os.startfile("video.avi")
#print "Locating cache..."
#zengine_ui = subprocess.Popen(("C:\Program Files (x86)\ZeroEditor\ZeroEditor.exe", os.path.expanduser("~\Documents\GitHub\Nada\LosslessVideoTest\LosslessVideoTest.zeroproj")))
#time.sleep(3)
#zengine_ui.terminate()
#time.sleep(2)
#def all_subdirs_of(b='.'):
# result = []
# for d in os.listdir(b):
# bd = os.path.join(b, d)
# if os.path.isdir(bd): result.append(bd)
# return result
#latest_subdir = max(all_subdirs_of(os.path.expanduser('~\AppData\Local\ZeroContent')), key=os.path.getmtime)
#print "Cache found in " + latest_subdir + "."
#print "Caching Zero texture files..."
#idx = 0
#tasks = []
#while True:
# idx += 1
# curpath = "frame" + str(idx).zfill(8) + ".jpg"
# if not os.path.isfile(curpath):
# break
# print curpath.replace(".jpg", ".ztex")
# tasks.append(subprocess.Popen(("\"C:\Program Files (x86)\ZeroEditor\Tools\ImageProcessor.exe", "-in \"" + os.path.abspath(curpath) + "\"", "-out " + latest_subdir + "\frame" + str(idx) + ".ztex")))
# while len(tasks) > 10:
# for task in tasks:
# if not task.poll() == None:
# tasks.remove(task)
repo_name: milkey-mouse/Nada | path: ZeroLossless/ZeroVideo/ZeroVideo/ZeroVideo.py | language: Python | license: mit | size: 11,524 | keyword: ["Gaussian"] | text_hash: 9ab01d85d7deff78797f0606a78359c7a5e36e5b25925ae6d13043be4d391629
"""
It is a helper module used to create a certain plot...
"""
from DIRAC import S_OK, S_ERROR
def _convertToSeconds(interval):
"""
Converts number of minutes, hours, days, weeks, months, years into seconds
"""
# unit symbols
units = ["s", "m", "h", "d", "w", "M", "y"]
# this is the number of previous units in a unit
numbers = [1, 60, 60, 24, 7, 30.0 / 7.0, 366.0 / 30.0]
seconds = 1.0
for unit, num in zip(units, numbers):
seconds *= num
if interval.endswith(unit):
return int(float(interval[:-1]) * seconds)
raise ValueError("Invalid time interval '%s'" % interval)
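# Editor's note (illustrative, not part of the original module): the factors in
# ``numbers`` accumulate, so by the time a unit matches, ``seconds`` already
# holds that unit's length. For example "2w" walks s->m->h->d->w, giving
# 1*60*60*24*7 = 604800 seconds per week, so _convertToSeconds("2w") == 1209600;
# "1M" continues one step further (604800 * 30/7 = 2592000).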
class DBUtils(object):
"""
.. class:: DBUtils
    It implements a few methods used to create the plots.

    param: dict __esbucket maps a time span to the Elasticsearch bucket (bin)
           size used for that span
"""
# TODO: Maybe it is better to use the same structure we have in BasePlotter
__esbucket = {
"1h": "1m",
"6h": "5m",
"12h": "10m",
"1d": "15m",
"2d": "30m",
"3.5d": "1h",
"1w": "2h",
"2w": "4h",
"1M": "8h",
"2M": "12h",
"3M": "1d",
"6M": "2d",
"9M": "3d",
"1y": "4d",
"10y": "7d",
"100y": "1w",
}
def __init__(self, db, setup):
"""c'tor
:param self: self reference
:param db: the database module
:param str setup: DIRAC setup
"""
self.__db = db
self.__setup = setup
def getKeyValues(self, typeName, condDict):
"""
Get all valid key values in a type
"""
return self.__db.getKeyValues(self.__setup, typeName, condDict)
def _retrieveBucketedData(
self, typeName, startTime, endTime, interval, selectField, condDict=None, grouping="", metadataDict=None
):
"""
        It is a thin wrapper around the database module's retrieveBucketedData method.
"""
return self.__db.retrieveBucketedData(
typeName=typeName,
startTime=startTime,
endTime=endTime,
interval=interval,
selectField=selectField,
condDict=condDict,
grouping=grouping,
metainfo=metadataDict,
)
def _retrieveAggregatedData(
self, typeName, startTime, endTime, interval, selectField, condDict=None, grouping="", metadataDict=None
):
"""
Retrieve data from EL
"""
return self.__db.retrieveAggregatedData(
typeName=typeName,
startTime=startTime,
endTime=endTime,
interval=interval,
selectField=selectField,
condDict=condDict,
grouping=grouping,
metainfo=metadataDict,
)
def _determineBucketSize(self, start, end):
"""It is used to determine the bucket size using _esUnits
:param int start: epoch time
:param int end: epoch time
:return: S_OK/S_ERROR with tuple of (binUnit, seconds)
"""
diff = end - start
error = "Can not determine the bucket size..."
bucketSeconds = {}
try:
# Convert intervals into seconds
for interval, binUnit in self.__esbucket.items():
bucketSeconds[_convertToSeconds(interval)] = (binUnit, _convertToSeconds(binUnit))
# Determine bin size according to time span
for interval in sorted(bucketSeconds):
if diff <= interval:
return S_OK(bucketSeconds[interval])
except ValueError as e:
error += ": " + repr(e)
return S_ERROR(error)
def _divideByFactor(self, dataDict, factor):
"""
Divide by factor the values and get the maximum value
- dataDict = { 'key' : { time1 : value, time2 : value... }, 'key2'.. }
"""
maxValue = 0.0
for key in dataDict:
currentDict = dataDict[key]
for timeEpoch in currentDict:
currentDict[timeEpoch] /= float(factor)
maxValue = max(maxValue, currentDict[timeEpoch])
return dataDict, maxValue
def _getAccumulationMaxValue(self, dataDict):
"""
Divide by factor the values and get the maximum value
- dataDict = { 'key' : { time1 : value, time2 : value... }, 'key2'.. }
"""
maxValue = 0
maxEpoch = 0
for key in dataDict:
currentDict = dataDict[key]
for timeEpoch in currentDict:
if timeEpoch > maxEpoch:
maxEpoch = timeEpoch
maxValue = 0
if timeEpoch == maxEpoch:
maxValue += currentDict[timeEpoch]
return maxValue
@staticmethod
def _accumulate(granularity, startEpoch, endEpoch, dataDict):
"""
Accumulates all the values and builds the dataDict used to plot.
Used in DataOperationPlotter.
- granularity: bucket size
- startTime: epoch time
- endTime: epoch time
- dataDict = { 'key' : { time1 : value, time2 : value... }, 'key2'.. }
"""
startBucketEpoch = startEpoch - startEpoch % granularity
for key in dataDict:
currentDict = dataDict[key]
lastValue = 0
for timeEpoch in range(startBucketEpoch, endEpoch, granularity):
if timeEpoch in currentDict:
lastValue += currentDict[timeEpoch]
currentDict[timeEpoch] = lastValue
return dataDict
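# --- Illustrative sketch (editor's addition, not part of the original module):
# _accumulate turns per-bucket values into running totals, filling buckets that
# have no data with the previous total.
if __name__ == "__main__":
    data = {"jobs": {0: 5, 600: 3, 1800: 2}}
    accumulated = DBUtils._accumulate(600, 0, 2400, data)
    # buckets 0, 600, 1200, 1800 -> totals 5, 8, 8, 10
    print(sorted(accumulated["jobs"].items()))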
repo_name: DIRACGrid/DIRAC | path: src/DIRAC/MonitoringSystem/private/DBUtils.py | language: Python | license: gpl-3.0 | size: 5,663 | keyword: ["DIRAC"] | text_hash: 1217d2b8c269d934a7a8ea2d5fdc3844cadc64e1348e44a019c464fc156edca8
"""
PHYLIP multiple sequence alignment format (:mod:`skbio.io.phylip`)
==================================================================
.. currentmodule:: skbio.io.phylip
The PHYLIP file format stores a multiple sequence alignment. The format was
originally defined and used in Joe Felsenstein's PHYLIP package [1]_, and has
since been supported by several other bioinformatics tools (e.g., RAxML [2]_).
See [3]_ for the original format description, and [4]_ and [5]_ for additional
descriptions.
An example PHYLIP-formatted file taken from [3]_::
5 42
Turkey AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT
Salmo gairAAGCCTTGGC AGTGCAGGGT GAGCCGTGGC CGGGCACGGT AT
H. SapiensACCGGTTGGC CGTTCAGGGT ACAGGTTGGC CGTTCAGGGT AA
Chimp AAACCCTTGC CGTTACGCTT AAACCGAGGC CGGGACACTC AT
Gorilla AAACCCTTGC CGGTACGCTT AAACCATTGC CGGTACGCTT AA
.. note:: Original copyright notice for the above PHYLIP file:
*(c) Copyright 1986-2008 by The University of Washington. Written by Joseph
Felsenstein. Permission is granted to copy this document provided that no
fee is charged for it and that this copyright notice is not removed.*
Format Support
--------------
**Has Sniffer: No**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|No |Yes |:mod:`skbio.alignment.Alignment` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
PHYLIP format is a plain text format containing exactly two sections: a header
describing the dimensions of the alignment, followed by the multiple sequence
alignment itself.
The format described here is "strict" PHYLIP, as described in [4]_. Strict
PHYLIP requires that each sequence identifier is exactly 10 characters long
(padded with spaces as necessary). Other bioinformatics tools (e.g., RAxML) may
relax this rule to allow for longer sequence identifiers. See the
**Alignment Section** below for more details.
The format described here is "sequential" format. The original PHYLIP format
specification [3]_ describes both sequential and interleaved formats.
.. note:: scikit-bio currently only supports writing strict, sequential
PHYLIP-formatted files from an ``skbio.alignment.Alignment``. It does not
yet support reading PHYLIP-formatted files, nor does it support relaxed or
interleaved PHYLIP formats.
Header Section
^^^^^^^^^^^^^^
The header consists of a single line describing the dimensions of the
alignment. It **must** be the first line in the file. The header consists of
optional spaces, followed by two positive integers (``n`` and ``m``) separated
by one or more spaces. The first integer (``n``) specifies the number of
sequences (i.e., the number of rows) in the alignment. The second integer
(``m``) specifies the length of the sequences (i.e., the number of columns) in
the alignment. The smallest supported alignment dimensions are 1x1.
.. note:: scikit-bio will write the PHYLIP format header *without* preceding
spaces, and with only a single space between ``n`` and ``m``.
PHYLIP format *does not* support blank line(s) between the header and the
alignment.
Alignment Section
^^^^^^^^^^^^^^^^^
The alignment section immediately follows the header. It consists of ``n``
lines (rows), one for each sequence in the alignment. Each row consists of a
sequence identifier (ID) and characters in the sequence, in fixed width format.
The sequence ID can be up to 10 characters long. IDs less than 10 characters
must have spaces appended to them to reach the 10 character fixed width. Within
an ID, all characters except newlines are supported, including spaces,
underscores, and numbers.
.. note:: While not explicitly stated in the original PHYLIP format
description, scikit-bio only supports writing unique sequence identifiers
(i.e., duplicates are not allowed). Uniqueness is required because an
``skbio.alignment.Alignment`` cannot be created with duplicate IDs.
scikit-bio supports the empty string (``''``) as a valid sequence ID. An
empty ID will be padded with 10 spaces.
Sequence characters immediately follow the sequence ID. They *must* start at
the 11th character in the line, as the first 10 characters are reserved for the
sequence ID. While PHYLIP format does not explicitly restrict the set of
supported characters that may be used to represent a sequence, the original
format description [3]_ specifies the IUPAC nucleic acid lexicon for DNA or RNA
sequences, and the IUPAC protein lexicon for protein sequences. The original
PHYLIP specification uses ``-`` as a gap character, though older versions also
supported ``.``. The sequence characters may contain optional spaces (e.g., to
improve readability), and both upper and lower case characters are supported.
.. note:: scikit-bio will write a PHYLIP-formatted file even if the alignment's
sequence characters are not valid IUPAC characters. This differs from the
PHYLIP specification, which states that a PHYLIP-formatted file can only
contain valid IUPAC characters. To check whether all characters are valid
before writing, the user can call ``Alignment.is_valid()``.
Since scikit-bio supports both ``-`` and ``.`` as gap characters (e.g., in
``skbio.alignment.Alignment``), both are supported when writing a
PHYLIP-formatted file.
When writing a PHYLIP-formatted file, scikit-bio will split up each sequence
into chunks that are 10 characters long. Each chunk will be separated by a
single space. The sequence will always appear on a single line (sequential
format). It will *not* be wrapped across multiple lines. Sequences are
chunked in this manner for improved readability, and because most example
PHYLIP files are chunked in a similar way (e.g., see the example file
above). Note that this chunking is not required by the PHYLIP format.
Examples
--------
Let's create an alignment with three DNA sequences of equal length:
>>> from skbio import Alignment, DNA
>>> seqs = [DNA('ACCGTTGTA-GTAGCT', metadata={'id':'seq1'}),
... DNA('A--GTCGAA-GTACCT', metadata={'id':'sequence-2'}),
... DNA('AGAGTTGAAGGTATCT', metadata={'id':'3'})]
>>> aln = Alignment(seqs)
>>> aln
<Alignment: n=3; mean +/- std length=16.00 +/- 0.00>
Now let's write the alignment to file in PHYLIP format, and take a look at the
output:
>>> from StringIO import StringIO
>>> fh = StringIO()
>>> aln.write(fh, format='phylip')
>>> print(fh.getvalue())
3 16
seq1 ACCGTTGTA- GTAGCT
sequence-2A--GTCGAA- GTACCT
3 AGAGTTGAAG GTATCT
<BLANKLINE>
>>> fh.close()
Notice that the 16-character sequences were split into two chunks, and that
each sequence appears on a single line (sequential format). Also note that each
sequence ID is padded with spaces to 10 characters in order to produce a fixed
width column.
If the sequence IDs in an alignment surpass the 10-character limit, an error
will be raised when we try to write a PHYLIP file:
>>> long_id_seqs = [DNA('ACCGT', metadata={'id':'seq1'}),
... DNA('A--GT', metadata={'id':'long-sequence-2'}),
... DNA('AGAGT', metadata={'id':'seq3'})]
>>> long_id_aln = Alignment(long_id_seqs)
>>> fh = StringIO()
>>> long_id_aln.write(fh, format='phylip')
Traceback (most recent call last):
...
PhylipFormatError: Alignment can only be written in PHYLIP format if all \
sequence IDs have 10 or fewer characters. Found sequence with ID \
'long-sequence-2' that exceeds this limit. Use Alignment.update_ids to assign \
shorter IDs.
>>> fh.close()
One way to work around this is to update the IDs to be shorter. The recommended
way of accomplishing this is via ``Alignment.update_ids``, which provides a
flexible way of creating a new ``Alignment`` with updated IDs. For example, to
remap each of the IDs to integer-based IDs:
>>> short_id_aln, _ = long_id_aln.update_ids()
>>> short_id_aln.ids()
['1', '2', '3']
We can now write the new alignment in PHYLIP format:
>>> fh = StringIO()
>>> short_id_aln.write(fh, format='phylip')
>>> print(fh.getvalue())
3 5
1 ACCGT
2 A--GT
3 AGAGT
<BLANKLINE>
>>> fh.close()
References
----------
.. [1] http://evolution.genetics.washington.edu/phylip.html
.. [2] "RAxML Version 8: A tool for Phylogenetic Analysis and
   Post-Analysis of Large Phylogenies". In Bioinformatics, 2014
.. [3] http://evolution.genetics.washington.edu/phylip/doc/sequence.html
.. [4] http://www.phylo.org/tools/obsolete/phylip.html
.. [5] http://www.bioperl.org/wiki/PHYLIP_multiple_alignment_format
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from skbio.alignment import Alignment
from skbio.io import register_writer, PhylipFormatError
from skbio.util._misc import chunk_str
@register_writer('phylip', Alignment)
def _alignment_to_phylip(obj, fh):
if obj.is_empty():
raise PhylipFormatError(
"Alignment can only be written in PHYLIP format if there is at "
"least one sequence in the alignment.")
sequence_length = obj.sequence_length()
if sequence_length == 0:
raise PhylipFormatError(
"Alignment can only be written in PHYLIP format if there is at "
"least one position in the alignment.")
chunk_size = 10
for id_ in obj.ids():
if len(id_) > chunk_size:
raise PhylipFormatError(
"Alignment can only be written in PHYLIP format if all "
"sequence IDs have %d or fewer characters. Found sequence "
"with ID '%s' that exceeds this limit. Use "
"Alignment.update_ids to assign shorter IDs." %
(chunk_size, id_))
sequence_count = obj.sequence_count()
fh.write('{0:d} {1:d}\n'.format(sequence_count, sequence_length))
fmt = '{0:%d}{1}\n' % chunk_size
for seq in obj:
chunked_seq = chunk_str(str(seq), chunk_size, ' ')
fh.write(fmt.format(seq.metadata['id'], chunked_seq))
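# --- Illustrative sketch (editor's addition, not part of scikit-bio): a minimal
# stand-in showing the fixed-width line layout written above. ``_chunk`` mimics
# what chunk_str is used for here (space-separated 10-character chunks), and the
# ID is left-aligned in a 10-character column, exactly like ``fmt`` above.
def _chunk(s, n, sep=' '):
    return sep.join(s[i:i + n] for i in range(0, len(s), n))

def _phylip_line(seq_id, seq, width=10):
    return '{0:{1}}{2}\n'.format(seq_id, width, _chunk(seq, width))

# _phylip_line('seq1', 'ACCGTTGTA-GTAGCT') == 'seq1      ACCGTTGTA- GTAGCT\n'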
repo_name: Achuth17/scikit-bio | path: skbio/io/phylip.py | language: Python | license: bsd-3-clause | size: 10,543 | keyword: ["BioPerl", "scikit-bio"] | text_hash: af52feead40963498be3441e3981f5f564d8183f014f38e4041ff66ee52e55f7
'''
single crystal Bragg reflection
'''
from ocelot.optics.elements import *
# from ocelot.optics.wave import TransferFunction
from ocelot.optics.bragg import *
from ocelot.optics.ray import Ray, trace as trace_ray
from ocelot.gui.optics import *
class Signal(object):
def __init__(self, n=100):
self.t = np.linspace(-1,1, n)
self.f = np.zeros_like(self.t, dtype=np.complex)
self.n = n
class Signal3D(object):
def __init__(self, n=100):
self.t = np.linspace(-1,1, n)
self.f = np.zeros_like(self.t, dtype=np.complex)
self.n = n
def field_on_axis(self):
return self.fs[0]
def field_sum_abs(self):
return np.sum(np.abs(self.fs[:])**2)
def free(self):
pass
def read_signal(file_name, E_ref, npad = 10):
s = Signal()
data = np.loadtxt(file_name, dtype = complex)
s.f = data[:,2]
s.t = np.real(data[:,0])
''' spectrum with finer resolution '''
s.nslice = n = len(s.f)
s.npad = npad
s.f_ = np.zeros((2*npad+1)*n, dtype=complex)
s.f_[npad*n:(npad+1)*n] = s.f
s.f = s.f_
s.t = (npad+1)*np.linspace(s.t[0], s.t[-1], len(s.f))
spec = fft.fft(s.f)
dt = (s.t[1] - s.t[0]) * 1.e-15
k0 = E_ref / (hbar * c)
s.freq_k = 2*pi*(fftfreq(len(spec), d=dt) / c )
    s.freq_k = -np.roll(s.freq_k, len(spec) // 2) + k0  # take into account s/t
s.freq_ev = s.freq_k * hbar * c
    s.sp = np.roll(spec, len(spec) // 2)
return s
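# Editor's note (added comment, not in the original module): read_signal zero-pads
# the time-domain field to (2*npad+1)*nslice samples before the FFT, so with the
# default npad=10 the spectral grid becomes 21 times denser (same bandwidth, set
# by dt, but a finer frequency step df = 1/(N*dt)).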
def plot_signal(s):
plt.plot(s.t, np.abs(s.f))
def plot_signal_spec(s):
plt.plot(s.freq_ev, np.abs(s.sp))
def plot_filters(filt, f_test=None, param='tr', ax= None):
if ax == None:
f = plt.figure()
ax = f.add_subplot(111)
ax.set_xlabel('Photon Energy [ev]')
ax2 = ax.twinx()
plt.grid(True)
if param == 'tr':
ax.set_title('Transmissivity')
data = filt.tr
if f_test != None: data_test = f_test.tr
if param == 'ref':
ax.set_title('Reflectivity')
data = filt.ref
if f_test != None: data_test = f_test.ref
l1,=ax.plot(filt.ev, np.abs(data), 'bd')
#ax2.plot(filt.ev, unfold_angles( np.angle(data)) , 'gd')
l2,=ax2.plot(filt.ev, np.angle(data) , 'gd')
plt.legend([l1,l2],['abs','phase'])
if f_test != None:
ax.plot(f_test.ev, np.abs(data_test), 'b--')
#ax2.plot(f_test.ev, unfold_angles(np.angle(data_test)), 'g--')
ax2.plot(f_test.ev, np.angle(data_test), 'g--')
def plot_spec_filt(s, filt, ax):
ax.plot(s.freq_ev, np.abs(s.sp), 'b.')
tr_r, tr_i = np.real(filt.tr), np.imag(filt.tr)
tr_mod = np.real(np.sqrt(tr_r*tr_r + tr_i*tr_i)) #modulus of T
ax.plot(filt.ev, tr_mod / np.max(tr_mod) * np.max(np.abs(s.sp)), 'r.--')
print(s.freq_k)
repo_name: ocelot-collab/ocelot | path: ocelot/optics/utils.py | language: Python | license: gpl-3.0 | size: 2,866 | keyword: ["CRYSTAL"] | text_hash: a324cc5541d99eb1c3b762a1b3d8996857616f740b9573444de3622a4ce36efb
#!/usr/bin/env python
# This example demonstrates the use of multiline 2D text using
# vtkTextMappers. It shows several justifications as well as
# single-line and multiple-line text inputs.
import vtk
font_size = 14
# Create the text mappers and the associated Actor2Ds.
# The font and text properties (except justification) are the same for
# each single line mapper. Let's create a common text property object
singleLineTextProp = vtk.vtkTextProperty()
singleLineTextProp.SetFontSize(font_size)
singleLineTextProp.SetFontFamilyToArial()
singleLineTextProp.BoldOff()
singleLineTextProp.ItalicOff()
singleLineTextProp.ShadowOff()
# The font and text properties (except justification) are the same for
# each multi line mapper. Let's create a common text property object
multiLineTextProp = vtk.vtkTextProperty()
multiLineTextProp.ShallowCopy(singleLineTextProp)
multiLineTextProp.BoldOn()
multiLineTextProp.ItalicOn()
multiLineTextProp.ShadowOn()
multiLineTextProp.SetLineSpacing(0.8)
# The text is on a single line and bottom-justified.
singleLineTextB = vtk.vtkTextMapper()
singleLineTextB.SetInput("Single line (bottom)")
tprop = singleLineTextB.GetTextProperty()
tprop.ShallowCopy(singleLineTextProp)
tprop.SetVerticalJustificationToBottom()
tprop.SetColor(1, 0, 0)
singleLineTextActorB = vtk.vtkActor2D()
singleLineTextActorB.SetMapper(singleLineTextB)
singleLineTextActorB.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
singleLineTextActorB.GetPositionCoordinate().SetValue(0.05, 0.85)
# The text is on a single line and center-justified (vertical
# justification).
singleLineTextC = vtk.vtkTextMapper()
singleLineTextC.SetInput("Single line (centered)")
tprop = singleLineTextC.GetTextProperty()
tprop.ShallowCopy(singleLineTextProp)
tprop.SetVerticalJustificationToCentered()
tprop.SetColor(0, 1, 0)
singleLineTextActorC = vtk.vtkActor2D()
singleLineTextActorC.SetMapper(singleLineTextC)
singleLineTextActorC.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
singleLineTextActorC.GetPositionCoordinate().SetValue(0.05, 0.75)
# The text is on a single line and top-justified.
singleLineTextT = vtk.vtkTextMapper()
singleLineTextT.SetInput("Single line (top)")
tprop = singleLineTextT.GetTextProperty()
tprop.ShallowCopy(singleLineTextProp)
tprop.SetVerticalJustificationToTop()
tprop.SetColor(0, 0, 1)
singleLineTextActorT = vtk.vtkActor2D()
singleLineTextActorT.SetMapper(singleLineTextT)
singleLineTextActorT.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
singleLineTextActorT.GetPositionCoordinate().SetValue(0.05, 0.65)
# The text is on multiple lines and left- and top-justified.
textMapperL = vtk.vtkTextMapper()
textMapperL.SetInput("This is\nmulti-line\ntext output\n(left-top)")
tprop = textMapperL.GetTextProperty()
tprop.ShallowCopy(multiLineTextProp)
tprop.SetJustificationToLeft()
tprop.SetVerticalJustificationToTop()
tprop.SetColor(1, 0, 0)
textActorL = vtk.vtkActor2D()
textActorL.SetMapper(textMapperL)
textActorL.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
textActorL.GetPositionCoordinate().SetValue(0.05, 0.5)
# The text is on multiple lines and center-justified (both horizontal and
# vertical).
textMapperC = vtk.vtkTextMapper()
textMapperC.SetInput("This is\nmulti-line\ntext output\n(centered)")
tprop = textMapperC.GetTextProperty()
tprop.ShallowCopy(multiLineTextProp)
tprop.SetJustificationToCentered()
tprop.SetVerticalJustificationToCentered()
tprop.SetColor(0, 1, 0)
textActorC = vtk.vtkActor2D()
textActorC.SetMapper(textMapperC)
textActorC.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
textActorC.GetPositionCoordinate().SetValue(0.5, 0.5)
# The text is on multiple lines and right- and bottom-justified.
textMapperR = vtk.vtkTextMapper()
textMapperR.SetInput("This is\nmulti-line\ntext output\n(right-bottom)")
tprop = textMapperR.GetTextProperty()
tprop.ShallowCopy(multiLineTextProp)
tprop.SetJustificationToRight()
tprop.SetVerticalJustificationToBottom()
tprop.SetColor(0, 0, 1)
textActorR = vtk.vtkActor2D()
textActorR.SetMapper(textMapperR)
textActorR.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
textActorR.GetPositionCoordinate().SetValue(0.95, 0.5)
# Draw the grid to demonstrate the placement of the text.
# Set up the necessary points.
Pts = vtk.vtkPoints()
Pts.InsertNextPoint(0.05, 0.0, 0.0)
Pts.InsertNextPoint(0.05, 1.0, 0.0)
Pts.InsertNextPoint(0.5, 0.0, 0.0)
Pts.InsertNextPoint(0.5, 1.0, 0.0)
Pts.InsertNextPoint(0.95, 0.0, 0.0)
Pts.InsertNextPoint(0.95, 1.0, 0.0)
Pts.InsertNextPoint(0.0, 0.5, 0.0)
Pts.InsertNextPoint(1.0, 0.5, 0.0)
Pts.InsertNextPoint(0.00, 0.85, 0.0)
Pts.InsertNextPoint(0.50, 0.85, 0.0)
Pts.InsertNextPoint(0.00, 0.75, 0.0)
Pts.InsertNextPoint(0.50, 0.75, 0.0)
Pts.InsertNextPoint(0.00, 0.65, 0.0)
Pts.InsertNextPoint(0.50, 0.65, 0.0)
# Set up the lines that use these points.
Lines = vtk.vtkCellArray()
Lines.InsertNextCell(2)
Lines.InsertCellPoint(0)
Lines.InsertCellPoint(1)
Lines.InsertNextCell(2)
Lines.InsertCellPoint(2)
Lines.InsertCellPoint(3)
Lines.InsertNextCell(2)
Lines.InsertCellPoint(4)
Lines.InsertCellPoint(5)
Lines.InsertNextCell(2)
Lines.InsertCellPoint(6)
Lines.InsertCellPoint(7)
Lines.InsertNextCell(2)
Lines.InsertCellPoint(8)
Lines.InsertCellPoint(9)
Lines.InsertNextCell(2)
Lines.InsertCellPoint(10)
Lines.InsertCellPoint(11)
Lines.InsertNextCell(2)
Lines.InsertCellPoint(12)
Lines.InsertCellPoint(13)
# Create a grid that uses these points and lines.
Grid = vtk.vtkPolyData()
Grid.SetPoints(Pts)
Grid.SetLines(Lines)
# Set up the coordinate system.
normCoords = vtk.vtkCoordinate()
normCoords.SetCoordinateSystemToNormalizedViewport()
# Set up the mapper and actor (2D) for the grid.
mapper = vtk.vtkPolyDataMapper2D()
mapper.SetInput(Grid)
mapper.SetTransformCoordinate(normCoords)
gridActor = vtk.vtkActor2D()
gridActor.SetMapper(mapper)
gridActor.GetProperty().SetColor(0.1, 0.1, 0.1)
# Create the Renderer, RenderWindow, and RenderWindowInteractor
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer; set the background and size; zoom in
# closer to the image; render
ren.AddActor2D(textActorL)
ren.AddActor2D(textActorC)
ren.AddActor2D(textActorR)
ren.AddActor2D(singleLineTextActorB)
ren.AddActor2D(singleLineTextActorC)
ren.AddActor2D(singleLineTextActorT)
ren.AddActor2D(gridActor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(500, 300)
ren.GetActiveCamera().Zoom(1.5)
iren.Initialize()
renWin.Render()
iren.Start()
| b3c/VTK-5.8 | Examples/Annotation/Python/multiLineText.py | Python | bsd-3-clause | 6,586 | ["VTK"] | f3d8eb56654ae6bd0874347f717908af1199bda94a00fbafa710efc21da32955 |
#pylint: disable=invalid-name
from __future__ import (absolute_import, division, print_function)
from mantid.simpleapi import *
def heliumDetectorEff(workspace):
''' Calculate the corrected Helium detector values. '''
ac= 6.022169e23 # Avogadro's constant mol-1
vm= 2.24136e4 # Molar volume of gas cm3 mol-1
gp= 10.0 # Gas pressure (atms)
gsig0= 5333.0e-24 # Gas cross section at LAM0 cm2
gt= 2.5 # Gas path length cm
lam0= 1.8 # Characteristic wavelength
gn= ac*gp/vm # Number density of gas
sgn= gn*gsig0*gt/lam0 # Exponential term for gas
OneMinusExponentialCor(InputWorkspace=workspace,OutputWorkspace=workspace,C=str(sgn),Operation="Divide")
wt= 60.014 # Molecular weight Ni-Cu g mol-1
rho= 8.00 # Density Ni-Cu g cm-3
ct= 0.05 # Monel (Ni-Cu) wall thickness cm
wsig0= 4.522e-24 # Wall cross section at LAM0 cm2
wn= ac*rho/wt # Number density of wall
swn= wn*wsig0*ct/lam0 # Exponential term for wall
ExponentialCorrection(InputWorkspace=workspace,OutputWorkspace=workspace,C1=str(swn),Operation="Divide")
# simple polynomial correction based on a D2O spectrum taken at 1.5 deg
PolynomialCorrection(InputWorkspace=workspace,OutputWorkspace=workspace,Coefficients="-1.3697,0.8602,-0.7839,0.2866,-0.0447,0.0025")
return
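# Illustrative sketch only (not part of the original script): the gas-absorption step above
# divides the spectrum by an ideal He-3 tube efficiency of the form 1 - exp(-sgn*lam).
# A plain-NumPy helper for checking that factor offline (names and defaults are assumptions,
# not Mantid API):
def _he3_efficiency_sketch(lam, gp=10.0, gt=2.5, gsig0=5333.0e-24, lam0=1.8):
    '''Return 1 - exp(-sgn*lam) for a He-3 tube; lam is the wavelength in Angstrom.'''
    import numpy as np              # local import keeps the sketch self-contained
    ac = 6.022169e23                # Avogadro's constant mol-1
    vm = 2.24136e4                  # Molar volume of gas cm3 mol-1
    gn = ac * gp / vm               # Number density of gas
    sgn = gn * gsig0 * gt / lam0    # Exponential term for gas
    return 1.0 - np.exp(-sgn * np.asarray(lam, dtype=float))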
def monitor2Eff(workspace):
''' Calculate the corrected monitor2 values. '''
# expon= unt*(1-exp(-8.3047 * zz * x_mean ))
# yout[i]= yin[i]*(1.0-expon) / expon
# eout[i]= ein[i]*(1.0-expon) / expon
# The above correction is equivalent to multiplying by:
#     ((1/unt - 1) + e^(-8.3047*zz*x)) / (1 - e^(-8.3047*zz*x))
unt=0.24 # 0.05 # ratio of scintillator to total area
zz = 0.6 #0.03 # thickness(cm) of scintillator
c1 = 0.7112*zz #8.3047*zz
ExponentialCorrection(InputWorkspace=workspace,OutputWorkspace=workspace,C1=str(c1),Operation="Multiply")
shift = (1.0/unt)-1.0
CreateSingleValuedWorkspace(OutputWorkspace="shift",DataValue=str(shift),ErrorValue="0.0")
Plus(LHSWorkspace=workspace,RHSWorkspace="shift",OutputWorkspace=workspace)
mtd.remove("shift")
OneMinusExponentialCor(InputWorkspace=workspace,OutputWorkspace=workspace,C=str(c1))
return
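# Illustrative sketch only (assumed helper, not part of the original script): the closed-form
# monitor factor quoted in the comment above, written in plain NumPy so the stepwise
# algorithm calls can be sanity-checked offline; coeff defaults to the 0.7112 used for c1.
def _monitor2_factor_sketch(x, unt=0.24, zz=0.6, coeff=0.7112):
    '''Return ((1/unt - 1) + exp(-coeff*zz*x)) / (1 - exp(-coeff*zz*x)) for x > 0.'''
    import numpy as np
    e = np.exp(-coeff * zz * np.asarray(x, dtype=float))
    return ((1.0 / unt - 1.0) + e) / (1.0 - e)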
def main():
'''The main routine; it is executed only if the script is run directly, not if it is imported.'''
LoadRawDialog(OutputWorkspace="ws",SpectrumMin="1",SpectrumMax="1")
ConvertUnits(InputWorkspace="ws",OutputWorkspace="ws",Target="Wavelength",AlignBins="1")
heliumDetectorEff("ws")
monitor2Eff("ws")
print("Done!")
if __name__ == '__main__':
main()
| wdzhou/mantid | scripts/LargeScaleStructures/ReflectometerCors.py | Python | gpl-3.0 | 2,907 | ["Avogadro"] | 2f663d31f74c36a4f82a0874e256144664e0cae772072602de05b35ad7518310 |
import enum
import inspect
import pydoc
import unittest
from collections import OrderedDict
from enum import Enum, IntEnum, EnumMeta, Flag, IntFlag, unique, auto
from io import StringIO
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
from test import support
try:
import threading
except ImportError:
threading = None
# for pickle tests
try:
class Stooges(Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
Stooges = exc
try:
class IntStooges(int, Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
IntStooges = exc
try:
class FloatStooges(float, Enum):
LARRY = 1.39
CURLY = 2.72
MOE = 3.142596
except Exception as exc:
FloatStooges = exc
try:
class FlagStooges(Flag):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
FlagStooges = exc
# for pickle test and subclass tests
try:
class StrEnum(str, Enum):
'accepts only string values'
class Name(StrEnum):
BDFL = 'Guido van Rossum'
FLUFL = 'Barry Warsaw'
except Exception as exc:
Name = exc
try:
Question = Enum('Question', 'who what when where why', module=__name__)
except Exception as exc:
Question = exc
try:
Answer = Enum('Answer', 'him this then there because')
except Exception as exc:
Answer = exc
try:
Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception as exc:
Theory = exc
# for doctests
try:
class Fruit(Enum):
TOMATO = 1
BANANA = 2
CHERRY = 3
except Exception:
pass
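# The two helpers below are used throughout these tests: the first round-trips an object
# through pickle at every protocol and applies the given assertion to the result; the second
# asserts that pickling the object raises the expected exception at every protocol.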
def test_pickle_dump_load(assertion, source, target=None):
if target is None:
target = source
for protocol in range(HIGHEST_PROTOCOL + 1):
assertion(loads(dumps(source, protocol=protocol)), target)
def test_pickle_exception(assertion, exception, obj):
for protocol in range(HIGHEST_PROTOCOL + 1):
with assertion(exception):
dumps(obj, protocol=protocol)
class TestHelpers(unittest.TestCase):
# _is_descriptor, _is_sunder, _is_dunder
def test_is_descriptor(self):
class foo:
pass
for attr in ('__get__','__set__','__delete__'):
obj = foo()
self.assertFalse(enum._is_descriptor(obj))
setattr(obj, attr, 1)
self.assertTrue(enum._is_descriptor(obj))
def test_is_sunder(self):
for s in ('_a_', '_aa_'):
self.assertTrue(enum._is_sunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_sunder(s))
def test_is_dunder(self):
for s in ('__a__', '__aa__'):
self.assertTrue(enum._is_dunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_dunder(s))
# tests
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
def test_dir_on_class(self):
Season = self.Season
self.assertEqual(
set(dir(Season)),
set(['__class__', '__doc__', '__members__', '__module__',
'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
)
def test_dir_on_item(self):
Season = self.Season
self.assertEqual(
set(dir(Season.WINTER)),
set(['__class__', '__doc__', '__module__', 'name', 'value']),
)
def test_dir_with_added_behavior(self):
class Test(Enum):
this = 'that'
these = 'those'
def wowser(self):
return ("Wowser! I'm %s!" % self.name)
self.assertEqual(
set(dir(Test)),
set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']),
)
self.assertEqual(
set(dir(Test.this)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']),
)
def test_dir_on_sub_with_behavior_on_super(self):
# see issue22506
class SuperEnum(Enum):
def invisible(self):
return "did you see me?"
class SubEnum(SuperEnum):
sample = 5
self.assertEqual(
set(dir(SubEnum.sample)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
)
def test_enum_in_enum_out(self):
Season = self.Season
self.assertIs(Season(Season.WINTER), Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1):
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertIn(e, Season)
self.assertIs(type(e), Season)
self.assertIsInstance(e, Season)
self.assertEqual(str(e), 'Season.' + season)
self.assertEqual(
repr(e),
'<Season.{0}: {1}>'.format(season, i),
)
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
with self.assertRaises(AttributeError):
Season.SPRING.name = 'invierno'
with self.assertRaises(AttributeError):
Season.SPRING.value = 2
def test_changing_member(self):
Season = self.Season
with self.assertRaises(AttributeError):
Season.WINTER = 'really cold'
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
with self.assertRaises(AttributeError):
del Season.SPRING
with self.assertRaises(AttributeError):
del Season.DRY
with self.assertRaises(AttributeError):
del Season.SPRING.name
def test_bool_of_class(self):
class Empty(Enum):
pass
self.assertTrue(bool(Empty))
def test_bool_of_member(self):
class Count(Enum):
zero = 0
one = 1
two = 2
for member in Count:
self.assertTrue(bool(member))
def test_invalid_names(self):
with self.assertRaises(ValueError):
class Wrong(Enum):
mro = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_create_= 11
with self.assertRaises(ValueError):
class Wrong(Enum):
_get_mixins_ = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_find_new_ = 1
with self.assertRaises(ValueError):
class Wrong(Enum):
_any_name_ = 9
def test_bool(self):
# plain Enum members are always True
class Logic(Enum):
true = True
false = False
self.assertTrue(Logic.true)
self.assertTrue(Logic.false)
# unless overridden
class RealLogic(Enum):
true = True
false = False
def __bool__(self):
return bool(self._value_)
self.assertTrue(RealLogic.true)
self.assertFalse(RealLogic.false)
# mixed Enums depend on mixed-in type
class IntLogic(int, Enum):
true = 1
false = 0
self.assertTrue(IntLogic.true)
self.assertFalse(IntLogic.false)
def test_contains(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
self.assertNotIn(3, Season)
val = Season(3)
self.assertIn(val, Season)
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
def test_comparisons(self):
Season = self.Season
with self.assertRaises(TypeError):
Season.SPRING < Season.WINTER
with self.assertRaises(TypeError):
Season.SPRING > 4
self.assertNotEqual(Season.SPRING, 1)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
with self.assertRaises(TypeError):
Season.SPRING < Part.CLIP
def test_enum_duplicates(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertIs(Season.FALL, Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertIs(Season(3), Season.AUTUMN)
self.assertIs(Season(1), Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
[k for k,v in Season.__members__.items() if v.name != k],
['FALL', 'ANOTHER_SPRING'],
)
def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertIs(type(Huh.name), Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_format_enum(self):
Season = self.Season
self.assertEqual('{}'.format(Season.SPRING),
'{}'.format(str(Season.SPRING)))
self.assertEqual( '{:}'.format(Season.SPRING),
'{:}'.format(str(Season.SPRING)))
self.assertEqual('{:20}'.format(Season.SPRING),
'{:20}'.format(str(Season.SPRING)))
self.assertEqual('{:^20}'.format(Season.SPRING),
'{:^20}'.format(str(Season.SPRING)))
self.assertEqual('{:>20}'.format(Season.SPRING),
'{:>20}'.format(str(Season.SPRING)))
self.assertEqual('{:<20}'.format(Season.SPRING),
'{:<20}'.format(str(Season.SPRING)))
def test_format_enum_custom(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!')
def assertFormatIsValue(self, spec, member):
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{}', Konstants.TAU)
self.assertFormatIsValue('{:}', Konstants.TAU)
self.assertFormatIsValue('{:20}', Konstants.TAU)
self.assertFormatIsValue('{:^20}', Konstants.TAU)
self.assertFormatIsValue('{:>20}', Konstants.TAU)
self.assertFormatIsValue('{:<20}', Konstants.TAU)
self.assertFormatIsValue('{:n}', Konstants.TAU)
self.assertFormatIsValue('{:5.2}', Konstants.TAU)
self.assertFormatIsValue('{:f}', Konstants.TAU)
def test_format_enum_int(self):
Grades = self.Grades
self.assertFormatIsValue('{}', Grades.C)
self.assertFormatIsValue('{:}', Grades.C)
self.assertFormatIsValue('{:20}', Grades.C)
self.assertFormatIsValue('{:^20}', Grades.C)
self.assertFormatIsValue('{:>20}', Grades.C)
self.assertFormatIsValue('{:<20}', Grades.C)
self.assertFormatIsValue('{:+}', Grades.C)
self.assertFormatIsValue('{:08X}', Grades.C)
self.assertFormatIsValue('{:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{}', Directional.WEST)
self.assertFormatIsValue('{:}', Directional.WEST)
self.assertFormatIsValue('{:20}', Directional.WEST)
self.assertFormatIsValue('{:^20}', Directional.WEST)
self.assertFormatIsValue('{:>20}', Directional.WEST)
self.assertFormatIsValue('{:<20}', Directional.WEST)
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited(self):
class StrEnum(str, Enum):
pass
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target, 1):
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertIn(e, WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertIs(type(e), WeekDay)
self.assertIsInstance(e, int)
self.assertIsInstance(e, Enum)
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_intenum_from_bytes(self):
self.assertIs(IntStooges.from_bytes(b'\x00\x03', 'big'), IntStooges.MOE)
with self.assertRaises(ValueError):
IntStooges.from_bytes(b'\x00\x05', 'big')
def test_floatenum_fromhex(self):
h = float.hex(FloatStooges.MOE.value)
self.assertIs(FloatStooges.fromhex(h), FloatStooges.MOE)
h = float.hex(FloatStooges.MOE.value + 0.01)
with self.assertRaises(ValueError):
FloatStooges.fromhex(h)
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertIs, Stooges.CURLY)
test_pickle_dump_load(self.assertIs, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertIs, IntStooges.CURLY)
test_pickle_dump_load(self.assertIs, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertIs, FloatStooges.CURLY)
test_pickle_dump_load(self.assertIs, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertIs, Answer.him)
test_pickle_dump_load(self.assertIs, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertIs, Question.who)
test_pickle_dump_load(self.assertIs, Question)
def test_enum_function_with_qualname(self):
if isinstance(Theory, Exception):
raise Theory
self.assertEqual(Theory.__qualname__, 'spanish_inquisition')
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs)
def test_pickle_by_name(self):
class ReplaceGlobalInt(IntEnum):
ONE = 1
TWO = 2
ReplaceGlobalInt.__reduce_ex__ = enum._reduce_ex_by_name
for proto in range(HIGHEST_PROTOCOL):
self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO')
def test_exploding_pickle(self):
BadPickle = Enum(
'BadPickle', 'dill sweet bread-n-butter', module=__name__)
globals()['BadPickle'] = BadPickle
# now break BadPickle to test exception raising
enum._make_class_unpicklable(BadPickle)
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertIs(Period(2), Period.noon)
self.assertIs(getattr(Period, 'night'), Period.night)
self.assertIs(Period['morning'], Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__eq__'))
def test_iteration_order(self):
class Season(Enum):
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_reversed_iteration_order(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_programmatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', start=10)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 10):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list_with_start(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 20):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
OrderedDict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 30):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass_with_start(self):
SummerMonth = IntEnum('SummerMonth', 'june july august', start=40)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 40):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
self.assertEqual(Name.BDFL, Name('Guido van Rossum'))
self.assertIs(Name.BDFL, getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertIs, Name.BDFL)
def test_extending(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertIsNot(type(whatever.really), whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
with self.assertRaises(TypeError):
class Wrong(Enum, str):
NotHere = 'error before this point'
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.female), Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.male), Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertIs(Number.one._member_type_, int)
self.assertIs(Number._member_type_, int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertIs(String.yarn._member_type_, str)
self.assertIs(String._member_type_, str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertIs(Plain.vanilla._member_type_, object)
self.assertIs(Plain._member_type_, object)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
Color(4)
with self.assertRaises(KeyError):
Color['chartreuse']
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
def test_inherited_repr(self):
class MyEnum(Enum):
def __repr__(self):
return "My name is %s." % self.name
class MyIntEnum(int, MyEnum):
this = 1
that = 2
theother = 3
self.assertEqual(repr(MyIntEnum.that), "My name is that.")
def test_multiple_mixin_mro(self):
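# auto_enum subclasses EnumMeta and rewrites the class dict before normal Enum processing:
# members assigned Ellipsis receive an auto-incrementing integer value that continues from
# the most recent explicit value.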
class auto_enum(type(Enum)):
def __new__(metacls, cls, bases, classdict):
temp = type(classdict)()
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v is Ellipsis:
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
class AutoNumberedEnum(Enum, metaclass=auto_enum):
pass
class AutoIntEnum(IntEnum, metaclass=auto_enum):
pass
class TestAutoNumber(AutoNumberedEnum):
a = ...
b = 3
c = ...
class TestAutoInt(AutoIntEnum):
a = ...
b = 3
c = ...
def test_subclasses_with_getnewargs(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce__(self):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce_ex__(self, proto):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_exception(self.assertRaises, TypeError, NEI.x)
test_pickle_exception(self.assertRaises, PicklingError, NEI)
def test_subclasses_without_direct_pickle_support_using_name(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_tuple_subclass(self):
class SomeTuple(tuple, Enum):
__qualname__ = 'SomeTuple' # needed for pickle protocol 4
first = (1, 'for the money')
second = (2, 'for the show')
third = (3, 'for the music')
self.assertIs(type(SomeTuple.first), SomeTuple)
self.assertIsInstance(SomeTuple.second, tuple)
self.assertEqual(SomeTuple.third, (3, 'for the music'))
globals()['SomeTuple'] = SomeTuple
test_pickle_dump_load(self.assertIs, SomeTuple.first)
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
first = ()
second = ()
third = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(
list(AutoNumber),
[AutoNumber.first, AutoNumber.second, AutoNumber.third],
)
self.assertEqual(int(AutoNumber.second), 2)
self.assertEqual(AutoNumber.third.value, 3)
self.assertIs(AutoNumber(1), AutoNumber.first)
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_equality(self):
class AlwaysEqual:
def __eq__(self, other):
return True
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(AlwaysEqual(), OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, AlwaysEqual())
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertGreater(Grade.A, Grade.B)
self.assertLessEqual(Grade.F, Grade.C)
self.assertLess(Grade.D, Grade.A)
self.assertGreaterEqual(Grade.B, Grade.B)
self.assertEqual(Grade.B, Grade.B)
self.assertNotEqual(Grade.C, Grade.D)
def test_extending2(self):
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_no_duplicates(self):
class UniqueEnum(Enum):
def __init__(self, *args):
cls = self.__class__
if any(self.value == e.value for e in cls):
a = self.name
e = cls(self.value).name
raise ValueError(
"aliases not allowed in UniqueEnum: %r --> %r"
% (a, e)
)
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
def test_init(self):
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
def __init__(self, mass, radius):
self.mass = mass # in kilograms
self.radius = radius # in meters
@property
def surface_gravity(self):
# universal gravitational constant (m3 kg-1 s-2)
G = 6.67300E-11
return G * self.mass / (self.radius * self.radius)
self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
for enum, value in zip(ColorInAList, range(3)):
value += 1
self.assertEqual(enum.value, [value])
self.assertIs(ColorInAList([value]), enum)
def test_conflicting_types_resolved_in_new(self):
class LabelledIntEnum(int, Enum):
def __new__(cls, *args):
value, label = args
obj = int.__new__(cls, value)
obj.label = label
obj._value_ = value
return obj
class LabelledList(LabelledIntEnum):
unprocessed = (1, "Unprocessed")
payment_complete = (2, "Payment Complete")
self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
self.assertEqual(LabelledList.unprocessed, 1)
self.assertEqual(LabelledList(1), LabelledList.unprocessed)
def test_auto_number(self):
class Color(Enum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_name(self):
class Color(Enum):
def _generate_next_value_(name, start, count, last):
return name
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_name_inherit(self):
class AutoNameEnum(Enum):
def _generate_next_value_(name, start, count, last):
return name
class Color(AutoNameEnum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_garbage(self):
class Color(Enum):
red = 'red'
blue = auto()
self.assertEqual(Color.blue.value, 1)
def test_auto_garbage_corrected(self):
class Color(Enum):
red = 'red'
blue = 2
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
class TestOrder(unittest.TestCase):
def test_same_members(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
def test_same_members_with_aliases(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
verde = green
def test_same_members_wrong_order(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
blue = 3
green = 2
def test_order_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
def test_order_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
verde = green
def test_enum_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
def test_enum_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
verde = green
class TestFlag(unittest.TestCase):
"""Tests of the Flags."""
class Perm(Flag):
R, W, X = 4, 2, 1
class Open(Flag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
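# Note: Open uses a deliberately awkward value set: RO is the empty flag (0), AC equals
# WO|RW (3), and CE sits on an isolated high bit (1<<19), so the tests below exercise
# composite decomposition and inversion over non-contiguous values.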
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'Perm.R')
self.assertEqual(str(Perm.W), 'Perm.W')
self.assertEqual(str(Perm.X), 'Perm.X')
self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
self.assertEqual(str(Perm(0)), 'Perm.0')
self.assertEqual(str(~Perm.R), 'Perm.W|X')
self.assertEqual(str(~Perm.W), 'Perm.R|X')
self.assertEqual(str(~Perm.X), 'Perm.R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.0')
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'Open.RO')
self.assertEqual(str(Open.WO), 'Open.WO')
self.assertEqual(str(Open.AC), 'Open.AC')
self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
self.assertEqual(str(~Open.WO), 'Open.CE|RW')
self.assertEqual(str(~Open.AC), 'Open.CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC')
self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
self.assertEqual(repr(~Perm.R), '<Perm.W|X: 3>')
self.assertEqual(repr(~Perm.W), '<Perm.R|X: 5>')
self.assertEqual(repr(~Perm.X), '<Perm.R|W: 6>')
self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: 1>')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.0: 0>')
self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: 7>')
Open = self.Open
self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: 524291>')
self.assertEqual(repr(~Open.WO), '<Open.CE|RW: 524290>')
self.assertEqual(repr(~Open.AC), '<Open.CE: 524288>')
self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC: 3>')
self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: 2>')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i | j), Perm(i.value | j.value))
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for i in Perm:
self.assertIs(i | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual((i & j).value, i.value & j.value)
self.assertIs(type(i & j), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & RWX, i)
self.assertIs(RWX & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for i in Perm:
self.assertIs(i ^ Perm(0), i)
self.assertIs(Perm(0) ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_programatic_function_string(self):
Perm = Flag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_with_start(self):
Perm = Flag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_list(self):
Perm = Flag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_iterable(self):
Perm = Flag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_dict(self):
Perm = Flag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_pickle(self):
if isinstance(FlagStooges, Exception):
raise FlagStooges
test_pickle_dump_load(self.assertIs, FlagStooges.CURLY|FlagStooges.MOE)
test_pickle_dump_load(self.assertIs, FlagStooges)
def test_containment(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
def test_auto_number(self):
class Color(Flag):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 4)
def test_auto_number_garbage(self):
with self.assertRaisesRegex(TypeError, 'Invalid Flag value: .not an int.'):
class Color(Flag):
red = 'not an int'
blue = auto()
def test_cascading_failure(self):
class Bizarre(Flag):
c = 3
d = 4
f = 6
# Bizarre.c | Bizarre.d
self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5)
self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5)
self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2)
self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2)
self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1)
self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1)
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_bizarre(self):
class Bizarre(Flag):
b = 3
c = 4
d = 6
self.assertEqual(repr(Bizarre(7)), '<Bizarre.d|c|b: 7>')
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(Flag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with support.start_threads(threads):
pass
        # check that no duplicate members were created (256 distinct values: 8 flags plus 248 composites)
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestIntFlag(unittest.TestCase):
"""Tests of the IntFlags."""
class Perm(IntFlag):
X = 1 << 0
W = 1 << 1
R = 1 << 2
class Open(IntFlag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
def test_type(self):
Perm = self.Perm
Open = self.Open
for f in Perm:
self.assertTrue(isinstance(f, Perm))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Perm.W | Perm.X, Perm))
self.assertEqual(Perm.W | Perm.X, 3)
for f in Open:
self.assertTrue(isinstance(f, Open))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Open.WO | Open.RW, Open))
self.assertEqual(Open.WO | Open.RW, 3)
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'Perm.R')
self.assertEqual(str(Perm.W), 'Perm.W')
self.assertEqual(str(Perm.X), 'Perm.X')
self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
self.assertEqual(str(Perm.R | 8), 'Perm.8|R')
self.assertEqual(str(Perm(0)), 'Perm.0')
self.assertEqual(str(Perm(8)), 'Perm.8')
self.assertEqual(str(~Perm.R), 'Perm.W|X')
self.assertEqual(str(~Perm.W), 'Perm.R|X')
self.assertEqual(str(~Perm.X), 'Perm.R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.-8')
self.assertEqual(str(~(Perm.R | 8)), 'Perm.W|X')
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
self.assertEqual(str(Perm(~8)), 'Perm.R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'Open.RO')
self.assertEqual(str(Open.WO), 'Open.WO')
self.assertEqual(str(Open.AC), 'Open.AC')
self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
self.assertEqual(str(Open(4)), 'Open.4')
self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
self.assertEqual(str(~Open.WO), 'Open.CE|RW')
self.assertEqual(str(~Open.AC), 'Open.CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC|RW|WO')
self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
self.assertEqual(str(Open(~4)), 'Open.CE|AC|RW|WO')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
self.assertEqual(repr(Perm.R | 8), '<Perm.8|R: 12>')
self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
self.assertEqual(repr(Perm(8)), '<Perm.8: 8>')
self.assertEqual(repr(~Perm.R), '<Perm.W|X: -5>')
self.assertEqual(repr(~Perm.W), '<Perm.R|X: -3>')
self.assertEqual(repr(~Perm.X), '<Perm.R|W: -2>')
self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: -7>')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.-8: -8>')
self.assertEqual(repr(~(Perm.R | 8)), '<Perm.W|X: -13>')
self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: -1>')
self.assertEqual(repr(Perm(~8)), '<Perm.R|W|X: -9>')
Open = self.Open
self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
self.assertEqual(repr(Open(4)), '<Open.4: 4>')
self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: -1>')
self.assertEqual(repr(~Open.WO), '<Open.CE|RW: -2>')
self.assertEqual(repr(~Open.AC), '<Open.CE: -4>')
self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC|RW|WO: -524289>')
self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: -524290>')
self.assertEqual(repr(Open(~4)), '<Open.CE|AC|RW|WO: -5>')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i | j, i.value | j.value)
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for j in range(8):
self.assertEqual(i | j, i.value | j)
self.assertEqual((i | j).value, i.value | j)
self.assertIs(type(i | j), Perm)
self.assertEqual(j | i, j | i.value)
self.assertEqual((j | i).value, j | i.value)
self.assertIs(type(j | i), Perm)
for i in Perm:
self.assertIs(i | i, i)
self.assertIs(i | 0, i)
self.assertIs(0 | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j))
for j in range(8):
self.assertEqual(i & j, i.value & j)
self.assertEqual((i & j).value, i.value & j)
self.assertIs(type(i & j), Perm)
self.assertEqual(j & i, j & i.value)
self.assertEqual((j & i).value, j & i.value)
self.assertIs(type(j & i), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & 7, i)
self.assertIs(7 & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i ^ j, i.value ^ j.value)
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for j in range(8):
self.assertEqual(i ^ j, i.value ^ j)
self.assertEqual((i ^ j).value, i.value ^ j)
self.assertIs(type(i ^ j), Perm)
self.assertEqual(j ^ i, j ^ i.value)
self.assertEqual((j ^ i).value, j ^ i.value)
self.assertIs(type(j ^ i), Perm)
for i in Perm:
self.assertIs(i ^ 0, i)
self.assertIs(0 ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertEqual(~i, ~i.value)
self.assertEqual((~i).value, ~i.value)
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_programatic_function_string(self):
Perm = IntFlag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_with_start(self):
Perm = IntFlag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_list(self):
Perm = IntFlag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_iterable(self):
Perm = IntFlag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_dict(self):
Perm = IntFlag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_containment(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(IntFlag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with support.start_threads(threads):
pass
        # check that no duplicate members were created (256 distinct values: 8 flags plus 248 composites)
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestUnique(unittest.TestCase):
def test_unique_clean(self):
@unique
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
@unique
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@unique
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@unique
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
def test_unique_with_name(self):
@unique
class Silly(Enum):
one = 1
two = 'dos'
name = 3
@unique
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
expected_help_output_with_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| An enumeration.
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
| The name of the Enum member.
|\x20\x20
| value
| The value of the Enum member.
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping."""
expected_help_output_without_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
|\x20\x20
| value
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__"""
class TestStdLib(unittest.TestCase):
maxDiff = None
class Color(Enum):
red = 1
green = 2
blue = 3
def test_pydoc(self):
# indirectly test __objclass__
if StrEnum.__doc__ is None:
expected_text = expected_help_output_without_docs % __name__
else:
expected_text = expected_help_output_with_docs % __name__
output = StringIO()
helper = pydoc.Helper(output=output)
helper(self.Color)
result = output.getvalue().strip()
self.assertEqual(result, expected_text)
def test_inspect_getmembers(self):
values = dict((
('__class__', EnumMeta),
('__doc__', 'An enumeration.'),
('__members__', self.Color.__members__),
('__module__', __name__),
('blue', self.Color.blue),
('green', self.Color.green),
('name', Enum.__dict__['name']),
('red', self.Color.red),
('value', Enum.__dict__['value']),
))
result = dict(inspect.getmembers(self.Color))
self.assertEqual(values.keys(), result.keys())
failed = False
for k in values.keys():
if result[k] != values[k]:
print()
print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
('=' * 75, k, result[k], values[k], '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_inspect_classify_class_attrs(self):
# indirectly test __objclass__
from inspect import Attribute
values = [
Attribute(name='__class__', kind='data',
defining_class=object, object=EnumMeta),
Attribute(name='__doc__', kind='data',
defining_class=self.Color, object='An enumeration.'),
Attribute(name='__members__', kind='property',
defining_class=EnumMeta, object=EnumMeta.__members__),
Attribute(name='__module__', kind='data',
defining_class=self.Color, object=__name__),
Attribute(name='blue', kind='data',
defining_class=self.Color, object=self.Color.blue),
Attribute(name='green', kind='data',
defining_class=self.Color, object=self.Color.green),
Attribute(name='red', kind='data',
defining_class=self.Color, object=self.Color.red),
Attribute(name='name', kind='data',
defining_class=Enum, object=Enum.__dict__['name']),
Attribute(name='value', kind='data',
defining_class=Enum, object=Enum.__dict__['value']),
]
values.sort(key=lambda item: item.name)
result = list(inspect.classify_class_attrs(self.Color))
result.sort(key=lambda item: item.name)
failed = False
for v, r in zip(values, result):
if r != v:
print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, enum)
# These are unordered here on purpose to ensure that declaration order
# makes no difference.
CONVERT_TEST_NAME_D = 5
CONVERT_TEST_NAME_C = 5
CONVERT_TEST_NAME_B = 5
CONVERT_TEST_NAME_A = 5 # This one should sort first.
CONVERT_TEST_NAME_E = 5
CONVERT_TEST_NAME_F = 5
class TestIntEnumConvert(unittest.TestCase):
def test_convert_value_lookup_priority(self):
test_type = enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# We don't want the reverse lookup value to vary when there are
# multiple possible names for a given value. It should always
        # report the first lexicographical name in that case.
self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A')
def test_convert(self):
test_type = enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_TEST_NAME_F,
test_type.CONVERT_TEST_NAME_A)
self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5)
# Ensure that test_type only picked up names matching the filter.
self.assertEqual([name for name in dir(test_type)
if name[0:2] not in ('CO', '__')],
[], msg='Names other than CONVERT_TEST_* found.')
if __name__ == '__main__':
unittest.main()
|
yotchang4s/cafebabepy
|
src/main/python/test/test_enum.py
|
Python
|
bsd-3-clause
| 93,588
|
[
"MOE"
] |
514af0f4bd089d15564d81638f94dae367c920c92a92758978e93b4276b561df
|
"""
=================================================
Deterministic Tracking with EuDX on Tensor Fields
=================================================
In this example we do deterministic fiber tracking on Tensor fields with EuDX
(Garyfallidis, PhD thesis, 2012).
This example requires importing the `reconst_dti.py` example in order to run.
EuDX was primarily designed with CPU efficiency in mind, so it is useful for
getting a quick overview of your reconstruction results with the help of
tracking.
"""
import os
import sys
import numpy as np
import nibabel as nib
if not os.path.exists('tensor_fa.nii.gz'):
import reconst_dti
"""
EuDX will use the directions (eigenvectors) of the tensors to propagate
streamlines from voxel to voxel, and fractional anisotropy to stop tracking.
"""
fa_img = nib.load('tensor_fa.nii.gz')
FA = fa_img.get_data()
evecs_img = nib.load('tensor_evecs.nii.gz')
evecs = evecs_img.get_data()
"""
In the background of the image the fit will not be accurate because the
measured signal there is mostly noise, so we may well find FA values that are
NaNs (not a number). We can easily remove these in the following way.
"""
FA[np.isnan(FA)] = 0
"""
EuDX takes as input discretized voxel directions on a unit sphere. Therefore,
it is necessary to discretize the eigenvectors before feeding them into EuDX.
For the discretization procedure we use an evenly distributed sphere of 724
points, which we can access using the get_sphere function.
"""
from dipy.data import get_sphere
sphere = get_sphere('symmetric724')
"""
We use quantize_evecs (evecs here stands for eigenvectors) to apply the
discretization.
"""
from dipy.reconst.dti import quantize_evecs
peak_indices = quantize_evecs(evecs, sphere.vertices)
"""
EuDX is the fiber tracking algorithm that we use in this example.
Its most important parameters are the first one, which represents the
magnitude of the peak of a scalar anisotropic function, the second, which
represents the indices of the discretized directions of the peaks, and
odf_vertices, which are the vertices of the input sphere.
"""
from dipy.tracking.eudx import EuDX
eu = EuDX(FA, peak_indices, odf_vertices = sphere.vertices, a_low=0.2)
tensor_streamlines = [streamline for streamline in eu]
"""
We can now save the results to disk. For this purpose we can use the
TrackVis format (``*.trk``). First, we need to create a header.
"""
hdr = nib.trackvis.empty_header()
hdr['voxel_size'] = fa_img.get_header().get_zooms()[:3]
hdr['voxel_order'] = 'LAS'
hdr['dim'] = FA.shape
"""
Then we need to supply the streamlines in the form that the TrackVis format expects.
"""
tensor_streamlines_trk = ((sl, None, None) for sl in tensor_streamlines)
ten_sl_fname = 'tensor_streamlines.trk'
"""
Save the streamlines.
"""
nib.trackvis.write(ten_sl_fname, tensor_streamlines_trk, hdr, points_space='voxel')
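"""
As a quick, optional sanity check (not part of the original example), the saved
file can be read back with the same nibabel TrackVis API used above. The call
below is only a sketch and is left commented out so the behaviour of the
example is unchanged.
"""
# streams, loaded_hdr = nib.trackvis.read(ten_sl_fname, points_space='voxel')
# assert len(streams) == len(tensor_streamlines)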
"""
If you don't want to use Trackvis to visualize the file you can use our
lightweight `fvtk` module.
"""
try:
    from dipy.viz import fvtk
except ImportError:
    raise ImportError('Python vtk module is not installed')
"""
Create a scene.
"""
r=fvtk.ren()
"""
Every streamline will be coloured according to its orientation.
"""
from dipy.viz.colormap import line_colors
"""
fvtk.line adds a streamline actor for streamline visualization,
and fvtk.add adds this actor to the scene.
"""
fvtk.add(r, fvtk.line(tensor_streamlines, line_colors(tensor_streamlines)))
print('Saving illustration as tensor_tracking.png')
fvtk.record(r, n_frames=1, out_path='tensor_tracking.png', size=(600, 600))
"""
.. figure:: tensor_tracking.png
:align: center
**Deterministic streamlines with EuDX on a Tensor Field**.
.. include:: ../links_names.inc
"""
|
maurozucchelli/dipy
|
doc/examples/tracking_eudx_tensor.py
|
Python
|
bsd-3-clause
| 3,725
|
[
"VTK"
] |
99bdbbcc61813415f0d5529a461c6e13541bd105889abcc05d82eef74b71763e
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyEspresso(CMakePackage):
"""ESPResSo is a highly versatile software package for performing and
analyzing scientific Molecular Dynamics many-particle simulations of
coarse-grained atomistic or bead-spring models as they are used in
soft matter research in physics, chemistry and molecular biology. It
can be used to simulate systems such as polymers, liquid crystals,
colloids, polyelectrolytes, ferrofluids and biological systems, for
example DNA and lipid membranes. It also has a DPD and lattice
Boltzmann solver for hydrodynamic interactions, and allows several
particle couplings to the LB fluid.
"""
homepage = "https://espressomd.org/"
git = "https://github.com/espressomd/espresso.git"
url = "https://github.com/espressomd/espresso/releases/download/4.0.0/espresso-4.0.0.tar.gz"
version('develop', branch='python')
version('4.0.2', sha256='89878ab44a58e90b69d56368e961b8ca13d9307f8d4b282967a1f3071a62c740')
version('4.0.1', sha256='17b7268eeba652a77f861bc534cdd05d206e7641d203a9dd5029b44bd422304b')
version('4.0.0', sha256='8e128847447eebd843de24be9b4ad14aa19c028ae48879a5a4535a9683836e6b')
# espressomd/espresso#2244 merge upstream
patch('2244.patch', when="@4.0.0")
depends_on("cmake@3.0:", type='build')
depends_on("mpi")
depends_on("boost+serialization+filesystem+system+python+mpi")
extends("python")
depends_on("py-cython@0.23:", type="build")
depends_on("py-numpy", type=("build", "run"))
depends_on("fftw")
depends_on("hdf5+hl+mpi")
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-espresso/package.py
|
Python
|
lgpl-2.1
| 1,821
|
[
"ESPResSo"
] |
3cc9ae87e7f4961bb7f76a1acccc63280b43162a347cbf31f5476e66570d9226
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import re
import pytest
from spack.url import UndetectableVersionError
from spack.main import SpackCommand
from spack.cmd.url import name_parsed_correctly, version_parsed_correctly
from spack.cmd.url import url_summary
url = SpackCommand('url')
class MyPackage:
def __init__(self, name, versions):
self.name = name
self.versions = versions
def test_name_parsed_correctly():
# Expected True
assert name_parsed_correctly(MyPackage('netcdf', []), 'netcdf')
assert name_parsed_correctly(MyPackage('r-devtools', []), 'devtools')
assert name_parsed_correctly(MyPackage('py-numpy', []), 'numpy')
assert name_parsed_correctly(MyPackage('octave-splines', []), 'splines')
assert name_parsed_correctly(MyPackage('imagemagick', []), 'ImageMagick') # noqa
assert name_parsed_correctly(MyPackage('th-data', []), 'TH.data')
# Expected False
assert not name_parsed_correctly(MyPackage('', []), 'hdf5')
assert not name_parsed_correctly(MyPackage('hdf5', []), '')
assert not name_parsed_correctly(MyPackage('yaml-cpp', []), 'yamlcpp')
assert not name_parsed_correctly(MyPackage('yamlcpp', []), 'yaml-cpp')
assert not name_parsed_correctly(MyPackage('r-py-parser', []), 'parser')
assert not name_parsed_correctly(MyPackage('oce', []), 'oce-0.18.0') # noqa
def test_version_parsed_correctly():
# Expected True
assert version_parsed_correctly(MyPackage('', ['1.2.3']), '1.2.3')
assert version_parsed_correctly(MyPackage('', ['5.4a', '5.4b']), '5.4a')
assert version_parsed_correctly(MyPackage('', ['5.4a', '5.4b']), '5.4b')
assert version_parsed_correctly(MyPackage('', ['1.63.0']), '1_63_0')
assert version_parsed_correctly(MyPackage('', ['0.94h']), '094h')
# Expected False
assert not version_parsed_correctly(MyPackage('', []), '1.2.3')
assert not version_parsed_correctly(MyPackage('', ['1.2.3']), '')
assert not version_parsed_correctly(MyPackage('', ['1.2.3']), '1.2.4')
assert not version_parsed_correctly(MyPackage('', ['3.4a']), '3.4')
assert not version_parsed_correctly(MyPackage('', ['3.4']), '3.4b')
assert not version_parsed_correctly(MyPackage('', ['0.18.0']), 'oce-0.18.0') # noqa
def test_url_parse():
url('parse', 'http://zlib.net/fossils/zlib-1.2.10.tar.gz')
def test_url_with_no_version_fails():
# No version in URL
with pytest.raises(UndetectableVersionError):
url('parse', 'http://www.netlib.org/voronoi/triangle.zip')
def test_url_list():
out = url('list')
total_urls = len(out.split('\n'))
# The following two options should not change the number of URLs printed.
out = url('list', '--color', '--extrapolation')
colored_urls = len(out.split('\n'))
assert colored_urls == total_urls
# The following options should print fewer URLs than the default.
# If they print the same number of URLs, something is horribly broken.
# If they say we missed 0 URLs, something is probably broken too.
out = url('list', '--incorrect-name')
incorrect_name_urls = len(out.split('\n'))
assert 0 < incorrect_name_urls < total_urls
out = url('list', '--incorrect-version')
incorrect_version_urls = len(out.split('\n'))
assert 0 < incorrect_version_urls < total_urls
out = url('list', '--correct-name')
correct_name_urls = len(out.split('\n'))
assert 0 < correct_name_urls < total_urls
out = url('list', '--correct-version')
correct_version_urls = len(out.split('\n'))
assert 0 < correct_version_urls < total_urls
def test_url_summary():
"""Test the URL summary command."""
# test url_summary, the internal function that does the work
(total_urls, correct_names, correct_versions,
name_count_dict, version_count_dict) = url_summary(None)
assert 0 < correct_names <= sum(name_count_dict.values()) <= total_urls # noqa
assert 0 < correct_versions <= sum(version_count_dict.values()) <= total_urls # noqa
# make sure it agrees with the actual command.
out = url('summary')
out_total_urls = int(
re.search(r'Total URLs found:\s*(\d+)', out).group(1))
assert out_total_urls == total_urls
out_correct_names = int(
re.search(r'Names correctly parsed:\s*(\d+)', out).group(1))
assert out_correct_names == correct_names
out_correct_versions = int(
re.search(r'Versions correctly parsed:\s*(\d+)', out).group(1))
assert out_correct_versions == correct_versions
|
skosukhin/spack
|
lib/spack/spack/test/cmd/url.py
|
Python
|
lgpl-2.1
| 5,814
|
[
"NetCDF"
] |
c33dca9a17eaa68786e6f68f377ac98c0d71b1ac2f64a26e499662b04223e652
|
## Automatically adapted for numpy.oldnumeric Mar 26, 2007 by alter_code1.py
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2012 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
## $Revision$
## last $Date$
## last $Author$
"""
Parallelized AmberEntropist calculation.
"""
import os.path, copy
import numpy.oldnumeric as N
import Biskit.tools as T
import Biskit.settings as settings
import Biskit.mathUtils as MU
from Biskit.PVM.TrackingJobMaster import TrackingJobMaster
from Biskit.PVM.hosts import cpus_all, nice_dic
from Biskit import PDBModel, PDBProfiles, EHandler, StdLog
from Biskit.Dock import Complex
slave_path = T.projectRoot()+"/Biskit/AmberEntropySlave.py"
class AmberEntropyMaster(TrackingJobMaster):
"""
Run many AmberEntropist calculations on many nodes. The Master has
a standard set of 13 protocols to run on rec, lig, and com
trajectories, as well as on every single member trajectory - in
total 113. It accepts one variable parameter, e.g. s(tart). Each
protocol is then run for all values of the variable parameter. A
protocol is simply a set of options that are passed on to the
AmberEntropist (which is run from within AmberEntropySlave).
    Comparing the different protocols allows one to more or less separate
random from real correlations, rigid body from intermolecular
vibrations, etc.
Results are put into a tree-shaped dictionary of dictionaries. The
first dimension/key is the member index -- None for the complete
ensemble trajectory, 0 for the first member, etc. The second
dimension/key is the name of the protocol, e.g. 'com_split' for
    the complex trajectory with separately fitted receptor and
ligand. The last dimension contains the different values obtained
from the ptraj run, e.g. 'S_total' points to the total entropy in
cal/mol/K, 'contributions' to the entropy contribution of each
mode, 'T' to the assumed temperature, 'vibes' gives the number of
vibrations with too low frequencies (according to ptraj). All these
are lists of values - one for each value of the variable option.
Example::
* r[None]['fcom']['S_vibes'][0] -> float
first vibr. Entropy of free fake complex for complete ensemble
* r[0]['com']['S_total'] -> [ float, float, .. ]
the total entropies of the complex calculated for the first
ensemble member and the different values of the variable option
"""
def __init__(self, rec=None, lig=None, com=None, out=None,
cr=None, var='s', vrange=[0], jack=0,
zfilter=None, clean=0, all=1,
exrec=[], exlig=[], excom=[],
hosts=cpus_all,
niceness=nice_dic,
w=0, a=1, debug=0,
restart=0,
**kw ):
"""
@param rec: free rec trajectory [required]
@type rec: str
@param lig: free lig trajectory [required]
@type lig: str
@param com: complex trajectory [required]
@type com: str
@param out: file name for pickled result [required]
@type out: str
@param cr: chains of receptor in complex trajectory [n_chains rec]
@type cr: [int]
@param var: name of variable option [ s ]
@type var: str
@param vrange: set of values used for variable option
OR 'start:stop:step', string convertable to
range() input
@type vrange: [any]
@param jack: set up leave-one-trajectory-out jackknife test
(default: 0) (replaces var with 'ex1' and vrange with
range(1,n_members+1))
@type jack: [0|1]
        @param zfilter: kick out outlier trajectories using z-score threshold
on RMSD trace (default: None->don't)
@type zfilter: float
@param clean: remove pickled ref models and member trajectories
(default: 0)
@type clean: 0|1
@param all: skip single member trajs (default: 1)
@type all: 0|1
@param exrec: exclude certain members of receptor ensemble [[]]
@type exrec: [int]
@param exlig: exclude certain members of ligand ensemble [[]]
@type exlig: [int]
@param excom: exclude certain members of complex ensemble [[]]
@type excom: [int]
@param hosts: nodes to be used (default: all known)
@type hosts: [str]
@param debug: don't delete output files (default: 0)
@type debug: 1|0
@param kw: additional key=value parameters for AmberEntropist,
AmberCrdEntropist, Executor and Master.
@type kw: key=value pairs
::
... parameters for AmberEntropist
cast - 1|0, equalize free and bound atom content [1]
s,e - int, start and stop frame [0, to end]
atoms - [ str ], names of atoms to consider [all]
protein - 1|0, remove non-protein atoms [0..don't]
step - int, frame offset [no offset]
thin - float, use randomly distributed fraction of frames [all]
(similar to step but perhaps better for entropy
calculations)
ex - [int] OR ([int],[int]), exclude member trajectories [[]]
ex_n - int, exclude last n members OR... [None]
            ex3 - int, exclude |ex3|rd triple of trajectories [0]
(index starts with 1! 0 to exclude nothing)
... parameters for AmberCrdEntropist
f_template - str, alternative ptraj input template [default]
... parameters for Executor:
            log - Biskit.LogFile, program log (None->STDOUT) [None]
verbose - 0|1, print progress messages to log [log != STDOUT]
... parameters for Master
w - 0|1, show X window for each slave [0]
a - 0|1, add hosts to PVM [1]
"""
## normal and error output
self.fout = T.absfile( out )
self.ferror = os.path.dirname(self.fout) +'/AmberEntropy_errors.log'
self.debug = debug
self.log = StdLog()
## input files and variable option
self.rec = T.absfile( rec, 0 )
self.lig = T.absfile( lig, 0 )
self.com = T.absfile( com, 0 )
self.cr = cr
self.cl = None
self.var = var
self.vrange = self.__vrange( vrange )
self.jack = jack
self.zfilter = zfilter
self.n_members = None
self.clean = clean
self.all = all
## members to exclude, outliers will be added if zfilter != None
self.ex_frec = exrec
self.ex_flig = exlig
self.ex_com = excom
## reserve for loaded reference models
self.ref_frec = self.ref_flig = None
self.ref_brec = self.ref_blig = self.ref_com = None
## reserve for extracted member trajectories
self.members_frec = self.members_flig = []
self.members_brec = self.members_blig = []
## options to be passed on to AmberEntropist
self.options = kw
if not restart:
## Load trajectories, dump references, identify outliers
self.processTrajs()
## prepare dict of protocols for AmberEntropist
self.protocols = self.protocols_var_range( **kw )
self.saveProtocols()
TrackingJobMaster.__init__(self, self.protocols,
chunk_size=1,
hosts=hosts,
niceness=niceness,
slave_script=slave_path,
show_output=w,
add_hosts=a)
print "JobMaster initialized."
def __vrange( self, v ):
"""
        Interpret the vrange option -> [ int ] or [ float ]
@param v: vrange option
@type v: lst OR str
@return: range option
@rtype: [int] OR [float]
"""
if type( v ) is list:
return [ self.__float_int(x) for x in v ]
if type( v ) is str and ':' in v:
v = tuple( [ self.__float_int(x) for x in v.split(':') ] )
return N.arange( *v )
return self.__float_int( v )
def __float_int( self, v ):
"""
Convert v to int or, if necessary, float
@param v: value
@type v: any
@return: converted value
@rtype: int OR float
"""
if float(v) % 1. != 0:
return float( v )
return int( float(v) )
def loadTraj( self, fname, outliers=[], refname=None ):
"""
Load trajectory from file.
@param fname: path to trajectory
@type fname: str
@param outliers: Identify outlier trajectories (default: [], identify)
@type outliers: [int] OR []
        @param refname: name of reference (default: None)
@type refname: str
@return: t, outliers, members
        @rtype: trajectory, [int], [int]
"""
self.log.add('Loading ' + fname )
t = T.load( fname )
t.ref.addChainId()
t = t.compressAtoms( t.ref.maskProtein() )
outliers = self.getOutliers( t, outliers )
if refname:
self.dumpMissing( t.ref, refname )
members = None
if not self.all:
members = self.dumpMembers( t, self.rec )
return t, outliers, members
def processTrajs( self ):
"""
Extract reference model and member trajectories from rec, lig, and
com trajectories. Identify outlier member trajectories, if requested.
"""
## free rec
self.ref_frec = self.nameRef( self.rec )
t, self.ex_frec, self.members_frec = self.loadTraj(
self.rec, self.ex_frec, self.ref_frec )
n_rec_members = t.n_members
self.cr = self.cr or range( t.ref.lenChains( breaks=0 ) )
del t
## free lig
self.ref_flig = self.nameRef( self.lig )
t, self.ex_flig, self.members_flig = self.loadTraj(
self.lig, self.ex_flig, self.ref_flig )
n_lig_members = t.n_members
del t
## complex
fname = T.stripSuffix( T.absfile( self.com, resolveLinks=0 ) )
self.ref_com = fname + '_ref.complex'
self.ref_blig= fname + '_blig.model'
self.ref_brec= fname + '_brec.model'
t, self.ex_com, self.members_com = self.loadTraj(
self.com, self.ex_com )
n_com_members = t.n_members
self.cl = self.cl or MU.difference( range(t.ref.lenChains()), self.cr)
rec = t.ref.takeChains( self.cr, breaks=0 )
lig = t.ref.takeChains( self.cl, breaks=0 )
del t
self.dumpMissing( Complex( rec, lig ), self.ref_com )
self.dumpMissing( rec, self.ref_brec )
self.dumpMissing( lig, self.ref_blig )
self.equalizeMemberCount( n_rec_members, n_lig_members, n_com_members )
if self.jack: self.prepareJackknife()
def equalizeMemberCount( self, n_rec, n_lig, n_com ):
"""
Ensure we keep equal number of members trajectories from frec,
flig, and com.
@param n_rec: number of receptor trajectories
@type n_rec: int
@param n_lig: number of ligand trajectories
@type n_lig: int
@param n_com: number of complex trajectories
@type n_com: int
"""
ex = [ self.ex_frec, self.ex_flig, self.ex_com ]
n_members = [ n_rec, n_lig, n_com ]
## pair list of excluded members with number of remaining members
ex = [ ( ex[i], n_members[i] - len(ex[i]) ) for i in range(3) ]
## lowest number of members after exclusion
n_min = min( [ x[1] for x in ex ] )
self.log.add('excluding non-outliers to match member count: ')
label = ['com','lig','rec']
for x, n in ex:
i = 0
s = label.pop()
while n > n_min:
self.log.write( '%s: ' % s )
if not i in x:
x.append( i )
n -= 1
self.log.write('%i, ' % i )
i += 1
self.log.add('')
self.n_members = n_min
def prepareJackknife( self ):
"""
Prepare leave-one-trajectory-out jackknife test.
"""
self.vrange = range( self.n_members + 1 ) ## 0: exclude nothing
self.var = 'ex1'
def nameRef( self, fname ):
fname = T.stripSuffix( T.absfile( fname, resolveLinks=0 ) )
return fname + '_ref.model'
def nameRefCom( self, fname ):
fname = T.stripSuffix( T.absfile( fname, resolveLinks=0 ) )
return fname + '_ref.complex'
def dumpMissing( self, o, fname ):
"""
Pickle *o* to path *fname*, if it is not already there.
@param o: object to dump
@type o: any
@param fname: file name
@type fname: str
@return: file name
@rtype: str
"""
if os.path.exists( fname ):
self.log.add('using existing ' + fname )
else:
self.log.add('Saving ' + fname )
T.dump( o, fname )
return fname
def getOutliers( self, traj, outlaws=[] ):
"""
        Identify member trajectories that have moved much further than normal.
@param traj: Trajectory to analyze
@type traj: Trajectory
@param outlaws: members already marked for exclusion
@type outlaws: [int]
        @return: member indices of outlier trajectories (plus outlaws)
@rtype: [int]
"""
if not self.zfilter:
return outlaws
outliers = N.nonzero( traj.outliers( z=self.zfilter,
mask=traj.ref.maskCA(), step=10) )
self.log.add('identified %i outliers with z-threshold %3.1f' %\
( len(outliers), self.zfilter ) )
return MU.union( outliers, outlaws )
def dumpMembers( self, traj, fname ):
"""
Dump ensemble member trajectories
@param traj: Trajectory to dump
@type traj: Trajectory
        @param fname: trajectory file name - used to derive name for members
        @type fname: str
@return: list of trajectory files
@rtype: [str]
"""
fname = T.stripSuffix( T.absfile( fname, resolveLinks=0 ) )
members = range( traj.n_members )
r = []
for n in members:
f = fname + '_member_%02i.traj' % n
if os.path.exists( f ):
self.log.add('using existing ' + f )
else:
self.log.write('saving ' + f + '...')
m = traj.takeMember( n )
T.dump( m, f )
self.log.add('done')
r += [ f ]
return r
def getInitParameters(self, slave_tid):
"""
hand over parameters to slave once.
@param slave_tid: slave task id
@type slave_tid: int
@return: dictionary with init parameters
@rtype: {param:value}
"""
host = self.hostnameFromTID( slave_tid )
nice = self.niceness.get( host, self.niceness.get('default',0) )
return {'ferror':self.ferror,
'debug':self.debug, 'nice':nice, 'host':host}
def cleanup( self ):
"""
Tidy up
"""
if self.clean:
self.cleanCache()
def cleanCache( self ):
"""
Remove left-over cache files
"""
fs = [ self.ref_frec, self.ref_flig, self.ref_com, self.ref_brec,
self.ref_blig ]
fs.extend( self.members_frec + self.members_flig )
fs.extend( self.members_brec + self.members_blig )
fs.extend( self.members_com )
for f in fs:
self.log.add('removing %s: %i' % (f, T.tryRemove(f)) )
def saveProtocols( self ):
"""
Save protocol to file.
"""
f_prot = T.stripSuffix( T.absfile(self.fout) ) + '_protocols.dat'
self.log.write( 'Saving parameters to %s...' % f_prot )
T.dump( self.protocols, f_prot )
def done(self):
"""
Write result to file.
"""
tree = self.getResult()
self.log.add("Saving result to %s..." % self.fout)
T.dump( tree, self.fout )
self.log.add( "Done" )
##
## Assemble the protocols for many AmberEntropist runs
##
def __cpupdate( self, d1, d2 ):
"""
Merge 2 dictionaries *d1* and *d2* and return a copy
"""
r = copy.copy( d1 )
r.update( d2 )
return r
def protocols_standard( self, trec, tlig, tcom,
ex_frec=None, ex_flig=None, ex_com=None,
doshift=1,
**options ):
"""
Create 13 parameter sets for AmberEntropist that cover the calculation
of rec, lig, com and fcom entropies with and without splitting of the
complex, with and without shifting and shuffling of frames.
@param options: additional options (like cast, s, e, atoms, thin, step)
that are the same in all parameter sets
@type options: key=value
@return: each value of the returned dict contains a set of
arguments for one AmberEntropist run
@rtype: dict of dict
"""
fcp = self.__cpupdate
r = {}
S = self ## make rest more readable
d = { 'ref':None, 'cast':1, 'chains':None,
'split':0, 'shift':0, 'shuffle':0, 'ex_n':0, 'ex3':None,
'thin':None, 'step':1, 'ss':0, 'se':None, 'atoms':None }
d.update( options )
r['frec'] = fcp( d, {'traj':trec, 'ref':S.ref_brec, 'ex':ex_frec } )
r['flig'] = fcp( d, {'traj':tlig, 'ref':S.ref_blig, 'ex':ex_flig } )
r['brec'] = fcp( d, {'traj':tcom, 'ref':S.ref_frec, 'ex':ex_com,
'chains':S.cr } )
r['blig'] = fcp( d, {'traj':tcom, 'ref':S.ref_flig, 'ex':ex_com,
'chains':S.cl } )
r['fcom'] = fcp( d, {'traj':'%s+%s'%(trec, tlig),
'ex':(ex_frec, ex_flig),
'ref':S.ref_com, 'split':1 } )
## if doshift:
## r['fcom_shift'] = fcp( r['fcom'], {'shift':1 } )
r['fcom_shuff'] = fcp( r['fcom'], {'shuffle':1 } )
r['com'] = fcp( d, {'traj':tcom, 'ex':ex_com,
'ref':'%s+%s' % (S.ref_frec, S.ref_flig) } )
r['com_split'] = fcp( r['com'], { 'split':1, 'border':S.cl[0] } )
## r['com_shuff'] = fcp( r['com'], { 'shuffle':1, 'border':S.cl[0] } )
r['com_split_shuff'] = fcp( r['com'],
{'split':1,'shuffle':1,'border':S.cl[0] } )
if doshift:
## r['com_shift'] = fcp( r['com'], { 'shift':1,'border':S.cl[0] } )
r['com_split_shift'] = fcp( r['com'],
{'split':1,'shift':1, 'border':S.cl[0] } )
return r
def protocols_single_all( self, **options ):
"""
Set of protocols for all-member trajectories AND single-member traj.
with the different shuffle, shift, split settings.
Usually 11 x 13 protocols for AmberEntropist (10 members and 1 for all)
@param options: additional options (like cast, s, e, atoms, thin, step)
that are the same in all parameter sets
@type options: key=value
@return: each value of the returned dict contains a set of arguments
for one AmberEntropist run, each key is a tuple of the
member index and the protocol name, i.e. (0, 'fcom_shuffle')
The set of protocols for all-member trajectories has member
index None.
@rtype: dict of dict
"""
r = {}
        ## put all-member protocols under member index 'None'
prots = self.protocols_standard( self.rec, self.lig, self.com,
self.ex_frec, self.ex_flig, self.ex_com,
**options )
for k,p in prots.items():
r[ (None, k) ] = p
if not self.all:
## put single-member protocols under their respective member index
for i in range( len( self.members_frec ) ):
prots = self.protocols_standard(self.members_frec[i],
self.members_flig[i],
self.members_com[i], doshift=0,
**options )
for k, p in prots.items():
r[ (i, k) ] = p
return r
def protocols_var_range( self, **options ):
"""
Complete set of protocols also considering different values of the
variable option.
"""
self.log.add( 'variable option %s with %i values' \
% (self.var, len(self.vrange)))
r = {}
for v in self.vrange:
d = copy.copy( options )
d[ self.var ] = v
prots = self.protocols_single_all( **d )
for k, p in prots.items():
r[ (v,) + k ] = p
return r
##
## Re-organize results
##
def dictionate( self, d ):
"""
Take dict with tuple keys (value, int_member, str_protocol) and build
a tree-like dict of dicts in which the values of d can be accessed
like::
d[value][int_member][str_protocol]
@param d: the raw results accumulated from the slave nodes
@type d: dict
@return: tree-like dict ordered by variable value, member, protocol
@rtype: dict of dict of dict of dict
"""
r = {}
keys = d.keys()
## only convert single value tuple keys into non-tuple keys
if len( keys[0] ) == 1:
for k in keys:
r[ k[0] ] = d[ k ]
return r
x_values = MU.nonredundant( [ k[0] for k in keys ] )
for x in x_values:
sub_keys = [ k for k in keys if k[0] == x ]
y_values = MU.nonredundant( [ k[1:] for k in sub_keys] )
r[ x ] = {}
for y in y_values:
r[x][y] = d[ (x,) + y ]
r[ x ] = self.dictionate( r[x] )
return r
def getResult( self, **arg ):
"""
Collapse the results for different values of the variable parameter
into lists and put the results into a tree ala::
r[ member_index ][ protocol_name ][ result_field ] -> [ values ]
@return: tree-like dict ordered by variable value, member, protocol
@rtype: dict of dict of dict of lists
"""
tree = self.dictionate( self.result )
vvalues = tree.keys()
vvalues.sort()
keys = self.result.keys()
sub_keys = [ k for k in keys if k[0] == vvalues[0] ]
r = {}
for v, member, protcl in sub_keys:
try:
if not member in r:
r[member] = {}
r[member][protcl] = {}
run_dic = tree[v][member][protcl]
for k in run_dic.keys():
r[member][protcl][k] = [ tree[v][member][protcl][k] \
for v in vvalues ]
except:
EHandler.warning('missing result: ' + str(T.lastError()))
r['var'] = self.var
r['vrange']= self.vrange
r['protocols'] = self.protocols
self.result_tree = r
return r
#### TEST #######
if __name__ == '__main__':
niceness = {'default': 0}
hosts = cpus_all[:80]
f = T.testRoot() + '/Amber/AmberEntropyMaster/'
rec = f + 'rec/traj.dat'
lig = f + 'lig/traj.dat'
com = f + 'com/traj.dat'
out = f + 'entropy.out'
master = AmberEntropyMaster( rec, lig, com, out, step=1,
atoms=['CA','CB'],
var='ex1', vrange='0:10',
exrec=[1],exlig=[0],
all=1,
hosts=hosts, niceness=niceness,
w=1 )
master.start()
|
ostrokach/biskit
|
Biskit/AmberEntropyMaster.py
|
Python
|
gpl-3.0
| 25,612
|
[
"Amber"
] |
7daf1d2edc6057c082d23298479ffcd58b388cb13d568eb3e198621311e20d97
|
from __future__ import print_function
from rdkit import RDConfig
import os.path
from rdkit.six.moves import cPickle
from rdkit import Chem
from rdkit.Chem import Descriptors
descrs = ['SMR_VSA1', 'SMR_VSA10', 'SMR_VSA2', 'SMR_VSA3', 'SMR_VSA4', 'SMR_VSA5', 'SMR_VSA6',
'SMR_VSA7', 'SMR_VSA8', 'SMR_VSA9', 'SlogP_VSA1', 'SlogP_VSA10', 'SlogP_VSA11',
'SlogP_VSA12', 'SlogP_VSA2', 'SlogP_VSA3', 'SlogP_VSA5', 'SlogP_VSA6', 'SlogP_VSA7',
'SlogP_VSA8', 'SlogP_VSA9']
def runIt(inLines, outFileName, smiCol=0, maxMols=-1, delim=','):
outF = open(outFileName, 'w+')
outF.write('#' + ','.join(['SMILES'] + descrs))
outF.write('\n')
mols = []
nDone = 0
  for line in inLines:
if line[0] != '#':
splitL = line.strip().split(delim)
if not splitL:
continue
smi = splitL[smiCol].strip()
mol = Chem.MolFromSmiles(smi)
print(smi)
if mol:
vals = []
for descr in descrs:
fn = getattr(Descriptors, descr)
try:
v = fn(mol)
except Exception:
v = 666
vals.append(v)
outF.write(','.join([smi] + ['%.4f' % x for x in vals]))
outF.write('\n')
nDone += 1
if maxMols > 0 and nDone >= maxMols:
break
outF.close()
if __name__ == '__main__':
if 1:
    inD = open(
os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'PP_descrs_regress.VSA.csv'),
'r').readlines()
outFileName = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'PP_descrs_regress.VSA.csv')
runIt(inD, outFileName, smiCol=0, delim=',')
|
jandom/rdkit
|
rdkit/Chem/test_data/BuildDescrsTestSet.Crippen.py
|
Python
|
bsd-3-clause
| 1,610
|
[
"RDKit"
] |
c55c3d7cc0745ae1a877cc4151a369a44ce8c8b0e0c2c4c01c6c98ed403b19ff
|
########################################################################
# #
# Cyprium is a multifunction cryptographic, steganographic and #
# cryptanalysis tool developped by members of The Hackademy. #
# French White Hat Hackers Community! #
# cyprium.hackademics.fr # #
# Authors: SAKAROV, mont29, afranck64 #
# Contact: admin@hackademics.fr #
# Forum: hackademics.fr #
# Twitter: @hackademics_ #
# #
# Cyprium is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published #
# by the Free Software Foundation, either version 3 of the License, #
# or any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but without any warranty; without even the implied warranty of #
# merchantability or fitness for a particular purpose. See the #
# GNU General Public License for more details. #
# #
# The terms of the GNU General Public License is detailed in the #
# COPYING attached file. If not, see : http://www.gnu.org/licenses #
# #
########################################################################
import sys
import os
# In case we directly run that file, we need to add the whole cyprium to path,
# to get access to CLI stuff!
if __name__ == "__main__":
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
"..", "..", "..", "..",
"..")))
import app.cli
import kernel.crypto.text.brainfuck as brainfuck
import kernel.utils as utils
class Brainfuck(app.cli.Tool):
"""CLI wrapper for brainfuck crypto text tool."""
def main(self, ui):
ui.message("********** Welcome to Cyprium.Brainfuck! **********")
quit = False
while not quit:
options = [(self.about, "*about", "Show some help!"),
(self.demo, "*demo", "Show some examples"),
(self.cypher, "*cypher",
"Cypher some textual data"),
(self.decypher, "d*ecypher",
"Decypher code into text"),
(self.convert, "c*onvert",
"Convert code to another language"),
("", "-----", ""),
("tree", "*tree", "Show the whole tree"),
("quit", "*quit", "Quit Cyprium.Octopus")]
msg = "Cyprium.Brainfuck"
answ = ui.get_choice(msg, options)
if answ == 'tree':
self._tree.print_tree(ui, self._tree.FULL)
elif answ == 'quit':
self._tree.current = self._tree.current.parent
quit = True
else:
answ(ui)
ui.message("Back to Cyprium menus! Bye.")
def about(self, ui):
ui.message(brainfuck.__about__)
ui.get_choice("", [("", "Go back to $menu", "")], oneline=True)
def demo(self, ui):
ui.message("===== Demo Mode =====")
ui.message("Running a small demo/testing!")
ui.message("--- Encoding ---")
text = "Hello World!"
bf = brainfuck.brainfuck
ui.message("Data to cypher: {}\n".format(text))
ui.message("Brainfuck cyphered data (utf-8): {}"
"".format(brainfuck.cypher(text, lang=bf.BRAINFUCK)))
ui.message("Ook cyphered data (utf-8): {}"
"".format(brainfuck.cypher(text, lang=bf.OOK)))
ui.message("Spoon cyphered data (utf-8): {}"
"".format(brainfuck.cypher(text, lang=bf.SPOON)))
ui.message("SegFaultProg cyphered data (utf-8): {}"
"".format(brainfuck.cypher(text, lang=bf.SIGSEV)))
ui.message("")
ui.message("--- Decoding ---")
ui.message("+ Brainfuck will find out which language it is.")
htext = "++++++++++[>+>+++>+++++++>++++++++++<<<<-]>>>++.>---.++++++" \
"+++++++++..+++++++++.<<++.>>-----------.---------.+++++++++" \
"+++++++++.<<.>>++.--------------------.----.+++++++++++++++" \
"++.<<.++++++++++++++++++.--.+.+."
ui.message("Brainfuck code used as input: {}".format(htext))
        ui.message("The decyphered data is: {}"
"".format(brainfuck.decypher(htext, codec="utf-8")))
ui.message("")
htext = "+8[>+4*2+8*3+12>+13>+14>+15>+16*8+19*9+20>+28<10-]*2+.*1.>4" \
"+.+4.*4+.*5-.*3+5.<2.>4-.*4.+4.>-3.<-.*3.*1.*3-.>2+5.*4+.." \
"*6+.<5.>9+2.<3.>+4.*5-.<2+.*5-.+.>5.<3.*8+.*10.*7.>2+6."
ui.message("Brainfuck code used as input: {}".format(htext))
        ui.message("The decyphered data is: {}"
"".format(brainfuck.decypher(htext, codec="utf-8")))
ui.message("")
ui.message("--- Won’t work ---")
ui.message("+ The input code to decypher must be valid!")
htext = "Ook. Ook. Ook. Ook. Ook. Ook. Ook. Ook. Ook. Ook. Ook. Ook." \
" Ook. Ook. Ook. Ook! Ook?" \
"*4.+4.>-3.<-.*3.*1.*3-.>2+5.*4+..*6+.<5.>9+2.<3.>+4.*5-.<2+."
ui.message("“Numbers” text used as binary input: {}".format(htext))
try:
            ui.message("The decyphered data is: {}"
"".format(brainfuck.decypher(htext)))
except Exception as e:
ui.message(str(e), level=ui.ERROR)
ui.message("")
ui.get_choice("", [("", "Go back to $menu", "")], oneline=True)
def cypher(self, ui):
"""Interactive version of cypher()."""
txt = ""
ui.message("===== Cypher Mode =====")
while 1:
done = False
while 1:
txt = ui.text_input("Text to cypher to code")
if txt is None:
break # Go back to main Cypher menu.
try:
# Get codec to use.
options = [(brainfuck.DEFAULT, "$utf-8", ""),
(None, "or specify another *codec", "")]
codec = ui.get_choice("Do you want to use", options,
oneline=True)
if codec is None:
codec = ui.get_data("Type the codec you want to use "
"(e.g. 'latin-9'): ")
# Get language.
bf = brainfuck.brainfuck
options = [(bf.BRAINFUCK, "$brainfuck", ""),
(bf.OOK, "*ook", ""),
(bf.FASTOOK, "*fast ook", ""),
(bf.SPOON, "*spoon", ""),
(bf.SIGSEV, "or se*gfaultprog", "")]
lang = ui.get_choice("Do you want to generate", options,
oneline=True)
# Get obfuscation.
obfs = ui.get_data("Do you want to generate obfuscated "
"code (0.0 or nothing for none, "
"value up to 1.0): ", sub_type=ui.FLOAT,
allow_void=True)
if not obfs:
obfs = 0.0
# Get seed for random generator.
options = [(None, "current $time", ""),
(-1, "*cyphered text", ""),
(1, "or specify a custom *seed", "")]
seed = ui.get_choice("To init the random generator, do "
"you want to use", options,
oneline=True)
if seed == 1:
seed = ui.get_data("Type the integer you want to "
"use: ", sub_type=ui.INT)
elif seed == -1:
seed = txt
txt = brainfuck.cypher(txt, lang, codec, obfs, seed)
done = True # Out of those loops, output result.
break
except Exception as e:
if utils.DEBUG:
import traceback
traceback.print_tb(sys.exc_info()[2])
ui.message(str(e), level=ui.ERROR)
options = [("retry", "*try again", ""),
("menu", "or go back to *menu", "")]
answ = ui.get_choice("Could not convert that data into "
"the chosen language, please",
options, oneline=True)
if answ in {None, "menu"}:
                        return # Go back to main Brainfuck menu.
# Else, retry with another data to hide.
if done:
ui.text_output("Data successfully converted", txt,
"Code form of data")
options = [("redo", "*cypher another data", ""),
("quit", "or go back to *menu", "")]
answ = ui.get_choice("Do you want to", options, oneline=True)
if answ in {None, "quit"}:
return
def decypher(self, ui):
"""Interactive version of decypher()."""
txt = ""
ui.message("===== Decypher Mode =====")
while 1:
txt = ui.text_input("Please choose some “code” text")
# Get codec to use.
options = [(brainfuck.DEFAULT, "$utf-8", ""),
(None, "or specify another *codec", "")]
codec = ui.get_choice("Do you want to use", options,
oneline=True)
if codec is None:
codec = ui.get_data("Type the codec you want to use "
"(e.g. 'latin-9'): ")
try:
                ui.text_output("Data successfully decyphered",
brainfuck.decypher(txt, codec),
"The hidden data is")
except Exception as e:
if utils.DEBUG:
import traceback
traceback.print_tb(sys.exc_info()[2])
ui.message(str(e), level=ui.ERROR)
options = [("redo", "*decypher another data", ""),
("quit", "or go back to *menu", "")]
answ = ui.get_choice("Do you want to", options, oneline=True)
if answ == "quit":
return
def convert(self, ui):
"""Interactive version of convert()."""
txt = ""
ui.message("===== Convert Mode =====")
while 1:
done = False
while 1:
code = ui.text_input("Code to convert to some language")
if code is None:
break # Go back to main Cypher menu.
try:
# Get language.
bf = brainfuck.brainfuck
options = [(bf.BRAINFUCK, "$brainfuck", ""),
(bf.OOK, "*ook", ""),
(bf.FASTOOK, "*fast ook", ""),
(bf.SPOON, "*spoon", ""),
(bf.SIGSEV, "or se*gfaultprog", "")]
lang = ui.get_choice("Do you want to convert to", options,
oneline=True)
# Get obfuscation.
obfs = ui.get_data("Do you want to generate obfuscated "
"code (0.0 or nothing for none, "
"value up to 1.0): ", sub_type=ui.FLOAT,
allow_void=True)
if not obfs:
obfs = 0.0
# Get seed for random generator.
options = [(None, "current $time", ""),
(-1, "*cyphered text", ""),
(1, "or specify a custom *seed", "")]
seed = ui.get_choice("To init the random generator, do "
"you want to use", options,
oneline=True)
if seed == 1:
seed = ui.get_data("Type the integer you want to "
"use: ", sub_type=ui.INT)
elif seed == -1:
seed = code
code = brainfuck.convert(code, lang, obfs, seed)
done = True # Out of those loops, output result.
break
except Exception as e:
if utils.DEBUG:
import traceback
traceback.print_tb(sys.exc_info()[2])
ui.message(str(e), level=ui.ERROR)
options = [("retry", "*try again", ""),
("menu", "or go back to *menu", "")]
answ = ui.get_choice("Could not convert that code into "
"the chosen language, please",
options, oneline=True)
if answ in {None, "menu"}:
return # Go back to main Sema menu.
# Else, retry with another data to hide.
if done:
ui.text_output("Code successfully converted", code, "Code")
options = [("redo", "*convert another data", ""),
("quit", "or go back to *menu", "")]
answ = ui.get_choice("Do you want to", options, oneline=True)
if answ in {None, "quit"}:
return
NAME = "brainfuck"
TIP = "Tool to convert text to/from brainfuck & co language."
TYPE = app.cli.Node.TOOL
CLASS = Brainfuck
# Allow tool to be used directly, without using Cyprium menu.
if __name__ == "__main__":
import app.cli.ui
ui = app.cli.ui.UI()
tree = app.cli.NoTree("Brainfuck")
Brainfuck(tree).main(ui)
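# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original tool): the interactive menus
# above only drive the module-level helpers, which can also be called from
# scripts.  The argument order follows the calls made in cypher() above; the
# sample text is hypothetical and never executed here.
def _scripted_example():
    bf = brainfuck.brainfuck
    code = brainfuck.cypher("Hello", bf.BRAINFUCK, "utf-8")
    return brainfuck.decypher(code, codec="utf-8")  # expected to round-trip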
|
underloki/Cyprium
|
app/cli/root/crypto/text/brainfuck.py
|
Python
|
gpl-3.0
| 15,009
|
[
"Octopus"
] |
61c671a0fb08c25e9062349819d31ccd80cb73c82294927ea6daace65b527ce0
|
## Automatically adapted for numpy.oldnumeric Jun 27, 2008 by -c
#
# Copyright (C) 2002-2008 greg Landrum and Rational Discovery LLC
#
""" unit tests for the model and descriptor packager """
from rdkit import RDConfig
from rdkit.ML.Data import DataUtils
import unittest,os,sys
import io
from rdkit.six.moves import cPickle
from rdkit.ML.ModelPackage import Packager
from rdkit import Chem
import random
def feq(a,b,tol=1e-4):
return abs(a-b)<=tol
class TestCase(unittest.TestCase):
def setUp(self):
self.dataDir =os.path.join(RDConfig.RDCodeDir,'ML/ModelPackage/test_data')
self.testD = [
# NOTE: the confidences here can be twitchy due to changes in descriptors:
('Fc1ccc(NC(=O)c2cccnc2Oc3cccc(c3)C(F)(F)F)c(F)c1',0,0.8 ),
#(r'CN/1(=C\C=C(/C=C1)\C\2=C\C=N(C)(Cl)\C=C2)Cl',0,0.70),
(r'NS(=O)(=O)c1cc(ccc1Cl)C2(O)NC(=O)c3ccccc32',1,0.70),
]
def _verify(self,pkg,testD):
for smi,pred,conf in testD:
try:
m = Chem.MolFromSmiles(smi)
except:
sys.stderr.write('SMILES: %s failed\n'%(smi))
else:
p,c = pkg.Classify(m)
assert p==pred,'bad prediction (%d) for smiles %s'%(p,smi)
assert feq(c,conf),'bad confidence (%f) for smiles %s'%(c,smi)
def _verify2(self,pkg,testD):
for smi,pred,conf in testD:
try:
m = Chem.MolFromSmiles(smi)
except:
sys.stderr.write('SMILES: %s failed\n'%(smi))
else:
p,c = pkg.Classify(m)
assert p==pred,'bad prediction (%d) for smiles %s'%(p,smi)
assert feq(c,conf),'bad confidence (%f) for smiles %s'%(c,smi)
p,c = pkg.Classify(m)
assert p==pred,'bad prediction (%d) for smiles %s'%(p,smi)
assert feq(c,conf),'bad confidence (%f) for smiles %s'%(c,smi)
def testBuild(self):
""" tests building and screening a packager """
with open(os.path.join(self.dataDir,'Jan9_build3_calc.dsc'),'r') as calcTF:
buf = calcTF.read().replace('\r\n', '\n').encode('utf-8')
calcTF.close()
with io.BytesIO(buf) as calcF:
calc = cPickle.load(calcF)
with open(os.path.join(self.dataDir,'Jan9_build3_model.pkl'),'rb') as modelF:
model = cPickle.load(modelF)
pkg = Packager.ModelPackage(descCalc=calc,model=model)
self._verify(pkg,self.testD)
def testLoad(self):
""" tests loading and screening a packager """
with open(os.path.join(self.dataDir,'Jan9_build3_pkg.pkl'),'r') as pkgTF:
buf = pkgTF.read().replace('\r\n', '\n').encode('utf-8')
pkgTF.close()
with io.BytesIO(buf) as pkgF:
pkg = cPickle.load(pkgF)
self._verify(pkg,self.testD)
def testLoad2(self):
""" tests loading and screening a packager 2 """
with open(os.path.join(self.dataDir,'Jan9_build3_pkg.pkl'),'r') as pkgTF:
buf = pkgTF.read().replace('\r\n', '\n').encode('utf-8')
pkgTF.close()
with io.BytesIO(buf) as pkgF:
pkg = cPickle.load(pkgF)
self._verify2(pkg,self.testD)
def testPerm1(self):
""" tests the descriptor remapping stuff in a packager """
from rdkit.Chem import Descriptors
with open(os.path.join(self.dataDir,'Jan9_build3_pkg.pkl'),'r') as pkgTF:
buf = pkgTF.read().replace('\r\n', '\n').encode('utf-8')
pkgTF.close()
with io.BytesIO(buf) as pkgF:
pkg = cPickle.load(pkgF)
calc = pkg.GetCalculator()
names = calc.GetDescriptorNames()
ref = {}
DataUtils.InitRandomNumbers((23,42))
for smi,pred,conf in self.testD:
for desc in names:
fn = getattr(Descriptors,desc,lambda x:777)
m = Chem.MolFromSmiles(smi)
ref[desc] = fn(m)
for i in range(5):
perm = list(names)
random.shuffle(perm,random=random.random)
m = Chem.MolFromSmiles(smi)
for desc in perm:
fn = getattr(Descriptors,desc,lambda x:777)
val = fn(m)
assert feq(val,ref[desc],1e-4),'%s: %s(%s): %f!=%f'%(str(perm),
smi,
desc,
val,
ref[desc])
def testPerm2(self):
""" tests the descriptor remapping stuff in a packager """
with open(os.path.join(self.dataDir,'Jan9_build3_pkg.pkl'),'r') as pkgTF:
buf = pkgTF.read().replace('\r\n', '\n').encode('utf-8')
pkgTF.close()
with io.BytesIO(buf) as pkgF:
pkg = cPickle.load(pkgF)
calc = pkg.GetCalculator()
names = calc.GetDescriptorNames()
DataUtils.InitRandomNumbers((23,42))
perm = list(names)
random.shuffle(perm,random=random.random)
calc.simpleList = perm
calc.descriptorNames = perm
pkg.Init()
self._verify(pkg,self.testD)
if __name__ == '__main__':
unittest.main()
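# --------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): loading a pickled model
# package and classifying a single molecule outside of unittest.  The pickle
# path is hypothetical; a locally written package can be loaded in binary
# mode directly, whereas the tests above re-encode line endings first.
def _example_classify(pkg_path, smiles):
    with open(pkg_path, 'rb') as pkgF:
        pkg = cPickle.load(pkgF)
    mol = Chem.MolFromSmiles(smiles)
    return pkg.Classify(mol)  # -> (prediction, confidence)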
|
soerendip42/rdkit
|
rdkit/ML/ModelPackage/UnitTestPackage.py
|
Python
|
bsd-3-clause
| 4,877
|
[
"RDKit"
] |
bd091908324400be0fd6ad0b40280713315bb17a554a04def6f408ec5f16aeba
|
# Copyright 2019 Brian Quinlan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Container for the game being trained and the agent that plays it."""
class Model:
def __init__(self, agent, game):
self._agent = agent
self._game = game
@property
def agent(self):
return self._agent
@property
def game(self):
return self._game
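# A minimal usage sketch (not part of the original file); the agent and game
# objects below are placeholders for whatever the training loop constructs.
def _example_usage():
    agent = object()  # placeholder agent
    game = object()   # placeholder game/environment
    model = Model(agent, game)
    assert model.agent is agent and model.game is game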
|
brianquinlan/learn-machine-learning
|
lunarlander/model.py
|
Python
|
mit
| 880
|
[
"Brian"
] |
46a9cbec2ca0ded626c4e32f1d5dc1633d645c771602b2f9ab327feacc5da785
|
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
#======================================================================
#
# emake.py - emake version 3.6.9
#
# history of this file:
# 2009.08.20 skywind create this file
# 2009.11.14 skywind new install() method
# 2009.12.22 skywind implementation execute interface
# 2010.01.18 skywind new project info
# 2010.03.14 skywind fixed source lex bug
# 2010.11.03 skywind new 'import' to import config section
# 2010.11.04 skywind new 'export' to export .def, .lib for windll
# 2010.11.27 skywind fixed link sequence with -Xlink -( -)
# 2012.03.26 skywind multiprocess building system, speed up
# 2012.08.18 skywind new 'flnk' to project
# 2012.09.09 skywind new system condition config, optimized
# 2013.12.19 skywind new $(target) config
# 2014.02.09 skywind new build-event and environ
# 2014.04.15 skywind new 'arglink' and 'argcc' config
# 2015.09.03 skywind new replace in config.parameters()
# 2016.01.14 skywind new compile flags with different source file
# 2016.04.27 skywind exit non-zero when error occurs
# 2016.09.01 skywind new lib composite method
# 2016.09.02 skywind more environ variables rather than $(target)
# 2017.08.16 skywind new: cflag, cxxflag, sflag, mflag, mmflag
# 2017.12.20 skywind new: --abs=1 to tell gcc to print fullpath
#
#======================================================================
import sys, time, os
import ConfigParser
#----------------------------------------------------------------------
# preprocessor: C/C++/Java preprocessor
#----------------------------------------------------------------------
class preprocessor(object):
    # Initialize the preprocessor
def __init__ (self):
self.reset()
    # Build a text map: replace every string literal and comment with "$" and "`" so they cannot disturb later parsing
def preprocess (self, text):
content = text
spaces = (' ', '\n', '\t', '\r')
import cStringIO
srctext = cStringIO.StringIO()
srctext.write(text)
srctext.seek(0)
memo = 0
i = 0
length = len(content)
output = srctext.write
while i < length:
char = content[i]
word = content[i : i + 2]
if memo == 0: # 正文中
if word == '/*':
output('``')
i += 2
memo = 1
continue
if word == '//':
output('``')
i += 2
while (i < len(content)) and (content[i] != '\n'):
if content[i] in spaces:
output(content[i])
i = i + 1
continue
output('`')
i = i + 1
continue
if char == '\"':
output('\"')
i += 1
memo = 2
continue
if char == '\'':
output('\'')
i += 1
memo = 3
continue
output(char)
elif memo == 1: # 注释中
if word == '*/':
output('``')
i += 2
memo = 0
continue
if char in spaces:
output(content[i])
i += 1
continue
output('`')
elif memo == 2: # 字符串中
if word == '\\\"':
output('$$')
i += 2
continue
if word == '\\\\':
output('$$')
i += 2
continue
if char == '\"':
output('\"')
i += 1
memo = 0
continue
if char in spaces:
output(char)
i += 1
continue
output('$')
elif memo == 3: # 字符中
if word == '\\\'':
output('$$')
i += 2
continue
if word == '\\\\':
output('$$')
i += 2
continue
if char == '\'':
output('\'')
i += 1
memo = 0
continue
if char in spaces:
output(char)
i += 1
continue
output('$')
i += 1
srctext.truncate()
return srctext.getvalue()
    # Find the header files referenced by a single source file
def search_reference(self, source, heads):
content = ''
del heads[:]
try:
fp = open(source, "r")
except:
return ''
content = '\n'.join([ line.strip('\r\n') for line in fp ])
fp.close()
srctext = self.preprocess(content)
length = len(srctext)
start = 0
endup =-1
number = 0
while (start >= 0) and (start < length):
start = endup + 1
endup = srctext.find('\n', start)
if (endup < 0):
endup = length
number = number + 1
offset1 = srctext.find('#', start, endup)
if offset1 < 0: continue
offset2 = srctext.find('include', offset1, endup)
if offset2 < 0: continue
offset3 = srctext.find('\"', offset2, endup)
if offset3 < 0: continue
offset4 = srctext.find('\"', offset3 + 1, endup)
if offset4 < 0: continue
check_range = [ i for i in xrange(start, offset1) ]
check_range += [ i for i in xrange(offset1 + 1, offset2) ]
check_range += [ i for i in xrange(offset2 + 7, offset3) ]
check = 1
for i in check_range:
if not (srctext[i] in (' ', '`')):
check = 0
break
if check != 1:
continue
name = content[offset3 + 1 : offset4]
heads.append([name, offset1, offset4, number])
return content
    # Merge all referenced headers; returns the file dependencies and the headers that cannot be found
def parse_source(self, filename, history_headers, lost_headers):
headers = []
filename = os.path.abspath(filename)
import cStringIO
outtext = cStringIO.StringIO()
if not os.path.exists(filename):
sys.stderr.write('can not open %s\n'%(filename))
return outtext.getvalue()
if filename in self._references:
content, headers = self._references[filename]
else:
content = self.search_reference(filename, headers)
self._references[filename] = content, headers
save_cwd = os.getcwd()
file_cwd = os.path.dirname(filename)
if file_cwd == '':
file_cwd = '.'
os.chdir(file_cwd)
available = []
for head in headers:
if os.path.exists(head[0]):
available.append(head)
headers = available
offset = 0
for head in headers:
name = os.path.abspath(os.path.normcase(head[0]))
if not (name in history_headers):
history_headers.append(name)
position = len(history_headers) - 1
text = self.parse_source(name, history_headers, lost_headers)
del history_headers[position]
history_headers.append(name)
outtext.write(content[offset:head[1]] + '\n')
outtext.write('/*:: <%s> ::*/\n'%(head[0]))
outtext.write(text + '\n/*:: </:%s> ::*/\n'%(head[0]))
offset = head[2] + 1
else:
outtext.write(content[offset:head[1]] + '\n')
outtext.write('/*:: skip including "%s" ::*/\n'%(head[0]))
offset = head[2] + 1
outtext.write(content[offset:])
os.chdir(save_cwd)
return outtext.getvalue()
    # Strip comments from code
def cleanup_memo (self, text):
content = text
outtext = ''
srctext = self.preprocess(content)
space = ( ' ', '\t', '`' )
start = 0
endup = -1
sized = len(srctext)
while (start >= 0) and (start < sized):
start = endup + 1
endup = srctext.find('\n', start)
if endup < 0:
endup = sized
empty = 1
memod = 0
for i in xrange(start, endup):
if not (srctext[i] in space):
empty = 0
if srctext[i] == '`':
memod = 1
if empty and memod:
continue
for i in xrange(start, endup):
if srctext[i] != '`':
outtext = outtext + content[i]
outtext = outtext + '\n'
return outtext
    # Reset the dependency cache
def reset (self):
self._references = {}
return 0
    # Return the dependencies directly
def dependence (self, filename, reset = False):
head = []
lost = []
if reset: self.reset()
text = self.parse_source(filename, head, lost)
return head, lost, text
    # Extract Java information, returns: (package, imports, classname)
def java_preprocess (self, text):
text = self.preprocess(text)
content = text.replace('\r', '')
p1 = content.find('{')
p2 = content.rfind('}')
if p1 >= 0:
if p2 < 0:
p2 = len(content)
content = content[:p1] + ';\n' + content[p2 + 1:]
content = self.cleanup_memo(content).rstrip() + '\n'
info = { 'package': None, 'import': [], 'class': None }
for line in content.split(';'):
line = line.replace('\n', ' ').strip()
data = [ n.strip() for n in line.split() ]
if len(data) < 2: continue
name = data[0]
if name == 'package':
info['package'] = ''.join(data[1:])
elif name == 'import':
info['import'] += [''.join(data[1:])]
elif 'class' in data or 'interface' in data:
if 'extends' in data:
p = data.index('extends')
data = data[:p]
if 'implements' in data:
p = data.index('implements')
data = data[:p]
info['class'] = data[-1]
return info['package'], info['import'], info['class']
# returns: (package, imports, classname, srcpath)
def java_parse (self, filename):
try:
text = open(filename).read()
except:
return None, None, None, None
package, imports, classname = self.java_preprocess(text)
if package is None:
path = os.path.dirname(filename)
return None, imports, classname, os.path.abspath(path)
path = os.path.abspath(os.path.dirname(filename))
if sys.platform[:3] == 'win':
path = path.replace('\\', '/')
names = package.split('.')
root = path
srcpath = None
if sys.platform[:3] == 'win':
root = root.lower()
names = [n.lower() for n in names]
while 1:
part = os.path.split(root)
name = names[-1]
names = names[:-1]
if name != part[1]:
break
if len(names) == 0:
srcpath = part[0]
break
if root == part[0]:
break
root = part[0]
return package, imports, classname, srcpath
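#----------------------------------------------------------------------
# illustrative sketch (not part of the original emake): typical use of
# the preprocessor class above; the source file name is hypothetical
#----------------------------------------------------------------------
def _preprocessor_example():
    pp = preprocessor()
    heads, lost, text = pp.dependence('main.c')
    for name, start, stop, lineno in heads:
        print 'depends on %s (line %d)'%(name, lineno)
    for name in lost:
        print 'missing header: %s'%name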
#----------------------------------------------------------------------
# execute and capture
#----------------------------------------------------------------------
def execute(args, shell = False, capture = False):
import sys, os
parameters = []
if type(args) in (type(''), type(u'')):
import shlex
cmd = args
if sys.platform[:3] == 'win':
ucs = False
if type(cmd) == type(u''):
cmd = cmd.encode('utf-8')
ucs = True
args = shlex.split(cmd.replace('\\', '\x00'))
args = [ n.replace('\x00', '\\') for n in args ]
if ucs:
args = [ n.decode('utf-8') for n in args ]
else:
args = shlex.split(cmd)
for n in args:
if sys.platform[:3] != 'win':
replace = { ' ':'\\ ', '\\':'\\\\', '\"':'\\\"', '\t':'\\t', \
'\n':'\\n', '\r':'\\r' }
text = ''.join([ replace.get(ch, ch) for ch in n ])
parameters.append(text)
else:
if (' ' in n) or ('\t' in n) or ('"' in n):
parameters.append('"%s"'%(n.replace('"', ' ')))
else:
parameters.append(n)
cmd = ' '.join(parameters)
if sys.platform[:3] == 'win' and len(cmd) > 255:
shell = False
if shell and (not capture):
os.system(cmd)
return ''
elif (not shell) and (not capture):
import subprocess
if 'call' in subprocess.__dict__:
subprocess.call(args)
return ''
import subprocess
if 'Popen' in subprocess.__dict__:
if sys.platform[:3] != 'win' and shell:
p = None
stdin, stdouterr = os.popen4(cmd)
else:
p = subprocess.Popen(args, shell = shell,
stdin = subprocess.PIPE, stdout = subprocess.PIPE,
stderr = subprocess.STDOUT)
stdin, stdouterr = (p.stdin, p.stdout)
else:
p = None
stdin, stdouterr = os.popen4(cmd)
text = stdouterr.read()
stdin.close()
stdouterr.close()
if p: p.wait()
if not capture:
sys.stdout.write(text)
sys.stdout.flush()
return ''
return text
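#----------------------------------------------------------------------
# illustrative sketch (not part of the original emake): execute() either
# streams the command's output (capture = False) or returns it as text;
# the example assumes a gcc binary is reachable on PATH
#----------------------------------------------------------------------
def _execute_example():
    banner = execute('gcc --version', shell = False, capture = True)
    return banner.split('\n')[0]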
#----------------------------------------------------------------------
# Default CFG File
#----------------------------------------------------------------------
ININAME = ''
INIPATH = ''
CFG = {'abspath':False, 'verbose':False, 'silent':False}
#----------------------------------------------------------------------
# configure: locate gcc and read the default settings from the config file
#----------------------------------------------------------------------
class configure(object):
    # Constructor
def __init__ (self, ininame = ''):
self.dirpath = os.path.split(os.path.abspath(__file__))[0]
self.current = os.getcwd()
if not ininame:
ininame = ININAME and ININAME or 'emake.ini'
self.ininame = ininame
self.inipath = os.path.join(self.dirpath, self.ininame)
self.haveini = False
self.dirhome = ''
self.target = ''
self.config = {}
self.cp = ConfigParser.ConfigParser()
self.unix = 1
self.xlink = 1
self.searchdirs = None
self.environ = {}
self.exename = {}
self.replace = {}
self.cygwin = ''
for n in os.environ:
self.environ[n] = os.environ[n]
if sys.platform[:3] == 'win':
self.unix = 0
self.GetShortPathName = None
if sys.platform[:6] == 'darwin':
self.xlink = 0
if sys.platform[:3] == 'aix':
self.xlink = 0
self.cpus = 0
self.inited = False
self.fpic = 0
self.name = {}
ext = ('.c', '.cpp', '.c', '.cc', '.cxx', '.s', '.asm', '.m', '.mm')
self.extnames = ext
self.__jdk_home = None
self.reset()
    # Reset configuration state
def reset (self):
self.inc = {} # include路径
self.lib = {} # lib 路径
self.flag = {} # 编译参数
self.pdef = {} # 预定义宏
self.link = {} # 连接库
self.flnk = {} # 连接参数
self.wlnk = {} # 连接传递
self.cond = {} # 条件参数
self.param_build = ''
self.param_compile = ''
return 0
    # Initialize a tool environment
def _cmdline_init (self, envname, exename):
if not envname in self.config:
return -1
config = self._env_config(envname)
PATH = []
EXEC = ''
sep = self.unix and ':' or ';'
envpath = config.get('PATH', '') + sep + self.environ.get('PATH', '')
condition = False
if os.path.exists(exename):
EXEC = exename
for path in envpath.split(sep):
if path.strip('\r\n\t ') == '':
continue
path = os.path.abspath(path)
if os.path.exists(path):
if not path in PATH:
PATH.append(path)
if not EXEC:
name = os.path.join(path, exename)
if os.path.exists(name):
EXEC = name
if not EXEC:
return -2
config['PATH'] = sep.join(PATH)
for n in config:
v = config[n]
if not n in ('PATH',):
os.environ[n] = v
os.environ['PATH'] = config['PATH']
if not self.unix:
EXEC = self.pathshort(EXEC)
return EXEC
    # Load a tool's config section
def _env_config (self, section):
config = {}
if section in self.config:
for n in self.config[section]:
config[n.upper()] = self.config[section][n]
for n in config:
config[n] = config[n].replace('$(INIROOT)', os.path.dirname(self.iniload))
for n in config:
config[n] = self._expand(config, self.environ, n)
return config
    # Expand configuration macros
def _expand (self, section, environ, item, d = 0):
if not environ: environ = {}
if not section: section = {}
text = ''
if item in environ:
text = environ[item]
if item in section:
text = section[item]
if d >= 20: return text
names = {}
index = 0
# print 'expanding', item
while 1:
index = text.find('$(', index)
if index < 0: break
p2 = text.find(')', index)
if p2 < 0: break
name = text[index + 2:p2]
index = p2 + 1
names[name] = name.upper()
for name in names:
if name != item:
value = self._expand(section, environ, name.upper(), d + 1)
elif name in environ:
value = environ[name]
else:
value = ''
text = text.replace('$(' + name + ')', value)
names[name] = value
# print '>', text
return text
    # Get the short (8.3) file name
def pathshort (self, path):
path = os.path.abspath(path)
if self.unix:
return path
if not self.GetShortPathName:
self.kernel32 = None
self.textdata = None
try:
import ctypes
self.kernel32 = ctypes.windll.LoadLibrary("kernel32.dll")
self.textdata = ctypes.create_string_buffer('\000' * 1024)
self.GetShortPathName = self.kernel32.GetShortPathNameA
args = [ ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int ]
self.GetShortPathName.argtypes = args
self.GetShortPathName.restype = ctypes.c_uint32
except: pass
if not self.GetShortPathName:
return path
retval = self.GetShortPathName(path, self.textdata, 1024)
shortpath = self.textdata.value
if retval <= 0:
return ''
return shortpath
    # Read an ini file
def _readini (self, inipath):
self.cp = ConfigParser.ConfigParser()
if self.unix and '~' in inipath:
inipath = os.path.expanduser(inipath)
if os.path.exists(inipath):
self.iniload = os.path.abspath(inipath)
config = {}
try: self.cp.read(inipath)
except: pass
for sect in self.cp.sections():
for key, val in self.cp.items(sect):
lowsect, lowkey = sect.lower(), key.lower()
self.config.setdefault(lowsect, {})[lowkey] = val
config.setdefault(lowsect, {})[lowkey] = val
self.config['default'] = self.config.get('default', {})
config['default'] = config.get('default', {})
inihome = os.path.abspath(os.path.split(inipath)[0])
dirhome = config['default'].get('home', '')
if dirhome:
dirhome = os.path.join(inihome, dirhome)
if not os.path.exists(dirhome):
sys.stderr.write('error: %s: %s not exists\n'%(inipath, dirhome))
sys.stderr.flush()
else:
self.config['default']['home'] = dirhome
for exename in ('gcc', 'ld', 'ar', 'as', 'nasm', 'yasm', 'dllwrap'):
if not exename in config['default']:
continue
self.exename[exename] = config['default'][exename]
for bp in ('include', 'lib'):
if not bp in config['default']:
continue
data = []
for n in config['default'][bp].replace(';', ',').split(','):
n = os.path.normpath(os.path.join(inihome, self.pathconf(n)))
if not self.unix: n = n.replace('\\', '/')
data.append("'" + n + "'")
text = ','.join(data)
config['default'][bp] = text
self.config['default'][bp] = text
java = config['default'].get('java', '')
if java:
java = os.path.join(inihome, java)
if not os.path.exists(java):
sys.stderr.write('error: %s: %s not exists\n'%(inipath, java))
sys.stderr.flush()
else:
self.config['default']['java'] = os.path.abspath(java)
self.haveini = True
return 0
    # Check dirhome
def check (self):
if not self.dirhome:
sys.stderr.write('error: cannot find gcc home in config\n')
sys.stderr.flush()
sys.exit(1)
return 0
    # Initialize
def init (self):
if self.inited:
return 0
self.config = {}
self.reset()
fn = INIPATH
self.iniload = os.path.abspath(self.inipath)
if fn:
if os.path.exists(fn):
self._readini(fn)
self.iniload = os.path.abspath(fn)
else:
sys.stderr.write('error: cannot open %s\n'%fn)
sys.stderr.flush()
sys.exit(1)
else:
if self.unix:
self._readini('/etc/%s'%self.ininame)
self._readini('/usr/local/etc/%s'%self.ininame)
self._readini('~/.config/%s'%self.ininame)
self._readini(self.inipath)
self.dirhome = self._getitem('default', 'home', '')
cfghome = self.dirhome
if not self.haveini:
#sys.stderr.write('warning: %s cannot be open\n'%(self.ininame))
sys.stderr.flush()
defined = self.exename.get('gcc', None) and True or False
for name in ('gcc', 'ar', 'ld', 'as', 'nasm', 'yasm', 'dllwrap'):
exename = self.exename.get(name, name)
if not self.unix:
elements = list(os.path.splitext(exename)) + ['', '']
if not elements[1]: exename = elements[0] + '.exe'
self.exename[name] = exename
gcc = self.exename['gcc']
p1 = os.path.join(self.dirhome, '%s.exe'%gcc)
p2 = os.path.join(self.dirhome, '%s'%gcc)
if (not os.path.exists(p1)) and (not os.path.exists(p2)):
self.dirhome = ''
if sys.platform[:3] != 'win':
if self.dirhome[1:2] == ':':
self.dirhome = ''
if (not self.dirhome) and (not cfghome):
self.dirhome = self.__search_gcc()
if (not self.dirhome) and (not defined):
gcc = 'clang'
self.exename['gcc'] = gcc
self.dirhome = self.__search_gcc()
if self.dirhome:
self.dirhome = os.path.abspath(self.dirhome)
try:
cpus = self._getitem('default', 'cpu', '')
intval = int(cpus)
self.cpus = intval
except:
pass
cygwin = self._getitem('default', 'cygwin')
self.cygwin = ''
if cygwin and (not self.unix):
if os.path.exists(cygwin):
cygwin = os.path.abspath(cygwin)
bash = os.path.join(cygwin, 'bin/bash.exe')
if os.path.exists(bash):
self.cygwin = cygwin
self.name = {}
self.name[sys.platform.lower()] = 1
if sys.platform[:3] == 'win':
self.name['win'] = 1
if sys.platform[:7] == 'freebsd':
self.name['freebsd'] = 1
self.name['unix'] = 1
if sys.platform[:5] == 'linux':
self.name['linux'] = 1
self.name['unix'] = 1
if sys.platform[:6] == 'darwin':
self.name['darwin'] = 1
self.name['unix'] = 1
if sys.platform == 'cygwin':
self.name['unix'] = 1
if sys.platform[:5] == 'sunos':
self.name['sunos'] = 1
if os.name == 'posix':
self.name['unix'] = 1
if os.name == 'nt':
self.name['win'] = 1
if 'win' in self.name:
self.name['nt'] = 1
self.target = self._getitem('default', 'target')
names = self._getitem('default', 'name')
if names:
self.name = {}
for name in names.replace(';', ',').split(','):
name = name.strip('\r\n\t ').lower()
if not name: continue
self.name[name] = 1
if not self.target:
self.target = name
if not self.target:
self.target = sys.platform
self.target = self.target.strip('\r\n\t ')
if sys.platform[:3] in ('win', 'cyg'):
self.fpic = False
else:
self.fpic = True
#self.__python_config()
self.replace = {}
self.replace['home'] = self.dirhome
self.replace['emake'] = self.dirpath
self.replace['inihome'] = os.path.dirname(self.iniload)
self.replace['inipath'] = self.inipath
self.replace['target'] = self.target
self.inited = True
return 0
    # Read a configuration item
def _getitem (self, sect, key, default = ''):
return self.config.get(sect, {}).get(key, default)
    # Get a path with the $(HOME) variable expanded
def path (self, path):
path = path.replace('$(HOME)', self.dirhome).replace('\\', '/')
path = self.cygpath(path)
text = ''
issep = False
for n in path:
if n == '/':
if issep == False: text += n
issep = True
else:
text += n
issep = False
return os.path.abspath(text)
    # Get a path quoted for use as a command-line argument
def pathtext (self, name):
name = os.path.normpath(name)
name = self.cygpath(name)
name = name.replace('"', '""')
if ' ' in name:
return '"%s"'%(name)
if self.unix:
name = name.replace('\\', '/')
return name
    # Get a short path: relative to the current directory
def relpath (self, name, start = None):
name = os.path.abspath(name)
if not start:
start = os.getcwd()
if 'relpath' in os.path.__dict__:
try:
return os.path.relpath(name, start)
except:
pass
current = start.replace('\\', '/')
if len(current) > 0:
if current[-1] != '/':
current += '/'
name = self.path(name).replace('\\', '/')
size = len(current)
if self.unix:
if name[:size] == current:
name = name[size:]
else:
if name[:size].lower() == current.lower():
name = name[size:]
return name
    # Get a short path: relative to the current directory
def pathrel (self, name, start = None):
return self.pathtext(self.relpath(name, start))
    # Convert to a cygwin path
def cygpath (self, path):
if self.unix and path[1:2] == ':':
path = '/cygdrive/%s%s'%(path[0], path[2:].replace('\\', '/'))
return path
    # Convert a Windows path to a cygwin path
def win2cyg (self, path):
path = os.path.abspath(path)
return '/cygdrive/%s%s'%(path[0], path[2:].replace('\\', '/'))
    # Convert a cygwin path back to a Windows path
def cyg2win (self, path):
if path[1:2] == ':':
return os.path.abspath(path)
if path.lower().startswith('/cygdrive/'):
path = path[10] + ':' + path[11:]
return os.path.abspath(path)
if not path.startswith('/'):
raise Exception('cannot convert path: %s'%path)
if not self.cygwin:
raise Exception('cannot find cygwin root')
return os.path.abspath(os.path.join(self.cygwin, path[1:]))
    # Add an include directory
def push_inc (self, inc):
path = self.path(inc)
if not os.path.exists(path):
sys.stderr.write('warning: ignore invalid path %s\n'%path)
return -1
path = self.pathtext(path)
self.inc[path] = 1
return 0
    # Add a library directory
def push_lib (self, lib):
path = self.path(lib)
if not os.path.exists(path):
sys.stderr.write('warning: ignore invalid path %s\n'%path)
return -1
path = self.pathtext(path)
self.lib[path] = 1
return 0
    # Add a compiler flag
def push_flag (self, flag):
if not flag in self.flag:
self.flag[flag] = len(self.flag)
return 0
    # Add a link library
def push_link (self, link):
if link[-2:].lower() in ('.o', '.a'):
link = self.pathtext(self.path(link))
else:
link = '-l%s'%link.replace(' ', '_')
if not link in self.link:
self.link[link] = len(self.link)
#print 'push: ' + link
return 0
    # Add a predefined macro
def push_pdef (self, define):
self.pdef[define] = 1
    # Add a linker flag
def push_flnk (self, flnk):
if not flnk in self.flnk:
self.flnk[flnk] = len(self.flnk)
return 0
    # Add a linker pass-through option (-Wl,...)
def push_wlnk (self, wlnk):
if not wlnk in self.wlnk:
self.wlnk[wlnk] = len(self.wlnk)
return 0
    # Add a conditional flag
def push_cond (self, flag, condition):
key = (flag, condition)
if not key in self.cond:
self.cond[key] = len(self.cond)
return 0
    # Search for gcc
def __search_gcc (self):
dirpath = self.dirpath
gcc = self.exename['gcc']
splitter = self.unix and ':' or ';'
if os.path.exists(os.path.join(dirpath, '%s'%gcc)):
return os.path.abspath(dirpath)
if os.path.exists(os.path.join(dirpath, 'bin/%s'%gcc)):
return os.path.abspath(os.path.join(dirpath, 'bin'))
for d in os.environ.get('PATH', '').split(splitter):
n = os.path.abspath(os.path.join(d, '%s'%gcc))
if os.path.exists(n): return os.path.abspath(d)
if self.unix:
if os.path.exists('/bin/%s'%gcc):
return '/bin'
if os.path.exists('/usr/bin/%s'%gcc):
return '/usr/bin'
if os.path.exists('/usr/local/bin/%s'%gcc):
return '/usr/local/bin'
if os.path.exists('/opt/bin/%s'%gcc):
return '/opt/bin'
if os.path.exists('/opt/usr/bin/%s'%gcc):
return '/opt/usr/bin'
if os.path.exists('/opt/usr/local/bin/%s'%gcc):
return '/opt/usr/local/bin'
return ''
    # Write the default configuration file
def _write_default_ini (self):
default = ''' [default]
include=$(HOME)/include
lib=$(HOME)/lib
'''
text = '\n'.join([ n.strip('\t\r\n ') for n in default.split('\n') ])
if os.path.exists(self.inipath):
return -1
fp = open(self.inipath, 'w')
fp.write(text)
fp.close()
return 0
    # Normalize a configured path (strip surrounding quotes)
def pathconf (self, path):
path = path.strip(' \t\r\n')
if path[:1] == '\'' and path[-1:] == '\'': path = path[1:-1]
if path[:1] == '\"' and path[-1:] == '\"': path = path[1:-1]
return path.strip(' \r\n\t')
    # Reload the configuration for a section
def loadcfg (self, sect = 'default', reset = True):
self.init()
if reset: self.reset()
f1 = lambda n: (n[:1] != '\'' or n[-1:] != '\'') and n
config = lambda n: self._getitem(sect, n, '')
for path in config('include').replace(';', ',').split(','):
path = self.pathconf(path)
if not path: continue
self.push_inc(path)
for path in config('lib').replace(';', ',').split(','):
path = self.pathconf(path)
if not path: continue
self.push_lib(path)
for link in config('link').replace(';', ',').split(','):
link = self.pathconf(link)
if not link: continue
self.push_link(link)
for flag in config('flag').replace(';', ',').split(','):
flag = flag.strip(' \t\r\n')
if not flag: continue
self.push_flag(flag)
for pdef in config('define').replace(';', ',').split(','):
pdef = pdef.strip(' \t\r\n')
if not pdef: continue
self.push_pdef(pdef.replace(' ', '_'))
for flnk in config('flnk').replace(';', ',').split(','):
flnk = flnk.strip(' \t\r\n')
if not flnk: continue
self.push_flnk(flnk)
for wlnk in config('wlnk').replace(';', ',').split(','):
wlnk = wlnk.strip(' \t\r\n')
if not wlnk: continue
self.push_wlnk(wlnk)
for name in ('cflag', 'cxxflag', 'mflag', 'mmflag', 'sflag'):
for flag in config(name).replace(';', ',').split(','):
flag = flag.strip(' \t\r\n')
if not flag: continue
self.push_cond(flag, name)
self.parameters()
return 0
    # Return entries ordered by their dict values
def sequence (self, data):
x = [ (n, k) for (k, n) in data.items() ]
x.sort()
y = [ n for (k, n) in x ]
return y
    # Replace macro keys in a string
def __replace_key (self, text):
for key in self.replace:
value = self.replace[key]
check = '$(' + key + ')'
if check in text:
text = text.replace(check, value)
return text
    # Return the conditional flags
def condition (self, conditions):
flags = []
for flag, cond in self.sequence(self.cond):
if cond in conditions:
flags.append(flag)
return flags
    # Return the serialized command-line parameters
def parameters (self):
text = ''
for inc in self.sequence(self.inc):
text += '-I%s '%inc
for lib in self.sequence(self.lib):
text += '-L%s '%lib
for flag in self.sequence(self.flag):
text += '%s '%self.__replace_key(flag)
for pdef in self.sequence(self.pdef):
text += '-D%s '%pdef
self.param_compile = text.strip(' ')
text = ''
if self.xlink:
text = '-Xlinker "-(" '
for link in self.sequence(self.link):
text += '%s '%self.__replace_key(link)
if self.xlink:
text += ' -Xlinker "-)"'
else:
text = text + ' ' + text
self.param_build = self.param_compile + ' ' + text
for flnk in self.sequence(self.flnk):
self.param_build += ' %s'%self.__replace_key(flnk)
wl = ','.join([ self.__replace_key(n) for n in self.sequence(self.wlnk) ])
if wl and self.wlnk:
self.param_build += ' -Wl,' + wl
return text
    # gcc search-dirs
def __searchdirs (self):
if self.searchdirs != None:
return self.searchdirs
path = os.path.abspath(os.path.join(self.dirhome, 'bin/gcc'))
if not self.unix:
name = self.pathshort(path)
if (not name) and os.path.exists(path + '.exe'):
name = self.pathshort(path + '.exe')
if name: path = name
cmdline = path + ' -print-search-dirs'
fp = os.popen(cmdline, 'r')
data = fp.read()
fp.close()
fp = None
body = ''
for line in data.split('\n'):
if line[:10] == 'libraries:':
body = line[10:].strip('\r\n ')
if body[:1] == '=': body = body[1:]
break
part = []
if sys.platform[:3] == 'win': part = body.split(';')
else: part = body.split(':')
data = []
dict = {}
for n in part:
path = os.path.abspath(os.path.normpath(n))
if not path in dict:
if os.path.exists(path):
data.append(path)
dict[path] = 1
else:
dict[path] = 0
self.searchdirs = data
return data
    # Check whether a library exists
def checklib (self, name):
name = 'lib' + name + '.a'
for n in self.__searchdirs():
if os.path.exists(os.path.join(n, name)):
return True
for n in self.lib:
if os.path.exists(os.path.join(n, name)):
return True
return False
    # Get the full path of an executable
def getname (self, binname):
exename = self.exename.get(binname, binname)
path = os.path.abspath(os.path.join(self.dirhome, exename))
if not self.unix:
name = self.pathshort(path)
if (not name) and os.path.exists(path + '.exe'):
name = self.pathshort(path + '.exe')
if name: path = name
return path
    # Run a tool from the GNU toolchain
def execute (self, binname, parameters, printcmd = False, capture = False):
path = os.path.abspath(os.path.join(self.dirhome, binname))
if not self.unix:
name = self.pathshort(path)
if (not name) and os.path.exists(path + '.exe'):
name = self.pathshort(path + '.exe')
if name: path = name
cmd = '%s %s'%(self.pathtext(path), parameters)
#printcmd = True
text = ''
if printcmd:
if not capture: print cmd
else: text = cmd + '\n'
sys.stdout.flush()
sys.stderr.flush()
text = text + execute(cmd, shell = False, capture = capture)
return text
    # Invoke gcc
def gcc (self, parameters, needlink, printcmd = False, capture = False):
param = self.param_build
if not needlink:
param = self.param_compile
parameters = '%s %s'%(parameters, param)
# printcmd = True
return self.execute(self.exename['gcc'], parameters, printcmd, capture)
    # Compile a source file
def compile (self, srcname, objname, cflags, printcmd = False, capture = False):
if CFG['abspath']:
srcname = self.pathtext(os.path.abspath(srcname))
else:
srcname = self.pathrel(srcname)
cmd = '-c %s -o %s %s'%(srcname, self.pathrel(objname), cflags)
extname = os.path.splitext(srcname)[-1].lower()
cond = []
if extname in ('.c', '.h'):
cond = self.condition({'cflag':1})
elif extname in ('.cpp', '.cc', '.cxx', '.hpp', '.hh'):
cond = self.condition({'cxxflag':1})
elif extname in ('.s', '.asm'):
cond = self.condition({'sflag':1})
elif extname in ('.m',):
cond = self.condition({'mflag':1})
elif extname in ('.mm',):
cond = self.condition({'mmflag':1})
if cond:
cmd = cmd + ' ' + (' '.join(cond))
return self.gcc(cmd, False, printcmd, capture)
    # Use dllwrap
def dllwrap (self, parameters, printcmd = False, capture = False):
text = ''
for lib in self.sequence(self.lib):
text += '-L%s '%lib
for link in self.sequence(self.link):
text += '%s '%link
for flnk in self.sequence(self.flnk):
text += '%s '%flnk
parameters = '%s %s'%(parameters, text)
dllwrap = self.exename.get('dllwrap', 'dllwrap')
return self.execute(dllwrap, parameters, printcmd, capture)
    # Build a static library
def makelib (self, output, objs = [], printcmd = False, capture = False):
if 0:
name = ' '.join([ self.pathrel(n) for n in objs ])
parameters = 'crv %s %s'%(self.pathrel(output), name)
return self.execute(self.exename['ar'], parameters, printcmd, capture)
objs = [ n for n in objs ]
for link in self.sequence(self.wlnk):
if link[-2:] in ('.a', '.o'):
if os.path.exists(link):
objs.append(link)
return self.composite(output, objs, printcmd, capture)
    # Build a dynamic library: dll or so
def makedll (self, output, objs = [], param = '', printcmd = False, capture = False):
if (not param) or (self.unix):
if sys.platform[:6] == 'darwin':
param = '-dynamiclib'
else:
param = '--shared'
if self.fpic:
param += ' -fPIC'
return self.makeexe(output, objs, param, printcmd, capture)
else:
name = ' '.join([ self.pathrel(n) for n in objs ])
parameters = '%s -o %s %s'%(param,
self.pathrel(output), name)
return self.dllwrap(parameters, printcmd, capture)
    # Build an executable
def makeexe (self, output, objs = [], param = '', printcmd = False, capture = False):
name = ' '.join([ self.pathrel(n) for n in objs ])
if self.xlink:
name = '-Xlinker "-(" ' + name + ' -Xlinker "-)"'
parameters = '-o %s %s %s'%(self.pathrel(output), param, name)
return self.gcc(parameters, True, printcmd, capture)
    # Merge .o and .a files into a new .a file
def composite (self, output, objs = [], printcmd = False, capture = False):
import os, tempfile, shutil
cwd = os.getcwd()
temp = tempfile.mkdtemp('.int', 'lib')
output = os.path.abspath(output)
libname = []
for name in [ os.path.abspath(n) for n in objs ]:
if not name in libname:
libname.append(name)
outpath = os.path.join(temp, 'out')
srcpath = os.path.join(temp, 'src')
os.mkdir(outpath)
os.mkdir(srcpath)
os.chdir(srcpath)
names = {}
for source in libname:
os.chdir(srcpath)
for fn in [ n for n in os.listdir('.') ]:
os.remove(fn)
files = []
filetype = os.path.splitext(source)[-1].lower()
if filetype == '.o':
files.append(source)
else:
args = '-x %s'%self.pathrel(source)
self.execute(self.exename['ar'], args, printcmd, capture)
for fn in os.listdir('.'):
files.append(os.path.abspath(fn))
for fn in files:
name = os.path.split(fn)[-1]
part = os.path.splitext(name)
last = None
for i in xrange(1000):
newname = (i > 0) and (part[0] + '_%d'%i + part[1]) or name
if not newname in names:
last = newname
break
if last and os.path.exists(fn):
names[last] = 1
shutil.copyfile(fn, os.path.join(outpath, last))
os.chdir(outpath)
args = ['crv', self.pathrel(output)]
args = ' '.join(args + [self.pathrel(n) for n in names])
try: os.remove(output)
except: pass
self.execute(self.exename['ar'], args, printcmd, capture)
os.chdir(cwd)
shutil.rmtree(temp)
return 0
    # Run an external tool defined in a config section
def cmdtool (self, sectname, exename, parameters, printcmd = False):
envsave = [ (n, os.environ[n]) for n in os.environ ]
hr = self._cmdline_init(sectname, exename)
if type(hr) != type(''):
if hr == -1:
msg = 'cmdtool error: can not find %s env !!'%(sectname)
else:
msg = 'cmdtool error: can not find %s exe !!'%(exename)
sys.stderr.write(msg + '\n')
sys.stderr.flush()
return -2
path = hr
cmd = '%s %s'%(path, parameters)
if printcmd:
print '>', cmd
sys.stdout.flush()
sys.stderr.flush()
os.system(cmd)
envflag = {}
remove = []
for n, v in envsave:
os.environ[n] = v
envflag[n] = True
for n in os.environ:
if not n in envflag:
remove.append(n)
for n in remove:
del os.environ[n]
return 0
    # Invoke Cygwin Bash
def cygwin_bash (self, cmds, capture = False):
import subprocess
output = ''
bashpath = self.pathshort(os.path.join(self.cygwin, 'bin/bash.exe'))
if 'Popen' in subprocess.__dict__:
args = [ bashpath, '--login' ]
outmode = capture and subprocess.PIPE or None
p = subprocess.Popen(args, shell = False, \
stdin = subprocess.PIPE, stdout = outmode, \
stderr = subprocess.STDOUT)
stdin, stdouterr = (p.stdin, p.stdout)
stdin.write(cmds + '\nexit\n')
stdin.flush()
if capture:
output = stdouterr.read()
p.wait()
else:
p = None
stdin, stdouterr = os.popen4('%s --login'%bashpath, 'b')
stdin.write(cmds + '\nexit\n')
stdin.flush()
if not capture:
while True:
output = stdouterr.readline()
if output == '':
break
sys.stdout.write(output + '\n')
sys.stdout.flush()
else:
output = stdouterr.read()
stdin = None
stdouterr = None
return output
    # Run a command line under Cygwin
def cygwin_execute (self, sect, exename, parameters = '', capture = 0):
capture = capture and True or False
sect = sect.lower()
home = self.win2cyg(os.getcwd())
cmds = 'export LANG=C\n'
if sect in self.config:
for n in self.config[sect]:
cmds += 'export %s="%s"\n'%(n.upper(), self.config[sect][n])
cmds += 'cd "%s"\n'%self.win2cyg(os.getcwd())
if exename:
exename = self.win2cyg(exename)
cmds += '"%s" %s\n'%(exename, parameters)
else:
cmds += '%s\n'%parameters
if 0:
print '-' * 72
print cmds
print '-' * 72
os.environ['EMAKECYGWIN'] = '1'
return self.cygwin_bash(cmds, capture)
    # Follow symbolic links to the real path
def readlink (self, fn):
if not self.unix:
return fn
while True:
try:
f2 = os.readlink(fn)
fn = f2
except:
break
return fn
    # Search for the Python development paths (cflags / ldflags)
def python_config (self):
cflags = self._getitem('default', 'python_cflags', None)
ldflags = self._getitem('default', 'python_ldflags', None)
if cflags or ldflags:
return (cflags.strip('\r\n\t '), ldflags.strip('\r\n\t '))
pythoninc, pythonlib = [], []
import distutils.sysconfig
sysconfig = distutils.sysconfig
inc1 = sysconfig.get_python_inc()
inc2 = sysconfig.get_python_inc(plat_specific = True)
pythoninc.append('-I' + self.pathtext(inc1))
if inc2 != inc1:
pythoninc.append('-I' + self.pathtext(inc2))
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
if not pyver:
v1, v2 = sys.version_info[:2]
pyver = self.unix and '%s.%s'%(v1, v2) or '%s%s'%(v1, v2)
lib1 = getvar('LIBS')
pythonlib.extend(lib1 and lib1.split() or [])
prefix = sys.prefix
if os.path.exists(prefix):
if not pythoninc:
n1 = os.path.join(prefix, 'include/python%s'%pyver)
n2 = os.path.join(prefix, 'include')
if os.path.exists(n1 + '/Python.h'):
pythoninc.append('-I' + self.pathtext(n1))
elif os.path.exists(n2 + '/Python.h'):
pythoninc.append('-I' + self.pathtext(n2))
if not pythonlib:
n1 = os.path.join(prefix, 'lib/python%s'%pyver)
n2 = os.path.join(n1, 'config')
n3 = os.path.join(prefix, 'libs')
fn1 = 'libpython' + pyver + '.a'
fn2 = 'libpython' + pyver + '.dll.a'
done = False
for ff in (fn1, fn2):
for nn in (n1, n2, n3):
if os.path.exists(nn + '/' + ff):
pythonlib.append('-L' + self.pathtext(nn))
done = True
break
if done:
break
lib2 = getvar('SYSLIBS')
pythonlib.extend(lib2 and lib2.split() or [])
if not getvar('Py_ENABLE_SHARED'):
if getvar('LIBPL'):
pythonlib.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
if getvar('LINKFORSHARED'):
pythonlib.extend(getvar('LINKFORSHARED').split())
pythonlib.append('-lpython' + pyver)
cflags = ' '.join(pythoninc)
ldflags = ' '.join(pythonlib)
return cflags, ldflags
    # Finalize the java configuration
def __java_final (self, home):
path = [ home ]
subdir = []
try:
for sub in os.listdir(home):
newpath = os.path.join(home, sub)
if os.path.isdir(newpath):
import difflib
m = difflib.SequenceMatcher(None, sys.platform, sub)
subdir.append((m.ratio(), sub))
except:
pass
subdir.sort()
if subdir:
path.append(os.path.join(home, subdir[-1][1]))
return ' '.join([ '-I%s'%self.pathtext(n) for n in path ])
    # Locate the java (JDK) home directory
def java_home (self):
jdk = self._getitem('default', 'java', None)
if jdk:
jdk = os.path.abspath(jdk)
if os.path.exists(os.path.join(jdk, 'include/jni.h')):
return jdk
jdk = os.environ.get('JAVA_HOME', None)
if jdk:
jdk = os.path.abspath(jdk)
if os.path.exists(jdk):
return jdk
spliter = self.unix and ':' or ';'
PATH = os.environ.get('PATH', '')
for path in PATH.split(spliter):
path = path.strip('\r\n\t ')
if not os.path.exists(path):
continue
fn = os.path.join(path, 'javac')
if not self.unix: fn += '.exe'
if not os.path.exists(fn):
continue
fn = self.readlink(fn)
if not os.path.exists(fn):
continue
pp = os.path.abspath(os.path.join(os.path.dirname(fn), '..'))
pp = os.path.join(pp, 'include')
if not os.path.exists(pp):
continue
jni = os.path.join(pp, 'jni.h')
if os.path.exists(jni):
pp = os.path.join(pp, '../')
return os.path.abspath(pp)
if self.unix:
for i in xrange(20, 4, -1):
n = '/usr/local/openjdk%d'%i
if os.path.exists(os.path.join(n, 'include/jni.h')):
return os.path.abspath(n)
n = '/usr/jdk/instances/jdk1.%d.0'%i
if os.path.exists(os.path.join(n, 'include/jni.h')):
return os.path.abspath(n)
return ''
    # Get the java configuration (cflags)
def java_config (self):
cflags = self._getitem('default', 'java_cflags', None)
if cflags:
return cflags.strip('\r\n\t ')
jdk = self.java_home()
if not jdk:
return ''
return self.__java_final(os.path.join(jdk, 'include'))
    # Run a java command: cmd is java, javac, jar, etc.
def java_call (self, cmd, args = [], capture = False):
if self.__jdk_home == None:
self.__jdk_home = self.java_home()
if not self.__jdk_home:
sys.stderr.write('can not find java in $JAVA_HOME or $PATH\n')
sys.stderr.flush()
sys.exit(1)
return None
if not self.unix:
ext = os.path.splitext(cmd)[-1].lower()
if not ext:
cmd += '.exe'
cc = os.path.join(self.__jdk_home, 'bin/%s'%cmd)
if not os.path.exists(cc):
sys.stderr.write('can not find %s in %s\n'%(cmd, self.__jdk_home))
sys.stderr.flush()
sys.exit(1)
return None
cmd = cc
if not self.unix:
cmd = self.pathshort(cmd)
cmds = [ cmd ]
for n in args:
cmds.append(n)
return execute(cmds, False, capture)
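#----------------------------------------------------------------------
# illustrative sketch (not part of the original emake): a configure
# object reads emake.ini, accumulates -I/-L/flag/define settings and
# drives gcc; the source and object file names are hypothetical and a
# working gcc installation is assumed
#----------------------------------------------------------------------
def _configure_example():
    cfg = configure()
    cfg.init()
    cfg.loadcfg()                     # load the [default] section
    cfg.push_flag('-O2')
    cfg.parameters()                  # rebuild param_compile / param_build
    cfg.compile('foo.c', 'foo.o', '', printcmd = True)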
#----------------------------------------------------------------------
# coremake: core project builder, provides Compile/Link/Build
#----------------------------------------------------------------------
class coremake(object):
    # Constructor
def __init__ (self, ininame = ''):
self.ininame = ininame
self.config = configure(self.ininame)
self.unix = self.config.unix
self.inited = 0
self.extnames = self.config.extnames
self.envos = {}
for k, v in os.environ.items():
self.envos[k] = v
self.reset()
    # Reset configuration
def reset (self):
self.config.reset()
self._out = '' # 最终输出的文件,比如abc.exe
self._int = '' # 中间文件的目录
self._main = '' # 主文件(工程文件)
self._mode = 'exe' # exe win dll lib
self._src = [] # 源代码
self._obj = [] # 目标文件
self._opt = []
self._export = {} # DLL导出配置
self._environ = {} # 环境变量
self.inited = 0
    # Initialize: set the project name, type, and the intermediate file directory
def init (self, main, out = 'a.out', mode = 'exe', intermediate = ''):
if not mode in ('exe', 'win', 'dll', 'lib'):
raise Exception("mode must in ('exe', 'win', 'dll', 'lib')")
self.reset()
self.config.init()
self.config.loadcfg()
self._main = os.path.abspath(main)
self._mode = mode
self._out = os.path.abspath(out)
self._int = intermediate
self._out = self.outname(self._out, mode)
    # Get the object file for a source file, given the source name and intermediate directory
def objname (self, srcname, intermediate = ''):
part = os.path.splitext(srcname)
ext = part[1].lower()
if ext in self.extnames:
if intermediate:
name = os.path.join(intermediate, os.path.split(part[0])[-1])
name = os.path.abspath(name + '.o')
else:
name = os.path.abspath(part[0] + '.o')
return name
if not ext in ('.o', '.obj'):
raise Exception('unknow ext-type of %s\n'%srcname)
return srcname
    # Get the output file name
def outname (self, output, mode = 'exe'):
if not mode in ('exe', 'win', 'dll', 'lib'):
raise Exception("mode must in ('exe', 'win', 'dll', 'lib')")
part = os.path.splitext(os.path.abspath(output))
output = part[0]
if mode == 'exe':
if self.unix == 0 and part[1] == '':
output += '.exe'
elif part[1]:
output += part[1]
elif mode == 'win':
if self.unix == 0 and part[1] == '':
output += '.exe'
elif part[1]:
output += part[1]
elif mode == 'dll':
if not part[1]:
if not self.unix: output += '.dll'
else: output += '.so'
else:
output += part[1]
elif mode == 'lib':
if not part[1]: output += '.a'
else: output += part[1]
return output
    # Map the source file list to object file names
def scan (self, sources, intermediate = ''):
src2obj = {}
obj2src = {}
for src in sources:
obj = self.objname(src, intermediate)
if obj in obj2src:
p1, p2 = os.path.splitext(obj)
index = 1
while True:
name = '%s%d%s'%(p1, index, p2)
if not name in obj2src:
obj = name
break
index += 1
src2obj[src] = obj
obj2src[obj] = src
obj2src = None
return src2obj
    # Add a source file and its object file
def push (self, srcname, objname, options):
self._src.append(os.path.abspath(srcname))
self._obj.append(os.path.abspath(objname))
self._opt.append(options)
    # Create a directory (including parents)
def mkdir (self, path):
path = os.path.abspath(path)
if os.path.exists(path):
return 0
name = ''
part = os.path.abspath(path).replace('\\', '/').split('/')
if self.unix:
name = '/'
if (not self.unix) and (path[1:2] == ':'):
part[0] += '/'
for n in part:
name = os.path.abspath(os.path.join(name, n))
if not os.path.exists(name):
os.mkdir(name)
return 0
    # Remove a file (the old output)
def remove (self, path):
try: os.remove(path)
except: pass
if os.path.exists(path):
sys.stderr.write('error: cannot remove \'%s\'\n'%path)
sys.stderr.flush()
sys.exit(0)
return 0
    # DLL configuration
def dllwrap (self, name):
if sys.platform[:3] != 'win':
return -1
if self._mode != 'dll':
return -2
name = name.lower()
main = os.path.splitext(os.path.abspath(self._out))[0]
main = os.path.split(main)[-1]
main = os.path.abspath(os.path.join(self._int, main))
if name == 'def':
self._export['def'] = main + '.def'
elif name == 'lib':
self._export['lib'] = main + '.a'
elif name in ('hidden', 'hide', 'none'):
self._export['hide'] = 1
elif name in ('msvc', 'MSVC'):
self._export['def'] = main + '.def'
self._export['msvc'] = main + '.lib'
self._export['msvc64'] = 0
elif name in ('msvc64', 'MSVC64'):
self._export['def'] = main + '.def'
self._export['msvc'] = main + '.lib'
self._export['msvc64'] = 1
return 0
    # Parameters for DLL export
def _dllparam (self):
defname = self._export.get('def', '')
libname = self._export.get('lib', '')
msvclib = self._export.get('msvc', '')
hidden = self._export.get('hide', 0)
if (not defname) and (not libname):
return ''
param = ''
if not hidden: param += '--export-all '
if defname:
param += '--output-def %s '%self.config.pathrel(defname)
if libname:
param += '--implib %s '%self.config.pathrel(libname)
return param
    # Post-build steps after a DLL has been linked
def _dllpost (self):
defname = self._export.get('def', '')
libname = self._export.get('lib', '')
msvclib = self._export.get('msvc', '')
dllname = self._out
if not msvclib:
return 0
if not os.path.exists(defname):
return -1
machine = '/machine:i386'
msvc64 = self._export.get('msvc64', 0)
if msvc64:
machine = '/machine:x64'
defname = self.config.pathtext(self.config.pathrel(defname))
msvclib = self.config.pathtext(self.config.pathrel(msvclib))
parameters = '-nologo ' + machine + ' /def:' + defname
parameters += ' /out:' + msvclib
self.config.cmdtool('msvc', 'LIB.EXE', parameters, False)
return 0
    # Single-core compile: skipexist (whether to skip existing obj files)
def _compile_single (self, skipexist, printmode, printcmd):
retval = 0
for i in xrange(len(self._src)):
srcname = self._src[i]
objname = self._obj[i]
options = self._opt[i]
if srcname == objname:
continue
if skipexist and os.path.exists(objname):
continue
try: os.remove(os.path.abspath(objname))
except: pass
if printmode & 1:
name = self.config.pathrel(srcname)
if name[:1] == '"':
name = name[1:-1]
if CFG['abspath']:
name = os.path.abspath(srcname)
print name
self.config.compile(srcname, objname, options, printcmd)
if not os.path.exists(objname):
retval = -1
break
return retval
    # Multi-core compile: skipexist (whether to skip existing obj files)
def _compile_threading (self, skipexist, printmode, printcmd, cpus):
        # Estimate compile cost: bigger files are assumed to take longer and are scheduled first
ctasks = [ (os.path.getsize(s), s, o, t) for s, o, t in zip(self._src, self._obj, self._opt) ]
ctasks.sort()
import threading
self._task_lock = threading.Lock()
self._task_retval = 0
self._task_finish = False
self._task_queue = ctasks
self._task_thread = []
self._task_error = ''
for n in xrange(cpus):
parameters = (skipexist, printmode, printcmd, cpus - 1 - n)
th = threading.Thread(target = self._compile_working_thread, args = parameters)
self._task_thread.append(th)
for th in self._task_thread:
th.start()
for th in self._task_thread:
th.join()
self._task_thread = None
self._task_lock = None
self._task_queue = None
for objname in self._obj:
if not os.path.exists(objname):
self._task_retval = -1
break
return self._task_retval
    # Worker thread doing the actual compiles
def _compile_working_thread (self, skipexist, printmode, printcmd, id):
mutex = self._task_lock
while True:
weight, srcname, objname = 0, '', ''
mutex.acquire()
if self._task_finish:
mutex.release()
break
if not self._task_queue:
mutex.release()
break
weight, srcname, objname, options = self._task_queue.pop()
mutex.release()
if srcname == objname:
continue
if skipexist and os.path.exists(objname):
continue
try: os.remove(os.path.abspath(objname))
except: pass
timeslap = time.time()
output = self.config.compile(srcname, objname, options, printcmd, True)
timeslap = time.time() - timeslap
result = True
if not os.path.exists(objname):
mutex.acquire()
self._task_retval = -1
self._task_finish = True
mutex.release()
result = False
mutex.acquire()
if printmode & 1:
name = self.config.pathrel(srcname)
if name[:1] == '"':
name = name[1:-1]
if CFG['abspath']:
name = os.path.abspath(srcname)
sys.stdout.write(name + '\n')
if sys.platform[:3] == 'win':
lines = [ x.rstrip('\r\n') for x in output.split('\n') ]
output = '\n'.join(lines)
sys.stdout.write(output)
sys.stdout.flush()
mutex.release()
time.sleep(0.01)
return 0
    # Compile: skipexist (whether to skip existing obj files)
def compile (self, skipexist = False, printmode = 0, cpus = 0):
self.config.check()
self.mkdir(os.path.abspath(self._int))
printcmd = False
if printmode & 4:
printcmd = True
if printmode & 2:
print 'compiling ...'
t = time.time()
if cpus <= 1:
retval = self._compile_single(skipexist, printmode, printcmd)
else:
retval = self._compile_threading(skipexist, printmode, printcmd, cpus)
t = time.time() - t
#print 'time', t
return retval
    # Link: skipexist (whether to skip an existing output file)
def link (self, skipexist = False, printmode = 0):
self.config.check()
retval = 0
printcmd = False
if printmode & 4:
printcmd = True
if printmode & 2:
print 'linking ...'
output = self._out
if skipexist and os.path.exists(output):
return output
self.remove(output)
self.mkdir(os.path.split(output)[0])
if self._mode == 'exe':
self.config.makeexe(output, self._obj, '', printcmd)
elif self._mode == 'win':
param = '-mwindows'
self.config.makeexe(output, self._obj, param, printcmd)
elif self._mode == 'dll':
param = self._dllparam()
self.config.makedll(output, self._obj, param, printcmd)
if param and os.path.exists(output):
self._dllpost()
elif self._mode == 'lib':
self.config.makelib(output, self._obj, printcmd)
if not os.path.exists(output):
return ''
return output
    # Run the build events
def event (self, scripts):
if not scripts:
return False
        # Save the environment
envsave = {}
for k, v in os.environ.items():
envsave[k] = v
        # Set up the environment
environ = {}
for k, v in self._environ.items():
environ[k] = v
environ['EMAKE'] = os.path.abspath(__file__)
environ['EMAKEP'] = os.path.dirname(os.path.abspath(__file__))
environ['EMHOME'] = self.config.dirhome
environ['EMOUT'] = self._out
environ['EMINT'] = self._int
environ['EMMAIN'] = self._main
environ['EMPATH'] = os.path.dirname(self._main)
environ['EMMODE'] = self._mode
environ['EMMAINN'] = os.path.splitext(self._main)[0]
environ['EMMAINE'] = os.path.splitext(self._main)[1]
environ['EMMAINP'] = os.path.dirname(self._main)
environ['EMOUTN'] = os.path.splitext(self._out)[0]
environ['EMOUTE'] = os.path.splitext(self._out)[1]
environ['EMOUTP'] = os.path.dirname(self._out)
for name in ('gcc', 'ar', 'ld', 'as', 'nasm', 'yasm', 'dllwrap'):
environ['EM' + name.upper()] = self.config.getname(name)
for k, v in environ.items(): # expand macros
environ[k] = self.config._expand(environ, envsave, k)
for k, v in environ.items():
os.environ[k] = v
# Run the scripts
workdir = os.path.dirname(self._main)
savecwd = os.getcwd()
for script in scripts:
if savecwd != workdir:
os.chdir(workdir)
os.system(script)
os.chdir(savecwd)
# Restore the environment
for k, v in envsave.items():
if os.environ.get(k) != v:
os.environ[k] = v
for k in os.environ.keys():
if not k in envsave:
del os.environ[k]
return True
# Compile and link
def build (self, skipexist = False, printmode = 0):
if self.compile(skipexist, printmode) != 0:
return -1
output = self.link(skipexist, printmode)
if output == '':
return -2
return output
#----------------------------------------------------------------------
# iparser: project parser that collects configuration information
#----------------------------------------------------------------------
class iparser (object):
# Constructor
def __init__ (self, ininame = ''):
self.preprocessor = preprocessor()
self.coremake = coremake(ininame)
self.config = self.coremake.config
self.extnames = self.config.extnames
self.reset()
# Reset configuration
def reset (self):
self.src = []
self.inc = []
self.lib = []
self.imp = []
self.exp = []
self.link = []
self.flag = []
self.flnk = []
self.wlnk = []
self.cond = []
self.environ = {}
self.events = {}
self.mode = 'exe'
self.define = {}
self.name = ''
self.home = ''
self.info = 3
self.out = ''
self.int = ''
self.makefile = ''
self.incdict = {}
self.libdict = {}
self.srcdict = {}
self.chkdict = {}
self.optdict = {}
self.impdict = {}
self.expdict = {}
self.linkdict = {}
self.flagdict = {}
self.flnkdict = {}
self.wlnkdict = {}
self.conddict = {}
self.makefile = ''
# Get the object file name for a source file
def __getitem__ (self, key):
return self.srcdict[key]
# Get the number of modules
def __len__ (self):
return len(self.srcdict)
# Check whether a module is included
def __contains__ (self, key):
return (key in self.srcdict)
# Get an iterator
def __iter__ (self):
return self.src.__iter__()
# Add a source file
def push_src (self, filename, options):
filename = os.path.abspath(filename)
realname = os.path.normcase(filename)
if filename in self.srcdict:
return -1
if realname in self.chkdict:
return -1
self.srcdict[filename] = ''
self.chkdict[realname] = ''
self.optdict[filename] = options
self.src.append(filename)
return 0
# Add a link library
def push_link (self, linkname):
if linkname in self.linkdict:
return -1
self.linkdict[linkname] = len(self.link)
self.link.append(linkname)
return 0
# Add an include path
def push_inc (self, inc):
if inc in self.incdict:
return -1
self.incdict[inc] = len(self.inc)
self.inc.append(inc)
return 0
# Add a library search path
def push_lib (self, lib):
if lib in self.libdict:
return -1
self.libdict[lib] = len(self.lib)
self.lib.append(lib)
return 0
# Add a compiler flag
def push_flag (self, flag):
if flag in self.flagdict:
return -1
self.flagdict[flag] = len(self.flag)
self.flag.append(flag)
return 0
# Add a macro definition
def push_define (self, define, value = 1):
self.define[define] = value
return 0
# Add a linker flag
def push_flnk (self, flnk):
if flnk in self.flnkdict:
return -1
self.flnkdict[flnk] = len(self.flnk)
self.flnk.append(flnk)
# Add a pass-through linker flag
def push_wlnk (self, wlnk):
if wlnk in self.wlnkdict:
return -1
self.wlnkdict[wlnk] = len(self.wlnk)
self.wlnk.append(wlnk)
# Add a conditional compile flag
def push_cond (self, flag, condition):
key = (flag, condition)
if key in self.conddict:
return -1
self.conddict[key] = len(self.cond)
self.cond.append(key)
# Add an imported configuration section
def push_imp (self, name, fname = '', lineno = -1):
if name in self.impdict:
return -1
self.impdict[name] = len(self.imp)
self.imp.append((name, fname, lineno))
return 0
# Add an export entry
def push_exp (self, name, fname = '', lineno = -1):
if name in self.expdict:
return -1
self.expdict[name] = len(self.exp)
self.exp.append((name, fname, lineno))
# Add an environment variable
def push_environ (self, name, value):
self.environ[name] = value
# Add a build event
def push_event (self, name, value):
if not name in self.events:
self.events[name] = []
self.events[name].append(value)
# Start parsing
def parse (self, makefile):
self.reset()
self.config.init()
makefile = os.path.abspath(makefile)
self.makefile = makefile
part = os.path.split(makefile)
self.home = part[0]
self.name = os.path.splitext(part[1])[0]
if not os.path.exists(makefile):
sys.stderr.write('error: %s cannot be opened\n'%(makefile))
sys.stderr.flush()
return -1
cfg = self.config.config.get('default', {})
for name in ('prebuild', 'prelink', 'postbuild'):
body = cfg.get(name, '').strip('\r\n\t ').split('&&')
for script in body:
script = script.strip('\r\n\t ')
self.push_event(name, script)
extname = os.path.splitext(makefile)[1].lower()
if extname in ('.mak', '.em', '.emk', '.pyx', '.py'):
if self.scan_makefile() != 0:
return -3
elif extname in self.extnames:
if self.scan_mainfile() != 0:
return -4
else:
sys.stderr.write('error: unknown file type of "%s"\n'%makefile)
sys.stderr.flush()
return -5
if not self.out:
self.out = os.path.splitext(makefile)[0]
self.out = self.coremake.outname(self.out, self.mode)
self._update_obj_names()
return 0
# Get a path relative to the current directory
def pathrel (self, name, current = ''):
if not current:
current = os.getcwd()
current = current.replace('\\', '/')
if len(current) > 0:
if current[-1] != '/':
current += '/'
name = self.path(name).replace('\\', '/')
size = len(current)
if name[:size] == current:
name = name[size:]
return name
# Normalize a configured path
def pathconf (self, path):
path = path.strip(' \r\n\t')
if path[:1] == '\'' and path[-1:] == '\'': path = path[1:-1]
if path[:1] == '\"' and path[-1:] == '\"': path = path[1:-1]
return path.strip(' \r\n\t')
# Scan the source code for project info embedded in marked comments
def _scan_memo (self, filename, prefix = '!'):
command = []
content = open(filename, 'U').read()
srctext = self.preprocessor.preprocess(content)
srcline = [ 0 for i in xrange(len(srctext)) ]
length = len(srctext)
lineno = 1
for i in xrange(len(srctext)):
srcline[i] = lineno
if srctext[i] == '\n':
lineno += 1
start = 0
endup = 0
while (start >= 0) and (start < length):
start = endup
endup = srctext.find('`', start)
if endup < 0:
break
start = endup
head = content[start:start + 2]
body = ''
if head == '//':
endup = srctext.find('\n', start)
if endup < 0: endup = length
body = content[start + 2:endup]
endup += 1
elif head == '/*':
endup = content.find('*/', start)
if endup < 0: endup = length
body = content[start + 2:endup]
endup += 2
else:
raise Exception('error comment')
if body[:len(prefix)] != prefix:
continue
pos = start + 2 + len(prefix)
body = body[len(prefix):]
if pos >= length: pos = length - 1
lineno = srcline[pos]
for n in body.split('\n'):
command.append((lineno, n.strip('\r\n').strip(' \t')))
lineno += 1
return command
# Scan the main source file
def scan_mainfile (self):
command = self._scan_memo(self.makefile)
savedir = os.getcwd()
os.chdir(os.path.split(self.makefile)[0])
retval = 0
for lineno, text in command:
if self._process(self.makefile, lineno, text) != 0:
retval = -1
break
os.chdir(savedir)
self.push_src(self.makefile, '')
return retval
# Scan the project (makefile) file
def scan_makefile (self):
savedir = os.getcwd()
os.chdir(os.path.split(self.makefile)[0])
ext = os.path.splitext(self.makefile)[1].lower()
lineno = 1
retval = 0
for text in open(self.makefile, 'U'):
if ext in ('.pyx', '.py'):
text = text.strip('\r\n\t ')
if text[:3] != '##!':
continue
text = text[3:]
if self._process(self.makefile, lineno, text) != 0:
retval = -1
break
lineno += 1
os.chdir(savedir)
return retval
# Report an error
def error (self, text, fname = '', line = -1):
message = ''
if fname and line > 0:
message = '%s:%d: '%(fname, line)
sys.stderr.write(message + text + '\n')
sys.stderr.flush()
return 0
# Process a source-file entry
def _process_src (self, textline, fname = '', lineno = -1):
ext1 = ('.c', '.cpp', '.cc', '.cxx', '.asm')
ext2 = ('.s', '.o', '.obj', '.m', '.mm')
body, options = textline, ''
pos = textline.find(':')
if pos >= 0:
split = (sys.platform[:3] != 'win') and True or False
if sys.platform[:3] == 'win':
if not textline[pos:pos + 2] in (':/', ':\\'):
split = True
if split:
body = textline[:pos].strip('\r\n\t ')
options = textline[pos + 1:].strip('\r\n\t ')
for name in body.replace(';', ',').split(','):
srcname = self.pathconf(name)
if not srcname:
continue
if (not '*' in srcname) and (not '?' in srcname):
names = [ srcname ]
else:
import glob
names = glob.glob(srcname)
for srcname in names:
absname = os.path.abspath(srcname)
if not os.path.exists(absname):
self.error('error: %s: No such file'%srcname, \
fname, lineno)
return -1
extname = os.path.splitext(absname)[1].lower()
if (not extname in ext1) and (not extname in ext2):
self.error('error: %s: Unknown file type'%absname, \
fname, lineno)
return -2
self.push_src(absname, options)
return 0
# Process: parse one command line
def _process (self, fname, lineno, text):
text = text.strip(' \t\r\n')
if not text: # blank line
return 0
if text[:1] in (';', '#'): # skip comments
return 0
pos = text.find(':')
if pos < 0:
self.error('unknown emake command', fname, lineno)
return -1
command, body = text[:pos].lower(), text[pos + 1:]
pos = command.find('/')
if pos >= 0:
condition, command = command[:pos].lower(), command[pos + 1:]
match = False
for cond in condition.replace(';', ',').split(','):
cond = cond.strip('\r\n\t ')
if not cond: continue
if cond in self.config.name:
match = True
break
if not match:
#print '"%s" not in %s'%(condition, self.config.name)
return 0
environ = {}
environ['target'] = self.config.target
environ['int'] = self.int
environ['out'] = self.out
environ['mode'] = self.mode
environ['home'] = os.path.dirname(os.path.abspath(fname))
environ['bin'] = self.config.dirhome
for name in ('gcc', 'ar', 'ld', 'as', 'nasm', 'yasm', 'dllwrap'):
if name in self.config.exename:
data = self.config.exename[name]
environ[name] = os.path.join(self.config.dirhome, data)
environ['cc'] = environ['gcc']
for name in environ:
key = '$(%s)'%name
val = environ[name]
if key in body:
body = body.replace(key, val)
if command in ('out', 'output'):
self.out = os.path.abspath(self.pathconf(body))
return 0
if command in ('int', 'intermediate'):
self.int = os.path.abspath(self.pathconf(body))
return 0
if command in ('src', 'source'):
retval = self._process_src(body, fname, lineno)
return retval
if command in ('mode', 'mod'):
body = body.lower().strip(' \r\n\t')
if not body in ('exe', 'win', 'lib', 'dll'):
self.error('error: %s: mode is not supported'%body, \
fname, lineno)
return -1
self.mode = body
return 0
if command == 'link':
for name in body.replace(';', ',').split(','):
srcname = self.pathconf(name)
if not srcname:
continue
self.push_link(srcname)
return 0
if command in ('inc', 'lib'):
for name in body.replace(';', ',').split(','):
srcname = self.pathconf(name)
if not srcname:
continue
absname = os.path.abspath(srcname)
if not os.path.exists(absname):
self.error('error: %s: No such directory'%srcname, \
fname, lineno)
return -1
if command == 'inc':
self.push_inc(absname)
elif command == 'lib':
self.push_lib(absname)
return 0
if command == 'flag':
for name in body.replace(';', ',').split(','):
srcname = self.pathconf(name)
if not srcname:
continue
if srcname[:2] in ('-o', '-I', '-B', '-L'):
self.error('error: %s: invalid option'%srcname, \
fname, lineno)
self.push_flag(srcname)
return 0
if command in ('flnk', 'linkflag', 'flink'):
for name in body.replace(';', ',').split(','):
srcname = self.pathconf(name)
if not srcname:
continue
self.push_flnk(srcname)
return 0
if command in ('wlnk', 'wl', 'ld', 'wlink'):
for name in body.replace(';', ',').split(','):
srcname = self.pathconf(name)
if not srcname:
continue
self.push_wlnk(srcname)
return 0
for cond in ('cflag', 'cxxflag', 'sflag', 'mflag', 'mmflag'):
if command == cond or command.rstrip('s') == cond:
for name in body.replace(';', ',').split(','):
flag = self.pathconf(name)
if not flag:
continue
if flag[:2] in ('-o', '-I', '-B', '-L'):
self.error('error: %s: invalid option'%flag, \
fname, lineno)
self.push_cond(flag, cond)
return 0
if command in ('arglink', 'al'):
self.push_flnk(body.strip('\r\n\t '))
return 0
if command in ('argcc', 'ac'):
self.push_flag(body.strip('\r\n\t '))
return 0
if command == 'define':
for name in body.replace(';', ',').split(','):
srcname = self.pathconf(name).replace(' ', '_')
if not srcname:
continue
self.push_define(srcname)
return 0
if command == 'info':
body = body.strip(' \t\r\n').lower()
if body in ('0', 'false', 'off'):
self.info = 0
else:
try: info = int(body)
except: info = 3
self.info = info
return 0
if command in ('cexe', 'clib', 'cdll' ,'cwin', 'exe', 'dll', 'win'):
if not self.int:
self.int = os.path.abspath(os.path.join('objs', self.config.target))
self.mode = command[-3:]
retval = self._process_src(body, fname, lineno)
return retval
if command in ('swf', 'swc', 'elf'):
self.mode = 'exe'
if not self.out:
self.out = os.path.splitext(fname)[0] + '.' + command
if not self.int:
self.int = os.path.abspath(os.path.join('objs', self.config.target))
body = body.strip('\r\n\t ')
if command == 'swf':
self.push_flnk('-emit-swf')
pos = body.find('x')
if pos >= 0:
try:
t1 = int(body[:pos])
t2 = int(body[pos + 1:])
except:
self.error('error: %s: bad size'%body, fname, lineno)
return -1
self.push_flnk('-swf-size=%dx%d'%(t1, t2))
elif body:
self.error('error: %s: bad size'%body, fname, lineno)
return -1
elif command == 'swc':
if not body:
self.error('error: namespace empty', fname, lineno)
return -1
self.push_flnk('-emit-swc=' + body.strip('\t\n\r '))
else:
return self._process_src(body, fname, lineno)
return 0
if command in ('imp', 'import'):
for name in body.replace(';', ',').split(','):
name = self.pathconf(name)
if not name:
continue
self.push_imp(name, fname, lineno)
return 0
if command in ('exp', 'export'):
self.dllexp = 'yes'
for name in body.replace(';', ',').split(','):
name = self.pathconf(name).lower()
if not name:
continue
self.push_exp(name, fname, lineno)
return 0
if command == 'echo':
print body
return 0
if command == 'color':
self.console(int(body.strip('\r\n\t '), 0))
return 0
if command in ('prebuild', 'prelink', 'postbuild'):
self.push_event(command, body)
return 0
if command == 'environ':
for name in body.replace(';', ',').split(','):
name = name.strip('\r\n\t ')
k, v = (name.split('=') + ['',''])[:2]
self.push_environ(k.strip('\r\n\t '), v.strip('\r\n\t '))
return 0
if command == 'use':
for name in body.replace(';', ',').split(','):
name = name.strip('\r\n\t ')
if name == 'python':
cflags, ldflags = self.config.python_config()
if cflags:
self.push_flag(cflags)
if ldflags:
self.push_flnk(ldflags)
elif name == 'java':
java = self.config.java_config()
if java:
self.push_flag(java)
else:
tt = 'error: %s: invalid name to use, only python or java'
self.error(tt%name, fname, lineno)
return -1
return 0
self.error('error: %s: invalid command'%command, fname, lineno)
return -1
# Scan sources and determine object file names
def _update_obj_names (self):
src2obj = self.coremake.scan(self.src, self.int)
for fn in self.src:
obj = src2obj[fn]
self.srcdict[fn] = os.path.abspath(obj)
return 0
# Set the terminal color
def console (self, color):
if not os.isatty(sys.stdout.fileno()):
return False
if sys.platform[:3] == 'win':
try: import ctypes
except: return 0
kernel32 = ctypes.windll.LoadLibrary('kernel32.dll')
GetStdHandle = kernel32.GetStdHandle
SetConsoleTextAttribute = kernel32.SetConsoleTextAttribute
GetStdHandle.argtypes = [ ctypes.c_uint32 ]
GetStdHandle.restype = ctypes.c_size_t
SetConsoleTextAttribute.argtypes = [ ctypes.c_size_t, ctypes.c_uint16 ]
SetConsoleTextAttribute.restype = ctypes.c_long
handle = GetStdHandle(0xfffffff5)
if color < 0: color = 7
result = 0
if (color & 1): result |= 4
if (color & 2): result |= 2
if (color & 4): result |= 1
if (color & 8): result |= 8
if (color & 16): result |= 64
if (color & 32): result |= 32
if (color & 64): result |= 16
if (color & 128): result |= 128
SetConsoleTextAttribute(handle, result)
else:
if color >= 0:
foreground = color & 7
background = (color >> 4) & 7
bold = color & 8
sys.stdout.write("\033[%s3%d;4%dm"%(bold and "01;" or "", foreground, background))
sys.stdout.flush()
else:
sys.stdout.write("\033[0m")
sys.stdout.flush()
return 0
#----------------------------------------------------------------------
# dependence: dependency analysis for Compile/Link/Build
#----------------------------------------------------------------------
class dependence (object):
def __init__ (self, parser = None):
self.parser = parser
self.preprocessor = preprocessor()
self.reset()
def reset (self):
self._mtime = {}
self._dirty = {}
self._depinfo = {}
self._depname = ''
self._outchg = False
def mtime (self, fname):
fname = os.path.abspath(fname)
if fname in self._mtime:
return self._mtime[fname]
try: mtime = os.path.getmtime(fname)
except: mtime = 0.0
mtime = float('%.6f'%mtime)
self._mtime[fname] = mtime
return mtime
def _scan_src (self, srcname):
srcname = os.path.abspath(srcname)
if not srcname in self.parser:
return None
if not os.path.exists(srcname):
return None
objname = self.parser[srcname]
head, lost, src = self.preprocessor.dependence(srcname)
filelist = [srcname] + head
dependence = []
for fn in filelist:
name = os.path.abspath(fn)
dependence.append((name, self.mtime(name)))
return dependence
def _update_dep (self, srcname):
srcname = os.path.abspath(srcname)
if not srcname in self.parser:
return -1
retval = 0
debug = 0
if debug: print '\n<dep:%s>'%srcname
objname = self.parser[srcname]
srctime = self.mtime(srcname)
objtime = self.mtime(objname)
update = False
info = self._depinfo.setdefault(srcname, {})
if len(info) == 0:
update = True
if not update:
for fn in info:
if not os.path.exists(fn):
update = True
break
oldtime = info[fn]
newtime = self.mtime(fn)
if newtime > oldtime:
update = True
#print '%f %f %f'%(newtime, oldtime, newtime - oldtime)
break
if update:
dependence = self._scan_src(srcname)
info = {}
self._depinfo[srcname] = info
if not dependence:
return -2
for fname, mtime in dependence:
info[fname] = mtime
info = self._depinfo[srcname]
for fn in info:
oldtime = info[fn]
if oldtime > objtime:
self._dirty[srcname] = 1
retval = 1
break
if debug: print '</dep:%s>\n'%srcname
return retval
def _load_dep (self):
lineno = -1
retval = 0
if os.path.exists(self._depname):
for line in open(self._depname, 'U'):
line = line.strip(' \t\r\n')
if not line: continue
pos = line.find('=')
if pos < 0: continue
src, body = line[:pos], line[pos + 1:]
src = os.path.abspath(src)
if not os.path.exists(src): continue
item = body.replace(';', ',').split(',')
count = len(item) / 2
info = {}
self._depinfo[src] = info
for i in xrange(count):
fname = item[i * 2 + 0].strip(' \r\n\t')
mtime = item[i * 2 + 1].strip(' \r\n\t')
fname = self.parser.pathconf(fname)
info[fname] = float(mtime)
retval = 0
for fn in self.parser:
self._update_dep(fn)
return retval
def _save_dep (self):
path = os.path.split(self._depname)[0]
if not os.path.exists(path):
self.parser.coremake.mkdir(path)
fp = open(self._depname, 'w')
names = self._depinfo.keys()
names.sort()
for src in names:
info = self._depinfo[src]
fp.write('%s = '%(src))
part = []
keys = info.keys()
keys.sort()
for fname in keys:
mtime = info[fname]
if ' ' in fname: fname = '"%s"'%fname
part.append('%s, %.6f'%(fname, mtime))
fp.write(', '.join(part) + '\n')
fp.close()
return 0
def process (self):
self.reset()
parser = self.parser
depname = parser.name + '.p'
self._depname = os.path.join(parser.home, depname)
if parser.int:
self._depname = os.path.join(parser.int, depname)
self._depname = os.path.abspath(self._depname)
self._load_dep()
self._save_dep()
for info in self._depinfo:
dirty = (info in self._dirty) and 1 or 0
#print info, '=', dirty
return 0
#----------------------------------------------------------------------
# emake: project build, Compile/Link/Build
#----------------------------------------------------------------------
class emake (object):
def __init__ (self, ininame = ''):
if ininame == '': ininame = 'emake.ini'
self.parser = iparser(ininame)
self.coremake = self.parser.coremake
self.dependence = dependence(self.parser)
self.config = self.coremake.config
self.unix = self.coremake.unix
self.cpus = -1
self.loaded = 0
def reset (self):
self.parser.reset()
self.coremake.reset()
self.dependence.reset()
self.loaded = 0
def open (self, makefile):
self.reset()
self.config.init()
environ = {}
cfg = self.config.config
if 'environ' in cfg:
for k, v in cfg['environ'].items():
environ[k.upper()] = v
retval = self.parser.parse(makefile)
if retval != 0:
return -1
parser = self.parser
self.coremake.init(makefile, parser.out, parser.mode, parser.int)
#print 'open', parser.out, parser.mode, parser.int
for src in self.parser:
obj = self.parser[src]
opt = self.parser.optdict[src]
self.coremake.push(src, obj, opt)
savedir = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(makefile)))
hr = self._config()
os.chdir(savedir)
if hr != 0:
return -2
self.coremake._environ = {}
for k, v in environ.items():
self.coremake._environ[k] = v
for k, v in self.parser.environ.items():
self.coremake._environ[k] = v
self.dependence.process()
self.loaded = 1
return 0
def _config (self):
self.config.replace['makefile'] = self.coremake._main
self.config.replace['workspace'] = os.path.dirname(self.coremake._main)
for name, fname, lineno in self.parser.imp:
if not name in self.config.config:
self.parser.error('error: %s: No such config section'%name, \
fname, lineno)
return -1
self.config.loadcfg(name, True)
for inc in self.parser.inc:
self.config.push_inc(inc)
#print 'inc', inc
for lib in self.parser.lib:
self.config.push_lib(lib)
#print 'lib', lib
for flag in self.parser.flag:
self.config.push_flag(flag)
#print 'flag', flag
for link in self.parser.link:
self.config.push_link(link)
#print 'link', link
for pdef in self.parser.define:
self.config.push_pdef(pdef)
#print 'pdef', pdef
for flnk in self.parser.flnk:
self.config.push_flnk(flnk)
#print 'flnk', flnk
for wlnk in self.parser.wlnk:
self.config.push_wlnk(wlnk)
for cond in self.parser.cond:
self.config.push_cond(cond[0], cond[1])
if self.parser.mode == 'dll' and self.config.unix:
if self.config.fpic:
self.config.push_flag('-fPIC')
for name, fname, lineno in self.parser.exp:
self.coremake.dllwrap(name)
self.config.parameters()
#print 'replace', self.config.replace
return 0
def compile (self, printmode = 0):
if not self.loaded:
return 1
dirty = 0
for src in self.parser:
if src in self.dependence._dirty:
obj = self.parser[src]
if obj != src:
self.coremake.remove(obj)
dirty += 1
if dirty:
self.coremake.remove(self.parser.out)
self.coremake.event(self.parser.events.get('prebuild', []))
cpus = self.config.cpus
if self.cpus >= 0:
cpus = self.cpus
retval = self.coremake.compile(True, printmode, cpus)
if retval != 0:
return 2
return 0
def link (self, printmode = 0):
if not self.loaded:
return 1
update = False
outname = self.parser.out
outtime = self.dependence.mtime(outname)
for src in self.parser:
obj = self.parser[src]
mtime = self.dependence.mtime(obj)
if mtime == 0 or mtime > outtime:
update = True
break
if update:
self.coremake.remove(self.parser.out)
self.coremake.event(self.parser.events.get('prelink', []))
retval = self.coremake.link(True, printmode)
if retval:
self.coremake.event(self.parser.events.get('postbuild', []))
return 0
return 3
def build (self, printmode = 0):
if not self.loaded:
return 1
retval = self.compile(printmode)
if retval != 0:
return 2
retval = self.link(printmode)
if retval != 0:
return 3
return 0
def clean (self):
if not self.loaded:
return 1
for src in self.parser:
obj = self.parser[src]
if obj != src:
self.coremake.remove(obj)
if self.loaded:
self.coremake.remove(self.parser.out)
return 0
def rebuild (self, printmode = -1):
if not self.loaded:
return 1
self.clean()
return self.build(printmode)
def execute (self):
if not self.loaded:
return 1
outname = os.path.abspath(self.parser.out)
if not self.parser.mode in ('exe', 'win'):
sys.stderr.write('cannot execute: \'%s\'\n'%outname)
sys.stderr.flush()
return 8
if not os.path.exists(outname):
sys.stderr.write('cannot find: \'%s\'\n'%outname)
sys.stderr.flush()
return 9
os.system('"%s"'%outname)
return 0
def call (self, cmdline):
if not self.loaded:
return 1
self.coremake.event([cmdline])
return 0
def info (self, name = ''):
name = name.lower()
if name == '': name = 'out'
if name in ('out', 'outname'):
print self.parser.out
elif name in ('home', 'base'):
print self.parser.home
elif name in ('list'):
for src in self.parser:
print src
elif name in ('dirty', 'changed'):
for src in self.parser:
if src in self.dependence._dirty:
print src
return 0
#----------------------------------------------------------------------
# speed up
#----------------------------------------------------------------------
def _psyco_speedup():
try:
import psyco
psyco.bind(preprocessor)
psyco.bind(configure)
psyco.bind(coremake)
psyco.bind(emake)
#print 'full optimize'
except:
return False
return True
#----------------------------------------------------------------------
# distribution
#----------------------------------------------------------------------
def install():
filepath = os.path.abspath(sys.argv[0])
if not os.path.exists(filepath):
print 'error: cannot open "%s"'%filepath
return -1
if sys.platform[:3] == 'win':
print 'error: install must be run under unix'
return -2
try:
f1 = open(filepath, 'r')
except:
print 'error: cannot read "%s"'%filepath
return -3
content = f1.read()
f1.close()
name2 = '/usr/local/bin/emake.py'
name3 = '/usr/local/bin/emake'
if os.path.exists(name2):
print '/usr/local/bin/emake.py already exists, you should delete it'
return -6
if os.path.exists(name3):
print '/usr/local/bin/emake already exists, you should delete it'
return -7
try:
f2 = open(name2, 'w')
except:
print 'error: cannot write "%s"'%name2
return -4
try:
f3 = open(name3, 'w')
except:
print 'error: cannot write "%s"'%name3
f2.close()
return -5
f2.write(content)
f3.write(content)
f2.close()
f3.close()
os.system('chmod 755 /usr/local/bin/emake.py')
os.system('chmod 755 /usr/local/bin/emake')
os.system('chown root /usr/local/bin/emake.py 2> /dev/null')
os.system('chown root /usr/local/bin/emake 2> /dev/null')
print 'install completed. you can uninstall by deleting the following two files:'
print '/usr/local/bin/emake.py'
print '/usr/local/bin/emake'
return 0
__updated_files = {}
def __update_file(name, content):
source = ''
name = os.path.abspath(name)
if name in __updated_files:
return 0
__updated_files[name] = 1
try:
fp = open(name, 'r')
source = fp.read()
fp.close()
except:
source = ''
if content == source:
print '%s up-to-date'%name
return 0
try:
fp = open(name, 'w')
fp.write(content)
fp.close()
except:
print 'cannot write to %s'%name
return -1
print '%s update succeeded'%name
return 1
def getemake():
import urllib2
url1 = 'http://skywind3000.github.io/emake/emake.py'
url2 = 'http://www.skywind.me/php/getemake.php'
success = True
content = ''
for url in (url1, url2):
print 'fetching', url, ' ...',
sys.stdout.flush()
success = True
try:
content = urllib2.urlopen(url).read()
except urllib2.URLError, e:
success = False
print 'failed '
print e
head = content.split('\n')[0].strip('\r\n\t ')
if head[:22] != '#! /usr/bin/env python':
if success:
print 'error'
success = False
if success:
print 'ok'
return content
return ''
def update():
content = getemake()
if not content:
print 'update failed'
return -1
name1 = os.path.abspath(sys.argv[0])
name2 = '/usr/local/bin/emake.py'
name3 = '/usr/local/bin/emake'
__update_file(name1, content)
if sys.platform[:3] == 'win':
return 0
r1 = __update_file(name2, content)
r2 = __update_file(name3, content)
if r1 > 0:
os.system('chmod 755 /usr/local/bin/emake.py')
os.system('chown root /usr/local/bin/emake.py 2> /dev/null')
if r2 > 0:
os.system('chmod 755 /usr/local/bin/emake')
os.system('chown root /usr/local/bin/emake 2> /dev/null')
print 'update finished!'
return 0
def help():
print "Emake 3.6.9 Dec.24 2017"
print "By providing a completely new way to build your projects, Emake"
print "is an easy tool that controls the generation of executables and other"
print "non-source files of a program from the program's source files. "
return 0
#----------------------------------------------------------------------
# extract param
#----------------------------------------------------------------------
def extract(parameter):
if parameter[:2] != '${' or parameter[-1:] != '}':
return parameter
data = parameter[2:-1]
pos = data.find(':')
if pos < 0:
return parameter
fname, cname = data[:pos], data[pos + 1:]
if not os.path.exists(fname):
return parameter
parser = iparser()
command = parser._scan_memo(fname)
value = ''
for lineno, text in command:
pos = text.find(':')
if pos >= 0:
name, data = text[:pos], text[pos + 1:]
name = name.strip('\r\n\t ')
if name == cname:
value = data.strip('\r\n\t ')
return value
#----------------------------------------------------------------------
# main program
#----------------------------------------------------------------------
def main(argv = None):
# using psyco to speed up
_psyco_speedup()
# create main object
make = emake()
if argv == None:
argv = sys.argv
args = argv
argv = argv[:1]
options = {}
for arg in args[1:]:
if arg[:2] != '--':
argv.append(arg)
continue
key = arg[2:].strip('\r\n\t ')
val = None
p1 = key.find('=')
if p1 >= 0:
val = key[p1 + 1:].strip('\r\n\t')
key = key[:p1].strip('\r\n\t')
options[key] = val
inipath = ''
if options.get('cfg', None) is not None:
cfg = options['cfg']
cfg = os.path.expanduser('~/.config/emake/%s.ini'%cfg)
if not 'ini' in options:
options['ini'] = cfg
if options.get('ini', None) is not None:
inipath = options['ini']
if '~' in inipath:
inipath = os.path.expanduser(inipath)
inipath = os.path.abspath(inipath)
if len(argv) <= 1:
version = '(emake 3.6.9 Dec.21 2017 %s)'%sys.platform
print 'usage: "emake.py [option] srcfile" %s'%version
print 'options : -b | -build build project'
print ' -c | -compile compile project'
print ' -l | -link link project'
print ' -r | -rebuild rebuild project'
print ' -e | -execute execute project'
print ' -o | -out show output file name'
print ' -d | -cmdline call cmdline tool in given environ'
if sys.platform[:3] == 'win':
print ' -g | -cygwin cygwin execute'
print ' -s | -cshell cygwin shell'
print ' -i | -install install emake on unix'
print ' -u | -update update itself from github'
print ' -h | -help show help page'
return 0
if os.path.exists(inipath):
global INIPATH
INIPATH = inipath
elif inipath:
sys.stderr.write('error: cannot find %s\n'%inipath)
sys.stderr.flush()
return -1
if argv[1] == '-check':
make.config.init()
make.config.check()
dirhome = make.config.dirhome
print 'home:', dirhome
print 'gcc:', os.path.join(dirhome, make.config.exename['gcc'])
print 'name:', make.config.name.keys()
print 'target:', make.config.target
return 0
cmd, name = 'build', ''
if len(argv) == 2:
name = argv[1].strip(' ')
if name in ('-i', '-install'):
install()
return 0
if name in ('-u', '-update'):
update()
return 0
if name in ('-h', '-help'):
help()
return 0
if len(argv) <= 3:
if name in ('-d', '-cmdline'):
print 'usage: emake.py -cmdline envname exename [parameters]'
print 'call the cmdline tool in the given environment:'
print '- envname is a section name in emake.ini which defines environ for this tool'
print '- exename is the tool\'s executable file name'
return 0
if len(argv) >= 3:
cmd = argv[1].strip(' ').lower()
name = argv[2]
else:
if name[:1] == '-':
print 'not enough parameters: %s'%name
return 0
printmode = 3
def int_safe(text, defval):
num = defval
try: num = int(text)
except: pass
return num
def bool_safe(text, defval):
if text is None:
return True
if text.lower() in ('true', '1', 'yes'):
return True
if text.lower() in ('0', 'false', 'no'):
return False
return defval
if 'cpu' in options:
make.cpus = int_safe(options['cpu'], 1)
if 'print' in options:
printmode = int_safe(options['print'], 3)
if 'abs' in options:
CFG['abspath'] = bool_safe(options['abs'], True)
ext = os.path.splitext(name)[-1].lower()
ft1 = ('.c', '.cpp', '.cxx', '.cc', '.m', '.mm')
ft2 = ('.h', '.hpp', '.hxx', '.hh', '.inc')
ft3 = ('.mak', '.em', '.emk', '.py', '.pyx')
if cmd in ('-d', '-cmdline', '-m'):
config = configure()
config.init()
argv += ['', '', '', '', '']
envname = argv[2]
exename = argv[3]
parameters = ''
for n in [ argv[i] for i in xrange(4, len(argv)) ]:
if cmd in ('-m',):
if n[:2] == '${' and n[-1:] == '}':
n = extract(n)
if not n: continue
if config.unix:
n = n.replace('\\', '\\\\').replace('"', '\\"')
n = n.replace("'", "\\'").replace(' ', '\\ ')
n = n.replace('\t', '\\t')
else:
if ' ' in n:
n = '"' + n + '"'
parameters += n + ' '
config.cmdtool(envname, exename, parameters)
return 0
if cmd in ('-g', '-cygwin'):
config = configure()
config.init()
if not config.cygwin:
print 'cannot find "cygwin" in the "default" section of %s'%config.ininame
sys.exit()
argv += ['', '', '', '', '']
envname = argv[2]
exename = argv[3]
parameters = ''
for n in [ argv[i] for i in xrange(4, len(argv)) ]:
if ' ' in n: n = '"' + n + '"'
parameters += n + ' '
config.cygwin_execute(envname, exename, parameters)
return 0
if cmd in ('-s', '-cshell'):
config = configure()
config.init()
if not config.cygwin:
print 'cannot find "cygwin" in the "default" section of %s'%config.ininame
sys.exit()
argv += ['', '', '', '', '']
envname = argv[2]
exename = argv[3]
parameters = ''
for n in [ argv[i] for i in xrange(4, len(argv)) ]:
if ' ' in n: n = '"' + n + '"'
parameters += n + ' '
cmds = '"%s" %s'%(exename, parameters)
config.cygwin_execute(envname, '', cmds)
return 0
if cmd == '-dump':
if not name: name = '.'
if not os.path.exists(name):
print 'cannot read: %s'%name
return -1
for root, dirs, files in os.walk(name):
for fn in files:
if os.path.splitext(fn)[-1].lower() in ('.c', '.cpp', '.cc'):
xp = os.path.join(root, fn)
if sys.platform[:3] == 'win':
xp = xp.replace('\\', '/')
if xp[:2] == './':
xp = xp[2:]
print 'src: ' + xp
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
if '.svn' in dirs:
dirs.remove('.svn')
if '.git' in dirs:
dirs.remove('.git')
return 0
if not ((ext in ft1) or (ext in ft3)):
sys.stderr.write('error: %s: unsupported file type\n'%(name))
sys.stderr.flush()
return -1
retval = 0
if cmd in ('b', '-b', 'build', '-build'):
make.open(name)
retval = make.build(printmode)
elif cmd in ('c', '-c', 'compile', '-compile'):
make.open(name)
retval = make.compile(printmode)
elif cmd in ('l', '-l', 'link', '-link'):
make.open(name)
retval = make.link(printmode)
elif cmd in ('clean', '-clean'):
make.open(name)
retval = make.clean()
elif cmd in ('r', '-r', 'rebuild', '-rebuild'):
make.open(name)
retval = make.rebuild(printmode)
elif cmd in ('e', '-e', 'execute', '-execute'):
make.open(name)
retval = make.execute()
elif cmd in ('a', '-a', 'call', '-call'):
make.open(name)
retval = make.call(' '.join(argv[3:]))
elif cmd in ('o', '-o', 'out', '-out'):
make.open(name)
make.info('outname')
elif cmd in ('dirty', '-dirty'):
make.open(name)
make.info('dirty')
elif cmd in ('list', '-list'):
make.open(name)
make.info('list')
elif cmd in ('home', '-home'):
make.open(name)
make.info('home')
else:
sys.stderr.write('unknown command: %s\n'%cmd)
sys.stderr.flush()
retval = 127
return retval
#----------------------------------------------------------------------
# testing case
#----------------------------------------------------------------------
if __name__ == '__main__':
def test1():
make = coremake()
name = 'e:/zombie/demo01.c'
make.mkdir(r'e:\lab\malloc\obj list')
make.mkdir(r'e:\lab\malloc\abc c\01 2\3 4\5\6')
make.init('mainmod', 'exe', 'malloc\obj')
make.push('malloc/main.c')
make.push('malloc/mod1.c')
make.push('malloc/mod2.c')
make.push('malloc/mod3.c')
make.build(printmode = 7)
print os.path.getmtime('malloc/main.c')
def test2():
pst = preprocessor()
head, lost, text = pst.dependence('voice/fastvoice/basewave.cpp')
for n in head: print n
pp = pst.preprocess(file('voice/fastvoice/basewave.cpp', 'U').read())
print pp
def test3():
parser = iparser()
parser._pragma_scan('malloc/main.c')
def test4():
parser = iparser()
cmaker = coremake()
parser.parse('malloc/main.c')
print '"%s", "%s", "%s"'%(parser.out, parser.int, parser.mode)
print parser.home, parser.name
for n in parser:
print 'src:', n, '->', cmaker.objname(n, ''), parser[n]
def test5():
parser = iparser()
parser.parse('malloc/main.c')
dep = dependence(parser)
dep.process()
def test6():
make = emake()
make.open('malloc/main.c')
make.clean()
make.build(3)
def test7():
config = configure()
config.init()
print config.checklib('liblinwei.a')
print config.checklib('winmm')
print config.checklib('pixia')
config.push_lib('d:/dev/local/lib')
print config.checklib('pixia')
def test8():
sys.argv = [sys.argv[0], '-d', 'msvc', 'cl.exe', '-help' ]
sys.argv = [sys.argv[0], '-r', 'd:/acm/aprcode/pixellib/PixelBitmap.cpp' ]
main()
#os.chdir('d:/acm/aprcode/pixellib/')
#os.system('d:/dev/python27/python.exe d:/acm/opensrc/easymake/testing/emake.py -r PixelBitmap.cpp')
def test9():
sys.argv = ['emake.py', '-t', 'msvc', 'cl.exe', '-help' ]
sys.argv = [sys.argv[0], '-t', 'watcom', 'wcl386.exe', '-help' ]
main()
def test10():
sys.argv = [sys.argv[0], '-g', 'default', 'd:/dev/flash/alchemy5/tutorials/01_HelloWorld/hello.exe', '--version']
main()
#test10()
sys.exit( main() )
#install()
|
skywind3000/collection
|
script/emake.py
|
Python
|
mit
| 95,752
|
[
"VisIt"
] |
b492e8440595e2528427cd6d75ed0ed9d3fc80dae64616b51fb5750ec59ace58
|
'''
***************************************************************.
GOOGLE PHONE TECHNICAL INTERVIEW TIPS:
Please be prepared for the engineers to ask you questions in the following areas:
- Google products (i.e. what you use)
- Coding ability (you will code in a Google Doc)
- Algorithm Design/Analysis.
- System Design
The links below may help you prepare for your interview:
Interviewing at Google - http://www.youtube.com/watch?v=w887NIa_V9w
Google Products - http://www.google.com/intl/en/options/
Five Essential Phone Screen Questions by Steve Yegge (Google Engineer) - https://sites.google.com/site/st ... ne-screen-questions
Types of algorithm questions Google asks: TopCoder Tutorials - http://www.topcoder.com/tc?modul ... ls&d2=alg_index
The Official Google Blog: “Baby steps to a new job” by Gretta Cook (Google Engineer)
http://googleblog.blogspot.com/2008/01/baby-steps-to-new-job.html
“How to Get Hired” by Dan Kegel (Google Engineer)
http://www.kegel.com/academy/getting-hired.html
BOOKS:
(#2 was highly recommended by several engineers and quite representative of the types of coding questions asked)
1. Review of Basic Algorithms: Introduction to the Design and Analysis of Algorithms by Anany Levitin
2. Types of coding questions Google asks: Programming Interviews Exposed; Secrets to Landing Your Next Job (Programmer to Programmer)
by John Mongan, Noah Suojanen,
and Eric Giguere.
TIPS DIRECTLY FROM OUR ENGINEERS:
One of our engineers drafted this overview of the main areas software engineers should prepare in order to have a successful interview with Google:
1.) Algorithm Complexity: You need to know Big-O. If you struggle with basic big-O complexity analysis, then you are almost guaranteed
not to get hired.
For more information on Algorithms you can visit:
http://www.topcoder.com/tc?modul ... ls&d2=alg_index
2.) Coding: You should know at least one programming language really well, and it should preferably be C++ or Java. C# is OK too,
since it's pretty similar to Java.
You will be expected to write some code in at least some of your interviews. You will be expected to know a fair amount of detail
about your favorite programming language.
*Strongly recommended* for information on Coding: Programming Interviews Exposed; Secrets to landing your next job by John Monagan
and Noah Suojanen (Wiley Computer Publishing)
http://www.wiley.com/WileyCDA/Wi ... tCd-047012167X.html
3.) System Design: http://research.google.com/pubs/ ... allelComputing.html
Google File System http://research.google.com/archive/gfs.html
Google Bigtable http://research.google.com/archive/bigtable.html
Google MapReduce http://research.google.com/archive/mapreduce.html
4.) Sorting: Know how to sort. Don't do bubble-sort. You should know the details of at least one n*log(n) sorting algorithm,
preferably two (say, quicksort and merge sort). Merge sort can be highly useful in situations where quicksort is impractical,
so take a look at it (a short merge-sort sketch appears after this list).
5.) Hashtables: Arguably the single most important data structure known to mankind. You absolutely should know how they work.
Be able to implement one using only arrays in your favorite language, in about the space of one interview (an array-based sketch appears after this list).
6.) Trees: Know about trees; basic tree construction, traversal and manipulation algorithms. Familiarize yourself with binary
trees, n-ary trees, and trie-trees. Be familiar with at least one type of balanced binary tree, whether it's a red/black tree,
a splay tree or an AVL tree, and know how it's implemented. Understand tree traversal algorithms: BFS and DFS, and know the
difference between inorder, postorder and preorder (a traversal sketch appears after this list).
7.) Graphs: Graphs are really important at Google. There are 3 basic ways to represent a graph in memory (objects and pointers,
matrix, and adjacency list); familiarize yourself with each representation and its pros & cons. You should know the basic graph
traversal algorithms: breadth-first search and depth-first search. Know their computational complexity, their tradeoffs, and
how to implement them in real code. If you get a chance, try to study up on fancier algorithms, such as Dijkstra and A* (an adjacency-list example appears after this list).
8.) Other data structures: You should study up on as many other data structures and algorithms as possible. You should
especially know about the most famous classes of NP-complete problems, such as traveling salesman and the knapsack problem,
and be able to recognize them when an interviewer asks you them in disguise. Find out what NP-complete means.
9.) Mathematics: Some interviewers ask basic discrete math questions. This is more prevalent at Google than at other companies
because we are surrounded by counting problems, probability problems, and other Discrete Math 101 situations. Spend some time
before the interview refreshing your memory on (or teaching yourself) the essentials of combinatorics and probability.
You should be familiar with n-choose-k problems and their ilk – the more the better (an n-choose-k sketch appears after this list).
10.) Operating Systems: Know about processes, threads and concurrency issues. Know about locks and mutexes and semaphores
and monitors and how they work. Know about deadlock and livelock and how to avoid them. Know what resources a process needs,
and a thread needs, and how context switching works, and how it's initiated by the operating system and underlying hardware.
Know a little about scheduling. The world is rapidly moving towards multi-core, so know the fundamentals of "modern" concurrency
constructs (a small lock-ordering sketch appears after this list).
For information on System Design:
http://research.google.com/pubs/ ... allelComputing.html
11.) Also, you can review some of our research publications: http://research.google.com/pubs/papers.html
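A minimal merge-sort sketch for point 4 (one possible n*log(n) implementation in plain Python, standard library only; the sample list is made up):

def merge_sort(items):
    if len(items) <= 1:
        return items
    mid = len(items) // 2
    left = merge_sort(items[:mid])
    right = merge_sort(items[mid:])
    # merge the two sorted halves
    merged, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged

print(merge_sort([5, 2, 9, 1, 5, 6]))   # [1, 2, 5, 5, 6, 9]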
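A minimal array-only hash table for point 5 (a sketch using separate chaining over plain Python lists; hash() is the only helper assumed):

class ArrayHashTable(object):
    def __init__(self, nbuckets=16):
        self.buckets = [[] for _ in range(nbuckets)]
    def _bucket(self, key):
        return self.buckets[hash(key) % len(self.buckets)]
    def put(self, key, value):
        bucket = self._bucket(key)
        for i, (k, _) in enumerate(bucket):
            if k == key:
                bucket[i] = (key, value)   # overwrite an existing key
                return
        bucket.append((key, value))
    def get(self, key, default=None):
        for k, v in self._bucket(key):
            if k == key:
                return v
        return default

table = ArrayHashTable()
table.put('answer', 42)
print(table.get('answer'))   # 42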
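A minimal binary-tree traversal sketch for point 6 (BFS plus recursive inorder on a tiny hand-built tree):

from collections import deque

class Node(object):
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right

def bfs(root):
    # level-order traversal using a queue
    order, queue = [], deque([root])
    while queue:
        node = queue.popleft()
        if node is not None:
            order.append(node.value)
            queue.append(node.left)
            queue.append(node.right)
    return order

def inorder(node):
    # left subtree, node, right subtree
    if node is None:
        return []
    return inorder(node.left) + [node.value] + inorder(node.right)

root = Node(2, Node(1), Node(3))
print(bfs(root))      # [2, 1, 3]
print(inorder(root))  # [1, 2, 3]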
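A minimal adjacency-list graph with BFS and DFS for point 7 (the example graph is made up purely for illustration):

from collections import deque

graph = {                 # adjacency-list representation
    'a': ['b', 'c'],
    'b': ['d'],
    'c': ['d'],
    'd': [],
}

def graph_bfs(graph, start):
    seen, order, queue = set([start]), [], deque([start])
    while queue:
        node = queue.popleft()
        order.append(node)
        for nxt in graph[node]:
            if nxt not in seen:
                seen.add(nxt)
                queue.append(nxt)
    return order

def graph_dfs(graph, start, seen=None):
    if seen is None:
        seen = set()
    seen.add(start)
    order = [start]
    for nxt in graph[start]:
        if nxt not in seen:
            order.extend(graph_dfs(graph, nxt, seen))
    return order

print(graph_bfs(graph, 'a'))   # ['a', 'b', 'c', 'd']
print(graph_dfs(graph, 'a'))   # ['a', 'b', 'd', 'c']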
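A minimal n-choose-k sketch for point 9 (multiplicative formula; every intermediate value stays an integer, so no factorials or floats are needed):

def n_choose_k(n, k):
    if k < 0 or k > n:
        return 0
    k = min(k, n - k)            # C(n, k) == C(n, n - k)
    result = 1
    for i in range(1, k + 1):
        result = result * (n - k + i) // i
    return result

print(n_choose_k(5, 2))    # 10
print(n_choose_k(52, 5))   # 2598960 possible 5-card poker hands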
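A minimal lock-ordering sketch for point 10 (acquiring locks in one fixed global order is a simple way to avoid the classic two-lock deadlock):

import threading

lock_a = threading.Lock()
lock_b = threading.Lock()

def worker():
    # every thread takes lock_a before lock_b; mixing the order across
    # threads is the textbook deadlock recipe
    with lock_a:
        with lock_b:
            pass   # critical section touching both shared resources

threads = [threading.Thread(target=worker) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print('done without deadlock')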
'''
http://www.1point3acres.com/bbs/forum.php?mod=viewthread&tid=97950&extra=page%3D1%26filter%3Dsortid%26sortid%3D311%26sortid%3D311
|
UmassJin/Leetcode
|
Experience/May/Google_Interview_prepare_lists(recruiter).py
|
Python
|
mit
| 11,670
|
[
"VisIt"
] |
a0dc36491de276d4cd2ace0b8974e63a09e896bd29f436f77e87485b9cf9c7ac
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Classes for reading/manipulating/writing VASP input files. All major VASP input
files.
"""
__author__ = "Shyue Ping Ong, Geoffroy Hautier, Rickard Armiento, " + \
"Vincent L Chevrier, Stephen Dacek"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Jul 16, 2012"
import os
import re
import itertools
import warnings
import logging
import six
import numpy as np
from numpy.linalg import det
from collections import OrderedDict, namedtuple
from hashlib import md5
from monty.io import zopen
from monty.os.path import zpath
from monty.json import MontyDecoder
from pymatgen.core.lattice import Lattice
from pymatgen.core.physical_constants import BOLTZMANN_CONST
from pymatgen.core.design_patterns import Enum
from pymatgen.core.structure import Structure
from pymatgen.core.periodic_table import Element, get_el_sp
from monty.design_patterns import cached_class
from pymatgen.util.string_utils import str_aligned, str_delimited
from pymatgen.util.io_utils import clean_lines
from pymatgen.serializers.json_coders import PMGSONable
logger = logging.getLogger(__name__)
class Poscar(PMGSONable):
"""
Object for representing the data in a POSCAR or CONTCAR file.
Please note that, in the current implementation, most attributes can be set
directly.
Args:
structure (Structure): Structure object.
comment (str): Optional comment line for POSCAR. Defaults to unit
cell formula of structure. Defaults to None.
selective_dynamics (Nx3 array): bool values for selective dynamics,
where N is number of sites. Defaults to None.
true_names (bool): Set to False if the names in the POSCAR are not
well-defined and ambiguous. This situation arises commonly in
vasp < 5 where the POSCAR sometimes does not contain element
symbols. Defaults to True.
velocities (Nx3 array): Velocities for the POSCAR. Typically parsed
in MD runs or can be used to initialize velocities.
predictor_corrector (Nx3 array): Predictor corrector for the POSCAR.
Typically parsed in MD runs.
.. attribute:: structure
Associated Structure.
.. attribute:: comment
Optional comment string.
.. attribute:: true_names
Boolean indicating whether the Poscar contains actual real names parsed
from either a POTCAR or the POSCAR itself.
.. attribute:: selective_dynamics
Selective dynamics attribute for each site if available. An Nx3 array of
booleans.
.. attribute:: velocities
Velocities for each site (typically read in from a CONTCAR). An Nx3
array of floats.
.. attribute:: predictor_corrector
Predictor corrector coordinates for each site (typically read in from a
MD CONTCAR).
.. attribute:: temperature
Temperature of velocity Maxwell-Boltzmann initialization. Initialized
to -1 (MB initialization hasn't been performed).
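Minimal usage sketch (illustrative only; assumes pymatgen is installed and
that a valid POSCAR/CONTCAR exists at the hypothetical path "POSCAR"; only
methods defined in this class are used)::

    poscar = Poscar.from_file("POSCAR")
    poscar.site_symbols              # e.g. ['Fe', 'O']
    poscar.natoms                    # e.g. [2, 3]
    poscar.get_string(direct=True)   # round-trips back to POSCAR text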
"""
def __init__(self, structure, comment=None, selective_dynamics=None,
true_names=True, velocities=None, predictor_corrector=None):
if structure.is_ordered:
self.structure = structure
self.true_names = true_names
self.selective_dynamics = selective_dynamics
self.comment = structure.formula if comment is None else comment
self.velocities = velocities
self.predictor_corrector = predictor_corrector
else:
raise ValueError("Structure with partial occupancies cannot be "
"converted into POSCAR!")
self.temperature = -1
@property
def site_symbols(self):
"""
Sequence of symbols associated with the Poscar. Similar to 6th line in
vasp 5+ POSCAR.
"""
syms = [site.specie.symbol for site in self.structure]
return [a[0] for a in itertools.groupby(syms)]
@property
def natoms(self):
"""
Sequence of number of sites of each type associated with the Poscar.
Similar to 7th line in vasp 5+ POSCAR or the 6th line in vasp 4 POSCAR.
"""
syms = [site.specie.symbol for site in self.structure]
return [len(tuple(a[1])) for a in itertools.groupby(syms)]
def __setattr__(self, name, value):
if name in ("selective_dynamics", "velocities"):
if value is not None and len(value) > 0:
value = np.array(value)
dim = value.shape
if dim[1] != 3 or dim[0] != len(self.structure):
raise ValueError(name + " array must be same length as" +
" the structure.")
value = value.tolist()
elif name == "structure":
#If we set a new structure, we should discard the velocities and
#predictor_corrector and selective dynamics.
self.velocities = None
self.predictor_corrector = None
self.selective_dynamics = None
super(Poscar, self).__setattr__(name, value)
@staticmethod
def from_file(filename, check_for_POTCAR=True):
"""
Reads a Poscar from a file.
The code will try its best to determine the elements in the POSCAR in
the following order:
1. If check_for_POTCAR is True, the code will try to check if a POTCAR
is in the same directory as the POSCAR and use elements from that by
default. (This is the VASP default sequence of priority).
2. If the input file is Vasp5-like and contains element symbols in the
6th line, the code will use that if check_for_POTCAR is False or there
is no POTCAR found.
3. Failing (2), the code will check if a symbol is provided at the end
of each coordinate.
If all else fails, the code will just assign the first n elements in
increasing atomic number, where n is the number of species, to the
Poscar. For example, H, He, Li, .... This will ensure at least a
unique element is assigned to each site and any analysis that does not
require specific elemental properties should work fine.
Args:
filename (str): File name containing Poscar data.
check_for_POTCAR (bool): Whether to check if a POTCAR is present
in the same directory as the POSCAR. Defaults to True.
Returns:
Poscar object.
"""
dirname = os.path.dirname(os.path.abspath(filename))
names = None
if check_for_POTCAR:
for f in os.listdir(dirname):
if f == "POTCAR":
try:
potcar = Potcar.from_file(os.path.join(dirname, f))
names = [sym.split("_")[0] for sym in potcar.symbols]
[get_el_sp(n) for n in names] # ensure valid names
except:
names = None
with zopen(filename, "rt") as f:
return Poscar.from_string(f.read(), names)
@staticmethod
def from_string(data, default_names=None):
"""
Reads a Poscar from a string.
The code will try its best to determine the elements in the POSCAR in
the following order:
1. If default_names are supplied and valid, it will use those. Usually,
default names comes from an external source, such as a POTCAR in the
same directory.
2. If there are no valid default names but the input file is Vasp5-like
and contains element symbols in the 6th line, the code will use that.
3. Failing (2), the code will check if a symbol is provided at the end
of each coordinate.
If all else fails, the code will just assign the first n elements in
increasing atomic number, where n is the number of species, to the
Poscar. For example, H, He, Li, .... This will ensure at least a
unique element is assigned to each site and any analysis that does not
require specific elemental properties should work fine.
Args:
data (str): String containing Poscar data.
default_names ([str]): Default symbols for the POSCAR file,
usually coming from a POTCAR in the same directory.
Returns:
Poscar object.
"""
# "^\s*$" doesn't match lines with no whitespace
chunks = re.split("\n\s*\n", data.rstrip(), flags=re.MULTILINE)
try:
if chunks[0] == "":
chunks.pop(0)
chunks[0] = "\n" + chunks[0]
except IndexError:
raise ValueError("Empty POSCAR")
#Parse positions
lines = tuple(clean_lines(chunks[0].split("\n"), False))
comment = lines[0]
scale = float(lines[1])
lattice = np.array([[float(i) for i in line.split()]
for line in lines[2:5]])
if scale < 0:
# In vasp, a negative scale factor is treated as a volume. We need
# to translate this to a proper lattice vector scaling.
vol = abs(det(lattice))
lattice *= (-scale / vol) ** (1 / 3)
else:
lattice *= scale
vasp5_symbols = False
try:
natoms = [int(i) for i in lines[5].split()]
ipos = 6
except ValueError:
vasp5_symbols = True
symbols = lines[5].split()
natoms = [int(i) for i in lines[6].split()]
atomic_symbols = list()
for i in range(len(natoms)):
atomic_symbols.extend([symbols[i]] * natoms[i])
ipos = 7
postype = lines[ipos].split()[0]
sdynamics = False
# Selective dynamics
if postype[0] in "sS":
sdynamics = True
ipos += 1
postype = lines[ipos].split()[0]
cart = postype[0] in "cCkK"
nsites = sum(natoms)
# If default_names is specified (usually coming from a POTCAR), use
# them. This is in line with VASP's parsing order, where the specified
# POTCAR is the default used.
if default_names:
try:
atomic_symbols = []
for i in range(len(natoms)):
atomic_symbols.extend([default_names[i]] * natoms[i])
vasp5_symbols = True
except IndexError:
pass
if not vasp5_symbols:
ind = 3 if not sdynamics else 6
try:
# Check if names are appended at the end of the coordinates.
atomic_symbols = [l.split()[ind]
for l in lines[ipos + 1:ipos + 1 + nsites]]
# Ensure symbols are valid elements
if not all([Element.is_valid_symbol(sym)
for sym in atomic_symbols]):
raise ValueError("Non-valid symbols detected.")
vasp5_symbols = True
except (ValueError, IndexError):
# Defaulting to false names.
atomic_symbols = []
for i in range(len(natoms)):
sym = Element.from_Z(i + 1).symbol
atomic_symbols.extend([sym] * natoms[i])
warnings.warn("Elements in POSCAR cannot be determined. "
"Defaulting to false names {}."
.format(" ".join(atomic_symbols)))
# read the atomic coordinates
coords = []
selective_dynamics = list() if sdynamics else None
for i in range(nsites):
toks = lines[ipos + 1 + i].split()
crd_scale = scale if cart else 1
coords.append([float(j) * crd_scale for j in toks[:3]])
if sdynamics:
selective_dynamics.append([tok.upper()[0] == "T"
for tok in toks[3:6]])
struct = Structure(lattice, atomic_symbols, coords,
to_unit_cell=False, validate_proximity=False,
coords_are_cartesian=cart)
# Parse velocities if any
velocities = []
if len(chunks) > 1:
for line in chunks[1].strip().split("\n"):
velocities.append([float(tok) for tok in line.split()])
predictor_corrector = []
if len(chunks) > 2:
lines = chunks[2].strip().split("\n")
predictor_corrector.append([int(lines[0])])
for line in lines[1:]:
predictor_corrector.append([float(tok)
for tok in line.split()])
return Poscar(struct, comment, selective_dynamics, vasp5_symbols,
velocities=velocities,
predictor_corrector=predictor_corrector)
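# Minimal parsing sketch (illustrative data only, not from the original source;
# a Vasp5-style POSCAR with symbols on the 6th line):
#   poscar = Poscar.from_string(
#       "Si2\n1.0\n3.84 0.0 0.0\n0.0 3.84 0.0\n0.0 0.0 3.84\n"
#       "Si\n2\ndirect\n0.0 0.0 0.0\n0.5 0.5 0.5")
#   # the symbols on line 6 are used, so the parsed names are treated as true names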
def get_string(self, direct=True, vasp4_compatible=False,
significant_figures=6):
"""
Returns a string to be written as a POSCAR file. By default, site
symbols are written, which means compatibility is for vasp >= 5.
Args:
direct (bool): Whether coordinates are output in direct or
cartesian. Defaults to True.
vasp4_compatible (bool): Set to True to omit site symbols on 6th
line to maintain backward vasp 4.x compatibility. Defaults
to False.
significant_figures (int): Number of significant figures with
which to output all quantities. Defaults to 6. Note that
positions are output in fixed point, while velocities are
output in scientific format.
Returns:
String representation of POSCAR.
"""
# This corrects for VASP's really annoying bug of crashing on lattices
# whose triple product is < 0. We simply invert the lattice
# vectors.
latt = self.structure.lattice
if np.linalg.det(latt.matrix) < 0:
latt = Lattice(-latt.matrix)
lines = [self.comment, "1.0", str(latt)]
if self.true_names and not vasp4_compatible:
lines.append(" ".join(self.site_symbols))
lines.append(" ".join([str(x) for x in self.natoms]))
if self.selective_dynamics:
lines.append("Selective dynamics")
lines.append("direct" if direct else "cartesian")
format_str = "{{:.{0}f}}".format(significant_figures)
for (i, site) in enumerate(self.structure):
coords = site.frac_coords if direct else site.coords
line = " ".join([format_str.format(c) for c in coords])
if self.selective_dynamics is not None:
sd = ["T" if j else "F" for j in self.selective_dynamics[i]]
line += " %s %s %s" % (sd[0], sd[1], sd[2])
line += " " + site.species_string
lines.append(line)
if self.velocities:
lines.append("")
for v in self.velocities:
lines.append(" ".join([format_str.format(i) for i in v]))
if self.predictor_corrector:
lines.append("")
lines.append(str(self.predictor_corrector[0][0]))
lines.append(str(self.predictor_corrector[1][0]))
for v in self.predictor_corrector[2:]:
lines.append(" ".join([format_str.format(i) for i in v]))
return "\n".join(lines) + "\n"
def __repr__(self):
return self.get_string()
def __str__(self):
"""
String representation of Poscar file.
"""
return self.get_string()
def write_file(self, filename, **kwargs):
"""
Writes POSCAR to a file. The supported kwargs are the same as those for
the Poscar.get_string method and are passed through directly.
"""
with zopen(filename, "wt") as f:
f.write(self.get_string(**kwargs))
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"true_names": self.true_names,
"selective_dynamics": np.array(
self.selective_dynamics).tolist(),
"velocities": self.velocities,
"predictor_corrector": self.predictor_corrector,
"comment": self.comment}
@classmethod
def from_dict(cls, d):
return Poscar(Structure.from_dict(d["structure"]),
comment=d["comment"],
selective_dynamics=d["selective_dynamics"],
true_names=d["true_names"],
velocities=d.get("velocities", None),
predictor_corrector=d.get("predictor_corrector", None))
def set_temperature(self, temperature):
"""
Initializes the velocities based on Maxwell-Boltzmann distribution.
Removes linear, but not angular drift (same as VASP)
Scales the energies to the exact temperature (microcanonical ensemble)
Velocities are given in A/fs. This is the vasp default when
direct/cartesian is not specified (even when positions are given in
direct coordinates)
Overwrites imported velocities, if any.
Args:
temperature (float): Temperature in Kelvin.
"""
# mean 0 variance 1
velocities = np.random.randn(len(self.structure), 3)
# atomic masses converted to kg, (N,) array
atomic_masses = np.array([site.specie.atomic_mass.to("kg")
for site in self.structure])
dof = 3 * len(self.structure) - 3
#scale velocities due to atomic masses
#mean 0 std proportional to sqrt(1/m)
velocities /= atomic_masses[:, np.newaxis] ** (1 / 2)
#remove linear drift (net momentum)
velocities -= np.average(atomic_masses[:, np.newaxis] * velocities,
axis=0) / np.average(atomic_masses)
#scale velocities to get correct temperature
energy = np.sum(1 / 2 * atomic_masses *
np.sum(velocities ** 2, axis=1))
scale = (temperature * dof / (2 * energy / BOLTZMANN_CONST)) ** (1 / 2)
velocities *= scale * 1e-5 # these are in A/fs
self.temperature = temperature
self.selective_dynamics = None
self.predictor_corrector = None
# returns as a list of lists to be consistent with the other
# initializations
self.velocities = velocities.tolist()
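# Illustrative sketch (assumes `poscar` is an existing Poscar instance; the
# temperature is an example value):
#   poscar.set_temperature(300)   # Maxwell-Boltzmann velocities at 300 K, in A/fs
#   poscar.velocities             # list of [vx, vy, vz] per site, net momentum removed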
class Incar(dict, PMGSONable):
"""
INCAR object for reading and writing INCAR files. Essentially consists of
a dictionary with some helper functions
"""
def __init__(self, params=None):
"""
Creates an Incar object.
Args:
params (dict): A set of input parameters as a dictionary.
"""
super(Incar, self).__init__()
if params:
self.update(params)
def __setitem__(self, key, val):
"""
Add parameter-val pair to Incar. Warns if parameter is not in list of
valid INCAR tags. Also cleans the parameter and val by stripping
leading and trailing white spaces.
"""
super(Incar, self).__setitem__(
key.strip(), Incar.proc_val(key.strip(), val.strip())
if isinstance(val, six.string_types) else val)
def as_dict(self):
d = dict(self)
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
return Incar({k: v for k, v in d.items() if k not in ("@module",
"@class")})
def get_string(self, sort_keys=False, pretty=False):
"""
Returns a string representation of the INCAR. The reason why this
method is different from the __str__ method is to provide options for
pretty printing.
Args:
sort_keys (bool): Set to True to sort the INCAR parameters
alphabetically. Defaults to False.
pretty (bool): Set to True for pretty aligned output. Defaults
to False.
"""
keys = self.keys()
if sort_keys:
keys = sorted(keys)
lines = []
for k in keys:
if k == "MAGMOM" and isinstance(self[k], list):
value = []
for m, g in itertools.groupby(self[k]):
value.append("{}*{}".format(len(tuple(g)), m))
lines.append([k, " ".join(value)])
elif isinstance(self[k], list):
lines.append([k, " ".join([str(i) for i in self[k]])])
else:
lines.append([k, self[k]])
if pretty:
return str_aligned(lines) + "\n"
else:
return str_delimited(lines, None, " = ") + "\n"
def __str__(self):
return self.get_string(sort_keys=True, pretty=False)
def write_file(self, filename):
"""
Write Incar to a file.
Args:
filename (str): filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
@staticmethod
def from_file(filename):
"""
Reads an Incar object from a file.
Args:
filename (str): Filename for file
Returns:
Incar object
"""
with zopen(filename, "rt") as f:
return Incar.from_string(f.read())
@staticmethod
def from_string(string):
"""
Reads an Incar object from a string.
Args:
string (str): Incar string
Returns:
Incar object
"""
lines = list(clean_lines(string.splitlines()))
params = {}
for line in lines:
m = re.match("(\w+)\s*=\s*(.*)", line)
if m:
key = m.group(1).strip()
val = m.group(2).strip()
val = Incar.proc_val(key, val)
params[key] = val
return Incar(params)
@staticmethod
def proc_val(key, val):
"""
Static helper method to convert INCAR parameters to proper types, e.g.,
integers, floats, lists, etc.
Args:
key: INCAR parameter key
val: Actual value of INCAR parameter.
"""
list_keys = ("LDAUU", "LDAUL", "LDAUJ", "MAGMOM")
bool_keys = ("LDAU", "LWAVE", "LSCALU", "LCHARG", "LPLANE",
"LHFCALC", "ADDGRID")
float_keys = ("EDIFF", "SIGMA", "TIME", "ENCUTFOCK", "HFSCREEN",
"POTIM", "EDIFFG")
int_keys = ("NSW", "NBANDS", "NELMIN", "ISIF", "IBRION", "ISPIN",
"ICHARG", "NELM", "ISMEAR", "NPAR", "LDAUPRINT", "LMAXMIX",
"ENCUT", "NSIM", "NKRED", "NUPDOWN", "ISPIND", "LDAUTYPE")
def smart_int_or_float(numstr):
if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
return float(numstr)
else:
return int(numstr)
try:
if key in list_keys:
output = []
toks = re.findall(r"(-?\d+\.?\d*)\*?(-?\d+\.?\d*)?", val)
for tok in toks:
if tok[1]:
output.extend([smart_int_or_float(tok[1])]
* int(tok[0]))
else:
output.append(smart_int_or_float(tok[0]))
return output
if key in bool_keys:
m = re.match(r"^\.?([T|F|t|f])[A-Za-z]*\.?", val)
if m:
if m.group(1) == "T" or m.group(1) == "t":
return True
else:
return False
raise ValueError(key + " should be a boolean type!")
if key in float_keys:
return float(re.search(r"^-?\d*\.?\d*[e|E]?-?\d*", val).group(0))
if key in int_keys:
return int(re.match(r"^-?[0-9]+", val).group(0))
except ValueError:
pass
#Not in standard keys. We will try a hierarchy of conversions.
try:
val = int(val)
return val
except ValueError:
pass
try:
val = float(val)
return val
except ValueError:
pass
if "true" in val.lower():
return True
if "false" in val.lower():
return False
try:
if key not in ("TITEL", "SYSTEM"):
return re.search(r"^-?[0-9]+", val.capitalize()).group(0)
else:
return val.capitalize()
except:
return val.capitalize()
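# Illustrative conversions performed by proc_val (input values are examples only):
#   Incar.proc_val("MAGMOM", "3*5 2*-1")   # -> [5, 5, 5, -1, -1]
#   Incar.proc_val("LDAU", ".TRUE.")       # -> True
#   Incar.proc_val("EDIFF", "1E-5")        # -> 1e-05
#   Incar.proc_val("NSW", "99")            # -> 99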
def diff(self, other):
"""
Diff function for Incar. Compares two Incars and indicates which
parameters are the same and which are not. Useful for checking whether
two runs were done using the same parameters.
Args:
other (Incar): The other Incar object to compare to.
Returns:
Dict of the following format:
{"Same" : parameters_that_are_the_same,
"Different": parameters_that_are_different}
Note that the parameters are returned as full dictionaries of values.
E.g. {"ISIF":3}
"""
similar_param = {}
different_param = {}
for k1, v1 in self.items():
if k1 not in other:
different_param[k1] = {"INCAR1": v1, "INCAR2": None}
elif v1 != other[k1]:
different_param[k1] = {"INCAR1": v1, "INCAR2": other[k1]}
else:
similar_param[k1] = v1
for k2, v2 in other.items():
if k2 not in similar_param and k2 not in different_param:
if k2 not in self:
different_param[k2] = {"INCAR1": None, "INCAR2": v2}
return {"Same": similar_param, "Different": different_param}
def __add__(self, other):
"""
Add all the values of another INCAR object to this object.
Facilitates the use of "standard" INCARs.
"""
params = {k: v for k, v in self.items()}
for k, v in other.items():
if k in self and v != self[k]:
raise ValueError("Incars have conflicting values!")
else:
params[k] = v
return Incar(params)
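# Illustrative sketch combining diff() and __add__ (parameter values are examples):
#   base = Incar({"EDIFF": 1e-5, "ISMEAR": 0})
#   combined = base + Incar({"NSW": 99})      # {"EDIFF": 1e-05, "ISMEAR": 0, "NSW": 99}
#   base.diff(Incar({"EDIFF": 1e-5, "NSW": 99}))
#   # -> {"Same": {"EDIFF": 1e-05},
#   #     "Different": {"ISMEAR": {"INCAR1": 0, "INCAR2": None},
#   #                   "NSW": {"INCAR1": None, "INCAR2": 99}}}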
class Kpoints(PMGSONable):
"""
KPOINT reader/writer.
"""
supported_modes = Enum(("Gamma", "Monkhorst", "Automatic", "Line_mode",
"Cartesian", "Reciprocal"))
def __init__(self, comment="Default gamma", num_kpts=0,
style=supported_modes.Gamma,
kpts=((1, 1, 1),), kpts_shift=(0, 0, 0),
kpts_weights=None, coord_type=None, labels=None,
tet_number=0, tet_weight=0, tet_connections=None):
"""
Highly flexible constructor for Kpoints object. The flexibility comes
at the cost of usability, and in general it is recommended that you use
the default constructor only if you know exactly what you are doing and
require the flexibility. For most use cases, the three automatic
schemes can be constructed far more easily using the convenience static
constructors (automatic, gamma_automatic, monkhorst_automatic), and it
is recommended that you use those.
Args:
comment (str): String comment for Kpoints
num_kpts: Following VASP method of defining the KPOINTS file, this
parameter is the number of kpoints specified. If set to 0
(or negative), VASP automatically generates the KPOINTS.
style: Style for generating KPOINTS. Use one of the
Kpoints.supported_modes enum types.
kpts (2D array): 2D array of kpoints. Even when only a single
specification is required, e.g. in the automatic scheme,
the kpts should still be specified as a 2D array. e.g.,
[[20]] or [[2,2,2]].
kpts_shift (3x1 array): Shift for Kpoints.
kpts_weights: Optional weights for kpoints. Weights should be
integers. For explicit kpoints.
coord_type: In line-mode, this variable specifies whether the
Kpoints were given in Cartesian or Reciprocal coordinates.
labels: In line-mode, this should provide a list of labels for
each k-point. In explicit k-point mode it is optional and serves
as comments for the k-points.
tet_number: For explicit kpoints, specifies the number of
tetrahedrons for the tetrahedron method.
tet_weight: For explicit kpoints, specifies the weight for each
tetrahedron for the tetrahedron method.
tet_connections: For explicit kpoints, specifies the connections
of the tetrahedrons for the tetrahedron method.
Format is a list of tuples, [ (sym_weight, [tet_vertices]),
...]
The default behavior of the constructor is for a Gamma centered,
1x1x1 KPOINTS with no shift.
"""
if num_kpts > 0 and (not labels) and (not kpts_weights):
raise ValueError("For explicit or line-mode kpoints, either the "
"labels or kpts_weights must be specified.")
if style in (Kpoints.supported_modes.Automatic,
Kpoints.supported_modes.Gamma,
Kpoints.supported_modes.Monkhorst) and len(kpts) > 1:
raise ValueError("For fully automatic or automatic gamma or monk "
"kpoints, only a single line for the number of "
"divisions is allowed.")
self.comment = comment
self.num_kpts = num_kpts
self.style = style
self.coord_type = coord_type
self.kpts = kpts
self.kpts_weights = kpts_weights
self.kpts_shift = kpts_shift
self.labels = labels
self.tet_number = tet_number
self.tet_weight = tet_weight
self.tet_connections = tet_connections
@staticmethod
def automatic(subdivisions):
"""
Convenient static constructor for a fully automatic Kpoint grid, with
gamma centered Monkhorst-Pack grids and the number of subdivisions
along each reciprocal lattice vector determined by the scheme in the
VASP manual.
Args:
subdivisions: Parameter determining number of subdivisions along
each reciprocal lattice vector.
Returns:
Kpoints object
"""
return Kpoints("Fully automatic kpoint scheme", 0,
style=Kpoints.supported_modes.Automatic,
kpts=[[subdivisions]])
@staticmethod
def gamma_automatic(kpts=(1, 1, 1), shift=(0, 0, 0)):
"""
Convenient static constructor for an automatic Gamma centered Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (1,1,1)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object
"""
return Kpoints("Automatic kpoint scheme", 0,
Kpoints.supported_modes.Gamma, kpts=[kpts],
kpts_shift=shift)
@staticmethod
def monkhorst_automatic(kpts=(2, 2, 2), shift=(0, 0, 0)):
"""
Convenient static constructor for an automatic Monkhorst pack Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (2,2,2)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object
"""
return Kpoints("Automatic kpoint scheme", 0,
Kpoints.supported_modes.Monkhorst, kpts=[kpts],
kpts_shift=shift)
@staticmethod
def automatic_density(structure, kppa, force_gamma=False):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes for hexagonal cells and
Monkhorst-Pack grids otherwise.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure (Structure): Input structure
kppa (int): Grid density
force_gamma (bool): Force a gamma centered mesh (default is to
use gamma only for hexagonal cells or odd meshes)
Returns:
Kpoints
"""
comment = "pymatgen generated KPOINTS with grid density = " + \
"{} / atom".format(kppa)
latt = structure.lattice
lengths = latt.abc
ngrid = kppa / structure.num_sites
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div = [int(round(mult / l)) for l in lengths]
if all([k <= 1 for k in num_div]):
return Kpoints(comment, 0, Kpoints.supported_modes.Gamma,
[[1, 1, 1]], [0, 0, 0])
#ensure that numDiv[i] > 0
num_div = [i if i > 0 else 1 for i in num_div]
is_hexagonal = latt.is_hexagonal()
# VASP documentation recommends using even grids for n <= 8 and odd
# grids for n > 8.
num_div = [i + i % 2 if i <= 8 else i - i % 2 + 1 for i in num_div]
has_odd = any([i % 2 == 1 for i in num_div])
if has_odd or is_hexagonal or force_gamma:
style = Kpoints.supported_modes.Gamma
else:
style = Kpoints.supported_modes.Monkhorst
return Kpoints(comment, 0, style, [num_div], [0, 0, 0])
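# Illustrative sketch (assumes `structure` is a pymatgen Structure; the grid
# density of 1000 kpts/atom is just an example value):
#   kpts = Kpoints.automatic_density(structure, 1000)
#   kpts_gamma = Kpoints.automatic_density(structure, 1000, force_gamma=True)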
@staticmethod
def automatic_gamma_density(structure, kppa):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes always. For GW.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure:
Input structure
kppa:
Grid density
"""
latt = structure.lattice
lengths = latt.abc
ngrid = kppa / structure.num_sites
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div = [int(round(mult / l)) for l in lengths]
#ensure that numDiv[i] > 0
num_div = [i if i > 0 else 1 for i in num_div]
# VASP documentation recommends using even grids for n <= 8 and odd
# grids for n > 8.
num_div = [i + i % 2 if i <= 8 else i - i % 2 + 1 for i in num_div]
style = Kpoints.supported_modes.Gamma
comment = "pymatgen generated KPOINTS with grid density = " + \
"{} / atom".format(kppa)
num_kpts = 0
return Kpoints(comment, num_kpts, style, [num_div], [0, 0, 0])
@staticmethod
def automatic_density_by_vol(structure, kppvol, force_gamma=False):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density per inverse Angstrom of reciprocal cell.
Algorithm:
Same as automatic_density()
Args:
structure (Structure): Input structure
kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell
force_gamma (bool): Force a gamma centered mesh
Returns:
Kpoints
"""
vol = structure.lattice.reciprocal_lattice.volume
kppa = int(round(kppvol * vol * structure.num_sites))
return Kpoints.automatic_density(structure, kppa, force_gamma=force_gamma)
@staticmethod
def automatic_linemode(divisions, ibz):
"""
Convenient static constructor for a KPOINTS file in line mode,
typically used for band structure calculations along the
high-symmetry lines of the Brillouin zone.
Args:
divisions: Parameter determining the number of k-points along each
high-symmetry line.
ibz: HighSymmKpath object (pymatgen.symmetry.bandstructure)
Returns:
Kpoints object
"""
kpoints = list()
labels = list()
for path in ibz.kpath["path"]:
kpoints.append(ibz.kpath["kpoints"][path[0]])
labels.append(path[0])
for i in range(1, len(path) - 1):
kpoints.append(ibz.kpath["kpoints"][path[i]])
labels.append(path[i])
kpoints.append(ibz.kpath["kpoints"][path[i]])
labels.append(path[i])
kpoints.append(ibz.kpath["kpoints"][path[-1]])
labels.append(path[-1])
return Kpoints("Line_mode KPOINTS file",
style=Kpoints.supported_modes.Line_mode,
coord_type="Reciprocal",
kpts=kpoints,
labels=labels,
num_kpts=int(divisions))
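# Illustrative sketch (uses the HighSymmKpath object mentioned in the docstring;
# `structure` and the division count of 20 are example inputs):
#   from pymatgen.symmetry.bandstructure import HighSymmKpath
#   kpath = HighSymmKpath(structure)
#   band_kpts = Kpoints.automatic_linemode(20, kpath)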
@staticmethod
def from_file(filename):
"""
Reads a Kpoints object from a KPOINTS file.
Args:
filename (str): filename to read from.
Returns:
Kpoints object
"""
with zopen(filename, "rt") as f:
return Kpoints.from_string(f.read())
@staticmethod
def from_string(string):
"""
Reads a Kpoints object from a KPOINTS string.
Args:
string (str): KPOINTS string.
Returns:
Kpoints object
"""
lines = [line.strip() for line in string.splitlines()]
comment = lines[0]
num_kpts = int(lines[1].split()[0].strip())
style = lines[2].lower()[0]
#Fully automatic KPOINTS
if style == "a":
return Kpoints.automatic(int(lines[3]))
coord_pattern = re.compile("^\s*([\d+\.\-Ee]+)\s+([\d+\.\-Ee]+)\s+"
"([\d+\.\-Ee]+)")
#Automatic gamma and Monk KPOINTS, with optional shift
if style == "g" or style == "m":
kpts = [int(i) for i in lines[3].split()]
kpts_shift = (0, 0, 0)
if len(lines) > 4 and coord_pattern.match(lines[4]):
try:
kpts_shift = [int(i) for i in lines[4].split()]
except ValueError:
pass
return Kpoints.gamma_automatic(kpts, kpts_shift) if style == "g" \
else Kpoints.monkhorst_automatic(kpts, kpts_shift)
#Automatic kpoints with basis
if num_kpts <= 0:
style = Kpoints.supported_modes.Cartesian if style in "ck" \
else Kpoints.supported_modes.Reciprocal
kpts = [[float(j) for j in lines[i].split()] for i in range(3, 6)]
kpts_shift = [float(i) for i in lines[6].split()]
return Kpoints(comment=comment, num_kpts=num_kpts, style=style,
kpts=kpts, kpts_shift=kpts_shift)
#Line-mode KPOINTS, usually used with band structures
if style == "l":
coord_type = "Cartesian" if lines[3].lower()[0] in "ck" \
else "Reciprocal"
style = Kpoints.supported_modes.Line_mode
kpts = []
labels = []
patt = re.compile("([e0-9\.\-]+)\s+([e0-9\.\-]+)\s+([e0-9\.\-]+)"
"\s*!*\s*(.*)")
for i in range(4, len(lines)):
line = lines[i]
m = patt.match(line)
if m:
kpts.append([float(m.group(1)), float(m.group(2)),
float(m.group(3))])
labels.append(m.group(4).strip())
return Kpoints(comment=comment, num_kpts=num_kpts, style=style,
kpts=kpts, coord_type=coord_type, labels=labels)
#Assume explicit KPOINTS if all else fails.
style = Kpoints.supported_modes.Cartesian if style in "ck" \
else Kpoints.supported_modes.Reciprocal
kpts = []
kpts_weights = []
labels = []
tet_number = 0
tet_weight = 0
tet_connections = None
for i in range(3, 3 + num_kpts):
toks = lines[i].split()
kpts.append([float(j) for j in toks[0:3]])
kpts_weights.append(float(toks[3]))
if len(toks) > 4:
labels.append(toks[4])
else:
labels.append(None)
try:
#Deal with tetrahedron method
if lines[3 + num_kpts].strip().lower()[0] == "t":
toks = lines[4 + num_kpts].split()
tet_number = int(toks[0])
tet_weight = float(toks[1])
tet_connections = []
for i in range(5 + num_kpts, 5 + num_kpts + tet_number):
toks = lines[i].split()
tet_connections.append((int(toks[0]),
[int(toks[j])
for j in range(1, 5)]))
except IndexError:
pass
return Kpoints(comment=comment, num_kpts=num_kpts, style=style,
kpts=kpts, kpts_weights=kpts_weights,
tet_number=tet_number, tet_weight=tet_weight,
tet_connections=tet_connections, labels=labels)
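# Parsing sketch for an automatic gamma-centred KPOINTS (illustrative input):
#   kpts = Kpoints.from_string("Automatic mesh\n0\nGamma\n4 4 4\n0 0 0\n")
#   # kpts.style -> Gamma, kpts.kpts -> [[4, 4, 4]], kpts.kpts_shift -> [0, 0, 0]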
def write_file(self, filename):
"""
Write Kpoints to a file.
Args:
filename (str): Filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
def __str__(self):
lines = [self.comment, str(self.num_kpts), self.style]
style = self.style.lower()[0]
if style == "l":
lines.append(self.coord_type)
for i in range(len(self.kpts)):
lines.append(" ".join([str(x) for x in self.kpts[i]]))
if style == "l":
lines[-1] += " ! " + self.labels[i]
if i % 2 == 1:
lines[-1] += "\n"
elif self.num_kpts > 0:
if self.labels is not None:
lines[-1] += " %i %s" % (self.kpts_weights[i],
self.labels[i])
else:
lines[-1] += " %i" % (self.kpts_weights[i])
#Print tetrahedron parameters if the number of tetrahedrons > 0
if style not in "lagm" and self.tet_number > 0:
lines.append("Tetrahedron")
lines.append("%d %f" % (self.tet_number, self.tet_weight))
for sym_weight, vertices in self.tet_connections:
lines.append("%d %d %d %d %d" % (sym_weight, vertices[0],
vertices[1], vertices[2],
vertices[3]))
#Print shifts for automatic kpoints types if not zero.
if self.num_kpts <= 0 and tuple(self.kpts_shift) != (0, 0, 0):
lines.append(" ".join([str(x) for x in self.kpts_shift]))
return "\n".join(lines) + "\n"
def as_dict(self):
"""json friendly dict representation of Kpoints"""
d = {"comment": self.comment, "nkpoints": self.num_kpts,
"generation_style": self.style, "kpoints": self.kpts,
"usershift": self.kpts_shift,
"kpts_weights": self.kpts_weights, "coord_type": self.coord_type,
"labels": self.labels, "tet_number": self.tet_number,
"tet_weight": self.tet_weight,
"tet_connections": self.tet_connections}
optional_paras = ["genvec1", "genvec2", "genvec3", "shift"]
for para in optional_paras:
if para in self.__dict__:
d[para] = self.__dict__[para]
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
comment = d.get("comment", "")
generation_style = d.get("generation_style")
kpts = d.get("kpoints", [[1, 1, 1]])
kpts_shift = d.get("usershift", [0, 0, 0])
num_kpts = d.get("nkpoints", 0)
#coord_type = d.get("coord_type", None)
return cls(comment=comment, kpts=kpts, style=generation_style,
kpts_shift=kpts_shift, num_kpts=num_kpts,
kpts_weights=d.get("kpts_weights"),
coord_type=d.get("coord_type"),
labels=d.get("labels"), tet_number=d.get("tet_number", 0),
tet_weight=d.get("tet_weight", 0),
tet_connections=d.get("tet_connections"))
def get_potcar_dir():
if "VASP_PSP_DIR" in os.environ:
return os.environ["VASP_PSP_DIR"]
return None
def parse_string(s):
return "{}".format(s.strip())
def parse_bool(s):
m = re.match(r"^\.?([T|F|t|f])[A-Za-z]*\.?", s)
if m:
if m.group(1) == "T" or m.group(1) == "t":
return True
else:
return False
raise ValueError(s + " should be a boolean type!")
def parse_float(s):
return float(re.search(r"^-?\d*\.?\d*[e|E]?-?\d*", s).group(0))
def parse_int(s):
return int(re.match(r"^-?[0-9]+", s).group(0))
def parse_list(s):
return [float(y) for y in re.split("\s+", s.strip()) if not y.isalpha()]
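# Illustrative behaviour of the parsing helpers (example inputs only):
#   parse_bool(".TRUE.")        # -> True
#   parse_float("520.000")      # -> 520.0
#   parse_int("8 something")    # -> 8
#   parse_list("1.2 3.4 eV")    # -> [1.2, 3.4]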
@cached_class
class PotcarSingle(object):
"""
Object for a **single** POTCAR. The builder assumes the complete string of
a single POTCAR is passed in; the complete untouched data is kept in "data"
as a string, together with a dict of parsed keywords.
Args:
data:
Complete and single potcar file as a string.
.. attribute:: data
POTCAR data as a string.
.. attribute:: keywords
Keywords parsed from the POTCAR as a dict. All keywords are also
accessible as attributes in themselves. E.g., potcar.enmax,
potcar.encut, etc.
"""
functional_dir = {"PBE": "POT_GGA_PAW_PBE", "LDA": "POT_LDA_PAW",
"PW91": "POT_GGA_PAW_PW91", "LDA_US": "POT_LDA_US"}
functional_tags = {"pe": {"name": "PBE", "class": "GGA"},
"91": {"name": "PW91", "class": "GGA"},
"rp": {"name": "revPBE", "class": "GGA"},
"am": {"name": "AM05", "class": "GGA"},
"ps": {"name": "PBEsol", "class": "GGA"},
"pw": {"name": "PW86", "class": "GGA"},
"lm": {"name": "Langreth-Mehl-Hu", "class": "GGA"},
"pb": {"name": "Perdew-Becke", "class": "GGA"},
"ca": {"name": "Perdew-Zunger81", "class": "LDA"},
"hl": {"name": "Hedin-Lundquist", "class": "LDA"},
"wi": {"name": "Wigner Interpoloation", "class": "LDA"}}
parse_functions = {"LULTRA": parse_bool,
"LCOR": parse_bool,
"LPAW": parse_bool,
"EATOM": parse_float,
"RPACOR": parse_float,
"POMASS": parse_float,
"ZVAL": parse_float,
"RCORE": parse_float,
"RWIGS": parse_float,
"ENMAX": parse_float,
"ENMIN": parse_float,
"EAUG": parse_float,
"DEXC": parse_float,
"RMAX": parse_float,
"RAUG": parse_float,
"RDEP": parse_float,
"RDEPT": parse_float,
"QCUT": parse_float,
"QGAM": parse_float,
"RCLOC": parse_float,
"IUNSCR": parse_int,
"ICORE": parse_int,
"NDATA": parse_int,
"VRHFIN": parse_string,
"LEXCH": parse_string,
"TITEL": parse_string,
"STEP": parse_list,
"RRKJ": parse_list,
"GGA": parse_list}
Orbital = namedtuple('Orbital', ['n', 'l', 'j', 'E', 'occ'])
Description = namedtuple('OrbitalDescription', ['l', 'E',
'Type', "Rcut",
"Type2", "Rcut2"])
def __init__(self, data):
self.data = data # raw POTCAR as a string
# VASP parses the POTCAR header into vasprun.xml, and this differs from the TITEL
self.header = data.split("\n")[0].strip()
search_lines = re.search(r"(?s)(parameters from PSCTR are:"
r".*?END of PSCTR-controll parameters)",
data).group(1)
self.keywords = {}
for key, val in re.findall(r"(\S+)\s*=\s*(.*?)(?=;|$)",
search_lines, flags=re.MULTILINE):
self.keywords[key] = self.parse_functions[key](val)
PSCTR = OrderedDict()
array_search = re.compile(r"(-*[0-9\.]+)")
orbitals = []
descriptions = []
atomic_configuration = re.search(r"Atomic configuration\s*\n?"
r"(.*?)Description", search_lines)
if atomic_configuration:
lines = atomic_configuration.group(1).splitlines()
num_entries = re.search(r"([0-9]+)", lines[0]).group(1)
num_entries = int(num_entries)
PSCTR['nentries'] = num_entries
for line in lines[1:]:
orbit = array_search.findall(line)
if orbit:
orbitals.append(self.Orbital(int(orbit[0]),
int(orbit[1]),
float(orbit[2]),
float(orbit[3]),
float(orbit[4])))
PSCTR['Orbitals'] = tuple(orbitals)
description_string = re.search(r"(?s)Description\s*\n"
r"(.*?)Error from kinetic"
r" energy argument \(eV\)",
search_lines)
for line in description_string.group(1).splitlines():
description = array_search.findall(line)
if description:
descriptions.append(self.Description(int(description[0]),
float(description[1]),
int(description[2]),
float(description[3]),
int(description[4]) if
len(description) > 4
else None,
float(description[5]) if
len(description) > 4
else None))
if descriptions:
PSCTR['OrbitalDescriptions'] = tuple(descriptions)
RRKJ_kinetic_energy_string = re.search(r"(?s)Error from kinetic "
r"energy argument \(eV\)\s*\n"
r"(.*?)END of PSCTR-controll"
r" parameters",
search_lines)
RRKJ_array = []
for line in RRKJ_kinetic_energy_string.group(1).splitlines():
if "=" not in line:
RRKJ_array += parse_list(line.strip('\n'))
if RRKJ_array:
PSCTR['RRKJ'] = tuple(RRKJ_array)
PSCTR.update(self.keywords)
self.PSCTR = OrderedDict(sorted(PSCTR.items(), key=lambda x: x[0]))
self.hash = self.get_potcar_hash()
def __str__(self):
return self.data + "\n"
@property
def electron_configuration(self):
el = Element.from_Z(self.atomic_no)
full_config = el.full_electronic_structure
nelect = self.nelectrons
config = []
while nelect > 0:
e = full_config.pop(-1)
config.append(e)
nelect -= e[-1]
return config
def write_file(self, filename):
with zopen(filename, "wt") as f:
f.write(self.__str__())
@staticmethod
def from_file(filename):
with zopen(filename, "rt") as f:
return PotcarSingle(f.read())
@staticmethod
def from_symbol_and_functional(symbol, functional="PBE"):
funcdir = PotcarSingle.functional_dir[functional]
d = get_potcar_dir()
if d is None:
raise ValueError("No POTCAR directory found. Please set "
"the VASP_PSP_DIR environment variable")
paths_to_try = [os.path.join(d, funcdir, "POTCAR.{}".format(symbol)),
os.path.join(d, funcdir, symbol, "POTCAR")]
for p in paths_to_try:
p = os.path.expanduser(p)
p = zpath(p)
if os.path.exists(p):
return PotcarSingle.from_file(p)
raise IOError("You do not have the right POTCAR with functional " +
"{} and label {} in your VASP_PSP_DIR".format(functional,
symbol))
@property
def symbol(self):
"""
Symbol of POTCAR, e.g., Fe_pv
"""
return self.keywords["TITEL"].split(" ")[1].strip()
@property
def element(self):
"""
Attempt to return the atomic symbol based on the VRHFIN keyword.
"""
element = self.keywords["VRHFIN"].split(":")[0].strip()
#VASP incorrectly gives the element symbol for Xe as "X"
return "Xe" if element == "X" else element
@property
def atomic_no(self):
"""
Attempt to return the atomic number based on the VRHFIN keyword.
"""
return Element(self.element).Z
@property
def nelectrons(self):
return self.zval
@property
def potential_type(self):
if self.lultra:
return "US"
elif self.lpaw:
return "PAW"
else:
return "NC"
@property
def functional(self):
return self.functional_tags.get(self.LEXCH.lower(), {}).get('name')
@property
def functional_class(self):
return self.functional_tags.get(self.LEXCH.lower(), {}).get('class')
def get_potcar_hash(self):
hash_str = ""
for k, v in self.PSCTR.items():
hash_str += "{}".format(k)
if isinstance(v, int):
hash_str += "{}".format(v)
elif isinstance(v, float):
hash_str += "{:.3f}".format(v)
elif isinstance(v, bool):
hash_str += "{}".format(bool)
elif isinstance(v, (tuple, list)):
for item in v:
if isinstance(item, float):
hash_str += "{:.3f}".format(item)
elif isinstance(item, (self.Orbital, self.Description)):
for item_v in item:
if isinstance(item_v, (int, str)):
hash_str += "{}".format(item_v)
elif isinstance(item_v, float):
hash_str += "{:.3f}".format(item_v)
else:
hash_str += "{}".format(item_v) if item_v else ""
else:
hash_str += v.replace(" ", "")
self.hash_str = hash_str
return md5(hash_str.lower().encode('utf-8')).hexdigest()
def __getattr__(self, a):
"""
Delegates attributes to keywords. For example, you can use
potcarsingle.enmax to get the ENMAX of the POTCAR.
Float-type properties are converted to the correct float. By
default, all energies are in eV and all length scales are in Angstroms.
"""
try:
return self.keywords[a.upper()]
except:
raise AttributeError(a)
class Potcar(list, PMGSONable):
"""
Object for reading and writing POTCAR files for calculations. Consists of a
list of PotcarSingle.
Args:
symbols ([str]): Element symbols for POTCAR. This should correspond
to the symbols used by VASP. E.g., "Mg", "Fe_pv", etc.
functional (str): Functional used.
sym_potcar_map (dict): Allows a user to specify a specific element
symbol to raw POTCAR mapping.
"""
DEFAULT_FUNCTIONAL = "PBE"
def __init__(self, symbols=None, functional=DEFAULT_FUNCTIONAL,
sym_potcar_map=None):
super(Potcar, self).__init__()
self.functional = functional
if symbols is not None:
self.set_symbols(symbols, functional, sym_potcar_map)
def as_dict(self):
return {"functional": self.functional, "symbols": self.symbols,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
@classmethod
def from_dict(cls, d):
return Potcar(symbols=d["symbols"], functional=d["functional"])
@staticmethod
def from_file(filename):
with zopen(filename, "rt") as reader:
fdata = reader.read()
potcar = Potcar()
potcar_strings = re.compile(r"\n?(\s*.*?End of Dataset)",
re.S).findall(fdata)
functionals = []
for p in potcar_strings:
single = PotcarSingle(p)
potcar.append(single)
functionals.append(single.functional)
if len(set(functionals)) != 1:
raise ValueError("File contains incompatible functionals!")
else:
potcar.functional = functionals[0]
return potcar
def __str__(self):
return "\n".join([str(potcar).strip("\n") for potcar in self]) + "\n"
def write_file(self, filename):
"""
Write Potcar to a file.
Args:
filename (str): filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
@property
def symbols(self):
"""
Get the atomic symbols of all the atoms in the POTCAR file.
"""
return [p.symbol for p in self]
@symbols.setter
def symbols(self, symbols):
self.set_symbols(symbols, functional=self.functional)
@property
def spec(self):
"""
Get the atomic symbols and hash of all the atoms in the POTCAR file.
"""
return [{"symbol": p.symbol, "hash": p.get_potcar_hash()} for p in self]
def set_symbols(self, symbols, functional=DEFAULT_FUNCTIONAL,
sym_potcar_map=None):
"""
Initialize the POTCAR from a set of symbols. Currently, the POTCARs can
be fetched from a location specified in the environment variable
VASP_PSP_DIR or in a pymatgen.cfg or specified explicitly in a map.
Args:
symbols ([str]): A list of element symbols
functional (str): The functional to use from the config file
sym_potcar_map (dict): A map of symbol:raw POTCAR string. If
sym_potcar_map is specified, POTCARs will be generated from
the given map data rather than the config file location.
"""
del self[:]
if sym_potcar_map:
for el in symbols:
self.append(PotcarSingle(sym_potcar_map[el]))
else:
for el in symbols:
p = PotcarSingle.from_symbol_and_functional(el, functional)
self.append(p)
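# Illustrative sketch (requires licensed VASP pseudopotentials, with the
# VASP_PSP_DIR environment variable pointing at them; symbols are examples):
#   potcar = Potcar(["Si", "O"], functional="PBE")
#   potcar.write_file("POTCAR")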
class VaspInput(dict, PMGSONable):
"""
Class to contain a set of vasp input objects corresponding to a run.
Args:
incar: Incar object.
kpoints: Kpoints object.
poscar: Poscar object.
potcar: Potcar object.
optional_files: Other input files supplied as a dict of {
filename: object}. The object should follow standard pymatgen
conventions in implementing an as_dict() and a from_dict method.
"""
def __init__(self, incar, kpoints, poscar, potcar, optional_files=None,
**kwargs):
super(VaspInput, self).__init__(**kwargs)
self.update({'INCAR': incar,
'KPOINTS': kpoints,
'POSCAR': poscar,
'POTCAR': potcar})
if optional_files is not None:
self.update(optional_files)
def __str__(self):
output = []
for k, v in self.items():
output.append(k)
output.append(str(v))
output.append("")
return "\n".join(output)
def as_dict(self):
d = {k: v.as_dict() for k, v in self.items()}
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
dec = MontyDecoder()
sub_d = {"optional_files": {}}
for k, v in d.items():
if k in ["INCAR", "POSCAR", "POTCAR", "KPOINTS"]:
sub_d[k.lower()] = dec.process_decoded(v)
elif k not in ["@module", "@class"]:
sub_d["optional_files"][k] = dec.process_decoded(v)
return cls(**sub_d)
def write_input(self, output_dir=".", make_dir_if_not_present=True):
"""
Write VASP input to a directory.
Args:
output_dir (str): Directory to write to. Defaults to current
directory (".").
make_dir_if_not_present (bool): Create the directory if not
present. Defaults to True.
"""
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
for k, v in self.items():
with zopen(os.path.join(output_dir, k), "wt") as f:
f.write(v.__str__())
@staticmethod
def from_directory(input_dir, optional_files=None):
"""
Read in a set of VASP input from a directory. Note that only the
standard INCAR, POSCAR, POTCAR and KPOINTS files are read unless
optional_files is specified.
Args:
input_dir (str): Directory to read VASP input from.
optional_files (dict): Optional files to read in as well as a
dict of {filename: Object type}. Object type must have a
static method from_file.
"""
sub_d = {}
for fname, ftype in [("INCAR", Incar), ("KPOINTS", Kpoints),
("POSCAR", Poscar), ("POTCAR", Potcar)]:
fullzpath = zpath(os.path.join(input_dir, fname))
sub_d[fname.lower()] = ftype.from_file(fullzpath)
sub_d["optional_files"] = {}
if optional_files is not None:
for fname, ftype in optional_files.items():
sub_d["optional_files"][fname] = \
ftype.from_file(os.path.join(input_dir, fname))
return VaspInput(**sub_d)
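# Illustrative round trip (directory names are hypothetical; assumes the four
# standard input files exist in "./run1"):
#   vinput = VaspInput.from_directory("run1")
#   vinput["INCAR"]["NSW"] = 0                 # tweak a parameter
#   vinput.write_input(output_dir="run1_static")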
|
sonium0/pymatgen
|
pymatgen/io/vasp/inputs.py
|
Python
|
mit
| 64,386
|
[
"VASP",
"pymatgen"
] |
493cd86047a7a28f70d4c4ffe07cc3e34d94ee09e7faed5c9aba60a2a638a4e1
|
"""
This module contains a method for easy ShakeMap creation using
string parameters.
"""
from eqep.interpolation.rbf_pgvinterpolator import RbfPGVInterpolator
from eqep.shakemap.display.scales import *
from eqep.shakemap.display.styles import *
from eqep.shakemap.preinitialized_shakemap import PreinitializedShakeMap
# mappings
# string -> interpolation algorithm mapping
algorithms = {
'linear': RbfPGVInterpolator('linear'),
'cubic': RbfPGVInterpolator('cubic'),
'multiquadric': RbfPGVInterpolator('multiquadric'),
'inverse': RbfPGVInterpolator('inverse'),
'quintic': RbfPGVInterpolator('quintic'),
'thin_plate': RbfPGVInterpolator('thin_plate'),
'gaussian': RbfPGVInterpolator('gaussian')
}
# string -> scale mapping
scales = {
'green': GreenSMScale(),
'rainbow': RainbowSMScale(),
'hard_bounds': HardBoundsSMScale(),
'smooth_bounds': SmoothBoundsSMScale(),
'partly_transparent': PartlyTransparentSMScale(),
'fully_transparent': FullyTransparentSMScale(),
'black_white': FullyTransparentWhiteToBlackSMScale(),
'blue_red': FullyTransparentBlueToRedSMScale(),
}
#: string -> style mapping
styles = {
'plain': PlainStyle,
'labels': StationMarkers,
'with_grid': NoStyle,
}
def create_shakemap(data, alg_str='multiquadric',
scale_str='fully_transparent', styles_str='plain',
steps=None, bounds=None, img_size=None,
interpolation='bilinear'):
"""Creates a new ShakeMap from the given parameters.
The main feature here is that `algorithm`, `scale` and `styles` can be passed
as strings and are then automatically converted to the appropriate classes.
Available algorithms are:
* "linear"
* "cubic"
* "multiquadric"
* "inverse"
* "quintic"
* "thin_plate"
* "gaussian"
Configurable scales:
* "green"
* "rainbow"
* "hard_bounds"
* "smooth_bounds"
* "partly_transparent"
* "fully_transparent"
* "black_white"
* "blue_red"
Selectable styles:
* "plain"
* "labels"
* "with_grid"
Parameters
----------
data : EQData
the earthquake-data to create the ShakeMap from
alg_str : str, optional
the algorithm name
scale_str : str, optional
the name of the scale to use for display
styles_str : str, optional
the name of the styles to use for display; can be a comma-separated list
steps : tuple of int, optional
the number of points to calculate in (x, y)-direction
bounds : quadruple of float
the (long_min, long_max, lat_min, lat_max)-coordinate bound of the
ShakeMap
img_size : tuple of float
the size of the output image in inches
interpolation : str
the extra interpolation algorithm to use; 'none' for no interpolation
"""
# prepare the scale
scale = scales[scale_str]
# prepare the interpolation algorithm
alg = algorithms[alg_str]
# if there are no bounds defined, calculate some automatically
if bounds is None:
boundary = 0.1
min_long = min(data.longs)
max_long = max(data.longs)
min_lat = min(data.lats)
max_lat = max(data.lats)
bounds = (min_long-boundary, max_long+boundary, min_lat-boundary,
max_lat+boundary)
# prepare the ShakeMap style
style = None
# apply all requested styles in given order
for style_str in styles_str.replace(' ', '').split(',')[::-1]:
style_cls = styles[style_str]
# handle the instantiation depending on the class type
if style_cls == PlainStyle or style_cls == NoStyle:
style = style_cls(style)
elif style_cls == StationMarkers:
style = style_cls(data.longs, data.lats, style)
# create and return the ShakeMap
return PreinitializedShakeMap(data, alg, scale, style,
bounds[:2], bounds[2:], steps, img_size,
interpolation)
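# Illustrative call (assumes `data` is an EQData instance; all string options
# shown are taken from the mappings above, and the step count is an example):
#   shakemap = create_shakemap(data, alg_str='linear',
#                              scale_str='rainbow',
#                              styles_str='plain,labels',
#                              steps=(200, 200))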
|
TGM-HIT/eqep-api
|
eqep/shakemap/shakemap_factory.py
|
Python
|
mit
| 4,257
|
[
"Gaussian"
] |
e3c8f78e8e23c480644ecb0a4093d89f9559e6e3c70cf76ea2da04df97332ecc
|
# -*- coding: utf-8 -*-
import operator
import pyjsparser
from autojsdoc.parser import jsdoc, parser
params = operator.attrgetter('name', 'type', 'doc')
def parse(s, source=None):
tree = pyjsparser.parse(s)
mods = parser.ModuleMatcher(source).visit(tree)
post(mods)
return mods
def post(mods):
modules = dict(BASE_MODULES)
modules.update((m.name, m) for m in mods)
for mod in mods:
mod.post_process(modules)
BASE_MODULES = {
'other': jsdoc.ModuleDoc({
'module': 'other',
'_members': [
('<exports>', jsdoc.LiteralDoc({'name': 'value', 'value': "ok"})),
],
}),
'dep2': jsdoc.ModuleDoc({
'module': 'dep2',
'_members': [
('<exports>', jsdoc.LiteralDoc({'value': 42.})),
],
}),
'dep3': jsdoc.ModuleDoc({
'module': 'dep3',
'_members': [
('<exports>', jsdoc.LiteralDoc({'value': 56.})),
],
}),
'Class': jsdoc.ModuleDoc({
'module': 'Class',
'_members': [
('<exports>', jsdoc.ClassDoc({
'name': 'Class',
'doc': "Base Class"
})),
],
}),
'mixins': jsdoc.ModuleDoc({
'module': 'mixins',
'_members': [
('<exports>', jsdoc.NSDoc({
'name': 'mixins',
'_members': [
('Bob', jsdoc.ClassDoc({'class': "Bob"})),
]
})),
],
}),
'Mixin': jsdoc.ModuleDoc({
'module': 'Mixin',
'_members': [
('<exports>', jsdoc.MixinDoc({
'_members': [
('a', jsdoc.FunctionDoc({'function': 'a'})),
]
})),
],
})
}
|
t3dev/odoo
|
doc/_extensions/autojsdoc/parser/tests/support.py
|
Python
|
gpl-3.0
| 1,767
|
[
"VisIt"
] |
ad8d16fd15505c575676ab821f2deca2727ddb21aa8d8e1b358f1df56834004d
|
from aiida import load_dbenv
load_dbenv()
from aiida.orm import Code, DataFactory, WorkflowFactory
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
import numpy as np
# Silicon structure
a = 5.404
cell = [[a, 0, 0],
[0, a, 0],
[0, 0, a]]
symbols=['Si'] * 8
scaled_positions = [(0.875, 0.875, 0.875),
(0.875, 0.375, 0.375),
(0.375, 0.875, 0.375),
(0.375, 0.375, 0.875),
(0.125, 0.125, 0.125),
(0.125, 0.625, 0.625),
(0.625, 0.125, 0.625),
(0.625, 0.625, 0.125)]
structure = StructureData(cell=cell)
positions = np.dot(scaled_positions, cell)
for i, scaled_position in enumerate(scaled_positions):
structure.append_atom(position=np.dot(scaled_position, cell).tolist(),
symbols=symbols[i])
structure.store()
# QE input parameters
qe_dict = {'CONTROL': {
'calculation': 'scf',
'restart_mode': 'from_scratch',
'wf_collect': True,
},
'SYSTEM': {
'ecutwfc': 30.,
'ecutrho': 240.,
},
'ELECTRONS': {
'conv_thr': 1.e-6,
}}
pseudo_dict = {'family': 'pbe_test'}
# Monkhorst-pack
kpoints_dict = {'points': [2, 2, 2],
'shift': [0.0, 0.0, 0.0]}
# Cluster information
machine_dict = {
'num_machines': 1,
'parallel_env':'mpi*',
'tot_num_mpiprocs': 16}
# Phonopy input parameters
phonopy_parameters = {'supercell': [[2, 0, 0],
[0, 2, 0],
[0, 0, 2]],
'primitive': [[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0]],
'distance': 0.01,
'mesh': [40, 40, 40],
'symmetry_precision': 1e-4}
# Collect workflow input data
wf_parameters = {
'structure': structure,
'phonopy_input': {'parameters': phonopy_parameters},
'input_force': {'code': 'pw@boston',
'parameters': qe_dict,
'resources': machine_dict,
'pseudo': pseudo_dict,
'kpoints': kpoints_dict},
'input_optimize': {'code': 'pw@boston',
'parameters': qe_dict,
'resources': machine_dict,
'pseudo': pseudo_dict,
'kpoints': kpoints_dict},
}
#Submit workflow
WorkflowPhonon = WorkflowFactory('wf_phonon')
wf = WorkflowPhonon(params=wf_parameters, optimize=True)
wf.label = 'QE Si'
wf.start()
print ('pk: {}'.format(wf.pk))
|
abelcarreras/aiida_extensions
|
workflows/launcher/launch_phonon_qe_si.py
|
Python
|
mit
| 2,789
|
[
"phonopy"
] |
dddfca638c787a1554674977852e3548f3ed77d02957c56db499a932fe49a521
|
#!python
#############################################################################
# cubit2specfem3d.py #
# this file is part of GEOCUBIT #
# #
# Created by Emanuele Casarotti #
# Copyright (c) 2008 Istituto Nazionale di Geofisica e Vulcanologia #
# #
#############################################################################
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., #
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. #
# #
#############################################################################
#
# for a complete definition of the format of the mesh in SPECFEM3D check the
# manual (http://www.geodynamics.org/cig/software/specfem3d):
#
# USAGE
#
#############################################################################
# PREREQUISITE
# The mesh must be prepared
# automatically using the module boundary_definition
# (see boundary_definition.py for more information)
# or
# manually following the convention:
# - each material should have a block defined by material domain_flag
# (acoustic/elastic/poroelastic) name,flag of the material (integer),
# p velocity (or the full description: name, flag, vp, vs, rho, Q ...
# if not present these last 3 parameters will be interpolated by
# module mat_parameter)
# - each mesh should have the block definition for the face on the
# free_surface (topography), the name of this block must be 'face_topo'
# or you can change the default name in mesh.topo defined in profile.
# - each mesh should have the block definition for the faces on the
# absorbing boundaries, one block for each surface with x=Xmin,x=Xmax,
# y=Ymin,y=Ymax and z=bottom. The names of the blocks should contain
# the strings "xmin,xmax,ymin,ymax,bottom"
#
# ############################################################################
# RUN
# In a python script or in the cubit python tab call:
#
# export2SPECFEM3D(path_exporting_mesh_SPECFEM3D)
#
# the module creates a python class for the mesh: ex. profile=mesh()
# and it exports the files of the mesh needed by the partitioner of SPECFEM3D
#
# ############################################################################
# OUTPUT
# The default output are 11 ASCII files:
# ___________________________________________________________________________
# mesh_name='mesh_file' -> the connectivity of the whole mesh
# format:
# number of elements
# id_elements id_node1 id_node2 .... id_node7 id_node8
# .....
#
# ___________________________________________________________________________
# #nodecoord_name='nodes_coords_file' -> coordinates of the nodes of the mesh
# format:
# number of nodes
# id_node x_coordinate y_coordinate z_coordinate
# .....
#
# __________________________________________________________________________
# #material_name='materials_file' -> material flag of the elements
# format:
# id_element flag
# .....
#
# __________________________________________________________________________
# #nummaterial_name='nummaterial_velocity_file' -> material properties
# format:
# #material_domain_id #material_id #rho #vp #vs #Q_mu #anisotropy
# .....
# #material_domain_id 'tomography' file_name
# for interpolation with tomography
# .....
# ___________________________________________________________________________
# #absname='absorbing_surface_file' -> all the face in all the abs boundaries
# #absname_local='absorbing_surface_file'+'_xmin' -> boundary defined by x=Xmin
# #absname_local='absorbing_surface_file'+'_xmax' -> boundary defined by x=Xmax
# #absname_local='absorbing_surface_file'+'_ymin' -> boundary defined by y=Ymin
# #absname_local='absorbing_surface_file'+'_ymax' -> boundary defined by y=Ymax
# #absname_local='absorbing_surface_file'+'_bottom' -> boundary z=bottom
# format:
# number of faces
# id_(element containing the face) id_node1_face .... id_node4_face
# ....
#
# __________________________________________________________________________
# #freename='free_surface_file' -> face on the free surface (topography)
# format:
# number of faces
# id_(element containing the face) id_node1_face .... id_node4_face
#
# ___________________________________________________________________________
# ___________________________________________________________________________
# # surface='*_surface_file' -> face on any surface
# (define by the word 'surface' in the name of the block, ex: moho_surface)
# optional surfaces, e.g. moho_surface
# should be created like e.g.:
# > block 10 face in surface 2
# > block 10 name 'moho_surface'
#
#
# format:
# number of faces
# id_(element containing the face) id_node1_face ..... id_node4_face
#
# ____________________________________________________________________________
# it is possible save only one (or more) file singularly:
# for example if you want only the nodecoord_file call
# module mesh.nodescoord_write(full path name)
#
#############################################################################
try:
import start as start
cubit = start.start_cubit()
except:
try:
import cubit
except:
print 'error importing cubit, check if cubit is installed'
pass
from utilities import get_cubit_version
class mtools(object):
def __init__(self, frequency, list_surf, list_vp):
super(mtools, self).__init__()
self.frequency = frequency
self.list_surf = list_surf
self.list_vp = list_vp
self.ngll = 5
self.percent_gll = 0.172
self.point_wavelength = 5
def __repr__(self):
txt = 'Meshing for frequency up to ' + str(self.frequency) + 'Hz\n'
for surf, vp in zip(self.list_surf, self.list_vp):
txt = txt + 'surface ' + str(surf) + ', vp =' + str(vp) +\
' -> size ' + str(self.freq2meshsize(vp)[0]) + ' -> dt ' +\
str(self.freq2meshsize(vp)[1]) + '\n'
return txt
def freq2meshsize(self, vp):
velocity = vp * .5
self.size = (1 / 2.5) * velocity / self.frequency * \
(self.ngll - 1) / self.point_wavelength
self.dt = .4 * self.size / vp * self.percent_gll
return self.size, self.dt
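# Worked sizing sketch (illustrative numbers, not from the original source):
#   m = mtools(frequency=1.0, list_surf=[1], list_vp=[3000.0])
#   size, dt = m.freq2meshsize(3000.0)
#   # the velocity is halved to 1500 m/s, so
#   # size ~ 0.4 * 1500 / 1.0 * (5 - 1) / 5 = 480
#   # dt   ~ 0.4 * 480 / 3000 * 0.172     ~ 0.011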
def mesh_it(self):
for surf, vp in zip(self.list_surf, self.list_vp):
command = "surface " + \
str(surf) + " size " + str(self.freq2meshsize(vp)[0])
cubit.cmd(command)
command = "surface " + str(surf) + 'scheme pave'
cubit.cmd(command)
command = "mesh surf " + str(surf)
cubit.cmd(command)
class block_tools():
def __init__(self):
pass
def create_blocks(self, mesh_entity, list_entity=None,):
if mesh_entity == 'surface':
txt = ' face in surface '
elif mesh_entity == 'curve':
txt = ' edge in curve '
elif mesh_entity == 'group':
txt = ' face in group '
if list_entity:
if not isinstance(list_entity, list):
list_entity = [list_entity]
for entity in list_entity:
iblock = cubit.get_next_block_id()
command = "block " + str(iblock) + txt + str(entity)
cubit.cmd(command)
def material_file(self, filename):
matfile = open(filename, 'r')  # read the material database
material = []
for record in matfile:
mat_name, vp_str = record.split()
vp = float(vp_str)
material.append([mat_name, vp])
self.material = dict(material)
def assign_block_material(self, id_block, mat_name, vp=None):
try:
material = self.material
except:
material = None
cubit.cmd('block ' + str(id_block) + ' attribute count 2')
cubit.cmd('block ' + str(id_block) +
' attribute index 1 ' + str(id_block))
if material:
if mat_name in material.keys():
cubit.cmd('block ' + str(id_block) +
' attribute index 2 ' + str(material[mat_name]))
print 'block ' + str(id_block) + ' - material ' + mat_name +\
' - vp ' + str(material[mat_name]) + ' from database'
elif vp:
cubit.cmd('block ' + str(id_block) +
' attribute index 2 ' + str(vp))
print 'block ' + str(id_block) + ' - material ' + mat_name +\
' - vp ' + str(vp)
else:
print 'assignment impossible: check if ' + mat_name +\
' is in the database or specify vp'
class mesh_tools(block_tools):
"""Tools for the mesh
#########
dt,edge_dt,freq,edge_freq=seismic_resolution(edges,velocity,bins_d=None,
bins_u=None,sidelist=None,
ngll=5,np=8)
Given the velocity of a list of edges, seismic_resolution provides
the minimum Dt required for the stability condition (and the
corresponding edge).
Furthermore, given the number of GLL points in the element (ngll) and
the number of GLL points per wavelength, it provides the
maximum resolved frequency.
#########
length=edge_length(edge)
returns the length of an edge
#########
edge_min,length=edge_min_length(surface)
given the cubit id of a surface, it returns the edge with minimum length
#########
"""
def __init__(self):
pass
def seismic_resolution(self, edges, velocity, bins_d=None, bins_u=None,
sidelist=None):
"""
dt,edge_dt,freq,edge_freq=seismic_resolution(edges,velocity,
bins_d=None,bins_u=None,
sidelist=None,ngll=5,np=8)
Given the velocity of a list of edges, seismic_resolution provides
the minimum Dt required for the stability condition
(and the corresponding edge).
Furthermore, given the number of GLL points in the element (ngll)
and the number of GLL points per wavelength, it provides the
maximum resolved frequency.
"""
ratiostore = 1e10
dtstore = 1e10
edgedtstore = -1
edgeratiostore = -1
for edge in edges:
d = self.edge_length(edge)
ratio = (1 / 2.5) * velocity / d * \
(self.ngll - 1) / self.point_wavelength
dt = .4 * d / velocity * self.percent_gll
if dt < dtstore:
dtstore = dt
edgedtstore = edge
if ratio < ratiostore:
ratiostore = ratio
edgeratiostore = edge
try:
for bin_d, bin_u, side in zip(bins_d, bins_u, sidelist):
if ratio >= bin_d and ratio < bin_u:
command = "sideset " + str(side) + " edge " + str(edge)
cubit.cmd(command)
break
except:
pass
return dtstore, edgedtstore, ratiostore, edgeratiostore
def edge_length(self, edge):
"""
length=edge_length(edge)
returns the length of an edge
"""
from math import sqrt
nodes = cubit.get_connectivity('Edge', edge)
x0, y0, z0 = cubit.get_nodal_coordinates(nodes[0])
x1, y1, z1 = cubit.get_nodal_coordinates(nodes[1])
d = sqrt((x1 - x0)**2 + (y1 - y0)**2 + (z1 - z0)**2)
return d
def edge_min_length(self, surface):
"""
edge_min,length=edge_min_length(surface)
given the cubit id of a surface,
        it returns the edge with minimum length
"""
self.dmin = 99999
edge_store = 0
command = "group 'list_edge' add edge in surf " + str(surface)
command = command.replace("[", " ").replace("]", " ")
cubit.cmd(command)
group = cubit.get_id_from_name("list_edge")
edges = cubit.get_group_edges(group)
command = "delete group " + str(group)
cubit.cmd(command)
for edge in edges:
d = self.edge_length(edge)
if d < self.dmin:
self.dmin = d
edge_store = edge
self.edgemin = edge_store
return self.edgemin, self.dmin
def normal_check(self, nodes, normal):
p0 = cubit.get_nodal_coordinates(nodes[0])
p1 = cubit.get_nodal_coordinates(nodes[1])
p2 = cubit.get_nodal_coordinates(nodes[2])
a = [p1[0] - p0[0], p1[1] - p0[1], p1[2] - p0[2]]
b = [p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]]
axb = [a[1] * b[2] - a[2] * b[1], a[2] * b[0] -
a[0] * b[2], a[0] * b[1] - a[1] * b[0]]
dot = 0.0
for i in (0, 1, 2):
dot = dot + axb[i] * normal[i]
if dot > 0:
return nodes
elif dot < 0:
return nodes[0], nodes[3], nodes[2], nodes[1]
else:
print 'error: surface normal, dot=0', axb, normal, dot, p0, p1, p2
def mesh_analysis(self, frequency):
from sets import Set
cubit.cmd('set info off')
cubit.cmd('set echo off')
cubit.cmd('set journal off')
bins_d = [0.0001] + range(0, int(frequency) + 1) + [1000]
bins_u = bins_d[1:]
dt = []
ed_dt = []
r = []
ed_r = []
nstart = cubit.get_next_sideset_id()
command = "del sideset all"
cubit.cmd(command)
for bin_d, bin_u in zip(bins_d, bins_u):
nsideset = cubit.get_next_sideset_id()
command = 'create sideset ' + str(nsideset)
cubit.cmd(command)
command = "sideset " + \
str(nsideset) + " name " + \
"'ratio-[" + str(bin_d) + "_" + str(bin_u) + "['"
cubit.cmd(command)
nend = cubit.get_next_sideset_id()
sidelist = range(nstart, nend)
for block in self.block_mat:
name = cubit.get_exodus_entity_name('block', block)
velocity = self.material[name][1]
if velocity > 0:
faces = cubit.get_block_faces(block)
edges = []
for face in faces:
es = cubit.get_sub_elements("face", face, 1)
edges = edges + list(es)
edges = Set(edges)
dtstore, edgedtstore, ratiostore, edgeratiostore =\
self.seismic_resolution(edges, velocity, bins_d,
bins_u, sidelist)
dt.append(dtstore)
ed_dt.append(edgedtstore)
r.append(ratiostore)
ed_r.append(edgeratiostore)
self.ddt = zip(ed_dt, dt)
self.dr = zip(ed_r, r)
def sorter(x, y):
return cmp(x[1], y[1])
self.ddt.sort(sorter)
self.dr.sort(sorter)
print self.ddt, self.dr
print 'Deltat minimum => edge:' + str(self.ddt[0][0]) +\
' dt: ' + str(self.ddt[0][1])
print 'minimum frequency resolved => edge:' + str(self.dr[0][0]) +\
' frequency: ' + str(self.dr[0][1])
return self.ddt[0], self.dr[0]
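    # Usage sketch (names are illustrative, not part of this file): on a
    # meshed model with blocks already defined,
    #   profile = mesh()
    #   dt_edge, freq_edge = profile.mesh_analysis(frequency=5)
    # dt_edge is (edge id, minimum stable dt) and freq_edge is
    # (edge id, minimum resolved frequency); edges are also binned into
    # sidesets named 'ratio-[low_high[' for inspection in Cubit.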
class mesh(object, mesh_tools):
def __init__(self, hex27=False, cpml=False, cpml_size=False,
top_absorbing=False):
super(mesh, self).__init__()
self.netcdf_db = False
self._netcdf_num_nod_hex = 8
self._netcdf_num_nod_quad = 4
self.abs_block_ids = [1001, 1002, 1003, 1004, 1005, 1006]
self._netcdf_len_string = 33
self._netcdf_four = 4
self._netcdf_len_name = 33
self._netcdf_len_line = 81
self.netcdf = False
self.ncname = False
self.mesh_name = 'mesh_file'
self.nodecoord_name = 'nodes_coords_file'
self.material_name = 'materials_file'
self.nummaterial_name = 'nummaterial_velocity_file'
self.absname = 'absorbing_surface_file'
self.cpmlname = 'absorbing_cpml_file'
self.freename = 'free_or_absorbing_surface_file_zmax'
self.recname = 'STATIONS'
version_cubit = get_cubit_version()
if version_cubit >= 15:
self.face = 'SHELL'
elif version_cubit >= 12:
self.face = 'SHELL4'
else:
self.face = 'QUAD4'
self.hex = 'HEX'
if version_cubit <= 13:
if hex27:
print "ATTENTION **********************\n\nCubit <=\
12.2 doesn't support HEX27\nassuming HEX8 .....\n\n"
self.hex27 = False
else:
self.hex27 = hex27
self.edge = 'BAR2'
self.topo = 'face_topo'
self.topography = None
self.free = None
self.freetxt = 'free'
self.rec = 'receivers'
self.cpml = cpml
if cpml:
if cpml_size:
self.size = cpml_size
else:
                print 'please specify cpml_size if you want to use cpml'
self.top_absorbing = top_absorbing
if hex27:
cubit.cmd('block all except block 1001 1002 1003 1004 1005 1006 \
element type hex27')
if version_cubit >= 15:
cubit.cmd('block 1001 1002 1003 1004 1005 1006 \
element type shell9')
self._netcdf_num_nod_hex = 27
self._netcdf_num_nod_quad = 9
self.block_definition()
self.ngll = 5
self.percent_gll = 0.172
self.point_wavelength = 5
self.xmin = False
self.ymin = False
self.zmin = False
self.xmax = False
self.ymax = False
self.zmax = False
cubit.cmd('compress all')
def __repr__(self):
pass
def block_definition(self):
block_flag = []
block_mat = []
block_bc = []
block_bc_flag = []
material = {}
bc = {}
blocks = cubit.get_block_id_list()
for block in blocks:
name = cubit.get_exodus_entity_name('block', block)
ty = cubit.get_block_element_type(block)
# print block,blocks,ty,self.hex,self.face
if self.hex in ty:
flag = None
vel = None
vs = None
rho = None
q = 0
ani = 0
# material domain id
if "acoustic" in name:
imaterial = 1
elif "elastic" in name:
imaterial = 2
elif "poroelastic" in name:
imaterial = 3
else:
imaterial = 0
#
nattrib = cubit.get_block_attribute_count(block)
if nattrib > 1:
# material flag:
# positive => material properties,
# negative => interface/tomography domain
flag = int(cubit.get_block_attribute_value(block, 0))
if flag > 0 and nattrib >= 2:
# material properties
# vp
vel = cubit.get_block_attribute_value(block, 1)
if nattrib >= 3:
# vs
vs = cubit.get_block_attribute_value(block, 2)
if nattrib >= 4:
# density
rho = cubit.get_block_attribute_value(block, 3)
if nattrib >= 5:
# next: Q_kappa or Q_mu (new/old format
# style)
q = cubit.get_block_attribute_value(
block, 4)
if nattrib == 6:
# only 6 parameters given (skipping
# Q_kappa ), old format style
qmu = q
# Q_kappa is 10 times stronger than
# Q_mu
qk = q * 10
# last entry is anisotropic flag
ani = cubit.get_block_attribute_value(
block, 5)
elif nattrib > 6:
# Q_kappa
qk = q
# Q_mu
qmu = cubit.get_block_attribute_value(
block, 5)
if nattrib == 7:
# anisotropy_flag
ani = \
cubit.get_block_attribute_value(
block, 6)
# for q to be valid: it must be positive
if qk < 0 or qmu < 0:
print 'error, Q value invalid:', \
qk, qmu
break
elif flag < 0:
# interface/tomography domain
# velocity model
vel = name
attrib = cubit.get_block_attribute_value(block, 1)
if attrib == 1:
kind = 'interface'
flag_down = cubit.get_block_attribute_value(
block, 2)
flag_up = cubit.get_block_attribute_value(block, 3)
elif attrib == 2:
kind = 'tomography'
elif nattrib == 1:
flag = cubit.get_block_attribute_value(block, 0)
# print 'only 1 attribute ', name,block,flag
vel, vs, rho, qk, qmu, ani = (0, 0, 0, 9999., 9999., 0)
else:
flag = block
vel, vs, rho, qk, qmu, ani = (name, 0, 0, 9999., 9999., 0)
block_flag.append(int(flag))
block_mat.append(block)
if (flag > 0) and nattrib != 1:
par = tuple([imaterial, flag, vel, vs, rho, qk, qmu, ani])
elif flag < 0 and nattrib != 1:
if kind == 'interface':
par = tuple(
[imaterial, flag, kind, name, flag_down, flag_up])
elif kind == 'tomography':
par = tuple([imaterial, flag, kind, name])
elif flag == 0 or nattrib == 1:
par = tuple([imaterial, flag, name])
material[block] = par
elif ty == self.face or ty == 'SHELL4' or ty == 'SHELL9':
block_bc_flag.append(4)
block_bc.append(block)
bc[block] = 4 # face has connectivity = 4
if name == self.topo or block == 1001:
self.topography = block
if self.freetxt in name:
self.free = block
elif ty == 'SPHERE':
pass
else:
# block elements differ from HEX8/QUAD4/SHELL4
print '****************************************'
print 'block not properly defined:'
print ' name:', name
print ' type:', ty
print
print 'please check your block definitions!'
print
print 'only supported types are:'
print ' HEX/HEX8/HEX27 for volumes'
print ' QUAD4 for surface'
print ' SHELL4/SHELL9 for surface'
print '****************************************'
continue
return None, None, None, None, None, None, None, None
nsets = cubit.get_nodeset_id_list()
if len(nsets) == 0:
self.receivers = None
for nset in nsets:
name = cubit.get_exodus_entity_name('nodeset', nset)
if name == self.rec:
self.receivers = nset
else:
print 'nodeset ' + name + ' not defined'
self.receivers = None
try:
self.block_mat = block_mat
self.block_flag = block_flag
self.block_bc = block_bc
self.block_bc_flag = block_bc_flag
self.material = material
self.bc = bc
print 'HEX Blocks:'
for m, f in zip(self.block_mat, self.block_flag):
print 'block ', m, 'material flag ', f
print 'Absorbing Boundary Conditions:'
for m, f in zip(self.block_bc, self.block_bc_flag):
print 'bc ', m, 'bc flag ', f
print 'Topography (free surface)'
print self.topography
print 'Free surface'
print self.free
except:
print '****************************************'
print 'sorry, no blocks or blocks not properly defined'
print block_mat
print block_flag
print block_bc
print block_bc_flag
print material
print bc
print '****************************************'
def get_hex_connectivity(self, ind):
if self.hex27:
cubit.silent_cmd('group "nh" add Node in hex ' + str(ind))
group1 = cubit.get_id_from_name("nh")
result = cubit.get_group_nodes(group1)
if len(result) != 27:
raise RuntimeError(
'Error: hexes with less than 27 nodes, hex27 True')
cubit.cmd('del group ' + str(group1))
else:
result = cubit.get_connectivity('hex', ind)
return result
def get_face_connectivity(self, ind):
if self.hex27:
cubit.silent_cmd('group "nf" add Node in face ' + str(ind))
group1 = cubit.get_id_from_name("nf")
result = cubit.get_group_nodes(group1)
cubit.cmd('del group ' + str(group1))
else:
result = cubit.get_connectivity('face', ind)
return result
def mat_parameter(self, properties):
# print properties
# format nummaterials file: #material_domain_id #material_id #rho #vp
# #vs #Q_kappa #Q_mu #anisotropy_flag
flag = properties[1]
print 'number of material:', flag
if flag > 0:
vel = properties[2]
            if properties[3] is None and type(vel) != str:
# velocity model scales with given vp value
if vel >= 30:
m2km = 1000.
else:
m2km = 1.
vp = vel / m2km
                # empirical vp-to-density scaling (Brocher-type polynomial)
                rho = (1.6612 * vp - 0.472 * vp**2 + 0.0671 * vp**3 -
                       0.0043 * vp**4 + 0.000106 * vp**5) * m2km
txt = '%1i %3i %20f %20f %20f %1i %1i\n' % (
properties[0], properties[1], rho, vel, vel / (3**.5), 0, 0)
elif type(vel) != str and vel != 0.:
# velocity model given as vp,vs,rho,..
# format nummaterials file: #material_domain_id #material_id
# #rho #vp #vs #Q_kappa #Q_mu #anisotropy_flag
try:
qk = properties[5]
except:
qk = 9999.
try:
qmu = properties[6]
except:
qmu = 9999.
try:
ani = properties[7]
except:
ani = 0
# format: #material_domain_id #material_id #rho #vp #vs
# #Q_kappa #Q_mu #anisotropy_flag
txt = '%1i %3i %20f %20f %20f %20f %20f %2i\n' % (
properties[0], properties[1], properties[4], properties[2],
properties[3], qk, qmu, ani)
            elif type(vel) != str and vel == 0.:
helpstring = "#material_domain_id #material_id #rho #vp #vs \
#Q_kappa #Q_mu #anisotropy"
txt = '%1i %3i %s \n' % (
properties[0], properties[1], helpstring)
else:
helpstring = " --> syntax: #material_domain_id \
#material_id #rho #vp #vs #Q_kappa #Q_mu #anisotropy"
txt = '%1i %3i %s %s\n' % (properties[0], properties[
1], properties[2], helpstring)
elif flag < 0:
if properties[2] == 'tomography':
txt = '%1i %3i %s %s\n' % (properties[0], properties[
1], properties[2], properties[3])
elif properties[2] == 'interface':
txt = '%1i %3i %s %s %1i %1i\n' % (
properties[0], properties[1], properties[2], properties[3],
properties[4], properties[5])
else:
helpstring = " --> syntax: #material_domain_id \
'tomography' #file_name "
txt = '%1i %3i %s %s \n' % (properties[0], properties[
1], properties[2], helpstring)
#
# print txt
return txt
def nummaterial_write(self, nummaterial_name, placeholder=True):
print 'Writing ' + nummaterial_name + '.....'
nummaterial = open(nummaterial_name, 'w')
for block in self.block_mat:
# name=cubit.get_exodus_entity_name('block',block)
nummaterial.write(str(self.mat_parameter(self.material[block])))
if placeholder:
txt = '''
! note: format of nummaterial_velocity_file must be
! #(1)domain_id #(2)material_id #(3)rho #(4)vp #(5)vs #(6)Q_k #(7)Q_mu #(8)ani
!
! where
! domain_id : 1=acoustic / 2=elastic
! material_id : POSITIVE integer identifier of material block
! rho : density
! vp : P-velocity
! vs : S-velocity
! Q_k : 9999 = no Q_kappa attenuation
! Q_mu : 9999 = no Q_mu attenuation
! ani : 0=no anisotropy/ 1,2,.. check with aniso_model.f90
!
!example:
!2 1 2300 2800 1500 9999.0 9999.0 0
!or
! #(1)domain_id #(2)material_id tomography elastic #(3)filename #(4)positive
!
! where
! domain_id : 1=acoustic / 2=elastic
! material_id : NEGATIVE integer identifier of material block
! filename : filename of the tomography file
! positive : a positive unique identifier
!
!example:
!2 -1 tomography elastic tomo.xyz 1
'''
nummaterial.write(txt)
nummaterial.close()
print 'Ok'
def create_hexnode_string(self, hexa, hexnode_string=True):
nodes = self.get_hex_connectivity(hexa)
# nodes=self.jac_check(nodes) #is it valid for 3D? TODO
if self.hex27:
ordered_nodes = [hexa] + list(nodes[:20]) + [nodes[21]] +\
[nodes[25]] + [nodes[24]] + [nodes[26]] +\
[nodes[23]] + [nodes[22]] + [nodes[20]]
txt = ' '.join(str(x) for x in ordered_nodes)
txt = txt + '\n'
else:
txt = str(hexa) + ' ' + ' '.join(str(x) for x in nodes)
txt = txt + '\n'
if hexnode_string:
return txt
else:
            return map(int, txt.split())
def create_facenode_string(self, hexa, face, normal=None, cknormal=True,
facenode_string=True):
nodes = self.get_face_connectivity(face)
if cknormal:
nodes_ok = self.normal_check(nodes[0:4], normal)
if self.hex27:
nodes_ok2 = self.normal_check(nodes[4:8], normal)
else:
nodes_ok = nodes[0:4]
if self.hex27:
nodes_ok2 = nodes[4:8]
#
if self.hex27:
ordered_nodes = [hexa] + \
list(nodes_ok) + list(nodes_ok2) + [nodes[8]]
txt = ' '.join(str(x) for x in ordered_nodes)
txt = txt + '\n'
else:
txt = str(hexa) + ' ' + ' '.join(str(x) for x in nodes_ok)
txt = txt + '\n'
if facenode_string:
return txt
else:
            return map(int, txt.split())
def mesh_write(self, mesh_name):
print 'Writing ' + mesh_name + '..... v2'
num_elems = cubit.get_hex_count()
meshfile = open(mesh_name, 'w')
print ' total number of elements:', str(num_elems)
meshfile.write(str(num_elems) + '\n')
for block, flag in zip(self.block_mat, self.block_flag):
hexes = cubit.get_block_hexes(block)
print 'block ', block, ' hexes ', len(hexes)
for hexa in hexes:
txt = self.create_hexnode_string(hexa)
meshfile.write(txt)
meshfile.close()
print 'Ok'
def material_write(self, mat_name):
mat = open(mat_name, 'w')
print 'Writing ' + mat_name + '.....'
for block, flag in zip(self.block_mat, self.block_flag):
print 'block ', block, 'flag ', flag
hexes = cubit.get_block_hexes(block)
for hexa in hexes:
mat.write(('%10i %10i\n') % (hexa, flag))
mat.close()
print 'Ok'
def get_extreme(self, c, cmin, cmax):
if not cmin and not cmax:
cmin = c
cmax = c
else:
if c < cmin:
cmin = c
if c > cmax:
cmax = c
return cmin, cmax
def nodescoord_write(self, nodecoord_name):
nodecoord = open(nodecoord_name, 'w')
print 'Writing ' + nodecoord_name + '.....'
node_list = cubit.parse_cubit_list('node', 'all')
num_nodes = len(node_list)
print ' number of nodes:', str(num_nodes)
nodecoord.write('%10i\n' % num_nodes)
#
for node in node_list:
x, y, z = cubit.get_nodal_coordinates(node)
self.xmin, self.xmax = self.get_extreme(x, self.xmin, self.xmax)
self.ymin, self.ymax = self.get_extreme(y, self.ymin, self.ymax)
self.zmin, self.zmax = self.get_extreme(z, self.zmin, self.zmax)
txt = ('%10i %20f %20f %20f\n') % (node, x, y, z)
nodecoord.write(txt)
nodecoord.close()
print 'Ok'
def free_write(self, freename=None):
# free surface
cubit.cmd('set info off')
cubit.cmd('set echo off')
cubit.cmd('set journal off')
normal = (0, 0, 1)
if not freename:
freename = self.freename
# writes free surface file
print 'Writing ' + freename + '.....'
freehex = open(freename, 'w')
#
# searches block definition with name face_topo
for block, flag in zip(self.block_bc, self.block_bc_flag):
if block == self.topography:
name = cubit.get_exodus_entity_name('block', block)
print 'free surface (topography) block name:', \
name, 'id:', block
quads_all = cubit.get_block_faces(block)
print ' number of faces = ', len(quads_all)
dic_quads_all = dict(zip(quads_all, quads_all))
freehex.write('%10i\n' % len(quads_all))
list_hex = cubit.parse_cubit_list('hex', 'all')
for h in list_hex:
faces = cubit.get_sub_elements('hex', h, 2)
for f in faces:
if f in dic_quads_all.keys():
txt = self.create_facenode_string(
h, f, normal, cknormal=True)
freehex.write(txt)
freehex.close()
elif block == self.free:
name = cubit.get_exodus_entity_name('block', block)
print 'free surface block name:', name, 'id:', block
quads_all = cubit.get_block_faces(block)
print ' number of faces = ', len(quads_all)
dic_quads_all = dict(zip(quads_all, quads_all))
freehex.write('%10i\n' % len(quads_all))
list_hex = cubit.parse_cubit_list('hex', 'all')
for h in list_hex:
faces = cubit.get_sub_elements('hex', h, 2)
for f in faces:
if f in dic_quads_all.keys():
txt = self.create_facenode_string(
h, f, normal, cknormal=False)
freehex.write(txt)
freehex.close()
print 'Ok'
cubit.cmd('set info on')
cubit.cmd('set echo on')
def check_cmpl_size(self, case='x'):
if case == 'x':
vmaxtmp = self.xmax
vmintmp = self.xmin
elif case == 'y':
vmaxtmp = self.ymax
vmintmp = self.ymin
elif case == 'z':
vmaxtmp = self.zmax
vmintmp = self.zmin
if self.size > .3 * (vmaxtmp - vmintmp):
            print 'please select a cpml size smaller than 30% of the ' +\
                case + ' extent of the volume'
            print vmaxtmp - vmintmp, .3 * (vmaxtmp - vmintmp)
            print 'cpml set to False, no ' + self.cpmlname +\
                ' file will be created'
return False, False
else:
vmin = vmintmp + self.size
vmax = vmaxtmp - self.size
return vmin, vmax
def select_cpml(self):
xmin, xmax = self.check_cmpl_size(case='x')
ymin, ymax = self.check_cmpl_size(case='y')
zmin, zmax = self.check_cmpl_size(case='z')
#
if xmin is False or xmax is False or ymin is False or ymax is False or\
zmin is False or zmax is False:
return False
else:
txt = "group 'hxmin' add hex with X_coord < " + str(xmin)
cubit.cmd(txt)
txt = "group 'hxmax' add hex with X_coord > " + str(xmax)
cubit.cmd(txt)
txt = "group 'hymin' add hex with Y_coord < " + str(ymin)
cubit.cmd(txt)
txt = "group 'hymax' add hex with Y_coord > " + str(ymax)
cubit.cmd(txt)
txt = "group 'hzmin' add hex with Z_coord < " + str(zmin)
cubit.cmd(txt)
txt = "group 'hzmax' add hex with Z_coord > " + str(zmax)
cubit.cmd(txt)
from sets import Set
group1 = cubit.get_id_from_name("hxmin")
cpml_xmin = Set(list(cubit.get_group_hexes(group1)))
group1 = cubit.get_id_from_name("hymin")
cpml_ymin = Set(list(cubit.get_group_hexes(group1)))
group1 = cubit.get_id_from_name("hxmax")
cpml_xmax = Set(list(cubit.get_group_hexes(group1)))
group1 = cubit.get_id_from_name("hymax")
cpml_ymax = Set(list(cubit.get_group_hexes(group1)))
group1 = cubit.get_id_from_name("hzmin")
cpml_zmin = Set(list(cubit.get_group_hexes(group1)))
if self.top_absorbing:
group1 = cubit.get_id_from_name("hzmax")
cpml_zmax = Set(list(cubit.get_group_hexes(group1)))
else:
cpml_zmax = Set([])
cpml_all = cpml_ymin | cpml_ymax | cpml_xmin | cpml_xmax | \
cpml_zmin | cpml_zmax
cpml_x = cpml_all - cpml_zmin - cpml_ymin - cpml_ymax - cpml_zmax
cpml_y = cpml_all - cpml_zmin - cpml_xmin - cpml_xmax - cpml_zmax
cpml_xy = cpml_all - cpml_zmin - cpml_y - cpml_x - cpml_zmax
cpml_z = cpml_all - cpml_xmin - cpml_ymin - cpml_ymax - cpml_xmax
cpml_xz = cpml_zmin - cpml_ymin - cpml_ymax - cpml_z
cpml_yz = cpml_zmin - cpml_xmin - cpml_xmax - cpml_z
cpml_xyz = cpml_zmin - cpml_xz - cpml_yz - cpml_z
txt = ' '.join(str(h) for h in cpml_x)
cubit.cmd("group 'x_cpml' add hex " + txt)
txt = ' '.join(str(h) for h in cpml_y)
cubit.cmd("group 'y_cpml' add hex " + txt)
txt = ' '.join(str(h) for h in cpml_z)
cubit.cmd("group 'z_cpml' add hex " + txt)
txt = ' '.join(str(h) for h in cpml_xy)
cubit.cmd("group 'xy_cpml' add hex " + txt)
txt = ' '.join(str(h) for h in cpml_xz)
cubit.cmd("group 'xz_cpml' add hex " + txt)
txt = ' '.join(str(h) for h in cpml_yz)
cubit.cmd("group 'yz_cpml' add hex " + txt)
txt = ' '.join(str(h) for h in cpml_xyz)
cubit.cmd("group 'xyz_cpml' add hex " + txt)
return cpml_x, cpml_y, cpml_z, cpml_xy, cpml_xz, cpml_yz, cpml_xyz
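    # Note (editorial): the set arithmetic above classifies each absorbing hex
    # by CPML orientation - pure x, y or z layers, the xy/xz/yz edge overlaps
    # and the xyz corners - so abs_write() can tag every hex exactly once with
    # the index of the group it belongs to.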
def abs_write(self, absname=None):
# absorbing boundaries
import re
cubit.cmd('set info off')
cubit.cmd('set echo off')
cubit.cmd('set journal off')
if not absname:
absname = self.absname
if self.cpml:
if not absname:
absname = self.cpmlname
print 'Writing cpml' + absname + '.....'
list_cpml = self.select_cpml()
if list_cpml is False:
print 'error writing cpml files'
return
else:
abshex_cpml = open(absname, 'w')
hexcount = sum(map(len, list_cpml))
abshex_cpml.write(('%10i\n') % (hexcount))
for icpml, lcpml in enumerate(list_cpml):
for hexa in lcpml:
abshex_cpml.write(('%10i %10i\n') % (hexa, icpml))
stacey_absorb = True
if stacey_absorb:
#
#
if not absname:
absname = self.absname
# loops through all block definitions
list_hex = cubit.parse_cubit_list('hex', 'all')
for block, flag in zip(self.block_bc, self.block_bc_flag):
if block != self.topography:
name = cubit.get_exodus_entity_name('block', block)
print ' block name:', name, 'id:', block
cknormal = True
abshex_local = False
# opens file
if re.search('xmin', name):
print 'xmin'
abshex_local = open(absname + '_xmin', 'w')
normal = (-1, 0, 0)
elif re.search('xmax', name):
print "xmax"
abshex_local = open(absname + '_xmax', 'w')
normal = (1, 0, 0)
elif re.search('ymin', name):
print "ymin"
abshex_local = open(absname + '_ymin', 'w')
normal = (0, -1, 0)
elif re.search('ymax', name):
print "ymax"
abshex_local = open(absname + '_ymax', 'w')
normal = (0, 1, 0)
elif re.search('bottom', name):
print "bottom"
abshex_local = open(absname + '_bottom', 'w')
normal = (0, 0, -1)
elif re.search('abs', name):
print "abs all - experimental, check the output"
cknormal = False
abshex_local = open(absname, 'w')
else:
if block == 1003:
print 'xmin'
abshex_local = open(absname + '_xmin', 'w')
normal = (-1, 0, 0)
elif block == 1004:
print "ymin"
abshex_local = open(absname + '_ymin', 'w')
normal = (0, -1, 0)
elif block == 1005:
print "xmax"
abshex_local = open(absname + '_xmax', 'w')
normal = (1, 0, 0)
elif block == 1006:
print "ymax"
abshex_local = open(absname + '_ymax', 'w')
normal = (0, 1, 0)
elif block == 1002:
print "bottom"
abshex_local = open(absname + '_bottom', 'w')
normal = (0, 0, -1)
elif block == 1000:
print "custumized"
abshex_local = open(absname, 'w')
cknormal = False
normal = None
#
#
if abshex_local:
# gets face elements
quads_all = cubit.get_block_faces(block)
dic_quads_all = dict(zip(quads_all, quads_all))
print ' number of faces = ', len(quads_all)
abshex_local.write('%10i\n' % len(quads_all))
for h in list_hex:
faces = cubit.get_sub_elements('hex', h, 2)
for f in faces:
if f in dic_quads_all.keys():
txt = self.create_facenode_string(
h, f, normal=normal, cknormal=cknormal)
abshex_local.write(txt)
abshex_local.close()
print 'Ok'
cubit.cmd('set info on')
cubit.cmd('set echo on')
def surface_write(self, pathdir=None):
# optional surfaces, e.g. moho_surface
# should be created like e.g.:
# > block 10 face in surface 2
# > block 10 name 'moho_surface'
import re
for block in self.block_bc:
if block != self.topography:
name = cubit.get_exodus_entity_name('block', block)
# skips block names like face_abs**, face_topo**
if re.search('abs', name):
continue
elif re.search('topo', name):
continue
elif re.search('surface', name):
filename = pathdir + name + '_file'
else:
continue
# gets face elements
print ' surface block name: ', name, 'id: ', block
quads_all = cubit.get_block_faces(block)
print ' face = ', len(quads_all)
if len(quads_all) == 0:
continue
# writes out surface infos to file
print 'Writing ' + filename + '.....'
surfhex_local = open(filename, 'w')
dic_quads_all = dict(zip(quads_all, quads_all))
# writes number of surface elements
surfhex_local.write('%10i\n' % len(quads_all))
# writes out element node ids
list_hex = cubit.parse_cubit_list('hex', 'all')
for h in list_hex:
faces = cubit.get_sub_elements('hex', h, 2)
for f in faces:
if f in dic_quads_all.keys():
txt = self.create_facenode_string(
h, f, cknormal=False)
surfhex_local.write(txt)
# closes file
surfhex_local.close()
print 'Ok'
def rec_write(self, recname):
print 'Writing ' + self.recname + '.....'
recfile = open(self.recname, 'w')
nodes = cubit.get_nodeset_nodes(self.receivers)
for i, n in enumerate(nodes):
x, y, z = cubit.get_nodal_coordinates(n)
recfile.write('ST%i XX %20f %20f 0.0 0.0 \n' % (i, x, z))
recfile.close()
print 'Ok'
def write(self, path='', netcdf_name=False):
cubit.cmd('set info off')
cubit.cmd('set echo off')
cubit.cmd('set journal off')
cubit.cmd('compress all')
if len(path) != 0:
if path[-1] != '/':
path = path + '/'
if netcdf_name:
self._write_netcdf(path=path, name=netcdf_name)
else:
self._write_ascii(path=path)
cubit.cmd('set info on')
cubit.cmd('set echo on')
# def _write_netcdf(self, path='.', name='mesh.specfem3D'):
# if self.cpml:
# raise NotImplementedError(
# 'cmpl not implemented for netcdf specfem3d mesh format')
#
# try:
# from netCDF4 import Dataset
# except:
# raise ImportError('error importing NETCDF4')
# self.netcdf_db = Dataset(name, mode='w', format='NETCDF4')
# self.netcdf_db.createDimension("len_string", self._netcdf_len_string)
# self.netcdf_db.createDimension("len_line", self._netcdf_len_line)
# self.netcdf_db.createDimension("four", self._netcdf_four)
# self.netcdf_db.createDimension("len_name", self._netcdf_len_name)
# self.netcdf_db.createDimension("time_step", 0)
# self.netcdf_db.createDimension("num_dim", 3)
# self.netcdf_db.createDimension(
# "num_node_hex", self._netcdf_num_nod_hex)
# self.netcdf_db.createDimension(
# "num_node_quad", self._netcdf_num_nod_quad)
# num_nodes = len(cubit.get_node_count())
# self.netcdf_db.createDimension("num_nodes", num_nodes)
# self.netcdf_db.createDimension("num_elem", cubit.get_hex_count())
# num_block = len(cubit.get_block_id_list())
# self.netcdf_db.createDimension("num_el_blk", num_block)
#
# self.netcdf_db.createVariable(
# "eb_prop1", "i4", ("num_el_blk",)) # [1 1001 1002 ...]
# self.netcdf_db.setncattr('name', 'ID')
# self.netcdf_db.createVariable(
# "node_coord", "f8", ("num_nodes", self.num_dim))
# self.netcdf_db.createVariable("node_map", "i4", ("num_nodes", 1))
#
# self.netcdf_db.createVariable(
# "eb_names", "S1", ("num_el_blk", "len_name")) #[["v","o","l"..]..]
# self.netcdf_db.createVariable(
# "coor_names", "S1", ("num_dim", "len_name")) #[["x","",""..]..]
#
# self.netcdf_db.createVariable(
# "mesh", "i4", ("num_elem", str(self.num_node_hex + 1)))
# self.netcdf_db.createVariable(
# "free", "i4", ("num_elem", str(self.num_node_quad + 1)))
# self.netcdf_db.createVariable("material", "i4", ("num_elem", 2))
# self.netcdf_db.createVariable("block_hex", "i4", ("num_elem", 2))
#
# self.netcdf_db.variables['eb_prop1'][:] = list(
# self.block_mat) + list(self.block_bc)
# self.netcdf_db.variables['mesh'][:] = self._netcdf_mesh_array()
# self.netcdf_db.variables['material'][:] = \
# self._netcdf_material_array()
# self.netcdf_db.variables['node_map'][:] = \
# self._netcdf_nodescoord_array()[0]
# self.netcdf_db.variables['node_coord'][:] = \
# self._netcdf_nodescoord_array()[1]
# self.netcdf_db.variables['free'][:] = self._netcdf_free_array()
#
# for block, flag in zip(self.block_bc, self.block_bc_flag):
# if block != self.topography and block != self.free:
# label, normal, cknormal = self._get_bc_flag(block)
# quads_all = cubit.get_block_faces(block)
# print label
# print ' number of faces = ', len(quads_all)
# self.netcdf_db.createDimension(
# 'num_el_' + label, len(quads_all))
# self.netcdf_db.createVariable(
# "abs_" + label, "i4", ('num_el_' + label, \
# str(self.num_node_quad + 1)))
# self.netcdf_db.variables[
# "abs_" + label][:] = self._netcdf_abs_array(quads_all,
# normal,
# cknormal)
# elif block == self.topography or block == self.free:
# quads_all = cubit.get_block_faces(block)
# self.netcdf_db.createDimension('num_el_free', len(quads_all))
# self.netcdf_db.createVariable(
# "free", "i4", ("num_elem", str(self.num_node_quad + 1)))
# self.netcdf_db.variables['free'][:] = \
# self._netcdf_free_array()
#
# self.nummaterial_write(path + self.nummaterial_name)
def _get_bc_flag(self, block):
import re
name = cubit.get_exodus_entity_name('block', block)
print ' block name:', name, 'id:', block
cknormal = True
if re.search('xmin', name):
label = 'xmin'
normal = (-1, 0, 0)
elif re.search('xmax', name):
label = "xmax"
normal = (1, 0, 0)
elif re.search('ymin', name):
label = "ymin"
normal = (0, -1, 0)
elif re.search('ymax', name):
label = "ymax"
normal = (0, 1, 0)
elif re.search('bottom', name):
label = "bottom"
normal = (0, 0, -1)
elif re.search('abs', name):
label = "all"
print "abs all - experimental, check the output"
cknormal = False
else:
if block == 1003:
label = 'xmin'
normal = (-1, 0, 0)
elif block == 1004:
label = "ymin"
normal = (0, -1, 0)
elif block == 1005:
label = "xmax"
normal = (1, 0, 0)
elif block == 1006:
label = "ymax"
normal = (0, 1, 0)
elif block == 1002:
label = "bottom"
normal = (0, 0, -1)
elif block == 1000:
label = "all"
cknormal = False
normal = None
return label, normal, cknormal
# def _netcdf_abs_array(self, quads_all, normal, cknormal):
# # absorbing boundaries
# cubit.cmd('set info off')
# cubit.cmd('set echo off')
# cubit.cmd('set journal off')
# list_hex = cubit.parse_cubit_list('hex', 'all')
# dic_quads_all = dict(zip(quads_all, quads_all))
# abs_array = []
# for h in list_hex:
# faces = cubit.get_sub_elements('hex', h, 2)
# for f in faces:
# if dic_quads_all.has_key(f):
# abs_array.append(self.create_facenode_string(
# h, f, normal=normal, cknormal=cknormal,
# facenode_string=False))
# print 'Ok'
# cubit.cd('set info on')
# cubit.cmd('set echo on')
# return abs_array
# def _netcdf_nodescoord_array(self):
# print 'Writing node coordinates..... netcdf'
# node_list = cubit.parse_cubit_list('node', 'all')
# num_nodes = len(node_list)
# print ' number of nodes:', str(num_nodes)
# #
# coord_array = []
# map_array = []
# for node in node_list:
# x, y, z = cubit.get_nodal_coordinates(node)
# self.xmin, self.xmax = self.get_extreme(x, self.xmin, self.xmax)
# self.ymin, self.ymax = self.get_extreme(y, self.ymin, self.ymax)
# self.zmin, self.zmax = self.get_extreme(z, self.zmin, self.zmax)
# map_array.append([map])
# coord_array.append([x, y, z])
# print 'Ok'
# return map_array, coord_array
# def _netcdf_free_array(self):
# # free surface
# cubit.cmd('set info off')
# cubit.cmd('set echo off')
# cubit.cmd('set journal off')
# normal = (0, 0, 1)
# # writes free surface file
# print 'Writing free surface..... netcdf'
# #
# # searches block definition with name face_topo
# free_array = []
# for block, flag in zip(self.block_bc, self.block_bc_flag):
# if block == self.topography:
# name = cubit.get_exodus_entity_name('block', block)
# print 'free surface (topography) block name:',name,'id:',block
# quads_all = cubit.get_block_faces(block)
# print ' number of faces = ', len(quads_all)
# dic_quads_all = dict(zip(quads_all, quads_all))
# list_hex = cubit.parse_cubit_list('hex', 'all')
# for h in list_hex:
# faces = cubit.get_sub_elements('hex', h, 2)
# for f in faces:
# if dic_quads_all.has_key(f):
# # print f
# free_array.append(self.create_facenode_string(
# h, f, normal, cknormal=True,
# facenode_string=False))
# elif block == self.free:
# name = cubit.get_exodus_entity_name('block', block)
# print 'free surface block name:', name, 'id:', block
# quads_all = cubit.get_block_faces(block)
# print ' number of faces = ', len(quads_all)
# dic_quads_all = dict(zip(quads_all, quads_all))
# list_hex = cubit.parse_cubit_list('hex', 'all')
# for h in list_hex:
# faces = cubit.get_sub_elements('hex', h, 2)
# for f in faces:
# if dic_quads_all.has_key(f):
# free_array.append(self.create_facenode_string(
# h, f, normal, cknormal=False,
# facenode_string=False))
# print 'Ok'
# cubit.cmd('set info on')
# cubit.cmd('set echo on')
# return free_array
# def _netcdf_material_array(self):
# print 'Writing material...... netcdf'
# material_array = []
# for block, flag in zip(self.block_mat, self.block_flag):
# print 'block ', block, 'flag ', flag
# hexes = cubit.get_block_hexes(block)
# for hexa in hexes:
# material_array.append([hexa, flag])
# print 'Ok'
# return material_array
# def _netcdf_mesh_array(self):
# print 'Writing ' + mesh_name + '..... netcdf'
# print 'total number of elements:', str(cubit.get_hex_count())
# mesh_array = []
# for block, flag in zip(self.block_mat, self.block_flag):
# hexes = cubit.get_block_hexes(block)
# print 'block ', block, ' hexes ', len(hexes)
# for hexa in hexes:
# mesh_array.append(create_hexnode_string(
# hexa, hexnode_string=False))
# print 'Ok'
# return mesh_array
#
def _write_ascii(self, path=''):
# mesh file
self.mesh_write(path + self.mesh_name)
# mesh material
self.material_write(path + self.material_name)
# mesh coordinates
self.nodescoord_write(path + self.nodecoord_name)
# free surface: face_top
self.free_write(path + self.freename)
# absorbing surfaces: abs_***
if self.cpml:
self.abs_write(path + self.cpmlname)
else:
self.abs_write(path + self.absname)
# material definitions
self.nummaterial_write(path + self.nummaterial_name)
# any other surfaces: ***surface***
self.surface_write(path)
# receivers
if self.receivers:
self.rec_write(path + self.recname)
def export2SPECFEM3D(path_exporting_mesh_SPECFEM3D='.', hex27=False,
cpml=False, cpml_size=False, top_absorbing=False):
sem_mesh = mesh(hex27, cpml, cpml_size, top_absorbing)
# sem_mesh.block_definition()
# print sem_mesh.block_mat
# print sem_mesh.block_flag
#
sem_mesh.write(path=path_exporting_mesh_SPECFEM3D)
print 'END SPECFEM3D exporting process......'
if cpml:
cmd = 'save as "cpml.cub" overwrite'
cubit.cmd(cmd)
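# Usage sketch (run from the Cubit/Trelis Python console after meshing and
# block definition; the import path and output directory are assumptions
# based on the repository layout, adapt as needed):
#   from geocubitlib.cubit2specfem3d import export2SPECFEM3D
#   export2SPECFEM3D('MESH/', hex27=False, cpml=False)
# This writes mesh_file, nodes_coords_file, materials_file,
# nummaterial_velocity_file, the free/absorbing surface files and, if a
# 'receivers' nodeset exists, a STATIONS file into the given directory.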
| komatits/specfem3d | CUBIT_GEOCUBIT/geocubitlib/cubit2specfem3d.py | Python | gpl-2.0 | 62,504 | ["NetCDF"] | 57b871002acdb37b4e504dd4fb36d1ce6a4fedddc805897f31e0b7cf747e586c |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 07 13:37:47 2016
@author: Greeff
"""
import numpy as np
import cv2
import matplotlib.pyplot as plt
from math import sqrt as math_sqrt
from QtLoadSaveObjects import make_config_dict
np.seterr(invalid='raise')
#==============================================================================
class SmallAreaDenseMethod():
def __init__(self,frame,roi_pnts,**kwargs):
'''
        Apply the dense optical flow method to a small-area ROI.
        frame = roi area in main frame
        roi_pnts = (x1,y1,x2,y2), coords relative to main frame
        gear_centre = (cx,cy), coords relative to main frame
roi size 40x40
'''
#info
self.name_id = kwargs.get('name_id','DM_0')
#debug control
self.plot_on = kwargs.get('plot_on',False) # plots data
        self.draw_on = kwargs.get('draw_on',False) #draws additional screens
        self.collect_on = kwargs.get('collect_on',False) #collects data in list
        self.collect_on = self.collect_on or self.plot_on or self.draw_on
        #plotting placeholder, for debugging/testing
self.plot_axes = None
#detection parameters
self.min_flow_speed = kwargs.get('min_flow_speed',2.0) #pxl/frame
#START INIT
        #roi region, size parameters and disk_mask - recalculate on change of ROI/params etc.
self.update_roi(roi_pnts,**kwargs)
#init first frames
frame_roi = frame[self.roi_y1:self.roi_y2,self.roi_x1:self.roi_x2]
self.pre_process(frame_roi) #determine frame_gray
self.prev_gray = self.frame_gray.copy()
self.flow = None
#data collection
self.speed_step_current = 0
self.time_step_current = 0
        #additional data for review, used with collect_on/plot_on
self.speed_step = [] #average speed in pxl/frame
self.speed_mms = [] #speed converted to mm/s
self.time_step = [] #processing time step as measured
self.num_rejected = []
self.num_total = []
# self.angle_x_pnt = None
# self.angle_y_pnt = None
# self.angle_pnt = None
# self.angle_flow = None
# self.angle_centre = None
# self.angle_indices = None
#set apply method
w_timestep = kwargs.get('w_timestep',False)
if w_timestep:
self.apply = self.apply_w_timestep
else:
self.apply = self.apply_wo_timestep
# print()
# print('cal_factor: {}'.format(self.cal_factor))
# print('fps: {}'.format(self.fps))
# print('speed_conversion: {}'.format(self.speed_conversion))
# print('downscale_factor: {}'.format(self.downscale_factor))
#------------------------------------------------------------------------------
def update_roi(self,roi_pnts,**kwargs):
'''
Update roi parameters and dependent vars
'''
self.roi_x1,self.roi_y1,self.roi_x2,self.roi_y2 = roi_pnts
self.roi_pnt1 = (self.roi_x1,self.roi_y1)
self.roi_pnt2 = (self.roi_x2,self.roi_y2)
#speed conversion
self.fps = kwargs.get('cam_fps',15)
self.cal_factor = kwargs.get('cal_factor',0.0212)
self.speed_conversion = self.cal_factor*self.fps
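        # Unit check (editorial sketch): the Farneback flow is in pixels per
        # frame, so with the default cal_factor = 0.0212 mm/pixel and
        # cam_fps = 15 frame/s, speed_conversion = 0.0212 * 15 = 0.318
        # (mm/s per pxl/frame); a mean flow of 2.0 pxl/frame therefore maps
        # to roughly 0.64 mm/s in speed_mms.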
#------------------------------------------------------------------------------
def pre_process(self,frame):
'''
Preprocess raw frame, apply gear mask
'''
self.frame_gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
#------------------------------------------------------------------------------
def apply_w_timestep(self,frame,*args,**kwargs):
'''
Estimate speed and measure timestep for whole process
'''
e1 = cv2.getTickCount()
self.apply_wo_timestep(frame)
e2 = cv2.getTickCount()
self.time_step_current = (e2 - e1)/ cv2.getTickFrequency()
if self.collect_on:
self.time_step.append(self.time_step_current)
#------------------------------------------------------------------------------
def apply_wo_timestep(self,frame,*args,**kwargs):
'''
Estimate speed, without measuring the time step
cv2.calcOpticalFlowFarneback(prev, next, flow,
pyr_scale, levels, winsize,
iterations, poly_n, poly_sigma,
flags) → flow
Parameters:
prev – first 8-bit single-channel input image.
next – second input image of the same size and the same type as prev.
flow – computed flow image that has the same size as prev and type CV_32FC2.
pyr_scale – parameter, specifying the image scale (<1) to build pyramids for each image;
pyr_scale=0.5 means a classical pyramid, where each next layer is twice smaller than the previous one.
levels – number of pyramid layers including the initial image;
levels=1 means that no extra layers are created and only the original images are used.
winsize – averaging window size;
larger values increase the algorithm robustness to image noise
and give more chances for fast motion detection,
but yield more blurred motion field.
iterations – number of iterations the algorithm does at each pyramid level.
poly_n – size of the pixel neighborhood used to find polynomial expansion in each pixel;
larger values mean that the image will be approximated with smoother surfaces,
yielding more robust algorithm and more blurred motion field,
typically poly_n =5 or 7.
poly_sigma – standard deviation of the Gaussian that is used to smooth
derivatives used as a basis for the polynomial expansion;
for poly_n=5, you can set poly_sigma=1.1, for poly_n=7,
a good value would be poly_sigma=1.5.
flags –
operation flags that can be a combination of the following:
OPTFLOW_USE_INITIAL_FLOW
uses the input flow as an initial flow approximation.
OPTFLOW_FARNEBACK_GAUSSIAN
uses the Gaussian (winsize x winsize)
filter instead of a box filter of the same size for optical flow
                    estimation; usually, this option gives a more accurate flow than
with a box filter, at the cost of lower speed; normally,
winsize for a Gaussian window should be set to a larger value
to achieve the same level of robustness.
'''
self.pre_process(frame)
# self.flow = cv2.calcOpticalFlowFarneback(self.prev_gray,self.frame_gray,None,
# 0.5,3,15,
## 3,5,1.2,0)
# if self.use_prev_flow and self.flow is not None:
# self.flow = cv2.calcOpticalFlowFarneback(self.prev_gray,self.frame_gray,self.flow,
# pyr_scale = 0.5,levels = 3,winsize = 11,iterations = 3,
# poly_n = 7,poly_sigma = 1.5, flags = cv2.OPTFLOW_USE_INITIAL_FLOW)
# else:
self.flow = cv2.calcOpticalFlowFarneback(self.prev_gray,self.frame_gray,self.flow,
pyr_scale = 0.5,levels = 3,winsize = 11,iterations = 3,
poly_n = 7,poly_sigma = 1.5, flags = 0)
self.prev_gray = self.frame_gray
self.est_speed()
#------------------------------------------------------------------------------
def est_speed(self):
'''
estimate flow speed
'''
fy,fx = self.flow.T
try:
self.v_sqr = fx*fx + fy*fy
except FloatingPointError:
#Underflow: result so close to zero that some precision was lost.
print('FloatingPointError, underflow encountered in multiply')
# pass
v_sqrt = np.sqrt(self.v_sqr)
#remove small flow speed values
v0 = v_sqrt >= self.min_flow_speed
if np.any(v0):
#mean of all selected velocities
v_ave = np.mean(v_sqrt[v0])
else:
v_ave = 0
self.speed_step_current = v_ave
#testing
if self.collect_on:
self.speed_step.append(v_ave)
self.speed_mms.append(v_ave*self.speed_conversion)
if self.draw_on:
# plot the flow vectors
cv2.imshow("Optical flow",self.draw_flow(v0))
self.num_rejected.append(np.size(v0) - np.count_nonzero(v0))
self.num_total.append(np.size(v0))
if self.plot_on:
self.plot_data(v0,v_sqrt)
#------------------------------------------------------------------------------
def draw_flow(self,v0,resize = 4):
"""
Plot optical flow at sample points
spaced step pixels apart.
"""
vis = cv2.cvtColor(self.frame_gray,cv2.COLOR_GRAY2BGR)
# vis[x_indices,y_indices] = (0,0,255)
vis = cv2.resize(vis,(self.roi_new_w*resize,self.roi_new_h*resize))
#draw all lines
fy,fx = self.flow[self.x_indices,self.y_indices].T
lines = np.vstack([self.y_indices*resize,self.x_indices*resize,
(self.y_indices+fy)*resize,(self.x_indices+fx)*resize]).T.reshape(-1,2,2)
lines = np.int32(lines)
for (x1,y1),(x2,y2) in lines:
cv2.line(vis,(x1,y1),(x2,y2),(255,255,0),1)
#draw filtered lines
x_indices = self.x_indices[v0]
y_indices = self.y_indices[v0]
fy = fy[v0]
fx = fx[v0]
linesF = np.vstack([y_indices*resize,x_indices*resize,
(y_indices+fy)*resize,(x_indices+fx)*resize]).T.reshape(-1,2,2)
linesF = np.int32(linesF)
for (x1,y1),(x2,y2) in linesF:
cv2.line(vis,(x1,y1),(self.cx_local*resize,self.cy_local*resize),(255,0,255),1)
cv2.line(vis,(x1,y1),(x2,y2),(0,255,255),1)
# cv2.circle(vis,(x1,y1),1,(0,255,0))
cv2.circle(vis,(x2,y2),1,(0,0,255))
#cx and cy
cv2.line(vis,(self.cx_local*resize,0),(self.cx_local*resize,vis.shape[0]),(255,255,0))
cv2.line(vis,(0,self.cy_local*resize),(vis.shape[1],self.cy_local*resize),(255,255,0))
return vis
#------------------------------------------------------------------------------
def plot_init(self):
'''
'''
self.ax_pnts_acc = self.plot_axes[0,0]
self.ax_flow_vector_x = self.plot_axes[1,0]
self.ax_flow_vector_y = self.plot_axes[1,1]
self.ax_flow_vector_r = self.plot_axes[0,1]
self.ax_angle = self.plot_axes[2,0]
self.ax_angle_rej = self.plot_axes[2,1]
self.ax_flow_w = self.ax_flow_vector_r.twinx()
#------------------------------------------------------------------------------
def plot_data(self,v0,v_sqrt):
'''
'''
self.ax_pnts_acc.cla()
self.ax_pnts_acc.plot(self.num_total,'o-',label = 'num_total')
self.ax_pnts_acc.plot(self.num_rejected,'o-',label = 'num_rejected')
self.ax_pnts_acc.legend()
fx,fy = self.flow[self.x_indices,self.y_indices].T
# v = np.sqrt(fx*fx + fy*fy)
# fy_selected = fy[v0]
# fx_selected = fx[v0]
# v_sqrt = np.sqrt(self.v_sqr)
# w_rot_speed = v*self.indice_radii
# self.ax_flow_vector_r.plot(v,label = 'v')
v_sel = v_sqrt[v0]
v_sel.sort()
v_sel_len = v_sel.shape[0]
if v_sel_len > 10:
v_sel_half = int(v_sel_len /2) #half
v_sel_p10 = int(v_sel_len *0.1) #ten percent
upper_half = v_sel[v_sel_half:-v_sel_p10]
try:
mean_upqa = np.mean(upper_half)
except FloatingPointError:
mean_upqa = np.mean(v_sel)
elif v_sel_len > 1:
mean_upqa = np.mean(v_sel)
else:
mean_upqa = 0
self.ax_flow_vector_r.cla()
self.ax_flow_vector_r.plot(v_sel*self.speed_conversion,'+',label = 'v_sel sorted')
self.ax_flow_vector_r.axhline(self.min_flow_speed*self.speed_conversion,c='r',ls = '--')
self.ax_flow_vector_r.axhline(self.speed_step_current*self.speed_conversion,c='k',ls = '--')
self.ax_flow_vector_r.axhline(mean_upqa*self.speed_conversion,c='b',ls = '--')
self.ax_flow_vector_r.legend()
self.ax_flow_vector_r.axis(ymin = 0,ymax = 4)
self.ax_flow_w.cla()
# self.ax_flow_w.plot(w_rot_speed,'g',label = 'w_rot_speed')
# self.ax_flow_vector_x.cla()
# self.ax_flow_vector_x.plot(fx,'o',label = 'fx')
# self.ax_flow_vector_x.plot(fx_selected,label = 'fx_selected')
# self.ax_flow_vector_x.legend()
#
# self.ax_flow_vector_y.cla()
# self.ax_flow_vector_y.plot(fy,'o',label = 'fy')
# self.ax_flow_vector_y.plot(fy_selected,label = 'fy_selected')
# self.ax_flow_vector_y.legend()
#
## CW = fx*self.y_CW > self.x_CW*fy #CW rotation
#
# self.ax_angle.cla()
## self.ax_angle.plot(np.rad2deg(np.arctan(fy/fx)),'o',label='all')
# self.ax_angle.plot(np.rad2deg(np.arctan(fy_selected/fx_selected)),'o',label='fy/fx')
# self.ax_angle.plot(np.rad2deg(-np.arctan(self.y_indices[self.x_indices!=0]/self.x_indices[self.x_indices!=0])),'o',label='std')
# self.ax_angle.legend()
#
# self.ax_angle_rej.cla()
# self.ax_angle_rej.plot(self.indice_radii)
# self.ax_angle_rej.legend()
#
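# Usage sketch (editorial; frame/next_frame are BGR images and the ROI
# corners below are illustrative):
#   detector = SmallAreaDenseMethod(frame, (100, 100, 140, 140),
#                                   cam_fps=15, cal_factor=0.0212)
#   detector.apply(next_frame)   # updates detector.speed_step_current (pxl/frame)
#   speed_mms = detector.speed_step_current * detector.speed_conversion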
#==============================================================================
class GearDenseMethod():
def __init__(self,frame,roi_pnts,**kwargs):
'''
Apply Dense Optical Flow Method to the a gear ROI
frame = roi area in main frame
roi_pnts = (x1,y1,x2,y2), coords relative to main frame
        gear_centre = (cx,cy), coords relative to main frame
kwargs:
fps = 15
downscale_factor = 2
plot_on = True
detection kwargs:
min_flow_speed:
reject flow speed values less than this
                flow speed at the min criteria = sqrt(fx**2 + fy**2)
'''
#info
self.name_id = kwargs.get('name_id','DM_0')
#debug control
self.plot_on = kwargs.get('plot_on',False) # plots data
        self.draw_on = kwargs.get('draw_on',False) #draws additional screens
        self.collect_on = kwargs.get('collect_on',False) #collects data in list
        self.collect_on = self.collect_on or self.plot_on or self.draw_on
        #plotting placeholder, for debugging/testing
self.plot_axes = None
#detection parameters
self.min_flow_speed = kwargs.get('min_flow_speed',2.0) #pxl/frame
self.cw_filter = kwargs.get('cw_filter',True)
# self.use_prev_flow = kwargs.get('use_prev_flow',False) # quick check did not result in speed up
# self.angle_mod = 1.*np.pi #mod, 180 degrees
# self.min_angle_range = np.deg2rad(15) # +-15 degrees from 90
# self.min_angle = np.deg2rad(90) - self.min_angle_range
# self.max_angle = np.deg2rad(90) + self.min_angle_range
#START INIT
        #roi region, size parameters and disk_mask - recalculate on change of ROI/params etc.
self.update_roi(roi_pnts,**kwargs)
#init first frames
frame_roi = frame[self.roi_y1:self.roi_y2,self.roi_x1:self.roi_x2]
self.pre_process(frame_roi) #determine frame_gray
self.prev_gray = self.frame_gray.copy()
self.flow = None
#data collection
self.speed_step_current = 0
self.time_step_current = 0
        #additional data for review, used with collect_on/plot_on
self.speed_step = [] #average speed in pxl/frame
self.speed_mms = [] #speed converted to mm/s
self.time_step = [] #processing time step as measured
self.num_rejected = []
self.num_total = []
# self.angle_x_pnt = None
# self.angle_y_pnt = None
# self.angle_pnt = None
# self.angle_flow = None
# self.angle_centre = None
# self.angle_indices = None
#set apply method
w_timestep = kwargs.get('w_timestep',False)
if w_timestep:
self.apply = self.apply_w_timestep
else:
self.apply = self.apply_wo_timestep
# print()
# print('cal_factor: {}'.format(self.cal_factor))
# print('fps: {}'.format(self.fps))
# print('speed_conversion: {}'.format(self.speed_conversion))
# print('downscale_factor: {}'.format(self.downscale_factor))
#------------------------------------------------------------------------------
def update_roi(self,roi_pnts,**kwargs):
'''
Update roi parameters and dependent vars
'''
self.roi_x1,self.roi_y1,self.roi_x2,self.roi_y2 = roi_pnts
self.cx,self.cy = kwargs.get('gear_centre',(0,0))
self.roi_pnt1 = (self.roi_x1,self.roi_y1)
self.roi_pnt2 = (self.roi_x2,self.roi_y2)
self.cx_local = self.cx - self.roi_x1
self.cy_local = self.cy - self.roi_y1
#resize/shrink with downscale_factor
self.downscale_factor = float(kwargs.get('downscale_factor',1))
self.roi_new_h = int(round((self.roi_y2 - self.roi_y1)/self.downscale_factor))
self.roi_new_w = int(round((self.roi_x2 - self.roi_x1)/self.downscale_factor))
self.cx_local_dwn = self.cx_local/self.downscale_factor
self.cy_local_dwn = self.cy_local/self.downscale_factor
#roi disc mask setup
self.gear_mask_generate(**kwargs)
#speed conversion
self.fps = kwargs.get('cam_fps',15)
self.speed_conversion = self.cal_factor*self.fps
#------------------------------------------------------------------------------
def gear_mask_generate(self,**kwargs):
'''
        Make a disc ROI by masking
cx and cy are relative to main frame shape
kwargs:
main_frame = (640,480)
steps = 8 # sample every i'th point in gear_mask_disk_inv in calculated flow
and use these samples to estimate the speed
gear_radius_mm mask radius, +cr_add -cr_min
cal_factor convert radius to pxl and speed_step to speed_mms
            od_resize_factor adjustment factor
cr_add
cr_min
'''
main_frame = kwargs.get('main_frame',(640,480))
        #init disc (doughnut) mask size, based on the gear radius
gear_radius_mm = kwargs.get('gear_radius_mm',4.0)
self.cal_factor = kwargs.get('cal_factor',0.0212)
self.od_resize_factor = kwargs.get('od_resize_factor',1.07)
self.cr_od = int(round(self.od_resize_factor * gear_radius_mm / self.cal_factor))
cr_add = kwargs.get('cr_add',20)
cr_min = kwargs.get('cr_min',10)
# cr_space = cr_add + cr_min
self.cr_max = self.cr_od + cr_add
self.cr_min = self.cr_od - cr_min
# print('cr_min: {}; cr_add: {}'.format(cr_min,cr_add))
# print('cr_min: {}; cr_max: {}'.format(self.cr_min,self.cr_max))
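        # Worked example (editorial sketch, default parameters): with
        # gear_radius_mm = 4.0, cal_factor = 0.0212 mm/pixel and
        # od_resize_factor = 1.07, the outer gear radius in pixels is
        # cr_od = round(1.07 * 4.0 / 0.0212) ~ 202, so the sampling ring
        # runs from cr_min = 202 - 10 = 192 to cr_max = 202 + 20 = 222 pixels.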
#draw disk - on main frame copy
self.gear_mask = np.zeros((main_frame[1], main_frame[0]), np.uint8)
cv2.circle(self.gear_mask,(self.cx,self.cy),self.cr_max,(255),-1)
cv2.circle(self.gear_mask,(self.cx,self.cy),self.cr_min,0,-1)
#select gear roi in main frame and resize
self.gear_mask = self.gear_mask[self.roi_y1:self.roi_y2,self.roi_x1:self.roi_x2] #
        #from here on everything is downscaled (shrunk)
if self.downscale_factor != 1:
self.gear_mask = cv2.resize(self.gear_mask,(self.roi_new_w,self.roi_new_h))
self.min_flow_speed = self.min_flow_speed/self.downscale_factor
self.gear_mask_indices = self.gear_mask == 0
self.gear_mask_inv_indices = self.gear_mask != 0
#x and y coords used to sample flow
self.x_indices,self.y_indices = self.gear_mask_inv_indices.nonzero()
#radius at each evaluation point
dx = self.x_indices - self.cx_local_dwn
dy = self.y_indices - self.cy_local_dwn
all_radii = np.sqrt(dx*dx + dy*dy)
xyr = np.vstack((self.x_indices,self.y_indices,all_radii))
xyr = xyr.T
xyr = xyr[xyr[:,2].argsort()]
self.x_indices = xyr[:,0].astype(np.int)
self.y_indices = xyr[:,1].astype(np.int)
self.indice_radii = xyr[:,2]
#select only every i'th step
steps = kwargs.get('steps',32)
self.x_indices = self.x_indices[0::steps]
self.y_indices = self.y_indices[0::steps]
self.indice_radii = self.indice_radii[0::steps]
#vector for CW check
self.x_CW = self.x_indices + self.cx_local_dwn
self.y_CW = self.y_indices + self.cy_local_dwn
#array init
self.v_sqr = np.zeros_like(self.x_indices,dtype = np.float32) #prefer array over list
#testing
if self.draw_on:
cv2.imshow('gear_mask_DQM',self.gear_mask)
eval_pnts = self.gear_mask.copy()
eval_pnts = cv2.cvtColor(eval_pnts,cv2.COLOR_GRAY2BGR)
eval_pnts[self.x_indices,self.y_indices,:] = [0,0,255]
eval_pnts = cv2.resize(eval_pnts,(eval_pnts.shape[1]*4,eval_pnts.shape[0]*4))
cv2.imshow('gear_indices_eval_pnts',eval_pnts)
#------------------------------------------------------------------------------
def pre_process(self,frame):
'''
Preprocess raw frame, apply gear mask
'''
self.frame_gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
if self.downscale_factor != 1:
self.frame_gray = cv2.resize(self.frame_gray,(self.roi_new_w,self.roi_new_h))
self.frame_gray[self.gear_mask_indices] = 0
#------------------------------------------------------------------------------
def apply_w_timestep(self,frame,*args,**kwargs):
'''
Estimate speed and measure timestep for whole process
'''
e1 = cv2.getTickCount()
self.apply_wo_timestep(frame)
e2 = cv2.getTickCount()
self.time_step_current = (e2 - e1)/ cv2.getTickFrequency()
if self.collect_on:
self.time_step.append(self.time_step_current)
#------------------------------------------------------------------------------
def apply_wo_timestep(self,frame,*args,**kwargs):
'''
Estimate speed, without measuring the time step
cv2.calcOpticalFlowFarneback(prev, next, flow,
pyr_scale, levels, winsize,
iterations, poly_n, poly_sigma,
flags) → flow
Parameters:
prev – first 8-bit single-channel input image.
next – second input image of the same size and the same type as prev.
flow – computed flow image that has the same size as prev and type CV_32FC2.
pyr_scale – parameter, specifying the image scale (<1) to build pyramids for each image;
pyr_scale=0.5 means a classical pyramid, where each next layer is twice smaller than the previous one.
levels – number of pyramid layers including the initial image;
levels=1 means that no extra layers are created and only the original images are used.
winsize – averaging window size;
larger values increase the algorithm robustness to image noise
and give more chances for fast motion detection,
but yield more blurred motion field.
iterations – number of iterations the algorithm does at each pyramid level.
poly_n – size of the pixel neighborhood used to find polynomial expansion in each pixel;
larger values mean that the image will be approximated with smoother surfaces,
yielding more robust algorithm and more blurred motion field,
typically poly_n =5 or 7.
poly_sigma – standard deviation of the Gaussian that is used to smooth
derivatives used as a basis for the polynomial expansion;
for poly_n=5, you can set poly_sigma=1.1, for poly_n=7,
a good value would be poly_sigma=1.5.
flags –
operation flags that can be a combination of the following:
OPTFLOW_USE_INITIAL_FLOW
uses the input flow as an initial flow approximation.
OPTFLOW_FARNEBACK_GAUSSIAN
uses the Gaussian (winsize x winsize)
filter instead of a box filter of the same size for optical flow
                    estimation; usually, this option gives a more accurate flow than
with a box filter, at the cost of lower speed; normally,
winsize for a Gaussian window should be set to a larger value
to achieve the same level of robustness.
'''
self.pre_process(frame)
# self.flow = cv2.calcOpticalFlowFarneback(self.prev_gray,self.frame_gray,None,
# 0.5,3,15,
## 3,5,1.2,0)
# if self.use_prev_flow and self.flow is not None:
# self.flow = cv2.calcOpticalFlowFarneback(self.prev_gray,self.frame_gray,self.flow,
# pyr_scale = 0.5,levels = 3,winsize = 11,iterations = 3,
# poly_n = 7,poly_sigma = 1.5, flags = cv2.OPTFLOW_USE_INITIAL_FLOW)
# else:
self.flow = cv2.calcOpticalFlowFarneback(self.prev_gray,self.frame_gray,self.flow,
pyr_scale = 0.5,levels = 3,winsize = 11,iterations = 3,
poly_n = 7,poly_sigma = 1.5, flags = 0)
self.prev_gray = self.frame_gray
self.est_speed()
#------------------------------------------------------------------------------
def est_speed(self):
'''
estimate flow speed
'''
fy_dwnscaled,fx_dwnscaled = self.flow[self.x_indices,self.y_indices].T
try:
self.v_sqr[:] = fx_dwnscaled*fx_dwnscaled + fy_dwnscaled*fy_dwnscaled
except FloatingPointError:
#Underflow: result so close to zero that some precision was lost.
# print('FloatingPointError, underflow encountered in multiply')
pass
v_sqrt = np.sqrt(self.downscale_factor*self.v_sqr)
#remove small flow speed values
v0 = v_sqrt >= self.min_flow_speed
# w_rot_speed = v_sqrt*self.indice_radii #should be component tangent to centre
#flow vector angle considerations possible
#flow vector angle should be approx. orthogonal to radius line
if self.cw_filter:
CW = fx_dwnscaled*self.y_CW > self.x_CW*fy_dwnscaled #CW rotation
#select indices - bitwise_and
v0 = v0 & CW #& ang_indices
if np.any(v0):
#mean of all selected velocities
# v_ave = np.mean(v_sqrt[v0])
#upper half selection
            #sort the velocities from smallest to largest,
            #then use the mean of the upper half (up to the top 10%)
            #as the mean velocity.
v_sel = v_sqrt[v0]
v_sel.sort()
self.v_sel_len = v_sel.shape[0]
if self.v_sel_len > 10:
self.v_sel_half = int(self.v_sel_len /2) #half
self.v_sel_p10 = int(self.v_sel_len *0.1) #ten percent
upper_half = v_sel[self.v_sel_half:-self.v_sel_p10]
try:
v_ave = np.mean(upper_half)
except FloatingPointError:
v_ave = np.mean(v_sel)
else:
v_ave = np.mean(v_sel)
#
            #rotation speed method - too dependent on cx,cy and evaluation radius
# w_mean = np.mean(w_rot_speed[v0])
# v_ave = w_mean/self.cr_od
v_ave *= self.downscale_factor #upscale
else:
v_ave = 0
# print('v_ave: {}, v:{},v0:{} - {}'.format(v_ave,v.shape,v0.shape,v0.any()))
self.speed_step_current = v_ave
#testing
if self.collect_on:
self.speed_step.append(v_ave)
self.speed_mms.append(v_ave*self.speed_conversion)
if self.draw_on:
# plot the flow vectors
cv2.imshow("Optical flow",self.draw_flow(v0))
self.num_rejected.append(np.size(v0) - np.count_nonzero(v0))
self.num_total.append(np.size(v0))
if self.plot_on:
self.plot_data(v0,v_sqrt)
#------------------------------------------------------------------------------
def draw_flow(self,v0,resize = 4):
"""
Plot the optical flow vectors at the sampled points,
plus the filtered (selected) vectors and the gear centre lines.
"""
vis = cv2.cvtColor(self.frame_gray,cv2.COLOR_GRAY2BGR)
# vis[x_indices,y_indices] = (0,0,255)
vis = cv2.resize(vis,(self.roi_new_w*resize,self.roi_new_h*resize))
#draw all lines
fy,fx = self.flow[self.x_indices,self.y_indices].T
lines = np.vstack([self.y_indices*resize,self.x_indices*resize,
(self.y_indices+fy)*resize,(self.x_indices+fx)*resize]).T.reshape(-1,2,2)
lines = np.int32(lines)
for (x1,y1),(x2,y2) in lines:
cv2.line(vis,(x1,y1),(x2,y2),(255,255,0),1)
#draw filtered lines
x_indices = self.x_indices[v0]
y_indices = self.y_indices[v0]
fy = fy[v0]
fx = fx[v0]
linesF = np.vstack([y_indices*resize,x_indices*resize,
(y_indices+fy)*resize,(x_indices+fx)*resize]).T.reshape(-1,2,2)
linesF = np.int32(linesF)
for (x1,y1),(x2,y2) in linesF:
cv2.line(vis,(x1,y1),(self.cx_local*resize,self.cy_local*resize),(255,0,255),1)
cv2.line(vis,(x1,y1),(x2,y2),(0,255,255),1)
# cv2.circle(vis,(x1,y1),1,(0,255,0))
cv2.circle(vis,(x2,y2),1,(0,0,255))
#cx and cy
cv2.line(vis,(self.cx_local*resize,0),(self.cx_local*resize,vis.shape[0]),(255,255,0))
cv2.line(vis,(0,self.cy_local*resize),(vis.shape[1],self.cy_local*resize),(255,255,0))
return vis
#------------------------------------------------------------------------------
def plot_init(self):
'''
'''
self.ax_pnts_acc = self.plot_axes[0,0]
self.ax_flow_vector_x = self.plot_axes[1,0]
self.ax_flow_vector_y = self.plot_axes[1,1]
self.ax_flow_vector_r = self.plot_axes[0,1]
self.ax_angle = self.plot_axes[2,0]
self.ax_angle_rej = self.plot_axes[2,1]
self.ax_flow_w = self.ax_flow_vector_r.twinx()
#------------------------------------------------------------------------------
def plot_data(self,v0,v_sqrt):
'''
'''
self.ax_pnts_acc.cla()
self.ax_pnts_acc.plot(self.num_total,'o-',label = 'num_total')
self.ax_pnts_acc.plot(self.num_rejected,'o-',label = 'num_rejected')
self.ax_pnts_acc.legend()
fx,fy = self.flow[self.x_indices,self.y_indices].T
# v = np.sqrt(fx*fx + fy*fy)
# fy_selected = fy[v0]
# fx_selected = fx[v0]
# v_sqrt = np.sqrt(self.v_sqr)
# w_rot_speed = v*self.indice_radii
self.ax_flow_vector_r.cla()
# self.ax_flow_vector_r.plot(v,label = 'v')
v_sel = v_sqrt[v0]*self.downscale_factor
v_sel.sort()
v_sel_len = v_sel.shape[0]
if v_sel_len > 10:
v_sel_half = int(v_sel_len /2) #half
v_sel_p10 = int(v_sel_len *0.1) #ten percent
upper_half = v_sel[v_sel_half:-v_sel_p10]
try:
mean_upqa = np.mean(upper_half)
except FloatingPointError:
mean_upqa = np.mean(v_sel)
elif v_sel_len > 1:
mean_upqa = np.mean(v_sel)
else:
mean_upqa = 0
self.ax_flow_vector_r.plot(v_sel*self.speed_conversion,'+',label = 'v_sel sorted')
self.ax_flow_vector_r.axhline(self.min_flow_speed*self.speed_conversion,c='r',ls = '--')
self.ax_flow_vector_r.axhline(self.speed_step_current*self.speed_conversion,c='k',ls = '--')
self.ax_flow_vector_r.axhline(mean_upqa*self.speed_conversion,c='b',ls = '--')
self.ax_flow_vector_r.legend()
self.ax_flow_vector_r.axis(ymin = 0,ymax = 4)
self.ax_flow_w.cla()
# self.ax_flow_w.plot(w_rot_speed,'g',label = 'w_rot_speed')
# self.ax_flow_vector_x.cla()
# self.ax_flow_vector_x.plot(fx,'o',label = 'fx')
# self.ax_flow_vector_x.plot(fx_selected,label = 'fx_selected')
# self.ax_flow_vector_x.legend()
#
# self.ax_flow_vector_y.cla()
# self.ax_flow_vector_y.plot(fy,'o',label = 'fy')
# self.ax_flow_vector_y.plot(fy_selected,label = 'fy_selected')
# self.ax_flow_vector_y.legend()
#
## CW = fx*self.y_CW > self.x_CW*fy #CW rotation
#
# self.ax_angle.cla()
## self.ax_angle.plot(np.rad2deg(np.arctan(fy/fx)),'o',label='all')
# self.ax_angle.plot(np.rad2deg(np.arctan(fy_selected/fx_selected)),'o',label='fy/fx')
# self.ax_angle.plot(np.rad2deg(-np.arctan(self.y_indices[self.x_indices!=0]/self.x_indices[self.x_indices!=0])),'o',label='std')
# self.ax_angle.legend()
#
# self.ax_angle_rej.cla()
# self.ax_angle_rej.plot(self.indice_radii)
# self.ax_angle_rej.legend()
#
#==============================================================================
class GearLKMethod():
def __init__(self,frame,roi_pnts,**kwargs):
'''
Apply the Lucas-Kanade optical flow method to a gear ROI
'''
#info
self.name_id = kwargs.get('name_id','LK_0')
#debug control
self.plot_on = kwargs.get('plot_on',False) # plots data
self.draw_on = kwargs.get('draw_on',False) #draws additional screens
self.collect_on = kwargs.get('collect_on',False) #collects data in list
self.collect_on = self.collect_on or self.plot_on or self.draw_on
self.plot_axes = None
# self.gear_use_mask = kwargs.get('use_mask',True)
# self.thresh_pnt = kwargs.get('thresh_pnt',125) #manual threshpoint value
#internal data
self.tracks = []
self.speed_xy_per_track = []
self.speed_w_per_track = [] #rotational speed
self.prev_gray = None
self.gray = None
self.thresh = None
self.frame_id = 0
self.first_analysis = True
#TRACKING
self.feature_params = dict(maxCorners = 50,
qualityLevel = 0.3,
minDistance = 2,
blockSize = 3 )
self.track_pnt_mask_radius = 2
lk_params_criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
# self.lk_params = dict( winSize = (30, 30),
# maxLevel = 2,
# criteria = lk_params_criteria)
self.lk_params = dict( winSize = (15, 15),
maxLevel = 2,
criteria = lk_params_criteria)
self.track_len = 2
self.detect_interval = 2
#point selection
self.gear_sp_min_h_sqrd = 2 #self.gear_sp_min_R**2
#set roi and disk mask
self.update_roi(roi_pnts,**kwargs)
#data collection: measurands
self.speed_step_current = 0
self.time_step_current = 0
#list collection with self.collect_on
if self.collect_on:
self.pnt_raddii = []
self.speed_step = []
self.speed_mms = []
self.time_step = []
#set apply method
w_timestep = kwargs.get('w_timestep',False)
if w_timestep:
self.apply = self.apply_w_timestep
else:
self.apply = self.apply_wo_timestep
#------------------------------------------------------------------------------
def update_roi(self,roi_pnts,**kwargs):
'''
Update roi parameters and dependent vars
'''
self.roi_x1,self.roi_y1,self.roi_x2,self.roi_y2 = roi_pnts
self.cx,self.cy = kwargs.get('gear_centre',(0,0))
self.roi_pnt1 = (self.roi_x1,self.roi_y1)
self.roi_pnt2 = (self.roi_x2,self.roi_y2)
self.cy_flipped = (self.roi_y2 - self.roi_y1) - self.cy
self.cx_local = self.cx - self.roi_x1
self.cy_local = self.cy - self.roi_y1
#resize/shrink with downscale_factor
self.downscale_factor = kwargs.get('downscale_factor',2)
self.roi_new_h = int(round((self.roi_y2 - self.roi_y1)/self.downscale_factor))
self.roi_new_w = int(round((self.roi_x2 - self.roi_x1)/self.downscale_factor))
self.cx_local = int(round(self.cx_local/self.downscale_factor))
self.cy_local = int(round(self.cy_local/self.downscale_factor))
self.cy_flipped = int(round(self.cy_flipped/self.downscale_factor))
#roi disc mask setup
self.gear_mask_generate(**kwargs)
#speed conversion
self.fps = kwargs.get('cam_fps',15)
self.speed_conversion = self.cal_factor*self.fps
#------------------------------------------------------------------------------
def gear_mask_generate(self,**kwargs):
'''
Make a disc ROI by masking
cx and cy are relative to main frame shape
kwargs:
main_frame = (640,480)
gear_radius_mm mask radius, +cr_add -cr_min
cal_factor convert radius to pxl and speed_step to speed_mms
od_resize_factor adjustment factor
cr_add
cr_min
'''
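#illustrative example with the defaults gear_radius_mm=4.0, cal_factor=0.0212 and
#od_resize_factor=1.07: cr_od = round(1.07*4.0/0.0212) ~= 202 px, so with
#cr_add=20 and cr_min=10 the doughnut mask spans radii of roughly 192..222 px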
main_frame = kwargs.get('main_frame',(640,480))
#init disc (doughnut) mask size, based on the gear radius
gear_radius_mm = kwargs.get('gear_radius_mm',4.0)
self.cal_factor = kwargs.get('cal_factor',0.0212)
self.od_resize_factor = kwargs.get('od_resize_factor',1.07)
self.cr_od = int(round(self.od_resize_factor * gear_radius_mm / self.cal_factor))
cr_add = kwargs.get('cr_add',20)
cr_min = kwargs.get('cr_min',10)
# cr_space = cr_add + cr_min
self.cr_max = self.cr_od + cr_add
self.cr_min = self.cr_od - cr_min
#draw disk - on main frame copy
self.gear_mask = np.zeros((main_frame[1], main_frame[0]), np.uint8)
cv2.circle(self.gear_mask,(self.cx,self.cy),self.cr_max,(255),-1)
cv2.circle(self.gear_mask,(self.cx,self.cy),self.cr_min,0,-1)
#select gear roi in main frame and resize
self.gear_mask = self.gear_mask[self.roi_y1:self.roi_y2,self.roi_x1:self.roi_x2] #
self.gear_mask = cv2.resize(self.gear_mask,(self.roi_new_w,self.roi_new_h))
self.gear_mask_indices = self.gear_mask == 0
self.gear_mask_inv_indices = self.gear_mask != 0
if self.draw_on:
cv2.imshow('gear_mask_LK',self.gear_mask)
#------------------------------------------------------------------------------
def apply_w_timestep(self,roi,frame_id):
'''
Run one analysis step and record the processing time per frame.
'''
e1 = cv2.getTickCount()
self.apply_wo_timestep(roi,frame_id)
e2 = cv2.getTickCount()
self.time_step_current = (e2 - e1)/ cv2.getTickFrequency()
if self.collect_on:
self.time_step.append(self.time_step_current)
self.speed_step.append(self.speed_step_current)
self.speed_mms.append(self.speed_step_current*self.speed_conversion)
if self.plot_on:
self.plot_data()
self.pnt_raddii = []
#------------------------------------------------------------------------------
def apply_wo_timestep(self,roi,frame_id):
'''
Run one analysis step (preprocess, track, estimate gear speed) without timing.
'''
self.frame_id = frame_id
self.gear_preprocess(roi)
gear_speed = self.flow_track_gear()
gear_speed *= self.downscale_factor
self.speed_step_current = gear_speed
self.first_analysis = False
#------------------------------------------------------------------------------
def gear_preprocess(self,roi):
'''
Keep only the blue channel, convert to grayscale, downscale, blur,
adaptive-threshold and apply the doughnut mask.
'''
# if self.draw_on:
# cv2.imshow('roi_LKM',roi)
self.roi_test = roi.copy()# cv2.GaussianBlur(roi, (3, 3),0)
self.roi_test[:,:,1:] = 0 #keep only the blue channel (zero green/red), helps somehow with blue background
self.gray = cv2.cvtColor(self.roi_test,cv2.COLOR_BGR2GRAY)
if self.downscale_factor != 1:
self.gray = cv2.resize(self.gray,(self.roi_new_w,self.roi_new_h))
self.gray = cv2.GaussianBlur(self.gray, (3, 3),0)
#mask gray
# ret_val,self.thresh = cv2.threshold(self.gray,0,255,cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# ret_val,self.thresh = cv2.threshold(self.gray,thresh_pnt,255,cv2.THRESH_BINARY_INV)
self.thresh = cv2.adaptiveThreshold(self.gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV, 11, 1)
self.thresh[self.gear_mask_indices] = 0
#threshold
self.thresh = cv2.GaussianBlur(self.thresh, (3, 3),0)
#------------------------------------------------------------------------------
def flow_track_gear(self):
'''
Update existing tracks, estimate the gear speed from the per-track
displacements (trimmed upper-half mean), then detect new track points.
'''
self.speed_xy_per_track = []
#----------------------------------------------------------------------
if self.tracks:
self.track_points_update()
#----------------------------------------------------------------------
#update speed
if len(self.speed_xy_per_track) > 1:
# gear_speed = reject_outliers(np.array(self.speed_xy_per_track),
# return_option = 1)
# gear_speed = np.mean(self.speed_xy_per_track)
# gear_w_mean = np.mean(self.speed_w_per_track)
# gear_speed = gear_w_mean/self.cr_od
# gear_speed = reject_outliers(np.array(self.speed_xy_per_track),
# return_option = 1)
# gear_speed = math_sqrt(gear_speed)
#upper half for gear speed est
self.speed_xy_per_track.sort()
len_data = len(self.speed_xy_per_track)
if len_data > 10:
half = int(len_data/2)
p10 = int(len_data*0.1)
upper_half = self.speed_xy_per_track[half:-p10]
try:
gear_speed = np.mean(upper_half)
except FloatingPointError:
gear_speed = np.mean(self.speed_xy_per_track)
else:
gear_speed = np.mean(self.speed_xy_per_track)
else:
gear_speed = 0
#----------------------------------------------------------------------
#get new points
self.get_new_track_points()
return gear_speed
#------------------------------------------------------------------------------
def track_points_update(self):
'''
Track the last point of every track with pyramidal LK, keep only points
passing the forward-backward consistency check, and accumulate per-track speeds.
'''
p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
p1, st, err = cv2.calcOpticalFlowPyrLK(self.prev_gray, self.thresh, p0, None, **self.lk_params)
p0r, st, err = cv2.calcOpticalFlowPyrLK(self.thresh, self.prev_gray, p1, None, **self.lk_params) #reverse
d = abs(p0-p0r).reshape(-1, 2).max(-1)
good = d < 1
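#forward-backward consistency check: points whose re-tracked position differs
#from the original by 1 px or more are discarded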
new_tracks = []
for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
if not good_flag:
continue
tr.append((x, y))
if len(tr) > self.track_len:
del tr[0]
new_tracks.append(tr)
try:
prev_pnt = tr[-2]
self.gear_speed_append(x,y,prev_pnt) #this can be vectorized
except IndexError:
pass #first point in track
#end for loop
self.tracks = new_tracks
# if self.draw_on:
# cv2.circle(vis, (x, y), 2, (0, 0,255), -1)
# if self.draw_on:
# cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
#all points in a track
# for track in self.tracks:
# for x,y in track:
# cv2.circle(vis, (x, y), 2, (255, 0, 0), -1)
# cv2.imshow('track_pnts',vis)
#------------------------------------------------------------------------------
def get_new_track_points(self):
'''
Every detect_interval frames (and on the first frame), mask out the current
track endpoints and detect new good features to track.
'''
if self.frame_id % self.detect_interval == 0 or self.first_analysis:
try:
mask = self.new_pnts_mask.copy()
except AttributeError:
self.new_pnts_mask = np.zeros_like(self.thresh) + 255
mask = self.new_pnts_mask.copy()
for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
cv2.circle(mask, (x, y), self.track_pnt_mask_radius, 0, -1)
p = cv2.goodFeaturesToTrack(self.thresh, mask = mask, **self.feature_params)
if p is not None:
for x, y in np.float32(p).reshape(-1, 2):
self.tracks.append([(x, y)])
# if self.draw_on:
# cv2.circle(self.roi_test, (x, y), 2, (0, 255, 0), -1)
#update prev
self.prev_gray = self.thresh.copy()
#------------------------------------------------------------------------------
def gear_speed_append(self,x,y,prev_pnt):
'''
determine speed (step) for a new track pnt added to an existing track
x, y new pnt coordinates
prev_pnt = [x,y]
reject pnts with too small h (h = dx**2 + dy**2),
ie pnts that did not move
CW positive rotation
'''
dx = x - prev_pnt[0]
dy = y - prev_pnt[1]
h = dx*dx + dy*dy
if h > self.gear_sp_min_h_sqrd:
# angle_new = np.arctan2(y - self.cy_flipped,x - self.cx)
# angle_prev = np.arctan2(prev_pnt[1] - self.cy_flipped,prev_pnt[0]- self.cx)
x0 = prev_pnt[0] - self.cx
y0 = prev_pnt[1] - self.cy_flipped
# det = dx*y0 - x0*dy
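#sign test on the 2D cross product of the radius vector (x0,y0) and the
#displacement (dx,dy); with the flipped y used above, dx*y0 < x0*dy is
#taken as clockwise rotation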
if dx*y0 < x0*dy: #CW ROTATION
# self.speed_xy_per_track.append(h) #sqrt later
v = math_sqrt(h) #sqrt now
self.speed_xy_per_track.append(v)
pnt_radius_dx = x - self.cx_local
pnt_radius_dy = y - self.cy_local
pnt_radius = math_sqrt(pnt_radius_dx*pnt_radius_dx + pnt_radius_dy*pnt_radius_dy)
self.pnt_raddii.append(pnt_radius)
# w = v*pnt_radius
# self.speed_w_per_track.append(w)
#------------------------------------------------------------------------------
def plot_init(self):
'''
'''
self.ax_xy_p_track = self.plot_axes[0,0]
self.ax_01 = self.plot_axes[0,1]
self.ax_10 = self.plot_axes[1,0]
self.ax_11 = self.plot_axes[1,1]
#------------------------------------------------------------------------------
def plot_data(self):
'''
'''
self.ax_xy_p_track.cla()
vr_track = np.vstack([self.speed_xy_per_track,self.pnt_raddii])
vr_track = vr_track.T
vr_track = vr_track[vr_track[:,0].argsort()]
mean = np.mean(self.speed_xy_per_track)
#
# median = np.median(self.speed_xy_per_track)
data = vr_track[:,0]
# m = 2.
# d = np.abs(data - np.median(data))
# mdev = np.median(d)
# s = d/mdev if mdev else 0.
# indices = np.array(s < m,dtype = int)
# new_set = data[s < m]
# x = indices.nonzero()[0]
len_data = data.shape[0]
half = int(len_data/2)
p10 = int(len_data*0.1)
upper_half = data[half:-p10]
mean_upqa = np.mean(upper_half)
self.ax_xy_p_track.plot(vr_track[:,0],'go-',label = 'v_track')
# self.ax_xy_p_track.plot(x,new_set,'ro-',label = 'new_set')
self.ax_xy_p_track.plot(np.arange(half,len_data-p10,1),upper_half,'ko-',label = 'new_set')
self.ax_xy_p_track.axhline(math_sqrt(self.gear_sp_min_h_sqrd),c='r',ls = '--',label = 'min')
# self.ax_xy_p_track.axhline(self.speed_step_current,c='k',ls = '--',label='current')
self.ax_xy_p_track.axhline(mean,c='g',ls = '--',label='mean')
# self.ax_xy_p_track.axhline(median,c='cyan',ls = '--',label='median')
self.ax_xy_p_track.axhline(mean_upqa,c='b',ls = '--',label='mean_upqa')
self.ax_xy_p_track.legend()
self.ax_xy_p_track.axis(ymin=0,ymax=10)
self.ax_01.plot(vr_track[:,0]*self.speed_conversion,'o-',label = 'v_track')
self.ax_01.axhline(2,c='r',ls = '--')
self.ax_01.axhline(self.speed_step_current*self.speed_conversion,c='k',ls = '--')
self.ax_01.axis(ymin=0,ymax=3)
self.ax_01.legend()
#
self.ax_10.cla()
self.ax_10.plot(vr_track[:,1],'o-',label = 'radii_track')
# self.ax_10.legend()
#
self.ax_11.cla()
self.ax_11.plot(vr_track[:,1],vr_track[:,0],'o',label = 'v vs radii')
self.ax_11.legend()
#==============================================================================
#==============================================================================
class FilamentLKMethod():
def __init__(self,frame,roi_pnts,**kwargs):
'''
Apply the Lucas-Kanade optical flow method to a filament ROI,
to estimate the filament speed (assumed downwards)
'''
#info
self.name_id = kwargs.get('name_id','fill_LK0')
#debug/test control
self.plot_on = kwargs.get('plot_on',False) # plots data
self.draw_on = kwargs.get('draw_on',False) #draws additional screens
self.collect_on = kwargs.get('collect_on',False) #collects data in list
self.collect_on = self.collect_on or self.plot_on or self.draw_on
#configs
self.configs = kwargs
self.material = self.configs.get('mat_colour','white').lower()
#preprocess
self.thresh_inv = self.configs.get('thresh_inv',False) #delta ave method - invert min/max
if self.thresh_inv:
self.thresh_type = cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU
else:
self.thresh_type = cv2.THRESH_BINARY + cv2.THRESH_OTSU
if self.material == 'green':
self.filament_sp_filter_blue = True
self.filament_sp_use_right = False
elif self.material == 'white':
self.filament_sp_filter_blue = False
self.filament_sp_use_right = False
elif self.material == 'clear':
self.filament_sp_use_right = True
self.filament_sp_filter_blue = False
elif self.material == 'red':
self.filament_sp_use_right = True
self.filament_sp_filter_blue = True
elif self.material == 'model':
self.filament_sp_use_right = True
self.filament_sp_filter_blue = True
self.ave_method_use_red = True
#roi setup
self.update_roi(roi_pnts,**kwargs)
#zero arrays
# self.meas_vals_init()
#speed estimation
self.track_len = 2
self.detect_interval = 2
self.filament_sp_min_step = 0.75
self.ave_method_use_red = False
self.track_pnt_mask_radius = 2
self.feature_params = dict(maxCorners = kwargs.get('maxCorners',50),
qualityLevel = kwargs.get('qualityLevel',0.3),
minDistance = kwargs.get('minDistance',2),
blockSize = kwargs.get('blockSize',3))
lk_params_criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
# self.lk_params = dict( winSize = (30, 30),
# maxLevel = 2,
# criteria = lk_params_criteria)
self.lk_params = dict( winSize = (15, 15),
maxLevel = 2,
criteria = lk_params_criteria)
#data collection
self.filament_speed = 0
#internal
self.prev_gray = None
self.gray = None
self.tracks = []
#control
self.frame_id = 0
self.first_analysis = True
#set apply method
w_timestep = kwargs.get('w_timestep',False)
if w_timestep:
self.apply = self.apply_w_timestep
else:
self.apply = self.apply_wo_timestep
if self.collect_on:
# self.num_used_tracks = []
# self.num_not_vertical_x_vs_y_frame = []
# self.num_y_neg_frame = []
#list collection with self.collect_on
self.speed_step = []
self.speed_mms = []
self.time_step = []
self.speed_compare = []
#------------------------------------------------------------------------------
def update_roi(self,roi_pnts,**kwargs):
'''
Update roi parameters, dependent vars and other vars
'''
self.roi_x1,self.roi_y1,self.roi_x2,self.roi_y2 = roi_pnts
self.roi_pnt1 = (self.roi_x1,self.roi_y1)
self.roi_pnt2 = (self.roi_x2,self.roi_y2)
#speed conversion
self.fps = kwargs.get('cam_fps',15)
self.cal_factor = kwargs.get('cal_factor',0.0212)
self.speed_conversion = self.cal_factor*self.fps
#------------------------------------------------------------------------------
def apply_w_timestep(self,roi,frame_id):
'''
Run one analysis step and record the processing time per frame.
'''
e1 = cv2.getTickCount()
self.apply_wo_timestep(roi,frame_id)
e2 = cv2.getTickCount()
self.time_step_current = (e2 - e1)/ cv2.getTickFrequency()
if self.collect_on:
self.time_step.append(self.time_step_current)
self.speed_step.append(self.speed_step_current)
self.speed_mms.append(self.speed_step_current*self.speed_conversion)
if self.plot_on:
self.plot_data()
#------------------------------------------------------------------------------
def apply_wo_timestep(self,roi,frame_id):
'''
Run one analysis step (preprocess, track, estimate filament speed) without timing.
'''
self.frame_id = frame_id
self.fil_speed_preprocess(roi)
fil_speed = self.flow_track_filament()
# fil_speed *= self.downscale_factor
self.speed_step_current = fil_speed
self.first_analysis = False
#------------------------------------------------------------------------------
def fil_speed_preprocess(self,roi):
'''
Blur the ROI, optionally keep only the blue channel, threshold the left
(and optionally the right) edge strip and zero the remainder, so that
features are detected on the filament edges only.
'''
self.roi = cv2.GaussianBlur(roi, (3, 3),0)
roi_test = self.roi.copy()
if self.filament_sp_filter_blue:
roi_test[:,:,1:] = 0
self.gray = cv2.cvtColor(roi_test,cv2.COLOR_BGR2GRAY)
left_start = 0 #max(0,x_left_ave)
left_end = 30 #x_left_ave
ret_val,thresh_roi_1 = cv2.threshold(self.gray[:,left_start:left_end],0,255,self.thresh_type)
self.gray[:,left_start:left_end] = thresh_roi_1
if self.filament_sp_use_right:
right_start = -10
ret_val,thresh_roi_2 = cv2.threshold(self.gray[:,right_start:],0,255,self.thresh_type)
self.gray[:,right_start:] = thresh_roi_2
self.gray[:,left_end:right_start] = 0
else:
self.gray[:,left_end:] = 0
#------------------------------------------------------------------------------
def flow_track_filament(self):
'''
Update existing tracks, estimate the filament speed from the per-track
downward displacements (outliers rejected), then detect new track points.
'''
self.speed_xy_per_track = []
#----------------------------------------------------------------------
if self.tracks:
self.track_points_update()
#----------------------------------------------------------------------
#update speed
if len(self.speed_xy_per_track) > 1:
fil_speed = reject_outliers(np.array(self.speed_xy_per_track),
return_option = 1)
# fil_speed = np.mean(self.speed_xy_per_track)
else:
fil_speed = 0
#----------------------------------------------------------------------
#get new points
self.get_new_track_points()
return fil_speed
#------------------------------------------------------------------------------
def track_points_update(self):
'''
Track the last point of every track with pyramidal LK, keep only points
passing the forward-backward consistency check, and accumulate per-track speeds.
'''
p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
p1, st, err = cv2.calcOpticalFlowPyrLK(self.prev_gray, self.gray, p0, None, **self.lk_params)
p0r, st, err = cv2.calcOpticalFlowPyrLK(self.gray, self.prev_gray, p1, None, **self.lk_params) #reverse
d = abs(p0-p0r).reshape(-1, 2).max(-1)
good = d < 1
new_tracks = []
for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
if not good_flag:
continue
tr.append((x, y))
if len(tr) > self.track_len:
del tr[0]
new_tracks.append(tr)
try:
prev_pnt = tr[-2]
self.fil_speed_append(x,y,prev_pnt) #this can be vectorised
except IndexError:
pass #first point in track
#end for loop
self.tracks = new_tracks
# if self.draw_on:
# cv2.circle(vis, (x, y), 2, (0, 0,255), -1)
# if self.draw_on:
# cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
#all points in a track
# for track in self.tracks:
# for x,y in track:
# cv2.circle(vis, (x, y), 2, (255, 0, 0), -1)
# cv2.imshow('track_pnts',vis)
#------------------------------------------------------------------------------
def get_new_track_points(self):
'''
Every detect_interval frames (and on the first frame), mask out the current
track endpoints and detect new good features to track.
'''
if self.frame_id % self.detect_interval == 0 or self.first_analysis:
try:
mask = self.new_pnts_mask.copy()
except AttributeError:
self.new_pnts_mask = np.zeros_like(self.gray) + 255
mask = self.new_pnts_mask.copy()
for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
cv2.circle(mask, (x, y), self.track_pnt_mask_radius, 0, -1)
p = cv2.goodFeaturesToTrack(self.gray, mask = mask, **self.feature_params)
if p is not None:
for x, y in np.float32(p).reshape(-1, 2):
self.tracks.append([(x, y)])
# if self.draw_on:
# cv2.circle(self.roi_test, (x, y), 2, (0, 255, 0), -1)
#update prev
self.prev_gray = self.gray.copy()
#------------------------------------------------------------------------------
def fil_speed_append(self,x,y,prev_pnt):
'''
determine speed (step) for a new track pnt added to an existing track
x, y new pnt coordinates
prev_pnt = [x,y]
keep only displacements that are downward (dy > filament_sp_min_step)
and approximately vertical (dx ~= 0 or abs(dy/dx) > 0.9);
upward and near-horizontal displacements are discarded
'''
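#illustrative example with filament_sp_min_step = 0.75:
# dy=3.0, dx=0.5 -> kept (downward and abs(dy/dx)=6 > 0.9)
# dy=1.0, dx=2.0 -> rejected (abs(dy/dx)=0.5, too horizontal)
# dy=-2.0 -> rejected (upward)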
dy = y - prev_pnt[1]
if dy > self.filament_sp_min_step: #i.e downward and bigger than
dx = x - prev_pnt[0]
#vertical test
if round(dx,2) == 0.0 or abs(dy/dx) > 0.9:
self.speed_xy_per_track.append(dy)
# h = dx**2 + dy**2
# self.speed_xy_per_track.append(math_sqrt(h))
#------------------------------------------------------------------------------
def plot_init(self):
'''
'''
self.ax_xy_p_track = self.plot_axes[0,0]
self.ax_1 = self.plot_axes[1,0]
#------------------------------------------------------------------------------
def plot_data(self):
'''
'''
self.speed_xy_per_track.sort()
self.ax_xy_p_track.cla()
self.ax_xy_p_track.plot(self.speed_xy_per_track)
self.ax_xy_p_track.axis(ymin=0,ymax=10)
#==============================================================================
#==============================================================================
class FilamentWidthMethod():
def __init__(self,frame,roi_pnts,**kwargs):
'''
Estimate filament width
frame = roi area in main frame
roi_pnts = (x1,y1,x2,y2), coords relative to main frame
Method:
Assume the user aligns the roi so that the vlines (vertical alignment lines)
are approximately on the left and right edge of the filament.
Then extract two sub-rois, left and right, assuming the filament edges
are in these areas - within a certain distance from the left and right vline.
Then zoom (magnify) these areas.
Estimate each edge using the max/min of the first derivative
of the column-wise average intensity.
Convert back to roi coordinates for the final answer.
detection kwargs
'''
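#sub-pixel conversion (illustrative): with w_zf = 4, an edge found at zoomed
#column 37 maps back to 37/4 = 9.25 px inside the sub-roi, i.e. left_left + 9.25
#in roi coordinates; the width is then width_right_pnt - width_left_pnt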
#info
self.name_id = kwargs.get('name_id','Width_0')
#debug control
self.plot_on = kwargs.get('plot_on',False) # plots data
self.draw_on = kwargs.get('draw_on',False) #draws additional screens
self.collect_on = kwargs.get('collect_on',False) #collects data in list
self.collect_on = self.collect_on or self.plot_on or self.draw_on
#plotting placeholder, for debugging/testing
self.plot_axes = None
#configs
self.configs = kwargs
self.material = self.configs.get('mat_colour','white').lower()
#width estimation
#sub_pixel method
self.w_zf = self.configs.get('w_zf',4) #zoomfactor
self.w_detection_half_width = self.configs.get('w_detection_half_width',10)
self.w_vline_left_border = self.configs.get('w_vline_left_border',20)
self.w_vline_right_border = self.configs.get('w_vline_right_border',15)
#pre-process
#material specific
#START INIT
#roi region, size parameters - recalculate on change of ROI/params etc.
self.roi_x1 ,self.roi_y1,self.roi_x2,self.roi_y2 = roi_pnts
roi_bgr = frame[self.roi_y1:self.roi_y2,self.roi_x1:self.roi_x2]
self.roi = cv2.GaussianBlur(roi_bgr, (3, 3),0)
self.gray = cv2.cvtColor(self.roi,cv2.COLOR_BGR2GRAY)
self.update_roi(roi_pnts,**kwargs)
#init first frames
#data collection
self.time_step_current = 0
#additional data for review, used with collect_on/plot_on
if self.collect_on:
self.width_history = []
self.time_step = []
#set apply method
w_timestep = kwargs.get('w_timestep',False)
if w_timestep:
self.apply = self.apply_w_timestep
else:
self.apply = self.apply_wo_timestep
#------------------------------------------------------------------------------
def update_roi(self,roi_pnts,**kwargs):
'''
Update roi parameters and dependent vars
'''
self.cal_factor = kwargs.get('cal_factor',0.0212)
#width values
# self.width_average = [] #average of w_vals_f per frame - track delta width
self.width_value = self.configs.get('width_default',150) #current value for filament width
#roi
self.roi_x1 ,self.roi_y1,self.roi_x2,self.roi_y2 = roi_pnts
self.roi_pnt1 = (self.roi_x1,self.roi_y1)
self.roi_pnt2 = (self.roi_x2,self.roi_y2)
self.roi_height = self.roi_y2 - self.roi_y1
self.roi_width = self.roi_x2 - self.roi_x1
self.w_roi_middle = int((self.roi_width)/2) #middle of roi rectangle, approx middle of filament
self.w_roi_middle_left = int(self.w_roi_middle/2) # middle of left half
self.w_roi_middle_right = int(self.w_roi_middle + self.w_roi_middle/2) #middle of right half
self.w_vline_left_roi = self.w_vline_left_border #x-position of vline in roi
self.w_vline_right_roi = self.roi_width - self.w_vline_right_border #x-position of vline right
self.width_left_pnt = self.w_vline_left_roi #min point - left edge
self.width_right_pnt = self.w_vline_right_roi #max point - right edge
# print(self.w_vline_left_roi,self.w_detection_half_width)
#assume left and right edge are in these sub-roi areas
self.left_left = self.w_vline_left_roi - self.w_detection_half_width
self.left_right = self.w_vline_left_roi + self.w_detection_half_width
roi_left = self.roi[:,self.left_left:self.left_right]
self.right_left = self.w_vline_right_roi - self.w_detection_half_width
self.right_right = self.w_vline_right_roi + self.w_detection_half_width
roi_right = self.roi[:,self.right_left : self.right_right]
self.w_vline_left_roix = roi_left.shape[1] - self.w_detection_half_width
self.w_vline_right_roix = self.w_detection_half_width
self.w_zoom_left = (roi_left.shape[1]*self.w_zf, roi_left.shape[0]*self.w_zf)
self.w_zoom_right= (roi_right.shape[1]*self.w_zf, roi_right.shape[0]*self.w_zf)
#------------------------------------------------------------------------------
def width_preprocess(self,frame):
'''
Preprocess raw frame, apply gear mask
'''
self.frame_gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
if self.downscale_factor != 1:
self.frame_gray = cv2.resize(self.frame_gray,(self.roi_new_w,self.roi_new_h))
self.frame_gray[self.gear_mask_indices] = 0
#------------------------------------------------------------------------------
def apply_w_timestep(self,frame,*args,**kwargs):
'''
Estimate speed and measure timestep for whole process
'''
e1 = cv2.getTickCount()
self.apply_wo_timestep(frame)
e2 = cv2.getTickCount()
self.time_step_current = (e2 - e1)/ cv2.getTickFrequency()
if self.collect_on:
self.time_step.append(self.time_step_current)
self.width_history.append(self.width_value)
if self.plot_on:
self.plot_data()
if self.draw_on:
self.draw_edges()
#------------------------------------------------------------------------------
def apply_wo_timestep(self,roi_bgr,*args,**kwargs):
'''
Estimate width, without measuring the time step
'''
#pre-process
self.roi = cv2.GaussianBlur(roi_bgr, (3, 3),0)
self.gray = cv2.cvtColor(self.roi,cv2.COLOR_BGR2GRAY)
#process
self.width_subpixel_method()
#------------------------------------------------------------------------------
def width_subpixel_method(self):
'''
The filament width ROI has 2 sub-ROIs - left and right.
Detect edge in each sub-ROI.
Subpixel resolution achieved by zooming image.
'''
#SUB ROI AREA
gray_left = self.gray[:,self.left_left:self.left_right]
gray_right = self.gray[:,self.right_left : self.right_right]
#ZOOM
self.gray_left = cv2.resize(gray_left,self.w_zoom_left)
self.gray_right = cv2.resize(gray_right,self.w_zoom_right)
#ESTIMATE
self.edge_left = self.width_est_edge_zoomed(self.gray_left,left_edge = True)
self.edge_right = self.width_est_edge_zoomed(self.gray_right,left_edge = False)
#SHIFT
self.width_left_pnt = self.edge_left/self.w_zf + self.left_left
self.width_right_pnt = self.edge_right/self.w_zf + self.right_left
#STORE
self.width_value = self.width_right_pnt - self.width_left_pnt
#------------------------------------------------------------------------------
def width_est_edge_zoomed(self,gray_roi,left_edge = True):
'''
gray_roi, roi over expected edge area, must be grayscale.
'''
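#left edge: the column-average intensity rises, so its first derivative has a
#maximum (argmax); right edge: it falls, so the derivative has a minimum (argmin)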
#ESTIMATE
ave_over_columns = np.average(gray_roi,0) #for gray scale ROI
diff_ave = np.diff(ave_over_columns) #first derivative
#arg max/min of first derivative
if left_edge:
turn_pnt = diff_ave.argmax()
else:
turn_pnt = diff_ave.argmin()
#arg max average value
# max_ave = diff_ave[turn_pnt-self.w_zf:turn_pnt+self.w_zf]
# if len(max_ave) > 0:
# max_ave_centre = np.average(max_ave, weights=max_ave)
# edge_pos = turn_pnt-self.w_zf + max_ave_centre
# else:
# edge_pos = turn_pnt
# return edge_pos
return turn_pnt
#------------------------------------------------------------------------------
def plot_init(self):
'''
'''
self.axes_all = self.plot_axes[0,0]
self.axes_lr = self.plot_axes[0,1]
self.axes_avecols_left = self.plot_axes[1,0]
self.axes_avecols_right = self.plot_axes[1,1]
self.axes_diffcols_left = self.plot_axes[2,0]
self.axes_diffcols_right = self.plot_axes[2,1]
#------------------------------------------------------------------------------
def plot_data(self):
'''
'''
all_ave = np.average(self.gray,0)
gray_left_unz = self.gray[:,self.left_left:self.left_right]
gray_right_unz = self.gray[:,self.right_left : self.right_right]
xpnts_left = np.arange(gray_left_unz.shape[1])
xpnts_right = np.arange(gray_right_unz.shape[1]) + gray_left_unz.shape[1]
colave_left_unz = np.average(gray_left_unz,0)
colave_right_unz = np.average(gray_right_unz,0)
#zoomed
colave_left = np.average(self.gray_left,0)
colave_right = np.average(self.gray_right,0)
diff_left = np.diff(colave_left)
diff_right = np.diff(colave_right)
max_left = diff_left.argmax()
max_right = diff_right.argmin()
#PLOT WHOLE ROI
self.axes_all.cla()
self.axes_all.plot(all_ave,'gray',label = 'all_ave')
self.axes_all.axvline(self.w_roi_middle,c = 'k',linestyle = '--',label = 'w_roi_middle')
self.axes_all.axvline(self.w_roi_middle_left,c = 'gray',linestyle = '-.',label = 'w_roi_middle_left')
self.axes_all.axvline(self.w_roi_middle_right,c = 'gray',linestyle = '-.',label = 'w_roi_middle_right')
self.axes_all.axvline(self.left_left,c = 'gray',linestyle = '--',label = 'left_left')
self.axes_all.axvline(self.left_right,c = 'gray',linestyle = '--',label = 'left_right')
self.axes_all.axvline(self.right_left,c = 'gray',linestyle = '--',label = 'right_left')
self.axes_all.axvline(self.right_right,c = 'gray',linestyle = '--',label = 'right_right')
self.axes_all.axvline(self.width_left_pnt,c = 'r',linestyle = '-',label = 'width_left_pnt')
self.axes_all.axvline(self.width_right_pnt,c = 'r',linestyle = '-',label = 'width_right_pnt')
# self.axes_all.set_title('whole roi ave_cols')
#PLOT left and right, before zoom
self.axes_lr.cla()
self.axes_lr.plot(xpnts_left,colave_left_unz)
self.axes_lr.plot(xpnts_right,colave_right_unz)
#PLOT LEFT ZOOMED
self.axes_avecols_left.cla()
self.axes_avecols_left.plot(colave_left,'gray',label = 'colave_left')
self.axes_avecols_left.axvline(self.edge_left,c = 'r',linestyle = '--',label = 'left_edge_pos')
self.axes_diffcols_left.cla()
self.axes_diffcols_left.plot(diff_left)
self.axes_diffcols_left.axvline(max_left,c = 'k',linestyle = '--',label = 'max_left')
self.axes_diffcols_left.axvline(self.edge_left,c = 'r',linestyle = '--',label = 'left_edge_pos')
#PLOT RIGHT ZOOMED
self.axes_avecols_right.cla()
self.axes_avecols_right.plot(colave_right,'gray',label = 'colave_right')
self.axes_avecols_right.axvline(self.edge_right,c = 'r',linestyle = '--',label = 'right_edge_pos')
self.axes_diffcols_right.cla()
self.axes_diffcols_right.plot(diff_right)
self.axes_diffcols_right.axvline(max_right,c = 'k',linestyle = '--',label = 'max_right')
self.axes_diffcols_right.axvline(self.edge_right,c = 'r',linestyle = '--',label = 'right_edge_pos')
#------------------------------------------------------------------------------
def draw_edges(self):
'''
Draw the detected edges on the grayscale ROI and on the zoomed sub-ROIs
for visual inspection.
'''
gray = cv2.cvtColor(self.gray,cv2.COLOR_GRAY2BGR)
edge_left= int(round(self.width_left_pnt))
edge_right= int(round(self.width_right_pnt))
cv2.line(gray,(edge_left,0),(edge_left,gray.shape[1]),(0,0,255))
cv2.line(gray,(edge_right,0),(edge_right,gray.shape[1]),(0,0,255))
gray = cv2.resize(gray,(gray.shape[0]*16,gray.shape[1]*1))
cv2.imshow('gray',gray)
edge_left_zoomed = int(round(self.edge_left))
gray_left = cv2.cvtColor(self.gray_left,cv2.COLOR_GRAY2BGR)
cv2.line(gray_left,(edge_left_zoomed,0),(edge_left_zoomed,gray_left.shape[0]),(0,0,255))
cv2.imshow('gray_left',gray_left)
edge_right_zoomed = int(round(self.edge_right))
gray_right = cv2.cvtColor(self.gray_right,cv2.COLOR_GRAY2BGR)
cv2.line(gray_right,(edge_right_zoomed,0),(edge_right_zoomed,gray_right.shape[0]),(0,0,255))
cv2.imshow('gray_right',gray_right)
cv2.moveWindow('gray',700,0)
cv2.moveWindow('gray_left',700 + gray.shape[1] +50,0)
cv2.moveWindow('gray_right',700 + gray.shape[1] + gray_left.shape[1] +50,0)
#==============================================================================
class TestROI():
def __init__(self,video_fullname,video_config,apply_methods,**kwargs):
'''
apply_methods, list of dicts, defining which methods to apply.
'''
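#e.g. apply_methods = [{'method_name': 'GDM', 'downscale_factor': 2,
# 'w_timestep': True, 'id': 'x2'}]  (see the __main__ block below)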
#options
self.profile = kwargs.get('profile',False) # run profiling, do not plot
self.video_playback = True
#data
self.video_fullname = video_fullname
self.config = video_config
#SOURCE
self.init_video()
#first frame
self.video_capture.set(cv2.CAP_PROP_POS_FRAMES,self.frame_id)
retval, frame = self.video_capture.read()
#ROI SETUP
self.init_windows()
#DETECTION METHODS SETUP
self.method_types = {}
self.method_types['GLK'] = [GearLKMethod,'gear'] #method and roi points
self.method_types['GDM'] = [GearDenseMethod,'gear']
self.method_types['FLK'] = [FilamentLKMethod,'fil_speed']
self.method_types['FWM'] = [FilamentWidthMethod,'width']
self.method_types['gearSDM'] = [SmallAreaDenseMethod,'gearSDM']
self.method_types['filSDM'] = [SmallAreaDenseMethod,'filSDM']
self.methods = [] #list of methods to apply
self.draw_roi_rects = {} #roi rectangles to draw
self.draw_roi_circles = {}
collect_data_all = not(self.profile)
if not(isinstance(apply_methods,list)):
apply_methods = [apply_methods]
for index,new_method_kwarg in enumerate(apply_methods):
method_name = new_method_kwarg.pop('method_name')
method_id = new_method_kwarg.pop('id',index)
method,method_type = self.method_types[method_name]
method_roi_pnts,method_spec_kwarg = self.get_roi_coords(method_type)
name_id = '{}_{}_{}'.format(method_name,method_id,index)
new_method_kwarg.update(self.config_dict['vision'])
new_method_kwarg.update(self.config_dict['camera'])
new_method_kwarg.update(method_spec_kwarg)
new_method = method(frame,method_roi_pnts,
name_id = name_id,
collect_on = collect_data_all,
**new_method_kwarg)
self.methods.append(new_method)
new_method.method_name = method_name
new_method.method_type = method_type
#roi mask circles
if method_name in ['GLK','GDM']:
m0 = new_method
self.draw_roi_circles['cr_max'] = [(0,255,125),(m0.cx, m0.cy),m0.cr_max]
self.draw_roi_circles['cr_od'] = [(0,255,255),(m0.cx, m0.cy),m0.cr_od]
self.draw_roi_circles['cr_min'] = [(0,255,125),(m0.cx, m0.cy),m0.cr_min]
#PLOT SETUP
self.plot_init()
#------------------------------------------------------------------------------
def main_loop(self):
'''
Main processing loop: read frames, apply the selected methods to their ROIs,
plot/draw the results and handle interactive playback control.
'''
#INTERACTIVE CONTROL FLAGS
self.pause = True and not(self.profile)
self.plot_update = False
self.first_it = True
self.prev_id = self.frame_id -1
self.plot_cont = False
self.prev_thresh_pnt = 100
self.process_frame = False
frame = self.video_capture.read()
exit_main_loop = False
while not(exit_main_loop):
# print(self.frame_id)
if self.video_playback: #vs realtime capture
self.video_capture.set(cv2.CAP_PROP_POS_FRAMES,self.frame_id)
if self.prev_id != self.frame_id or self.process_frame:
retval, frame = self.video_capture.read()
#APPLY METHOD
for method in self.methods:
# print('apply method; {}'.format(method.name_id))
if not(self.profile):
self.get_roi_coords(method.method_type)
roi = frame[method.roi_y1:method.roi_y2,method.roi_x1:method.roi_x2]
roi_send = roi.copy()
method.apply(roi_send,self.frame_id)
self.prev_id = self.frame_id #prev id processed
self.process_frame = False
#draw
if not(self.profile):
self.plot_data()
self.draw_frame(frame)
exit_main_loop = self.playback_control()
#------ end of main loop ---
self.exit_test()
#------------------------------------------------------------------------------
def draw_frame(self,frame):
'''
draw information on frame
'''
#POST FRAME
#ROIs
for roi in self.draw_roi_rects.values():
colour,xy1xy2 = roi
cv2.rectangle(frame, (xy1xy2[0],xy1xy2[1]), (xy1xy2[2],xy1xy2[3]), colour, 2)
#mask circle roi
for circles in self.draw_roi_circles.values():
colour,cxy,radius = circles
cv2.circle(frame, cxy, radius, colour, 1)
#cx and cy
cv2.line(frame,(self.gear_centre[0],0),(self.gear_centre[0],frame.shape[0]),(255,255,0))
cv2.line(frame,(0,self.gear_centre[1]),(frame.shape[1],self.gear_centre[1]),(255,255,0))
#vline left
cv2.line(frame,(self.controls['vline_left'],0),(self.controls['vline_left'],frame.shape[0]),(255,255,0))
#show
cv2.imshow('source',frame)
#------------------------------------------------------------------------------
def plot_init(self):
'''
'''
if not(self.profile):
plt.ion()
plt.show()
fig, all_axes = plt.subplots(1,4)
self.all_data_ax = all_axes[0]
self.all_pxl_ax = all_axes[1]
self.all_speed_ax = all_axes[2]
self.all_width_ax = all_axes[3]
for method in self.methods:
if method.plot_on:
fig, method.plot_axes = plt.subplots(3,2)
method.plot_init()
#polar plot
#fig, P_axes = plt.subplots(1,1,projection='polar')
# f_p = plt.figure()
# DM_polar_ax = plt.subplot(111, projection='polar')
#------------------------------------------------------------------------------
def plot_data(self):
'''
'''
if (self.plot_update or self.plot_cont):
self.plot_update = False
#plot all data
self.all_data_ax.cla()
self.all_pxl_ax.cla()
self.all_speed_ax.cla()
self.all_width_ax.cla()
for index,method in enumerate(self.methods):
self.all_data_ax.plot(method.time_step,label = method.name_id)
if method.method_name == 'FWM': #width
self.all_width_ax.plot(method.width_history,'o-')
else:
self.all_pxl_ax.plot(method.speed_step,label = method.name_id)
self.all_speed_ax.plot(method.speed_mms,label = method.name_id)
#anotate
self.all_data_ax.legend()
self.all_data_ax.set_title('Time Step (s)')
self.all_pxl_ax.legend()
self.all_pxl_ax.set_title('Speed (pxl/frame)')
self.all_speed_ax.legend()
self.all_speed_ax.set_title('Speed (mm/s)')
# self.all_width_ax.legend()
self.all_width_ax.set_title('Width (pxl)')
#------------------------------------------------------------------------------
def playback_control(self):
'''
poll keyboard for user input
'''
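#key bindings: ESC exit, u update plot, p pause/play, n/b step to next/previous
#frame, x/y step to next/previous frame and update plot, o continuous plotting,
#s save each method's speed_step data to a text file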
exit_flag = False
#CONTROL
ch = 0xFF & cv2.waitKey(5)
if ch == 27:
exit_flag = True
elif ch == ord('u'): #update plot
self.plot_update = True
self.pause = True
elif ch == ord('p'): #pause/play
self.pause = not(self.pause)
print('play/pause {}'.format(self.pause))
elif ch == ord('n'): #step next frame
self.pause = True
self.frame_id = self.frame_id + 1 if self.frame_id + 1 < self.num_frames else self.frame_id
elif ch == ord('b'): #step prev frame
self.pause = True
self.frame_id = self.frame_id - 1 if self.frame_id - 1 >= 0 else self.frame_id
elif ch == ord('x'): #step next frame and update
self.pause = True
self.plot_update = True
self.frame_id = self.frame_id + 1 if self.frame_id + 1 < self.num_frames else self.frame_id
elif ch == ord('y'): #step prev frame and update
self.pause = True
self.plot_update = True
self.frame_id = self.frame_id - 1 if self.frame_id - 1 >= 0 else self.frame_id
elif ch == ord('o'): #cont. mode and plot
self.plot_cont = True
self.pause = False
elif ch == ord('s'): #dump data
self.plot_cont = False
self.pause = True
for method in self.methods:
np.savetxt(method.name_id + '.txt',method.speed_step)
print('saved {}, -speed_step- '.format(method.name_id))
elif ch != 255:
print('unknown command/key {}'.format(ch))
print('u,p,n,b,x,y,o')
if not(self.pause) and self.video_playback:
if self.frame_id + 1 >= self.num_frames:
self.pause = True
self.plot_cont = False
self.plot_update = True
print('all frames')
if self.profile:
exit_flag = True
# np.savetxt('test_1.txt',np.array(LK.speed_mms))
# print('saved')
else:
self.frame_id += 1
return exit_flag
#------------------------------------------------------------------------------
def exit_test(self):
'''
Release the video capture and close all OpenCV windows.
'''
#END MAIN LOOP - EXIT
self.video_capture.release()
cv2.destroyAllWindows()
#------------------------------------------------------------------------------
def init_video(self):
'''
load video and video configs, if available
'''
self.video_capture = cv2.VideoCapture(self.video_fullname)
self.num_frames = int(self.video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
x_c = self.config.getint('vision','x_gear_c')
y_c = self.config.getint('vision','y_gear_c')
self.gear_centre = (x_c,y_c)
self.fps = self.config.getint('camera','cam_fps')
self.frame_id = self.config.getint('vision','frame_start')
self.od_resize_factor = self.config.getfloat('vision','od_resize_factor')
self.control_vars = []
self.control_vars.append(('vline_right',False))
self.control_vars.append(('vline_left',False))
self.control_vars.append(('y_pre_end',False))
self.control_vars.append(('y_post_start',False))
self.control_vars.append(('x_gear_c',True))
self.control_vars.append(('y_gear_c',False))
#
self.control_vars.append(('x_gear_teeth',False))
self.control_vars.append(('gear_roi_x2',False))
self.control_vars.append(('gear_roi_y2',False))
self.control_vars.append(('cr_add',False))
self.control_vars.append(('cr_min',False))
#
self.control_vars.append(('w_vline_left_border',False))
self.control_vars.append(('w_vline_right_border',False))
self.config_dict = make_config_dict(self.config)
self.video_capture.set(cv2.CAP_PROP_POS_FRAMES,self.frame_id)
#------------------------------------------------------------------------------
def init_windows(self,frame = None,**kwargs):
'''
Create the source window and the trackbars for the interactive controls.
'''
cv2.namedWindow('source')
self.controls = {}
for variable,show_trackbar in self.control_vars:
init_val = self.config_dict['vision'][variable]
if show_trackbar:
cv2.createTrackbar(variable,'source',init_val,640,self.on_trackbar)
self.controls[variable] = init_val
cv2.createTrackbar('aX','source',311,640,self.on_trackbar)
cv2.createTrackbar('aY','source',167,640,self.on_trackbar)
cv2.createTrackbar('bX','source',334,640,self.on_trackbar)
cv2.createTrackbar('bY','source',364,640,self.on_trackbar)
cv2.moveWindow('source',0,0)
# cv2.moveWindow('ROI',640,0)
# cv2.moveWindow('Thresh',640*2,0)
#------------------------------------------------------------------------------
def get_roi_coords(self,roi_type = 'gear'):
'''
Read the trackbar controls and return the ROI points (and any method-specific
kwargs) for the requested roi_type.
'''
for variable,read_trackbar in self.control_vars:
if read_trackbar:
self.controls[variable] = cv2.getTrackbarPos(variable,'source')
self.gear_centre = (self.controls['x_gear_c'],self.controls['y_gear_c'])
method_kwargs = {}
if roi_type == 'width':
#PRE GEAR AREA- estimate filament width
x_start = self.controls['vline_left'] - self.controls['w_vline_left_border']#20
y_start = self.controls['y_pre_end'] - 40
x_end = self.controls['vline_right'] + self.controls['w_vline_right_border']#15
roi_pnts = (x_start,y_start, x_end, self.controls['y_pre_end'])
width_default = self.controls['vline_right'] - self.controls['vline_left']
method_kwargs['width_default'] = width_default
method_kwargs['w_vline_left_border'] = self.controls['w_vline_left_border']
method_kwargs['w_vline_right_border'] = self.controls['w_vline_right_border']
self.draw_roi_rects['width'] = [(75,125,225),roi_pnts]
elif roi_type == 'fil_speed':
#POST GEAR - filament speed
x_start = self.controls['vline_left'] -self.controls['x_gear_teeth']
x_end = self.controls['vline_right'] + 10
y_end = 480- 10 #self.controls['y_post_end_offset']
roi_pnts = (x_start,self.controls['y_post_start'],x_end, y_end)
self.draw_roi_rects['fil_speed'] = [(225,125,75),roi_pnts]
elif roi_type == 'gear':
#GEAR
x_start = max(0,self.gear_centre[0]) #left top pnt
y_start = max(0,self.gear_centre[1]) #left top pnt
x_end = self.controls['gear_roi_x2']
y_end = self.controls['gear_roi_y2']
roi_pnts = (x_start,y_start,x_end,y_end)
method_kwargs['gear_centre'] = self.gear_centre
self.draw_roi_rects['gear'] = [(75,125,225),roi_pnts]
elif roi_type == 'gearSDM':
#GEAR
x_start = cv2.getTrackbarPos('aX','source') #left top pnt
y_start = cv2.getTrackbarPos('aY','source') #left top pnt
x_end = x_start + 40
y_end = y_start + 40
roi_pnts = (x_start,y_start,x_end,y_end)
self.draw_roi_rects['gearSDM'] = [(125,75,225),roi_pnts]
elif roi_type == 'filSDM':
#fill
x_start = cv2.getTrackbarPos('bX','source') #left top pnt
y_start = cv2.getTrackbarPos('bY','source') #left top pnt
x_end = x_start + 40
y_end = y_start + 40*2
roi_pnts = (x_start,y_start,x_end,y_end)
self.draw_roi_rects['filSDM'] = [(125,125,225),roi_pnts]
return roi_pnts,method_kwargs
#------------------------------------------------------------------------------
def on_trackbar(self,new_val):
'''
Flag the current frame for reprocessing when a trackbar value changes.
'''
self.process_frame = True
#==============================================================================
#==============================================================================
def reject_outliers(data, m = 2.,return_option = 0,default_value = 0):
'''
http://stackoverflow.com/questions/11686720/
is-there-a-numpy-builtin-to-reject-outliers-from-a-list
data: np.array
m: threshold
return_option:
0 - values w/o outliers
1 - average of values w/o outliers
2 - mean of values w/o outliers
3 - non-outlier indicator array (0/1)
4 - [std, mean] of values w/o outliers
'''
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d/mdev if mdev else 0.
indices = np.array(s < m,dtype = int)
# indices = indices.astype(int)
#
try:
new_set = data[s < m] #select with the boolean mask; 'indices' above is only a 0/1 indicator array
except IndexError:
if default_value is not None:
return default_value
new_set = data[-1]
# print('Warning IndexError - len(data) == 1')
if return_option == 0:
return new_set
if return_option == 1:
return np.average(new_set)
elif return_option == 2:
return np.mean(new_set)
elif return_option == 3:
return indices
elif return_option == 4:
return [np.std(new_set),np.mean(new_set)]
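#illustrative usage (assuming the boolean-mask selection above):
# reject_outliers(np.array([1.0, 1.2, 0.9, 10.0]), return_option = 1) -> ~1.03
# (10.0 lies more than m median-deviations from the median and is rejected)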
#==============================================================================
if __name__ == '__main__':
'''
method type, general kwargs:
method_name : LK, DM
plot_on: False
draw_on: False
downscale_factor: 1
w_timestep: False
'''
import configparser
print('main')
#------------------------------------------------------------------------
do_profiling = False
#------------------------------------------------------------------------
#DATA to load
# data_name = 'data\\Test_2017_02_17__08_45_46'
data_name = 'data\\Test_2017_04_11__14_27_24'
video_fullname = data_name +'.avi'
config_filename = data_name +'.cfg'
#get config
config = configparser.RawConfigParser()
config.optionxform = str
config.read(config_filename)
# video_fullname = 'data\\Test_2017_02_17__08_43_56.avi'
# config_filename = 'data\\Test_2017_02_17__08_43_56.cfg'
# video_fullname = 'data\\N5_Mwhite285_2016_05_24__11_39_22_T2300_L500_S20_0.avi'
# config_filename = 'data\\N5_Mwhite285_2016_05_24__11_39_22_T2300_L500_S20_0.cfg'
# video_fullname = 'data\\modelling\\unc_model_0b s2_L20.avi'
# config_filename = 'data\\modelling\\unc_model_0b s2_L20.cfg'
# video_fullname = 'data\\modelling\\unc_model_0c s2_L2_5_f262.avi'
# config_filename = 'data\\modelling\\unc_model_0c s2_L2_5_f262.cfg'
#------------------------------------------------------------------------
#METHODS to apply
methods = []
w_timestep = not(do_profiling)
GLK_11 = {'method_name': 'GLK',
'downscale_factor':1,
'w_timestep': w_timestep,
'plot_on':False,
'draw_on':False}
GDM_11 = {'method_name': 'GDM',
'downscale_factor':1,
'w_timestep': w_timestep,
'id': 'x1',
'plot_on':False,
'draw_on':False}
GDM_21 = {'method_name': 'GDM',
'downscale_factor':2,
'w_timestep': w_timestep,
'id': 'x2',
'plot_on':False,
'draw_on':False}
FLK_11 = {'method_name': 'FLK',
'w_timestep': w_timestep,
'id': 'fil',
'plot_on':False,
'draw_on':False}
FWM_11 = {'method_name': 'FWM',
'w_timestep': w_timestep,
'id': 'sub',
'plot_on':False,
'draw_on':False}
SMD_g1 = {'method_name': 'gearSDM',
'w_timestep': w_timestep,
'id': 'x',
'plot_on':False,
'draw_on':False}
SMD_f1 = {'method_name': 'filSDM',
'w_timestep': w_timestep,
'id': 'x',
'plot_on':False,
'draw_on':False}
# methods.append(GLK_11)
# methods.append(GDM_11)
methods.append(GDM_21)
methods.append(FLK_11)
methods.append(FWM_11)
methods.append(SMD_g1)
methods.append(SMD_f1)
#------------------------------------------------------------------------
test_roi = TestROI(video_fullname,config,methods,profile = do_profiling)
test_roi.main_loop()
|
gpgreeff/filament_vision_sensing
|
ROIAtesting.py
|
Python
|
mit
| 99,127
|
[
"Gaussian"
] |
bbb03a72810ad48b9fd851706c6d302d65d0e8937bf6d80f625e17a3272017b7
|
""" DIRAC FileCatalog Security Manager base class
"""
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Security.Properties import FC_MANAGEMENT
_readMethods = [
"exists",
"isFile",
"getFileSize",
"getFileMetadata",
"getReplicas",
"getReplicaStatus",
"getFileAncestors",
"getFileDescendents",
"listDirectory",
"isDirectory",
"getDirectoryReplicas",
"getDirectorySize",
"getDirectoryMetadata",
]
_writeMethods = [
"changePathOwner",
"changePathGroup",
"changePathMode",
"addFile",
"setFileStatus",
"removeFile",
"addReplica",
"removeReplica",
"setReplicaStatus",
"setReplicaHost",
"addFileAncestors",
"createDirectory",
"removeDirectory",
"setMetadata",
"__removeMetadata",
]
class SecurityManagerBase(object):
def __init__(self, database=None):
self.db = database
def setDatabase(self, database):
self.db = database
def getPathPermissions(self, paths, credDict):
"""Get path permissions according to the policy"""
return S_ERROR("The getPathPermissions method must be implemented in the inheriting class")
def hasAccess(self, opType, paths, credDict):
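"""Check whether the user in credDict may perform opType on the given paths.
Read methods map to 'Read' and write methods to 'Write'; FC_MANAGEMENT grants
admin access to everything. Returns S_OK({'Successful': {path: bool}, 'Failed': {}}).
"""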
# Map the method name to Read/Write
if opType in _readMethods:
opType = "Read"
elif opType in _writeMethods:
opType = "Write"
# Check if admin access is granted first
result = self.hasAdminAccess(credDict)
if not result["OK"]:
return result
if result["Value"]:
# We are admins, allow everything
permissions = {}
for path in paths:
permissions[path] = True
return S_OK({"Successful": permissions, "Failed": {}})
successful = {}
failed = {}
if not opType.lower() in ["read", "write", "execute"]:
return S_ERROR("Operation type not known")
if self.db.globalReadAccess and (opType.lower() == "read"):
for path in paths:
successful[path] = True
resDict = {"Successful": successful, "Failed": {}}
return S_OK(resDict)
result = self.getPathPermissions(paths, credDict)
if not result["OK"]:
return result
permissions = result["Value"]["Successful"]
for path, permDict in permissions.items():
if permDict[opType]:
successful[path] = True
else:
successful[path] = False
failed.update(result["Value"]["Failed"])
resDict = {"Successful": successful, "Failed": failed}
return S_OK(resDict)
def hasAdminAccess(self, credDict):
if FC_MANAGEMENT in credDict["properties"]:
return S_OK(True)
return S_OK(False)
|
DIRACGrid/DIRAC
|
src/DIRAC/DataManagementSystem/DB/FileCatalogComponents/SecurityManager/SecurityManagerBase.py
|
Python
|
gpl-3.0
| 2,826
|
[
"DIRAC"
] |
7be7b8176c38a1cddb0e8b14a376ccdd42625129c4f671002e704e3202fc8589
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <johan.gronqvist@gmail.com>
# Copyright (C) 2007-2009 Gary Burton <gary.burton@zen.co.uk>
# Copyright (C) 2007-2009 Stephane Charette <stephanecharette@gmail.com>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <jason@bohemianalps.com>
# Copyright (C) 2008-2011 Rob G. Healey <robhealey1@gmail.com>
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010- Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
# Copyright (C) 2018 Theo van Rijn
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Classes:
BasePage - super class for producing a web page. This class is instantiated
once for each page. Provides various common functions.
"""
#------------------------------------------------
# python modules
#------------------------------------------------
from functools import partial
import os
import copy
import datetime
from decimal import getcontext
#------------------------------------------------
# Set up logging
#------------------------------------------------
import logging
from gi.repository import Gdk
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.lib import (FamilyRelType, NoteType, NameType, Person,
UrlType, Name, PlaceType, EventRoleType,
Source, Attribute, Media, Repository, Event,
Family, Citation, Place, Date)
from gramps.gen.lib.date import Today
from gramps.gen.mime import is_image_type
from gramps.gen.const import PROGRAM_NAME, URL_HOMEPAGE
from gramps.version import VERSION
from gramps.gen.plug.report import Bibliography
from gramps.gen.plug.report import utils
from gramps.gen.utils.config import get_researcher
from gramps.gen.utils.string import conf_strings
from gramps.gen.utils.file import media_path_full
from gramps.gen.utils.thumbnails import get_thumbnail_path
from gramps.gen.display.name import displayer as _nd
from gramps.gen.display.place import displayer as _pd
from gramps.plugins.lib.libhtmlconst import _CC
from gramps.gen.utils.db import get_birth_or_fallback, get_death_or_fallback
from gramps.gen.datehandler import parser as _dp
from gramps.plugins.lib.libhtml import Html, xml_lang
from gramps.plugins.lib.libhtmlbackend import HtmlBackend, process_spaces
from gramps.gen.utils.place import conv_lat_lon
from gramps.gen.utils.location import get_main_location
from gramps.plugins.webreport.common import (_NAME_STYLE_DEFAULT, HTTP, HTTPS,
add_birthdate, CSS, html_escape,
_NARRATIVESCREEN, _NARRATIVEPRINT,
FULLCLEAR, _has_webpage_extension)
_ = glocale.translation.sgettext
LOG = logging.getLogger(".NarrativeWeb")
getcontext().prec = 8
TOGGLE = """
<script type="text/javascript">
function toggleContent(elem, icon) {
// Get the DOM reference
var contentId = document.getElementById(elem);
var icon = document.getElementById(icon);
// Toggle
if (contentId.style.display == "block") {
contentId.style.display = "none";
icon.className = 'icon icon-close';
} else {
contentId.style.display = "block";
icon.className = 'icon icon-open';
};
}
</script>
"""
GOTOTOP = """
<script>
//Get the button
var gototop = document.getElementById("gototop");
// When the user scrolls down 200px from the top of the document,
// show the button
window.onscroll = function() {scroll()};
function scroll() {
if (document.body.scrollTop > 200 ||
document.documentElement.scrollTop > 200) {
gototop.style.display = "block";
} else {
gototop.style.display = "none";
}
}
// When the user clicks on the button, scroll to the top of the document
function GoToTop() {
document.body.scrollTop = 0;
document.documentElement.scrollTop = 0;
}
</script>
"""
class BasePage:
"""
Manages all the functions, variables, and everything needed
for all of the classes contained within this plugin
"""
def __init__(self, report, the_lang, the_title, gid=None):
"""
@param: report -- The instance of the main report class for
this report
@param: the_lang -- Is the lang to process
@param: the_title -- Is the title of the web page
@param: gid -- The family gramps ID
"""
self.uplink = False
# class to do conversion of styled notes to html markup
self._backend = HtmlBackend()
self._backend.build_link = report.build_link
self.report = report
self.r_db = report.database
self.r_user = report.user
self.title_str = the_title
self.gid = gid
self.bibli = Bibliography()
self.the_lang = the_lang
self.the_title = the_title
self.not_holiday = True
self.page_title = ""
self.author = get_researcher().get_name()
if self.author:
self.author = self.author.replace(',,,', '')
# TODO. All of these attributes are not necessary, because we have
# also the options in self.options. Besides, we need to check which
# are still required.
self.html_dir = report.options['target']
self.ext = report.options['ext']
self.noid = not report.options['inc_id']
self.inc_tags = report.options['inc_tags']
self.linkhome = report.options['linkhome']
self.create_media = report.options['gallery']
self.create_unused_media = report.options['unused']
self.create_thumbs_only = report.options['create_thumbs_only']
self.create_images_index = report.options['create_images_index']
self.create_thumbs_index = report.options['create_thumbs_index']
self.inc_families = report.options['inc_families']
self.inc_events = report.options['inc_events']
self.usecms = report.options['usecms']
self.prevnext = report.options['prevnext']
self.target_uri = report.options['cmsuri']
self.usecal = report.options['usecal']
self.extrapage = report.options['extrapage']
self.extrapagename = report.options['extrapagename']
self.familymappages = None
self.reference_sort = report.options['reference_sort']
if the_lang:
self.rlocale = report.set_locale(the_lang)
else:
self.rlocale = report.set_locale(report.options['trans'])
self._ = self.rlocale.translation.sgettext
self.colon = self._(':') # Translators: needed for French, else ignore
if report.options['securesite']:
self.secure_mode = HTTPS
else:
self.secure_mode = HTTP
self.target_cal_uri = "cal/%s/index" % Today().get_year()
# Functions used when no Web Page plugin is provided
def add_instance(self, *param):
"""
Add an instance
"""
pass
def display_pages(self, the_lang, the_title):
"""
Display the pages
"""
pass
def sort_on_name_and_grampsid(self, handle):
""" Used to sort on name and gramps ID. """
person = self.r_db.get_person_from_handle(handle)
name = _nd.display(person)
return (name, person.get_gramps_id())
def sort_on_given_and_birth(self, handle):
""" Used to sort on given name and birth date. """
person = self.r_db.get_person_from_handle(handle)
name = _nd.display_given(person)
bd_event = get_birth_or_fallback(self.r_db, person)
birth = ""
if bd_event:
birth_iso = str(bd_event.get_date_object()).replace('abt ', '')
# we need to remove abt, bef, aft, ...
birth = birth_iso.replace('aft ', '').replace('bef ', '')
return (name, birth)
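# A minimal sketch of the qualifier stripping above (hypothetical value):
#     str(bd_event.get_date_object())                    # e.g. 'abt 1823-05-01'
#     birth_iso = 'abt 1823-05-01'.replace('abt ', '')
#     birth_iso.replace('aft ', '').replace('bef ', '')  # '1823-05-01'
# so tuples such as ('John', '1823-05-01') sort chronologically per given name.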
def sort_on_grampsid(self, event_ref):
"""
Sort on gramps ID
"""
evt = self.r_db.get_event_from_handle(
event_ref.ref)
return evt.get_gramps_id()
def copy_thumbnail(self, handle, photo, region=None):
"""
Given a handle (and optional region) make (if needed) an
up-to-date cache of a thumbnail, and call report.copy_file
to copy the cached thumbnail to the website.
Return the new path to the image.
@param: handle -- The handle for this thumbnail
@param: photo -- The image related to this thumbnail
@param: region -- The image region to associate
"""
to_dir = self.report.build_path('thumb', handle)
to_path = os.path.join(to_dir, handle) + (
('%d,%d-%d,%d.png' % region) if region else '.png'
)
if photo.get_mime_type():
full_path = media_path_full(self.r_db, photo.get_path())
from_path = get_thumbnail_path(full_path,
photo.get_mime_type(),
region)
if not os.path.isfile(from_path):
from_path = CSS["Document"]["filename"]
else:
from_path = CSS["Document"]["filename"]
if (self.the_lang is None or
self.the_lang == self.report.languages[0][0]):
# if multiple languages, copy the thumbnail only for the first language.
self.report.copy_file(from_path, to_path)
return to_path
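# Illustrative sketch of the path built above (hypothetical handle and region):
#     handle = 'b39fe2e143d1216354a'
#     region = (10, 10, 60, 60)
#     # -> os.path.join(to_dir, handle) + '10,10-60,60.png'
#     # without a region the suffix is simply '.png'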
def get_nav_menu_hyperlink(self, url_fname, nav_text, cal=0):
"""
Returns the navigation menu hyperlink
"""
uplink = self.uplink
sub_cal = cal if cal > 0 else 1
# check for web page file extension?
if not _has_webpage_extension(url_fname):
url_fname += self.ext
# get menu item url and begin hyperlink...
if self.usecms:
if self.the_lang:
url_name = "/".join([self.target_uri,
self.the_lang,
url_fname])
else:
url_name = "/".join([self.target_uri,
url_fname])
else:
if cal > 0:
url_fname = "/".join(([".."]*sub_cal + [url_fname]))
url_name = self.report.build_url_fname(url_fname, None, uplink)
return Html("a", nav_text, href=url_name, title=nav_text, inline=True)
def get_column_data(self, unordered, data_list, column_title):
"""
Returns the menu column for Drop Down Menus and Drop Down Citations
"""
if not data_list:
return
elif len(data_list) == 1:
url_fname, nav_text = data_list[0][0], data_list[0][1]
hyper = self.get_nav_menu_hyperlink(url_fname, nav_text)
unordered.extend(
Html("li", hyper, inline=True)
)
else:
col_list = Html("li") + (
Html("a", column_title, href="#",
title=column_title, inline=True)
)
unordered += col_list
unordered1 = Html("ul")
col_list += unordered1
for url_fname, nav_text in data_list:
hyper = self.get_nav_menu_hyperlink(url_fname, nav_text)
unordered1.extend(Html("li", hyper, inline=True))
def display_relationships(self, individual, place_lat_long):
"""
Displays a person's relationships ...
@param: individual -- The person whose families are displayed
@param: place_lat_long -- for use in Family Map Pages.
This will be None if called from
Family pages, which do not create
a Family Map
"""
family_list = individual.get_family_handle_list()
if not family_list:
return None
with Html("div", class_="subsection", id="families") as section:
with self.create_toggle("families") as h4_head:
section += h4_head
h4_head += self._("Families")
table_class = "infolist"
if len(family_list) > 1:
table_class += " fixed_subtables"
disp = "none" if self.report.options['toggle'] else "block"
with Html("table", class_=table_class,
id="toggle_families", style="display:%s" % disp) as table:
section += table
for family_handle in family_list:
family = self.r_db.get_family_from_handle(family_handle)
if family:
fam_name = self.report.get_family_name(family)
link = self.family_link(family_handle, fam_name,
gid=family.get_gramps_id(),
uplink=True)
link1 = Html("H4", link, class_="subsection")
trow = Html("tr", class_="BeginFamily") + (
Html("td", link1, class_="ColumnValue", colspan=3,
inline=True)
)
table += trow
# find the spouse of the principal individual and
# display that person
sp_hdl = utils.find_spouse(individual, family)
if sp_hdl:
spouse = self.r_db.get_person_from_handle(sp_hdl)
if spouse:
table += self.display_spouse(spouse, family,
place_lat_long)
details = self.display_family_details(family,
place_lat_long)
if details is not None:
table += details
return section
def display_family_relationships(self, family, place_lat_long):
"""
Displays a family's relationships ...
@param: family -- the family to be displayed
@param: place_lat_long -- for use in Family Map Pages. This will
be None if called from Family pages, which
do not create a Family Map
"""
with Html("div", class_="subsection", id="families") as section:
section += Html("h4", self._("Families"), inline=True)
table_class = "infolist"
with Html("table", class_=table_class) as table:
section += table
for person_hdl in [family.get_father_handle(),
family.get_mother_handle()]:
person = None
if person_hdl:
person = self.r_db.get_person_from_handle(person_hdl)
if person:
table += self.display_spouse(person,
family, place_lat_long)
details = self.display_family_details(family, place_lat_long)
if details is not None:
table += details
return section
def display_family_details(self, family, place_lat_long):
"""
Display details about one family: family events, children, family LDS
ordinances, family attributes
@param: family -- The family
@param: place_lat_long -- For use in Family Map Pages. This will be
None if called from Family pages, which do
not create a Family Map
"""
table = None
birthorder = self.report.options["birthorder"]
# display family events; such as marriage and divorce...
family_events = family.get_event_ref_list()
if family_events:
trow = Html("tr") + (
Html("td", " ", class_="ColumnType", inline=True),
Html("td", self.format_family_events(family_events,
place_lat_long),
class_="ColumnValue", colspan=2)
)
table = trow
# If the families pages are not output, display family notes
if not self.inc_families:
notelist = family.get_note_list()
for notehandle in notelist:
note = self.r_db.get_note_from_handle(notehandle)
if note:
trow = Html("tr") + (
Html("td", " ", class_="ColumnType", inline=True),
Html("td", self._("Narrative"),
class_="ColumnAttribute",
inline=True),
Html("td", self.get_note_format(note, True),
class_="ColumnValue")
)
table = table + trow if table is not None else trow
childlist = family.get_child_ref_list()
if childlist:
trow = Html("tr") + (
Html("td", self._("Children"), class_="ColumnAttribute",
inline=True)
)
table = table + trow if table is not None else trow
tcell = Html("td", class_="ColumnValue Child", close=False,
colspan=2)
trow += tcell
with Html("table", class_="infolist eventlist") as table2:
thead = Html("thead")
table2 += thead
header = Html("tr")
header.extend(
Html("th", label, class_=colclass, inline=True)
for (label, colclass) in [
[self._("Name"), "ColumnName"],
[self._("Birth Date"), "ColumnDate"],
[self._("Death Date"), "ColumnDate"],
]
)
thead += header
# begin table body
tbody = Html("tbody")
table2 += tbody
childlist = [child_ref.ref for child_ref in childlist]
# add individual's children event places to family map...
if self.familymappages:
for handle in childlist:
child = self.r_db.get_person_from_handle(handle)
if child:
self._get_event_place(child, place_lat_long)
children = add_birthdate(self.r_db, childlist, self.rlocale)
if birthorder:
children = sorted(children)
tbody.extend((Html("tr", inline=True) +
Html("td", inline=True, close=False) +
self.display_child_link(chandle) +
Html("td", birth, inline=True) +
Html("td", death, inline=True))
for birth_date, birth, death, chandle in children
)
trow += table2
# family LDS ordinance list
family_lds_ordinance_list = family.get_lds_ord_list()
if family_lds_ordinance_list:
trow = Html("tr") + (
Html("td", " ", class_="ColumnType", inline=True),
Html("td", self._("LDS Ordinance"), class_="ColumnAttribute",
inline=True),
Html("td", self.dump_ordinance(family, "Family",
toggle=False),
class_="ColumnValue")
)
table = table + trow if table is not None else trow
# Family Attribute list
family_attribute_list = family.get_attribute_list()
if family_attribute_list:
trow = Html("tr") + (
Html("td", " ", class_="ColumnType", inline=True),
Html("td", self._("Attributes"), class_="ColumnAttribute",
inline=True)
)
table = table + trow if table is not None else trow
tcell = Html("td", class_="ColumnValue")
trow += tcell
# we do not need the section variable for this instance
# of Attributes...
dummy, attrtable = self.display_attribute_header(toggle=False)
tcell += attrtable
self.display_attr_list(family_attribute_list, attrtable)
return table
def complete_people(self, tcell, first_person, handle_list, uplink=True):
"""
completes the person column for classes EventListPage and EventPage
@param: tcell -- table cell from its caller
@param: first_person -- Not used any more, done via css
@param: handle_list -- handle list from the backlink of
the event_handle
@param: uplink -- If True, then "../../../" is inserted in
front of the result.
"""
dummy_first_person = first_person
for (classname, handle) in handle_list:
# personal event
if classname == "Person":
tcell += Html("span", self.new_person_link(handle, uplink),
class_="person", inline=True)
# family event
else:
_obj = self.r_db.get_family_from_handle(handle)
if _obj:
# husband and spouse in this example,
# are called father and mother
husband_handle = _obj.get_father_handle()
if husband_handle:
hlink = self.new_person_link(husband_handle, uplink)
spouse_handle = _obj.get_mother_handle()
if spouse_handle:
slink = self.new_person_link(spouse_handle, uplink)
if spouse_handle and husband_handle:
tcell += Html("span", hlink, class_="father",
inline=True)
tcell += Html("span", slink, class_="mother",
inline=True)
elif spouse_handle:
tcell += Html("span", slink, class_="mother",
inline=True)
elif husband_handle:
tcell += Html("span", hlink, class_="father",
inline=True)
return tcell
def dump_attribute(self, attr):
"""
dump attribute for object presented in display_attr_list()
@param: attr = attribute object
"""
trow = Html("tr")
trow.extend(
Html("td", data or " ", class_=colclass,
inline=(colclass in ("ColumnType", "ColumnSources")))
for (data, colclass) in [
(str(attr.get_type()), "ColumnType"),
(attr.get_value(), "ColumnValue"),
(self.dump_notes(attr.get_note_list(), Attribute),
"ColumnNotes"),
(self.get_citation_links(attr.get_citation_list()),
"ColumnSources")
]
)
return trow
def get_citation_links(self, citation_handle_list):
"""
get citation link from the citation handle list
@param: citation_handle_list = list of gen/lib/Citation
"""
text = ""
for citation_handle in citation_handle_list:
citation = self.r_db.get_citation_from_handle(citation_handle)
if citation:
index, key = self.bibli.add_reference(citation)
id_ = "%d%s" % (index+1, key)
text += ' <a href="#sref%s">%s</a>' % (id_, id_)
return text
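# Illustrative sketch of the markup produced above (hypothetical values):
#     index, key = 2, 'a'        # as returned by self.bibli.add_reference()
#     id_ = '%d%s' % (index + 1, key)               # '3a'
#     text += ' <a href="#sref3a">3a</a>'           # one link per citation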
def get_note_format(self, note, uplink):
"""
will get the note from the database, and will return either the
styled text or plain note
@param: note -- the note to process
@param: uplink -- If True, then "../../../" is inserted in
front of the result.
"""
self.report.link_prefix_up = uplink
text = ""
if note is not None:
# retrieve the body of the note
note_text = note.get()
# styled notes
htmlnotetext = self.styled_note(
note.get_styledtext(), note.get_format(),
contains_html=(note.get_type() == NoteType.HTML_CODE))
text = htmlnotetext or Html("p", note_text)
# return text of the note to its callers
return text
def styled_note(self, styledtext, styled_format, contains_html=False):
"""
@param: styledtext -- assumed a StyledText object to write
@param: styled_format -- = 0 : Flowed, = 1 : Preformatted
@param: style_name -- name of the style to use for default
presentation
"""
text = str(styledtext)
if not text:
return ''
s_tags = styledtext.get_tags()
htmllist = Html("div", class_="grampsstylednote")
if contains_html:
markuptext = self._backend.add_markup_from_styled(text,
s_tags,
split='\n',
escape=False)
htmllist += markuptext
else:
markuptext = self._backend.add_markup_from_styled(text,
s_tags,
split='\n')
linelist = []
linenb = 1
for line in markuptext.split('\n'):
[line, sigcount] = process_spaces(line, styled_format)
if sigcount == 0:
# The rendering of an empty paragraph '<p></p>'
# is undefined so we use a non-breaking space
if linenb == 1:
linelist.append(' ')
htmllist.extend(Html('p') + linelist)
linelist = []
linenb = 1
else:
if linenb > 1:
linelist[-1] += '<br />'
linelist.append(line)
linenb += 1
if linenb > 1:
htmllist.extend(Html('p') + linelist)
# if the last line was blank, then as well as outputting
# the previous para, which we have just done,
# we also output a new blank para
if sigcount == 0:
linelist = [" "]
htmllist.extend(Html('p') + linelist)
return htmllist
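# A minimal sketch of the paragraph handling above (hypothetical input):
# splitting 'first line\n\nsecond para' on '\n' yields an empty middle line
# (sigcount == 0), which closes the open paragraph, giving
#     <p>first line</p><p>second para</p>
# while a note starting with a blank line gets a non-breaking space so the
# leading empty paragraph is not collapsed by the browser.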
def show_tags(self, obj):
"""
Show all tags associated to an object (Person, Family, Media,...)
@param: obj -- the object for which we show tags
"""
tags_text = ""
if obj is None:
return tags_text
tags = []
for tag_handle in obj.get_tag_list():
tags.append(self.r_db.get_tag_from_handle(tag_handle))
if tags and self.report.inc_tags:
for tag in tags:
if tags_text:
tags_text += ", "
# convert tag color to html format: #RRGGBB
rgba = Gdk.RGBA()
rgba.parse(tag.get_color())
color = '#%02x%02x%02x' % (int(rgba.red * 255),
int(rgba.green * 255),
int(rgba.blue * 255))
tags_text += ("<span style='background-color:%s;'>"
"%s</span>" % (color, self._(tag.get_name())))
return tags_text
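# Illustrative sketch of the colour conversion above (hypothetical colour):
#     rgba.parse('#ff8000')      # red=1.0, green~0.5, blue=0.0
#     '#%02x%02x%02x' % (255, 128, 0)               # '#ff8000'
# each tag name is then wrapped in a <span> using that colour as background.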
def dump_notes(self, notelist, parent=None):
"""
dump out a list of notes with very few elements of its own
@param: notelist -- list of notes
@param: parent -- The parent object (Person, Family, Media,...)
"""
if not notelist:
return Html("div")
# begin unordered list
notesection = Html("div")
idx = 0
for notehandle in notelist:
this_note = self.r_db.get_note_from_handle(notehandle)
title = self._(this_note.type.xml_str())
if this_note is not None:
idx += 1
if len(notelist) > 1:
if (self.default_note(parent, int(this_note.type)) or
int(this_note.type) == NoteType.HTML_CODE):
title_text = self._("Note: %s") % str(idx)
else:
title = " (" + title + ")"
title_text = self._("Note: %s") % str(idx) + title
else:
if (self.default_note(parent, int(this_note.type)) or
int(this_note.type) == NoteType.HTML_CODE):
title_text = self._("Note")
else:
title_text = title
# Tags
if parent:
tags = self.show_tags(this_note)
if tags and self.report.inc_tags:
title_text += " (" + tags + ")"
notesection.extend(Html("i", title_text, class_="NoteType"))
notesection.extend(self.get_note_format(this_note, True))
return notesection
def event_header_row(self):
"""
creates the event header row for all events
"""
trow = Html("tr", close=None)
trow.extend(
Html("th", trans, class_=colclass, inline=True)
for trans, colclass in [
(self._("Event"), "ColumnEvent"),
(self._("Date"), "ColumnDate"),
(self._("Place"), "ColumnPlace"),
(self._("Description"), "ColumnDescription"),
(self._("Sources"), "ColumnSources")]
)
trow += Html("/tr", close=None)
return trow
def display_event_row(self, event, event_ref, place_lat_long,
uplink, hyperlink, omit):
"""
display the event row for IndividualPage
@param: evt -- Event object from report database
@param: evt_ref -- Event reference
@param: place_lat_long -- For use in Family Map Pages. This will be
None if called from Family pages, which do
not create a Family Map
@param: uplink -- If True, then "../../../" is inserted in
front of the result.
@param: hyperlink -- Add a hyperlink or not
@param: omit -- Role to be omitted in output
"""
event_gid = event.get_gramps_id()
place_handle = event.get_place_handle()
if place_handle:
place = self.r_db.get_place_from_handle(place_handle)
if place:
self.append_to_place_lat_long(place, event, place_lat_long)
# begin event table row
trow = Html("tr")
# get event type and hyperlink to it or not?
etype = self._(event.get_type().xml_str())
event_role = event_ref.get_role()
if not event_role == omit:
etype += " (%s)" % event_role
event_hyper = self.event_link(event_ref.ref,
etype,
event_gid,
uplink) if hyperlink else etype
trow += Html("td", event_hyper, class_="ColumnEvent", rowspan=2)
# get event data
event_data = self.get_event_data(event, event_ref, uplink)
trow.extend(
Html("td", data or " ", class_=colclass,
inline=(not data or colclass == "ColumnDate"))
for (label, colclass, data) in event_data
)
trow2 = Html("tr")
# get event source references
srcrefs = self.get_citation_links(event.get_citation_list()) or " "
trow += Html("td", srcrefs, class_="ColumnSources", rowspan=2)
# get event notes
notelist = event_ref.get_note_list()
notelist.extend(event.get_note_list()[:]) # we don't want to modify
# cached original
htmllist = self.dump_notes(notelist, Event)
# if the event or event reference has an attribute attached to it,
# get the text and format it correctly?
attrlist = event.get_attribute_list()[:] # we don't want to modify
# cached original
attrlist.extend(event_ref.get_attribute_list())
for attr in attrlist:
htmllist.extend(Html("p",
self._("%(str1)s: %(str2)s") % {
'str1' : Html("b", attr.get_type()),
'str2' : attr.get_value()
}))
#also output notes attached to the attributes
notelist = attr.get_note_list()
if notelist:
htmllist.extend(self.dump_notes(notelist, Event))
trow2 += Html("td", htmllist, class_="ColumnNotes", colspan=3)
trow += trow2
# return events table row to its callers
return trow
def append_to_place_lat_long(self, place, event, place_lat_long):
"""
Create a list of places with coordinates.
@param: place_lat_long -- for use in Family Map Pages. This will be
None if called from Family pages, which do
not create a Family Map
"""
if place_lat_long is None:
return
place_handle = place.get_handle()
event_date = event.get_date_object()
# 0 = latitude, 1 = longitude, 2 = placetitle,
# 3 = place handle, 4 = event
found = any(data[3] == place_handle and data[4] == event_date
for data in place_lat_long)
if not found:
placetitle = _pd.display(self.r_db, place)
latitude = place.get_latitude()
longitude = place.get_longitude()
if latitude and longitude:
latitude, longitude = conv_lat_lon(latitude, longitude, "D.D8")
if latitude is not None:
place_lat_long.append([latitude, longitude, placetitle,
place_handle, event])
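# A minimal sketch of one accumulated entry (hypothetical coordinates,
# assuming conv_lat_lon returned decimal-degree strings):
#     ['48.85341000', '2.34880000', 'Paris, France', place_handle, event]
# positions 0-4 hold latitude, longitude, place title, place handle and
# the event, as described in the comment at the top of this method.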
def _get_event_place(self, person, place_lat_long):
"""
Retrieve a person's events and their places for the family map
@param: person -- Person object from the database
@param: place_lat_long -- For use in Family Map Pages. This will be
None if called from Family pages, which do
not create a Family Map
"""
if not person:
return
# check to see if this person is in the report database?
use_link = self.report.person_in_webreport(person.get_handle())
if use_link:
evt_ref_list = person.get_event_ref_list()
if evt_ref_list:
for evt_ref in evt_ref_list:
event = self.r_db.get_event_from_handle(evt_ref.ref)
if event:
pl_handle = event.get_place_handle()
if pl_handle:
place = self.r_db.get_place_from_handle(pl_handle)
if place:
self.append_to_place_lat_long(place, event,
place_lat_long)
def family_link(self, family_handle, name, gid=None, uplink=False):
"""
Create the url and link for FamilyPage
@param: family_handle -- The handle for the family to link
@param: name -- The family name
@param: gid -- The family gramps ID
@param: uplink -- If True, then "../../../" is inserted in
front of the result.
"""
name = html_escape(name)
if not self.noid and gid:
gid_html = Html("span", " [%s]" % gid, class_="grampsid",
inline=True)
else:
gid_html = ""
result = self.report.obj_dict.get(Family).get(family_handle)
if result is None:
# the family is not included in the webreport
return name + str(gid_html)
url = self.report.build_url_fname(result[0], uplink=uplink)
hyper = Html("a", name, href=url, title=name)
hyper += gid_html
return hyper
def event_link(self, event_handle, event_title, gid=None, uplink=False):
"""
Creates a hyperlink for an event based on its type
@param: event_handle -- Event handle
@param: event_title -- Event title
@param: gid -- The gramps ID for the event
@param: uplink -- If True, then "../../../" is inserted in front
of the result.
"""
if not self.inc_events:
return event_title
url = self.report.build_url_fname_html(event_handle, "evt", uplink)
hyper = Html("a", event_title, href=url, title=event_title)
if not self.noid and gid:
hyper += Html("span", " [%s]" % gid, class_="grampsid", inline=True)
return hyper
def format_family_events(self, event_ref_list, place_lat_long):
"""
displays the event row for events such as marriage and divorce
@param: event_ref_list -- List of events reference
@param: place_lat_long -- For use in Family Map Pages. This will be
None if called from Family pages, which do
not create a Family Map
"""
with Html("table", class_="infolist eventlist") as table:
thead = Html("thead")
table += thead
# attach event header row
thead += self.event_header_row()
# begin table body
tbody = Html("tbody")
table += tbody
for evt_ref in event_ref_list:
event = self.r_db.get_event_from_handle(evt_ref.ref)
# add event body row
tbody += self.display_event_row(event, evt_ref, place_lat_long,
uplink=True, hyperlink=True,
omit=EventRoleType.FAMILY)
return table
def get_event_data(self, evt, evt_ref,
uplink, gid=None):
"""
retrieve event data from event and evt_ref
@param: evt -- Event from database
@param: evt_ref -- Event reference
@param: uplink -- If True, then "../../../" is inserted in front of
the result.
"""
dummy_evt_ref = evt_ref
dummy_gid = gid
place = None
place_handle = evt.get_place_handle()
if place_handle:
place = self.r_db.get_place_from_handle(place_handle)
place_hyper = None
if place:
place_name = _pd.display(self.r_db, place, evt.get_date_object())
place_hyper = self.place_link(place_handle, place_name,
uplink=uplink)
evt_desc = evt.get_description()
# wrap it all up and return to its callers
# position 0 = translatable label, position 1 = column class
# position 2 = data
return [(self._("Date"), "ColumnDate",
self.rlocale.get_date(evt.get_date_object())),
(self._("Place"), "ColumnPlace", place_hyper),
(self._("Description"), "ColumnDescription", evt_desc)]
def dump_ordinance(self, ldsobj, ldssealedtype, toggle=True):
"""
will dump the LDS Ordinance information for either
a person or a family ...
@param: ldsobj -- Either person or family
@param: ldssealedtype -- Either Sealed to Family or Spouse
"""
dummy_ldssealedtype = ldssealedtype
objectldsord = ldsobj.get_lds_ord_list()
if not objectldsord:
return None
if toggle:
disp = "none" if self.report.options['toggle'] else "block"
ordin = Html("table", class_="infolist ldsordlist",
id="toggle_lds", style="display:%s" % disp)
else:
ordin = Html("table", class_="infolist ldsordlist")
# begin LDS ordinance table and table head
with ordin as table:
thead = Html("thead")
table += thead
# begin HTML row
trow = Html("tr")
thead += trow
trow.extend(
Html("th", label, class_=colclass, inline=True)
for (label, colclass) in [
[self._("Type"), "ColumnLDSType"],
[self._("Date"), "ColumnDate"],
[self._("Temple"), "ColumnLDSTemple"],
[self._("Place"), "ColumnLDSPlace"],
[self._("Status"), "ColumnLDSStatus"],
[self._("Sources"), "ColumnLDSSources"]
]
)
# start table body
tbody = Html("tbody")
table += tbody
for ordobj in objectldsord:
place_hyper = " "
place_handle = ordobj.get_place_handle()
if place_handle:
place = self.r_db.get_place_from_handle(place_handle)
if place:
place_title = _pd.display(self.r_db, place)
place_hyper = self.place_link(
place_handle, place_title,
place.get_gramps_id(), uplink=True)
# begin ordinance rows
trow = Html("tr")
trow.extend(
Html("td", value or " ", class_=colclass,
inline=(not value or colclass == "ColumnDate"))
for (value, colclass) in [
(ordobj.type2xml(), "ColumnType"),
(self.rlocale.get_date(ordobj.get_date_object()),
"ColumnDate"),
(ordobj.get_temple(), "ColumnLDSTemple"),
(place_hyper, "ColumnLDSPlace"),
(ordobj.get_status(), "ColumnLDSStatus"),
(self.get_citation_links(ordobj.get_citation_list()),
"ColumnSources")
]
)
tbody += trow
return table
def write_srcattr(self, srcattr_list):
"""
Writes out the srcattr for the different objects
@param: srcattr_list -- List of source attributes
"""
if not srcattr_list:
return None
# begin data map division and section title...
with Html("div", class_="subsection", id="data_map") as section:
with self.create_toggle("srcattr") as h4_head:
section += h4_head
h4_head += self._("Attributes")
disp = "none" if self.report.options['toggle'] else "block"
with Html("table", class_="infolist",
id="toggle_srcattr", style="display:%s" % disp) as table:
section += table
thead = Html("thead")
table += thead
trow = Html("tr") + (
Html("th", self._("Key"), class_="ColumnAttribute",
inline=True),
Html("th", self._("Value"), class_="ColumnValue",
inline=True)
)
thead += trow
tbody = Html("tbody")
table += tbody
for srcattr in srcattr_list:
trow = Html("tr") + (
Html("td", str(srcattr.get_type()),
class_="ColumnAttribute", inline=True),
Html("td", srcattr.get_value(),
class_="ColumnValue", inline=True)
)
tbody += trow
return section
def source_link(self, source_handle, source_title,
gid=None, cindex=None, uplink=False):
"""
Creates a link to the source object
@param: source_handle -- Source handle from database
@param: source_title -- Title from the source object
@param: gid -- Source gramps id from the source object
@param: cindex -- Count index
@param: uplink -- If True, then "../../../" is inserted in
front of the result.
"""
url = self.report.build_url_fname_html(source_handle, "src", uplink)
hyper = Html("a", source_title,
href=url,
title=source_title)
# if not None, add name reference to hyperlink element
if cindex:
hyper.attr += ' name ="sref%d"' % cindex
# add Gramps ID
if not self.noid and gid:
hyper += Html("span", ' [%s]' % gid, class_="grampsid", inline=True)
return hyper
def display_addr_list(self, addrlist, showsrc):
"""
Display a person's or repository's addresses ...
@param: addrlist -- a list of address handles
@param: showsrc -- True = show sources
False = do not show sources
None = Address Book address with sources
"""
if not addrlist:
return None
# begin addresses division and title
with Html("div", class_="subsection", id="Addresses") as section:
with self.create_toggle("addr") as h4_head:
section += h4_head
h4_head += self._("Addresses")
# write out addresses()
section += self.dump_addresses(addrlist, showsrc)
# return address division to its caller
return section
def dump_addresses(self, addrlist, showsrc):
"""
will display an object's addresses, url list, note list,
and source references.
@param: addrlist = either person or repository address list
@param: showsrc = True -- person and their sources
False -- repository with no sources
None -- Address Book address with sources
"""
if not addrlist:
return None
# begin summaryarea division
disp = "none" if self.report.options['toggle'] else "block"
with Html("div", class_="AddressTable", id="toggle_addr",
style="display:%s" % disp) as summaryarea:
# begin address table
with Html("table") as table:
summaryarea += table
# get table class based on showsrc
if showsrc is True:
table.attr = 'class = "infolist addrlist"'
elif showsrc is False:
table.attr = 'class = "infolist repolist"'
else:
table.attr = 'class = "infolist addressbook"'
# begin table head
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
addr_header = [[self._("Date"), "Date"],
[self._("Street"), "StreetAddress"],
[self._("Locality"), "Locality"],
[self._("City"), "City"],
[self._("State/ Province"), "State"],
[self._("County"), "County"],
[self._("Postal Code"), "Postalcode"],
[self._("Country"), "Cntry"],
[self._("Phone"), "Phone"]]
# True, False, or None ** see docstring for explanation
if showsrc in [True, None]:
addr_header.append([self._("Sources"), "Sources"])
trow.extend(
Html("th", self._(label),
class_="Colummn" + colclass, inline=True)
for (label, colclass) in addr_header
)
# begin table body
tbody = Html("tbody")
table += tbody
# get address list from an object; either repository or person
for address in addrlist:
trow = Html("tr")
tbody += trow
addr_data_row = [
(self.rlocale.get_date(address.get_date_object()),
"ColumnDate"),
(address.get_street(), "ColumnStreetAddress"),
(address.get_locality(), "ColumnLocality"),
(address.get_city(), "ColumnCity"),
(address.get_state(), "ColumnState"),
(address.get_county(), "ColumnCounty"),
(address.get_postal_code(), "ColumnPostalCode"),
(address.get_country(), "ColumnCntry"),
(address.get_phone(), "ColumnPhone")
]
# get source citation list
if showsrc in [True, None]:
addr_data_row.append(
[self.get_citation_links(
address.get_citation_list()),
"ColumnSources"])
trow.extend(
Html("td", value or " ",
class_=colclass, inline=True)
for (value, colclass) in addr_data_row
)
# address: notelist
if showsrc is not None:
notelist = self.display_note_list(
address.get_note_list(), toggle=False)
if notelist is not None:
summaryarea += notelist
return summaryarea
def addressbook_link(self, person_handle, uplink=False):
"""
Creates a hyperlink for an address book link based on person's handle
@param: person_handle -- Person's handle from the database
@param: uplink -- If True, then "../../../" is inserted in
front of the result.
"""
url = self.report.build_url_fname_html(person_handle, "addr", uplink)
person = self.r_db.get_person_from_handle(person_handle)
person_name = self.get_name(person)
# return addressbook hyperlink to its caller
return Html("a", person_name, href=url,
title=html_escape(person_name))
def get_name(self, person, maiden_name=None):
""" I5118
Return person's name, unless maiden_name given, unless married_name
listed.
@param: person -- person object from database
@param: maiden_name -- Female's family surname
"""
# get name format for displaying names
name_format = self.report.options['name_format']
# Get all of a person's names
primary_name = person.get_primary_name()
married_name = None
names = [primary_name] + person.get_alternate_names()
for name in names:
if int(name.get_type()) == NameType.MARRIED:
married_name = name
break # use first
# Now, decide which to use:
if maiden_name is not None:
if married_name is not None:
name = Name(married_name)
else:
name = Name(primary_name)
surname_obj = name.get_primary_surname()
surname_obj.set_surname(maiden_name)
else:
name = Name(primary_name)
name.set_display_as(name_format)
return _nd.display_name(name)
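# Illustrative sketch (hypothetical person): for someone whose primary name
# is 'Jane Smith' and who has no NameType.MARRIED name,
#     self.get_name(person, maiden_name='Jones')
# returns the primary name with its surname replaced ('Jane Jones'),
# whereas an existing married name would be displayed unchanged.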
def display_attribute_header(self, toggle=True):
"""
Display the attribute section and its table header
"""
# begin attributes division and section title
if toggle:
with Html("div", class_="subsection", id="attributes") as section:
with self.create_toggle("attr") as h4_head:
section += h4_head
h4_head += self._("Attributes")
disp = "none" if self.report.options['toggle'] else "block"
head = Html("table", class_="infolist attrlist",
id="toggle_attr", style="display:%s" % disp)
else:
section = Html("h4", self._("Attributes"), inline=True)
head = Html("table", class_="infolist attrlist")
# begin attributes table
with head as attrtable:
section += attrtable
thead = Html("thead")
attrtable += thead
trow = Html("tr")
thead += trow
trow.extend(
Html("th", label, class_=colclass, inline=True)
for (label, colclass) in [
(self._("Type"), "ColumnType"),
(self._("Value"), "ColumnValue"),
(self._("Notes"), "ColumnNotes"),
(self._("Sources"), "ColumnSources")]
)
return section, attrtable
def display_attr_list(self, attrlist,
attrtable):
"""
Will display a list of attributes
@param: attrlist -- a list of attributes
@param: attrtable -- the table element that is being added to
"""
tbody = Html("tbody")
attrtable += tbody
tbody.extend(
self.dump_attribute(attr) for attr in attrlist
)
def write_footer(self, date, cal=0):
"""
Will create and display the footer section of each page...
@param: date -- The timestamp of the last change, if any
@param: cal -- The number of directories to use
"""
# begin footer division
with Html("div", id="footer") as footer:
footer_note = self.report.options['footernote']
if footer_note:
note = self.get_note_format(
self.r_db.get_note_from_gramps_id(footer_note),
False
)
user_footer = Html("div", id='user_footer')
footer += user_footer
# attach note
user_footer += note
msg = self._('Generated by %(gramps_home_html_start)s'
'Gramps%(html_end)s %(version)s'
) % {'gramps_home_html_start' :
'<a href="' + URL_HOMEPAGE + '">',
'html_end' : '</a>',
'version' : VERSION}
if date is not None and date > 0:
msg += "<br />"
last_modif = datetime.datetime.fromtimestamp(date).strftime(
'%Y-%m-%d %H:%M:%S')
msg += self._('Last change was the %(date)s') % {'date' :
last_modif}
else:
dat_txt = self._(' on %(date)s')
msg += dat_txt % {'date' : self.rlocale.get_date(Today())}
origin1 = self.report.filter.get_name(self.rlocale)
filt_number = self.report.options['filter']
# optional "link-home" feature; see bug report #2736
if self.report.options['linkhome']:
center_person = self.r_db.get_person_from_gramps_id(
self.report.options['pid'])
if (center_person and
self.report.person_in_webreport(center_person.handle)):
if cal > 0 and not self.usecms:
prfx = "/".join(([".."]*2 + ["ppl"]))
else:
prfx = "ppl"
center_person_url = self.report.build_url_fname_html(
center_person.handle, prfx, uplink=False)
#person_name = self.get_name(center_person)
if filt_number > 0 and filt_number < 5:
subject_url = '<a href="' + center_person_url + '">'
subject_url += origin1 + '</a>'
else:
subject_url = origin1
msg += self._(
'%(http_break)sCreated for %(subject_url)s') % {
'http_break' : '<br />',
'subject_url' : subject_url}
else:
msg += self._(
'%(http_break)sCreated for %(subject_url)s') % {
'http_break' : '<br />',
'subject_url' : origin1}
# creation author
footer += Html("p", msg, id='createdate')
# get copyright license for all pages
copy_nr = self.report.copyright
text = ''
if copy_nr == 0:
if self.author:
year = Today().get_year()
text = '© %(year)d %(person)s' % {
'person' : self.author, 'year' : year}
elif copy_nr < len(_CC):
# Note. This is a URL
sub_cal = cal + 1 if cal > 0 else 1
if self.usecms:
fname = "/".join(["images", "somerights20.gif"])
elif self.the_lang:
fname = "/".join(([".."]*sub_cal + ['images',
'somerights20.gif']))
else:
fname = "/".join(['images', "somerights20.gif"])
url = self.report.build_url_fname(fname, None, self.uplink,
image=True)
text = _CC[copy_nr] % {'gif_fname' : url}
footer += Html("p", text, id='copyright')
# return footer to its callers
return footer
def write_header(self, the_title, cal=0):
"""
Note. 'title' is used as currentsection in the navigation links and
as part of the header title.
@param: title -- Is the title of the web page
@param: cal -- The number of directories to use
"""
# If .php extension and a note selected, add it to the head section.
phpnote = self.report.options['phpnote']
note = None
if phpnote and self.ext == ".php":
# This is used to give the ability to have a php init session.
# This note must not contain formatting
# and should only contain php code, e.g.:
# <? php session_start (); ?>
note = self.r_db.get_note_from_gramps_id(phpnote).get()
# begin each html page...
if self.the_lang:
xmllang = self.the_lang.replace('_', '-')
else:
xmllang = xml_lang()
page, head, body = Html.page('%s - %s' %
(html_escape(self.title_str.strip()),
html_escape(the_title)),
self.report.encoding,
xmllang, cms=self.usecms, php_session=note)
# temporary fix for .php parsing error
if self.ext in [".php", ".cgi"]:
del page[0] # remove the "DOCTYPE" directive
# Header constants
_meta1 = 'name ="viewport" content="width=device-width, '
_meta1 += 'height=device-height, initial-scale=1.0, '
_meta1 += 'minimum-scale=0.5, maximum-scale=10.0, user-scalable=yes"'
_meta2 = 'name ="apple-mobile-web-app-capable" content="yes"'
_meta3 = 'name="generator" content="%s %s %s"' % (
PROGRAM_NAME, VERSION, URL_HOMEPAGE)
_meta4 = 'name="author" content="%s"' % self.author
# create additional meta tags
meta = Html("meta", attr=_meta1) + (
Html("meta", attr=_meta2, indent=False),
Html("meta", attr=_meta3, indent=False),
Html("meta", attr=_meta4, indent=False)
)
# Link to _NARRATIVESCREEN stylesheet
sub_cal = cal + 1 if cal > 0 else 1
if self.usecms:
fname = "/".join(["css", _NARRATIVESCREEN])
elif self.the_lang:
fname = "/".join(([".."]*sub_cal + ["css", _NARRATIVESCREEN]))
elif cal > 0:
fname = "/".join(([".."]*cal + ["css", _NARRATIVESCREEN]))
else:
fname = "/".join(["css", _NARRATIVESCREEN])
url2 = self.report.build_url_fname(fname, None, self.uplink)
# Link to _NARRATIVEPRINT stylesheet
if self.usecms:
fname = "/".join(["css", _NARRATIVEPRINT])
elif self.the_lang:
fname = "/".join(([".."]*sub_cal + ["css", _NARRATIVEPRINT]))
elif cal > 0:
fname = "/".join(([".."]*cal + ["css", _NARRATIVEPRINT]))
else:
fname = "/".join(["css", _NARRATIVEPRINT])
url3 = self.report.build_url_fname(fname, None, self.uplink)
# Link to Gramps favicon
if self.usecms:
fname = "/".join(["images", "favicon2.ico"])
elif self.the_lang:
fname = "/".join(([".."]*sub_cal + ['images', 'favicon2.ico']))
elif cal > 0:
fname = "/".join(([".."]*cal + ["images", "favicon2.ico"]))
else:
fname = "/".join(['images', 'favicon2.ico'])
url4 = self.report.build_url_fname(fname, None, self.uplink,
image=True)
# create stylesheet and favicon links
links = Html("link", type="image/x-icon",
href=url4, rel="shortcut icon")
# attach the ancestortree style sheet if ancestor
# graph is being created?
if self.report.options["ancestortree"]:
if self.usecms:
fname = "/".join(["css", "ancestortree.css"])
elif self.the_lang:
fname = "/".join(([".."]*sub_cal +
["css", "ancestortree.css"]))
elif cal > 0:
fname = "/".join(([".."]*cal + ["css", "ancestortree.css"]))
else:
fname = "/".join(["css", "ancestortree.css"])
url5 = self.report.build_url_fname(fname, None, self.uplink)
links += Html("link", type="text/css", href=url5,
media="screen", rel="stylesheet", indent=False)
links += Html("link", type="text/css", href=url3,
media='print', rel="stylesheet", indent=False)
links += Html("link", type="text/css", href=url2,
media="screen", rel="stylesheet", indent=False)
# create all alternate stylesheets
# Cannot use it on local files (file://)
for css_f in CSS:
already_done = []
for css_fn in ("UsEr_", "Basic", "Mainz", "Nebraska"):
if css_fn in css_f and css_f not in already_done:
css_f = css_f.replace("UsEr_", "")
already_done.append(css_f)
if self.usecms:
fname = "/".join(["css", css_f + ".css"])
elif self.the_lang:
fname = "/".join(([".."]*sub_cal + ["css",
css_f + ".css"]))
elif cal > 0:
fname = "/".join(([".."]*cal + ["css",
css_f + ".css"]))
else:
fname = "/".join(["css", css_f + ".css"])
urlx = self.report.build_url_fname(fname, None,
self.uplink)
links += Html("link", rel="alternate stylesheet",
title=self._(css_f), indent=False,
media="screen", type="text/css",
href=urlx)
# Link to Navigation Menus stylesheet
if CSS[self.report.css]["navigation"]:
if self.the_lang and not self.usecms:
fname = "/".join(["..", "css", "narrative-menus.css"])
else:
fname = "/".join(["css", "narrative-menus.css"])
url = self.report.build_url_fname(fname, None, self.uplink)
links += Html("link", type="text/css", href=url,
media="screen", rel="stylesheet", indent=False)
# add additional meta and link tags
head += meta
head += links
# Add the script to control the menu
menuscript = Html("<script>function navFunction() { "
"var x = document.getElementById(\"dropmenu\"); "
"if (x.className === \"nav\") { x.className += \""
" responsive\"; } else { x.className = \"nav\"; }"
" }</script>")
head += menuscript
# add outerwrapper to set the overall page width
outerwrapperdiv = Html("div", id='outerwrapper')
body += outerwrapperdiv
# begin header section
headerdiv = Html("div", id='header') + (
Html("<button href=\"javascript:void(0);\" class=\"navIcon\""
" onclick=\"navFunction()\">≡</button>"))
headerdiv += Html("h1", html_escape(self.title_str),
id="SiteTitle", inline=True)
outerwrapperdiv += headerdiv
header_note = self.report.options['headernote']
if header_note:
note = self.get_note_format(
self.r_db.get_note_from_gramps_id(header_note),
False)
user_header = Html("div", id='user_header')
headerdiv += user_header
# attach note
user_header += note
# Begin Navigation Menu--
# is the style sheet either Basic-Blue or Visually Impaired,
# and menu layout is Drop Down?
if (self.report.css == _("Basic-Blue") or
self.report.css == _("Visually Impaired")
) and self.report.navigation == "dropdown":
outerwrapperdiv += self.display_drop_menu()
else:
outerwrapperdiv += self.display_nav_links(the_title, cal=cal)
if self.report.options['toggle']:
head += TOGGLE
# Create a button to go to the top of the page
viewbox = "0 0 100 100"
points = "0,100 100,100, 50,10"
svg = Html("svg", viewBox=viewbox, class_="triangle", inline=False)
svg += Html("polygon", points=points)
outerwrapperdiv += Html("button", svg, id="gototop",
title=_("Go to top"), onclick="GoToTop()")
outerwrapperdiv += GOTOTOP # This must be positioned
# after the button for it to work
# message for Codacy :
# body is used in some modules to add functions like onload(),
# initialize(), ...
# some modules doesn't need that, so body is an unused variable
# in these modules.
# return page, head, and body to its classes...
return page, head, body, outerwrapperdiv
def display_nav_links(self, currentsection, cal=0):
"""
Creates the navigation menu
@param: currentsection = which menu item are you on
"""
# include repositories or not?
inc_repos = True
if (not self.report.inc_repository or
not self.r_db.get_repository_handles()):
inc_repos = False
# create media pages...
if self.create_media:
_create_media_link = True
if self.create_thumbs_only:
_create_media_link = False
# Determine which menu items will be available?
# Menu items have been adjusted to coincide with Gramps Navigation
# Sidebar order...
navs = [
(self.report.index_fname, self._("Home", "Html"),
self.report.use_home),
(self.report.intro_fname, self._("Introduction"),
self.report.use_intro),
(self.report.extrapage, self.extrapagename, (self.extrapage != "")),
('individuals', self._("Individuals"), True),
(self.report.surname_fname, self._("Surnames"), True),
('families', self._("Families"), self.report.inc_families),
('events', self._("Events"), self.report.inc_events),
('places', self._("Places"), self.report.inc_places),
('sources', self._("Sources"), self.report.inc_sources),
('repositories', self._("Repositories"), inc_repos),
('media', self._("Media"), self.create_images_index and
self.report.inc_gallery and not self.report.create_thumbs_only),
('thumbnails', self._("Thumbnails"), self.create_thumbs_index and
self.report.inc_gallery),
('download', self._("Download"), self.report.inc_download),
("addressbook", self._("Address Book"),
self.report.inc_addressbook),
('contact', self._("Contact"), self.report.use_contact),
('updates', self._("Updates"), self.report.inc_updates),
('statistics', self._("Statistics"), self.report.inc_stats),
(self.target_cal_uri, self._("Web Calendar"), self.usecal)
]
# Remove menu sections if they are not being created?
navs = ((url_text, nav_text)
for url_text, nav_text, cond in navs if cond)
menu_items = [[url, text] for url, text in navs]
number_items = len(menu_items)
# begin navigation menu division...
with Html("div", class_="wrappernav",
id="nav", role="navigation") as navigation:
with Html("div", class_="container") as container:
index = 0
unordered = Html("ul", class_="nav", id="dropmenu")
while index < number_items:
url_fname, nav_text = menu_items[index]
hyper = self.get_nav_menu_hyperlink(url_fname, nav_text,
cal=cal)
# Define 'currentsection' to correctly set navlink item
# CSS id 'CurrentSection' for Navigation styling.
# Use 'self.report.cur_fname' to determine
# 'CurrentSection' for individual elements for
# Navigation styling.
# Figure out if we need <li class = "CurrentSection">
# or just <li>
check_cs = False
if nav_text == currentsection:
check_cs = True
elif nav_text == self._("Home", "Html"):
if "index" in self.report.cur_fname:
check_cs = True
elif nav_text == self._("Surnames"):
if "srn" in self.report.cur_fname:
check_cs = True
elif self._("Surnames") in currentsection:
check_cs = True
elif nav_text == self._("Individuals"):
if "ppl" in self.report.cur_fname:
check_cs = True
elif nav_text == self._("Families"):
if "fam" in self.report.cur_fname:
check_cs = True
elif nav_text == self._("Sources"):
if "src" in self.report.cur_fname:
check_cs = True
elif nav_text == self._("Repositories"):
if "repo" in self.report.cur_fname:
check_cs = True
elif nav_text == self._("Places"):
if "plc" in self.report.cur_fname:
check_cs = True
elif nav_text == self._("Events"):
if "evt" in self.report.cur_fname:
check_cs = True
elif nav_text == self._("Media"):
if "img" in self.report.cur_fname:
check_cs = True
elif nav_text == self._("Address Book"):
if "addr" in self.report.cur_fname:
check_cs = True
elif nav_text == self._("Updates"):
if "updates" in self.report.cur_fname:
check_cs = True
elif nav_text == self._("Statistics"):
if "statistics" in self.report.cur_fname:
check_cs = True
elif nav_text == self._("Web Calendar"):
if "cal/" in self.report.cur_fname:
check_cs = True
temp_cs = 'class = "CurrentSection"'
check_cs = temp_cs if check_cs else False
if check_cs:
unordered.extend(
Html("li", hyper, attr=check_cs, inline=True)
)
else:
if self.report.extrapage != "":
if (url_fname[:4] == "http" or
url_fname[:1] == "/"):
hyper = Html("a", nav_text, href=url_fname,
title=nav_text)
elif self.report.extrapagename == nav_text:
if cal > 0:
url_fname = "/".join(([".."]*cal +
[url_fname]))
hyper = Html("a", nav_text,
href=url_fname,
title=nav_text)
unordered.extend(
Html("li", hyper, inline=True)
)
index += 1
if self.report.options['multitrans'] and self.not_holiday:
langs = Html("li", self._("Language"), class_="lang")
en_locale = self.report.set_locale("en")
languages = en_locale.get_language_dict()
choice = Html("ul", class_="lang")
langs += choice
for language in languages:
for extra_lang, dummy_title in self.report.languages:
if languages[language] == extra_lang:
lang_txt = html_escape(self._(language))
n_lang = languages[language]
nfname = self.report.cur_fname
if "cal" in nfname:
(dummy_field, dummy_sep,
field2) = nfname.partition("cal/")
sub_cal = 3 if self.the_lang else 2
url = "/".join(([".."]*sub_cal + [n_lang,
"cal",
field2]
))
else:
upl = self.uplink
url = self.report.build_url_lang(nfname,
n_lang,
upl)
lnk = Html("a", lang_txt,
href=url, title=lang_txt)
choice += Html("li", lnk, inline=True)
unordered.extend(langs)
if self.prevnext:
prv = Html('<a onclick="history.go(-1);">%s</a>' %
self._("Previous"))
nxt = Html('<a onclick="history.go(+1);">%s</a>' %
self._("Next"))
unordered.extend(Html("li", prv, inline=True))
unordered.extend(Html("li", nxt, inline=True))
container += unordered
navigation += container
return navigation
def display_drop_menu(self):
"""
Creates the Drop Down Navigation Menu
"""
# include repositories or not?
inc_repos = True
if (not self.report.inc_repository or
not self.r_db.get_repository_handles()):
inc_repos = False
# create media pages...
_create_media_link = False
if self.create_media:
_create_media_link = True
if self.create_thumbs_only:
_create_media_link = False
personal = [
(self.report.intro_fname, self._("Introduction"),
self.report.use_intro),
("individuals", self._("Individuals"), True),
(self.report.surname_fname, self._("Surnames"), True),
("families", self._("Families"), self.report.inc_families)
]
personal = ((url_text, nav_text)
for url_text, nav_text, cond in personal if cond)
personal = [[url, text] for url, text in personal]
navs1 = [
("events", self._("Events"), self.report.inc_events),
("places", self._("Places"), True),
("sources", self._("Sources"), True),
("repositories", self._("Repositories"), inc_repos)
]
navs1 = ((url_text, nav_text)
for url_text, nav_text, cond in navs1 if cond)
navs1 = [[url, text] for url, text in navs1]
media = [
("media", self._("Media"), _create_media_link),
("thumbnails", self._("Thumbnails"), True)
]
media = ((url_text, nav_text)
for url_text, nav_text, cond in media if cond)
media = [[url, text] for url, text in media]
misc = [
('download', self._("Download"), self.report.inc_download),
("addressbook", self._("Address Book"), self.report.inc_addressbook)
]
misc = ((url_text, nav_text)
for url_text, nav_text, cond in misc if cond)
misc = [[url, text] for url, text in misc]
contact = [
('contact', self._("Contact"), self.report.use_contact)
]
contact = ((url_text, nav_text)
for url_text, nav_text, cond in contact if cond)
contact = [[url, text] for url, text in contact]
# begin navigation menu division...
with Html("div", class_="wrapper",
id="nav", role="navigation") as navigation:
with Html("div", class_="container") as container:
unordered = Html("ul", class_="menu", id="dropmenu")
if self.report.use_home:
list_html = Html("li",
self.get_nav_menu_hyperlink(
self.report.index_fname,
self._("Home", "Html")))
unordered += list_html
# add personal column
self.get_column_data(unordered, personal, self._("Personal"))
if navs1:
for url_fname, nav_text in navs1:
unordered.extend(
Html("li", self.get_nav_menu_hyperlink(url_fname,
nav_text),
inline=True)
)
# add media column
self.get_column_data(unordered, media, self._("Media"))
# add miscellaneous column
self.get_column_data(unordered, misc, self._("Miscellaneous"))
# add contact column
self.get_column_data(unordered, contact, _("Contact"))
container += unordered
navigation += container
return navigation
def add_image(self, option_name, head, height=0):
"""
Will add an image (if present) to the page
If this image contains regions, try to add them.
@param: option_name -- The name of the report option
@param: height -- Height of the image
"""
pic_id = self.report.options[option_name]
if pic_id:
obj = self.r_db.get_media_from_gramps_id(pic_id)
if obj is None:
return None
# get media rectangles
_region_items = self.media_ref_rect_regions(obj.get_handle(),
linkurl=self.uplink)
# if there are media rectangle regions, attach behaviour style sheet
if _region_items:
if self.the_lang and not self.usecms:
fname = "/".join(["..", "css", "behaviour.css"])
else:
fname = "/".join(["css", "behaviour.css"])
url = self.report.build_url_fname(fname, None, self.uplink)
head += Html("link", href=url, type="text/css",
media="screen", rel="stylesheet")
mime_type = obj.get_mime_type()
if mime_type and is_image_type(mime_type):
try:
newpath, dummy_tpath = self.report.prepare_copy_media(obj)
newpathc = newpath
if self.the_lang and self.report.archive:
(dummy_1_field, dummy_sep,
second_field) = newpath.partition("/")
newpathc = second_field
if self.usecms:
newpathc = newpathc.replace(self.target_uri + "/", "")
# In some cases, we have the target_uri without
# the leading "/"
newpathc = newpathc.replace(self.target_uri[1:] + "/",
"")
self.report.copy_file(media_path_full(
self.r_db, obj.get_path()), newpathc)
# begin image
with Html("div", id="GalleryDisplay",
style='width: auto; height: auto') as image:
if _region_items:
# add all regions and links to persons
regions = Html("ol", class_="RegionBox")
while _region_items:
(name, coord_x, coord_y,
width, height, linkurl
) = _region_items.pop()
regions += Html(
"li",
style="left:%d%%; "
"top:%d%%; "
"width:%d%%; "
"height:%d%%;" % (
coord_x, coord_y,
width, height)) + (
Html("a", name,
href=linkurl)
)
image += regions
# add image
imag = Html("img")
imag.attr = ''
if height:
imag.attr += 'height = "%d"' % height
descr = html_escape(obj.get_description())
newpath = self.report.build_url_fname(newpath,
image=True)
imag.attr += ' src = "%s" alt = "%s"' % (newpath, descr)
fname = self.report.build_url_fname(obj.get_handle(),
"img", uplink=2,
image=True,
) + self.ext
inc_gallery = self.report.options['gallery']
if not self.create_thumbs_only and inc_gallery:
img_link = Html("a", href=fname, title=descr) + (
Html("img", src=newpath, alt=descr))
else:
# We can't show the image html page.
# This page doesn't exist.
img_link = Html("img", src=newpath, alt=descr)
image += img_link
return image
except (IOError, OSError) as msg:
self.r_user.warn(_("Could not add photo to page"),
str(msg))
# no image to return
return None
def media_ref_rect_regions(self, handle, linkurl=True):
"""
Gramps feature #2634 -- attempt to highlight subregions in media
objects and link back to the relevant web page.
This next section of code builds up the "records" we'll need to
generate the html/css code to support the subregions
@param: handle -- The media handle to use
"""
# get all of the backlinks to this media object; meaning all of
# the people, events, places, etc..., that use this image
_region_items = set()
for (classname, newhandle) in self.r_db.find_backlink_handles(
handle,
include_classes=["Person", "Family", "Event", "Place"]):
# for each of the backlinks, get the relevant object from the db
# and determine a few important things, such as a text name we
# can use, and the URL to a relevant web page
_obj = None
_name = ""
_linkurl = "#"
if classname == "Person":
# Is this a person for whom we have built a page:
if self.report.person_in_webreport(newhandle):
# If so, let's add a link to them:
_obj = self.r_db.get_person_from_handle(newhandle)
if _obj:
# What is the shortest possible name we could use
# for this person?
_name = (_obj.get_primary_name().get_call_name() or
_obj.get_primary_name().get_first_name() or
self._("Unknown")
)
_linkurl = self.report.build_url_fname_html(_obj.handle,
"ppl",
linkurl)
elif classname == "Family" and self.inc_families:
_obj = self.r_db.get_family_from_handle(newhandle)
partner1_handle = _obj.get_father_handle()
partner2_handle = _obj.get_mother_handle()
partner1 = None
partner2 = None
if partner1_handle:
partner1 = self.r_db.get_person_from_handle(
partner1_handle)
if partner2_handle:
partner2 = self.r_db.get_person_from_handle(
partner2_handle)
if partner2 and partner1:
_name = partner1.get_primary_name().get_first_name()
_linkurl = self.report.build_url_fname_html(partner1_handle,
"ppl", True)
elif partner1:
_name = partner1.get_primary_name().get_first_name()
_linkurl = self.report.build_url_fname_html(partner1_handle,
"ppl", True)
elif partner2:
_name = partner2.get_primary_name().get_first_name()
_linkurl = self.report.build_url_fname_html(partner2_handle,
"ppl", True)
if not _name:
_name = self._("Unknown")
elif classname == "Event" and self.inc_events:
_obj = self.r_db.get_event_from_handle(newhandle)
_name = _obj.get_description()
if not _name:
_name = self._("Unknown")
_linkurl = self.report.build_url_fname_html(_obj.handle,
"evt", True)
elif classname == "Place":
_obj = self.r_db.get_place_from_handle(newhandle)
_name = _pd.display(self.r_db, _obj)
if not _name:
_name = self._("Unknown")
_linkurl = self.report.build_url_fname_html(newhandle,
"plc", True)
# continue looking through the loop for an object...
if _obj is None:
continue
# get a list of all media refs for this object
media_list = _obj.get_media_list()
# go through the media refs looking for one that points to this image
for mediaref in media_list:
# is this mediaref for this image? do we have a rect?
if mediaref.ref == handle and mediaref.rect is not None:
(coord_x1, coord_y1, coord_x2, coord_y2) = mediaref.rect
# Gramps gives us absolute coordinates,
# but we need relative width + height
width = coord_x2 - coord_x1
height = coord_y2 - coord_y1
# remember all this information, because we'll need
# it later when we output the <li>...</li> tags
item = (_name, coord_x1, coord_y1, width, height, _linkurl)
_region_items.add(item)
# End of code that looks for and prepares the media object regions
return sorted(_region_items)
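# Illustrative sketch (not part of the original module): each item returned by
# media_ref_rect_regions() is a tuple of the form
#     (name, coord_x1, coord_y1, width, height, linkurl)
# e.g. ("John", 10, 20, 30, 40, "<relative url to the person page>"),
# which add_image() above unpacks to position the clickable region boxes.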
def media_ref_region_to_object(self, media_handle, obj):
"""
Return a region of this image if it refers to this object.
@param: media_handle -- The media handle to use
@param: obj -- The object reference
"""
# get a list of all media refs for this object
for mediaref in obj.get_media_list():
# is this mediaref for this image? do we have a rect?
if (mediaref.ref == media_handle and
mediaref.rect is not None):
return mediaref.rect # (x1, y1, x2, y2)
return None
def disp_first_img_as_thumbnail(self, photolist, object_):
"""
Return the Html of the first image of photolist that is
associated with the object. The first image might be a region of an
image, or it might itself have regions defined in it.
@param: photolist -- The list of media
@param: object_ -- The object reference
"""
if not photolist or not self.create_media:
return None
photo_handle = photolist[0].get_reference_handle()
photo = self.r_db.get_media_from_handle(photo_handle)
mime_type = photo.get_mime_type()
descr = photo.get_description()
# begin snapshot division
with Html("div", class_="snapshot") as snapshot:
if mime_type and is_image_type(mime_type):
region = self.media_ref_region_to_object(photo_handle, object_)
if region:
# make a thumbnail of this region
newpath = self.copy_thumbnail(photo_handle, photo, region)
newpath = self.report.build_url_fname(newpath, uplink=True,
image=True)
snapshot += self.media_link(photo_handle, newpath, descr,
uplink=self.uplink,
usedescr=False)
else:
dummy_rpath, newpath = self.report.prepare_copy_media(photo)
newpath = self.report.build_url_fname(newpath, image=True,
uplink=self.uplink)
snapshot += self.media_link(photo_handle, newpath,
descr,
uplink=self.uplink,
usedescr=False)
else:
# begin hyperlink
snapshot += self.doc_link(photo_handle, descr,
uplink=self.uplink, usedescr=False)
# return snapshot division to its callers
return snapshot
def disp_add_img_as_gallery(self, photolist, object_):
"""
Display additional image as gallery
@param: photolist -- The list of media
@param: object_ -- The object reference
"""
if not photolist or not self.create_media:
return None
# make referenced images have the same order as in media list:
photolist_handles = {}
for mediaref in photolist:
photolist_handles[mediaref.get_reference_handle()] = mediaref
photolist_ordered = []
for photoref in copy.copy(object_.get_media_list()):
if photoref.ref in photolist_handles:
photo = photolist_handles[photoref.ref]
photolist_ordered.append(photo)
# and add any that are left (should there be any?)
photolist_ordered += photolist
# begin individualgallery division and section title
with Html("div", class_="subsection", id="indivgallery") as section:
with self.create_toggle("media") as h4_head:
section += h4_head
h4_head += self._("Media")
disp = "none" if self.report.options['toggle'] else "block"
with Html("div", style="display:%s" % disp,
id="toggle_media") as toggle:
section += toggle
displayed = []
for mediaref in photolist_ordered:
photo_handle = mediaref.get_reference_handle()
photo = self.r_db.get_media_from_handle(photo_handle)
if photo_handle in displayed:
continue
mime_type = photo.get_mime_type()
# get media description
descr = photo.get_description()
if mime_type:
try:
# create thumbnail url
# extension needs to be added as it is not
# already there
url = (self.report.build_url_fname(photo_handle,
"thumb",
True,
image=True) +
".png")
# begin hyperlink
toggle += self.media_link(photo_handle, url,
descr,
uplink=self.uplink,
usedescr=True)
except (IOError, OSError) as msg:
self.r_user.warn(_("Could not add photo to page"),
str(msg))
else:
try:
# begin hyperlink
toggle += self.doc_link(photo_handle, descr,
uplink=self.uplink)
except (IOError, OSError) as msg:
self.r_user.warn(_("Could not add photo to page"),
str(msg))
displayed.append(photo_handle)
# add fullclear for proper styling
section += FULLCLEAR
# return indivgallery division to its caller
return section
def default_note(self, parent, notetype):
"""
Return True if the notetype is the default note type for the parent object
@param: parent -- The parent object (Person, Family, Media,...)
@param: notetype -- The type for the current note
"""
if parent == Person and notetype == NoteType.PERSON:
return True
elif parent == Family and notetype == NoteType.FAMILY:
return True
elif parent == Media and notetype == NoteType.MEDIA:
return True
elif parent == Repository and notetype == NoteType.REPO:
return True
elif parent == Source and notetype == NoteType.SOURCE:
return True
elif parent == Event and notetype == NoteType.EVENT:
return True
elif parent == Place and notetype == NoteType.PLACE:
return True
elif parent == Citation and notetype == NoteType.CITATION:
return True
elif parent == Attribute and notetype == NoteType.ATTRIBUTE:
return True
return False
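# Behaviour sketch (follows directly from the checks above; values assumed):
#     default_note(Person, NoteType.PERSON)  -> True
#     default_note(Person, NoteType.EVENT)   -> False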
def display_note_list(self, notelist=None, parent=None, toggle=True):
"""
Display note list
@param: notelist -- The list of notes
@param: parent -- The parent object (Person, Family, Media,...)
"""
if not notelist:
return None
# begin LDS ordinance table and table head
if toggle:
with Html("div", class_="subsection narrative") as hdiv:
with self.create_toggle("note") as h4_head:
hdiv += h4_head
h4_head += self._("Notes")
disp = "none" if self.report.options['toggle'] else "block"
with Html("div", class_="subsection narrative",
id="toggle_note",
style="display:%s" % disp) as section:
hdiv += section
else:
with Html("div", class_="subsection narrative") as section:
hdiv = section
idx = 0
for notehandle in notelist:
note = self.r_db.get_note_from_handle(notehandle)
title = self._(note.type.xml_str())
if note:
note_text = self.get_note_format(note, True)
idx += 1
if len(notelist) > 1:
if (self.default_note(parent, int(note.type)) or
int(note.type) == NoteType.HTML_CODE):
title_text = self._("Note: %s") % str(idx)
else:
title = " (" + title + ")"
title_text = self._("Note: %s") % str(idx) + title
else:
if (self.default_note(parent, int(note.type)) or
int(note.type) == NoteType.HTML_CODE):
title_text = self._("Note")
else:
title_text = title
# Tags
if parent:
tags = self.show_tags(note)
if tags and self.report.inc_tags:
title_text += " (" + tags + ")"
# add section title
section += Html("h4", title_text, inline=True)
# attach note
section += note_text
# return notes to its callers
return hdiv
def display_url_list(self, urllist=None):
"""
Display URL list
@param: urllist -- The list of urls
"""
if not urllist:
return None
# begin web links division
with Html("div", class_="subsection", id="WebLinks") as section:
with self.create_toggle("links") as h4_head:
section += h4_head
h4_head += self._("Web Links")
disp = "none" if self.report.options['toggle'] else "block"
with Html("table", class_="infolist weblinks",
id="toggle_links", style="display:%s" % disp) as table:
section += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
trow.extend(Html('th', label, class_=colclass, inline=True)
for (label, colclass) in [
(self._("Type"), "ColumnType"),
(self._("Description"), "ColumnDescription")]
)
tbody = Html("tbody")
table += tbody
for url in urllist:
trow = Html("tr")
tbody += trow
_type = self._(url.get_type().xml_str())
uri = url.get_path()
descr = url.get_description()
# Email address
if _type == UrlType.EMAIL:
if not uri.startswith("mailto:"):
uri = "mailto:%(email)s" % {'email' : uri}
# Web Site address
elif _type == UrlType.WEB_HOME:
if not (uri.startswith("http://") or
uri.startswith("https://")):
url = self.secure_mode
uri = url + "%(website)s" % {"website" : uri}
# FTP server address
elif _type == UrlType.WEB_FTP:
if not (uri.startswith("ftp://") or
uri.startswith("ftps://")):
uri = "ftp://%(ftpsite)s" % {"ftpsite" : uri}
descr = Html("p", html_escape(descr)) + (
Html("a", self._(" [Click to Go]"), href=uri, title=uri)
)
trow.extend(
Html("td", data, class_=colclass, inline=True)
for (data, colclass) in [
(str(_type), "ColumnType"),
(descr, "ColumnDescription")
]
)
return section
def display_lds_ordinance(self, db_obj_):
"""
Display LDS information for a person or family
@param: db_obj_ -- The database object
"""
ldsordlist = db_obj_.lds_ord_list
if not ldsordlist:
return None
# begin LDS Ordinance division and section title
with Html("div", class_="subsection", id="LDSOrdinance") as section:
with self.create_toggle("lds") as h4_head:
section += h4_head
h4_head += self._("Latter-Day Saints/ LDS Ordinance")
# dump individual LDS ordinance list
section += self.dump_ordinance(db_obj_, "Person")
# return section to its caller
return section
def display_ind_sources(self, srcobj):
"""
Will create the "Source References" section for an object
@param: srcobj -- Sources object
"""
list(map(
lambda i: self.bibli.add_reference(
self.r_db.get_citation_from_handle(i)),
srcobj.get_citation_list()))
sourcerefs = self.display_source_refs(self.bibli)
# return to its callers
return sourcerefs
# Only used in IndividualPage.display_ind_sources(),
# and MediaPage.display_media_sources()
def display_source_refs(self, bibli):
"""
Display source references
@param: bibli -- List of sources
"""
if bibli.get_citation_count() == 0:
return None
with Html("div", class_="subsection", id="sourcerefs") as section:
section += Html("h4", self._("Source References"), inline=True)
ordered = Html("ol", id="srcr")
cindex = 0
citationlist = bibli.get_citation_list()
for citation in citationlist:
cindex += 1
# Add this source and its references to the page
source = self.r_db.get_source_from_handle(
citation.get_source_handle())
if source is not None:
if source.get_author():
authorstring = source.get_author() + ": "
else:
authorstring = ""
list_html = Html("li",
self.source_link(
source.get_handle(),
authorstring + source.get_title(),
source.get_gramps_id(), cindex,
uplink=self.uplink))
else:
list_html = Html("li", "None")
ordered1 = Html("ol", id="citr")
citation_ref_list = citation.get_ref_list()
for key, sref in citation_ref_list:
cit_ref_li = Html("li", id="sref%d%s" % (cindex, key))
tmp = Html("ul")
conf = conf_strings.get(sref.confidence, self._('Unknown'))
if conf == conf_strings[Citation.CONF_NORMAL]:
conf = None
else:
conf = self._(conf)
for (label, data) in [[self._("Date"),
self.rlocale.get_date(sref.date)],
[self._("Page"), sref.page],
[self._("Confidence"), conf]]:
if data:
tmp += Html("li",
self._("%(str1)s: %(str2)s") % {
'str1' : label,
'str2' : data
})
if self.create_media:
for media_ref in sref.get_media_list():
media_handle = media_ref.get_reference_handle()
media = self.r_db.get_media_from_handle(
media_handle)
if media:
mime_type = media.get_mime_type()
if mime_type:
if mime_type.startswith("image/"):
real_path, new_path = \
self.report.prepare_copy_media(
media)
newpath = self.report.build_url_fname(
new_path, uplink=self.uplink)
self.report.copy_file(
media_path_full(self.r_db,
media.get_path()),
real_path)
tmp += Html("li",
self.media_link(
media_handle,
newpath,
media.get_description(),
self.uplink,
usedescr=False),
inline=True)
else:
tmp += Html("li",
self.doc_link(
media_handle,
media.get_description(),
self.uplink,
usedescr=False),
inline=True)
for handle in sref.get_note_list():
this_note = self.r_db.get_note_from_handle(handle)
if this_note is not None:
note_format = self.get_note_format(this_note, True)
tmp += Html("li",
self._("%(str1)s: %(str2)s") % {
'str1' : str(this_note.get_type()),
'str2' : note_format
})
if tmp:
cit_ref_li += tmp
ordered1 += cit_ref_li
if citation_ref_list:
list_html += ordered1
ordered += list_html
section += ordered
# return section to its caller
return section
def family_map_link(self, handle, url):
"""
Creates a link to the family map
@param: handle -- The family handle
@param: url -- url to be linked
"""
self.report.fam_link[handle] = url
return Html("a", self._("Family Map"), href=url,
title=self._("Family Map"), class_="familymap",
inline=True)
def display_spouse(self, partner, family, place_lat_long):
"""
Display an individual's partner
@param: partner -- The partner
@param: family -- The family
@param: place_lat_long -- For use in Family Map Pages. This will be
None if called from Family pages, which do
not create a Family Map
"""
gender = partner.get_gender()
reltype = family.get_relationship()
rtype = self._(str(family.get_relationship().xml_str()))
if reltype == FamilyRelType.MARRIED:
if gender == Person.FEMALE:
relstr = self._("Wife")
elif gender == Person.MALE:
relstr = self._("Husband")
else:
relstr = self._("Partner")
else:
relstr = self._("Partner")
# display family relationship status, and add spouse to FamilyMapPages
if self.familymappages:
self._get_event_place(partner, place_lat_long)
trow = Html("tr", class_="BeginFamily") + (
Html("td", rtype, class_="ColumnType", inline=True),
Html("td", relstr, class_="ColumnAttribute", inline=True)
)
tcell = Html("td", class_="ColumnValue")
trow += tcell
tcell += self.new_person_link(partner.get_handle(), uplink=True,
person=partner)
birth = death = ""
bd_event = get_birth_or_fallback(self.r_db, partner)
if bd_event:
birth = self.rlocale.get_date(bd_event.get_date_object())
dd_event = get_death_or_fallback(self.r_db, partner)
if dd_event:
death = self.rlocale.get_date(dd_event.get_date_object())
if death == "":
death = "..."
tcell += " ( * ", birth, " + ", death, " )"
return trow
def display_child_link(self, chandle):
"""
display child link ...
@param: chandle -- Child handle
"""
return self.new_person_link(chandle, uplink=True)
def new_person_link(self, person_handle, uplink=False, person=None,
name_style=_NAME_STYLE_DEFAULT):
"""
creates a link for a person. If a page is generated for the person, a
hyperlink is created, else just the name of the person. The returned
value will be an Html object if a hyperlink is generated, otherwise
just a string
@param: person_handle -- Person in database
@param: uplink -- If True, then "../../../" is inserted in
front of the result
@param: person -- Person object. This does not need to be
passed. It should be passed if the person
object has already been retrieved, as it
will be used to improve performance
"""
result = self.report.obj_dict.get(Person).get(person_handle)
# construct link, name and gid
if result is None:
# The person is not included in the webreport
link = ""
if person is None:
person = self.r_db.get_person_from_handle(person_handle)
if person:
name = self.report.get_person_name(person)
gid = person.get_gramps_id()
else:
name = self._("Unknown")
gid = ""
else:
# The person has been encountered in the web report, but this does
# not necessarily mean that a page has been generated
(link, name, gid) = result
name = html_escape(name)
# construct the result
if not self.noid and gid != "":
gid_html = Html("span", " [%s]" % gid, class_="grampsid",
inline=True)
else:
gid_html = ""
if link != "":
url = self.report.build_url_fname(link, uplink=uplink)
hyper = Html("a", name, gid_html, href=url, inline=True)
else:
hyper = name + str(gid_html)
return hyper
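# Illustrative usage sketch (handle and output are assumptions, not from the
# original source):
#     hyper = self.new_person_link(person_handle, uplink=True)
# returns an Html("a", ...) element when a page exists for the person in
# obj_dict, otherwise a plain string such as "John Smith [I0042]".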
def media_link(self, media_handle, img_url, name,
uplink=False, usedescr=True):
"""
creates and returns a hyperlink to the thumbnail image
@param: media_handle -- Photo handle from report database
@param: img_url -- Thumbnail url
@param: name -- Photo description
@param: uplink -- If True, then "../../../" is inserted in front
of the result.
@param: usedescr -- Add media description
"""
url = self.report.build_url_fname(media_handle, "img", uplink,
image=True) + self.ext
name = html_escape(name)
# begin thumbnail division
with Html("div", class_="thumbnail") as thumbnail:
# begin hyperlink
if not self.create_thumbs_only:
hyper = Html("a", href=url, title=name) + (
Html("img", src=img_url, alt=name)
)
else:
hyper = Html("img", src=img_url, alt=name)
thumbnail += hyper
if usedescr:
hyper += Html("p", name, inline=True)
return thumbnail
def doc_link(self, handle, name, uplink=False, usedescr=True):
"""
create a hyperlink for the media object and returns it
@param: handle -- Document handle
@param: name -- Document name
@param: uplink -- If True, then "../../../" is inserted in front of
the result.
@param: usedescr -- Add description to hyperlink
"""
url = self.report.build_url_fname_html(handle, "img", uplink)
name = html_escape(name)
# begin thumbnail division
with Html("div", class_="thumbnail") as thumbnail:
document_url = self.report.build_url_image("document.png",
"images", uplink)
if not self.create_thumbs_only:
document_link = Html("a", href=url, title=name) + (
Html("img", src=document_url, alt=name)
)
else:
document_link = Html("img", src=document_url, alt=name)
if usedescr:
document_link += Html('br') + (
Html("span", name, inline=True)
)
thumbnail += document_link
return thumbnail
def place_link(self, handle, name, gid=None, uplink=False):
"""
Returns a hyperlink for a place link
@param: handle -- place handle from report database
@param: name -- place title
@param: gid -- gramps id
@param: uplink -- If True, then "../../../" is inserted in front of
the result.
"""
url = self.report.build_url_fname_html(handle, "plc", uplink)
hyper = Html("a", html_escape(name), href=url,
title=html_escape(name))
if not self.noid and gid:
hyper += Html("span", " [%s]" % gid, class_="grampsid", inline=True)
# return hyperlink to its callers
return hyper
def dump_place(self, place, table):
"""
Dump a place's information from within the database
@param: place -- Place object from the database
@param: table -- Table from Placedetail
"""
# add table body
tbody = Html("tbody")
table += tbody
gid = place.gramps_id
if not self.noid and gid:
trow = Html("tr") + (
Html("td", self._("Gramps ID"), class_="ColumnAttribute",
inline=True),
Html("td", gid, class_="ColumnValue", inline=True)
)
tbody += trow
data = place.get_latitude()
if data != "":
trow = Html('tr') + (
Html("td", self._("Latitude"), class_="ColumnAttribute",
inline=True),
Html("td", data, class_="ColumnValue", inline=True)
)
tbody += trow
data = place.get_longitude()
if data != "":
trow = Html('tr') + (
Html("td", self._("Longitude"), class_="ColumnAttribute",
inline=True),
Html("td", data, class_="ColumnValue", inline=True)
)
tbody += trow
mlocation = get_main_location(self.r_db, place)
for (label, data) in [
(self._("Street"), mlocation.get(PlaceType.STREET, '')),
(self._("Locality"), mlocation.get(PlaceType.LOCALITY, '')),
(self._("City"), mlocation.get(PlaceType.CITY, '')),
(self._("Church Parish"),
mlocation.get(PlaceType.PARISH, '')),
(self._("County"), mlocation.get(PlaceType.COUNTY, '')),
(self._("State/ Province"),
mlocation.get(PlaceType.STATE, '')),
(self._("Postal Code"), place.get_code()),
(self._("Province"), mlocation.get(PlaceType.PROVINCE, '')),
(self._("Country"), mlocation.get(PlaceType.COUNTRY, ''))]:
if data:
trow = Html("tr") + (
Html("td", label, class_="ColumnAttribute", inline=True),
Html("td", data, class_="ColumnValue", inline=True)
)
tbody += trow
# display all related locations
for placeref in place.get_placeref_list():
place_date = self.rlocale.get_date(placeref.get_date_object())
if place_date != "":
parent_place = self.r_db.get_place_from_handle(placeref.ref)
parent_name = parent_place.get_name().get_value()
trow = Html('tr') + (
Html("td", self._("Locations"), class_="ColumnAttribute",
inline=True),
Html("td", parent_name, class_="ColumnValue", inline=True),
Html("td", place_date, class_="ColumnValue", inline=True)
)
tbody += trow
altloc = place.get_alternative_names()
if altloc:
tbody += Html("tr") + Html("td", " ", colspan=2)
date_msg = self._("Date range in which the name is valid.")
trow = Html("tr") + (
Html("th", self._("Alternate Names"), colspan=1,
class_="ColumnAttribute", inline=True),
Html("th", self._("Language"), colspan=1,
class_="ColumnAttribute", inline=True),
Html("th", date_msg, colspan=1,
class_="ColumnAttribute", inline=True),
)
tbody += trow
for loc in altloc:
place_date = self.rlocale.get_date(loc.date)
trow = Html("tr") + (
Html("td", loc.get_value(), class_="ColumnValue",
inline=True),
Html("td", loc.get_language(), class_="ColumnValue",
inline=True),
Html("td", place_date, class_="ColumnValue",
inline=True),
)
tbody += trow
altloc = place.get_alternate_locations()
if altloc:
tbody += Html("tr") + Html("td", " ", colspan=2)
trow = Html("tr") + (
Html("th", self._("Alternate Locations"), colspan=2,
class_="ColumnAttribute", inline=True),
)
tbody += trow
for loc in (nonempt
for nonempt in altloc if not nonempt.is_empty()):
for (label, data) in [(self._("Street"), loc.street),
(self._("Locality"), loc.locality),
(self._("City"), loc.city),
(self._("Church Parish"), loc.parish),
(self._("County"), loc.county),
(self._("State/ Province"), loc.state),
(self._("Postal Code"), loc.postal),
(self._("Country"), loc.country),]:
if data:
trow = Html("tr") + (
Html("td", label, class_="ColumnAttribute",
inline=True),
Html("td", data, class_="ColumnValue", inline=True)
)
tbody += trow
tbody += Html("tr") + Html("td", " ", colspan=2)
# Tags
tags = self.show_tags(place)
if tags and self.report.inc_tags:
trow = Html("tr") + (
Html("td", self._("Tags"),
class_="ColumnAttribute", inline=True),
Html("td", tags,
class_="ColumnValue", inline=True)
)
tbody += trow
# enclosed by
tbody += Html("tr") + Html("td", " ")
trow = Html("tr") + (
Html("th", self._("Enclosed By"),
class_="ColumnAttribute", inline=True),
)
tbody += trow
def sort_by_enclosed_by(obj):
"""
Sort by enclosed by
"""
place_name = ""
parent_place = self.r_db.get_place_from_handle(obj.ref)
if parent_place:
place_name = parent_place.get_name().get_value()
return place_name
def sort_by_encl(obj):
"""
Sort by encloses
"""
return obj[0]
for placeref in sorted(place.get_placeref_list(),
key=sort_by_enclosed_by):
parent_place = self.r_db.get_place_from_handle(placeref.ref)
if parent_place:
place_name = parent_place.get_name().get_value()
if parent_place.handle in self.report.obj_dict[Place]:
place_hyper = self.place_link(parent_place.handle,
place_name,
uplink=self.uplink)
else:
place_hyper = place_name
trow = Html("tr") + (
Html("td", place_hyper, class_="ColumnPlace",
inline=True))
tbody += trow
# enclose
tbody += Html("tr") + Html("td", " ")
trow = Html("tr") + (
Html("th", self._("Place Encloses"),
class_="ColumnAttribute", inline=True),
)
tbody += trow
encloses = []
for link in self.r_db.find_backlink_handles(
place.handle, include_classes=['Place']):
child_place = self.r_db.get_place_from_handle(link[1])
placeref = None
for placeref in child_place.get_placeref_list():
if placeref.ref == place.handle:
place_name = child_place.get_name().get_value()
if link[1] in self.report.obj_dict[Place]:
encloses.append((place_name, link[1]))
else:
encloses.append((place_name, ""))
for (name, handle) in sorted(encloses, key=sort_by_encl):
place_name = child_place.get_name().get_value()
if handle and handle in self.report.obj_dict[Place]:
place_hyper = self.place_link(handle, name,
uplink=self.uplink)
else:
place_hyper = name
trow = Html("tr") + (
Html("td", place_hyper,
class_="ColumnPlace", inline=True))
tbody += trow
# return place table to its callers
return table
def repository_link(self, repository_handle, name,
gid=None, uplink=False):
"""
Returns a hyperlink for repository links
@param: repository_handle -- repository handle from report database
@param: name -- repository title
@param: gid -- gramps id
@param: uplink -- If True, then "../../../" is inserted in
front of the result.
"""
url = self.report.build_url_fname_html(repository_handle,
'repo', uplink)
name = html_escape(name)
hyper = Html("a", name, href=url, title=name)
if not self.noid and gid:
hyper += Html("span", '[%s]' % gid, class_="grampsid", inline=True)
return hyper
def dump_repository_ref_list(self, repo_ref_list):
"""
Dumps the repository
@param: repo_ref_list -- The list of repositories references
"""
if not repo_ref_list:
return None
# Repository list division...
with Html("div", class_="subsection",
id="repositories") as repositories:
repositories += Html("h4", self._("Repositories"), inline=True)
with Html("table", class_="infolist") as table:
repositories += table
thead = Html("thead")
table += thead
trow = Html("tr") + (
Html("th", self._("Number"), class_="ColumnRowLabel",
inline=True),
Html("th", self._("Title"), class_="ColumnName",
inline=True),
Html("th", self._("Type"), class_="ColumnName",
inline=True),
Html("th", self._("Call number"), class_="ColumnName",
inline=True)
)
thead += trow
tbody = Html("tbody")
table += tbody
index = 1
for repo_ref in repo_ref_list:
repo = self.r_db.get_repository_from_handle(repo_ref.ref)
if repo:
trow = Html("tr") + (
Html("td", index, class_="ColumnRowLabel",
inline=True),
Html("td",
self.repository_link(repo_ref.ref,
repo.get_name(),
repo.get_gramps_id(),
self.uplink)),
Html("td",
self._(repo_ref.get_media_type().xml_str()),
class_="ColumnName"),
Html("td", repo_ref.get_call_number(),
class_="ColumnName")
)
tbody += trow
index += 1
return repositories
def dump_residence(self, has_res):
"""
Creates a residence from the database
@param: has_res -- The residence to use
"""
if not has_res:
return None
# begin residence division
with Html("div", class_="content Residence") as residence:
residence += Html("h4", self._("Residence"), inline=True)
with Html("table", class_="infolist place") as table:
residence += table
place_handle = has_res.get_place_handle()
if place_handle:
place = self.r_db.get_place_from_handle(place_handle)
if place:
self.dump_place(place, table)
descr = has_res.get_description()
if descr:
trow = Html("tr")
if len(table) == 3:
# append description row to tbody element
# of dump_place
table[-2] += trow
else:
# append description row to table element
table += trow
trow.extend(Html("td", self._("Description"),
class_="ColumnAttribute", inline=True))
trow.extend(Html("td", descr, class_="ColumnValue",
inline=True))
# return information to its callers
return residence
def display_bkref(self, bkref_list, depth):
"""
Display a reference list for an object class
@param: bkref_list -- The reference list
@param: depth -- The style of list to use
"""
list_style = "1", "a", "I", "A", "i"
ordered = Html("ol", class_="Col1", role="Volume-n-Page")
ordered.attr += " type=%s" % list_style[depth]
if depth > len(list_style):
return ""
# Sort by the role of the object at the bkref_class, bkref_handle
def sort_by_role(obj):
"""
Sort by role
"""
if obj[2] == "Primary":
role = "0"
elif obj[2] == "Family":
role = "1"
else:
if self.reference_sort:
role = obj[2] # name
elif len(obj[2].split('-')) > 1:
dummy_cal, role = obj[2].split(':') # date in ISO format
# dummy_cal is the original calendar. remove it.
if len(role.split(' ')) == 2:
# for sort, remove the modifier before, after...
(dummy_modifier, role) = role.split(' ')
else:
role = "3"
return role
for (bkref_class, bkref_handle, role) in sorted(
bkref_list, key=lambda x:
sort_by_role(x)):
list_html = Html("li")
path = self.report.obj_dict[bkref_class][bkref_handle][0]
name = self.report.obj_dict[bkref_class][bkref_handle][1]
gid = self.report.obj_dict[bkref_class][bkref_handle][2]
if role != "":
if self.reference_sort:
role = self.birth_death_dates(gid)
elif role[1:2] == ':':
# cal is the original calendar
cal, role = role.split(':')
# convert ISO date to Date for translation.
# all modifiers are in english, so convert them
# to the local language
if len(role.split(' - ')) > 1:
(date1, date2) = role.split(' - ')
role = self._("between") + " " + date1 + " "
role += self._("and") + " " + date2
elif len(role.split(' ')) == 2:
(pref, date) = role.split(' ')
if "aft" in pref:
role = self._("after") + " " + date
elif "bef" in pref:
role = self._("before") + " " + date
elif pref in ("abt", "about"):
role = self._("about") + " " + date
elif "c" in pref:
role = self._("circa") + " " + date
elif "around" in pref:
role = self._("around") + " " + date
# parse is done in the default language
date = _dp.parse(role)
# reset the date to the original calendar
cdate = date.to_calendar(Date.calendar_names[int(cal)])
ldate = self.rlocale.get_date(cdate)
evtype = self.event_for_date(gid, cdate)
if evtype:
evtype = " " + evtype
role = " (%s) " % (ldate + evtype)
else:
role = " (%s) " % self._(role)
ordered += list_html
if path == "":
list_html += name
list_html += self.display_bkref(
self.report.bkref_dict[bkref_class][bkref_handle],
depth+1)
else:
url = self.report.build_url_fname(path, uplink=self.uplink)
if not self.noid and gid != "":
gid_html = Html("span", " [%s]" % gid,
class_="grampsid", inline=True)
else:
gid_html = ""
list_html += Html("a", href=url) + name + role + gid_html
return ordered
def event_for_date(self, gid, date):
"""
return the event type
@param: gid -- the person gramps ID
@param: date -- the event to look for this date
"""
pers = self.r_db.get_person_from_gramps_id(gid)
if pers:
evt_ref_list = pers.get_event_ref_list()
if evt_ref_list:
for evt_ref in evt_ref_list:
evt = self.r_db.get_event_from_handle(evt_ref.ref)
if evt:
evdate = evt.get_date_object()
# convert date to gregorian
_date = str(evdate.to_calendar("gregorian"))
if _date == str(date):
return self._(str(evt.get_type()))
return ""
def birth_death_dates(self, gid):
"""
return the birth and death date for the person
@param: gid -- the person gramps ID
"""
pers = self.r_db.get_person_from_gramps_id(gid)
if pers:
birth = death = ""
evt_birth = get_birth_or_fallback(self.r_db, pers)
if evt_birth:
birthd = evt_birth.get_date_object()
# convert date to gregorian to avoid strange years
birth = str(birthd.to_calendar("gregorian").get_year())
evt_death = get_death_or_fallback(self.r_db, pers)
if evt_death:
deathd = evt_death.get_date_object()
# convert date to gregorian to avoid strange years
death = str(deathd.to_calendar("gregorian").get_year())
return "(%s-%s)" % (birth, death)
else:
return ""
def display_bkref_list(self, obj_class, obj_handle):
"""
Display a reference list for an object class
@param: obj_class -- The object class to use
@param: obj_handle -- The handle to use
"""
bkref_list = self.report.bkref_dict[obj_class][obj_handle]
if not bkref_list:
return None
# begin references division and title
with Html("div", class_="subsection", id="references") as section:
section += Html("h4", self._("References"), inline=True)
depth = 0
ordered = self.display_bkref(bkref_list, depth)
section += ordered
return section
# -----------------------------------------------------------------------
# # Web Page Formatter and writer
# -----------------------------------------------------------------------
def xhtml_writer(self, htmlinstance, output_file, sio, date):
"""
Will format, write, and close the file
@param: output_file -- Open file that is being written to
@param: htmlinstance -- Web page created with libhtml
gramps/plugins/lib/libhtml.py
"""
htmlinstance.write(partial(print, file=output_file))
# closes the file
self.report.close_file(output_file, sio, date)
def create_toggle(self, element):
"""
will produce a toggle button
@param: element -- The html element name
"""
use_toggle = self.report.options['toggle']
if use_toggle:
viewbox = "0 0 100 100"
points = "5.9,88.2 50,11.8 94.1,88.2"
svg = Html("svg", viewBox=viewbox, class_="triangle", inline=False)
svg += Html("polygon", points=points)
toggle_name = 'toggle_' + element
id_name = 'icon_' + element
with Html("h4",
onclick="toggleContent('" + toggle_name
+ "', '" + id_name + "');") as toggle:
toggle += Html("button", svg,
id=id_name, class_='icon')
else:
toggle = Html("h4", inline=True)
return toggle
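# Illustrative pairing sketch (ids follow the pattern used above, not new API):
# callers such as display_note_list() do
#     with self.create_toggle("note") as h4_head: ...
# and then emit a companion div with id="toggle_note" whose display is
# "none" or "block"; the generated onclick handler
# toggleContent('toggle_note', 'icon_note') shows or hides that div.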
|
Nick-Hall/gramps
|
gramps/plugins/webreport/basepage.py
|
Python
|
gpl-2.0
| 142,223
|
[
"Brian"
] |
3fd7438bb640ecce31fb0777f5e80495def034661faae751dc00d52803605198
|
########################################################################
# $HeadURL$
# File : ModuleFactory.py
# Author : Stuart Paterson
########################################################################
""" The Module Factory instantiates a given Module based on a given input
string and set of arguments to be passed. This allows for VO specific
module utilities to be used in various contexts.
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, gLogger
class ModuleFactory:
#############################################################################
def __init__( self ):
""" Standard constructor
"""
self.log = gLogger
#############################################################################
def getModule( self, importString, argumentsDict ):
"""This method returns the Module instance given the import string and
arguments dictionary.
"""
try:
moduleName = importString.split( '.' )[-1]
modulePath = importString.replace( '.%s' % ( moduleName ), '' )
importModule = __import__( '%s.%s' % ( modulePath, moduleName ), globals(), locals(), [moduleName] )
except Exception, x:
msg = 'ModuleFactory could not import %s.%s' % ( modulePath, moduleName )
self.log.warn( x )
self.log.warn( msg )
return S_ERROR( msg )
try:
# FIXME: should we use imp module?
moduleStr = 'importModule.%s(argumentsDict)' % ( moduleName )
moduleInstance = eval( moduleStr )
except Exception, x:
msg = 'ModuleFactory could not instantiate %s()' % ( moduleName )
self.log.warn( x )
self.log.warn( msg )
return S_ERROR( msg )
return S_OK( moduleInstance )
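# Illustrative usage sketch (module path and arguments are hypothetical, not
# part of DIRAC):
#   factory = ModuleFactory()
#   result = factory.getModule( 'DIRAC.SomeSystem.Client.SomeModule', {'Param': 1} )
#   if result['OK']:
#     moduleInstance = result['Value']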
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
sposs/DIRAC
|
Core/Utilities/ModuleFactory.py
|
Python
|
gpl-3.0
| 1,836
|
[
"DIRAC"
] |
8d7f9298ababfbe823173d4e3d12cd4711d6f43d0cee97b3d8e403c7f3a6084f
|
import sys
from ast import literal_eval
from itertools import islice, chain
from . import nodes
from ._compat import text_type
from .compiler import CodeGenerator, has_safe_repr
from .environment import Environment, Template
from .utils import concat, escape
def native_concat(nodes):
"""Return a native Python type from the list of compiled nodes. If the
result is a single node, its value is returned. Otherwise, the nodes are
concatenated as strings. If the result can be parsed with
:func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
string is returned.
"""
head = list(islice(nodes, 2))
if not head:
return None
if len(head) == 1:
out = head[0]
else:
out = u''.join([text_type(v) for v in chain(head, nodes)])
try:
return literal_eval(out)
except (ValueError, SyntaxError, MemoryError):
return out
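# Behaviour sketch (examples are assumptions derived from the docstring, not
# part of the original module):
#   native_concat(iter(['1', '2']))             -> 12        (literal_eval succeeds)
#   native_concat(iter(['[1, ', '2]']))         -> [1, 2]
#   native_concat(iter(['no', 't a literal']))  -> 'not a literal'
#   native_concat(iter([]))                     -> None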
class NativeCodeGenerator(CodeGenerator):
"""A code generator which avoids injecting ``to_string()`` calls around the
internal code Jinja uses to render templates.
"""
def visit_Output(self, node, frame):
"""Same as :meth:`CodeGenerator.visit_Output`, but do not call
``to_string`` on output nodes in generated code.
"""
if self.has_known_extends and frame.require_output_check:
return
finalize = self.environment.finalize
finalize_context = getattr(finalize, 'contextfunction', False)
finalize_eval = getattr(finalize, 'evalcontextfunction', False)
finalize_env = getattr(finalize, 'environmentfunction', False)
if finalize is not None:
if finalize_context or finalize_eval:
const_finalize = None
elif finalize_env:
def const_finalize(x):
return finalize(self.environment, x)
else:
const_finalize = finalize
else:
def const_finalize(x):
return x
# If we are inside a frame that requires output checking, we do so.
outdent_later = False
if frame.require_output_check:
self.writeline('if parent_template is None:')
self.indent()
outdent_later = True
# Try to evaluate as many chunks as possible into a static string at
# compile time.
body = []
for child in node.nodes:
try:
if const_finalize is None:
raise nodes.Impossible()
const = child.as_const(frame.eval_ctx)
if not has_safe_repr(const):
raise nodes.Impossible()
except nodes.Impossible:
body.append(child)
continue
# the frame can't be volatile here, because otherwise the as_const
# function would raise an Impossible exception at that point
try:
if frame.eval_ctx.autoescape:
if hasattr(const, '__html__'):
const = const.__html__()
else:
const = escape(const)
const = const_finalize(const)
except Exception:
# if something goes wrong here we evaluate the node at runtime
# for easier debugging
body.append(child)
continue
if body and isinstance(body[-1], list):
body[-1].append(const)
else:
body.append([const])
# if we have less than 3 nodes or a buffer we yield or extend/append
if len(body) < 3 or frame.buffer is not None:
if frame.buffer is not None:
# for one item we append, for more we extend
if len(body) == 1:
self.writeline('%s.append(' % frame.buffer)
else:
self.writeline('%s.extend((' % frame.buffer)
self.indent()
for item in body:
if isinstance(item, list):
val = repr(native_concat(item))
if frame.buffer is None:
self.writeline('yield ' + val)
else:
self.writeline(val + ',')
else:
if frame.buffer is None:
self.writeline('yield ', item)
else:
self.newline(item)
close = 0
if finalize is not None:
self.write('environment.finalize(')
if finalize_context:
self.write('context, ')
close += 1
self.visit(item, frame)
if close > 0:
self.write(')' * close)
if frame.buffer is not None:
self.write(',')
if frame.buffer is not None:
# close the open parentheses
self.outdent()
self.writeline(len(body) == 1 and ')' or '))')
# otherwise we create a format string as this is faster in that case
else:
format = []
arguments = []
for item in body:
if isinstance(item, list):
format.append(native_concat(item).replace('%', '%%'))
else:
format.append('%s')
arguments.append(item)
self.writeline('yield ')
self.write(repr(concat(format)) + ' % (')
self.indent()
for argument in arguments:
self.newline(argument)
close = 0
if finalize is not None:
self.write('environment.finalize(')
if finalize_context:
self.write('context, ')
elif finalize_eval:
self.write('context.eval_ctx, ')
elif finalize_env:
self.write('environment, ')
close += 1
self.visit(argument, frame)
self.write(')' * close + ', ')
self.outdent()
self.writeline(')')
if outdent_later:
self.outdent()
class NativeTemplate(Template):
def render(self, *args, **kwargs):
"""Render the template to produce a native Python type. If the result
is a single node, its value is returned. Otherwise, the nodes are
concatenated as strings. If the result can be parsed with
:func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
string is returned.
"""
vars = dict(*args, **kwargs)
try:
return native_concat(self.root_render_func(self.new_context(vars)))
except Exception:
exc_info = sys.exc_info()
return self.environment.handle_exception(exc_info, True)
class NativeEnvironment(Environment):
"""An environment that renders templates to native Python types."""
code_generator_class = NativeCodeGenerator
template_class = NativeTemplate
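# Illustrative usage sketch (values are assumptions, not part of the module):
#   env = NativeEnvironment()
#   result = env.from_string('{{ x + y }}').render(x=4, y=2)
#   assert result == 6   # a native int, not the string '6'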
|
facelessuser/sublime-markdown-popups
|
st3/mdpopups/jinja2/nativetypes.py
|
Python
|
mit
| 7,279
|
[
"VisIt"
] |
38242398fef030a9d3a8a66e2c243ee79a31dbec4823179b2e86191a22146384
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import time
import numpy
import scipy.linalg
from pyscf import lib
from pyscf.lib import logger
from pyscf import gto
from pyscf.df import addons
from pyscf import __config__
MAX_MEMORY = getattr(__config__, 'df_outcore_max_memory', 2000) # 2GB
LINEAR_DEP_THR = getattr(__config__, 'df_df_DF_lindep', 1e-12)
# This function is aliased for backward compatibility.
format_aux_basis = addons.make_auxmol
def aux_e2(mol, auxmol, intor='int3c2e', aosym='s1', comp=None, out=None,
cintopt=None):
'''3-center AO integrals (ij|L), where L is the auxiliary basis.
Kwargs:
cintopt : Libcint 3.14 and newer versions support computing int3c2e
without the optimizer for the 3rd index. It can be precomputed to
reduce the overhead of repeated cintopt initialization.
cintopt = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, 'int3c2e')
'''
from pyscf.gto.moleintor import getints, make_cintopt
shls_slice = (0, mol.nbas, 0, mol.nbas, mol.nbas, mol.nbas+auxmol.nbas)
# Extract the call of the two lines below
# pmol = gto.mole.conc_mol(mol, auxmol)
# return pmol.intor(intor, comp, aosym=aosym, shls_slice=shls_slice, out=out)
intor = mol._add_suffix(intor)
hermi = 0
ao_loc = None
atm, bas, env = gto.mole.conc_env(mol._atm, mol._bas, mol._env,
auxmol._atm, auxmol._bas, auxmol._env)
return getints(intor, atm, bas, env, shls_slice, comp, hermi, aosym,
ao_loc, cintopt, out)
def aux_e1(mol, auxmol, intor='int3c2e', aosym='s1', comp=None, out=None):
'''3-center 2-electron AO integrals (L|ij), where L is the auxiliary basis.
Note that aux_e1 is basically analogous to the aux_e2 function. It can be
viewed as the transposed aux_e2 tensor:
if comp == 1:
aux_e1 = aux_e2().T
else:
aux_e1 = aux_e2().transpose(0,2,1)
The same arguments as for aux_e2 can be passed to aux_e1.
'''
out = aux_e2(mol, auxmol, intor, aosym, comp, out)
if out.ndim == 2: # comp == 1
out = out.T
else:
out = out.transpose(0,2,1)
return out
def fill_2c2e(mol, auxmol, intor='int2c2e', comp=None, hermi=1, out=None):
'''2-center 2-electron AO integrals for auxiliary basis (auxmol)
'''
return auxmol.intor(intor, comp=comp, hermi=hermi, out=out)
# Note: the temporary memory usage is about twice as large as the returned
# cderi array
def cholesky_eri(mol, auxbasis='weigend+etb', auxmol=None,
int3c='int3c2e', aosym='s2ij', int2c='int2c2e', comp=1,
max_memory=MAX_MEMORY, verbose=0, fauxe2=aux_e2):
'''
Returns:
2D array of (naux,nao*(nao+1)/2) in C-contiguous
'''
from pyscf.df.outcore import _guess_shell_ranges
assert(comp == 1)
t0 = (time.clock(), time.time())
log = logger.new_logger(mol, verbose)
if auxmol is None:
auxmol = addons.make_auxmol(mol, auxbasis)
j2c = auxmol.intor(int2c, hermi=1)
try:
low = scipy.linalg.cholesky(j2c, lower=True)
tag = 'cd'
except scipy.linalg.LinAlgError:
w, v = scipy.linalg.eigh(j2c)
idx = w > LINEAR_DEP_THR
low = (v[:,idx] / numpy.sqrt(w[idx]))
v = None
tag = 'eig'
j2c = None
naoaux, naux = low.shape
log.debug('size of aux basis %d', naux)
t1 = log.timer_debug1('2c2e', *t0)
int3c = gto.moleintor.ascint3(mol._add_suffix(int3c))
atm, bas, env = gto.mole.conc_env(mol._atm, mol._bas, mol._env,
auxmol._atm, auxmol._bas, auxmol._env)
ao_loc = gto.moleintor.make_loc(bas, int3c)
nao = ao_loc[mol.nbas]
if aosym == 's1':
nao_pair = nao * nao
else:
nao_pair = nao * (nao+1) // 2
cderi = numpy.empty((naux, nao_pair))
max_words = max_memory*.98e6/8 - low.size - cderi.size
# Divide by 3 because scipy.linalg.solve may create a temporary copy for
# ints and return another copy for results
buflen = min(max(int(max_words/naoaux/comp/3), 8), nao_pair)
shranges = _guess_shell_ranges(mol, buflen, aosym)
log.debug1('shranges = %s', shranges)
cintopt = gto.moleintor.make_cintopt(atm, bas, env, int3c)
bufs1 = numpy.empty((comp*max([x[2] for x in shranges]),naoaux))
bufs2 = numpy.empty_like(bufs1)
p1 = 0
for istep, sh_range in enumerate(shranges):
log.debug('int3c2e [%d/%d], AO [%d:%d], nrow = %d', \
istep+1, len(shranges), *sh_range)
bstart, bend, nrow = sh_range
shls_slice = (bstart, bend, 0, mol.nbas, mol.nbas, mol.nbas+auxmol.nbas)
ints = gto.moleintor.getints3c(int3c, atm, bas, env, shls_slice, comp,
aosym, ao_loc, cintopt, out=bufs1)
if ints.ndim == 3 and ints.flags.f_contiguous:
ints = lib.transpose(ints.T, axes=(0,2,1), out=bufs2).reshape(naoaux,-1)
bufs1, bufs2 = bufs2, bufs1
else:
ints = ints.reshape((-1,naoaux)).T
p0, p1 = p1, p1 + nrow
if tag == 'cd':
if ints.flags.c_contiguous:
ints = lib.transpose(ints, out=bufs2).T
bufs1, bufs2 = bufs2, bufs1
dat = scipy.linalg.solve_triangular(low, ints, lower=True,
overwrite_b=True, check_finite=False)
if dat.flags.f_contiguous:
dat = lib.transpose(dat.T, out=bufs2)
cderi[:,p0:p1] = dat
else:
dat = numpy.ndarray((naux, ints.shape[1]), buffer=bufs2)
cderi[:,p0:p1] = lib.dot(low.T, ints, c=dat)
dat = ints = None
log.timer('cholesky_eri', *t0)
return cderi
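# Illustrative usage sketch (mirrors the __main__ check further below; variable
# names are assumptions):
#   cderi = cholesky_eri(mol, auxbasis='weigend+etb')   # shape (naux, nao*(nao+1)//2)
#   eri_df = numpy.einsum('pi,pk->ik', cderi, cderi)    # density-fitted (ij|kl), packed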
# Debug version of cholesky_eri. Note: the temporary memory usage is about
# twice as large as the returned cderi array
def cholesky_eri_debug(mol, auxbasis='weigend+etb', auxmol=None,
int3c='int3c2e', aosym='s2ij', int2c='int2c2e', comp=1,
verbose=0, fauxe2=aux_e2):
'''
Returns:
2D array of (naux,nao*(nao+1)/2) in C-contiguous
'''
assert(comp == 1)
t0 = (time.clock(), time.time())
log = logger.new_logger(mol, verbose)
if auxmol is None:
auxmol = addons.make_auxmol(mol, auxbasis)
j2c = auxmol.intor(int2c, hermi=1)
naux = j2c.shape[0]
log.debug('size of aux basis %d', naux)
t1 = log.timer('2c2e', *t0)
j3c = fauxe2(mol, auxmol, intor=int3c, aosym=aosym).reshape(-1,naux)
t1 = log.timer('3c2e', *t1)
try:
low = scipy.linalg.cholesky(j2c, lower=True)
j2c = None
t1 = log.timer('Cholesky 2c2e', *t1)
cderi = scipy.linalg.solve_triangular(low, j3c.T, lower=True,
overwrite_b=True)
except scipy.linalg.LinAlgError:
w, v = scipy.linalg.eigh(j2c)
idx = w > LINEAR_DEP_THR
v = (v[:,idx] / numpy.sqrt(w[idx]))
cderi = lib.dot(v.T, j3c.T)
j3c = None
if cderi.flags.f_contiguous:
cderi = lib.transpose(cderi.T)
log.timer('cholesky_eri', *t0)
return cderi
if __name__ == '__main__':
from pyscf import scf
from pyscf import ao2mo
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom.extend([
["H", (0, 0, 0 )],
["H", (0, 0, 1 )],
])
mol.basis = 'cc-pvdz'
mol.build()
auxmol = format_aux_basis(mol)
j3c = aux_e2(mol, auxmol, intor='int3c2e_sph', aosym='s1')
nao = mol.nao_nr()
naoaux = auxmol.nao_nr()
j3c = j3c.reshape(nao,nao,naoaux)
atm, bas, env = \
gto.mole.conc_env(mol._atm, mol._bas, mol._env,
auxmol._atm, auxmol._bas, auxmol._env)
eri0 = numpy.empty((nao,nao,naoaux))
pi = 0
for i in range(mol.nbas):
pj = 0
for j in range(mol.nbas):
pk = 0
for k in range(mol.nbas, mol.nbas+auxmol.nbas):
shls = (i, j, k)
buf = gto.moleintor.getints_by_shell('int3c2e_sph',
shls, atm, bas, env)
di, dj, dk = buf.shape
eri0[pi:pi+di,pj:pj+dj,pk:pk+dk] = buf
pk += dk
pj += dj
pi += di
print(numpy.allclose(eri0, j3c))
j2c = fill_2c2e(mol, auxmol)
eri0 = numpy.empty_like(j2c)
pi = 0
for i in range(mol.nbas, len(bas)):
pj = 0
for j in range(mol.nbas, len(bas)):
shls = (i, j)
buf = gto.moleintor.getints_by_shell('int2c2e_sph',
shls, atm, bas, env)
di, dj = buf.shape
eri0[pi:pi+di,pj:pj+dj] = buf
pj += dj
pi += di
print(numpy.allclose(eri0, j2c))
j3c = aux_e2(mol, auxmol, intor='int3c2e_sph', aosym='s2ij')
cderi = cholesky_eri(mol, auxmol=auxmol)
eri0 = numpy.einsum('pi,pk->ik', cderi, cderi)
eri1 = numpy.einsum('ik,kl->il', j3c, numpy.linalg.inv(j2c))
eri1 = numpy.einsum('ip,kp->ik', eri1, j3c)
print(abs(eri1 - eri0).max())
eri0 = ao2mo.restore(1, eri0, nao)
mf = scf.RHF(mol)
ehf0 = mf.scf()
nao = mf.mo_energy.size
eri1 = ao2mo.restore(1, mf._eri, nao)
print(abs(eri1-eri0).max() - 0.0022142583265513105)
mf._eri = ao2mo.restore(8, eri0, nao)
ehf1 = mf.scf()
mf = scf.RHF(mol).density_fit(auxbasis='weigend')
ehf2 = mf.scf()
mf = mf.density_fit(auxbasis='weigend')
ehf3 = mf.scf()
print(ehf0, ehf1, ehf2, ehf3)
|
gkc1000/pyscf
|
pyscf/df/incore.py
|
Python
|
apache-2.0
| 10,276
|
[
"PySCF"
] |
d74154fd29db02bf37aa9d9ffc3ca663d74b1ff01c879c05dd45e8fbb192947f
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import json
import os
import unittest
import warnings
import numpy as np
from monty.json import MontyDecoder
from pymatgen.analysis.xas.spectrum import XAS, site_weighted_spectrum
from pymatgen.core import Element
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "spectrum_test")
with open(os.path.join(test_dir, "LiCoO2_k_xanes.json")) as fp:
k_xanes_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, "LiCoO2_k_exafs.json")) as fp:
k_exafs_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, "ZnO_l2_xanes.json")) as fp:
l2_xanes_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, "ZnO_l3_xanes.json")) as fp:
l3_xanes_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, "site1_k_xanes.json")) as fp:
site1_xanes_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, "site2_k_xanes.json")) as fp:
site2_xanes_dict = json.load(fp, cls=MontyDecoder)
class XASTest(PymatgenTest):
def setUp(self):
self.k_xanes = XAS.from_dict(k_xanes_dict)
self.k_exafs = XAS.from_dict(k_exafs_dict)
self.l2_xanes = XAS.from_dict(l2_xanes_dict)
self.l3_xanes = XAS.from_dict(l3_xanes_dict)
self.site1_xanes = XAS.from_dict(site1_xanes_dict)
self.site2_xanes = XAS.from_dict(site2_xanes_dict)
def test_e0(self):
self.assertAlmostEqual(7728.565, self.k_xanes.e0)
def test_k(self):
self.assertEqual(len(self.k_xanes.x), len(self.k_xanes.k))
self.assertAlmostEqual(self.k_xanes.e0, self.k_xanes.x[self.k_xanes.k.index(0)])
def test_normalization(self):
self.k_xanes.normalize(mode="sum")
self.assertAlmostEqual(1.0, np.sum(self.k_xanes.y))
def test_add_mul(self):
scaled_spect = self.k_xanes + self.k_xanes
scaled_spect2 = self.k_xanes * 3
self.assertTrue(np.allclose(scaled_spect.y, 2 * self.k_xanes.y))
self.assertTrue(np.allclose(scaled_spect2.y, 3 * self.k_xanes.y))
self.assertAlmostEqual(0.274302, self.k_xanes.get_interpolated_value(7720.422), 3)
def test_to_from_dict(self):
s = XAS.from_dict(self.k_xanes.as_dict())
self.assertArrayAlmostEqual(s.y, self.k_xanes.y)
def test_attributes(self):
self.assertArrayEqual(self.k_xanes.energy, self.k_xanes.x)
self.assertArrayEqual(self.k_xanes.intensity, self.k_xanes.y)
def test_str(self):
self.assertIsNotNone(str(self.k_xanes))
def test_validate(self):
y_zeros = np.zeros(len(self.k_xanes.x))
self.assertRaises(
ValueError,
XAS,
self.k_xanes.x,
y_zeros,
self.k_xanes.structure,
self.k_xanes.absorbing_element,
)
def test_stitch_xafs(self):
self.assertRaises(ValueError, XAS.stitch, self.k_xanes, self.k_exafs, mode="invalid")
xafs = XAS.stitch(self.k_xanes, self.k_exafs, mode="XAFS")
self.assertIsInstance(xafs, XAS)
self.assertEqual("XAFS", xafs.spectrum_type)
self.assertEqual(len(xafs.x), 500)
self.assertAlmostEqual(min(xafs.x), min(self.k_xanes.x), 2)
self.assertAlmostEqual(max(xafs.y), max(self.k_xanes.y), 2)
self.assertAlmostEqual(
xafs.x[np.argmax(np.gradient(xafs.y) / np.gradient(xafs.x))],
self.k_xanes.e0,
2,
)
self.assertRaises(ValueError, XAS.stitch, self.k_xanes, self.l2_xanes, mode="XAFS")
self.k_xanes.x = np.zeros(100)
self.assertRaises(ValueError, XAS.stitch, self.k_xanes, self.k_exafs)
self.k_xanes.absorbing_element = Element("Pt")
self.assertRaises(ValueError, XAS.stitch, self.k_xanes, self.k_exafs, mode="XAFS")
def test_stitch_l23(self):
self.l2_xanes.y[0] = 0.1
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
XAS.stitch(self.l2_xanes, self.l3_xanes, 100, mode="L23")
self.assertEqual(len(w), 6)
self.assertIs(w[-1].category, UserWarning)
self.assertIn("jump", str(w[-1].message))
self.l2_xanes = XAS.from_dict(l2_xanes_dict)
l23 = XAS.stitch(self.l2_xanes, self.l3_xanes, 100, mode="L23")
self.assertIsInstance(l23, XAS)
self.assertEqual("L23", l23.edge)
self.assertAlmostEqual(min(l23.x), min(self.l3_xanes.x), 3)
self.assertAlmostEqual(max(l23.x), max(self.l3_xanes.x), 3)
self.assertTrue(np.greater_equal(l23.y, self.l2_xanes.y).all())
self.assertEqual(len(l23.x), 100)
self.l2_xanes.spectrum_type = "EXAFS"
self.assertRaises(ValueError, XAS.stitch, self.l2_xanes, self.l3_xanes, mode="L23")
self.l2_xanes.absorbing_element = Element("Pt")
self.assertRaises(ValueError, XAS.stitch, self.l2_xanes, self.l3_xanes, mode="L23")
self.assertRaises(ValueError, XAS.stitch, self.k_xanes, self.l3_xanes, mode="L23")
def test_site_weighted_spectrum(self):
weighted_spectrum = site_weighted_spectrum([self.site1_xanes, self.site2_xanes])
self.assertIsInstance(weighted_spectrum, XAS)
self.assertTrue(len(weighted_spectrum.x), 500)
# The site multiplicities for site1 and site2 are 4 and 2, respectively.
self.assertAlmostEqual(
weighted_spectrum.y[0],
(4 * self.site1_xanes.y[0] + 2 * self.site2_xanes.y[0]) / 6,
2,
)
self.assertEqual(
min(weighted_spectrum.x),
max(min(self.site1_xanes.x), min(self.site2_xanes.x)),
)
self.site2_xanes.absorbing_index = self.site1_xanes.absorbing_index
self.assertRaises(ValueError, site_weighted_spectrum, [self.site1_xanes, self.site2_xanes])
if __name__ == "__main__":
unittest.main()
|
gmatteo/pymatgen
|
pymatgen/analysis/xas/tests/test_spectrum.py
|
Python
|
mit
| 5,998
|
[
"pymatgen"
] |
c43b0d02edadd075732c8e338358eece930740945dfaa29c29bdf7f3062b3f2a
|
########################################################################
# $HeadURL: svn+ssh://svn.cern.ch/reps/dirac/DIRAC/trunk/DIRAC/DataManagementSystem/DB/FileCatalogComponents/UserAndGroupManager.py $
########################################################################
__RCSID__ = "$Id$"
""" DIRAC FileCatalog component representing a flat directory tree """
import os, types,stat
# import time
from DIRAC import S_OK, S_ERROR
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DirectoryTreeBase import DirectoryTreeBase
from DIRAC.Core.Utilities.List import stringListToString,intListToString
class DirectoryFlatTree(DirectoryTreeBase):
_tables = {}
_tables["DirectoryInfo"] = { "Fields": {
"DirID": "INTEGER AUTO_INCREMENT",
"Parent": "INTEGER NOT NULL",
"Status": "SMALLINT UNSIGNED NOT NULL DEFAULT 0",
"DirName": "VARCHAR(1024) NOT NULL",
"CreationDate": "DATETIME",
"ModificationDate": "DATETIME",
"UID": "CHAR(8) NOT NULL",
"GID": "CHAR(8) NOT NULL",
"Mode": "SMALLINT UNSIGNED NOT NULL DEFAULT 775"
},
"PrimaryKey": "DirID",
"Indexes": {
"Parent": ["Parent"],
"Status": ["Status"],
"DirName": ["DirName"]
}
}
def __init__(self,database=None):
DirectoryTreeBase.__init__(self,database)
self.treeTable = 'DirectoryInfo'
def getDirectoryCounters(self):
req = "SELECT COUNT(*) FROM DirectoryInfo"
res = self.db._query(req)
if not res['OK']:
return res
return S_OK({'DirectoryInfo':res['Value'][0][0]})
def _findDirectories(self,paths,metadata=[]):
""" Find file ID if it exists for the given list of LFNs """
#startTime = time.time()
successful = {}
failed = {}
req = "SELECT DirName,DirID"
if metadata:
req = "%s,%s" % (req,intListToString(metadata))
req = "%s FROM DirectoryInfo WHERE DirName IN (%s)" % (req,stringListToString(paths))
res = self.db._query(req)
if not res['OK']:
return res
for tuple_ in res['Value']:
dirName = tuple_[0]
dirID = tuple_[1]
metaDict = {'DirID':dirID}
metaDict.update(dict(zip(metadata,tuple_[2:])))
successful[dirName] = metaDict
for path in paths:
if not successful.has_key(path):
failed[path] = 'No such file or directory'
return S_OK({"Successful":successful,"Failed":failed})
def __findDirs(self,paths,metadata=['DirName']):
dirs = {}
req = "SELECT DirID,%s FROM DirectoryInfo WHERE DirName IN (%s)" % (intListToString(metadata),stringListToString(paths))
res = self.db._query(req)
if not res['OK']:
return res
if not res['Value']:
return S_OK(dirs)
for tuple_ in res['Value']:
dirID = tuple_[0]
dirs[dirID] = dict(zip(metadata,tuple_[1:]))
return S_OK(dirs)
def getPathPermissions(self,paths,credDict):
""" Get the permissions for the supplied paths """
res = self.db.ugManager.getUserAndGroupID(credDict)
if not res['OK']:
return res
uid,gid = res['Value']
res = self._findDirectories(paths,metadata=['Mode','UID','GID'])
if not res['OK']:
return res
successful = {}
for dirName,dirDict in res['Value']['Successful'].items():
mode = dirDict['Mode']
p_uid = dirDict['UID']
p_gid = dirDict['GID']
successful[dirName] = {}
if p_uid == uid:
successful[dirName]['Read'] = mode & stat.S_IRUSR
successful[dirName]['Write'] = mode & stat.S_IWUSR
successful[dirName]['Execute'] = mode & stat.S_IXUSR
elif p_gid == gid:
successful[dirName]['Read'] = mode & stat.S_IRGRP
successful[dirName]['Write'] = mode & stat.S_IWGRP
successful[dirName]['Execute'] = mode & stat.S_IXGRP
else:
successful[dirName]['Read'] = mode & stat.S_IROTH
successful[dirName]['Write'] = mode & stat.S_IWOTH
successful[dirName]['Execute'] = mode & stat.S_IXOTH
return S_OK({'Successful':successful,'Failed':res['Value']['Failed']})
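# Hedged illustration of the bit-mask logic above (the mode value is hypothetical):
# for mode = 0o750, the owner branch (p_uid == uid) evaluates to
#   Read    = 0o750 & stat.S_IRUSR  -> 0o400  (truthy)
#   Write   = 0o750 & stat.S_IWUSR  -> 0o200  (truthy)
#   Execute = 0o750 & stat.S_IXUSR  -> 0o100  (truthy)
# while the "other" branch (S_IROTH/S_IWOTH/S_IXOTH) yields 0 for all three,
# i.e. no access for unrelated users.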
def findDir(self,path):
res = self.__findDirs([path])
if not res['OK']:
return res
if not res['Value']:
return S_OK(0)
return S_OK(res['Value'].keys()[0])
def removeDir(self,path):
""" Remove directory """
res = self.findDir(path)
if not res['OK']:
return res
if not res['Value']:
return S_OK()
dirID = res['Value']
req = "DELETE FROM DirectoryInfo WHERE DirID=%d" % dirID
return self.db._update(req)
def makeDirectory(self,path,credDict,status=0):
"""Create a new directory. The return value is the dictionary containing all the parameters of the newly created directory """
if path[0] != '/':
return S_ERROR('Not an absolute path')
result = self.findDir(path)
if not result['OK']:
return result
if result['Value']:
return S_OK(result['Value'])
result = self.db.ugManager.getUserAndGroupID(credDict)
if not result['OK']:
return result
uid,gid = result['Value']
res = self.getParent(path)
if not res['OK']:
return res
parentID = res['Value']
req = "INSERT INTO DirectoryInfo (Parent,Status,DirName,UID,GID,Mode,CreationDate,ModificationDate)\
VALUES (%d,%d,'%s',%d,%d,%d,UTC_TIMESTAMP(),UTC_TIMESTAMP());" % (parentID,status,path,uid,gid,self.db.umask)
result = self.db._update(req)
if not result['OK']:
self.removeDir(path)
return S_ERROR('Failed to create directory %s' % path)
return S_OK(result['lastRowId'])
def makeDir(self,path):
result = self.findDir(path)
if not result['OK']:
return result
dirID = result['Value']
if dirID:
return S_OK(dirID)
names = ['DirName']
values = [path]
result = self.db._insert('DirectoryInfo',names,values)
if not result['OK']:
return result
return S_OK(result['lastRowId'])
def existsDir(self,path):
""" Check the existence of a directory at the specified path
"""
result = self.findDir(path)
if not result['OK']:
return result
if not result['Value']:
return S_OK({"Exists":False})
else:
return S_OK({"Exists":True,"DirID":result['Value']})
def getParent(self,path):
""" Get the parent ID of the given directory """
return self.findDir(os.path.dirname(path))
def getParentID(self,dirID):
""" Get the ID of the parent of a directory specified by ID
"""
if dirID == 0:
return S_ERROR('Root directory ID given')
req = "SELECT Parent FROM DirectoryInfo WHERE DirID=%d" % dirID
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('No parent found')
return S_OK(result['Value'][0][0])
def getDirectoryPath(self,dirID):
""" Get directory name by directory ID """
req = "SELECT DirName FROM DirectoryInfo WHERE DirID=%d" % int(dirID)
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Directory with id %d not found' % int(dirID) )
return S_OK(result['Value'][0][0])
def getDirectoryName(self,dirID):
""" Get directory name by directory ID """
result = self.getDirectoryPath(dirID)
if not result['OK']:
return result
return S_OK(os.path.basename(result['Value']))
def getPathIDs(self,path):
""" Get IDs of all the directories in the parent hierarchy """
elements = path.split('/')
pelements = []
dPath = ''
for el in elements[1:]:
dPath += '/'+el
pelements.append(dPath)
pathString = [ "'"+p+"'" for p in pelements ]
req = "SELECT DirID FROM DirectoryInfo WHERE DirName in (%s) ORDER BY DirID" % ','.join(pathString)
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_ERROR('Directory %s not found' % path)
return S_OK([ x[0] for x in result['Value'] ])
def getChildren(self,path):
""" Get child directory IDs for the given directory """
if type(path) in types.StringTypes:
result = self.findDir(path)
if not result['OK']:
return result
dirID = result['Value']
else:
dirID = path
req = "SELECT DirID FROM DirectoryInfo WHERE Parent=%d" % dirID
result = self.db._query(req)
if not result['OK']:
return result
if not result['Value']:
return S_OK([])
return S_OK([ x[0] for x in result['Value'] ])
|
Sbalbp/DIRAC
|
DataManagementSystem/DB/FileCatalogComponents/DirectoryFlatTree.py
|
Python
|
gpl-3.0
| 9,231
|
[
"DIRAC"
] |
179544fd80b98802aa33173b85503a2d0b66fede7e49fb1788fe9ac8eb80564c
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions shared among files under word2act/data_generation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import attr
from enum import Enum
import numpy as np
import tensorflow.compat.v1 as tf # tf
from seq2act.data_generation import config
from seq2act.data_generation import view_hierarchy
gfile = tf.gfile
@attr.s
class MaxValues(object):
"""Represents max values for a task and UI."""
# For instruction
max_word_num = attr.ib(default=None)
max_word_length = attr.ib(default=None)
# For UI objects
max_ui_object_num = attr.ib(default=None)
max_ui_object_word_num = attr.ib(default=None)
max_ui_object_word_length = attr.ib(default=None)
def update(self, other):
"""Update max value from another MaxValues instance.
This will be used when we want to merge several MaxValues instances:
max_values_list = ...
result = MaxValues()
for v in max_values_list:
result.update(v)
Then `result` contains merged max values in each field.
Args:
other: another MaxValues instance, contains updated data.
"""
self.max_word_num = max(self.max_word_num, other.max_word_num)
self.max_word_length = max(self.max_word_length, other.max_word_length)
self.max_ui_object_num = max(self.max_ui_object_num,
other.max_ui_object_num)
self.max_ui_object_word_num = max(self.max_ui_object_word_num,
other.max_ui_object_word_num)
self.max_ui_object_word_length = max(self.max_ui_object_word_length,
other.max_ui_object_word_length)
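# A minimal, hedged usage sketch of the merge pattern described in
# MaxValues.update() above. The field values are made up, and the result
# instance is zero-filled explicitly because the attr fields default to None
# (an assumption about how callers are expected to seed the accumulator).
def _merge_max_values_example():
  a = MaxValues(max_word_num=3, max_word_length=7, max_ui_object_num=10,
                max_ui_object_word_num=4, max_ui_object_word_length=12)
  b = MaxValues(max_word_num=5, max_word_length=6, max_ui_object_num=8,
                max_ui_object_word_num=9, max_ui_object_word_length=11)
  result = MaxValues(max_word_num=0, max_word_length=0, max_ui_object_num=0,
                     max_ui_object_word_num=0, max_ui_object_word_length=0)
  for v in [a, b]:
    result.update(v)
  return result  # field-wise maxima: 5, 7, 10, 9, 12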
class ActionRules(Enum):
"""The rule_id to generate synthetic action."""
SINGLE_OBJECT_RULE = 0
GRID_CONTEXT_RULE = 1
NEIGHBOR_CONTEXT_RULE = 2
SWIPE_TO_OBJECT_RULE = 3
SWIPE_TO_DIRECTION_RULE = 4
REAL = 5 # The action is not generated, but a real user action.
CROWD_COMPUTE = 6
DIRECTION_VERB_RULE = 7 # For win, "click button under some tab/combobox"
CONSUMED_MULTI_STEP = 8 # For win, if the target verb is not direction_verb
UNCONSUMED_MULTI_STEP = 9
NO_VERB_RULE = 10
class ActionTypes(Enum):
"""The action types and ids of Android actions."""
CLICK = 2
INPUT = 3
SWIPE = 4
CHECK = 5
UNCHECK = 6
LONG_CLICK = 7
OTHERS = 8
GO_HOME = 9
GO_BACK = 10
VERB_ID_MAP = {
'check': ActionTypes.CHECK,
'find': ActionTypes.SWIPE,
'navigate': ActionTypes.SWIPE,
'uncheck': ActionTypes.UNCHECK,
'head to': ActionTypes.SWIPE,
'enable': ActionTypes.CHECK,
'turn on': ActionTypes.CHECK,
'locate': ActionTypes.SWIPE,
'disable': ActionTypes.UNCHECK,
'tap and hold': ActionTypes.LONG_CLICK,
'long press': ActionTypes.LONG_CLICK,
'look': ActionTypes.SWIPE,
'press and hold': ActionTypes.LONG_CLICK,
'turn it on': ActionTypes.CHECK,
'turn off': ActionTypes.UNCHECK,
'switch on': ActionTypes.CHECK,
'visit': ActionTypes.SWIPE,
'hold': ActionTypes.LONG_CLICK,
'switch off': ActionTypes.UNCHECK,
'head': ActionTypes.SWIPE,
'head over': ActionTypes.SWIPE,
'long-press': ActionTypes.LONG_CLICK,
'un-click': ActionTypes.UNCHECK,
'tap': ActionTypes.CLICK,
'check off': ActionTypes.UNCHECK,
# 'power on': 21
}
class WinActionTypes(Enum):
"""The action types and ids of windows actions."""
LEFT_CLICK = 2
RIGHT_CLICK = 3
DOUBLE_CLICK = 4
INPUT = 5
@attr.s
class Action(object):
"""The class for a word2act action."""
instruction_str = attr.ib(default=None)
verb_str = attr.ib(default=None)
obj_desc_str = attr.ib(default=None)
input_content_str = attr.ib(default=None)
action_type = attr.ib(default=None)
action_rule = attr.ib(default=None)
target_obj_idx = attr.ib(default=None)
obj_str_pos = attr.ib(default=None)
input_str_pos = attr.ib(default=None)
verb_str_pos = attr.ib(default=None)
# start/end position of one whole step
step_str_pos = attr.ib(default=[0, 0])
# Default action is 1-step consumed action
is_consumed = attr.ib(default=True)
def __eq__(self, other):
if not isinstance(other, Action):
return NotImplemented
return self.instruction_str == other.instruction_str
def is_valid(self):
"""Does valid check for action instance.
Returns true when any component is None or obj_desc_str is all spaces.
Returns:
a boolean
"""
invalid_obj_pos = (np.array(self.obj_str_pos) == 0).all()
if (not self.instruction_str or invalid_obj_pos or
not self.obj_desc_str.strip()):
return False
return True
def has_valid_input(self):
"""Does valid check for input positions.
Returns true when input_str_pos is not all default value.
Returns:
a boolean
"""
return (self.input_str_pos != np.array([
config.LABEL_DEFAULT_VALUE_INT, config.LABEL_DEFAULT_VALUE_INT
])).any()
def regularize_strs(self):
"""Trims action instance's obj_desc_str, input_content_str, verb_str."""
self.obj_desc_str = self.obj_desc_str.strip()
self.input_content_str = self.input_content_str.strip()
self.verb_str = self.verb_str.strip()
def convert_to_lower_case(self):
self.instruction_str = self.instruction_str.lower()
self.obj_desc_str = self.obj_desc_str.lower()
self.input_content_str = self.input_content_str.lower()
self.verb_str = self.verb_str.lower()
@attr.s
class ActionEvent(object):
"""This class defines ActionEvent class.
ActionEvent is high level event summarized from low level android event logs.
This example shows the android event logs and the extracted ActionEvent
object:
Android Event Logs:
[ 42.407808] EV_ABS ABS_MT_TRACKING_ID 00000000
[ 42.407808] EV_ABS ABS_MT_TOUCH_MAJOR 00000004
[ 42.407808] EV_ABS ABS_MT_PRESSURE 00000081
[ 42.407808] EV_ABS ABS_MT_POSITION_X 00004289
[ 42.407808] EV_ABS ABS_MT_POSITION_Y 00007758
[ 42.407808] EV_SYN SYN_REPORT 00000000
[ 42.453256] EV_ABS ABS_MT_PRESSURE 00000000
[ 42.453256] EV_ABS ABS_MT_TRACKING_ID ffffffff
[ 42.453256] EV_SYN SYN_REPORT 00000000
This log can be generated from this command while running the android emulator:
adb shell getevent -lt /dev/input/event1
If screen pixel size is [480,800], this is the extracted ActionEvent Object:
ActionEvent(
event_time = 42.407808
action_type = ActionTypes.CLICK
action_object_id = -1
coordinates_x = [17033,]
coordinates_y = [30552,]
coordinates_x_pixel = [249,]
coordinates_y_pixel = [747,]
action_params = []
)
"""
event_time = attr.ib()
action_type = attr.ib()
coordinates_x = attr.ib()
coordinates_y = attr.ib()
action_params = attr.ib()
# These fields will be generated by public method update_info_from_screen()
coordinates_x_pixel = None
coordinates_y_pixel = None
object_id = config.LABEL_DEFAULT_INVALID_INT
leaf_nodes = None # If dedup, the nodes here will be fewer than in the XML
debug_target_object_word_sequence = None
def update_info_from_screen(self, screen_info, dedup=False):
"""Updates action event attributes from screen_info.
Updates coordinates_x(y)_pixel and object_id from the screen_info proto.
Args:
screen_info: ScreenInfo protobuf
dedup: whether to dedup UI objects with the same text or content desc.
Raises:
ValueError when the object id cannot be found.
"""
self.update_norm_coordinates((config.SCREEN_WIDTH, config.SCREEN_HEIGHT))
vh = view_hierarchy.ViewHierarchy()
vh.load_xml(screen_info.view_hierarchy.xml.encode('utf-8'))
if dedup:
vh.dedup((self.coordinates_x_pixel[0], self.coordinates_y_pixel[0]))
self.leaf_nodes = vh.get_leaf_nodes()
ui_object_list = vh.get_ui_objects()
self._update_object_id(ui_object_list)
def _update_object_id(self, ui_object_list):
"""Updates ui object index from view_hierarchy.
If point (X,Y) is surrounded by multiple UI objects, select the one with the
smallest area.
Args:
ui_object_list: list of UI objects parsed from the view hierarchy.
Raises:
ValueError when the object id cannot be found.
"""
smallest_area = -1
for index, ui_obj in enumerate(ui_object_list):
box = ui_obj.bounding_box
if (box.x1 <= self.coordinates_x_pixel[0] <= box.x2 and
box.y1 <= self.coordinates_y_pixel[0] <= box.y2):
area = (box.x2 - box.x1) * (box.y2 - box.y1)
if smallest_area == -1 or area < smallest_area:
self.object_id = index
self.debug_target_object_word_sequence = ui_obj.word_sequence
smallest_area = area
if smallest_area == -1:
raise ValueError(('Object id not found: x,y=%d,%d coordinates fail to '
'match every UI bounding box') %
(self.coordinates_x_pixel[0],
self.coordinates_y_pixel[0]))
def update_norm_coordinates(self, screen_size):
"""Update coordinates_x(y)_norm according to screen_size.
self.coordinate_x is scaled between [0, ANDROID_LOG_MAX_ABS_X]
self.coordinate_y is scaled between [0, ANDROID_LOG_MAX_ABS_Y]
This function converts coordinates from the android event logs back to pixel
coordinates on the real screen.
coordinates_x_pixel = coordinates_x/ANDROID_LOG_MAX_ABS_X*horizontal_pixel
coordinates_y_pixel = coordinates_y/ANDROID_LOG_MAX_ABS_Y*vertical_pixel
For example,
ANDROID_LOG_MAX_ABS_X = ANDROID_LOG_MAX_ABS_Y = 32676
coordinate_x = [17033, ]
object_cords_y = [30552, ]
screen_size = (480, 800)
Then the updated pixel coordinates are as follow:
coordinates_x_pixel = [250, ]
coordinates_y_pixel = [747, ]
Args:
screen_size: a tuple of screen pixel size.
"""
(horizontal_pixel, vertical_pixel) = screen_size
self.coordinates_x_pixel = [
int(cord * horizontal_pixel / config.ANDROID_LOG_MAX_ABS_X)
for cord in self.coordinates_x
]
self.coordinates_y_pixel = [
int(cord * vertical_pixel / config.ANDROID_LOG_MAX_ABS_Y)
for cord in self.coordinates_y
]
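# A minimal sketch of the normalized-to-pixel conversion performed by
# update_norm_coordinates(). The 32676 max-abs value and the 480x800 screen
# size are taken from the docstring examples above and are assumptions here;
# the real values come from config.ANDROID_LOG_MAX_ABS_X/Y and the device.
def _norm_to_pixel_example():
  android_log_max_abs = 32676
  screen_width, screen_height = 480, 800
  x_norm, y_norm = 17033, 30552
  x_pixel = int(x_norm * screen_width / android_log_max_abs)   # -> 250
  y_pixel = int(y_norm * screen_height / android_log_max_abs)  # -> 747
  return x_pixel, y_pixel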
# For Debug: Get distribution info for each case
word_num_distribution_dict = collections.defaultdict(int)
word_length_distribution_dict = collections.defaultdict(int)
def get_word_statistics(file_path):
"""Calculates maximum word number/length from ui objects in one xml/json file.
Args:
file_path: The full path of a xml/json file.
Returns:
A tuple (ui_object_num, max_word_num, max_word_length)
ui_object_num: UI object num.
max_word_num: The maximum number of words contained in all ui objects.
max_word_length: The maximum length of words contained in all ui objects.
"""
max_word_num = 0
max_word_length = 0
leaf_nodes = get_view_hierarchy_list(file_path)
for view_hierarchy_object in leaf_nodes:
word_sequence = view_hierarchy_object.uiobject.word_sequence
max_word_num = max(max_word_num, len(word_sequence))
word_num_distribution_dict[len(word_sequence)] += 1
for word in word_sequence:
max_word_length = max(max_word_length, len(word))
word_length_distribution_dict[len(word)] += 1
return len(leaf_nodes), max_word_num, max_word_length
def get_ui_max_values(file_paths):
"""Calculates max values from ui objects in multi xml/json files.
Args:
file_paths: The full paths of multi xml/json files.
Returns:
max_values: instrance of MaxValues.
"""
max_values = MaxValues()
for file_path in file_paths:
(ui_object_num,
max_ui_object_word_num,
max_ui_object_word_length) = get_word_statistics(file_path)
max_values.max_ui_object_num = max(
max_values.max_ui_object_num, ui_object_num)
max_values.max_ui_object_word_num = max(
max_values.max_ui_object_word_num, max_ui_object_word_num)
max_values.max_ui_object_word_length = max(
max_values.max_ui_object_word_length, max_ui_object_word_length)
return max_values
def get_ui_object_list(file_path):
"""Gets ui object list from view hierarchy leaf nodes.
Args:
file_path: file path of xml or json
Returns:
A list of ui objects according to view hierarchy leaf nodes.
"""
vh = _get_view_hierachy(file_path)
return vh.get_ui_objects()
def get_view_hierarchy_list(file_path):
"""Gets view hierarchy leaf node list.
Args:
file_path: file path of xml or json
Returns:
A list of view hierarchy leaf nodes.
"""
vh = _get_view_hierachy(file_path)
return vh.get_leaf_nodes()
def _get_view_hierachy(file_path):
"""Gets leaf nodes view hierarchy lists.
Args:
file_path: The full path of an input xml/json file.
Returns:
A ViewHierarchy object.
Raises:
ValueError: unsupported file format.
"""
with gfile.GFile(file_path, 'r') as f:
data = f.read()
_, file_extension = os.path.splitext(file_path)
if file_extension == '.xml':
vh = view_hierarchy.ViewHierarchy(
screen_width=config.SCREEN_WIDTH, screen_height=config.SCREEN_HEIGHT)
vh.load_xml(data)
elif file_extension == '.json':
vh = view_hierarchy.ViewHierarchy(
screen_width=config.RICO_SCREEN_WIDTH,
screen_height=config.RICO_SCREEN_HEIGHT)
vh.load_json(data)
else:
raise ValueError('unsupported file format %s' % file_extension)
return vh
|
google-research/google-research
|
seq2act/data_generation/common.py
|
Python
|
apache-2.0
| 14,083
|
[
"VisIt"
] |
bfcc0bc134a1de499569cbcf032e4706dd1a443ab87b23149dbcc8357a01201b
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Andi Albrecht, albrecht.andi@gmail.com
#
# This setup script is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import re
from setuptools import setup, find_packages
def get_version():
"""Parse __init__.py for version number instead of importing the file."""
VERSIONFILE = 'sqlparse/__init__.py'
VSRE = r'^__version__ = [\'"]([^\'"]*)[\'"]'
with open(VERSIONFILE) as f:
verstrline = f.read()
mo = re.search(VSRE, verstrline, re.M)
if mo:
return mo.group(1)
raise RuntimeError('Unable to find version in {fn}'.format(fn=VERSIONFILE))
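# A small, hedged illustration of the version-extraction regex above; not part
# of the original setup script, and the sample string is hypothetical.
def _version_regex_example():
    sample = "__version__ = '0.2.0'"  # made-up stand-in for sqlparse/__init__.py
    mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', sample, re.M)
    return mo.group(1)  # -> '0.2.0'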
LONG_DESCRIPTION = """
``sqlparse`` is a non-validating SQL parser module.
It provides support for parsing, splitting and formatting SQL statements.
Visit the `project page <https://github.com/andialbrecht/sqlparse>`_ for
additional information and documentation.
**Example Usage**
Splitting SQL statements::
>>> import sqlparse
>>> sqlparse.split('select * from foo; select * from bar;')
[u'select * from foo; ', u'select * from bar;']
Formatting statements::
>>> sql = 'select * from foo where id in (select id from bar);'
>>> print sqlparse.format(sql, reindent=True, keyword_case='upper')
SELECT *
FROM foo
WHERE id IN
(SELECT id
FROM bar);
Parsing::
>>> sql = 'select * from someschema.mytable where id = 1'
>>> res = sqlparse.parse(sql)
>>> res
(<Statement 'select...' at 0x9ad08ec>,)
>>> stmt = res[0]
>>> str(stmt) # converting it back to unicode
'select * from someschema.mytable where id = 1'
>>> # This is what the internal representation looks like:
>>> stmt.tokens
(<DML 'select' at 0x9b63c34>,
<Whitespace ' ' at 0x9b63e8c>,
<Operator '*' at 0x9b63e64>,
<Whitespace ' ' at 0x9b63c5c>,
<Keyword 'from' at 0x9b63c84>,
<Whitespace ' ' at 0x9b63cd4>,
<Identifier 'somes...' at 0x9b5c62c>,
<Whitespace ' ' at 0x9b63f04>,
<Where 'where ...' at 0x9b5caac>)
"""
setup(
name='sqlparse',
version=get_version(),
author='Andi Albrecht',
author_email='albrecht.andi@gmail.com',
url='https://github.com/andialbrecht/sqlparse',
description='Non-validating SQL parser',
long_description=LONG_DESCRIPTION,
license='BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Database',
'Topic :: Software Development',
],
packages=find_packages(exclude=('tests',)),
entry_points={
'console_scripts': [
'sqlformat = sqlparse.__main__:main',
]
},
)
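# Typical (hedged) ways to exercise this setup script from the project root;
# these are standard Python packaging commands, not anything sqlparse-specific:
#   pip install .            # install the package
#   python setup.py sdist    # build a source distribution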
|
vmuriart/sqlparse
|
setup.py
|
Python
|
bsd-3-clause
| 3,154
|
[
"VisIt"
] |
e25bf683d4b13bb56e494663c95c758075ee274667c47ec10e12758da1312df0
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PTransform and descendants.
A PTransform is an object describing (not executing) a computation. The actual
execution semantics for a transform is captured by a runner object. A transform
object always belongs to a pipeline object.
A PTransform derived class needs to define the expand() method that describes
how one or more PValues are created by the transform.
The module defines a few standard transforms: FlatMap (parallel do),
GroupByKey (group by key), etc. Note that the expand() methods for these
classes contain code that will add nodes to the processing graph associated
with a pipeline.
As support for the FlatMap transform, the module also defines a DoFn
class and wrapper class that allows lambda functions to be used as
FlatMap processing functions.
"""
# pytype: skip-file
from __future__ import absolute_import
import copy
import itertools
import operator
import os
import sys
import threading
from builtins import hex
from builtins import object
from builtins import zip
from functools import reduce
from functools import wraps
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
from typing import overload
from google.protobuf import message
from apache_beam import error
from apache_beam import pvalue
from apache_beam.internal import pickler
from apache_beam.internal import util
from apache_beam.portability import python_urns
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.transforms.display import HasDisplayData
from apache_beam.typehints import native_type_compatibility
from apache_beam.typehints import typehints
from apache_beam.typehints.decorators import TypeCheckError
from apache_beam.typehints.decorators import WithTypeHints
from apache_beam.typehints.decorators import get_signature
from apache_beam.typehints.decorators import getcallargs_forhints
from apache_beam.typehints.trivial_inference import instance_to_type
from apache_beam.typehints.typehints import validate_composite_type_param
from apache_beam.utils import proto_utils
if TYPE_CHECKING:
from apache_beam import coders
from apache_beam.pipeline import Pipeline
from apache_beam.runners.pipeline_context import PipelineContext
from apache_beam.transforms.core import Windowing
from apache_beam.portability.api import beam_runner_api_pb2
__all__ = [
'PTransform',
'ptransform_fn',
'label_from_callable',
]
T = TypeVar('T')
PTransformT = TypeVar('PTransformT', bound='PTransform')
ConstructorFn = Callable[
['beam_runner_api_pb2.PTransform', Optional[Any], 'PipelineContext'], Any]
class _PValueishTransform(object):
"""Visitor for PValueish objects.
A PValueish is a PValue, or list, tuple, dict of PValueish objects.
This visits a PValueish, constructing a (possibly mutated) copy.
"""
def visit_nested(self, node, *args):
if isinstance(node, (tuple, list)):
args = [self.visit(x, *args) for x in node]
if isinstance(node, tuple) and hasattr(node.__class__, '_make'):
# namedtuples require unpacked arguments in their constructor
return node.__class__(*args)
else:
return node.__class__(args)
elif isinstance(node, dict):
return node.__class__(
{key: self.visit(value, *args)
for (key, value) in node.items()})
else:
return node
class _SetInputPValues(_PValueishTransform):
def visit(self, node, replacements):
if id(node) in replacements:
return replacements[id(node)]
else:
return self.visit_nested(node, replacements)
# Caches to allow for materialization of values when executing a pipeline
# in-process, in eager mode. This cache allows the same _MaterializedResult
# object to be accessed and used despite Runner API round-trip serialization.
_pipeline_materialization_cache = {}
_pipeline_materialization_lock = threading.Lock()
def _allocate_materialized_pipeline(pipeline):
pid = os.getpid()
with _pipeline_materialization_lock:
pipeline_id = id(pipeline)
_pipeline_materialization_cache[(pid, pipeline_id)] = {}
def _allocate_materialized_result(pipeline):
pid = os.getpid()
with _pipeline_materialization_lock:
pipeline_id = id(pipeline)
if (pid, pipeline_id) not in _pipeline_materialization_cache:
raise ValueError(
'Materialized pipeline is not allocated for result '
'cache.')
result_id = len(_pipeline_materialization_cache[(pid, pipeline_id)])
result = _MaterializedResult(pipeline_id, result_id)
_pipeline_materialization_cache[(pid, pipeline_id)][result_id] = result
return result
def _get_materialized_result(pipeline_id, result_id):
pid = os.getpid()
with _pipeline_materialization_lock:
if (pid, pipeline_id) not in _pipeline_materialization_cache:
raise Exception(
'Materialization in out-of-process and remote runners is not yet '
'supported.')
return _pipeline_materialization_cache[(pid, pipeline_id)][result_id]
def _release_materialized_pipeline(pipeline):
pid = os.getpid()
with _pipeline_materialization_lock:
pipeline_id = id(pipeline)
del _pipeline_materialization_cache[(pid, pipeline_id)]
class _MaterializedResult(object):
def __init__(self, pipeline_id, result_id):
self._pipeline_id = pipeline_id
self._result_id = result_id
self.elements = []
def __reduce__(self):
# When unpickled (during Runner API roundtrip serialization), get the
# _MaterializedResult object from the cache so that values are written
# to the original _MaterializedResult when run in eager mode.
return (_get_materialized_result, (self._pipeline_id, self._result_id))
class _MaterializedDoOutputsTuple(pvalue.DoOutputsTuple):
def __init__(self, deferred, results_by_tag):
super(_MaterializedDoOutputsTuple,
self).__init__(None, None, deferred._tags, deferred._main_tag)
self._deferred = deferred
self._results_by_tag = results_by_tag
def __getitem__(self, tag):
if tag not in self._results_by_tag:
raise KeyError(
'Tag %r is not a defined output tag of %s.' % (tag, self._deferred))
return self._results_by_tag[tag].elements
class _AddMaterializationTransforms(_PValueishTransform):
def _materialize_transform(self, pipeline):
result = _allocate_materialized_result(pipeline)
# Need to define _MaterializeValuesDoFn here to avoid circular
# dependencies.
from apache_beam import DoFn
from apache_beam import ParDo
class _MaterializeValuesDoFn(DoFn):
def process(self, element):
result.elements.append(element)
materialization_label = '_MaterializeValues%d' % result._result_id
return (materialization_label >> ParDo(_MaterializeValuesDoFn()), result)
def visit(self, node):
if isinstance(node, pvalue.PValue):
transform, result = self._materialize_transform(node.pipeline)
node | transform
return result
elif isinstance(node, pvalue.DoOutputsTuple):
results_by_tag = {}
for tag in itertools.chain([node._main_tag], node._tags):
results_by_tag[tag] = self.visit(node[tag])
return _MaterializedDoOutputsTuple(node, results_by_tag)
else:
return self.visit_nested(node)
class _FinalizeMaterialization(_PValueishTransform):
def visit(self, node):
if isinstance(node, _MaterializedResult):
return node.elements
elif isinstance(node, _MaterializedDoOutputsTuple):
return node
else:
return self.visit_nested(node)
class _GetPValues(_PValueishTransform):
def visit(self, node, pvalues):
if isinstance(node, (pvalue.PValue, pvalue.DoOutputsTuple)):
pvalues.append(node)
else:
self.visit_nested(node, pvalues)
def get_nested_pvalues(pvalueish):
pvalues = []
_GetPValues().visit(pvalueish, pvalues)
return pvalues
def get_named_nested_pvalues(pvalueish):
if isinstance(pvalueish, tuple):
# Check to see if it's a named tuple.
fields = getattr(pvalueish, '_fields', None)
if fields and len(fields) == len(pvalueish):
tagged_values = zip(fields, pvalueish)
else:
tagged_values = enumerate(pvalueish)
elif isinstance(pvalueish, list):
tagged_values = enumerate(pvalueish)
elif isinstance(pvalueish, dict):
tagged_values = pvalueish.items()
else:
if isinstance(pvalueish, (pvalue.PValue, pvalue.DoOutputsTuple)):
yield None, pvalueish
return
for tag, subvalue in tagged_values:
for subtag, subsubvalue in get_named_nested_pvalues(subvalue):
if subtag is None:
yield tag, subsubvalue
else:
yield '%s.%s' % (tag, subtag), subsubvalue
class _ZipPValues(object):
"""Pairs each PValue in a pvalueish with a value in a parallel out sibling.
Sibling should have the same nested structure as pvalueish. Leaves in
sibling are expanded across nested pvalueish lists, tuples, and dicts.
For example
ZipPValues().visit({'a': pc1, 'b': (pc2, pc3)},
{'a': 'A', 'b': 'B'})
will return
[('a', pc1, 'A'), ('b', pc2, 'B'), ('b', pc3, 'B')]
"""
def visit(self, pvalueish, sibling, pairs=None, context=None):
if pairs is None:
pairs = []
self.visit(pvalueish, sibling, pairs, context)
return pairs
elif isinstance(pvalueish, (pvalue.PValue, pvalue.DoOutputsTuple)):
pairs.append((context, pvalueish, sibling))
elif isinstance(pvalueish, (list, tuple)):
self.visit_sequence(pvalueish, sibling, pairs, context)
elif isinstance(pvalueish, dict):
self.visit_dict(pvalueish, sibling, pairs, context)
def visit_sequence(self, pvalueish, sibling, pairs, context):
if isinstance(sibling, (list, tuple)):
for ix, (p, s) in enumerate(zip(pvalueish,
list(sibling) + [None] * len(pvalueish))):
self.visit(p, s, pairs, 'position %s' % ix)
else:
for p in pvalueish:
self.visit(p, sibling, pairs, context)
def visit_dict(self, pvalueish, sibling, pairs, context):
if isinstance(sibling, dict):
for key, p in pvalueish.items():
self.visit(p, sibling.get(key), pairs, key)
else:
for p in pvalueish.values():
self.visit(p, sibling, pairs, context)
class PTransform(WithTypeHints, HasDisplayData):
"""A transform object used to modify one or more PCollections.
Subclasses must define an expand() method that will be used when the transform
is applied to some arguments. Typical usage pattern will be:
input | CustomTransform(...)
The expand() method of the CustomTransform object passed in will be called
with input as an argument.
"""
# By default, transforms don't have any side inputs.
side_inputs = () # type: Sequence[pvalue.AsSideInput]
# Used for nullary transforms.
pipeline = None # type: Optional[Pipeline]
# Default is unset.
_user_label = None # type: Optional[str]
def __init__(self, label=None):
# type: (Optional[str]) -> None
super(PTransform, self).__init__()
self.label = label # type: ignore # https://github.com/python/mypy/issues/3004
@property
def label(self):
# type: () -> str
return self._user_label or self.default_label()
@label.setter
def label(self, value):
# type: (Optional[str]) -> None
self._user_label = value
def default_label(self):
# type: () -> str
return self.__class__.__name__
def with_input_types(self, input_type_hint):
"""Annotates the input type of a :class:`PTransform` with a type-hint.
Args:
input_type_hint (type): An instance of an allowed built-in type, a custom
class, or an instance of a
:class:`~apache_beam.typehints.typehints.TypeConstraint`.
Raises:
TypeError: If **input_type_hint** is not a valid type-hint.
See
:obj:`apache_beam.typehints.typehints.validate_composite_type_param()`
for further details.
Returns:
PTransform: A reference to the instance of this particular
:class:`PTransform` object. This allows chaining type-hinting related
methods.
"""
input_type_hint = native_type_compatibility.convert_to_beam_type(
input_type_hint)
validate_composite_type_param(
input_type_hint, 'Type hints for a PTransform')
return super(PTransform, self).with_input_types(input_type_hint)
def with_output_types(self, type_hint):
"""Annotates the output type of a :class:`PTransform` with a type-hint.
Args:
type_hint (type): An instance of an allowed built-in type, a custom class,
or a :class:`~apache_beam.typehints.typehints.TypeConstraint`.
Raises:
TypeError: If **type_hint** is not a valid type-hint. See
:obj:`~apache_beam.typehints.typehints.validate_composite_type_param()`
for further details.
Returns:
PTransform: A reference to the instance of this particular
:class:`PTransform` object. This allows chaining type-hinting related
methods.
"""
type_hint = native_type_compatibility.convert_to_beam_type(type_hint)
validate_composite_type_param(type_hint, 'Type hints for a PTransform')
return super(PTransform, self).with_output_types(type_hint)
def type_check_inputs(self, pvalueish):
self.type_check_inputs_or_outputs(pvalueish, 'input')
def infer_output_type(self, unused_input_type):
return self.get_type_hints().simple_output_type(self.label) or typehints.Any
def type_check_outputs(self, pvalueish):
self.type_check_inputs_or_outputs(pvalueish, 'output')
def type_check_inputs_or_outputs(self, pvalueish, input_or_output):
type_hints = self.get_type_hints()
hints = getattr(type_hints, input_or_output + '_types')
if hints is None or not any(hints):
return
arg_hints, kwarg_hints = hints
if arg_hints and kwarg_hints:
raise TypeCheckError(
'PTransform cannot have both positional and keyword type hints '
'without overriding %s._type_check_%s()' %
(self.__class__, input_or_output))
root_hint = (
arg_hints[0] if len(arg_hints) == 1 else arg_hints or kwarg_hints)
for context, pvalue_, hint in _ZipPValues().visit(pvalueish, root_hint):
if pvalue_.element_type is None:
# TODO(robertwb): It's a bug that we ever get here. (typecheck)
continue
if hint and not typehints.is_consistent_with(pvalue_.element_type, hint):
at_context = ' %s %s' % (input_or_output, context) if context else ''
raise TypeCheckError(
'{type} type hint violation at {label}{context}: expected {hint}, '
'got {actual_type}\nFull type hint:\n{debug_str}'.format(
type=input_or_output.title(),
label=self.label,
context=at_context,
hint=hint,
actual_type=pvalue_.element_type,
debug_str=type_hints.debug_str()))
def _infer_output_coder(self, input_type=None, input_coder=None):
# type: (...) -> Optional[coders.Coder]
"""Returns the output coder to use for output of this transform.
Note: this API is experimental and is subject to change; please do not rely
on behavior induced by this method.
The Coder returned here should not be wrapped in a WindowedValueCoder
wrapper.
Args:
input_type: An instance of an allowed built-in type, a custom class, or a
typehints.TypeConstraint for the input type, or None if not available.
input_coder: Coder object for encoding input to this PTransform, or None
if not available.
Returns:
Coder object for encoding output of this PTransform or None if unknown.
"""
# TODO(ccy): further refine this API.
return None
def _clone(self, new_label):
"""Clones the current transform instance under a new label."""
transform = copy.copy(self)
transform.label = new_label
return transform
def expand(self, input_or_inputs):
raise NotImplementedError
def __str__(self):
return '<%s>' % self._str_internal()
def __repr__(self):
return '<%s at %s>' % (self._str_internal(), hex(id(self)))
def _str_internal(self):
return '%s(PTransform)%s%s%s' % (
self.__class__.__name__,
' label=[%s]' % self.label if
(hasattr(self, 'label') and self.label) else '',
' inputs=%s' % str(self.inputs) if
(hasattr(self, 'inputs') and self.inputs) else '',
' side_inputs=%s' % str(self.side_inputs) if self.side_inputs else '')
def _check_pcollection(self, pcoll):
# type: (pvalue.PCollection) -> None
if not isinstance(pcoll, pvalue.PCollection):
raise error.TransformError('Expecting a PCollection argument.')
if not pcoll.pipeline:
raise error.TransformError('PCollection not part of a pipeline.')
def get_windowing(self, inputs):
# type: (Any) -> Windowing
"""Returns the window function to be associated with transform's output.
By default most transforms just return the windowing function associated
with the input PCollection (or the first input if several).
"""
# TODO(robertwb): Assert all input WindowFns compatible.
return inputs[0].windowing
def __rrshift__(self, label):
return _NamedPTransform(self, label)
def __or__(self, right):
"""Used to compose PTransforms, e.g., ptransform1 | ptransform2."""
if isinstance(right, PTransform):
return _ChainedPTransform(self, right)
return NotImplemented
def __ror__(self, left, label=None):
"""Used to apply this PTransform to non-PValues, e.g., a tuple."""
pvalueish, pvalues = self._extract_input_pvalues(left)
pipelines = [v.pipeline for v in pvalues if isinstance(v, pvalue.PValue)]
if pvalues and not pipelines:
deferred = False
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam import pipeline
from apache_beam.options.pipeline_options import PipelineOptions
# pylint: enable=wrong-import-order, wrong-import-position
p = pipeline.Pipeline('DirectRunner', PipelineOptions(sys.argv))
else:
if not pipelines:
if self.pipeline is not None:
p = self.pipeline
else:
raise ValueError(
'"%s" requires a pipeline to be specified '
'as there are no deferred inputs.' % self.label)
else:
p = self.pipeline or pipelines[0]
for pp in pipelines:
if p != pp:
raise ValueError(
'Mixing value from different pipelines not allowed.')
deferred = not getattr(p.runner, 'is_eager', False)
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.transforms.core import Create
# pylint: enable=wrong-import-order, wrong-import-position
replacements = {
id(v): p | 'CreatePInput%s' % ix >> Create(v, reshuffle=False)
for ix,
v in enumerate(pvalues)
if not isinstance(v, pvalue.PValue) and v is not None
}
pvalueish = _SetInputPValues().visit(pvalueish, replacements)
self.pipeline = p
result = p.apply(self, pvalueish, label)
if deferred:
return result
_allocate_materialized_pipeline(p)
materialized_result = _AddMaterializationTransforms().visit(result)
p.run().wait_until_finish()
_release_materialized_pipeline(p)
return _FinalizeMaterialization().visit(materialized_result)
def _extract_input_pvalues(self, pvalueish):
"""Extract all the pvalues contained in the input pvalueish.
Returns pvalueish as well as the flat inputs list, since the input may have to
be copied because inspection may be destructive.
By default, recursively extracts tuple components and dict values.
Generally only needs to be overridden for multi-input PTransforms.
"""
# pylint: disable=wrong-import-order
from apache_beam import pipeline
# pylint: enable=wrong-import-order
if isinstance(pvalueish, pipeline.Pipeline):
pvalueish = pvalue.PBegin(pvalueish)
def _dict_tuple_leaves(pvalueish):
if isinstance(pvalueish, tuple):
for a in pvalueish:
for p in _dict_tuple_leaves(a):
yield p
elif isinstance(pvalueish, dict):
for a in pvalueish.values():
for p in _dict_tuple_leaves(a):
yield p
else:
yield pvalueish
return pvalueish, tuple(_dict_tuple_leaves(pvalueish))
def _pvaluish_from_dict(self, input_dict):
if len(input_dict) == 1:
return next(iter(input_dict.values()))
else:
return input_dict
_known_urns = {} # type: Dict[str, Tuple[Optional[type], ConstructorFn]]
@classmethod
@overload
def register_urn(
cls,
urn, # type: str
parameter_type, # type: Type[T]
):
# type: (...) -> Callable[[Union[type, Callable[[beam_runner_api_pb2.PTransform, T, PipelineContext], Any]]], Callable[[T, PipelineContext], Any]]
pass
@classmethod
@overload
def register_urn(
cls,
urn, # type: str
parameter_type, # type: None
):
# type: (...) -> Callable[[Union[type, Callable[[beam_runner_api_pb2.PTransform, bytes, PipelineContext], Any]]], Callable[[bytes, PipelineContext], Any]]
pass
@classmethod
@overload
def register_urn(cls,
urn, # type: str
parameter_type, # type: Type[T]
constructor # type: Callable[[beam_runner_api_pb2.PTransform, T, PipelineContext], Any]
):
# type: (...) -> None
pass
@classmethod
@overload
def register_urn(cls,
urn, # type: str
parameter_type, # type: None
constructor # type: Callable[[beam_runner_api_pb2.PTransform, bytes, PipelineContext], Any]
):
# type: (...) -> None
pass
@classmethod
def register_urn(cls, urn, parameter_type, constructor=None):
def register(constructor):
if isinstance(constructor, type):
constructor.from_runner_api_parameter = register(
constructor.from_runner_api_parameter)
else:
cls._known_urns[urn] = parameter_type, constructor
return constructor
if constructor:
# Used as a statement.
register(constructor)
else:
# Used as a decorator.
return register
def to_runner_api(self, context, has_parts=False, **extra_kwargs):
# type: (PipelineContext, bool, Any) -> beam_runner_api_pb2.FunctionSpec
from apache_beam.portability.api import beam_runner_api_pb2
urn, typed_param = self.to_runner_api_parameter(context, **extra_kwargs)
if urn == python_urns.GENERIC_COMPOSITE_TRANSFORM and not has_parts:
# TODO(BEAM-3812): Remove this fallback.
urn, typed_param = self.to_runner_api_pickled(context)
return beam_runner_api_pb2.FunctionSpec(
urn=urn,
payload=typed_param.SerializeToString() if isinstance(
typed_param, message.Message) else typed_param.encode('utf-8')
if isinstance(typed_param, str) else typed_param)
@classmethod
def from_runner_api(cls,
proto, # type: Optional[beam_runner_api_pb2.PTransform]
context # type: PipelineContext
):
# type: (...) -> Optional[PTransform]
if proto is None or proto.spec is None or not proto.spec.urn:
return None
parameter_type, constructor = cls._known_urns[proto.spec.urn]
try:
return constructor(
proto,
proto_utils.parse_Bytes(proto.spec.payload, parameter_type),
context)
except Exception:
if context.allow_proto_holders:
# For external transforms we cannot build a Python ParDo object so
# we build a holder transform instead.
from apache_beam.transforms.core import RunnerAPIPTransformHolder
return RunnerAPIPTransformHolder(proto.spec, context)
raise
def to_runner_api_parameter(
self,
unused_context # type: PipelineContext
):
# type: (...) -> Tuple[str, Optional[Union[message.Message, bytes, str]]]
# The payload here is just to ease debugging.
return (
python_urns.GENERIC_COMPOSITE_TRANSFORM,
getattr(self, '_fn_api_payload', str(self)))
def to_runner_api_pickled(self, unused_context):
# type: (PipelineContext) -> Tuple[str, bytes]
return (python_urns.PICKLED_TRANSFORM, pickler.dumps(self))
def runner_api_requires_keyed_input(self):
return False
@PTransform.register_urn(python_urns.GENERIC_COMPOSITE_TRANSFORM, None)
def _create_transform(unused_ptransform, payload, unused_context):
empty_transform = PTransform()
empty_transform._fn_api_payload = payload
return empty_transform
@PTransform.register_urn(python_urns.PICKLED_TRANSFORM, None)
def _unpickle_transform(unused_ptransform, pickled_bytes, unused_context):
return pickler.loads(pickled_bytes)
class _ChainedPTransform(PTransform):
def __init__(self, *parts):
# type: (*PTransform) -> None
super(_ChainedPTransform, self).__init__(label=self._chain_label(parts))
self._parts = parts
def _chain_label(self, parts):
return '|'.join(p.label for p in parts)
def __or__(self, right):
if isinstance(right, PTransform):
# Create a flat list rather than a nested tree of composite
# transforms for better monitoring, etc.
return _ChainedPTransform(*(self._parts + (right, )))
return NotImplemented
def expand(self, pval):
return reduce(operator.or_, self._parts, pval)
class PTransformWithSideInputs(PTransform):
"""A superclass for any :class:`PTransform` (e.g.
:func:`~apache_beam.transforms.core.FlatMap` or
:class:`~apache_beam.transforms.core.CombineFn`)
invoking user code.
:class:`PTransform` s like :func:`~apache_beam.transforms.core.FlatMap`
invoke user-supplied code in some kind of package (e.g. a
:class:`~apache_beam.transforms.core.DoFn`) and optionally provide arguments
and side inputs to that code. This internal-use-only class contains common
functionality for :class:`PTransform` s that fit this model.
"""
def __init__(self, fn, *args, **kwargs):
# type: (WithTypeHints, *Any, **Any) -> None
if isinstance(fn, type) and issubclass(fn, WithTypeHints):
# Don't treat Fn class objects as callables.
raise ValueError('Use %s() not %s.' % (fn.__name__, fn.__name__))
self.fn = self.make_fn(fn, bool(args or kwargs))
# Now that we figure out the label, initialize the super-class.
super(PTransformWithSideInputs, self).__init__()
if (any([isinstance(v, pvalue.PCollection) for v in args]) or
any([isinstance(v, pvalue.PCollection) for v in kwargs.values()])):
raise error.SideInputError(
'PCollection used directly as side input argument. Specify '
'AsIter(pcollection) or AsSingleton(pcollection) to indicate how the '
'PCollection is to be used.')
self.args, self.kwargs, self.side_inputs = util.remove_objects_from_args(
args, kwargs, pvalue.AsSideInput)
self.raw_side_inputs = args, kwargs
# Prevent name collisions with fns of the form '<function <lambda> at ...>'
self._cached_fn = self.fn
# Ensure fn and side inputs are picklable for remote execution.
try:
self.fn = pickler.loads(pickler.dumps(self.fn))
except RuntimeError as e:
raise RuntimeError('Unable to pickle fn %s: %s' % (self.fn, e))
self.args = pickler.loads(pickler.dumps(self.args))
self.kwargs = pickler.loads(pickler.dumps(self.kwargs))
# For type hints, because loads(dumps(class)) != class.
self.fn = self._cached_fn
def with_input_types(
self, input_type_hint, *side_inputs_arg_hints, **side_input_kwarg_hints):
"""Annotates the types of main inputs and side inputs for the PTransform.
Args:
input_type_hint: An instance of an allowed built-in type, a custom class,
or an instance of a typehints.TypeConstraint.
*side_inputs_arg_hints: A variable length argument composed of
an allowed built-in type, a custom class, or a
typehints.TypeConstraint.
**side_input_kwarg_hints: A dictionary argument composed of
an allowed built-in type, a custom class, or a
typehints.TypeConstraint.
Example of annotating the types of side-inputs::
FlatMap().with_input_types(int, int, bool)
Raises:
:class:`TypeError`: If **type_hint** is not a valid type-hint.
See
:func:`~apache_beam.typehints.typehints.validate_composite_type_param`
for further details.
Returns:
:class:`PTransform`: A reference to the instance of this particular
:class:`PTransform` object. This allows chaining type-hinting related
methods.
"""
super(PTransformWithSideInputs, self).with_input_types(input_type_hint)
side_inputs_arg_hints = native_type_compatibility.convert_to_beam_types(
side_inputs_arg_hints)
side_input_kwarg_hints = native_type_compatibility.convert_to_beam_types(
side_input_kwarg_hints)
for si in side_inputs_arg_hints:
validate_composite_type_param(si, 'Type hints for a PTransform')
for si in side_input_kwarg_hints.values():
validate_composite_type_param(si, 'Type hints for a PTransform')
self.side_inputs_types = side_inputs_arg_hints
return WithTypeHints.with_input_types(
self, input_type_hint, *side_inputs_arg_hints, **side_input_kwarg_hints)
def type_check_inputs(self, pvalueish):
type_hints = self.get_type_hints()
input_types = type_hints.input_types
if input_types:
args, kwargs = self.raw_side_inputs
def element_type(side_input):
if isinstance(side_input, pvalue.AsSideInput):
return side_input.element_type
return instance_to_type(side_input)
arg_types = [pvalueish.element_type] + [element_type(v) for v in args]
kwargs_types = {k: element_type(v) for (k, v) in kwargs.items()}
argspec_fn = self._process_argspec_fn()
bindings = getcallargs_forhints(argspec_fn, *arg_types, **kwargs_types)
hints = getcallargs_forhints(
argspec_fn, *input_types[0], **input_types[1])
for arg, hint in hints.items():
if arg.startswith('__unknown__'):
continue
if hint is None:
continue
if not typehints.is_consistent_with(bindings.get(arg, typehints.Any),
hint):
raise TypeCheckError(
'Type hint violation for \'{label}\': requires {hint} but got '
'{actual_type} for {arg}\nFull type hint:\n{debug_str}'.format(
label=self.label,
hint=hint,
actual_type=bindings[arg],
arg=arg,
debug_str=type_hints.debug_str()))
def _process_argspec_fn(self):
"""Returns an argspec of the function actually consuming the data.
"""
raise NotImplementedError
def make_fn(self, fn, has_side_inputs):
# TODO(silviuc): Add comment describing that this is meant to be overridden
# by methods detecting callables and wrapping them in DoFns.
return fn
def default_label(self):
return '%s(%s)' % (self.__class__.__name__, self.fn.default_label())
class _PTransformFnPTransform(PTransform):
"""A class wrapper for a function-based transform."""
def __init__(self, fn, *args, **kwargs):
super(_PTransformFnPTransform, self).__init__()
self._fn = fn
self._args = args
self._kwargs = kwargs
def display_data(self):
res = {
'fn': (
self._fn.__name__
if hasattr(self._fn, '__name__') else self._fn.__class__),
'args': DisplayDataItem(str(self._args)).drop_if_default('()'),
'kwargs': DisplayDataItem(str(self._kwargs)).drop_if_default('{}')
}
return res
def expand(self, pcoll):
# Since the PTransform will be implemented entirely as a function
# (once called), we need to pass through any type-hinting information that
# may have been annotated via the .with_input_types() and
# .with_output_types() methods.
kwargs = dict(self._kwargs)
args = tuple(self._args)
# TODO(BEAM-5878) Support keyword-only arguments.
try:
if 'type_hints' in get_signature(self._fn).parameters:
args = (self.get_type_hints(), ) + args
except TypeError:
# Might not be a function.
pass
return self._fn(pcoll, *args, **kwargs)
def default_label(self):
if self._args:
return '%s(%s)' % (
label_from_callable(self._fn), label_from_callable(self._args[0]))
return label_from_callable(self._fn)
def ptransform_fn(fn):
"""A decorator for a function-based PTransform.
Experimental; no backwards-compatibility guarantees.
Args:
fn: A function implementing a custom PTransform.
Returns:
A CallablePTransform instance wrapping the function-based PTransform.
This wrapper provides an alternative, simpler way to define a PTransform.
The standard method is to subclass from PTransform and override the expand()
method. An equivalent effect can be obtained by defining a function that
accepts an input PCollection and additional optional arguments and returns a
resulting PCollection. For example::
@ptransform_fn
def CustomMapper(pcoll, mapfn):
return pcoll | ParDo(mapfn)
The equivalent approach using PTransform subclassing::
class CustomMapper(PTransform):
def __init__(self, mapfn):
super(CustomMapper, self).__init__()
self.mapfn = mapfn
def expand(self, pcoll):
return pcoll | ParDo(self.mapfn)
With either method the custom PTransform can be used in pipelines as if
it were one of the "native" PTransforms::
result_pcoll = input_pcoll | 'Label' >> CustomMapper(somefn)
Note that for both solutions the underlying implementation of the pipe
operator (i.e., `|`) will inject the pcoll argument in its proper place
(first argument if no label was specified and second argument otherwise).
"""
# TODO(robertwb): Consider removing staticmethod to allow for self parameter.
@wraps(fn)
def callable_ptransform_factory(*args, **kwargs):
return _PTransformFnPTransform(fn, *args, **kwargs)
return callable_ptransform_factory
def label_from_callable(fn):
if hasattr(fn, 'default_label'):
return fn.default_label()
elif hasattr(fn, '__name__'):
if fn.__name__ == '<lambda>':
return '<lambda at %s:%s>' % (
os.path.basename(fn.__code__.co_filename), fn.__code__.co_firstlineno)
return fn.__name__
return str(fn)
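# Hedged examples of what label_from_callable() returns (values illustrative):
#   an object with default_label()  -> whatever default_label() returns
#   a named function my_fn          -> 'my_fn'
#   a lambda                        -> '<lambda at somefile.py:123>' (file/line made up)
#   anything else, e.g. 42          -> '42'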
class _NamedPTransform(PTransform):
def __init__(self, transform, label):
super(_NamedPTransform, self).__init__(label)
self.transform = transform
def __ror__(self, pvalueish, _unused=None):
return self.transform.__ror__(pvalueish, self.label)
def expand(self, pvalue):
raise RuntimeError("Should never be expanded directly.")
|
iemejia/incubator-beam
|
sdks/python/apache_beam/transforms/ptransform.py
|
Python
|
apache-2.0
| 36,031
|
[
"VisIt"
] |
7aae1a623717a54b90fe0ae7dab080a87e48fedffdfc1581f417d53ea09fd8a3
|
#! /usr/bin/env python
# This version uses NEST's RandomConvergentConnect functions.
from scipy.optimize import fsolve
import cynest as nest
#import cynest.raster_plot
import time
from numpy import exp
def ComputePSPnorm(tauMem, CMem, tauSyn):
"""Compute the maximum of postsynaptic potential
for a synaptic input current of unit amplitude
(1 pA)"""
a = (tauMem / tauSyn)
b = (1.0 / tauSyn - 1.0 / tauMem)
# time of maximum
t_max = 1.0/b * ( -nest.sli_func('LambertWm1',-exp(-1.0/a)/a) - 1.0/a )
# maximum of PSP for current of unit amplitude
return exp(1.0)/(tauSyn*CMem*b) * ((exp(-t_max/tauMem) - exp(-t_max/tauSyn)) / b - t_max*exp(-t_max/tauSyn))
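# A hedged cross-check of ComputePSPnorm that uses SciPy's Lambert W (branch
# k=-1) instead of NEST's SLI 'LambertWm1'; it assumes the two implementations
# agree and is not part of the original example script.
def ComputePSPnorm_scipy(tauMem, CMem, tauSyn):
    from scipy.special import lambertw
    a = (tauMem / tauSyn)
    b = (1.0 / tauSyn - 1.0 / tauMem)
    # time of maximum, via the k=-1 branch of the Lambert W function
    t_max = 1.0/b * (-lambertw(-exp(-1.0/a)/a, k=-1).real - 1.0/a)
    # maximum of PSP for current of unit amplitude
    return exp(1.0)/(tauSyn*CMem*b) * ((exp(-t_max/tauMem) - exp(-t_max/tauSyn)) / b - t_max*exp(-t_max/tauSyn))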
nest.ResetKernel()
startbuild = time.time()
dt = 0.1 # the resolution in ms
simtime = 1000.0 # Simulation time in ms
delay = 1.5 # synaptic delay in ms
# Parameters for asynchronous irregular firing
g = 5.0
eta = 2.0
epsilon = 0.1 # connection probability
order = 2500
NE = 4*order
NI = 1*order
N_neurons = NE+NI
N_rec = 50 # record from 50 neurons
CE = int(epsilon*NE) # number of excitatory synapses per neuron
CI = int(epsilon*NI) # number of inhibitory synapses per neuron
C_tot = int(CI+CE) # total number of synapses per neuron
# Initialize the parameters of the integrate and fire neuron
tauSyn = 0.5
tauMem = 20.0
CMem = 250.0
theta = 20.0
J = 0.1 # postsynaptic amplitude in mV
# normalize synaptic current so that amplitude of a PSP is J
J_unit = ComputePSPnorm(tauMem, CMem, tauSyn)
J_ex = J / J_unit
J_in = -g*J_ex
# threshold rate, equivalent rate of events needed to
# have mean input current equal to threshold
nu_th = (theta * CMem) / (J_ex*CE*exp(1)*tauMem*tauSyn)
nu_ex = eta*nu_th
p_rate = 1000.0*nu_ex*CE
nest.SetKernelStatus({"resolution": dt, "print_time": True})
print("Building network")
neuron_params= {"C_m": CMem,
"tau_m": tauMem,
"tau_syn_ex": tauSyn,
"tau_syn_in": tauSyn,
"t_ref": 2.0,
"E_L": 0.0,
"V_reset": 0.0,
"V_m": 0.0,
"V_th": theta}
nest.SetDefaults("iaf_psc_alpha", neuron_params)
nodes_ex=nest.Create("iaf_psc_alpha",NE)
nodes_in=nest.Create("iaf_psc_alpha",NI)
nest.SetDefaults("poisson_generator",{"rate": p_rate})
noise=nest.Create("poisson_generator")
espikes=nest.Create("spike_detector")
ispikes=nest.Create("spike_detector")
nest.SetStatus(espikes,[{"label": "brunel-py-ex",
"withtime": True,
"withgid": True}])
nest.SetStatus(ispikes,[{"label": "brunel-py-in",
"withtime": True,
"withgid": True}])
print("Connecting devices.")
nest.CopyModel("static_synapse","excitatory",{"weight":J_ex, "delay":delay})
nest.CopyModel("static_synapse","inhibitory",{"weight":J_in, "delay":delay})
nest.DivergentConnect(noise,nodes_ex,model="excitatory")
nest.DivergentConnect(noise,nodes_in,model="excitatory")
nest.ConvergentConnect(list(range(1,N_rec+1)),espikes,model="excitatory")
nest.ConvergentConnect(list(range(NE+1,NE+1+N_rec)),ispikes,model="excitatory")
print("Connecting network.")
# We now iterate over all neuron IDs, and connect the neuron to
# the sources from our array. The first loop connects the excitatory neurons
# and the second loop the inhibitory neurons.
print("Excitatory connections")
nest.RandomConvergentConnect(nodes_ex, nodes_ex+nodes_in, CE,model="excitatory")
print("Inhibitory connections")
nest.RandomConvergentConnect(nodes_in, nodes_ex+nodes_in, CI,model="inhibitory")
endbuild=time.time()
print("Simulating.")
nest.Simulate(simtime)
endsimulate= time.time()
events_ex = nest.GetStatus(espikes,"n_events")[0]
rate_ex = events_ex/simtime*1000.0/N_rec
events_in = nest.GetStatus(ispikes,"n_events")[0]
rate_in = events_in/simtime*1000.0/N_rec
num_synapses = nest.GetDefaults("excitatory")["num_connections"]+\
nest.GetDefaults("inhibitory")["num_connections"]
build_time = endbuild-startbuild
sim_time = endsimulate-endbuild
print("Brunel network simulation (Python)")
print("Number of neurons :", N_neurons)
print("Number of synapses:", num_synapses)
print(" Exitatory :", int(CE*N_neurons)+N_neurons)
print(" Inhibitory :", int(CI*N_neurons))
print("Excitatory rate : %.2f Hz" % rate_ex)
print("Inhibitory rate : %.2f Hz" % rate_in)
print("Building time : %.2f s" % build_time)
print("Simulation time : %.2f s" % sim_time)
#nest.raster_plot.from_device(espikes, hist=True)
#nest.raster_plot.show()
|
QJonny/CyNest
|
cynest/examples/brunel-alpha-nest.py
|
Python
|
gpl-2.0
| 4,644
|
[
"NEURON"
] |
41500a0fa5236ec4607fb7d761464c12ec7312536de3579c7e18f68b9e9c1c11
|
import numpy as np
import pandas as pd
import time
import simtk.openmm.app as app
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems
n_steps = 10000
temperature = 300. * u.kelvin
data = {}
for cutoff in np.linspace(0.8, 1.3, 10):
testsystem = testsystems.DHFRExplicit(nonbondedCutoff=cutoff * u.nanometers, nonbondedMethod=app.PME)
system, positions = testsystem.system, testsystem.positions
integrator = mm.LangevinIntegrator(temperature, 1.0 / u.picoseconds, 2.0 * u.femtoseconds)
context = mm.Context(system, integrator)
context.setPositions(positions)
context.setVelocitiesToTemperature(temperature)
integrator.step(1)
t0 = time.time()
integrator.step(n_steps)
dt = time.time() - t0
time_per_frame = dt / float(n_steps) * u.seconds
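# Each step advances 2 fs of simulation time; time_per_frame / u.day is the
# (dimensionless) fraction of a wall-clock day spent per step, so the ratio
# below gives simulated nanoseconds per day of wall-clock time.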
frames_per_day = time_per_frame / u.day
ns_per_day = (2 * u.femtoseconds / frames_per_day) / u.nanoseconds
data[cutoff] = ns_per_day
print(cutoff, ns_per_day)
|
kyleabeauchamp/HMCNotes
|
code/old/benchmark_dhfr.py
|
Python
|
gpl-2.0
| 1,019
|
[
"OpenMM"
] |
41516398e05728aedc629fba91d3501b9d4dcc07412cdbca422aead7500316ce
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAdsplit(RPackage):
"""Annotation-Driven Clustering
This package implements clustering of microarray gene expression
profiles according to functional annotations. For each term genes are
annotated to, splits into two subclasses are computed and a significance
of the supporting gene set is determined."""
homepage = "https://bioconductor.org/packages/adSplit"
git = "https://git.bioconductor.org/packages/adSplit.git"
version('1.60.0', commit='de5abccfe652cbc5b5f49fb6ed77cdd15cc760cd')
version('1.54.0', commit='ce8fb61f4a3d0942294da2baa28be1472acb0652')
version('1.52.0', commit='3bd105dbd76c52798b7d52f60c17de62ef13da19')
version('1.50.0', commit='a02e2c994e78ececd5a248575109c5ed36c969db')
version('1.48.0', commit='57dfcd93b9232cf53f05c34179ecb759bb7aff46')
version('1.46.0', commit='7e81a83f34d371447f491b3a146bf6851e260c7c')
depends_on('r@2.1.0:', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-biobase@1.5.12:', type=('build', 'run'))
depends_on('r-cluster@1.9.1:', type=('build', 'run'))
depends_on('r-go-db@1.8.1:', type=('build', 'run'))
depends_on('r-kegg-db@1.8.1:', type=('build', 'run'))
depends_on('r-multtest@1.6.0:', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-adsplit/package.py
|
Python
|
lgpl-2.1
| 1,528
|
[
"Bioconductor"
] |
314018c80a55ba1af304e9376a72054fc3fac99baae7152e9c4204dc5b5abd2d
|
"""
This module contains the CharmmWriter class and associated methods,
which outputs a psf/pdb file with CHARMM names and parameters.
It does this by converting atom names to CHARMM names, writing
intermediate files as necessary to invoke the vmd psfgen plugin.
Author: Robin Betz
Copyright (C) 2019 Robin Betz
"""
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330
# Boston, MA 02111-1307, USA.
from __future__ import print_function
import os
import tempfile
from parmed.formats.registry import load_file
from psfgen import PsfGen
from vmd import atomsel, molecule
from dabble import DabbleError
from dabble.param import CharmmMatcher, MoleculeWriter, Patch
# Handle python 2/3 input
try:
input = raw_input
except NameError:
pass
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# CONSTANTS #
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
PATCHABLE_ACIDS = ('ACE ALA ARG ASN ASP CYS CYX GLN GLU GLY HIE HIS HSP HSE '
'HSD ILE LEU LYS MET NMA PHE PRO SER THR TRP TYR VAL')
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# CLASSES #
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class CharmmWriter(MoleculeWriter):
"""
An object that handles all the conversions to a psf file
by interfacing with psfgen.
Writes a pdb/psf file pair from the current molecule using the
CHARMM36 topology and atom names/types. Interfaces with psfgen by
dynamically generating the .tcl file that psfgen takes as input.
Prompts the user for additional topology files and helps with
matching atom names that cannot be automatically translated to the
charmm naming conventions.
"""
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# CONSTANTS #
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
WATER_NAMES = {
"tip3" : "TIP3",
"tip4e" : "TP4E",
"spce" : "SPCE",
}
WATER_O_NAME = "OH2"
WATER_H_NAMES = ["H1", "H2"]
#==========================================================================
def __init__(self, molid, **kwargs):
"""
Creates a CHARMM writer
Args:
molid (int): VMD molecule ID of system to write
tmp_dir (str): Directory for temporary files. Defaults to "."
lipid_sel (str): Lipid selection string. Defaults to "lipid"
hmr (bool): If hydrogen masses should be repartitioned. Defaults
to False
forcefield (str): Forcefield to use, either "charmm" or "amber"
water_model (str): Water model to use
extra_topos (list of str): Additional topology (.str, .off, .lib) to
include.
extra_params (list of str): Additional parameter sets (.str, .frcmod)
override_defaults (bool): If set, omits default forcefield parameters.
debug_verbose (bool): Prints additional output, like from psfgen.
"""
# Initialize default options
super(CharmmWriter, self).__init__(molid, **kwargs)
# Create a psf generator object
self.psfgen = PsfGen()
# Set forcefield default topologies and parameters
self.forcefield = kwargs.get("forcefield", "charmm")
self.water_model = kwargs.get("water_model", "tip3")
self.topologies = self.get_topologies(self.forcefield, self.water_model)
self.parameters = self.get_parameters(self.forcefield, self.water_model)
if "charmm" in self.forcefield:
if self.hmr:
raise DabbleError("HMR not supported with CHARMM ff yet")
# Handle override and extra topologies
if self.override:
self.topologies = []
self.parameters = []
# Now extra topologies (put in self by super __init__)
self.topologies.extend(self.extra_topos)
self.parameters.extend(self.extra_params)
# Once all topologies defined, initialize matcher only if
# using CHARMM topologies (not if we're doing a conversion)
if "charmm" in self.forcefield or "opls" in self.forcefield:
self.matcher = CharmmMatcher(self.topologies)
# Keep track of segment numbers for protein and other
self.segint = 0
#=========================================================================
def write(self, filename):
"""
Writes the parameter and topology files
Args:
filename (str): File name to write. File type suffix will be added.
"""
self.outprefix = filename
# Put our molecule on top
old_top = molecule.get_top()
molecule.set_top(self.molid)
# Amber forcefield done with AmberWriter then conversion
if "amber" in self.forcefield:
# Avoid circular import by doing it here
from dabble.param import AmberWriter
prmtopgen = AmberWriter(molid=self.molid,
tmp_dir=self.tmp_dir,
forcefield=self.forcefield,
water_model=self.water_model,
hmr=self.hmr,
lipid_sel=self.lipid_sel,
extra_topos=self.extra_topos,
extra_params=self.extra_params,
override_defaults=self.override,
debug_verbose=self.debug)
prmtopgen.write(self.outprefix)
self._prmtop_to_charmm()
# Charmm forcefield
elif "charmm" in self.forcefield:
self._run_psfgen()
# OPLS forcefield. Same as charmm but list separately for readability
elif "opls" in self.forcefield:
self._run_psfgen()
else:
raise DabbleError("Unsupported forcefield '%s' for CharmmWriter"
% self.forcefield)
# Check output and finish up
self._check_psf_output()
# Reset top molecule
molecule.set_top(old_top)
#=========================================================================
# Static methods #
#=========================================================================
@classmethod
def get_topologies(cls, forcefield, water_model):
if forcefield == "charmm":
topos = [
"top_all36_caps.rtf",
"top_all36_cgenff.rtf",
"top_all36_prot.rtf",
"top_all36_lipid.rtf",
"top_all36_carb.rtf",
"top_all36_na.rtf",
"toppar_all36_prot_na_combined.str",
"toppar_all36_prot_fluoro_alkanes.str"
]
if water_model == "tip3":
topos.append("toppar_water_ions.str")
elif water_model == "tip4e":
topos.append("toppar_water_ions_tip4p_ew.str")
elif water_model == "spce":
topos.append("toppar_water_ions_spc_e.str")
elif forcefield == "opls":
topos = [
"opls_aam.rtf",
"opls_aam_caps.rtf"
]
if water_model != "tip3":
raise DabbleError("Only TIP3 water model supported for OPLS")
elif forcefield == "amber":
from dabble.param import AmberWriter # avoid circular dependency
return AmberWriter.get_topologies(forcefield, water_model)
else:
raise ValueError("Invalid forcefield: '%s'" % forcefield)
return [cls._get_forcefield_path(top) for top in topos]
#=========================================================================
@classmethod
def get_parameters(cls, forcefield, water_model):
if forcefield == "charmm":
prms = [
"par_all36m_prot.prm",
"par_all36_cgenff.prm",
"par_all36_lipid.prm",
"par_all36_carb.prm",
"par_all36_na.prm",
"toppar_all36_prot_na_combined.str"
]
if water_model == "tip3":
prms.append("toppar_water_ions.str")
elif water_model == "tip4e":
prms.append("toppar_water_ions_tip4p_ew.str")
elif water_model == "spce":
prms.append("toppar_water_ions_spc_e.str")
elif forcefield == "amber":
from dabble.param import AmberWriter # avoid circular dependency
return AmberWriter.get_parameters(forcefield, water_model)
elif forcefield == "opls":
prms = [
"opls_aam.prm"
]
if water_model != "tip3":
raise DabbleError("Only TIP3 water model supported for OPLS")
else:
raise ValueError("Invalid forcefield: '%s'" % forcefield)
return [cls._get_forcefield_path(par) for par in prms]
#=========================================================================
# Private methods #
#=========================================================================
def _write_water_blocks(self):
"""
Writes a series of temporary files with 10000 waters each, to work around
psfgen's inability to handle files containing more than 10000 of a residue.
"""
# Set water names and write them to PDB file(s)
self._set_water_names()
pdbs = self._write_water_pdbs()
for i, pdb in enumerate(pdbs):
self.psfgen.add_segment(segid="W%d" % i, pdbfile=pdb)
self.psfgen.read_coords(segid="W%d" % i, filename=pdb)
# If water model includes dummy atoms, guess the coordinates
# This is safe as only waters have been added to the psfgen state
# so far, so actually broken atoms won't be fixed by accident.
if self.water_model != "tip3":
self.psfgen.guess_coords()
self.psfgen.regenerate_angles()
self.psfgen.regenerate_dihedrals()
#==========================================================================
def _write_lipid_blocks(self):
"""
Writes a temporary PDB file containing the lipids for later use by
psfgen. Renumbers the lipid residues because some can have **** instead
of an integer for resid in large systems, which will crash psfgen. Also
sets atom names for some common lipids (currently POPC)
Raises:
NotImplementedError if more than 10,000 lipids are present since it
doesn't support feeding multiple lipid blocks to psfgen currently
NotImplementedError if lipid other than POPC,POPE,POPG is found
"""
# Put current molecule on top to simplify atom selection
old_top = molecule.get_top()
molecule.set_top(self.molid)
# Collect lipid residues up
alll = atomsel('(%s) and user 1.0' % self.lipid_sel)
residues = list(set(alll.residue))
# Lipids not compatible with AMBER parameters, CHARMM format
if alll and ("amber" in self.forcefield or
"opls" in self.forcefield):
raise ValueError("AMBER or OPLS parameters not supported for lipids"
" in CHARMM output format")
# Sanity check for < 10k lipids
if len(residues) >= 10000:
raise NotImplementedError("More than 10k lipids found")
# Loop through all residues and renumber and correctly name them
lipress = []
for resname in set(alll.resname):
lipress.extend(self._rename_by_resname(resname, renumber=True))
# Write temporary lipid pdb
_, temp = tempfile.mkstemp(suffix='.pdb', prefix='psf_lipid_',
dir=self.tmp_dir)
os.close(_)
saved_lips = atomsel("residue %s" % ' '.join(str(_) for _ in lipress))
saved_lips.user = 0.0
saved_lips.write('pdb', temp)
# Generate lipid segment
self.psfgen.add_segment(segid="L", pdbfile=temp)
self.psfgen.read_coords(segid="L", filename=temp)
# Put old top back
molecule.set_top(old_top)
#==========================================================================
def _write_ion_blocks(self):
"""
Writes a PDB file containing correctly named ions for use by
psfgen, and instructs psfgen to use it in TCL code.
"""
# Put our molecule on top to simplify atom selection language
old_top = molecule.get_top()
molecule.set_top(self.molid)
# Select all ions
allions = []
for resname in set(atomsel("numbonds 0").resname):
allions.extend(self._rename_by_resname(resname, renumber=True))
# Stop if no ions were found
if not allions:
return
# Save ions as pdb
allsel = atomsel("residue %s" % " ".join(str(_) for _ in allions))
allsel.resid = range(len(allsel))
allsel.user = 0.0
_, temp = tempfile.mkstemp(suffix=".pdb", prefix="psf_ions_",
dir=self.tmp_dir)
os.close(_)
allsel.write("pdb", temp)
self.psfgen.add_segment(segid="I", pdbfile=temp)
self.psfgen.read_coords(segid="I", filename=temp)
molecule.set_top(old_top)
#==========================================================================
def _find_single_residue_names(self, resname, molid):
"""
Uses graph matcher and available topologies to match up
ligand names automatically. Tries to use graphs, and if there's an
uneven number of atoms tries to match manually to suggest which atoms
are most likely missing.
Args:
resname (str): Residue name of the ligand that will be written.
All ligands will be checked separately against the graphs.
molid (int): VMD molecule ID to consider
Returns:
(list of ints): Residue numbers (not resid) of all input ligands
that were successfully matched. Need to do it this way since
residue names can be changed in here to different things.
Raises:
ValueError if number of resids does not match number of residues as
interpreted by VMD
NotImplementedError if a residue could not be matched to a graph.
"""
# Put our molecule on top
old_top = molecule.get_top()
molecule.set_top(molid)
# Sanity check that there is no discrepancy between defined resids and
# residues as interpreted by VMD.
residues = set(atomsel("user 1.0 and resname '%s'" % resname).residue)
for chain in set(atomsel("user 1.0 and resname '%s'" % resname).chain):
tempres = set(atomsel("user 1.0 and resname '%s' and chain %s"
% (resname, chain)).residue)
resids = set(atomsel("user 1.0 and resname '%s' and chain %s"
% (resname, chain)).resid)
if len(tempres) != len(resids):
raise DabbleError("VMD found %d residues for resname '%s', "
"but there are %d resids in chain %s! "
"Check input."
% (len(tempres), resname, len(resids), chain))
for residue in residues:
sel = atomsel("residue %s and resname '%s' and user 1.0"
% (residue, resname))
newname, atomnames = self.matcher.get_names(sel, print_warning=True)
if not newname:
resname, patch, atomnames = self.matcher.get_patches(sel)
if not newname:
print("ERROR: Could not find a residue definition for %s:%s"
% (resname, residue))
raise NotImplementedError("No residue definition for %s:%s"
% (resname, residue))
print("\tApplying patch %s to ligand %s" % (patch, newname))
# Do the renaming
self._apply_naming_dictionary(atomnames=atomnames,
resnames=newname,
verbose=True)
molecule.set_top(old_top)
return list(residues)
#==========================================================================
def _write_generic_block(self, residues):
"""
Matches ligands to available topology file, renames atoms, and then
writes temporary files for the ligands
Args:
residues (list of int): Residue numbers to be written. Will all
be written to one segment.
Returns:
True if successful
"""
# Put our molecule on top to simplify atom selection language
old_top = molecule.get_top()
molecule.set_top(self.molid)
alig = atomsel('user 1.0 and residue %s' % " ".join([str(x) for x in residues]))
# Write temporary file containing the residues and update tcl commands
_, temp = tempfile.mkstemp(suffix='.pdb', prefix='psf_block_',
dir=self.tmp_dir)
os.close(_)
alig.write('pdb', temp)
alig.user = 0.0
# Get next available segment name
segname = "B%d" % self.segint
self.segint += 1
self.psfgen.add_segment(segid=segname, pdbfile=temp)
self.psfgen.read_coords(segid=segname, filename=temp)
if old_top != -1:
molecule.set_top(old_top)
return True
#==========================================================================
def _write_protein_blocks(self, molid, frag):
"""
Writes a protein fragment to a pdb file for input to psfgen
Automatically assigns amino acid names
Args:
molid (int): VMD molecule ID of renumbered protein
frag (str): Fragment to write
Returns:
(list of Patches): Patches to add to psfgen input files
"""
print("Setting protein atom names")
# Put our molecule on top to simplify atom selection language
old_top = molecule.get_top()
molecule.set_top(molid)
patches = set()
extpatches = set()
# Get a unique and reliable segment name
seg = self.matcher.get_protein_segname(molid, frag)
fragsel = atomsel("fragment '%s'" % frag)
residues = list(set(fragsel.residue))
for residue in residues:
sel = atomsel('residue %s' % residue)
resid = sel.resid[0]
# Only try to match single amino acid if there are 1 or 2 bonds
if len(self.matcher.get_extraresidue_atoms(sel)) < 3:
(newname, atomnames) = self.matcher.get_names(sel, False)
# See if it's a disulfide bond participant
else:
(newname, patch, atomnames) = \
self.matcher.get_disulfide("residue %d" % residue,
molid)
if newname:
extpatches.add(patch)
# Couldn't find a match. See if it's a patched residue
if not newname:
(newname, patchname, atomnames) = self.matcher.get_patches(sel)
if newname:
# This returns patch name only, not a Patch object
patches.add(Patch(name=patchname, segids=[seg],
resids=[resid]))
# Fall through to error condition
if not newname:
raise DabbleError("Couldn't find a patch for %s:%s"
% (sel.resname[0], resid))
# Do the renaming
self._apply_naming_dictionary(atomnames=atomnames,
resnames=newname)
# Save protein chain in the correct order
filename = self.tmp_dir + '/psf_protein_%s.pdb' % seg
_write_ordered_pdb(filename, "fragment '%s'" % frag, molid)
print("\tWrote %d atoms to the protein segment %s"
% (len(atomsel("fragment %s" % frag)), seg))
# Now invoke psfgen for the protein segments
self.psfgen.add_segment(segid=seg, pdbfile=filename)
print("Applying the following single-residue patches to P%s:\n" % frag)
print("\t%s" % "\t".join(str(_) for _ in patches))
for p in patches:
self.psfgen.patch(patchname=p.name, targets=p.targets())
self.psfgen.read_coords(segid=seg, filename=filename)
# Fix coordinates that are out of bounds, i.e. 5 characters
badidxs = atomsel("fragment '%s' and (abs(x) >= 100 or abs(y) >= 100 "
"or abs(z) >= 100)" % frag, molid).index
for idx in badidxs:
atom = atomsel("index %d" % idx, molid)
self.psfgen.set_position(segid=seg, resid=atom.resid[0],
atomname=atom.name[0],
position=(atom.x[0], atom.y[0], atom.z[0]))
if old_top != -1:
molecule.set_top(old_top)
fragsel.user = 0.0
return extpatches
#==========================================================================
def _check_psf_output(self):
"""
Scans the output psf from psfgen for atoms where the coordinate
could not be set, indicating an unmatched atom. This check is necessary
because sometimes psfgen will run with no errors or warnings but will
have unmatched atoms that are all at (0,0,0).
"""
# Check file was written at all
if not os.path.isfile('%s.pdb'% self.outprefix):
raise DabbleError("\nERROR: psf file failed to write.\n"
" Please see log above.\n")
# Open the pdb file in VMD and check for atoms with no occupancy
fileh = molecule.load('pdb', '%s.pdb' % self.outprefix)
errors = atomsel("occupancy=-1", molid=fileh)
# Print out error messages
if errors:
errstr = "\nERROR: Couldn't find the following atoms.\n"
for i in range(len(errors)):
errstr += "\t%s%s:%s\n" % (errors.resname[i], errors.resid[i],
errors.name[i])
errstr += "Check if they are present in the original structure.\n"
raise DabbleError(errstr)
print("\nChecked output pdb/psf has all atoms present "
"and correct.\n")
#==========================================================================
def _find_residue_in_rtf(self, resname, molid):
"""
Scans the input topology files to find a name match for the given
residue name, then pulls out the atoms involved and checks that they
are all present in the input coordinates, prompting the user to correct
the names of atoms that could not be matched.
Residue ID is used because there can be multiple copies of a residue
with the same name, but only one has missing or extra atoms.
Args:
resname (str): Residue name to check
molid (int): VMD molecule ID
Returns:
True if all matching was successful
False if the residue name cannot be found
"""
print("Finding residue name '%s'" % resname)
for top in self.topologies:
topfile = open(top, 'r')
topo_atoms = _get_atoms_from_rtf(text=topfile.readlines(),
resname=resname)
# Use first definition found of this residue
if topo_atoms:
break
topfile.close()
if not topo_atoms:
return False
print("Successfully found residue %s in input topologies" % resname)
# Match up atoms with python sets
pdb_atoms = set(atomsel("resname '%s' and user 1.0" % resname,
molid=molid).name)
pdb_only = pdb_atoms - topo_atoms
topo_only = topo_atoms - pdb_atoms
# If uneven number of atoms, there are missing or additional atoms
if len(pdb_atoms) > len(topo_atoms):
raise DabbleError("\nERROR: Cannot process modified residue %s.\n"
"There are %d extra atoms in the input structure "
"that are undefined in the topology file. The "
"following atoms could not be matched and may "
"either be misnamed, or additional atoms:\n"
"[ %s ]\n"
% (resname, len(pdb_atoms)-len(topo_atoms),
" ".join(pdb_only)))
if len(topo_atoms) > len(pdb_atoms):
raise DabbleError("\nERROR: Cannot process modified residue %s.\n"
"There are %d missing atoms in the input structure "
"that are defined in the topology file. The "
"following atoms could not be matched and may "
"either be misnamed or deleted atoms:\n"
"[ %s ]\n"
% (resname, len(topo_atoms)-len(pdb_atoms),
" ".join(topo_only)))
# Offer to rename atoms that couldn't be matched to the topology
if pdb_only:
print("\nWARNING: Having some trouble with modified residue %s.\n"
" The following atom names cannot be matched up "
" to the input topologies. They are probably "
" misnamed.\n" % resname)
print(" To help you, here are the atom names that "
" should be present according to the topology "
" but were not found:\n")
print(" [ %s ]\n" % ' '.join([str(t) for t in topo_only]))
print(" Please enter a valid name for each atom as "
"it appears or CTRL+D to quit..\n")
for unmatched in pdb_only:
print("Unmatched topology names: [ %s ]"
% ' '.join(topo_only))
newname = input(" %s -> " % unmatched)
while newname not in topo_only:
print("'%s' is not an available name in the topology."
"Please try again.\n" % newname)
newname = input(" %s -> " % unmatched)
atomsel("resname '%s' and user 1.0 and name '%s'"
% (resname, unmatched)).name = newname
pdb_atoms = set(atomsel("resname '%s' and user 1.0"
% resname).name)
topo_only = topo_atoms-pdb_atoms
resname = newname
# Recurse to check that everything is assigned correctly
self._find_residue_in_rtf(resname, molid)
print("Matched up all atom names for resname '%s'\n" % resname)
return True
#==========================================================================
def _get_patch(self, seg, resid):
"""
Prompts the user for a patch to apply for the given residue.
Gathers available patches from topology files
Args:
seg (str): Segment to apply the patch to
resid (int): Residue ID to apply the patch to
Returns:
(str) patch line to put in the psfgen input file
"""
avail_patches = self._get_avail_patches()
print("What is the patch name I should apply?")
print("Type NONE for no patch, if your residue is completely "
"defined in a str file")
print("Or type HELP for a list of all patches I know about")
patchname = input("> ")
if patchname == "HELP":
print(" PATCH COMMENT")
print(" ----- -------")
for patch in avail_patches:
print("%7s %s" % (patch, avail_patches[patch]))
patchname = input("> ")
while (patchname not in avail_patches) and (patchname != "NONE"):
print("I don't know about patch %s" % patchname)
patchname = input("Try again > ")
if patchname == "NONE":
return ""
return "patch %s %s:%d\n" % (patchname, seg, resid)
#==========================================================================
def _get_avail_patches(self):
"""
Gathers the patches defined in all topology files.
Returns:
(dict str -> str): Patch names as keys, comment as value
"""
avail_patches = {}
for top in self.topologies:
topfile = open(top, 'r')
for line in topfile:
tokens = line.split()
if not tokens:
continue
if tokens[0] == "PRES":
comment = ' '.join(tokens[tokens.index("!")+1:])
avail_patches[tokens[1]] = comment
return avail_patches
#==========================================================================
def _run_psfgen(self):
# Read topology files in to psfgen
print("Using the following topologies:")
for top in self.topologies:
print(" - %s" % os.path.split(top)[1])
self.psfgen.read_topology(top)
# Mark all atoms as unsaved with the user field
atomsel('all', molid=self.molid).user = 1.0
check_atom_names(molid=self.molid)
# Save water 10k molecules at a time
if atomsel('water', molid=self.molid):
self._write_water_blocks()
# Now ions if present, changing the atom names
if atomsel('ions', molid=self.molid):
self._write_ion_blocks()
# Now lipid
if atomsel(self.lipid_sel):
self._write_lipid_blocks()
# Now handle the protein
# Save and reload the protein so residue looping is correct
if atomsel("resname %s" % PATCHABLE_ACIDS, molid=self.molid):
extpatches = set()
for frag in sorted(set(atomsel("resname %s" % PATCHABLE_ACIDS,
molid=self.molid).fragment)):
extpatches.update(self._write_protein_blocks(self.molid, frag))
# List all patches applied to the protein
print("Applying the following patches:\n")
print("\t%s" % "\n\t".join(str(_) for _ in extpatches))
# Apply all multi segment patches to the protein
for p in extpatches:
self.psfgen.patch(p.name, p.targets())
else:
print("\n\tDidn't find any protein. Continuing...\n")
# Regenerate angles and dihedrals after applying patches
# Angles must be regenerated FIRST!
# See http://www.ks.uiuc.edu/Research/namd/mailing_list/namd-l.2009-2010/4137.html
self.psfgen.regenerate_angles()
self.psfgen.regenerate_dihedrals()
# Check if there is anything else and let the user know about it
leftovers = atomsel('user 1.0', molid=self.molid)
for lig in set(leftovers.resname):
residues = self._find_single_residue_names(resname=lig,
molid=self.molid)
self._write_generic_block(residues)
# Write the output files and run
self.psfgen.write_psf(filename="%s.psf" % self.outprefix, type="x-plor")
self.psfgen.write_pdb(filename="%s.pdb" % self.outprefix)
#==========================================================================
def _prmtop_to_charmm(self):
"""
Converts an AMBER prmtop with AMBER parameters to a psf file,
using ParmEd.
"""
# Save PSF topology and parameter file
parmstruct = load_file(self.outprefix + ".prmtop",
xyz=self.outprefix + ".inpcrd",
structure=True)
parmstruct.save(self.outprefix + ".psf", format="psf")
# Save PDB file with coordinates
m = molecule.load("parm7", self.outprefix + ".prmtop",
"rst7", self.outprefix + ".inpcrd")
atomsel("all", m).write("pdb", self.outprefix + ".pdb")
molecule.delete(m)
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# FUNCTIONS #
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _write_ordered_pdb(filename, sel, molid):
"""
Writes a pdb file in order of residues, renumbering the atoms
accordingly, since psfgen wants each residue sequentially while
VMD will write them in the same order as input, which from Maestro
created files has some guessed atoms at the end.
Args:
filename (str): Name of the pdb file to write
sel (str): VMD atomsel string for atoms that will be written
molid (int): VMD molecule ID to write from
"""
old_top = molecule.get_top()
molecule.set_top(molid)
fileh = open(filename, 'w')
# Use resids since order can be wrong when sorting by residue
# Then, use residue to pull out each one since it is much
# faster than trying to pull out residues
resids = set(atomsel(sel).resid)
# Add additional residue constraint to selection since pulling out
# by resid can match something in a different chain
resstr = ' '.join([str(x) for x in set(atomsel(sel).residue)])
idx = 1
# For renumbering capping groups
for resid in sorted(resids):
# Check for alternate locations
residues = sorted(set(atomsel("resid '%s' and residue %s"
% (resid, resstr)).residue))
for rid in residues:
for i in atomsel('residue %d' % rid).index:
a = atomsel('index %d' % i) # pylint: disable=invalid-name
fileh.write(MoleculeWriter.get_pdb_line(a, idx, a.resid[0]))
idx += 1
fileh.write('END\n')
atomsel(sel).user = 0.0
fileh.close()
molecule.set_top(old_top)
#==========================================================================
def _get_atoms_from_rtf(text, resname):
"""
Scans the input text for the residue with a given name. Once found,
pulls out all the atom names that comprise that residue.
Args:
text (str): Contents of an rtf file to scan
resname (str): Residue to look for
Returns:
atoms (set of str): Atom names in this residue, or the empty set if
the residue was not found.
"""
atoms = []
found = False
for line in text:
words = line.split()
if not words:
continue
if not found and words[0] == 'RESI' \
and words[1] == resname:
found = True
elif found and words[0] == 'ATOM':
atoms.append(words[1])
elif found and words[0] == 'RESI':
break
return set(atoms)
#==========================================================================
def get_bonded_atoms(molid, index):
"""
Returns the element of all atoms bonded to the current atom.
Args:
molid (int): VMD molecule ID to consider
index (int): Atom index to look at bonded atoms
Returns:
(list of str) elements of atoms bound to the current atom
"""
asel = atomsel('index %d' % index, molid=molid)
bound = []
for atom in asel.bonds[0]:
bound.append(atomsel('index %d' % atom).element[0])
return bound
#==========================================================================
def check_atom_names(molid):
"""
Checks that there are no spaces in atom names. If spaces are
found, they are removed and a warning is printed
"""
names = set(atomsel(molid=molid).name)
for name in names:
if ' ' in name:
print("\nWARNING: Found space character in name '%s'\n"
" Incompatible with charmm formats, removing it"
% name)
atomsel("name '%s'", molid=molid).name = name.replace(' ', '')
#==========================================================================
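#==============================================================================
# Example usage (an illustrative sketch only; the input file name and the
# resulting molecule ID are hypothetical and depend on what is loaded in VMD):
#
#   from vmd import molecule
#   molid = molecule.load("mae", "prepared_system.mae")
#   writer = CharmmWriter(molid, forcefield="charmm", water_model="tip3",
#                         lipid_sel="lipid")
#   writer.write("system")   # produces system.psf and system.pdb
#==============================================================================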
|
Eigenstate/dabble
|
dabble/param/charmm.py
|
Python
|
gpl-2.0
| 37,819
|
[
"Amber",
"CHARMM",
"NAMD",
"VMD"
] |
58da9753e5554f1dfe4c83368de2bd721a57e18b152f7c465e2367114fc1076c
|
import numpy as np
from .network import Network
from . import losses
from . import activations
class VAE:
'''Variational Autoencoder'''
def __init__(self, dimensions, latent_dim, params):
self.latent_dim = latent_dim
self.encoder = Network(dimensions[0] + [2], params)
self.decoder = Network([latent_dim] + dimensions[1], params)
for i in range(len(self.encoder.weights)):
self.encoder.weights[i] = np.abs(self.encoder.weights[i])
for i in range(len(self.decoder.weights)):
self.decoder.weights[i] = np.abs(self.decoder.weights[i])
self.batch_size = params['batch_size']
self.iter = params['iter']
self.encoder.loss = losses.loss_table['identity']
self.decoder.loss = losses.loss_table['squared_error']
if type(params['activation']) is str and params['activation'] in activations.activation_table:
self.activation = activations.activation_table[params['activation']]
else:
self.activation = params['activation']
def _forwardstep(self, X):
# encoder learns parameters
latent = self.encoder._feedforward(X)
self.mu = latent[:,0]
self.sigma = np.exp(latent[:,1])
# sample from gaussian with learned parameters
epsilon = np.random.normal(0, 1, size=(X.shape[0], self.latent_dim))
z_sample = self.mu[:,None] + np.sqrt(self.sigma)[:,None] * epsilon
# pass sampled vector through to decoder
X_hat = self.decoder._feedforward(z_sample)
return X_hat
def _kl_divergence_loss(self):
d_mu = self.mu
d_s2 = 1 - 1 / (2 * (self.sigma + 1e-6))
return np.vstack((d_mu, d_s2)).T
def _backwardstep(self, X, X_hat):
# propagate reconstruction error through decoder
n = len(self.decoder.weights)
delta = -1 * self.decoder.loss(X, X_hat)[1] * self.activation(self.decoder._z[n-1])[1]
decoder_weights = {n-1: self.decoder._z_act[n-1].T @ delta}
for i in reversed(range(len(self.decoder.weights)-1)):
delta = delta @ self.decoder.weights[i+1].T * self.activation(self.decoder._z[i])[1]
decoder_weights[i] = self.decoder._z_act[i].T @ delta
# add kl-divergence loss
m = len(self.encoder.weights)
kl_loss = self._kl_divergence_loss()
kl_delta = kl_loss * self.activation(self.encoder._z[m-1])[1]
delta = delta @ self.decoder.weights[0].T * self.activation(self.encoder._z[m-1])[1]
delta = delta + kl_delta
encoder_weights = {m-1: self.encoder._z_act[m-1].T @ delta}
# propagate kl error through encoder
for i in reversed(range(len(self.encoder.weights)-1)):
delta = delta @ self.encoder.weights[i+1].T * self.activation(self.encoder._z[i])[1]
encoder_weights[i] = self.encoder._z_act[i].T @ delta
return encoder_weights, decoder_weights
def learn(self, X):
X_batch = X
for i in range(self.iter):
if self.batch_size > 0 and self.batch_size < X.shape[0]:
k = np.random.choice(range(X.shape[0]), self.batch_size, replace=False)
X_batch = X[k,:]
X_hat = self._forwardstep(X_batch)
grad_encoder, grad_decoder = self._backwardstep(X_batch, X_hat)
for j in range(len(self.encoder.weights)):
self.encoder.weights[j] -= self.encoder.alpha * grad_encoder[j]
for j in range(len(self.decoder.weights)):
self.decoder.weights[j] -= self.decoder.alpha * grad_decoder[j]
def generate(self, z = None):
if not np.any(z):
z = np.random.normal(0, 1, size=(1, self.latent_dim))
return self.decoder.predict(z)
def encode_decode(self, X):
return self._forwardstep(X)
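# Illustrative usage sketch. The keys expected by Network's params dict beyond
# 'batch_size', 'iter' and 'activation', and the available activation names,
# are assumptions about the Network class defined elsewhere:
#
#   params = {'batch_size': 32, 'iter': 1000, 'activation': 'sigmoid'}
#   vae = VAE(dimensions=([784, 256], [256, 784]), latent_dim=2, params=params)
#   vae.learn(X_train)                        # X_train: (n_samples, 784) array
#   sample = vae.generate()                   # decode a random latent vector
#   recon = vae.encode_decode(X_train[:5])    # reconstruct a few inputs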
|
FaustineLi/Sta663-Project
|
vae/vae_class.py
|
Python
|
mit
| 3,993
|
[
"Gaussian"
] |
f3156f6fce5a1a9062a54d90a8069919df2f4b5b386d865991364026c6067323
|
import sys
import ast
import inspect
import textwrap
from collections import defaultdict
from itertools import count
from byteplay import Code, LOAD_GLOBAL, LOAD_CONST
from funcy import (
walk_keys, zipdict, merge, join, project, flip, ikeep,
post_processing, unwrap, none, cached_property, cut_prefix
)
__all__ = ('introspect', 'lookup', 'plookup', 'rebind')
def introspect(func):
if isinstance(func, str):
func = import_func(func)
return _introspect(func, set())
def _introspect(func, seen):
seen.add(func)
if inspect.isbuiltin(func) or not hasattr(func, '__module__') or not hasattr(func, '__name__'):
return {}
if isinstance(func, type):
methods = inspect.getmembers(func, predicate=inspect.ismethod)
return join(_introspect(meth, seen) for _, meth in methods if meth not in seen) or {}
if not hasattr(func, '__defaults__') or not hasattr(func, '__code__'):
return {}
func_name = _full_name(func)
consts = merge(get_defaults(func), get_assignments(func))
consts_spec = walk_keys(lambda k: '%s.%s' % (func_name, k), consts)
consts_spec.update({'%s.%s' % (func.__module__, name): value
for name, value in get_closure(func).items()})
# Recurse
callables = filter(callable, consts_spec.values())
recurse_specs = (_introspect(f, seen) for f in callables if f not in seen)
return merge(join(recurse_specs) or {}, consts_spec)
def lookup(func):
if isinstance(func, str):
ref = func
module, func = _resolve_ref(ref)
# Try to find class method
full_name = _full_name(func)
if full_name != ref:
attr = cut_prefix(ref, full_name).split('.')[0]
try:
func = getattr(func, attr)
except AttributeError:
pass
source_lines, lineno = inspect.getsourcelines(func)
source = ''.join(source_lines)
return '# %s:%d\n%s' % (inspect.getfile(func), lineno, source)
def plookup(func):
print lookup(func)
def rebind(func, bindings):
if isinstance(func, str):
func = import_func(func)
refs = set(bindings) | _get_refs(func)
# Collect attrs to rebind and module dependencies
attrs = defaultdict(set)
deps = defaultdict(set)
for ref in refs:
module, attr = _resolve_ref(ref)
if getattr(attr, '__module__', None) == module:
attrs[module].add(attr)
deps[module].update(_get_deps(attr))
# Rebind modules starting from most independent ones
rebound = {}
for module, module_deps in sorted(deps.items(), key=lambda (_, deps): len(deps)):
# Not all dependencies satisfied, the only possibility - cyclic dependency
if not module_deps <= set(rebound) | {module}:
raise ImportError('Cyclic dependency while rebinding %s' % module)
# No need to actually rebind anything
if not (module_deps | {module}) & set(ikeep(r'^\w+', bindings)):
rebound[module] = sys.modules[module].__dict__
continue
rebound[module] = _rebind_module(module, bindings, attrs=attrs[module], rebound=rebound)
if func.__module__ in rebound:
return rebound[func.__module__][func.__name__]
else:
return func
def _rebind_module(module, bindings, attrs=None, rebound=None):
rewriter = ConstRewriter(module, bindings)
global_vars = _rebound_globals(module, rebound)
global_vars.update(rewriter.local_bindings)
tree = ast.Module(body=[get_ast(f) for f in attrs if callable(f)])
tree = rewriter.visit(tree)
ast.fix_missing_locations(tree)
code = compile(tree, sys.modules[module].__file__, 'exec')
exec(code, global_vars)
return global_vars
@post_processing(dict)
def _rebound_globals(module, rebound):
for name, value in sys.modules[module].__dict__.items():
if inspect.ismodule(value):
yield name, rebound.get(value.__name__, value)
elif hasattr(value, '__module__') and value.__module__ in rebound \
and hasattr(value, '__name__'):
yield name, rebound[value.__module__].get(value.__name__, value)
else:
yield name, value
def _get_refs(func):
closure = get_closure(func)
deps = {func} | set(closure.values())
return {'%s.%s' % (f.__module__, f.__name__) for f in deps
if hasattr(f, '__module__') and hasattr(f, '__name__')}
def _get_deps(value):
if callable(value):
closure = get_closure(value).values()
return {f.__module__ for f in closure if hasattr(f, '__module__')} \
| {m.__name__ for m in closure if inspect.ismodule(m)}
else:
return set() # constant
def _resolve_ref(ref):
words = ref.split('.')
for tail in range(1, len(words)):
module_name = '.'.join(words[:-tail])
try:
module = import_module(module_name)
except ImportError:
pass
else:
attr = getattr(module, words[-tail])
return module.__name__, attr
else:
raise ImportError('Failed to resolve %s' % ref)
class ConstRewriter(ast.NodeTransformer):
def __init__(self, module, bindings):
self.bindings = bindings
self.ns = module.split('.')
def push_scope(self, name):
self.ns.append(name)
if hasattr(self, 'local_bindings'):
del self.local_bindings
def pop_scope(self):
self.ns.pop()
if hasattr(self, 'local_bindings'):
del self.local_bindings
@cached_property
def local_bindings(self):
prefix = ''.join('%s.' % name for name in self.ns)
return {key[len(prefix):]: value for key, value in self.bindings.items()
if key.startswith(prefix)}
def visit_FunctionDef(self, node):
self.push_scope(node.name)
node = self.generic_visit(node)
self.pop_scope()
return node
visit_ClassDef = visit_FunctionDef
def visit_Assign(self, node):
if not is_literal(node.value):
return node
to_rebind = [isinstance(target, ast.Name) and target.id in self.local_bindings
for target in node.targets]
if none(to_rebind):
return node
if any(to_rebind) and len(node.targets) > 1:
raise NotImplementedError('Rebinding in mass assignment is not supported')
node.value = literal_to_ast(self.local_bindings[node.targets[0].id])
return node
def visit_arguments(self, node):
kwargs = node.args[len(node.args)-len(node.defaults):]
for i, kwarg, default in zip(count(), kwargs, node.defaults):
if kwarg.id in self.local_bindings:
node.defaults[i] = literal_to_ast(self.local_bindings[kwarg.id])
return node
# Utilities
from importlib import import_module
def _full_name(func):
if hasattr(func, 'im_class'):
return '%s.%s.%s' % (func.__module__, func.im_class.__name__, func.__name__)
else:
return '%s.%s' % (func.__module__, func.__name__)
def import_func(full_name):
module_name, func_name = full_name.rsplit('.', 1)
module = import_module(module_name)
try:
return getattr(module, func_name)
except AttributeError:
raise ImportError("Module %s doesn't have function %s" % (module_name, func_name))
# Introspect arguments
def get_defaults(func):
func = unwrap(func)
return zipdict(get_kwargnames(func), func.__defaults__ or ())
def get_kwargnames(func):
if not func.__defaults__:
return ()
argnames = func.__code__.co_varnames[:func.__code__.co_argcount]
return argnames[len(argnames) - len(func.__defaults__):]
# Introspect assignments
@post_processing(dict)
def get_assignments(func):
tree = get_ast(func)
for node in ast.walk(tree):
if not isinstance(node, ast.Assign):
continue
try:
value = ast_eval(node.value)
except ValueError:
continue
for target in node.targets:
if isinstance(target, ast.Name):
yield target.id, value
# AST helpers
def get_ast(func):
source_lines, lineno = inspect.getsourcelines(func)
source = '\n' * (lineno - 1) + textwrap.dedent(''.join(source_lines))
return ast.parse(source, inspect.getfile(func), 'single').body[0]
NAMED_CONSTS = {'None': None, 'True': True, 'False': False}
CONST_NAMES = flip(NAMED_CONSTS)
def is_literal(node):
return isinstance(node, (ast.Str, ast.Num)) \
or isinstance(node, ast.Name) and node.id in NAMED_CONSTS \
or isinstance(node, (ast.List, ast.Tuple)) and all(map(is_literal, node.elts)) \
or isinstance(node, ast.Dict) and all(map(is_literal, node.keys + node.values))
def ast_eval(node):
"""
Faster ast.literal_eval() with better error messages.
Works only with nodes not strings.
"""
if isinstance(node, ast.Num):
return node.n
elif isinstance(node, ast.Str):
return node.s
elif isinstance(node, ast.Name) and node.id in NAMED_CONSTS:
return NAMED_CONSTS[node.id]
elif isinstance(node, ast.Tuple):
return tuple(ast_eval(n) for n in node.elts)
elif isinstance(node, ast.List):
return [ast_eval(n) for n in node.elts]
elif isinstance(node, ast.Dict):
return zipdict(ast_eval(node.keys), ast_eval(node.values))
else:
raise ValueError("Don't know how to eval %s" % node.__class__.__name__)
def literal_to_ast(value):
if isinstance(value, (int, float)):
return ast.Num(n=value)
elif isinstance(value, (str, unicode)):
return ast.Str(s=value)
elif value in CONST_NAMES:
return ast.Name(id=CONST_NAMES[value])
elif isinstance(value, tuple):
return ast.Tuple(elts=map(literal_to_ast, value))
elif isinstance(value, list):
return ast.List(elts=map(literal_to_ast, value))
elif isinstance(value, dict):
return ast.Dict(
keys=map(literal_to_ast, value.keys()),
values=map(literal_to_ast, value.values())
)
else:
raise ValueError("Can't convert %s to AST" % value)
# Introspect enclosed
def _code_names(code):
names = set()
for cmd, param in code.code:
if cmd == LOAD_GLOBAL:
names.add(param)
elif cmd == LOAD_CONST and isinstance(param, Code):
names.update(_code_names(param))
return names
def get_closure(func):
if isinstance(func, type):
methods = inspect.getmembers(func, predicate=inspect.ismethod)
return join(get_closure(meth.im_func) for _, meth in methods) or {}
code = Code.from_code(func.__code__)
names = _code_names(code)
return project(func.__globals__, names)
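# Illustrative usage (the module, function and constant names below are
# hypothetical):
#
#   introspect('myapp.handlers.process')
#   # -> {'myapp.handlers.process.timeout': 30, 'myapp.settings.RETRIES': 3, ...}
#
#   plookup('myapp.handlers.process')   # prints the function source and location
#
#   patched = rebind('myapp.handlers.process',
#                    {'myapp.handlers.process.timeout': 5})
#   patched(request)                    # runs with the rebound constant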
|
Suor/rebind
|
rebind.py
|
Python
|
bsd-2-clause
| 10,827
|
[
"VisIt"
] |
eddb73ba9d7d487d8e6015177d935290779577ed254f8f2c5ff592df5ff9a588
|
"""
Constructs added to allow easy switching between PyCSP implementations.
Copyright (c) 2009 John Markus Bjoerndalen <jmb@cs.uit.no>,
Brian Vinter <vinter@nbi.dk>, Rune M. Friborg <rune.m.friborg@gmail.com>.
See LICENSE.txt for licensing details (MIT License).
"""
from pycsp.greenlets.exceptions import *
def shutdown():
"""
Perform shutdown of non-active hosted channels. Wait
for active hosted channels to become non-active.
"""
return
def multiprocess(func=None, pycsp_host='', pycsp_port=None):
raise InfoException("multiprocess not available for greenlets. Use pycsp.parallel")
class MultiProcess(object):
def __init__(self, fn, *args, **kwargs):
raise InfoException("MultiProcess not available for greenlets. Use pycsp.parallel")
def sshprocess(func=None, pycsp_host='', pycsp_port=0, ssh_host='localhost', ssh_port=22, ssh_user=None, ssh_password=None, ssh_python='python'):
raise InfoException("sshprocess not available for greenlets. Use pycsp.parallel")
class SSHProcess(object):
def __init__(self, fn, *args, **kwargs):
raise InfoException("SSHProcess not available for greenlets. Use pycsp.parallel")
def clusterprocess(func=None, cluster_nodefile="$PBS_NODEFILE", cluster_pin=None, cluster_hint='blocked', cluster_ssh_port=22, cluster_python='python'):
raise InfoException("clusterprocess not available for greenlets. Use pycsp.parallel")
class ClusterProcess(object):
def __init__(self, fn, *args, **kwargs):
raise InfoException("ClusterProcess not available for greenlets. Use pycsp.parallel")
class ChannelSocketException(Exception):
def __init__(self, addr, msg):
self.msg = msg
self.addr = addr
def __str__(self):
return repr("%s %s" % (self.msg, self.addr))
SOCKETS_CONNECT_TIMEOUT = 0
SOCKETS_CONNECT_RETRY_DELAY = 1
SOCKETS_BIND_TIMEOUT = 2
SOCKETS_BIND_RETRY_DELAY = 3
PYCSP_PORT = 5
PYCSP_HOST = 6
SOCKETS_STRICT_MODE = 4
class Configuration(object):
"""
This is a dummy Configuration class, as the greenlets
implementation does not require any configuration.
"""
__instance = None # the unique instance
__conf = {}
def __new__(cls, *args, **kargs):
return cls.getInstance(cls, *args, **kargs)
def __init__(self):
pass
def getInstance(cls, *args, **kwargs):
'''Static method to have a reference to **THE UNIQUE** instance'''
if cls.__instance is None:
# Initialize **the unique** instance
cls.__instance = object.__new__(cls)
cls.__conf = {
SOCKETS_CONNECT_TIMEOUT:0,
SOCKETS_CONNECT_RETRY_DELAY:0,
SOCKETS_BIND_TIMEOUT:0,
SOCKETS_BIND_RETRY_DELAY:0,
PYCSP_PORT:0,
PYCSP_HOST:'',
SOCKETS_STRICT_MODE:False
}
return cls.__instance
getInstance = classmethod(getInstance)
def get(self, conf_id):
return self.__conf[conf_id]
def set(self, conf_id, value):
self.__conf[conf_id] = value
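# Example (illustrative only; in this greenlets backend the values are stored
# but have no effect, since no sockets are involved):
#
#   conf = Configuration()
#   conf.set(SOCKETS_CONNECT_TIMEOUT, 5)
#   conf.get(SOCKETS_CONNECT_TIMEOUT)   # -> 5
#   Configuration() is conf             # -> True, Configuration is a singleton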
|
runefriborg/pycsp
|
pycsp/greenlets/compat.py
|
Python
|
mit
| 3,124
|
[
"Brian"
] |
f8054f24233ae11c363afb3054eb2a6d35870fe19fe5b24b7cffa42b27095630
|
# -*- coding: utf-8 -*-
# pylint: disable=
"""
Created on Tue May 3 18:34:45 2016
@author: P. Rodriguez-Mier and T. Teijeiro
"""
import random
from operator import attrgetter
def bound_value(v, min_v, max_v):
return min(max(min_v, v), max_v)
def recombinate(pairs, gene_props, mutation_probability=0.1, effect=0.5):
offspring = []
for p1, p2 in pairs:
children_genes = {}
for gen in p1.genes.keys():
values = [p1.genes[gen], p2.genes[gen]]
children_genes[gen] = random.uniform(min(values), max(values))
if random.random() < mutation_probability:
min_v = gene_props[gen]['min']
max_v = gene_props[gen]['max']
v = children_genes[gen]
rv = random.choice([-1, 1]) * random.uniform(0, effect * (max_v - min_v))
new_v_gauss = bound_value(random.gauss(v, (max_v - min_v) * effect), min_v, max_v)
new_v = bound_value(v + rv, min_v, max_v)
# print '----- Mutating ' + gen + ' - RV: ' + str(rv) + ' - V: ' + str(v) + ' - New: ' + str(new_v) + ' - Gaussian: ' + str(new_v_gauss)
# rv = random.uniform(children_genes[gen], (max_v - min_v)*0.1)
children_genes[gen] = new_v
offspring.append(children_genes)
return offspring
def mating_pool(population, num_of_pairs=10, evaluator=attrgetter('fitness')):
evaluated_population = evaluate(population, evaluator)
return zip(roulette_wheel(evaluated_population, k=num_of_pairs),
roulette_wheel(evaluated_population, k=num_of_pairs))
def mating_pool_tournament(population, num_of_pairs=10, evaluator=attrgetter('fitness')):
pool = []
while len(pool) < num_of_pairs:
# Generate a pair for mating
p1 = tournament(population, evaluator)
p2 = tournament(population - {p1}, evaluator)
pool.append((p1, p2))
return pool
def evaluate(population, evaluator=attrgetter('fitness')):
return map(lambda x: (x, evaluator(x)), population)
def roulette_wheel(evaluated_population, k=10):
sum_fitness = sum([v[1] for v in evaluated_population])
selected = []
while len(selected) < k:
r = random.uniform(0, sum_fitness)
for i in evaluated_population:
r -= i[1]
if r < 0:
selected.append(i[0])
break
return selected
def tournament(population, evaluator, k=2):
sample = population if len(population) < k else random.sample(population, k)
return max(sample, key=evaluator)
if __name__ == '__main__':
pop = {15, 18, 30, 100, 120, 60, 35, 40, 42}
print mating_pool(pop, evaluator=lambda x: x)
print mating_pool_tournament(pop, evaluator=lambda x: x)
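# Illustrative sketch of recombinate(); the gene name and the parent objects are
# hypothetical (any object exposing a `genes` dict works):
#
#   from collections import namedtuple
#   Individual = namedtuple('Individual', ['genes', 'fitness'])
#   gene_props = {'speed': {'min': 0.0, 'max': 10.0}}
#   parents = [(Individual({'speed': 2.0}, 1.0), Individual({'speed': 6.0}, 3.0))]
#   children = recombinate(parents, gene_props, mutation_probability=0.1)
#   # -> [{'speed': <value between 2.0 and 6.0, possibly mutated within [0, 10]>}]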
|
citiususc/citius-invaders
|
python/evolution.py
|
Python
|
mit
| 2,775
|
[
"Gaussian"
] |
fd89e2a9d9f2bab3075ec216c298e435b8e7943babddb3095f46dc6a1fcad035
|
import contextlib
import collections
import pickle
import re
import sys
from unittest import TestCase, main, skipUnless, SkipTest, skip
from copy import copy, deepcopy
from typing import Any, NoReturn
from typing import TypeVar, AnyStr
from typing import T, KT, VT # Not in __all__.
from typing import Union, Optional, Literal
from typing import Tuple, List, MutableMapping
from typing import Callable
from typing import Generic, ClassVar, Final, final, Protocol
from typing import cast, runtime_checkable
from typing import get_type_hints
from typing import get_origin, get_args
from typing import no_type_check, no_type_check_decorator
from typing import Type
from typing import NewType
from typing import NamedTuple, TypedDict
from typing import IO, TextIO, BinaryIO
from typing import Pattern, Match
import abc
import typing
import weakref
import types
from test import mod_generics_cache
class BaseTestCase(TestCase):
def assertIsSubclass(self, cls, class_or_tuple, msg=None):
if not issubclass(cls, class_or_tuple):
message = '%r is not a subclass of %r' % (cls, class_or_tuple)
if msg is not None:
message += ' : %s' % msg
raise self.failureException(message)
def assertNotIsSubclass(self, cls, class_or_tuple, msg=None):
if issubclass(cls, class_or_tuple):
message = '%r is a subclass of %r' % (cls, class_or_tuple)
if msg is not None:
message += ' : %s' % msg
raise self.failureException(message)
def clear_caches(self):
for f in typing._cleanups:
f()
class Employee:
pass
class Manager(Employee):
pass
class Founder(Employee):
pass
class ManagingFounder(Manager, Founder):
pass
class AnyTests(BaseTestCase):
def test_any_instance_type_error(self):
with self.assertRaises(TypeError):
isinstance(42, Any)
def test_any_subclass_type_error(self):
with self.assertRaises(TypeError):
issubclass(Employee, Any)
with self.assertRaises(TypeError):
issubclass(Any, Employee)
def test_repr(self):
self.assertEqual(repr(Any), 'typing.Any')
def test_errors(self):
with self.assertRaises(TypeError):
issubclass(42, Any)
with self.assertRaises(TypeError):
Any[int] # Any is not a generic type.
def test_cannot_subclass(self):
with self.assertRaises(TypeError):
class A(Any):
pass
with self.assertRaises(TypeError):
class A(type(Any)):
pass
def test_cannot_instantiate(self):
with self.assertRaises(TypeError):
Any()
with self.assertRaises(TypeError):
type(Any)()
def test_any_works_with_alias(self):
# These expressions must simply not fail.
typing.Match[Any]
typing.Pattern[Any]
typing.IO[Any]
class NoReturnTests(BaseTestCase):
def test_noreturn_instance_type_error(self):
with self.assertRaises(TypeError):
isinstance(42, NoReturn)
def test_noreturn_subclass_type_error(self):
with self.assertRaises(TypeError):
issubclass(Employee, NoReturn)
with self.assertRaises(TypeError):
issubclass(NoReturn, Employee)
def test_repr(self):
self.assertEqual(repr(NoReturn), 'typing.NoReturn')
def test_not_generic(self):
with self.assertRaises(TypeError):
NoReturn[int]
def test_cannot_subclass(self):
with self.assertRaises(TypeError):
class A(NoReturn):
pass
with self.assertRaises(TypeError):
class A(type(NoReturn)):
pass
def test_cannot_instantiate(self):
with self.assertRaises(TypeError):
NoReturn()
with self.assertRaises(TypeError):
type(NoReturn)()
class TypeVarTests(BaseTestCase):
def test_basic_plain(self):
T = TypeVar('T')
# T equals itself.
self.assertEqual(T, T)
# T is an instance of TypeVar
self.assertIsInstance(T, TypeVar)
def test_typevar_instance_type_error(self):
T = TypeVar('T')
with self.assertRaises(TypeError):
isinstance(42, T)
def test_typevar_subclass_type_error(self):
T = TypeVar('T')
with self.assertRaises(TypeError):
issubclass(int, T)
with self.assertRaises(TypeError):
issubclass(T, int)
def test_constrained_error(self):
with self.assertRaises(TypeError):
X = TypeVar('X', int)
X
def test_union_unique(self):
X = TypeVar('X')
Y = TypeVar('Y')
self.assertNotEqual(X, Y)
self.assertEqual(Union[X], X)
self.assertNotEqual(Union[X], Union[X, Y])
self.assertEqual(Union[X, X], X)
self.assertNotEqual(Union[X, int], Union[X])
self.assertNotEqual(Union[X, int], Union[int])
self.assertEqual(Union[X, int].__args__, (X, int))
self.assertEqual(Union[X, int].__parameters__, (X,))
self.assertIs(Union[X, int].__origin__, Union)
def test_union_constrained(self):
A = TypeVar('A', str, bytes)
self.assertNotEqual(Union[A, str], Union[A])
def test_repr(self):
self.assertEqual(repr(T), '~T')
self.assertEqual(repr(KT), '~KT')
self.assertEqual(repr(VT), '~VT')
self.assertEqual(repr(AnyStr), '~AnyStr')
T_co = TypeVar('T_co', covariant=True)
self.assertEqual(repr(T_co), '+T_co')
T_contra = TypeVar('T_contra', contravariant=True)
self.assertEqual(repr(T_contra), '-T_contra')
def test_no_redefinition(self):
self.assertNotEqual(TypeVar('T'), TypeVar('T'))
self.assertNotEqual(TypeVar('T', int, str), TypeVar('T', int, str))
def test_cannot_subclass_vars(self):
with self.assertRaises(TypeError):
class V(TypeVar('T')):
pass
def test_cannot_subclass_var_itself(self):
with self.assertRaises(TypeError):
class V(TypeVar):
pass
def test_cannot_instantiate_vars(self):
with self.assertRaises(TypeError):
TypeVar('A')()
def test_bound_errors(self):
with self.assertRaises(TypeError):
TypeVar('X', bound=42)
with self.assertRaises(TypeError):
TypeVar('X', str, float, bound=Employee)
def test_no_bivariant(self):
with self.assertRaises(ValueError):
TypeVar('T', covariant=True, contravariant=True)
class UnionTests(BaseTestCase):
def test_basics(self):
u = Union[int, float]
self.assertNotEqual(u, Union)
def test_subclass_error(self):
with self.assertRaises(TypeError):
issubclass(int, Union)
with self.assertRaises(TypeError):
issubclass(Union, int)
with self.assertRaises(TypeError):
issubclass(int, Union[int, str])
with self.assertRaises(TypeError):
issubclass(Union[int, str], int)
def test_union_any(self):
u = Union[Any]
self.assertEqual(u, Any)
u1 = Union[int, Any]
u2 = Union[Any, int]
u3 = Union[Any, object]
self.assertEqual(u1, u2)
self.assertNotEqual(u1, Any)
self.assertNotEqual(u2, Any)
self.assertNotEqual(u3, Any)
def test_union_object(self):
u = Union[object]
self.assertEqual(u, object)
u1 = Union[int, object]
u2 = Union[object, int]
self.assertEqual(u1, u2)
self.assertNotEqual(u1, object)
self.assertNotEqual(u2, object)
def test_unordered(self):
u1 = Union[int, float]
u2 = Union[float, int]
self.assertEqual(u1, u2)
def test_single_class_disappears(self):
t = Union[Employee]
self.assertIs(t, Employee)
def test_base_class_kept(self):
u = Union[Employee, Manager]
self.assertNotEqual(u, Employee)
self.assertIn(Employee, u.__args__)
self.assertIn(Manager, u.__args__)
def test_union_union(self):
u = Union[int, float]
v = Union[u, Employee]
self.assertEqual(v, Union[int, float, Employee])
def test_repr(self):
self.assertEqual(repr(Union), 'typing.Union')
u = Union[Employee, int]
self.assertEqual(repr(u), 'typing.Union[%s.Employee, int]' % __name__)
u = Union[int, Employee]
self.assertEqual(repr(u), 'typing.Union[int, %s.Employee]' % __name__)
T = TypeVar('T')
u = Union[T, int][int]
self.assertEqual(repr(u), repr(int))
u = Union[List[int], int]
self.assertEqual(repr(u), 'typing.Union[typing.List[int], int]')
def test_cannot_subclass(self):
with self.assertRaises(TypeError):
class C(Union):
pass
with self.assertRaises(TypeError):
class C(type(Union)):
pass
with self.assertRaises(TypeError):
class C(Union[int, str]):
pass
def test_cannot_instantiate(self):
with self.assertRaises(TypeError):
Union()
with self.assertRaises(TypeError):
type(Union)()
u = Union[int, float]
with self.assertRaises(TypeError):
u()
with self.assertRaises(TypeError):
type(u)()
def test_union_generalization(self):
self.assertFalse(Union[str, typing.Iterable[int]] == str)
self.assertFalse(Union[str, typing.Iterable[int]] == typing.Iterable[int])
self.assertIn(str, Union[str, typing.Iterable[int]].__args__)
self.assertIn(typing.Iterable[int], Union[str, typing.Iterable[int]].__args__)
def test_union_compare_other(self):
self.assertNotEqual(Union, object)
self.assertNotEqual(Union, Any)
self.assertNotEqual(ClassVar, Union)
self.assertNotEqual(Optional, Union)
self.assertNotEqual([None], Optional)
self.assertNotEqual(Optional, typing.Mapping)
self.assertNotEqual(Optional[typing.MutableMapping], Union)
def test_optional(self):
o = Optional[int]
u = Union[int, None]
self.assertEqual(o, u)
def test_empty(self):
with self.assertRaises(TypeError):
Union[()]
def test_union_instance_type_error(self):
with self.assertRaises(TypeError):
isinstance(42, Union[int, str])
def test_no_eval_union(self):
u = Union[int, str]
def f(x: u): ...
self.assertIs(get_type_hints(f)['x'], u)
def test_function_repr_union(self):
def fun() -> int: ...
self.assertEqual(repr(Union[fun, int]), 'typing.Union[fun, int]')
def test_union_str_pattern(self):
# Shouldn't crash; see http://bugs.python.org/issue25390
A = Union[str, Pattern]
A
def test_etree(self):
# See https://github.com/python/typing/issues/229
# (Only relevant for Python 2.)
try:
from xml.etree.cElementTree import Element
except ImportError:
raise SkipTest("cElementTree not found")
Union[Element, str] # Shouldn't crash
def Elem(*args):
return Element(*args)
Union[Elem, str] # Nor should this
class TupleTests(BaseTestCase):
def test_basics(self):
with self.assertRaises(TypeError):
issubclass(Tuple, Tuple[int, str])
with self.assertRaises(TypeError):
issubclass(tuple, Tuple[int, str])
class TP(tuple): ...
self.assertTrue(issubclass(tuple, Tuple))
self.assertTrue(issubclass(TP, Tuple))
def test_equality(self):
self.assertEqual(Tuple[int], Tuple[int])
self.assertEqual(Tuple[int, ...], Tuple[int, ...])
self.assertNotEqual(Tuple[int], Tuple[int, int])
self.assertNotEqual(Tuple[int], Tuple[int, ...])
def test_tuple_subclass(self):
class MyTuple(tuple):
pass
self.assertTrue(issubclass(MyTuple, Tuple))
def test_tuple_instance_type_error(self):
with self.assertRaises(TypeError):
isinstance((0, 0), Tuple[int, int])
self.assertIsInstance((0, 0), Tuple)
def test_repr(self):
self.assertEqual(repr(Tuple), 'typing.Tuple')
self.assertEqual(repr(Tuple[()]), 'typing.Tuple[()]')
self.assertEqual(repr(Tuple[int, float]), 'typing.Tuple[int, float]')
self.assertEqual(repr(Tuple[int, ...]), 'typing.Tuple[int, ...]')
def test_errors(self):
with self.assertRaises(TypeError):
issubclass(42, Tuple)
with self.assertRaises(TypeError):
issubclass(42, Tuple[int])
class CallableTests(BaseTestCase):
def test_self_subclass(self):
with self.assertRaises(TypeError):
self.assertTrue(issubclass(type(lambda x: x), Callable[[int], int]))
self.assertTrue(issubclass(type(lambda x: x), Callable))
def test_eq_hash(self):
self.assertEqual(Callable[[int], int], Callable[[int], int])
self.assertEqual(len({Callable[[int], int], Callable[[int], int]}), 1)
self.assertNotEqual(Callable[[int], int], Callable[[int], str])
self.assertNotEqual(Callable[[int], int], Callable[[str], int])
self.assertNotEqual(Callable[[int], int], Callable[[int, int], int])
self.assertNotEqual(Callable[[int], int], Callable[[], int])
self.assertNotEqual(Callable[[int], int], Callable)
def test_cannot_instantiate(self):
with self.assertRaises(TypeError):
Callable()
with self.assertRaises(TypeError):
type(Callable)()
c = Callable[[int], str]
with self.assertRaises(TypeError):
c()
with self.assertRaises(TypeError):
type(c)()
def test_callable_wrong_forms(self):
with self.assertRaises(TypeError):
Callable[[...], int]
with self.assertRaises(TypeError):
Callable[(), int]
with self.assertRaises(TypeError):
Callable[[()], int]
with self.assertRaises(TypeError):
Callable[[int, 1], 2]
with self.assertRaises(TypeError):
Callable[int]
def test_callable_instance_works(self):
def f():
pass
self.assertIsInstance(f, Callable)
self.assertNotIsInstance(None, Callable)
def test_callable_instance_type_error(self):
def f():
pass
with self.assertRaises(TypeError):
self.assertIsInstance(f, Callable[[], None])
with self.assertRaises(TypeError):
self.assertIsInstance(f, Callable[[], Any])
with self.assertRaises(TypeError):
self.assertNotIsInstance(None, Callable[[], None])
with self.assertRaises(TypeError):
self.assertNotIsInstance(None, Callable[[], Any])
def test_repr(self):
ct0 = Callable[[], bool]
self.assertEqual(repr(ct0), 'typing.Callable[[], bool]')
ct2 = Callable[[str, float], int]
self.assertEqual(repr(ct2), 'typing.Callable[[str, float], int]')
ctv = Callable[..., str]
self.assertEqual(repr(ctv), 'typing.Callable[..., str]')
def test_callable_with_ellipsis(self):
def foo(a: Callable[..., T]):
pass
self.assertEqual(get_type_hints(foo, globals(), locals()),
{'a': Callable[..., T]})
def test_ellipsis_in_generic(self):
# Shouldn't crash; see https://github.com/python/typing/issues/259
typing.List[Callable[..., str]]
class LiteralTests(BaseTestCase):
def test_basics(self):
# All of these are allowed.
Literal[1]
Literal[1, 2, 3]
Literal["x", "y", "z"]
Literal[None]
Literal[True]
Literal[1, "2", False]
Literal[Literal[1, 2], Literal[4, 5]]
Literal[b"foo", u"bar"]
def test_illegal_parameters_do_not_raise_runtime_errors(self):
# Type checkers should reject these types, but we do not
# raise errors at runtime to maintain maximum flexibility.
Literal[int]
Literal[3j + 2, ..., ()]
Literal[{"foo": 3, "bar": 4}]
Literal[T]
def test_literals_inside_other_types(self):
List[Literal[1, 2, 3]]
List[Literal[("foo", "bar", "baz")]]
def test_repr(self):
self.assertEqual(repr(Literal[1]), "typing.Literal[1]")
self.assertEqual(repr(Literal[1, True, "foo"]), "typing.Literal[1, True, 'foo']")
self.assertEqual(repr(Literal[int]), "typing.Literal[int]")
self.assertEqual(repr(Literal), "typing.Literal")
self.assertEqual(repr(Literal[None]), "typing.Literal[None]")
def test_cannot_init(self):
with self.assertRaises(TypeError):
Literal()
with self.assertRaises(TypeError):
Literal[1]()
with self.assertRaises(TypeError):
type(Literal)()
with self.assertRaises(TypeError):
type(Literal[1])()
def test_no_isinstance_or_issubclass(self):
with self.assertRaises(TypeError):
isinstance(1, Literal[1])
with self.assertRaises(TypeError):
isinstance(int, Literal[1])
with self.assertRaises(TypeError):
issubclass(1, Literal[1])
with self.assertRaises(TypeError):
issubclass(int, Literal[1])
def test_no_subclassing(self):
with self.assertRaises(TypeError):
class Foo(Literal[1]): pass
with self.assertRaises(TypeError):
class Bar(Literal): pass
def test_no_multiple_subscripts(self):
with self.assertRaises(TypeError):
Literal[1][1]
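# Sample generic mapping classes reused by GenericTests below.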
XK = TypeVar('XK', str, bytes)
XV = TypeVar('XV')
class SimpleMapping(Generic[XK, XV]):
def __getitem__(self, key: XK) -> XV:
...
def __setitem__(self, key: XK, value: XV):
...
def get(self, key: XK, default: XV = None) -> XV:
...
class MySimpleMapping(SimpleMapping[XK, XV]):
def __init__(self):
self.store = {}
def __getitem__(self, key: str):
return self.store[key]
def __setitem__(self, key: str, value):
self.store[key] = value
def get(self, key: str, default=None):
try:
return self.store[key]
except KeyError:
return default
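# Sample protocols and concrete classes reused by ProtocolTests below.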
class Coordinate(Protocol):
x: int
y: int
@runtime_checkable
class Point(Coordinate, Protocol):
label: str
class MyPoint:
x: int
y: int
label: str
class XAxis(Protocol):
x: int
class YAxis(Protocol):
y: int
@runtime_checkable
class Position(XAxis, YAxis, Protocol):
pass
@runtime_checkable
class Proto(Protocol):
attr: int
def meth(self, arg: str) -> int:
...
class Concrete(Proto):
pass
class Other:
attr: int = 1
def meth(self, arg: str) -> int:
if arg == 'this':
return 1
return 0
class NT(NamedTuple):
x: int
y: int
@runtime_checkable
class HasCallProtocol(Protocol):
__call__: typing.Callable
class ProtocolTests(BaseTestCase):
def test_basic_protocol(self):
@runtime_checkable
class P(Protocol):
def meth(self):
pass
class C: pass
class D:
def meth(self):
pass
def f():
pass
self.assertIsSubclass(D, P)
self.assertIsInstance(D(), P)
self.assertNotIsSubclass(C, P)
self.assertNotIsInstance(C(), P)
self.assertNotIsSubclass(types.FunctionType, P)
self.assertNotIsInstance(f, P)
def test_everything_implements_empty_protocol(self):
@runtime_checkable
class Empty(Protocol):
pass
class C:
pass
def f():
pass
for thing in (object, type, tuple, C, types.FunctionType):
self.assertIsSubclass(thing, Empty)
for thing in (object(), 1, (), typing, f):
self.assertIsInstance(thing, Empty)
def test_function_implements_protocol(self):
def f():
pass
self.assertIsInstance(f, HasCallProtocol)
def test_no_inheritance_from_nominal(self):
class C: pass
class BP(Protocol): pass
with self.assertRaises(TypeError):
class P(C, Protocol):
pass
with self.assertRaises(TypeError):
class P(Protocol, C):
pass
with self.assertRaises(TypeError):
class P(BP, C, Protocol):
pass
class D(BP, C): pass
class E(C, BP): pass
self.assertNotIsInstance(D(), E)
self.assertNotIsInstance(E(), D)
def test_no_instantiation(self):
class P(Protocol): pass
with self.assertRaises(TypeError):
P()
class C(P): pass
self.assertIsInstance(C(), C)
T = TypeVar('T')
class PG(Protocol[T]): pass
with self.assertRaises(TypeError):
PG()
with self.assertRaises(TypeError):
PG[int]()
with self.assertRaises(TypeError):
PG[T]()
class CG(PG[T]): pass
self.assertIsInstance(CG[int](), CG)
def test_cannot_instantiate_abstract(self):
@runtime_checkable
class P(Protocol):
@abc.abstractmethod
def ameth(self) -> int:
raise NotImplementedError
class B(P):
pass
class C(B):
def ameth(self) -> int:
return 26
with self.assertRaises(TypeError):
B()
self.assertIsInstance(C(), P)
def test_subprotocols_extending(self):
class P1(Protocol):
def meth1(self):
pass
@runtime_checkable
class P2(P1, Protocol):
def meth2(self):
pass
class C:
def meth1(self):
pass
def meth2(self):
pass
class C1:
def meth1(self):
pass
class C2:
def meth2(self):
pass
self.assertNotIsInstance(C1(), P2)
self.assertNotIsInstance(C2(), P2)
self.assertNotIsSubclass(C1, P2)
self.assertNotIsSubclass(C2, P2)
self.assertIsInstance(C(), P2)
self.assertIsSubclass(C, P2)
def test_subprotocols_merging(self):
class P1(Protocol):
def meth1(self):
pass
class P2(Protocol):
def meth2(self):
pass
@runtime_checkable
class P(P1, P2, Protocol):
pass
class C:
def meth1(self):
pass
def meth2(self):
pass
class C1:
def meth1(self):
pass
class C2:
def meth2(self):
pass
self.assertNotIsInstance(C1(), P)
self.assertNotIsInstance(C2(), P)
self.assertNotIsSubclass(C1, P)
self.assertNotIsSubclass(C2, P)
self.assertIsInstance(C(), P)
self.assertIsSubclass(C, P)
def test_protocols_issubclass(self):
T = TypeVar('T')
@runtime_checkable
class P(Protocol):
def x(self): ...
@runtime_checkable
class PG(Protocol[T]):
def x(self): ...
class BadP(Protocol):
def x(self): ...
class BadPG(Protocol[T]):
def x(self): ...
class C:
def x(self): ...
self.assertIsSubclass(C, P)
self.assertIsSubclass(C, PG)
self.assertIsSubclass(BadP, PG)
with self.assertRaises(TypeError):
issubclass(C, PG[T])
with self.assertRaises(TypeError):
issubclass(C, PG[C])
with self.assertRaises(TypeError):
issubclass(C, BadP)
with self.assertRaises(TypeError):
issubclass(C, BadPG)
with self.assertRaises(TypeError):
issubclass(P, PG[T])
with self.assertRaises(TypeError):
issubclass(PG, PG[int])
def test_protocols_issubclass_non_callable(self):
class C:
x = 1
@runtime_checkable
class PNonCall(Protocol):
x = 1
with self.assertRaises(TypeError):
issubclass(C, PNonCall)
self.assertIsInstance(C(), PNonCall)
PNonCall.register(C)
with self.assertRaises(TypeError):
issubclass(C, PNonCall)
self.assertIsInstance(C(), PNonCall)
# check that non-protocol subclasses are not affected
class D(PNonCall): ...
self.assertNotIsSubclass(C, D)
self.assertNotIsInstance(C(), D)
D.register(C)
self.assertIsSubclass(C, D)
self.assertIsInstance(C(), D)
with self.assertRaises(TypeError):
issubclass(D, PNonCall)
def test_protocols_isinstance(self):
T = TypeVar('T')
@runtime_checkable
class P(Protocol):
def meth(x): ...
@runtime_checkable
class PG(Protocol[T]):
def meth(x): ...
class BadP(Protocol):
def meth(x): ...
class BadPG(Protocol[T]):
def meth(x): ...
class C:
def meth(x): ...
self.assertIsInstance(C(), P)
self.assertIsInstance(C(), PG)
with self.assertRaises(TypeError):
isinstance(C(), PG[T])
with self.assertRaises(TypeError):
isinstance(C(), PG[C])
with self.assertRaises(TypeError):
isinstance(C(), BadP)
with self.assertRaises(TypeError):
isinstance(C(), BadPG)
def test_protocols_isinstance_py36(self):
class APoint:
def __init__(self, x, y, label):
self.x = x
self.y = y
self.label = label
class BPoint:
label = 'B'
def __init__(self, x, y):
self.x = x
self.y = y
class C:
def __init__(self, attr):
self.attr = attr
def meth(self, arg):
return 0
class Bad: pass
self.assertIsInstance(APoint(1, 2, 'A'), Point)
self.assertIsInstance(BPoint(1, 2), Point)
self.assertNotIsInstance(MyPoint(), Point)
self.assertIsInstance(BPoint(1, 2), Position)
self.assertIsInstance(Other(), Proto)
self.assertIsInstance(Concrete(), Proto)
self.assertIsInstance(C(42), Proto)
self.assertNotIsInstance(Bad(), Proto)
self.assertNotIsInstance(Bad(), Point)
self.assertNotIsInstance(Bad(), Position)
self.assertNotIsInstance(Bad(), Concrete)
self.assertNotIsInstance(Other(), Concrete)
self.assertIsInstance(NT(1, 2), Position)
def test_protocols_isinstance_init(self):
T = TypeVar('T')
@runtime_checkable
class P(Protocol):
x = 1
@runtime_checkable
class PG(Protocol[T]):
x = 1
class C:
def __init__(self, x):
self.x = x
self.assertIsInstance(C(1), P)
self.assertIsInstance(C(1), PG)
def test_protocol_checks_after_subscript(self):
class P(Protocol[T]): pass
class C(P[T]): pass
class Other1: pass
class Other2: pass
CA = C[Any]
self.assertNotIsInstance(Other1(), C)
self.assertNotIsSubclass(Other2, C)
class D1(C[Any]): pass
class D2(C[Any]): pass
CI = C[int]
self.assertIsInstance(D1(), C)
self.assertIsSubclass(D2, C)
def test_protocols_support_register(self):
@runtime_checkable
class P(Protocol):
x = 1
class PM(Protocol):
def meth(self): pass
class D(PM): pass
class C: pass
D.register(C)
P.register(C)
self.assertIsInstance(C(), P)
self.assertIsInstance(C(), D)
def test_none_on_non_callable_doesnt_block_implementation(self):
@runtime_checkable
class P(Protocol):
x = 1
class A:
x = 1
class B(A):
x = None
class C:
def __init__(self):
self.x = None
self.assertIsInstance(B(), P)
self.assertIsInstance(C(), P)
def test_none_on_callable_blocks_implementation(self):
@runtime_checkable
class P(Protocol):
def x(self): ...
class A:
def x(self): ...
class B(A):
x = None
class C:
def __init__(self):
self.x = None
self.assertNotIsInstance(B(), P)
self.assertNotIsInstance(C(), P)
def test_non_protocol_subclasses(self):
class P(Protocol):
x = 1
@runtime_checkable
class PR(Protocol):
def meth(self): pass
class NonP(P):
x = 1
class NonPR(PR): pass
class C:
x = 1
class D:
def meth(self): pass
self.assertNotIsInstance(C(), NonP)
self.assertNotIsInstance(D(), NonPR)
self.assertNotIsSubclass(C, NonP)
self.assertNotIsSubclass(D, NonPR)
self.assertIsInstance(NonPR(), PR)
self.assertIsSubclass(NonPR, PR)
def test_custom_subclasshook(self):
class P(Protocol):
x = 1
class OKClass: pass
class BadClass:
x = 1
class C(P):
@classmethod
def __subclasshook__(cls, other):
return other.__name__.startswith("OK")
self.assertIsInstance(OKClass(), C)
self.assertNotIsInstance(BadClass(), C)
self.assertIsSubclass(OKClass, C)
self.assertNotIsSubclass(BadClass, C)
def test_issubclass_fails_correctly(self):
@runtime_checkable
class P(Protocol):
x = 1
class C: pass
with self.assertRaises(TypeError):
issubclass(C(), P)
def test_defining_generic_protocols(self):
T = TypeVar('T')
S = TypeVar('S')
@runtime_checkable
class PR(Protocol[T, S]):
def meth(self): pass
class P(PR[int, T], Protocol[T]):
y = 1
with self.assertRaises(TypeError):
PR[int]
with self.assertRaises(TypeError):
P[int, str]
with self.assertRaises(TypeError):
PR[int, 1]
with self.assertRaises(TypeError):
PR[int, ClassVar]
class C(PR[int, T]): pass
self.assertIsInstance(C[str](), C)
def test_defining_generic_protocols_old_style(self):
T = TypeVar('T')
S = TypeVar('S')
@runtime_checkable
class PR(Protocol, Generic[T, S]):
def meth(self): pass
class P(PR[int, str], Protocol):
y = 1
with self.assertRaises(TypeError):
issubclass(PR[int, str], PR)
self.assertIsSubclass(P, PR)
with self.assertRaises(TypeError):
PR[int]
with self.assertRaises(TypeError):
PR[int, 1]
class P1(Protocol, Generic[T]):
def bar(self, x: T) -> str: ...
class P2(Generic[T], Protocol):
def bar(self, x: T) -> str: ...
@runtime_checkable
class PSub(P1[str], Protocol):
x = 1
class Test:
x = 1
def bar(self, x: str) -> str:
return x
self.assertIsInstance(Test(), PSub)
with self.assertRaises(TypeError):
PR[int, ClassVar]
def test_init_called(self):
T = TypeVar('T')
class P(Protocol[T]): pass
class C(P[T]):
def __init__(self):
self.test = 'OK'
self.assertEqual(C[int]().test, 'OK')
def test_protocols_bad_subscripts(self):
T = TypeVar('T')
S = TypeVar('S')
with self.assertRaises(TypeError):
class P(Protocol[T, T]): pass
with self.assertRaises(TypeError):
class P(Protocol[int]): pass
with self.assertRaises(TypeError):
class P(Protocol[T], Protocol[S]): pass
with self.assertRaises(TypeError):
class P(typing.Mapping[T, S], Protocol[T]): pass
def test_generic_protocols_repr(self):
T = TypeVar('T')
S = TypeVar('S')
class P(Protocol[T, S]): pass
self.assertTrue(repr(P[T, S]).endswith('P[~T, ~S]'))
self.assertTrue(repr(P[int, str]).endswith('P[int, str]'))
def test_generic_protocols_eq(self):
T = TypeVar('T')
S = TypeVar('S')
class P(Protocol[T, S]): pass
self.assertEqual(P, P)
self.assertEqual(P[int, T], P[int, T])
self.assertEqual(P[T, T][Tuple[T, S]][int, str],
P[Tuple[int, str], Tuple[int, str]])
def test_generic_protocols_special_from_generic(self):
T = TypeVar('T')
class P(Protocol[T]): pass
self.assertEqual(P.__parameters__, (T,))
self.assertEqual(P[int].__parameters__, ())
self.assertEqual(P[int].__args__, (int,))
self.assertIs(P[int].__origin__, P)
def test_generic_protocols_special_from_protocol(self):
@runtime_checkable
class PR(Protocol):
x = 1
class P(Protocol):
def meth(self):
pass
T = TypeVar('T')
class PG(Protocol[T]):
x = 1
def meth(self):
pass
self.assertTrue(P._is_protocol)
self.assertTrue(PR._is_protocol)
self.assertTrue(PG._is_protocol)
self.assertFalse(P._is_runtime_protocol)
self.assertTrue(PR._is_runtime_protocol)
self.assertTrue(PG[int]._is_protocol)
self.assertEqual(typing._get_protocol_attrs(P), {'meth'})
self.assertEqual(typing._get_protocol_attrs(PR), {'x'})
self.assertEqual(frozenset(typing._get_protocol_attrs(PG)),
frozenset({'x', 'meth'}))
def test_no_runtime_deco_on_nominal(self):
with self.assertRaises(TypeError):
@runtime_checkable
class C: pass
class Proto(Protocol):
x = 1
with self.assertRaises(TypeError):
@runtime_checkable
class Concrete(Proto):
pass
def test_none_treated_correctly(self):
@runtime_checkable
class P(Protocol):
x = None # type: int
class B(object): pass
self.assertNotIsInstance(B(), P)
class C:
x = 1
class D:
x = None
self.assertIsInstance(C(), P)
self.assertIsInstance(D(), P)
class CI:
def __init__(self):
self.x = 1
class DI:
def __init__(self):
self.x = None
self.assertIsInstance(C(), P)
self.assertIsInstance(D(), P)
def test_protocols_in_unions(self):
class P(Protocol):
x = None # type: int
Alias = typing.Union[typing.Iterable, P]
Alias2 = typing.Union[P, typing.Iterable]
self.assertEqual(Alias, Alias2)
def test_protocols_pickleable(self):
global P, CP # pickle wants to reference the class by name
T = TypeVar('T')
@runtime_checkable
class P(Protocol[T]):
x = 1
class CP(P[int]):
pass
c = CP()
c.foo = 42
c.bar = 'abc'
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
z = pickle.dumps(c, proto)
x = pickle.loads(z)
self.assertEqual(x.foo, 42)
self.assertEqual(x.bar, 'abc')
self.assertEqual(x.x, 1)
self.assertEqual(x.__dict__, {'foo': 42, 'bar': 'abc'})
s = pickle.dumps(P)
D = pickle.loads(s)
class E:
x = 1
self.assertIsInstance(E(), D)
def test_supports_int(self):
self.assertIsSubclass(int, typing.SupportsInt)
self.assertNotIsSubclass(str, typing.SupportsInt)
def test_supports_float(self):
self.assertIsSubclass(float, typing.SupportsFloat)
self.assertNotIsSubclass(str, typing.SupportsFloat)
def test_supports_complex(self):
# Note: complex itself doesn't have __complex__.
class C:
def __complex__(self):
return 0j
self.assertIsSubclass(C, typing.SupportsComplex)
self.assertNotIsSubclass(str, typing.SupportsComplex)
def test_supports_bytes(self):
# Note: bytes itself doesn't have __bytes__.
class B:
def __bytes__(self):
return b''
self.assertIsSubclass(B, typing.SupportsBytes)
self.assertNotIsSubclass(str, typing.SupportsBytes)
def test_supports_abs(self):
self.assertIsSubclass(float, typing.SupportsAbs)
self.assertIsSubclass(int, typing.SupportsAbs)
self.assertNotIsSubclass(str, typing.SupportsAbs)
def test_supports_round(self):
issubclass(float, typing.SupportsRound)
self.assertIsSubclass(float, typing.SupportsRound)
self.assertIsSubclass(int, typing.SupportsRound)
self.assertNotIsSubclass(str, typing.SupportsRound)
def test_reversible(self):
self.assertIsSubclass(list, typing.Reversible)
self.assertNotIsSubclass(int, typing.Reversible)
def test_supports_index(self):
self.assertIsSubclass(int, typing.SupportsIndex)
self.assertNotIsSubclass(str, typing.SupportsIndex)
def test_bundled_protocol_instance_works(self):
self.assertIsInstance(0, typing.SupportsAbs)
class C1(typing.SupportsInt):
def __int__(self) -> int:
return 42
class C2(C1):
pass
c = C2()
self.assertIsInstance(c, C1)
def test_collections_protocols_allowed(self):
@runtime_checkable
class Custom(collections.abc.Iterable, Protocol):
def close(self): ...
class A: pass
class B:
def __iter__(self):
return []
def close(self):
return 0
self.assertIsSubclass(B, Custom)
self.assertNotIsSubclass(A, Custom)
def test_builtin_protocol_whitelist(self):
with self.assertRaises(TypeError):
class CustomProtocol(TestCase, Protocol):
pass
class CustomContextManager(typing.ContextManager, Protocol):
pass
class GenericTests(BaseTestCase):
def test_basics(self):
X = SimpleMapping[str, Any]
self.assertEqual(X.__parameters__, ())
with self.assertRaises(TypeError):
X[str]
with self.assertRaises(TypeError):
X[str, str]
Y = SimpleMapping[XK, str]
self.assertEqual(Y.__parameters__, (XK,))
Y[str]
with self.assertRaises(TypeError):
Y[str, str]
SM1 = SimpleMapping[str, int]
with self.assertRaises(TypeError):
issubclass(SM1, SimpleMapping)
self.assertIsInstance(SM1(), SimpleMapping)
def test_generic_errors(self):
T = TypeVar('T')
S = TypeVar('S')
with self.assertRaises(TypeError):
Generic[T]()
with self.assertRaises(TypeError):
Generic[T][T]
with self.assertRaises(TypeError):
Generic[T][S]
with self.assertRaises(TypeError):
class C(Generic[T], Generic[T]): ...
with self.assertRaises(TypeError):
isinstance([], List[int])
with self.assertRaises(TypeError):
issubclass(list, List[int])
with self.assertRaises(TypeError):
class NewGeneric(Generic): ...
with self.assertRaises(TypeError):
class MyGeneric(Generic[T], Generic[S]): ...
with self.assertRaises(TypeError):
class MyGeneric(List[T], Generic[S]): ...
def test_init(self):
T = TypeVar('T')
S = TypeVar('S')
with self.assertRaises(TypeError):
Generic[T, T]
with self.assertRaises(TypeError):
Generic[T, S, T]
def test_init_subclass(self):
class X(typing.Generic[T]):
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.attr = 42
class Y(X):
pass
self.assertEqual(Y.attr, 42)
with self.assertRaises(AttributeError):
X.attr
X.attr = 1
Y.attr = 2
class Z(Y):
pass
class W(X[int]):
pass
self.assertEqual(Y.attr, 2)
self.assertEqual(Z.attr, 42)
self.assertEqual(W.attr, 42)
def test_repr(self):
self.assertEqual(repr(SimpleMapping),
f"<class '{__name__}.SimpleMapping'>")
self.assertEqual(repr(MySimpleMapping),
f"<class '{__name__}.MySimpleMapping'>")
def test_chain_repr(self):
T = TypeVar('T')
S = TypeVar('S')
class C(Generic[T]):
pass
X = C[Tuple[S, T]]
self.assertEqual(X, C[Tuple[S, T]])
self.assertNotEqual(X, C[Tuple[T, S]])
Y = X[T, int]
self.assertEqual(Y, X[T, int])
self.assertNotEqual(Y, X[S, int])
self.assertNotEqual(Y, X[T, str])
Z = Y[str]
self.assertEqual(Z, Y[str])
self.assertNotEqual(Z, Y[int])
self.assertNotEqual(Z, Y[T])
self.assertTrue(str(Z).endswith(
'.C[typing.Tuple[str, int]]'))
def test_new_repr(self):
T = TypeVar('T')
U = TypeVar('U', covariant=True)
S = TypeVar('S')
self.assertEqual(repr(List), 'typing.List')
self.assertEqual(repr(List[T]), 'typing.List[~T]')
self.assertEqual(repr(List[U]), 'typing.List[+U]')
self.assertEqual(repr(List[S][T][int]), 'typing.List[int]')
self.assertEqual(repr(List[int]), 'typing.List[int]')
def test_new_repr_complex(self):
T = TypeVar('T')
TS = TypeVar('TS')
self.assertEqual(repr(typing.Mapping[T, TS][TS, T]), 'typing.Mapping[~TS, ~T]')
self.assertEqual(repr(List[Tuple[T, TS]][int, T]),
'typing.List[typing.Tuple[int, ~T]]')
self.assertEqual(
repr(List[Tuple[T, T]][List[int]]),
'typing.List[typing.Tuple[typing.List[int], typing.List[int]]]'
)
def test_new_repr_bare(self):
T = TypeVar('T')
self.assertEqual(repr(Generic[T]), 'typing.Generic[~T]')
self.assertEqual(repr(typing.Protocol[T]), 'typing.Protocol[~T]')
class C(typing.Dict[Any, Any]): ...
# this line should just work
repr(C.__mro__)
def test_dict(self):
T = TypeVar('T')
class B(Generic[T]):
pass
b = B()
b.foo = 42
self.assertEqual(b.__dict__, {'foo': 42})
class C(B[int]):
pass
c = C()
c.bar = 'abc'
self.assertEqual(c.__dict__, {'bar': 'abc'})
def test_subscripted_generics_as_proxies(self):
T = TypeVar('T')
class C(Generic[T]):
x = 'def'
self.assertEqual(C[int].x, 'def')
self.assertEqual(C[C[int]].x, 'def')
C[C[int]].x = 'changed'
self.assertEqual(C.x, 'changed')
self.assertEqual(C[str].x, 'changed')
C[List[str]].z = 'new'
self.assertEqual(C.z, 'new')
self.assertEqual(C[Tuple[int]].z, 'new')
self.assertEqual(C().x, 'changed')
self.assertEqual(C[Tuple[str]]().z, 'new')
class D(C[T]):
pass
self.assertEqual(D[int].x, 'changed')
self.assertEqual(D.z, 'new')
D.z = 'from derived z'
D[int].x = 'from derived x'
self.assertEqual(C.x, 'changed')
self.assertEqual(C[int].z, 'new')
self.assertEqual(D.x, 'from derived x')
self.assertEqual(D[str].z, 'from derived z')
def test_abc_registry_kept(self):
T = TypeVar('T')
class C(collections.abc.Mapping, Generic[T]): ...
C.register(int)
self.assertIsInstance(1, C)
C[int]
self.assertIsInstance(1, C)
C._abc_registry_clear()
C._abc_caches_clear() # To keep refleak hunting mode clean
def test_false_subclasses(self):
class MyMapping(MutableMapping[str, str]): pass
self.assertNotIsInstance({}, MyMapping)
self.assertNotIsSubclass(dict, MyMapping)
def test_abc_bases(self):
class MM(MutableMapping[str, str]):
def __getitem__(self, k):
return None
def __setitem__(self, k, v):
pass
def __delitem__(self, k):
pass
def __iter__(self):
return iter(())
def __len__(self):
return 0
# this should just work
MM().update()
self.assertIsInstance(MM(), collections.abc.MutableMapping)
self.assertIsInstance(MM(), MutableMapping)
self.assertNotIsInstance(MM(), List)
self.assertNotIsInstance({}, MM)
def test_multiple_bases(self):
class MM1(MutableMapping[str, str], collections.abc.MutableMapping):
pass
class MM2(collections.abc.MutableMapping, MutableMapping[str, str]):
pass
self.assertEqual(MM2.__bases__, (collections.abc.MutableMapping, Generic))
def test_orig_bases(self):
T = TypeVar('T')
class C(typing.Dict[str, T]): ...
self.assertEqual(C.__orig_bases__, (typing.Dict[str, T],))
def test_naive_runtime_checks(self):
def naive_dict_check(obj, tp):
# Check if a dictionary conforms to Dict type
if len(tp.__parameters__) > 0:
raise NotImplementedError
if tp.__args__:
KT, VT = tp.__args__
return all(
isinstance(k, KT) and isinstance(v, VT)
for k, v in obj.items()
)
self.assertTrue(naive_dict_check({'x': 1}, typing.Dict[str, int]))
self.assertFalse(naive_dict_check({1: 'x'}, typing.Dict[str, int]))
with self.assertRaises(NotImplementedError):
naive_dict_check({1: 'x'}, typing.Dict[str, T])
def naive_generic_check(obj, tp):
# Check if an instance conforms to the generic class
if not hasattr(obj, '__orig_class__'):
raise NotImplementedError
return obj.__orig_class__ == tp
class Node(Generic[T]): ...
self.assertTrue(naive_generic_check(Node[int](), Node[int]))
self.assertFalse(naive_generic_check(Node[str](), Node[int]))
self.assertFalse(naive_generic_check(Node[str](), List))
with self.assertRaises(NotImplementedError):
naive_generic_check([1, 2, 3], Node[int])
def naive_list_base_check(obj, tp):
# Check if list conforms to a List subclass
return all(isinstance(x, tp.__orig_bases__[0].__args__[0])
for x in obj)
class C(List[int]): ...
self.assertTrue(naive_list_base_check([1, 2, 3], C))
self.assertFalse(naive_list_base_check(['a', 'b'], C))
def test_multi_subscr_base(self):
T = TypeVar('T')
U = TypeVar('U')
V = TypeVar('V')
class C(List[T][U][V]): ...
class D(C, List[T][U][V]): ...
self.assertEqual(C.__parameters__, (V,))
self.assertEqual(D.__parameters__, (V,))
self.assertEqual(C[int].__parameters__, ())
self.assertEqual(D[int].__parameters__, ())
self.assertEqual(C[int].__args__, (int,))
self.assertEqual(D[int].__args__, (int,))
self.assertEqual(C.__bases__, (list, Generic))
self.assertEqual(D.__bases__, (C, list, Generic))
self.assertEqual(C.__orig_bases__, (List[T][U][V],))
self.assertEqual(D.__orig_bases__, (C, List[T][U][V]))
def test_subscript_meta(self):
T = TypeVar('T')
class Meta(type): ...
self.assertEqual(Type[Meta], Type[Meta])
self.assertEqual(Union[T, int][Meta], Union[Meta, int])
self.assertEqual(Callable[..., Meta].__args__, (Ellipsis, Meta))
def test_generic_hashes(self):
class A(Generic[T]):
...
class B(Generic[T]):
class A(Generic[T]):
...
self.assertEqual(A, A)
self.assertEqual(mod_generics_cache.A[str], mod_generics_cache.A[str])
self.assertEqual(B.A, B.A)
self.assertEqual(mod_generics_cache.B.A[B.A[str]],
mod_generics_cache.B.A[B.A[str]])
self.assertNotEqual(A, B.A)
self.assertNotEqual(A, mod_generics_cache.A)
self.assertNotEqual(A, mod_generics_cache.B.A)
self.assertNotEqual(B.A, mod_generics_cache.A)
self.assertNotEqual(B.A, mod_generics_cache.B.A)
self.assertNotEqual(A[str], B.A[str])
self.assertNotEqual(A[List[Any]], B.A[List[Any]])
self.assertNotEqual(A[str], mod_generics_cache.A[str])
self.assertNotEqual(A[str], mod_generics_cache.B.A[str])
self.assertNotEqual(B.A[int], mod_generics_cache.A[int])
self.assertNotEqual(B.A[List[Any]], mod_generics_cache.B.A[List[Any]])
self.assertNotEqual(Tuple[A[str]], Tuple[B.A[str]])
self.assertNotEqual(Tuple[A[List[Any]]], Tuple[B.A[List[Any]]])
self.assertNotEqual(Union[str, A[str]], Union[str, mod_generics_cache.A[str]])
self.assertNotEqual(Union[A[str], A[str]],
Union[A[str], mod_generics_cache.A[str]])
self.assertNotEqual(typing.FrozenSet[A[str]],
typing.FrozenSet[mod_generics_cache.B.A[str]])
if sys.version_info[:2] > (3, 2):
self.assertTrue(repr(Tuple[A[str]]).endswith('<locals>.A[str]]'))
self.assertTrue(repr(Tuple[B.A[str]]).endswith('<locals>.B.A[str]]'))
self.assertTrue(repr(Tuple[mod_generics_cache.A[str]])
.endswith('mod_generics_cache.A[str]]'))
self.assertTrue(repr(Tuple[mod_generics_cache.B.A[str]])
.endswith('mod_generics_cache.B.A[str]]'))
def test_extended_generic_rules_eq(self):
T = TypeVar('T')
U = TypeVar('U')
self.assertEqual(Tuple[T, T][int], Tuple[int, int])
self.assertEqual(typing.Iterable[Tuple[T, T]][T], typing.Iterable[Tuple[T, T]])
with self.assertRaises(TypeError):
Tuple[T, int][()]
with self.assertRaises(TypeError):
Tuple[T, U][T, ...]
self.assertEqual(Union[T, int][int], int)
self.assertEqual(Union[T, U][int, Union[int, str]], Union[int, str])
class Base: ...
class Derived(Base): ...
self.assertEqual(Union[T, Base][Union[Base, Derived]], Union[Base, Derived])
with self.assertRaises(TypeError):
Union[T, int][1]
self.assertEqual(Callable[[T], T][KT], Callable[[KT], KT])
self.assertEqual(Callable[..., List[T]][int], Callable[..., List[int]])
with self.assertRaises(TypeError):
Callable[[T], U][..., int]
with self.assertRaises(TypeError):
Callable[[T], U][[], int]
def test_extended_generic_rules_repr(self):
T = TypeVar('T')
self.assertEqual(repr(Union[Tuple, Callable]).replace('typing.', ''),
'Union[Tuple, Callable]')
self.assertEqual(repr(Union[Tuple, Tuple[int]]).replace('typing.', ''),
'Union[Tuple, Tuple[int]]')
self.assertEqual(repr(Callable[..., Optional[T]][int]).replace('typing.', ''),
'Callable[..., Union[int, NoneType]]')
self.assertEqual(repr(Callable[[], List[T]][int]).replace('typing.', ''),
'Callable[[], List[int]]')
def test_generic_forward_ref(self):
def foobar(x: List[List['CC']]): ...
class CC: ...
self.assertEqual(
get_type_hints(foobar, globals(), locals()),
{'x': List[List[CC]]}
)
T = TypeVar('T')
AT = Tuple[T, ...]
def barfoo(x: AT): ...
self.assertIs(get_type_hints(barfoo, globals(), locals())['x'], AT)
CT = Callable[..., List[T]]
def barfoo2(x: CT): ...
self.assertIs(get_type_hints(barfoo2, globals(), locals())['x'], CT)
def test_extended_generic_rules_subclassing(self):
class T1(Tuple[T, KT]): ...
class T2(Tuple[T, ...]): ...
class C1(Callable[[T], T]): ...
class C2(Callable[..., int]):
def __call__(self):
return None
self.assertEqual(T1.__parameters__, (T, KT))
self.assertEqual(T1[int, str].__args__, (int, str))
self.assertEqual(T1[int, T].__origin__, T1)
self.assertEqual(T2.__parameters__, (T,))
with self.assertRaises(TypeError):
T1[int]
with self.assertRaises(TypeError):
T2[int, str]
self.assertEqual(repr(C1[int]).split('.')[-1], 'C1[int]')
self.assertEqual(C2.__parameters__, ())
self.assertIsInstance(C2(), collections.abc.Callable)
self.assertIsSubclass(C2, collections.abc.Callable)
self.assertIsSubclass(C1, collections.abc.Callable)
self.assertIsInstance(T1(), tuple)
self.assertIsSubclass(T2, tuple)
with self.assertRaises(TypeError):
issubclass(Tuple[int, ...], typing.Sequence)
with self.assertRaises(TypeError):
issubclass(Tuple[int, ...], typing.Iterable)
def test_fail_with_bare_union(self):
with self.assertRaises(TypeError):
List[Union]
with self.assertRaises(TypeError):
Tuple[Optional]
with self.assertRaises(TypeError):
ClassVar[ClassVar]
with self.assertRaises(TypeError):
List[ClassVar[int]]
def test_fail_with_bare_generic(self):
T = TypeVar('T')
with self.assertRaises(TypeError):
List[Generic]
with self.assertRaises(TypeError):
Tuple[Generic[T]]
with self.assertRaises(TypeError):
List[typing.Protocol]
def test_type_erasure_special(self):
T = TypeVar('T')
# this is the only test that checks type caching
self.clear_caches()
class MyTup(Tuple[T, T]): ...
self.assertIs(MyTup[int]().__class__, MyTup)
self.assertIs(MyTup[int]().__orig_class__, MyTup[int])
class MyCall(Callable[..., T]):
def __call__(self): return None
self.assertIs(MyCall[T]().__class__, MyCall)
self.assertIs(MyCall[T]().__orig_class__, MyCall[T])
class MyDict(typing.Dict[T, T]): ...
self.assertIs(MyDict[int]().__class__, MyDict)
self.assertIs(MyDict[int]().__orig_class__, MyDict[int])
class MyDef(typing.DefaultDict[str, T]): ...
self.assertIs(MyDef[int]().__class__, MyDef)
self.assertIs(MyDef[int]().__orig_class__, MyDef[int])
# ChainMap was added in 3.3
if sys.version_info >= (3, 3):
class MyChain(typing.ChainMap[str, T]): ...
self.assertIs(MyChain[int]().__class__, MyChain)
self.assertIs(MyChain[int]().__orig_class__, MyChain[int])
def test_all_repr_eq_any(self):
objs = (getattr(typing, el) for el in typing.__all__)
for obj in objs:
self.assertNotEqual(repr(obj), '')
self.assertEqual(obj, obj)
if getattr(obj, '__parameters__', None) and len(obj.__parameters__) == 1:
self.assertEqual(obj[Any].__args__, (Any,))
if isinstance(obj, type):
for base in obj.__mro__:
self.assertNotEqual(repr(base), '')
self.assertEqual(base, base)
def test_pickle(self):
global C # pickle wants to reference the class by name
T = TypeVar('T')
class B(Generic[T]):
pass
class C(B[int]):
pass
c = C()
c.foo = 42
c.bar = 'abc'
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
z = pickle.dumps(c, proto)
x = pickle.loads(z)
self.assertEqual(x.foo, 42)
self.assertEqual(x.bar, 'abc')
self.assertEqual(x.__dict__, {'foo': 42, 'bar': 'abc'})
samples = [Any, Union, Tuple, Callable, ClassVar,
Union[int, str], ClassVar[List], Tuple[int, ...], Callable[[str], bytes],
typing.DefaultDict, typing.FrozenSet[int]]
for s in samples:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
z = pickle.dumps(s, proto)
x = pickle.loads(z)
self.assertEqual(s, x)
more_samples = [List, typing.Iterable, typing.Type, List[int],
typing.Type[typing.Mapping], typing.AbstractSet[Tuple[int, str]]]
for s in more_samples:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
z = pickle.dumps(s, proto)
x = pickle.loads(z)
self.assertEqual(s, x)
def test_copy_and_deepcopy(self):
T = TypeVar('T')
class Node(Generic[T]): ...
things = [Union[T, int], Tuple[T, int], Callable[..., T], Callable[[int], int],
Tuple[Any, Any], Node[T], Node[int], Node[Any], typing.Iterable[T],
typing.Iterable[Any], typing.Iterable[int], typing.Dict[int, str],
typing.Dict[T, Any], ClassVar[int], ClassVar[List[T]], Tuple['T', 'T'],
Union['T', int], List['T'], typing.Mapping['T', int]]
for t in things + [Any]:
self.assertEqual(t, copy(t))
self.assertEqual(t, deepcopy(t))
def test_immutability_by_copy_and_pickle(self):
# Special forms like Union, Any, etc., generic aliases to containers like List,
# Mapping, etc., and type variables are considered immutable by copy and pickle.
global TP, TPB, TPV # for pickle
TP = TypeVar('TP')
TPB = TypeVar('TPB', bound=int)
TPV = TypeVar('TPV', bytes, str)
for X in [TP, TPB, TPV, List, typing.Mapping, ClassVar, typing.Iterable,
Union, Any, Tuple, Callable]:
self.assertIs(copy(X), X)
self.assertIs(deepcopy(X), X)
self.assertIs(pickle.loads(pickle.dumps(X)), X)
# Check that local type variables are copyable.
TL = TypeVar('TL')
TLB = TypeVar('TLB', bound=int)
TLV = TypeVar('TLV', bytes, str)
for X in [TL, TLB, TLV]:
self.assertIs(copy(X), X)
self.assertIs(deepcopy(X), X)
def test_copy_generic_instances(self):
T = TypeVar('T')
class C(Generic[T]):
def __init__(self, attr: T) -> None:
self.attr = attr
c = C(42)
self.assertEqual(copy(c).attr, 42)
self.assertEqual(deepcopy(c).attr, 42)
self.assertIsNot(copy(c), c)
self.assertIsNot(deepcopy(c), c)
c.attr = 1
self.assertEqual(copy(c).attr, 1)
self.assertEqual(deepcopy(c).attr, 1)
ci = C[int](42)
self.assertEqual(copy(ci).attr, 42)
self.assertEqual(deepcopy(ci).attr, 42)
self.assertIsNot(copy(ci), ci)
self.assertIsNot(deepcopy(ci), ci)
ci.attr = 1
self.assertEqual(copy(ci).attr, 1)
self.assertEqual(deepcopy(ci).attr, 1)
self.assertEqual(ci.__orig_class__, C[int])
def test_weakref_all(self):
T = TypeVar('T')
things = [Any, Union[T, int], Callable[..., T], Tuple[Any, Any],
Optional[List[int]], typing.Mapping[int, str],
typing.re.Match[bytes], typing.Iterable['whatever']]
for t in things:
self.assertEqual(weakref.ref(t)(), t)
def test_parameterized_slots(self):
T = TypeVar('T')
class C(Generic[T]):
__slots__ = ('potato',)
c = C()
c_int = C[int]()
c.potato = 0
c_int.potato = 0
with self.assertRaises(AttributeError):
c.tomato = 0
with self.assertRaises(AttributeError):
c_int.tomato = 0
def foo(x: C['C']): ...
self.assertEqual(get_type_hints(foo, globals(), locals())['x'], C[C])
self.assertEqual(copy(C[int]), deepcopy(C[int]))
def test_parameterized_slots_dict(self):
T = TypeVar('T')
class D(Generic[T]):
__slots__ = {'banana': 42}
d = D()
d_int = D[int]()
d.banana = 'yes'
d_int.banana = 'yes'
with self.assertRaises(AttributeError):
d.foobar = 'no'
with self.assertRaises(AttributeError):
d_int.foobar = 'no'
def test_errors(self):
with self.assertRaises(TypeError):
B = SimpleMapping[XK, Any]
class C(Generic[B]):
pass
def test_repr_2(self):
class C(Generic[T]):
pass
self.assertEqual(C.__module__, __name__)
self.assertEqual(C.__qualname__,
'GenericTests.test_repr_2.<locals>.C')
X = C[int]
self.assertEqual(X.__module__, __name__)
self.assertEqual(repr(X).split('.')[-1], 'C[int]')
class Y(C[int]):
pass
self.assertEqual(Y.__module__, __name__)
self.assertEqual(Y.__qualname__,
'GenericTests.test_repr_2.<locals>.Y')
def test_eq_1(self):
self.assertEqual(Generic, Generic)
self.assertEqual(Generic[T], Generic[T])
self.assertNotEqual(Generic[KT], Generic[VT])
def test_eq_2(self):
class A(Generic[T]):
pass
class B(Generic[T]):
pass
self.assertEqual(A, A)
self.assertNotEqual(A, B)
self.assertEqual(A[T], A[T])
self.assertNotEqual(A[T], B[T])
def test_multiple_inheritance(self):
class A(Generic[T, VT]):
pass
class B(Generic[KT, T]):
pass
class C(A[T, VT], Generic[VT, T, KT], B[KT, T]):
pass
self.assertEqual(C.__parameters__, (VT, T, KT))
def test_multiple_inheritance_special(self):
S = TypeVar('S')
class B(Generic[S]): ...
class C(List[int], B): ...
self.assertEqual(C.__mro__, (C, list, B, Generic, object))
def test_init_subclass_super_called(self):
class FinalException(Exception):
pass
class Final:
def __init_subclass__(cls, **kwargs) -> None:
for base in cls.__bases__:
if base is not Final and issubclass(base, Final):
raise FinalException(base)
super().__init_subclass__(**kwargs)
class Test(Generic[T], Final):
pass
with self.assertRaises(FinalException):
class Subclass(Test):
pass
with self.assertRaises(FinalException):
class Subclass(Test[int]):
pass
def test_nested(self):
G = Generic
class Visitor(G[T]):
a = None
def set(self, a: T):
self.a = a
def get(self):
return self.a
def visit(self) -> T:
return self.a
V = Visitor[typing.List[int]]
class IntListVisitor(V):
def append(self, x: int):
self.a.append(x)
a = IntListVisitor()
a.set([])
a.append(1)
a.append(42)
self.assertEqual(a.get(), [1, 42])
def test_type_erasure(self):
T = TypeVar('T')
class Node(Generic[T]):
def __init__(self, label: T,
left: 'Node[T]' = None,
right: 'Node[T]' = None):
self.label = label # type: T
self.left = left # type: Optional[Node[T]]
self.right = right # type: Optional[Node[T]]
def foo(x: T):
a = Node(x)
b = Node[T](x)
c = Node[Any](x)
self.assertIs(type(a), Node)
self.assertIs(type(b), Node)
self.assertIs(type(c), Node)
self.assertEqual(a.label, x)
self.assertEqual(b.label, x)
self.assertEqual(c.label, x)
foo(42)
def test_implicit_any(self):
T = TypeVar('T')
class C(Generic[T]):
pass
class D(C):
pass
self.assertEqual(D.__parameters__, ())
with self.assertRaises(Exception):
D[int]
with self.assertRaises(Exception):
D[Any]
with self.assertRaises(Exception):
D[T]
def test_new_with_args(self):
class A(Generic[T]):
pass
class B:
def __new__(cls, arg):
# call object
obj = super().__new__(cls)
obj.arg = arg
return obj
# mro: C, A, Generic, B, object
class C(A, B):
pass
c = C('foo')
self.assertEqual(c.arg, 'foo')
def test_new_with_args2(self):
class A:
def __init__(self, arg):
self.from_a = arg
# call object
super().__init__()
# mro: C, Generic, A, object
class C(Generic[T], A):
def __init__(self, arg):
self.from_c = arg
# call Generic
super().__init__(arg)
c = C('foo')
self.assertEqual(c.from_a, 'foo')
self.assertEqual(c.from_c, 'foo')
def test_new_no_args(self):
class A(Generic[T]):
pass
with self.assertRaises(TypeError):
A('foo')
class B:
def __new__(cls):
# call object
obj = super().__new__(cls)
obj.from_b = 'b'
return obj
# mro: C, A, Generic, B, object
class C(A, B):
def __init__(self, arg):
self.arg = arg
def __new__(cls, arg):
# call A
obj = super().__new__(cls)
obj.from_c = 'c'
return obj
c = C('foo')
self.assertEqual(c.arg, 'foo')
self.assertEqual(c.from_b, 'b')
self.assertEqual(c.from_c, 'c')
class ClassVarTests(BaseTestCase):
def test_basics(self):
with self.assertRaises(TypeError):
ClassVar[1]
with self.assertRaises(TypeError):
ClassVar[int, str]
with self.assertRaises(TypeError):
ClassVar[int][str]
def test_repr(self):
self.assertEqual(repr(ClassVar), 'typing.ClassVar')
cv = ClassVar[int]
self.assertEqual(repr(cv), 'typing.ClassVar[int]')
cv = ClassVar[Employee]
self.assertEqual(repr(cv), 'typing.ClassVar[%s.Employee]' % __name__)
def test_cannot_subclass(self):
with self.assertRaises(TypeError):
class C(type(ClassVar)):
pass
with self.assertRaises(TypeError):
class C(type(ClassVar[int])):
pass
def test_cannot_init(self):
with self.assertRaises(TypeError):
ClassVar()
with self.assertRaises(TypeError):
type(ClassVar)()
with self.assertRaises(TypeError):
type(ClassVar[Optional[int]])()
def test_no_isinstance(self):
with self.assertRaises(TypeError):
isinstance(1, ClassVar[int])
with self.assertRaises(TypeError):
issubclass(int, ClassVar)
class FinalTests(BaseTestCase):
def test_basics(self):
Final[int] # OK
with self.assertRaises(TypeError):
Final[1]
with self.assertRaises(TypeError):
Final[int, str]
with self.assertRaises(TypeError):
Final[int][str]
with self.assertRaises(TypeError):
Optional[Final[int]]
def test_repr(self):
self.assertEqual(repr(Final), 'typing.Final')
cv = Final[int]
self.assertEqual(repr(cv), 'typing.Final[int]')
cv = Final[Employee]
self.assertEqual(repr(cv), 'typing.Final[%s.Employee]' % __name__)
def test_cannot_subclass(self):
with self.assertRaises(TypeError):
class C(type(Final)):
pass
with self.assertRaises(TypeError):
class C(type(Final[int])):
pass
def test_cannot_init(self):
with self.assertRaises(TypeError):
Final()
with self.assertRaises(TypeError):
type(Final)()
with self.assertRaises(TypeError):
type(Final[Optional[int]])()
def test_no_isinstance(self):
with self.assertRaises(TypeError):
isinstance(1, Final[int])
with self.assertRaises(TypeError):
issubclass(int, Final)
def test_final_unmodified(self):
def func(x): ...
self.assertIs(func, final(func))
class CastTests(BaseTestCase):
def test_basics(self):
self.assertEqual(cast(int, 42), 42)
self.assertEqual(cast(float, 42), 42)
self.assertIs(type(cast(float, 42)), int)
self.assertEqual(cast(Any, 42), 42)
self.assertEqual(cast(list, 42), 42)
self.assertEqual(cast(Union[str, float], 42), 42)
self.assertEqual(cast(AnyStr, 42), 42)
self.assertEqual(cast(None, 42), 42)
def test_errors(self):
# Bogus calls are not expected to fail.
cast(42, 42)
cast('hello', 42)
class ForwardRefTests(BaseTestCase):
def test_basics(self):
class Node(Generic[T]):
def __init__(self, label: T):
self.label = label
self.left = self.right = None
def add_both(self,
left: 'Optional[Node[T]]',
right: 'Node[T]' = None,
stuff: int = None,
blah=None):
self.left = left
self.right = right
def add_left(self, node: Optional['Node[T]']):
self.add_both(node, None)
def add_right(self, node: 'Node[T]' = None):
self.add_both(None, node)
t = Node[int]
both_hints = get_type_hints(t.add_both, globals(), locals())
self.assertEqual(both_hints['left'], Optional[Node[T]])
self.assertEqual(both_hints['right'], Optional[Node[T]])
self.assertEqual(both_hints['left'], both_hints['right'])
self.assertEqual(both_hints['stuff'], Optional[int])
self.assertNotIn('blah', both_hints)
left_hints = get_type_hints(t.add_left, globals(), locals())
self.assertEqual(left_hints['node'], Optional[Node[T]])
right_hints = get_type_hints(t.add_right, globals(), locals())
self.assertEqual(right_hints['node'], Optional[Node[T]])
def test_forwardref_instance_type_error(self):
fr = typing.ForwardRef('int')
with self.assertRaises(TypeError):
isinstance(42, fr)
def test_forwardref_subclass_type_error(self):
fr = typing.ForwardRef('int')
with self.assertRaises(TypeError):
issubclass(int, fr)
def test_forward_equality(self):
fr = typing.ForwardRef('int')
self.assertEqual(fr, typing.ForwardRef('int'))
self.assertNotEqual(List['int'], List[int])
def test_forward_equality_gth(self):
c1 = typing.ForwardRef('C')
c1_gth = typing.ForwardRef('C')
c2 = typing.ForwardRef('C')
c2_gth = typing.ForwardRef('C')
class C:
pass
def foo(a: c1_gth, b: c2_gth):
pass
self.assertEqual(get_type_hints(foo, globals(), locals()), {'a': C, 'b': C})
self.assertEqual(c1, c2)
self.assertEqual(c1, c1_gth)
self.assertEqual(c1_gth, c2_gth)
self.assertEqual(List[c1], List[c1_gth])
self.assertNotEqual(List[c1], List[C])
self.assertNotEqual(List[c1_gth], List[C])
self.assertEqual(Union[c1, c1_gth], Union[c1])
self.assertEqual(Union[c1, c1_gth, int], Union[c1, int])
def test_forward_equality_hash(self):
c1 = typing.ForwardRef('int')
c1_gth = typing.ForwardRef('int')
c2 = typing.ForwardRef('int')
c2_gth = typing.ForwardRef('int')
def foo(a: c1_gth, b: c2_gth):
pass
get_type_hints(foo, globals(), locals())
self.assertEqual(hash(c1), hash(c2))
self.assertEqual(hash(c1_gth), hash(c2_gth))
self.assertEqual(hash(c1), hash(c1_gth))
def test_forward_equality_namespace(self):
class A:
pass
def namespace1():
a = typing.ForwardRef('A')
def fun(x: a):
pass
get_type_hints(fun, globals(), locals())
return a
def namespace2():
a = typing.ForwardRef('A')
class A:
pass
def fun(x: a):
pass
get_type_hints(fun, globals(), locals())
return a
self.assertEqual(namespace1(), namespace1())
self.assertNotEqual(namespace1(), namespace2())
def test_forward_repr(self):
self.assertEqual(repr(List['int']), "typing.List[ForwardRef('int')]")
def test_union_forward(self):
def foo(a: Union['T']):
pass
self.assertEqual(get_type_hints(foo, globals(), locals()),
{'a': Union[T]})
def test_tuple_forward(self):
def foo(a: Tuple['T']):
pass
self.assertEqual(get_type_hints(foo, globals(), locals()),
{'a': Tuple[T]})
def test_forward_recursion_actually(self):
def namespace1():
a = typing.ForwardRef('A')
A = a
def fun(x: a): pass
ret = get_type_hints(fun, globals(), locals())
return a
def namespace2():
a = typing.ForwardRef('A')
A = a
def fun(x: a): pass
ret = get_type_hints(fun, globals(), locals())
return a
def cmp(o1, o2):
return o1 == o2
r1 = namespace1()
r2 = namespace2()
self.assertIsNot(r1, r2)
self.assertRaises(RecursionError, cmp, r1, r2)
def test_union_forward_recursion(self):
ValueList = List['Value']
Value = Union[str, ValueList]
class C:
foo: List[Value]
class D:
foo: Union[Value, ValueList]
class E:
foo: Union[List[Value], ValueList]
class F:
foo: Union[Value, List[Value], ValueList]
self.assertEqual(get_type_hints(C, globals(), locals()), get_type_hints(C, globals(), locals()))
self.assertEqual(get_type_hints(C, globals(), locals()),
{'foo': List[Union[str, List[Union[str, List['Value']]]]]})
self.assertEqual(get_type_hints(D, globals(), locals()),
{'foo': Union[str, List[Union[str, List['Value']]]]})
self.assertEqual(get_type_hints(E, globals(), locals()),
{'foo': Union[
List[Union[str, List[Union[str, List['Value']]]]],
List[Union[str, List['Value']]]
]
})
self.assertEqual(get_type_hints(F, globals(), locals()),
{'foo': Union[
str,
List[Union[str, List['Value']]],
List[Union[str, List[Union[str, List['Value']]]]]
]
})
def test_callable_forward(self):
def foo(a: Callable[['T'], 'T']):
pass
self.assertEqual(get_type_hints(foo, globals(), locals()),
{'a': Callable[[T], T]})
def test_callable_with_ellipsis_forward(self):
def foo(a: 'Callable[..., T]'):
pass
self.assertEqual(get_type_hints(foo, globals(), locals()),
{'a': Callable[..., T]})
def test_syntax_error(self):
with self.assertRaises(SyntaxError):
Generic['/T']
def test_delayed_syntax_error(self):
def foo(a: 'Node[T'):
pass
with self.assertRaises(SyntaxError):
get_type_hints(foo)
def test_type_error(self):
def foo(a: Tuple['42']):
pass
with self.assertRaises(TypeError):
get_type_hints(foo)
def test_name_error(self):
def foo(a: 'Noode[T]'):
pass
with self.assertRaises(NameError):
get_type_hints(foo, locals())
def test_no_type_check(self):
@no_type_check
def foo(a: 'whatevers') -> {}:
pass
th = get_type_hints(foo)
self.assertEqual(th, {})
def test_no_type_check_class(self):
@no_type_check
class C:
def foo(a: 'whatevers') -> {}:
pass
cth = get_type_hints(C.foo)
self.assertEqual(cth, {})
ith = get_type_hints(C().foo)
self.assertEqual(ith, {})
def test_no_type_check_no_bases(self):
class C:
def meth(self, x: int): ...
@no_type_check
class D(C):
c = C
# verify that @no_type_check never affects bases
self.assertEqual(get_type_hints(C.meth), {'x': int})
def test_no_type_check_forward_ref_as_string(self):
class C:
foo: typing.ClassVar[int] = 7
class D:
foo: ClassVar[int] = 7
class E:
foo: 'typing.ClassVar[int]' = 7
class F:
foo: 'ClassVar[int]' = 7
expected_result = {'foo': typing.ClassVar[int]}
for clazz in [C, D, E, F]:
self.assertEqual(get_type_hints(clazz), expected_result)
def test_nested_classvar_fails_forward_ref_check(self):
class E:
foo: 'typing.ClassVar[typing.ClassVar[int]]' = 7
class F:
foo: ClassVar['ClassVar[int]'] = 7
for clazz in [E, F]:
with self.assertRaises(TypeError):
get_type_hints(clazz)
def test_meta_no_type_check(self):
@no_type_check_decorator
def magic_decorator(func):
return func
self.assertEqual(magic_decorator.__name__, 'magic_decorator')
@magic_decorator
def foo(a: 'whatevers') -> {}:
pass
@magic_decorator
class C:
def foo(a: 'whatevers') -> {}:
pass
self.assertEqual(foo.__name__, 'foo')
th = get_type_hints(foo)
self.assertEqual(th, {})
cth = get_type_hints(C.foo)
self.assertEqual(cth, {})
ith = get_type_hints(C().foo)
self.assertEqual(ith, {})
def test_default_globals(self):
code = ("class C:\n"
" def foo(self, a: 'C') -> 'D': pass\n"
"class D:\n"
" def bar(self, b: 'D') -> C: pass\n"
)
ns = {}
exec(code, ns)
hints = get_type_hints(ns['C'].foo)
self.assertEqual(hints, {'a': ns['C'], 'return': ns['D']})
def test_final_forward_ref(self):
self.assertEqual(gth(Loop, globals())['attr'], Final[Loop])
self.assertNotEqual(gth(Loop, globals())['attr'], Final[int])
self.assertNotEqual(gth(Loop, globals())['attr'], Final)
class OverloadTests(BaseTestCase):
def test_overload_fails(self):
from typing import overload
with self.assertRaises(RuntimeError):
@overload
def blah():
pass
blah()
def test_overload_succeeds(self):
from typing import overload
@overload
def blah():
pass
def blah():
pass
blah()
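# The asyncio-dependent definitions below are kept in a string and exec'd so the
# module still loads when asyncio cannot be imported.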
ASYNCIO_TESTS = """
import asyncio
T_a = TypeVar('T_a')
class AwaitableWrapper(typing.Awaitable[T_a]):
def __init__(self, value):
self.value = value
def __await__(self) -> typing.Iterator[T_a]:
yield
return self.value
class AsyncIteratorWrapper(typing.AsyncIterator[T_a]):
def __init__(self, value: typing.Iterable[T_a]):
self.value = value
def __aiter__(self) -> typing.AsyncIterator[T_a]:
return self
async def __anext__(self) -> T_a:
data = await self.value
if data:
return data
else:
raise StopAsyncIteration
class ACM:
async def __aenter__(self) -> int:
return 42
async def __aexit__(self, etype, eval, tb):
return None
"""
try:
exec(ASYNCIO_TESTS)
except ImportError:
    ASYNCIO = False  # asyncio is not available; skip the async tests
else:
ASYNCIO = True
# Definitions needed for features introduced in Python 3.6
from test import ann_module, ann_module2, ann_module3
from typing import AsyncContextManager
class A:
y: float
class B(A):
x: ClassVar[Optional['B']] = None
y: int
b: int
class CSub(B):
z: ClassVar['CSub'] = B()
class G(Generic[T]):
lst: ClassVar[List[T]] = []
class Loop:
attr: Final['Loop']
class NoneAndForward:
parent: 'NoneAndForward'
meaning: None
class CoolEmployee(NamedTuple):
name: str
cool: int
class CoolEmployeeWithDefault(NamedTuple):
name: str
cool: int = 0
class XMeth(NamedTuple):
x: int
def double(self):
return 2 * self.x
class XRepr(NamedTuple):
x: int
y: int = 1
def __str__(self):
return f'{self.x} -> {self.y}'
def __add__(self, other):
return 0
Label = TypedDict('Label', [('label', str)])
class Point2D(TypedDict):
x: int
y: int
class LabelPoint2D(Point2D, Label): ...
class Options(TypedDict, total=False):
log_level: int
log_path: str
class HasForeignBaseClass(mod_generics_cache.A):
some_xrepr: 'XRepr'
other_a: 'mod_generics_cache.A'
async def g_with(am: AsyncContextManager[int]):
x: int
async with am as x:
return x
try:
g_with(ACM()).send(None)
except StopIteration as e:
assert e.args[0] == 42
gth = get_type_hints
class GetTypeHintTests(BaseTestCase):
def test_get_type_hints_from_various_objects(self):
        # Invalid objects should fail with TypeError (not AttributeError, etc.).
with self.assertRaises(TypeError):
gth(123)
with self.assertRaises(TypeError):
gth('abc')
with self.assertRaises(TypeError):
gth(None)
def test_get_type_hints_modules(self):
ann_module_type_hints = {1: 2, 'f': Tuple[int, int], 'x': int, 'y': str}
self.assertEqual(gth(ann_module), ann_module_type_hints)
self.assertEqual(gth(ann_module2), {})
self.assertEqual(gth(ann_module3), {})
@skip("known bug")
def test_get_type_hints_modules_forwardref(self):
# FIXME: This currently exposes a bug in typing. Cached forward references
# don't account for the case where there are multiple types of the same
# name coming from different modules in the same program.
mgc_hints = {'default_a': Optional[mod_generics_cache.A],
'default_b': Optional[mod_generics_cache.B]}
self.assertEqual(gth(mod_generics_cache), mgc_hints)
def test_get_type_hints_classes(self):
self.assertEqual(gth(ann_module.C), # gth will find the right globalns
{'y': Optional[ann_module.C]})
self.assertIsInstance(gth(ann_module.j_class), dict)
self.assertEqual(gth(ann_module.M), {'123': 123, 'o': type})
self.assertEqual(gth(ann_module.D),
{'j': str, 'k': str, 'y': Optional[ann_module.C]})
self.assertEqual(gth(ann_module.Y), {'z': int})
self.assertEqual(gth(ann_module.h_class),
{'y': Optional[ann_module.C]})
self.assertEqual(gth(ann_module.S), {'x': str, 'y': str})
self.assertEqual(gth(ann_module.foo), {'x': int})
self.assertEqual(gth(NoneAndForward),
{'parent': NoneAndForward, 'meaning': type(None)})
self.assertEqual(gth(HasForeignBaseClass),
{'some_xrepr': XRepr, 'other_a': mod_generics_cache.A,
'some_b': mod_generics_cache.B})
self.assertEqual(gth(XRepr.__new__),
{'x': int, 'y': int})
self.assertEqual(gth(mod_generics_cache.B),
{'my_inner_a1': mod_generics_cache.B.A,
'my_inner_a2': mod_generics_cache.B.A,
'my_outer_a': mod_generics_cache.A})
def test_respect_no_type_check(self):
@no_type_check
class NoTpCheck:
class Inn:
def __init__(self, x: 'not a type'): ...
self.assertTrue(NoTpCheck.__no_type_check__)
self.assertTrue(NoTpCheck.Inn.__init__.__no_type_check__)
self.assertEqual(gth(ann_module2.NTC.meth), {})
class ABase(Generic[T]):
def meth(x: int): ...
@no_type_check
class Der(ABase): ...
self.assertEqual(gth(ABase.meth), {'x': int})
def test_get_type_hints_for_builtins(self):
# Should not fail for built-in classes and functions.
self.assertEqual(gth(int), {})
self.assertEqual(gth(type), {})
self.assertEqual(gth(dir), {})
self.assertEqual(gth(len), {})
self.assertEqual(gth(object.__str__), {})
self.assertEqual(gth(object().__str__), {})
self.assertEqual(gth(str.join), {})
def test_previous_behavior(self):
def testf(x, y): ...
testf.__annotations__['x'] = 'int'
self.assertEqual(gth(testf), {'x': int})
def testg(x: None): ...
self.assertEqual(gth(testg), {'x': type(None)})
def test_get_type_hints_for_object_with_annotations(self):
class A: ...
class B: ...
b = B()
b.__annotations__ = {'x': 'A'}
self.assertEqual(gth(b, locals()), {'x': A})
def test_get_type_hints_ClassVar(self):
self.assertEqual(gth(ann_module2.CV, ann_module2.__dict__),
{'var': typing.ClassVar[ann_module2.CV]})
self.assertEqual(gth(B, globals()),
{'y': int, 'x': ClassVar[Optional[B]], 'b': int})
self.assertEqual(gth(CSub, globals()),
{'z': ClassVar[CSub], 'y': int, 'b': int,
'x': ClassVar[Optional[B]]})
self.assertEqual(gth(G), {'lst': ClassVar[List[T]]})
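# Hedged illustration of the ClassVar handling verified above: class-level
# hints keep their ClassVar wrapper when collected by get_type_hints().
# _ExampleConfig is a made-up name.
from typing import ClassVar, get_type_hints
class _ExampleConfig:
    instances: ClassVar[int] = 0
    name: str
assert get_type_hints(_ExampleConfig) == {'instances': ClassVar[int], 'name': str}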
class GetUtilitiesTestCase(TestCase):
def test_get_origin(self):
T = TypeVar('T')
class C(Generic[T]): pass
self.assertIs(get_origin(C[int]), C)
self.assertIs(get_origin(C[T]), C)
self.assertIs(get_origin(int), None)
self.assertIs(get_origin(ClassVar[int]), ClassVar)
self.assertIs(get_origin(Union[int, str]), Union)
self.assertIs(get_origin(Literal[42, 43]), Literal)
self.assertIs(get_origin(Final[List[int]]), Final)
self.assertIs(get_origin(Generic), Generic)
self.assertIs(get_origin(Generic[T]), Generic)
self.assertIs(get_origin(List[Tuple[T, T]][int]), list)
def test_get_args(self):
T = TypeVar('T')
class C(Generic[T]): pass
self.assertEqual(get_args(C[int]), (int,))
self.assertEqual(get_args(C[T]), (T,))
self.assertEqual(get_args(int), ())
self.assertEqual(get_args(ClassVar[int]), (int,))
self.assertEqual(get_args(Union[int, str]), (int, str))
self.assertEqual(get_args(Literal[42, 43]), (42, 43))
self.assertEqual(get_args(Final[List[int]]), (List[int],))
self.assertEqual(get_args(Union[int, Tuple[T, int]][str]),
(int, Tuple[str, int]))
self.assertEqual(get_args(typing.Dict[int, Tuple[T, T]][Optional[int]]),
(int, Tuple[Optional[int], Optional[int]]))
self.assertEqual(get_args(Callable[[], T][int]), ([], int,))
self.assertEqual(get_args(Union[int, Callable[[Tuple[T, ...]], str]]),
(int, Callable[[Tuple[T, ...]], str]))
self.assertEqual(get_args(Tuple[int, ...]), (int, ...))
self.assertEqual(get_args(Tuple[()]), ((),))
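# A small, hedged example of the introspection helpers tested above:
# get_origin() strips the subscription and get_args() returns the type
# arguments, so a parameterized type can be taken apart recursively.
from typing import Dict, Union, get_args, get_origin
_tp = Dict[str, Union[int, None]]
assert get_origin(_tp) is dict
assert get_args(_tp) == (str, Union[int, None])
assert get_args(get_args(_tp)[1]) == (int, type(None))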
class CollectionsAbcTests(BaseTestCase):
def test_hashable(self):
self.assertIsInstance(42, typing.Hashable)
self.assertNotIsInstance([], typing.Hashable)
def test_iterable(self):
self.assertIsInstance([], typing.Iterable)
# Due to ABC caching, the second time takes a separate code
# path and could fail. So call this a few times.
self.assertIsInstance([], typing.Iterable)
self.assertIsInstance([], typing.Iterable)
self.assertNotIsInstance(42, typing.Iterable)
# Just in case, also test issubclass() a few times.
self.assertIsSubclass(list, typing.Iterable)
self.assertIsSubclass(list, typing.Iterable)
def test_iterator(self):
it = iter([])
self.assertIsInstance(it, typing.Iterator)
self.assertNotIsInstance(42, typing.Iterator)
@skipUnless(ASYNCIO, 'Python 3.5 and multithreading required')
def test_awaitable(self):
ns = {}
exec(
"async def foo() -> typing.Awaitable[int]:\n"
" return await AwaitableWrapper(42)\n",
globals(), ns)
foo = ns['foo']
g = foo()
self.assertIsInstance(g, typing.Awaitable)
self.assertNotIsInstance(foo, typing.Awaitable)
g.send(None) # Run foo() till completion, to avoid warning.
@skipUnless(ASYNCIO, 'Python 3.5 and multithreading required')
def test_coroutine(self):
ns = {}
exec(
"async def foo():\n"
" return\n",
globals(), ns)
foo = ns['foo']
g = foo()
self.assertIsInstance(g, typing.Coroutine)
with self.assertRaises(TypeError):
isinstance(g, typing.Coroutine[int])
self.assertNotIsInstance(foo, typing.Coroutine)
try:
g.send(None)
except StopIteration:
pass
@skipUnless(ASYNCIO, 'Python 3.5 and multithreading required')
def test_async_iterable(self):
base_it = range(10) # type: Iterator[int]
it = AsyncIteratorWrapper(base_it)
self.assertIsInstance(it, typing.AsyncIterable)
self.assertIsInstance(it, typing.AsyncIterable)
self.assertNotIsInstance(42, typing.AsyncIterable)
@skipUnless(ASYNCIO, 'Python 3.5 and multithreading required')
def test_async_iterator(self):
base_it = range(10) # type: Iterator[int]
it = AsyncIteratorWrapper(base_it)
self.assertIsInstance(it, typing.AsyncIterator)
self.assertNotIsInstance(42, typing.AsyncIterator)
def test_sized(self):
self.assertIsInstance([], typing.Sized)
self.assertNotIsInstance(42, typing.Sized)
def test_container(self):
self.assertIsInstance([], typing.Container)
self.assertNotIsInstance(42, typing.Container)
def test_collection(self):
if hasattr(typing, 'Collection'):
self.assertIsInstance(tuple(), typing.Collection)
self.assertIsInstance(frozenset(), typing.Collection)
self.assertIsSubclass(dict, typing.Collection)
self.assertNotIsInstance(42, typing.Collection)
def test_abstractset(self):
self.assertIsInstance(set(), typing.AbstractSet)
self.assertNotIsInstance(42, typing.AbstractSet)
def test_mutableset(self):
self.assertIsInstance(set(), typing.MutableSet)
self.assertNotIsInstance(frozenset(), typing.MutableSet)
def test_mapping(self):
self.assertIsInstance({}, typing.Mapping)
self.assertNotIsInstance(42, typing.Mapping)
def test_mutablemapping(self):
self.assertIsInstance({}, typing.MutableMapping)
self.assertNotIsInstance(42, typing.MutableMapping)
def test_sequence(self):
self.assertIsInstance([], typing.Sequence)
self.assertNotIsInstance(42, typing.Sequence)
def test_mutablesequence(self):
self.assertIsInstance([], typing.MutableSequence)
self.assertNotIsInstance((), typing.MutableSequence)
def test_bytestring(self):
self.assertIsInstance(b'', typing.ByteString)
self.assertIsInstance(bytearray(b''), typing.ByteString)
def test_list(self):
self.assertIsSubclass(list, typing.List)
def test_deque(self):
self.assertIsSubclass(collections.deque, typing.Deque)
class MyDeque(typing.Deque[int]): ...
self.assertIsInstance(MyDeque(), collections.deque)
def test_counter(self):
self.assertIsSubclass(collections.Counter, typing.Counter)
def test_set(self):
self.assertIsSubclass(set, typing.Set)
self.assertNotIsSubclass(frozenset, typing.Set)
def test_frozenset(self):
self.assertIsSubclass(frozenset, typing.FrozenSet)
self.assertNotIsSubclass(set, typing.FrozenSet)
def test_dict(self):
self.assertIsSubclass(dict, typing.Dict)
def test_no_list_instantiation(self):
with self.assertRaises(TypeError):
typing.List()
with self.assertRaises(TypeError):
typing.List[T]()
with self.assertRaises(TypeError):
typing.List[int]()
def test_list_subclass(self):
class MyList(typing.List[int]):
pass
a = MyList()
self.assertIsInstance(a, MyList)
self.assertIsInstance(a, typing.Sequence)
self.assertIsSubclass(MyList, list)
self.assertNotIsSubclass(list, MyList)
def test_no_dict_instantiation(self):
with self.assertRaises(TypeError):
typing.Dict()
with self.assertRaises(TypeError):
typing.Dict[KT, VT]()
with self.assertRaises(TypeError):
typing.Dict[str, int]()
def test_dict_subclass(self):
class MyDict(typing.Dict[str, int]):
pass
d = MyDict()
self.assertIsInstance(d, MyDict)
self.assertIsInstance(d, typing.MutableMapping)
self.assertIsSubclass(MyDict, dict)
self.assertNotIsSubclass(dict, MyDict)
def test_defaultdict_instantiation(self):
self.assertIs(type(typing.DefaultDict()), collections.defaultdict)
self.assertIs(type(typing.DefaultDict[KT, VT]()), collections.defaultdict)
self.assertIs(type(typing.DefaultDict[str, int]()), collections.defaultdict)
def test_defaultdict_subclass(self):
class MyDefDict(typing.DefaultDict[str, int]):
pass
dd = MyDefDict()
self.assertIsInstance(dd, MyDefDict)
self.assertIsSubclass(MyDefDict, collections.defaultdict)
self.assertNotIsSubclass(collections.defaultdict, MyDefDict)
def test_ordereddict_instantiation(self):
self.assertIs(type(typing.OrderedDict()), collections.OrderedDict)
self.assertIs(type(typing.OrderedDict[KT, VT]()), collections.OrderedDict)
self.assertIs(type(typing.OrderedDict[str, int]()), collections.OrderedDict)
def test_ordereddict_subclass(self):
class MyOrdDict(typing.OrderedDict[str, int]):
pass
od = MyOrdDict()
self.assertIsInstance(od, MyOrdDict)
self.assertIsSubclass(MyOrdDict, collections.OrderedDict)
self.assertNotIsSubclass(collections.OrderedDict, MyOrdDict)
@skipUnless(sys.version_info >= (3, 3), 'ChainMap was added in 3.3')
def test_chainmap_instantiation(self):
self.assertIs(type(typing.ChainMap()), collections.ChainMap)
self.assertIs(type(typing.ChainMap[KT, VT]()), collections.ChainMap)
self.assertIs(type(typing.ChainMap[str, int]()), collections.ChainMap)
class CM(typing.ChainMap[KT, VT]): ...
self.assertIs(type(CM[int, str]()), CM)
@skipUnless(sys.version_info >= (3, 3), 'ChainMap was added in 3.3')
def test_chainmap_subclass(self):
class MyChainMap(typing.ChainMap[str, int]):
pass
cm = MyChainMap()
self.assertIsInstance(cm, MyChainMap)
self.assertIsSubclass(MyChainMap, collections.ChainMap)
self.assertNotIsSubclass(collections.ChainMap, MyChainMap)
def test_deque_instantiation(self):
self.assertIs(type(typing.Deque()), collections.deque)
self.assertIs(type(typing.Deque[T]()), collections.deque)
self.assertIs(type(typing.Deque[int]()), collections.deque)
class D(typing.Deque[T]): ...
self.assertIs(type(D[int]()), D)
def test_counter_instantiation(self):
self.assertIs(type(typing.Counter()), collections.Counter)
self.assertIs(type(typing.Counter[T]()), collections.Counter)
self.assertIs(type(typing.Counter[int]()), collections.Counter)
class C(typing.Counter[T]): ...
self.assertIs(type(C[int]()), C)
def test_counter_subclass_instantiation(self):
class MyCounter(typing.Counter[int]):
pass
d = MyCounter()
self.assertIsInstance(d, MyCounter)
self.assertIsInstance(d, typing.Counter)
self.assertIsInstance(d, collections.Counter)
def test_no_set_instantiation(self):
with self.assertRaises(TypeError):
typing.Set()
with self.assertRaises(TypeError):
typing.Set[T]()
with self.assertRaises(TypeError):
typing.Set[int]()
def test_set_subclass_instantiation(self):
class MySet(typing.Set[int]):
pass
d = MySet()
self.assertIsInstance(d, MySet)
def test_no_frozenset_instantiation(self):
with self.assertRaises(TypeError):
typing.FrozenSet()
with self.assertRaises(TypeError):
typing.FrozenSet[T]()
with self.assertRaises(TypeError):
typing.FrozenSet[int]()
def test_frozenset_subclass_instantiation(self):
class MyFrozenSet(typing.FrozenSet[int]):
pass
d = MyFrozenSet()
self.assertIsInstance(d, MyFrozenSet)
def test_no_tuple_instantiation(self):
with self.assertRaises(TypeError):
Tuple()
with self.assertRaises(TypeError):
Tuple[T]()
with self.assertRaises(TypeError):
Tuple[int]()
def test_generator(self):
def foo():
yield 42
g = foo()
self.assertIsSubclass(type(g), typing.Generator)
def test_no_generator_instantiation(self):
with self.assertRaises(TypeError):
typing.Generator()
with self.assertRaises(TypeError):
typing.Generator[T, T, T]()
with self.assertRaises(TypeError):
typing.Generator[int, int, int]()
def test_async_generator(self):
ns = {}
exec("async def f():\n"
" yield 42\n", globals(), ns)
g = ns['f']()
self.assertIsSubclass(type(g), typing.AsyncGenerator)
def test_no_async_generator_instantiation(self):
with self.assertRaises(TypeError):
typing.AsyncGenerator()
with self.assertRaises(TypeError):
typing.AsyncGenerator[T, T]()
with self.assertRaises(TypeError):
typing.AsyncGenerator[int, int]()
def test_subclassing(self):
class MMA(typing.MutableMapping):
pass
with self.assertRaises(TypeError): # It's abstract
MMA()
class MMC(MMA):
def __getitem__(self, k):
return None
def __setitem__(self, k, v):
pass
def __delitem__(self, k):
pass
def __iter__(self):
return iter(())
def __len__(self):
return 0
self.assertEqual(len(MMC()), 0)
assert callable(MMC.update)
self.assertIsInstance(MMC(), typing.Mapping)
class MMB(typing.MutableMapping[KT, VT]):
def __getitem__(self, k):
return None
def __setitem__(self, k, v):
pass
def __delitem__(self, k):
pass
def __iter__(self):
return iter(())
def __len__(self):
return 0
self.assertEqual(len(MMB()), 0)
self.assertEqual(len(MMB[str, str]()), 0)
self.assertEqual(len(MMB[KT, VT]()), 0)
self.assertNotIsSubclass(dict, MMA)
self.assertNotIsSubclass(dict, MMB)
self.assertIsSubclass(MMA, typing.Mapping)
self.assertIsSubclass(MMB, typing.Mapping)
self.assertIsSubclass(MMC, typing.Mapping)
self.assertIsInstance(MMB[KT, VT](), typing.Mapping)
self.assertIsInstance(MMB[KT, VT](), collections.abc.Mapping)
self.assertIsSubclass(MMA, collections.abc.Mapping)
self.assertIsSubclass(MMB, collections.abc.Mapping)
self.assertIsSubclass(MMC, collections.abc.Mapping)
with self.assertRaises(TypeError):
issubclass(MMB[str, str], typing.Mapping)
self.assertIsSubclass(MMC, MMA)
class I(typing.Iterable): ...
self.assertNotIsSubclass(list, I)
class G(typing.Generator[int, int, int]): ...
def g(): yield 0
self.assertIsSubclass(G, typing.Generator)
self.assertIsSubclass(G, typing.Iterable)
self.assertIsSubclass(G, collections.abc.Generator)
self.assertIsSubclass(G, collections.abc.Iterable)
self.assertNotIsSubclass(type(g), G)
def test_subclassing_async_generator(self):
class G(typing.AsyncGenerator[int, int]):
def asend(self, value):
pass
def athrow(self, typ, val=None, tb=None):
pass
ns = {}
exec('async def g(): yield 0', globals(), ns)
g = ns['g']
self.assertIsSubclass(G, typing.AsyncGenerator)
self.assertIsSubclass(G, typing.AsyncIterable)
self.assertIsSubclass(G, collections.abc.AsyncGenerator)
self.assertIsSubclass(G, collections.abc.AsyncIterable)
self.assertNotIsSubclass(type(g), G)
instance = G()
self.assertIsInstance(instance, typing.AsyncGenerator)
self.assertIsInstance(instance, typing.AsyncIterable)
self.assertIsInstance(instance, collections.abc.AsyncGenerator)
self.assertIsInstance(instance, collections.abc.AsyncIterable)
self.assertNotIsInstance(type(g), G)
self.assertNotIsInstance(g, G)
def test_subclassing_subclasshook(self):
class Base(typing.Iterable):
@classmethod
def __subclasshook__(cls, other):
if other.__name__ == 'Foo':
return True
else:
return False
class C(Base): ...
class Foo: ...
class Bar: ...
self.assertIsSubclass(Foo, Base)
self.assertIsSubclass(Foo, C)
self.assertNotIsSubclass(Bar, C)
def test_subclassing_register(self):
class A(typing.Container): ...
class B(A): ...
class C: ...
A.register(C)
self.assertIsSubclass(C, A)
self.assertNotIsSubclass(C, B)
class D: ...
B.register(D)
self.assertIsSubclass(D, A)
self.assertIsSubclass(D, B)
class M(): ...
collections.abc.MutableMapping.register(M)
self.assertIsSubclass(M, typing.Mapping)
def test_collections_as_base(self):
class M(collections.abc.Mapping): ...
self.assertIsSubclass(M, typing.Mapping)
self.assertIsSubclass(M, typing.Iterable)
class S(collections.abc.MutableSequence): ...
self.assertIsSubclass(S, typing.MutableSequence)
self.assertIsSubclass(S, typing.Iterable)
class I(collections.abc.Iterable): ...
self.assertIsSubclass(I, typing.Iterable)
class A(collections.abc.Mapping, metaclass=abc.ABCMeta): ...
class B: ...
A.register(B)
self.assertIsSubclass(B, typing.Mapping)
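# Illustrative only: at runtime the typing aliases exercised above defer to
# their collections.abc / builtin counterparts, and instantiating a
# parameterized alias returns the plain underlying container.
import collections
import typing
assert isinstance([], typing.Sequence)
assert issubclass(dict, typing.Mapping)
assert type(typing.DefaultDict[str, int]()) is collections.defaultdict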
class OtherABCTests(BaseTestCase):
def test_contextmanager(self):
@contextlib.contextmanager
def manager():
yield 42
cm = manager()
self.assertIsInstance(cm, typing.ContextManager)
self.assertNotIsInstance(42, typing.ContextManager)
@skipUnless(ASYNCIO, 'Python 3.5 required')
def test_async_contextmanager(self):
class NotACM:
pass
self.assertIsInstance(ACM(), typing.AsyncContextManager)
self.assertNotIsInstance(NotACM(), typing.AsyncContextManager)
@contextlib.contextmanager
def manager():
yield 42
cm = manager()
self.assertNotIsInstance(cm, typing.AsyncContextManager)
self.assertEqual(typing.AsyncContextManager[int].__args__, (int,))
with self.assertRaises(TypeError):
isinstance(42, typing.AsyncContextManager[int])
with self.assertRaises(TypeError):
typing.AsyncContextManager[int, str]
class TypeTests(BaseTestCase):
def test_type_basic(self):
class User: pass
class BasicUser(User): pass
class ProUser(User): pass
def new_user(user_class: Type[User]) -> User:
return user_class()
new_user(BasicUser)
def test_type_typevar(self):
class User: pass
class BasicUser(User): pass
class ProUser(User): pass
U = TypeVar('U', bound=User)
def new_user(user_class: Type[U]) -> U:
return user_class()
new_user(BasicUser)
def test_type_optional(self):
A = Optional[Type[BaseException]]
def foo(a: A) -> Optional[BaseException]:
if a is None:
return None
else:
return a()
assert isinstance(foo(KeyboardInterrupt), KeyboardInterrupt)
assert foo(None) is None
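# A brief, hypothetical sketch of the Type[...] factory pattern tested above:
# annotating the parameter with Type[_ExampleUser] means "the class itself,
# or any subclass of it", rather than an instance.
from typing import Type
class _ExampleUser: ...
class _ExampleProUser(_ExampleUser): ...
def _make_user(user_class: Type[_ExampleUser]) -> _ExampleUser:
    return user_class()
assert isinstance(_make_user(_ExampleProUser), _ExampleProUser)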
class NewTypeTests(BaseTestCase):
def test_basic(self):
UserId = NewType('UserId', int)
UserName = NewType('UserName', str)
self.assertIsInstance(UserId(5), int)
self.assertIsInstance(UserName('Joe'), str)
self.assertEqual(UserId(5) + 1, 6)
def test_errors(self):
UserId = NewType('UserId', int)
UserName = NewType('UserName', str)
with self.assertRaises(TypeError):
issubclass(UserId, int)
with self.assertRaises(TypeError):
class D(UserName):
pass
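# Hedged example of the NewType semantics shown above: the returned callable
# is an identity function, so values stay plain ints at runtime while a static
# checker treats _ExampleUserId as a distinct type.
from typing import NewType
_ExampleUserId = NewType('_ExampleUserId', int)
assert _ExampleUserId(5) == 5
assert type(_ExampleUserId(5)) is int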
class NamedTupleTests(BaseTestCase):
class NestedEmployee(NamedTuple):
name: str
cool: int
def test_basics(self):
Emp = NamedTuple('Emp', [('name', str), ('id', int)])
self.assertIsSubclass(Emp, tuple)
joe = Emp('Joe', 42)
jim = Emp(name='Jim', id=1)
self.assertIsInstance(joe, Emp)
self.assertIsInstance(joe, tuple)
self.assertEqual(joe.name, 'Joe')
self.assertEqual(joe.id, 42)
self.assertEqual(jim.name, 'Jim')
self.assertEqual(jim.id, 1)
self.assertEqual(Emp.__name__, 'Emp')
self.assertEqual(Emp._fields, ('name', 'id'))
self.assertEqual(Emp.__annotations__,
collections.OrderedDict([('name', str), ('id', int)]))
self.assertIs(Emp._field_types, Emp.__annotations__)
def test_namedtuple_pyversion(self):
if sys.version_info[:2] < (3, 6):
with self.assertRaises(TypeError):
NamedTuple('Name', one=int, other=str)
with self.assertRaises(TypeError):
class NotYet(NamedTuple):
whatever = 0
def test_annotation_usage(self):
tim = CoolEmployee('Tim', 9000)
self.assertIsInstance(tim, CoolEmployee)
self.assertIsInstance(tim, tuple)
self.assertEqual(tim.name, 'Tim')
self.assertEqual(tim.cool, 9000)
self.assertEqual(CoolEmployee.__name__, 'CoolEmployee')
self.assertEqual(CoolEmployee._fields, ('name', 'cool'))
self.assertEqual(CoolEmployee.__annotations__,
collections.OrderedDict(name=str, cool=int))
self.assertIs(CoolEmployee._field_types, CoolEmployee.__annotations__)
def test_annotation_usage_with_default(self):
jelle = CoolEmployeeWithDefault('Jelle')
self.assertIsInstance(jelle, CoolEmployeeWithDefault)
self.assertIsInstance(jelle, tuple)
self.assertEqual(jelle.name, 'Jelle')
self.assertEqual(jelle.cool, 0)
cooler_employee = CoolEmployeeWithDefault('Sjoerd', 1)
self.assertEqual(cooler_employee.cool, 1)
self.assertEqual(CoolEmployeeWithDefault.__name__, 'CoolEmployeeWithDefault')
self.assertEqual(CoolEmployeeWithDefault._fields, ('name', 'cool'))
self.assertEqual(CoolEmployeeWithDefault._field_types, dict(name=str, cool=int))
self.assertEqual(CoolEmployeeWithDefault._field_defaults, dict(cool=0))
with self.assertRaises(TypeError):
exec("""
class NonDefaultAfterDefault(NamedTuple):
x: int = 3
y: int
""")
def test_annotation_usage_with_methods(self):
self.assertEqual(XMeth(1).double(), 2)
self.assertEqual(XMeth(42).x, XMeth(42)[0])
self.assertEqual(str(XRepr(42)), '42 -> 1')
self.assertEqual(XRepr(1, 2) + XRepr(3), 0)
with self.assertRaises(AttributeError):
exec("""
class XMethBad(NamedTuple):
x: int
def _fields(self):
return 'no chance for this'
""")
with self.assertRaises(AttributeError):
exec("""
class XMethBad2(NamedTuple):
x: int
def _source(self):
return 'no chance for this as well'
""")
def test_namedtuple_keyword_usage(self):
LocalEmployee = NamedTuple("LocalEmployee", name=str, age=int)
nick = LocalEmployee('Nick', 25)
self.assertIsInstance(nick, tuple)
self.assertEqual(nick.name, 'Nick')
self.assertEqual(LocalEmployee.__name__, 'LocalEmployee')
self.assertEqual(LocalEmployee._fields, ('name', 'age'))
self.assertEqual(LocalEmployee.__annotations__, dict(name=str, age=int))
self.assertIs(LocalEmployee._field_types, LocalEmployee.__annotations__)
with self.assertRaises(TypeError):
NamedTuple('Name', [('x', int)], y=str)
with self.assertRaises(TypeError):
NamedTuple('Name', x=1, y='a')
def test_namedtuple_special_keyword_names(self):
NT = NamedTuple("NT", cls=type, self=object, typename=str, fields=list)
self.assertEqual(NT.__name__, 'NT')
self.assertEqual(NT._fields, ('cls', 'self', 'typename', 'fields'))
a = NT(cls=str, self=42, typename='foo', fields=[('bar', tuple)])
self.assertEqual(a.cls, str)
self.assertEqual(a.self, 42)
self.assertEqual(a.typename, 'foo')
self.assertEqual(a.fields, [('bar', tuple)])
def test_namedtuple_errors(self):
with self.assertRaises(TypeError):
NamedTuple.__new__()
with self.assertRaises(TypeError):
NamedTuple()
with self.assertRaises(TypeError):
NamedTuple('Emp', [('name', str)], None)
with self.assertRaises(ValueError):
NamedTuple('Emp', [('_name', str)])
with self.assertWarns(DeprecationWarning):
Emp = NamedTuple(typename='Emp', name=str, id=int)
self.assertEqual(Emp.__name__, 'Emp')
self.assertEqual(Emp._fields, ('name', 'id'))
with self.assertWarns(DeprecationWarning):
Emp = NamedTuple('Emp', fields=[('name', str), ('id', int)])
self.assertEqual(Emp.__name__, 'Emp')
self.assertEqual(Emp._fields, ('name', 'id'))
def test_copy_and_pickle(self):
global Emp # pickle wants to reference the class by name
Emp = NamedTuple('Emp', [('name', str), ('cool', int)])
for cls in Emp, CoolEmployee, self.NestedEmployee:
with self.subTest(cls=cls):
jane = cls('jane', 37)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
z = pickle.dumps(jane, proto)
jane2 = pickle.loads(z)
self.assertEqual(jane2, jane)
self.assertIsInstance(jane2, cls)
jane2 = copy(jane)
self.assertEqual(jane2, jane)
self.assertIsInstance(jane2, cls)
jane2 = deepcopy(jane)
self.assertEqual(jane2, jane)
self.assertIsInstance(jane2, cls)
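# Small, hypothetical sketch of the class-based NamedTuple syntax covered by
# the tests above: fields come from the annotations and defaulted fields must
# follow the non-default ones.
from typing import NamedTuple
class _ExampleEmployee(NamedTuple):
    name: str
    cool: int = 0
_e = _ExampleEmployee('Tim')
assert _e == ('Tim', 0) and isinstance(_e, tuple)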
class TypedDictTests(BaseTestCase):
def test_basics_functional_syntax(self):
Emp = TypedDict('Emp', {'name': str, 'id': int})
self.assertIsSubclass(Emp, dict)
self.assertIsSubclass(Emp, typing.MutableMapping)
self.assertNotIsSubclass(Emp, collections.abc.Sequence)
jim = Emp(name='Jim', id=1)
self.assertIs(type(jim), dict)
self.assertEqual(jim['name'], 'Jim')
self.assertEqual(jim['id'], 1)
self.assertEqual(Emp.__name__, 'Emp')
self.assertEqual(Emp.__module__, __name__)
self.assertEqual(Emp.__bases__, (dict,))
self.assertEqual(Emp.__annotations__, {'name': str, 'id': int})
self.assertEqual(Emp.__total__, True)
def test_basics_keywords_syntax(self):
Emp = TypedDict('Emp', name=str, id=int)
self.assertIsSubclass(Emp, dict)
self.assertIsSubclass(Emp, typing.MutableMapping)
self.assertNotIsSubclass(Emp, collections.abc.Sequence)
jim = Emp(name='Jim', id=1)
self.assertIs(type(jim), dict)
self.assertEqual(jim['name'], 'Jim')
self.assertEqual(jim['id'], 1)
self.assertEqual(Emp.__name__, 'Emp')
self.assertEqual(Emp.__module__, __name__)
self.assertEqual(Emp.__bases__, (dict,))
self.assertEqual(Emp.__annotations__, {'name': str, 'id': int})
self.assertEqual(Emp.__total__, True)
def test_typeddict_special_keyword_names(self):
TD = TypedDict("TD", cls=type, self=object, typename=str, _typename=int, fields=list, _fields=dict)
self.assertEqual(TD.__name__, 'TD')
self.assertEqual(TD.__annotations__, {'cls': type, 'self': object, 'typename': str, '_typename': int, 'fields': list, '_fields': dict})
a = TD(cls=str, self=42, typename='foo', _typename=53, fields=[('bar', tuple)], _fields={'baz', set})
self.assertEqual(a['cls'], str)
self.assertEqual(a['self'], 42)
self.assertEqual(a['typename'], 'foo')
self.assertEqual(a['_typename'], 53)
self.assertEqual(a['fields'], [('bar', tuple)])
self.assertEqual(a['_fields'], {'baz', set})
def test_typeddict_create_errors(self):
with self.assertRaises(TypeError):
TypedDict.__new__()
with self.assertRaises(TypeError):
TypedDict()
with self.assertRaises(TypeError):
TypedDict('Emp', [('name', str)], None)
with self.assertRaises(TypeError):
TypedDict(_typename='Emp', name=str, id=int)
with self.assertRaises(TypeError):
TypedDict('Emp', _fields={'name': str, 'id': int})
def test_typeddict_errors(self):
Emp = TypedDict('Emp', {'name': str, 'id': int})
self.assertEqual(TypedDict.__module__, 'typing')
jim = Emp(name='Jim', id=1)
with self.assertRaises(TypeError):
isinstance({}, Emp)
with self.assertRaises(TypeError):
isinstance(jim, Emp)
with self.assertRaises(TypeError):
issubclass(dict, Emp)
with self.assertRaises(TypeError):
TypedDict('Hi', x=1)
with self.assertRaises(TypeError):
TypedDict('Hi', [('x', int), ('y', 1)])
with self.assertRaises(TypeError):
TypedDict('Hi', [('x', int)], y=int)
def test_py36_class_syntax_usage(self):
self.assertEqual(LabelPoint2D.__name__, 'LabelPoint2D')
self.assertEqual(LabelPoint2D.__module__, __name__)
self.assertEqual(LabelPoint2D.__annotations__, {'x': int, 'y': int, 'label': str})
self.assertEqual(LabelPoint2D.__bases__, (dict,))
self.assertEqual(LabelPoint2D.__total__, True)
self.assertNotIsSubclass(LabelPoint2D, typing.Sequence)
not_origin = Point2D(x=0, y=1)
self.assertEqual(not_origin['x'], 0)
self.assertEqual(not_origin['y'], 1)
other = LabelPoint2D(x=0, y=1, label='hi')
self.assertEqual(other['label'], 'hi')
def test_pickle(self):
global EmpD # pickle wants to reference the class by name
EmpD = TypedDict('EmpD', name=str, id=int)
jane = EmpD({'name': 'jane', 'id': 37})
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
z = pickle.dumps(jane, proto)
jane2 = pickle.loads(z)
self.assertEqual(jane2, jane)
self.assertEqual(jane2, {'name': 'jane', 'id': 37})
ZZ = pickle.dumps(EmpD, proto)
EmpDnew = pickle.loads(ZZ)
self.assertEqual(EmpDnew({'name': 'jane', 'id': 37}), jane)
def test_optional(self):
EmpD = TypedDict('EmpD', name=str, id=int)
self.assertEqual(typing.Optional[EmpD], typing.Union[None, EmpD])
self.assertNotEqual(typing.List[EmpD], typing.Tuple[EmpD])
def test_total(self):
D = TypedDict('D', {'x': int}, total=False)
self.assertEqual(D(), {})
self.assertEqual(D(x=1), {'x': 1})
self.assertEqual(D.__total__, False)
self.assertEqual(Options(), {})
self.assertEqual(Options(log_level=2), {'log_level': 2})
self.assertEqual(Options.__total__, False)
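# Hedged illustration of the TypedDict behaviour above: instances are plain
# dicts at runtime, and total=False makes every declared key optional for
# static checkers. _ExampleOptions is a made-up name.
from typing import TypedDict
class _ExampleOptions(TypedDict, total=False):
    log_level: int
    log_path: str
_opts = _ExampleOptions(log_level=2)
assert type(_opts) is dict and _opts == {'log_level': 2}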
class IOTests(BaseTestCase):
def test_io(self):
def stuff(a: IO) -> AnyStr:
return a.readline()
a = stuff.__annotations__['a']
self.assertEqual(a.__parameters__, (AnyStr,))
def test_textio(self):
def stuff(a: TextIO) -> str:
return a.readline()
a = stuff.__annotations__['a']
self.assertEqual(a.__parameters__, ())
def test_binaryio(self):
def stuff(a: BinaryIO) -> bytes:
return a.readline()
a = stuff.__annotations__['a']
self.assertEqual(a.__parameters__, ())
def test_io_submodule(self):
from typing.io import IO, TextIO, BinaryIO, __all__, __name__
self.assertIs(IO, typing.IO)
self.assertIs(TextIO, typing.TextIO)
self.assertIs(BinaryIO, typing.BinaryIO)
self.assertEqual(set(__all__), set(['IO', 'TextIO', 'BinaryIO']))
self.assertEqual(__name__, 'typing.io')
class RETests(BaseTestCase):
# Much of this is really testing _TypeAlias.
def test_basics(self):
pat = re.compile('[a-z]+', re.I)
self.assertIsSubclass(pat.__class__, Pattern)
self.assertIsSubclass(type(pat), Pattern)
self.assertIsInstance(pat, Pattern)
mat = pat.search('12345abcde.....')
self.assertIsSubclass(mat.__class__, Match)
self.assertIsSubclass(type(mat), Match)
self.assertIsInstance(mat, Match)
# these should just work
Pattern[Union[str, bytes]]
Match[Union[bytes, str]]
def test_alias_equality(self):
self.assertEqual(Pattern[str], Pattern[str])
self.assertNotEqual(Pattern[str], Pattern[bytes])
self.assertNotEqual(Pattern[str], Match[str])
self.assertNotEqual(Pattern[str], str)
def test_errors(self):
m = Match[Union[str, bytes]]
with self.assertRaises(TypeError):
m[str]
with self.assertRaises(TypeError):
# We don't support isinstance().
isinstance(42, Pattern[str])
with self.assertRaises(TypeError):
# We don't support issubclass().
issubclass(Pattern[bytes], Pattern[str])
def test_repr(self):
self.assertEqual(repr(Pattern), 'typing.Pattern')
self.assertEqual(repr(Pattern[str]), 'typing.Pattern[str]')
self.assertEqual(repr(Pattern[bytes]), 'typing.Pattern[bytes]')
self.assertEqual(repr(Match), 'typing.Match')
self.assertEqual(repr(Match[str]), 'typing.Match[str]')
self.assertEqual(repr(Match[bytes]), 'typing.Match[bytes]')
def test_re_submodule(self):
from typing.re import Match, Pattern, __all__, __name__
self.assertIs(Match, typing.Match)
self.assertIs(Pattern, typing.Pattern)
self.assertEqual(set(__all__), set(['Match', 'Pattern']))
self.assertEqual(__name__, 'typing.re')
def test_cannot_subclass(self):
with self.assertRaises(TypeError) as ex:
class A(typing.Match):
pass
self.assertEqual(str(ex.exception),
"type 're.Match' is not an acceptable base type")
class AllTests(BaseTestCase):
"""Tests for __all__."""
def test_all(self):
from typing import __all__ as a
# Just spot-check the first and last of every category.
self.assertIn('AbstractSet', a)
self.assertIn('ValuesView', a)
self.assertIn('cast', a)
self.assertIn('overload', a)
if hasattr(contextlib, 'AbstractContextManager'):
self.assertIn('ContextManager', a)
# Check that io and re are not exported.
self.assertNotIn('io', a)
self.assertNotIn('re', a)
# Spot-check that stdlib modules aren't exported.
self.assertNotIn('os', a)
self.assertNotIn('sys', a)
# Check that Text is defined.
self.assertIn('Text', a)
# Check previously missing classes.
self.assertIn('SupportsBytes', a)
self.assertIn('SupportsComplex', a)
def test_all_exported_names(self):
import typing
actual_all = set(typing.__all__)
computed_all = {
k for k, v in vars(typing).items()
# explicitly exported, not a thing with __module__
if k in actual_all or (
# avoid private names
not k.startswith('_') and
# avoid things in the io / re typing submodules
k not in typing.io.__all__ and
k not in typing.re.__all__ and
k not in {'io', 're'} and
                # there are a few types and metaclasses that aren't exported
not k.endswith(('Meta', '_contra', '_co')) and
not k.upper() == k and
# but export all things that have __module__ == 'typing'
getattr(v, '__module__', None) == typing.__name__
)
}
self.assertSetEqual(computed_all, actual_all)
if __name__ == '__main__':
main()
|
batermj/algorithm-challenger
|
code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/test/test_typing.py
|
Python
|
apache-2.0
| 124,018
|
[
"VisIt"
] |
916a51ff68a027840b069e73c4346e112488bab5fe779c44afc1f0b9f77c076b
|
import sys
tests=[
("testExecs/testFeatures.exe","",{}),
]
longTests=[
]
if __name__=='__main__':
import sys
from rdkit import TestRunner
failed,tests = TestRunner.RunScript('test_list.py',0,1)
sys.exit(len(failed))
|
soerendip42/rdkit
|
Code/Features/test_list.py
|
Python
|
bsd-3-clause
| 233
|
[
"RDKit"
] |
efcf79a56c4178189595d06830b7ab8dc9d360e99c68c98f923bca31e3de69a1
|
# Copyright: 2005 Gentoo Foundation
# Author(s): Brian Harring (ferringb@gentoo.org)
# License: GPL2
import errno
import re
import stat
import sys
from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage.cache import cache_errors, flat_hash
import portage.eclass_cache
from portage.cache.template import reconstruct_eclasses
from portage.cache.mappings import ProtectedDict
if sys.hexversion >= 0x3000000:
basestring = str
long = int
# This is the old cache format, flat_list; the fixed line count is maintained here.
magic_line_count = 22
# The current key order is stored *here*.
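# Hedged illustration (invented values) of the flat_list layout described
# above: one value per key in auxdbkey_order, each on its own line, padded
# with blank lines up to magic_line_count. _example_flat_list_entry is not
# part of portage.
def _example_flat_list_entry(values, key_order, line_count):
    lines = [values.get(k, '') for k in key_order]
    lines += [''] * (line_count - len(key_order))
    return '\n'.join(lines) + '\n'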
class database(flat_hash.database):
complete_eclass_entries = False
auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
'KEYWORDS', 'INHERITED', 'IUSE', 'REQUIRED_USE',
'PDEPEND', 'PROVIDE', 'EAPI', 'PROPERTIES', 'DEFINED_PHASES')
autocommits = True
serialize_eclasses = False
_hashed_re = re.compile('^(\\w+)=([^\n]*)')
def __init__(self, location, *args, **config):
loc = location
super(database, self).__init__(location, *args, **config)
self.location = os.path.join(loc, "metadata","cache")
self.ec = None
self.raise_stat_collision = False
def _parse_data(self, data, cpv):
_hashed_re_match = self._hashed_re.match
d = {}
for line in data:
hashed = False
hashed_match = _hashed_re_match(line)
if hashed_match is None:
d.clear()
try:
for i, key in enumerate(self.auxdbkey_order):
d[key] = data[i]
except IndexError:
pass
break
else:
d[hashed_match.group(1)] = hashed_match.group(2)
if "_eclasses_" not in d:
if "INHERITED" in d:
if self.ec is None:
self.ec = portage.eclass_cache.cache(self.location[:-15])
try:
d["_eclasses_"] = self.ec.get_eclass_data(
d["INHERITED"].split())
except KeyError as e:
# INHERITED contains a non-existent eclass.
raise cache_errors.CacheCorruption(cpv, e)
del d["INHERITED"]
else:
d["_eclasses_"] = {}
elif isinstance(d["_eclasses_"], basestring):
# We skip this if flat_hash.database._parse_data() was called above
# because it calls reconstruct_eclasses() internally.
d["_eclasses_"] = reconstruct_eclasses(None, d["_eclasses_"])
return d
def _setitem(self, cpv, values):
if "_eclasses_" in values:
values = ProtectedDict(values)
values["INHERITED"] = ' '.join(sorted(values["_eclasses_"]))
new_content = []
for k in self.auxdbkey_order:
new_content.append(values.get(k, ''))
new_content.append('\n')
for i in range(magic_line_count - len(self.auxdbkey_order)):
new_content.append('\n')
new_content = ''.join(new_content)
new_content = _unicode_encode(new_content,
_encodings['repo.content'], errors='backslashreplace')
new_fp = os.path.join(self.location, cpv)
try:
f = open(_unicode_encode(new_fp,
encoding=_encodings['fs'], errors='strict'), 'rb')
except EnvironmentError:
pass
else:
try:
try:
existing_st = os.fstat(f.fileno())
existing_content = f.read()
finally:
f.close()
except EnvironmentError:
pass
else:
existing_mtime = existing_st[stat.ST_MTIME]
if values['_mtime_'] == existing_mtime and \
existing_content == new_content:
return
if self.raise_stat_collision and \
values['_mtime_'] == existing_mtime and \
len(new_content) == existing_st.st_size:
raise cache_errors.StatCollision(cpv, new_fp,
existing_mtime, existing_st.st_size)
s = cpv.rfind("/")
fp = os.path.join(self.location,cpv[:s],
".update.%i.%s" % (os.getpid(), cpv[s+1:]))
try:
myf = open(_unicode_encode(fp,
encoding=_encodings['fs'], errors='strict'), 'wb')
except EnvironmentError as e:
if errno.ENOENT == e.errno:
try:
self._ensure_dirs(cpv)
myf = open(_unicode_encode(fp,
encoding=_encodings['fs'], errors='strict'), 'wb')
except EnvironmentError as e:
raise cache_errors.CacheCorruption(cpv, e)
else:
raise cache_errors.CacheCorruption(cpv, e)
try:
myf.write(new_content)
finally:
myf.close()
self._ensure_access(fp, mtime=values["_mtime_"])
try:
os.rename(fp, new_fp)
except EnvironmentError as e:
try:
os.unlink(fp)
except EnvironmentError:
pass
raise cache_errors.CacheCorruption(cpv, e)
|
Neuvoo/legacy-portage
|
pym/portage/cache/metadata.py
|
Python
|
gpl-2.0
| 4,361
|
[
"Brian"
] |
11d1ebae737a3a76c3316a1f80afb378590b691b1b26c1a1d4aa0b8f297b79c9
|
"""
Generate a synthetic dataset of Hodgkin-Huxley style neurons.
We'll use this to test our classification algorithms.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gamma
import cPickle, gzip
from hips.plotting.sausage import sausage_plot
from hh_dynamics import forward_euler, implicit_euler
def make_mixture_distribution():
# The two classes of neurons are distinguished
# by differences in the conductance of their
# leak, sodium, and potassium channels.
# Since the conductances are nonnegative,
# we model the channel conductance distribution as
# a mixture of gamma distributions
a = 50.0
a_healthy = np.array([a, a, a])
b_healthy = np.array([a/0.2, a/120., a/36.0])
a_disease = np.array([a, a, a])
b_disease = np.array([a/0.2, a/90., a/36.0])
p_healthy = gamma(a=a_healthy, scale=1./b_healthy)
p_disease = gamma(a=a_disease, scale=1./b_disease)
return p_healthy, p_disease
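# Hedged usage sketch (not part of the original script): draw one conductance
# triple (g_leak, g_Na, g_K) per class from the distributions defined above.
# Samples vary per run; only the scale of g_Na differs between the classes.
def _sample_conductances():
    p_healthy, p_disease = make_mixture_distribution()
    return p_healthy.rvs(), p_disease.rvs()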
def plot_mixture_distribution():
# Plot the two distributions along the g_Na and g_K axes
p_healthy, p_disease = make_mixture_distribution()
# Make a meshgrid
Gleak, GNa, GK = np.meshgrid(np.linspace(0.1,1.0,10), # leak conductances
np.linspace(100,150,10), # Na conductances
np.linspace(20,40,10) # K conductances
)
# Compute the healthy cell pdf
pdf_healthy = p_healthy.pdf(
np.concatenate((Gleak[:,:,:,None], GNa[:,:,:,None], GK[:,:,:,None]), axis=-1))
# Compute product of marginal densities
pdf_healthy = pdf_healthy.prod(axis=-1)
# Compute the diseased cell pdf
pdf_disease = p_disease.pdf(
np.concatenate((Gleak[:,:,:,None], GNa[:,:,:,None], GK[:,:,:,None]), axis=-1))
# Compute product of marginal densities
pdf_disease = pdf_disease.prod(axis=-1)
# Plot
plt.figure()
plt.subplot(121)
plt.imshow(pdf_healthy[:,0,:], extent=(20,40,100,150), origin="lower", interpolation="bilinear", cmap="Greys")
# plt.contour(GNa[:,0,:], GK[:,0,:], pdf_healthy[:,0,:], 10, interpolation="cubic")
plt.subplot(122)
plt.imshow(pdf_disease[:,0,:], extent=(20,40,100,150), origin="lower", interpolation="bilinear", cmap="Greys")
# plt.contour(GNa[:,0,:], GK[:,0,:], pdf_disease[:,0,:], 10, interpolation="cubic")
plt.show()
def make_input(t, duty=50., wait=50., amp=75.):
# Define a pulsed impulse
inpt = np.zeros_like(t)
scale = 1
offset = 0
while offset < t[-1]:
offset += wait
t_on = offset
t_off = offset + duty
inpt += scale*amp * (t > t_on) * (t < t_off)
offset += duty
scale += 1
f_inpt = lambda tf: np.interp(tf, t, inpt)
return f_inpt
def generate_data():
# Set timing and number of neurons
N = 100
dt = 0.01
T = int(1000 / dt)
t = np.arange(T) * dt
# Define a pulsed impulse
inpt = make_input(t)
# Make the channel conductance distributions
p_healthy, p_disease = make_mixture_distribution()
# Set the initial conditions shared by both cells
z0 = np.zeros(4)
z0[0] = -77.
z0[1:] = 0.01
# Simulate N traces of healthy cells
g_healthy = np.zeros((3,N))
v_healthy = np.zeros((T,N))
for n in xrange(N):
if n % 10 == 0:
print "Simulating healthy neuron ", n
# Sample conductances
g = p_healthy.rvs()
g_healthy[:,n] = g
# Simulate a trace
# z = forward_euler(z0, g, inpt, T, dt)
z = implicit_euler(z0, g, inpt, t)
v_healthy[:,n] = z[:,0]
# Simulate N traces of disease cells
g_disease = np.zeros((3,N))
v_disease = np.zeros((T,N))
for n in xrange(N):
if n % 10 == 0:
print "Simulating disease neuron ", n
# Sample conductances
g = p_disease.rvs()
g_disease[:,n] = g
# Simulate a trace
# z = forward_euler(z0, g, inpt, T, dt)
z = implicit_euler(z0, g, inpt, t)
v_disease[:,n] = z[:,0]
v_healthy, g_healthy = check_data(v_healthy, g_healthy)
v_disease, g_disease = check_data(v_disease, g_disease)
# Compute statistics
mu_healthy = v_healthy.mean(axis=1)
std_healthy = v_healthy.std(axis=1)
mu_disease = v_disease.mean(axis=1)
std_disease = v_disease.std(axis=1)
plt.figure()
# Plot disease
plt.plot(t, mu_disease, color='r', lw=2)
sausage_plot(t, mu_disease, std_disease, facecolor='r', alpha=0.25)
plt.plot(t, v_disease, color='r', lw=0.25)
# Plot healthy
plt.plot(t, mu_healthy, color='b', lw=2)
sausage_plot(t, mu_healthy, std_healthy, facecolor='b', alpha=0.25)
plt.plot(t, v_healthy, color='b', lw=0.25)
# Scatter plot the conductances
plt.figure()
plt.plot(g_healthy[1,:], g_healthy[2,:], 'bo', markerfacecolor='b', markeredgecolor="none", markersize=4)
plt.plot(g_disease[1,:], g_disease[2,:], 'ro', markerfacecolor='r', markeredgecolor="none", markersize=4)
plt.show()
# Save the data
data = g_healthy, v_healthy, g_disease, v_disease
with gzip.open("hh_data.pkl.gz", "w") as f:
cPickle.dump(data, f, protocol=-1)
def check_data(v,g):
"""
Plot each trace and accept or reject by eye
:param v:
:param g:
:return:
"""
N = v.shape[1]
assert g.shape[1] == N
to_keep = np.ones(N, dtype=np.bool)
plt.figure()
plt.ion()
for n in xrange(N):
plt.clf()
plt.plot(v[:,n])
plt.show()
accept = raw_input("Accept %d? " % n)
if accept.strip().lower() != "y":
print "Rejecting ", n
to_keep[n] = 0
plt.ioff()
print "Accepted: "
print to_keep
vp = v[:,to_keep]
gp = g[:,to_keep]
return vp, gp
# plot_mixture_distribution()
generate_data()
|
HIPS/optofit
|
optoclassify/generate_classification_data.py
|
Python
|
gpl-2.0
| 5,928
|
[
"NEURON"
] |
a6af520fc028d7fcd5310cb737118eaa9a19664b9ac891c5d00a309dfec6e64d
|
#!/usr/bin/env python
########################################################################
#
# (C) 2013, James Cammarata <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import ansible.constants as C
from ansible.compat.six import string_types
from ansible.compat.six.moves.urllib.error import HTTPError
from ansible.compat.six.moves.urllib.parse import quote as urlquote, urlencode
from ansible.errors import AnsibleError
from ansible.galaxy.token import GalaxyToken
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def g_connect(method):
    ''' Wrapper that lazily initializes the connection info to the Galaxy server on first use (a generic sketch of this pattern follows this function). '''
def wrapped(self, *args, **kwargs):
if not self.initialized:
display.vvvv("Initial connection to galaxy_server: %s" % self._api_server)
server_version = self._get_server_api_version()
if server_version not in self.SUPPORTED_VERSIONS:
raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version)
self.baseurl = '%s/api/%s' % (self._api_server, server_version)
self.version = server_version # for future use
display.vvvv("Base API: %s" % self.baseurl)
self.initialized = True
return method(self, *args, **kwargs)
return wrapped
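# Hedged, self-contained sketch of the lazy-initialization pattern that
# g_connect implements above; the names below are hypothetical and not part
# of Ansible.
def _lazy(method):
    def wrapped(self, *args, **kwargs):
        if not self.initialized:
            self.setup()  # deferred, one-time setup
            self.initialized = True
        return method(self, *args, **kwargs)
    return wrapped
class _Client(object):
    def __init__(self):
        self.initialized = False
    def setup(self):
        self.base = 'https://example.invalid/api/v1'
    @_lazy
    def ping(self):
        return self.base
assert _Client().ping().endswith('/v1')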
class GalaxyAPI(object):
    ''' This class is meant to be used as an API client for an Ansible Galaxy server '''
SUPPORTED_VERSIONS = ['v1']
def __init__(self, galaxy):
self.galaxy = galaxy
self.token = GalaxyToken()
self._api_server = C.GALAXY_SERVER
self._validate_certs = not galaxy.options.ignore_certs
self.baseurl = None
self.version = None
self.initialized = False
display.debug('Validate TLS certificates: %s' % self._validate_certs)
# set the API server
if galaxy.options.api_server != C.GALAXY_SERVER:
self._api_server = galaxy.options.api_server
def __auth_header(self):
token = self.token.get()
if token is None:
raise AnsibleError("No access token. You must first use login to authenticate and obtain an access token.")
return {'Authorization': 'Token ' + token}
@g_connect
def __call_galaxy(self, url, args=None, headers=None, method=None):
if args and not headers:
headers = self.__auth_header()
try:
display.vvv(url)
resp = open_url(url, data=args, validate_certs=self._validate_certs, headers=headers, method=method,
timeout=20)
data = json.load(resp)
except HTTPError as e:
res = json.load(e)
raise AnsibleError(res['detail'])
return data
@property
def api_server(self):
return self._api_server
@property
def validate_certs(self):
return self._validate_certs
def _get_server_api_version(self):
"""
Fetches the Galaxy API current version to ensure
the API server is up and reachable.
"""
url = '%s/api/' % self._api_server
try:
return_data = open_url(url, validate_certs=self._validate_certs)
except Exception as e:
raise AnsibleError("Failed to get data from the API server (%s): %s " % (url, to_native(e)))
try:
data = json.load(return_data)
except Exception as e:
raise AnsibleError("Could not process data from the API server (%s): %s " % (url, to_native(e)))
if 'current_version' not in data:
raise AnsibleError("missing required 'current_version' from server response (%s)" % url)
return data['current_version']
@g_connect
def authenticate(self, github_token):
"""
Retrieve an authentication token
"""
url = '%s/tokens/' % self.baseurl
args = urlencode({"github_token": github_token})
resp = open_url(url, data=args, validate_certs=self._validate_certs, method="POST")
data = json.load(resp)
return data
@g_connect
def create_import_task(self, github_user, github_repo, reference=None, role_name=None):
"""
Post an import request
"""
url = '%s/imports/' % self.baseurl
args = {
"github_user": github_user,
"github_repo": github_repo,
"github_reference": reference if reference else ""
}
if role_name:
args['alternate_role_name'] = role_name
elif github_repo.startswith('ansible-role'):
args['alternate_role_name'] = github_repo[len('ansible-role')+1:]
data = self.__call_galaxy(url, args=urlencode(args))
if data.get('results', None):
return data['results']
return data
@g_connect
def get_import_task(self, task_id=None, github_user=None, github_repo=None):
"""
Check the status of an import task.
"""
url = '%s/imports/' % self.baseurl
if task_id is not None:
url = "%s?id=%d" % (url,task_id)
elif github_user is not None and github_repo is not None:
url = "%s?github_user=%s&github_repo=%s" % (url,github_user,github_repo)
else:
raise AnsibleError("Expected task_id or github_user and github_repo")
data = self.__call_galaxy(url)
return data['results']
@g_connect
def lookup_role_by_name(self, role_name, notify=True):
"""
Find a role by name.
"""
role_name = urlquote(role_name)
try:
parts = role_name.split(".")
user_name = ".".join(parts[0:-1])
role_name = parts[-1]
if notify:
display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
except:
raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name)
url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name)
data = self.__call_galaxy(url)
if len(data["results"]) != 0:
return data["results"][0]
return None
@g_connect
def fetch_role_related(self, related, role_id):
"""
Fetch the list of related items for the given role.
The url comes from the 'related' field of the role.
"""
try:
url = '%s/roles/%d/%s/?page_size=50' % (self.baseurl, int(role_id), related)
data = self.__call_galaxy(url)
results = data['results']
done = (data.get('next_link', None) is None)
while not done:
url = '%s%s' % (self._api_server, data['next_link'])
data = self.__call_galaxy(url)
results += data['results']
done = (data.get('next_link', None) is None)
return results
except:
return None
@g_connect
def get_list(self, what):
"""
Fetch the list of items specified.
"""
try:
url = '%s/%s/?page_size' % (self.baseurl, what)
data = self.__call_galaxy(url)
if "results" in data:
results = data['results']
else:
results = data
done = True
if "next" in data:
done = (data.get('next_link', None) is None)
while not done:
url = '%s%s' % (self._api_server, data['next_link'])
data = self.__call_galaxy(url)
results += data['results']
done = (data.get('next_link', None) is None)
return results
except Exception as error:
raise AnsibleError("Failed to download the %s list: %s" % (what, str(error)))
@g_connect
def search_roles(self, search, **kwargs):
search_url = self.baseurl + '/search/roles/?'
if search:
search_url += '&autocomplete=' + urlquote(search)
tags = kwargs.get('tags',None)
platforms = kwargs.get('platforms', None)
page_size = kwargs.get('page_size', None)
author = kwargs.get('author', None)
if tags and isinstance(tags, string_types):
tags = tags.split(',')
search_url += '&tags_autocomplete=' + '+'.join(tags)
if platforms and isinstance(platforms, string_types):
platforms = platforms.split(',')
search_url += '&platforms_autocomplete=' + '+'.join(platforms)
if page_size:
search_url += '&page_size=%s' % page_size
if author:
search_url += '&username_autocomplete=%s' % author
data = self.__call_galaxy(search_url)
return data
@g_connect
def add_secret(self, source, github_user, github_repo, secret):
url = "%s/notification_secrets/" % self.baseurl
args = urlencode({
"source": source,
"github_user": github_user,
"github_repo": github_repo,
"secret": secret
})
data = self.__call_galaxy(url, args=args)
return data
@g_connect
def list_secrets(self):
url = "%s/notification_secrets" % self.baseurl
data = self.__call_galaxy(url, headers=self.__auth_header())
return data
@g_connect
def remove_secret(self, secret_id):
url = "%s/notification_secrets/%s/" % (self.baseurl, secret_id)
data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE')
return data
@g_connect
def delete_role(self, github_user, github_repo):
url = "%s/removerole/?github_user=%s&github_repo=%s" % (self.baseurl,github_user,github_repo)
data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE')
return data
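# Hedged, generic sketch of the next_link pagination loop used by get_list()
# and fetch_role_related() above; fetch stands in for __call_galaxy and
# base_url for the API server prefix. Not part of Ansible.
def _collect_pages(fetch, base_url, first_url):
    data = fetch(first_url)
    results = data['results']
    while data.get('next_link'):
        data = fetch(base_url + data['next_link'])
        results += data['results']
    return results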
|
kaarolch/ansible
|
lib/ansible/galaxy/api.py
|
Python
|
gpl-3.0
| 10,835
|
[
"Galaxy"
] |
709dc01459f8debb57ad069a653092cabfdc19b00394f38c7383632534fa6a23
|
import logging
from collections import namedtuple
from .refs import extract
from .nodes import NodeVisitor
from .utils import Buffer
from .parser import parse
from .errors import UserError, WARNING, ERROR, Errors
from .compat import _exec_in
from .checker import def_types, split_defs, Environ, check, collect_defs
from .checker import NamesResolver, NamesUnResolver
from .loaders import DictCache
from .tokenizer import tokenize
from .compile.python import compile_module
log = logging.getLogger(__name__)
class DependenciesVisitor(NodeVisitor):
def __init__(self):
self._dependencies = set([])
@classmethod
def get_dependencies(cls, node):
visitor = cls()
visitor.visit(node)
return visitor._dependencies
def visit_tuple(self, node):
sym = node.values[0]
if sym.ns:
self._dependencies.add(sym.ns)
super(DependenciesVisitor, self).visit_tuple(node)
Namespace = namedtuple('Namespace',
'name modified_time module dependencies')
ParsedSource = namedtuple('ParsedSource',
'name modified_time node dependencies')
class SimpleContext(object):
def __init__(self, result):
self.buffer = Buffer()
self.result = result
class Function(object):
def __init__(self, lookup, name):
self._lookup = lookup
self.name = name
def query(self):
return self._lookup._get_query(self.name)
def render(self, result):
return self._lookup._render(self.name, result)
class Context(SimpleContext):
def __init__(self, lookup, result):
self._lookup = lookup
super(Context, self).__init__(result)
self.builtins = lookup.builtins
def lookup(self, name):
ns, _, fn_name = name.partition('/')
return self._lookup._get_namespace(ns).module[fn_name]
class Lookup(object):
def __init__(self, types, loader, cache=None, builtins=None):
self.types = types
self._loader = loader
self._cache = cache or DictCache()
self.builtins = builtins or {}
self._namespaces = {}
self._reqs = {}
def _get_dependencies(self, ns, _visited=None):
_visited = set([]) if _visited is None else _visited
if ns.name not in _visited:
_visited.add(ns.name)
yield ns
for dep_name in ns.dependencies:
dep = self._namespaces[dep_name]
for item in self._get_dependencies(dep, _visited=_visited):
yield item
def _load_sources(self, name, _visited=None):
_visited = set([]) if _visited is None else _visited
if name not in _visited:
_visited.add(name)
source = self._loader.load(name)
errors = Errors()
try:
with errors.module_ctx(name):
node = parse(list(tokenize(source.content, errors)), errors)
except UserError as e:
self._raise_on_errors(errors, type(e))
raise
node = NamesResolver(source.name).visit(node)
dependencies = DependenciesVisitor.get_dependencies(node)
yield ParsedSource(name, source.modified_time, node, dependencies)
for dep in dependencies:
for item in self._load_sources(dep, _visited=_visited):
yield item
def _format_error(self, error):
source = self._loader.load(error.func.module)
source_lines = source.content.splitlines()
start, end = error.location.start, error.location.end
start_line, end_line = start.line - 1, end.line - 1
first_line = source_lines[start_line]
indent = len(first_line) - len(first_line.lstrip())
indent = min(indent, start.column - 1)
if end_line - start_line > 2:
snippet = [' | ' + source_lines[start_line][indent:],
' ...',
' | ' + source_lines[end_line][indent:]]
elif end_line - start_line > 0:
snippet = [' | ' + source_lines[l][indent:]
for l in range(start_line, end_line + 1)]
else:
first_line_offset = sum(map(len, source_lines[:start_line])) + \
start_line
highlight_indent = start.offset - first_line_offset - indent
highlight_len = end.offset - start.offset
highlight = (' ' * highlight_indent) + ('~' * highlight_len)
snippet = [' ' + source_lines[start_line][indent:],
' ' + highlight]
return (
'{message}\n'
' File "{file}", line {line_num}, in {func_name}\n'
'{snippet}'
.format(
message=error.message,
file=source.file_path,
line_num=start.line,
func_name=error.func.name or '<content>',
snippet='\n'.join(' ' + l for l in snippet),
)
)
def _raise_on_errors(self, errors, error_cls=None):
error_cls = UserError if error_cls is None else error_cls
errors_list = []
warnings_list = []
for e in errors.list:
msg = self._format_error(e)
if e.severity == WARNING:
warnings_list.append(msg)
elif e.severity == ERROR:
errors_list.append(msg)
else:
raise ValueError(repr(e.severity))
if warnings_list:
for msg in warnings_list:
                log.warning(msg)
if errors_list:
raise error_cls('\n'.join(errors_list))
def _check(self, parsed_sources):
env = dict(self.types)
node = collect_defs(ps.node for ps in parsed_sources)
env.update(def_types(node))
environ = Environ(env)
try:
node = check(node, environ)
except UserError as e:
self._raise_on_errors(environ.errors, type(e))
else:
self._raise_on_errors(environ.errors)
reqs = extract(node)
modules = {ns: NamesUnResolver(ns).visit(mod)
for ns, mod in split_defs(node).items()}
checked_sources = [ps._replace(node=modules[ps.name])
for ps in parsed_sources]
return checked_sources, reqs
def _compile_module(self, name, module):
module_code = compile(module, '<{}.kinko>'.format(name), 'exec')
globals_dict = {}
_exec_in(module_code, globals_dict)
return globals_dict
def _load(self, name):
ns = self._namespaces.get(name)
if ns is not None:
deps = list(self._get_dependencies(ns))
if all(self._loader.is_uptodate(dep) for dep in deps):
return
parsed_sources = list(self._load_sources(name))
checked_sources, refs = self._check(parsed_sources)
modules = {cs.name: compile_module(cs.node) for cs in checked_sources}
compiled_modules = {name: self._compile_module(name, module)
for name, module in modules.items()}
for src in parsed_sources:
self._namespaces[src.name] = Namespace(src.name, src.modified_time,
compiled_modules[src.name],
src.dependencies)
self._reqs.update(refs)
def _get_namespace(self, name):
self._load(name)
return self._namespaces[name]
def _get_query(self, name):
ns, _, _ = name.partition('/')
self._load(ns)
return self._reqs[name]
def _render(self, name, result):
ctx = Context(self, result)
ctx.buffer.push()
fn = ctx.lookup(name)
fn(ctx)
return ctx.buffer.pop()
def get(self, name):
return Function(self, name)
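# --- Illustrative usage sketch (not part of the original module) ---
# How the pieces above are intended to fit together, assuming a loader and a
# type environment are already available; the "index/content" name and the
# _example_usage helper are made up for illustration only.
def _example_usage(types, loader, result):
    lookup = Lookup(types, loader)
    fn = lookup.get('index/content')    # Function wrapper for "namespace/name"
    query = fn.query()                  # data requirements collected at check time
    html = fn.render(result)            # render via the compiled module
    return query, html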
|
vmagamedov/kinko
|
kinko/lookup.py
|
Python
|
bsd-3-clause
| 7,960
|
[
"VisIt"
] |
26e5e2ee7e6c1dc15af409e4a7a5575ddab942d8ad0c7877452a836d4decdbe9
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import scipy.signal as ss
import numpy as np
class GetFeatures(object):
'''Class to extract features from step-current soma traces'''
def __init__(self, tvec, traces):
'''Initialize object. Input tvec is global time vector,
traces an n x tvec.size array containing the somatic traces'''
self.tvec = tvec
self.traces = traces
#getting spiketrains, sounds useful....
APtrains = []
for x in self.traces:
APtrains.append(self.return_spiketrain(v = x, v_t = -20))
self.APtrains = np.array(APtrains)
def return_spiketrain(self, v, v_t = -20):
'''Takes voltage trace v, and some optional voltage threshold v_t,
returning spike train array of same length as v.'''
AP_train = np.zeros(v.shape)
[u] = np.where(v > v_t)
if len(u) > 0:
            #splitting u into intervals if there is more than one AP
w = {}
j = 0
i_n = 0
i = 0
            for i in range(1, len(u)):
if u[i]!=u[i-1]+1:
w[j] = u[i_n:i]
i_n = i
j += 1
w[j] = u[i_n:i+1]
            for j in range(len(w.keys())):
[n] = np.where(v[w[j]] == v[w[j]].max())
AP_train[w[j][n]] = 1.
return AP_train
def feature0(self,
xedges = np.arange(-100, 55, 5),
yedges = np.arange(-10, 25, 1),
threshold=2,
smooth=True):
'''Return the phase plane trajectory 2D-histogram of the spikes'''
diff_traces = np.zeros(self.traces.shape)
for i, trace in enumerate(self.traces):
diff_traces[i, ] = np.gradient(trace)
hist2 = np.zeros((yedges.size-1, xedges.size-1))
for i, diff_trace in enumerate(diff_traces):
inds = abs(diff_trace) >= threshold
if np.any(inds):
hist2 += np.histogram2d(diff_trace[inds],
self.traces[i, inds],
bins=[yedges, xedges])[0]
if smooth:
y = np.ones((3,3))
            y *= ss.windows.gaussian(3, 1)[0]  # windows submodule; ss.gaussian was removed in SciPy 1.13
y[1,1] = 1
y /= y.sum()
hist2 = ss.convolve2d(hist2, y, 'same')
return hist2
def feature1(self,
rows=[-1],
inds=np.r_[range(800, 1600), range(7200, 8000)]):
'''Return the sum of potentials from the negative step currents'''
return self.traces[rows, inds]
#def feature2(self):
# '''Return the trace of the rebound burst'''
# [inds] = np.where(self.tvec >= 1000)
# return self.traces[-1, inds]
#def feature3(self):
# '''return the cumsum of trace with 70 pA input current'''
# return self.traces[6, ].cumsum()
#def feature4(self, xedges = np.linspace(-100, 30, 27),
# yedges = np.linspace(-200, 300, 21), threshold=10):
# '''Return the phase plane trajectory 2D-histogram of the spikes'''
#
# trace = self.traces[7, 1E4:-1]
#
#
# diff_trace = np.diff(trace)*10
#
#
# #hist2 = np.zeros((yedges.size-1, xedges.size-1))
# hist2 = np.histogram2d(diff_trace, trace[1:], bins=[yedges, xedges])[0]
#
# return hist2
def feature5(self, **kwargs):
'''return the cumsum of the APtrains'''
cumsums = []
for x in self.APtrains:
cumsums.append(x.cumsum())
return np.array(cumsums)
#def feature6(self):
# '''return the rebound burst from the last dataset'''
# return self.traces[7, 1E4:-1]
def feature7(self):
        '''Return the value of the resting potential, i.e. the mean potential
        of all traces before stimulus onset'''
inds = np.arange(0, 1000)
return self.traces[:, inds].mean()
def plot_feature0(self, new_fig=True, **kwargs):
data = self.feature0(**kwargs)
if new_fig:
plt.figure()
else:
plt.gca()
        plt.imshow(data, interpolation='nearest', origin='lower')
plt.axis('tight')
plt.ylabel(r'$dV/dt$ (-)')
plt.xlabel(r'$V_\mathrm{soma}$ (-)')
plt.colorbar()
plt.title('Feature 0')
return plt.gcf()
def plot_feature1(self, new_fig=True, **kwargs):
if new_fig:
plt.figure()
else:
plt.gca()
plt.plot(self.feature1(**kwargs), label='sag&rebound')
plt.axis('tight')
plt.legend(loc='best')
plt.xlabel('time (ms)')
plt.ylabel('Potential (mV)')
plt.title('Feature 1')
return plt.gcf()
def plot_feature5(self, new_fig=True, **kwargs):
data = self.feature5(**kwargs)
if new_fig:
plt.figure()
else:
plt.gca()
plt.imshow(data, interpolation='nearest')
plt.axis('tight')
plt.colorbar()
plt.ylabel('Trial ()')
plt.xlabel('Timestep ()')
plt.title('Feature 5')
return plt.gcf()
def plot_traces(self, ymin=None, ymax=None, new_fig=True):
if new_fig:
fig = plt.figure()
else:
fig = plt.gcf()
fig.subplots_adjust(left=None, bottom=0.1, right=None, top=0.9, wspace=0, hspace=None)
nrows = self.traces.shape[0]
#get global min and maxima of traces to set axis correctly
i = 1
for x in self.traces:
ax = fig.add_subplot(nrows, 1, i, frameon=False, xticks=[], yticks=[], clip_on=False)
ax.plot(self.tvec, x)
            if ymin is not None and ymax is not None:
plt.ylim([ymin, ymax])
else:
plt.ylim([self.traces.min(), self.traces.max()])
plt.xlim([self.tvec[0], self.tvec[-1]])
if i == 1:
ax.plot([self.tvec[-1]-500, self.tvec[-1]-400],
[self.traces.min(), self.traces.min()],
lw=2, color='k', clip_on=False)
ax.text(self.tvec[-1]-500, self.traces.min()-30, '100 ms')
ax.plot([self.tvec[-1], self.tvec[-1]], [-50, 0],
lw=2, color='k', clip_on=False)
ax.text(self.tvec[-1]+5, -25, '50 mV')
i += 1
for o in fig.findobj():
o.set_clip_on(False)
return plt.gcf()
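# --- Illustrative usage sketch (not part of the original script) ---
# Builds two synthetic "somatic traces" with a few triangular spike-like events
# and runs the feature extraction; all numbers here are arbitrary assumptions,
# chosen only so that spike detection and feature7() have something to work on.
if __name__ == '__main__':
    tvec = np.arange(0, 200., 0.1)               # 2000 time points, dt = 0.1 ms
    traces = -65. * np.ones((2, tvec.size))      # resting-potential baseline
    for row, spike_indices in enumerate(([500, 1200], [800])):
        for idx in spike_indices:
            traces[row, idx - 2:idx + 3] = [-10., 5., 20., 5., -10.]
    features = GetFeatures(tvec, traces)
    print('spikes per trace:', features.APtrains.sum(axis=1))
    print('resting potential estimate (mV):', features.feature7())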
|
espenhgn/NSGAII
|
feature.py
|
Python
|
gpl-2.0
| 6,768
|
[
"Gaussian"
] |
a3c9018cb97247478177cb9c5204976d02f13d82f4403d824506649f892a562f
|
r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set. [1]_, [2]_
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P. J. Rousseeuw in [3]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, which has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] Johanna Hardin, David M Rocke. The distribution of robust distances.
Journal of Computational and Graphical Statistics. December 1, 2005,
14(4): 928-946.
.. [2] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
.. [3] P. J. Rousseeuw. Least median of squares regression. Journal of the
    American Statistical Association, 79:871, 1984.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(
np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1],
)
).astype(int)
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
        outliers_offset = 10.0 * (
            # use the seeded generator so each repetition is reproducible
            rng.randint(2, size=(n_outliers, n_features)) - 0.5
        )
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_**2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = (
EmpiricalCovariance().fit(X).error_norm(np.eye(n_features))
)
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location**2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
lw = 2
plt.errorbar(
range_n_outliers,
err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location",
lw=lw,
color="m",
)
plt.errorbar(
range_n_outliers,
err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean",
lw=lw,
color="green",
)
plt.errorbar(
range_n_outliers,
err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean",
lw=lw,
color="black",
)
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(
range_n_outliers,
err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)",
color="m",
)
plt.errorbar(
range_n_outliers[: (x_size // 5 + 1)],
err_cov_emp_full.mean(1)[: (x_size // 5 + 1)],
yerr=err_cov_emp_full.std(1)[: (x_size // 5 + 1)],
label="Full data set empirical covariance",
color="green",
)
plt.plot(
range_n_outliers[(x_size // 5) : (x_size // 2 - 1)],
err_cov_emp_full.mean(1)[(x_size // 5) : (x_size // 2 - 1)],
color="green",
ls="--",
)
plt.errorbar(
range_n_outliers,
err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance",
color="black",
)
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
|
manhhomienbienthuy/scikit-learn
|
examples/covariance/plot_robust_vs_empirical_covariance.py
|
Python
|
bsd-3-clause
| 6,490
|
[
"Gaussian"
] |
bf38b9411fcabd15dfb5c157d78f36a58ad81133b7384f47a2c8b01dbc49659d
|
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
        dtype = np.float64  # np.float (a builtin-float alias) was removed from NumPy
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False, dtype=None):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : string, type, list of types or None (default=None)
Data type required for X and Y. If None, the dtype will be an
appropriate float type selected by _return_float_dtype.
.. versionadded:: 0.18
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype_float = _return_float_dtype(X, Y)
warn_on_dtype = dtype is not None
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
Y = check_array(Y, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
    For efficiency reasons, the euclidean distance between a pair of row
    vectors x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
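# --- Illustrative sketch (not part of the original module) ---
# The implementation above relies on the algebraic identity
#     ||x - y||^2 = <x, x> - 2 <x, y> + <y, y>
# so that the whole distance matrix comes from one matrix product plus
# precomputed row norms. The helper below (never called by the library) checks
# that expansion against a direct computation on random data, with NumPy only.
def _check_euclidean_expansion(n=5, d=3, seed=0):
    rng = np.random.RandomState(seed)
    A, B = rng.rand(n, d), rng.rand(n, d)
    direct = np.sqrt(((A[:, None, :] - B[None, :, :]) ** 2).sum(-1))
    expanded = np.sqrt(np.maximum(
        (A ** 2).sum(1)[:, None] - 2 * A.dot(B.T) + (B ** 2).sum(1)[None, :], 0))
    return np.allclose(direct, expanded)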
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
    values.fill(np.inf)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
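# --- Illustrative sketch (not part of the original module) ---
# The chunked search above should agree with the naive approach of building the
# full distance matrix and reducing it. This helper (not called by the library)
# compares the two on small random inputs; pairwise_distances is defined later
# in this module and is resolved when the helper is actually called.
def _check_argmin_chunking(seed=0):
    rng = np.random.RandomState(seed)
    X, Y = rng.rand(20, 4), rng.rand(30, 4)
    idx, dist = pairwise_distances_argmin_min(X, Y, batch_size=7)
    D = pairwise_distances(X, Y)
    return (np.array_equal(idx, D.argmin(axis=1))
            and np.allclose(dist, D.min(axis=1)))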
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
    The cosine distance is equivalent to half the squared euclidean
    distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
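# --- Illustrative sketch (not part of the original module) ---
# Checks the claim in the Notes above: for unit-norm rows, the paired cosine
# distance equals half the squared paired euclidean distance. The helper is
# not called by the library; it exists only as a numerical illustration.
def _check_cosine_euclidean_relation(seed=0):
    rng = np.random.RandomState(seed)
    X, Y = rng.rand(10, 4), rng.rand(10, 4)
    Xn, Yn = normalize(X), normalize(Y)
    return np.allclose(paired_cosine_distances(Xn, Yn),
                       0.5 * paired_euclidean_distances(Xn, Yn) ** 2)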
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
degree : int, default 3
gamma : float, default None
        if None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
gamma : float, default None
        If None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
        If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
        If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter *dense_output* for sparse output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
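# --- Illustrative sketch (not part of the original module) ---
# The docstring notes that on L2-normalized data cosine_similarity reduces to
# linear_kernel. This helper (not called by the library) checks that statement
# on small random dense inputs.
def _check_cosine_is_linear_on_normalized(seed=0):
    rng = np.random.RandomState(seed)
    X, Y = rng.rand(8, 5), rng.rand(6, 5)
    return np.allclose(cosine_similarity(X, Y),
                       linear_kernel(normalize(X), normalize(Y)))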
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
X, Y = check_pairwise_arrays(X, Y, dtype=dtype)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
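# --- Illustrative sketch (not part of the original module) ---
# Two behaviours of pairwise_distances worth remembering, demonstrated on random
# data (the helper is not called by the library): a precomputed square matrix is
# passed through unchanged, and a callable metric matches the equivalent string
# metric.
def _check_pairwise_distances_behaviour(seed=0):
    rng = np.random.RandomState(seed)
    X, Y = rng.rand(6, 3), rng.rand(4, 3)
    D_XX = pairwise_distances(X)  # (6, 6) euclidean distance matrix
    passthrough = np.allclose(pairwise_distances(D_XX, metric='precomputed'), D_XX)
    manual = pairwise_distances(
        X, Y, metric=lambda a, b: np.sqrt(((a - b) ** 2).sum()))
    return passthrough and np.allclose(pairwise_distances(X, Y), manual)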
# These distances require boolean arrays, when using scipy.spatial.distance
PAIRWISE_BOOLEAN_FUNCTIONS = [
'dice',
'jaccard',
'kulsinski',
'matching',
'rogerstanimoto',
'russellrao',
'sokalmichener',
'sokalsneath',
'yule',
]
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel functions.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel functions, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
|
toastedcornflakes/scikit-learn
|
sklearn/metrics/pairwise.py
|
Python
|
bsd-3-clause
| 46,488
|
[
"Gaussian"
] |
31b6a4d2b7f809abe519e1dbdf42c0d96f9d3d1c8fcfed9040d31db2e95e39a6
|
from __future__ import (absolute_import, division, print_function)
import AbinsModules
import io
import numpy as np
from mantid.kernel import Atom
class LoadGAUSSIAN(AbinsModules.GeneralDFTProgram):
"""
Class for loading GAUSSIAN ab initio vibrational data.
"""
def __init__(self, input_dft_filename):
"""
:param input_dft_filename: name of file with phonon data (foo.log)
"""
super(LoadGAUSSIAN, self).__init__(input_dft_filename=input_dft_filename)
self._dft_program = "GAUSSIAN"
self._parser = AbinsModules.GeneralDFTParser()
self._num_atoms = None
self._num_read_freq = 0
def read_phonon_file(self):
"""
        Reads phonon data from GAUSSIAN output files. Saves frequencies and atomic displacements (molecular
        calculations only) and the hash of the phonon file to <>.hdf5.
:returns: object of type AbinsData.
"""
data = {} # container to store read data
with io.open(self._clerk.get_input_filename(), "rb", ) as gaussian_file:
# create dummy lattice vectors
self._generates_lattice_vectors(data=data)
# move file pointer to the last optimized atomic positions
self._parser.find_last(file_obj=gaussian_file, msg="Input orientation:")
self._read_atomic_coordinates(file_obj=gaussian_file, data=data)
# read frequencies, corresponding atomic displacements for a molecule
self._parser.find_first(file_obj=gaussian_file,
msg="Harmonic frequencies (cm**-1), IR intensities (KM/Mole), Raman scattering")
self._read_modes(file_obj=gaussian_file, data=data)
# save data to hdf file
self.save_dft_data(data=data)
# return AbinsData object
return self._rearrange_data(data=data)
def _read_atomic_coordinates(self, file_obj=None, data=None):
"""
Reads atomic coordinates from .log GAUSSIAN file.
:param file_obj: file object from which we read
:param data: Python dictionary to which atoms data should be added
"""
atoms = {}
atom_indx = 0
end_msgs = ["---------------------------------------------------------------------"]
header_lines = 5
# Input orientation:
# ---------------------------------------------------------------------
# Center Atomic Atomic Coordinates (Angstroms)
# Number Number Type X Y Z
# ---------------------------------------------------------------------
for i in range(header_lines):
file_obj.readline()
while not self._parser.block_end(file_obj=file_obj, msg=end_msgs):
line = file_obj.readline()
entries = line.split()
z_number = int(entries[1])
atom = Atom(z_number=z_number)
coord = np.asarray([float(i) for i in entries[3:6]])
atoms["atom_{}".format(atom_indx)] = {"symbol": atom.symbol, "mass": atom.mass, "sort": atom_indx,
"coord": coord}
atom_indx += 1
self._num_atoms = len(atoms)
data["atoms"] = atoms
def _generates_lattice_vectors(self, data=None):
"""
Generates dummy lattice vectors. Gaussian is only for molecular calculations.
:param data: Python dictionary to which found lattice vectors should be added
"""
data["unit_cell"] = np.zeros(shape=(3, 3), dtype=AbinsModules.AbinsConstants.FLOAT_TYPE)
def _read_modes(self, file_obj=None, data=None):
"""
Reads vibrational modes (frequencies and atomic displacements).
:param file_obj: file object from which we read
:param data: Python dictionary to which k-point data should be added
"""
freq = []
# it is a molecule so we subtract 3 translations and 3 rotations
num_freq = 3 * self._num_atoms - AbinsModules.AbinsConstants.ROTATIONS_AND_TRANSLATIONS
dim = 3
atomic_disp = np.zeros(shape=(num_freq, self._num_atoms, dim), dtype=AbinsModules.AbinsConstants.COMPLEX_TYPE)
end_msg = ["-------------------"]
# Next block is:
# -------------------
# - Thermochemistry -
# -------------------
# parse block with frequencies and atomic displacements
while not (self._parser.block_end(file_obj=file_obj, msg=end_msg) or self._parser.file_end(file_obj=file_obj)):
self._read_freq_block(file_obj=file_obj, freq=freq)
self._read_atomic_disp_block(file_obj=file_obj, disp=atomic_disp)
data["frequencies"] = np.asarray([freq]).astype(dtype=AbinsModules.AbinsConstants.FLOAT_TYPE, casting="safe")
# we mimic that we have one Gamma k-point
data["k_vectors"] = np.asarray([[0.0, 0.0, 0.0]]).astype(dtype=AbinsModules.AbinsConstants.FLOAT_TYPE,
casting="safe")
data["weights"] = np.asarray([1.0])
# Normalize displacements so that Abins can use it to create its internal data objects
# num_atoms: number of atoms in the system
# num_freq: number of modes
# dim: dimension for each atomic displacement (atoms vibrate in 3D space)
self._num_k = 1
# normalisation
# atomic_disp [num_freq, num_atoms, dim]
# masses [num_atoms]
masses = np.asarray([data["atoms"]["atom_%s" % atom]["mass"] for atom in range(self._num_atoms)])
# [num_freq, num_atoms, dim] -> [num_freq, num_atoms, dim, dim] -> [num_freq, num_atoms]
temp1 = np.trace(np.einsum("lki, lkj->lkij", atomic_disp, atomic_disp), axis1=2, axis2=3)
temp2 = np.einsum("ij, j->ij", temp1, masses)
# [num_freq, num_atoms] -> [num_freq]
norm = np.sum(temp2, axis=1)
# noinspection PyTypeChecker
atomic_disp = np.einsum("ijk,i->ijk", atomic_disp, 1.0 / np.sqrt(norm))
atomic_disp = np.einsum("ijk,j->ijk", atomic_disp, np.sqrt(masses))
# [num_freq, num_atoms, dim] -> [num_k, num_atoms, num_freq, dim]
data["atomic_displacements"] = np.asarray([np.transpose(a=atomic_disp, axes=(1, 0, 2))])
def _read_freq_block(self, file_obj=None, freq=None):
"""
Parses block with frequencies.
:param file_obj: file object from which we read
:param freq: list with frequencies which we update
"""
line = self._parser.find_first(file_obj=file_obj, msg="Frequencies --")
line = line.split()
freq.extend([float(i) for i in line[2:]])
def _read_atomic_disp_block(self, file_obj=None, disp=None):
"""
Parses block with atomic displacements.
:param file_obj: file object from which we read
:param disp: array of atomic displacements to update, with shape [num_freq, num_atoms, dim]
"""
sub_block_start = "X Y Z X Y Z X Y Z"
self._parser.find_first(file_obj=file_obj, msg=sub_block_start)
num_atom = 0
# Atom AN X Y Z X Y Z X Y Z
line_size = len(sub_block_start.split()) + 2
freq_per_line = sub_block_start.count("X")
l = file_obj.readline().split()
while len(l) == line_size:
for f in range(freq_per_line):
disp[self._num_read_freq + f, num_atom, 0] = complex(float(l[2 + 3 * f]), 0)
disp[self._num_read_freq + f, num_atom, 1] = complex(float(l[3 + 3 * f]), 0)
disp[self._num_read_freq + f, num_atom, 2] = complex(float(l[4 + 3 * f]), 0)
l = file_obj.readline().split()
num_atom += 1
self._num_read_freq += freq_per_line
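# --- Added illustration (not part of the original loader) ---
# A self-contained numpy sketch of the mass-weighted normalisation applied in
# _read_modes above: after the two einsum rescalings every mode should carry a
# unit norm. Shapes and masses below are arbitrary example values.
if __name__ == "__main__":
    import numpy as np

    num_freq, num_atoms, dim = 4, 3, 3
    np.random.seed(0)
    disp = np.random.normal(size=(num_freq, num_atoms, dim)).astype(complex)
    masses = np.array([1.0, 12.0, 16.0])

    # norm[l] = sum_k m_k * |disp[l, k]|^2  (what the trace/einsum combination computes above)
    norm = np.einsum("lki,lki,k->l", disp, disp.conj(), masses).real
    disp = np.einsum("lki,l->lki", disp, 1.0 / np.sqrt(norm))
    disp = np.einsum("lki,k->lki", disp, np.sqrt(masses))

    # every mode is now normalised to 1
    assert np.allclose(np.einsum("lki,lki->l", disp, disp.conj()).real, 1.0)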
|
dymkowsk/mantid
|
scripts/AbinsModules/LoadGAUSSIAN.py
|
Python
|
gpl-3.0
| 8,008
|
[
"Gaussian"
] |
5b9bebec1088482d918e3fcdcb3b8b80e5452b712bc9302a70a05edee98f210e
|
""" DIRAC JobDB class is a front-end to the main WMS database containing
job definitions and status information. It is used in most of the WMS
components
The following methods are provided for public usage:
getJobParameters()
getJobParametersAndAttributes()
setJobParameter()
"""
from __future__ import absolute_import
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Base.ElasticDB import ElasticDB as DB
#############################################################################
class ElasticJobDB(DB):
def __init__(self):
""" Standard Constructor
"""
DB.__init__(self, 'jobelasticdb', 'WorkloadManagement/ElasticJobDB')
self.log.info("==================================================")
#############################################################################
def getJobParameters(self, jobID, paramList=None):
""" Get Job Parameters defined for jobID.
Returns a dictionary with the Job Parameters.
If paramList is empty - all the parameters are returned.
:param self: self reference
:param int jobID: Job ID
:param list paramList: list of parameters to be returned
:return: dict with all Job Parameter values
"""
self.log.debug('JobDB.getParameters: Getting Parameters for job %s' % jobID)
resultDict = {}
if paramList:
paramNameList = []
for x in paramList:
paramNameList.append(x)
query = {
"query": {
"bool": {
"must": [
{"match": {"JobID": jobID}}, {"match": {"Name": ','.join(paramNameList)}}]}},
"_source": ["Name", "Value"]}
else:
query = {"query": {"match": {"JobID": jobID}}, "_source": ["Name", "Value"]}
gLogger.debug("Getting results for %d" % jobID)
result = self.query('jobelasticdb*', query)
if not result['OK']:
return S_ERROR(result)
sources = result['Value']['hits']['hits']
for source in sources:
name = source['_source']['Name']
value = source['_source']['Value']
try:
resultDict[name] = value.tostring()
except BaseException:
resultDict[name] = value
return S_OK(resultDict)
############################################################################
def getJobParametersAndAttributes(self, jobID, attribute=None, paramList=None):
""" Get Job Parameters with Attributes defined for jobID.
Returns a dictionary with the Job Parameters.
If paramList is empty - all the parameters are returned.
:param self: self reference
:param int jobID: Job ID
:param string attribute: Attribute
:param list paramList: list of parameters to be returned
:return: dict with all Job Parameter and Attribute values
"""
self.log.debug('JobDB.getParameters: Getting Parameters for job %s' % jobID)
jobParameters = ["JobID", "Name", "Value", "JobGroup", "Owner", "Proxy", "SubmissionTime", "RunningTime"]
resultDict = {}
if paramList:
query = {
"query": {
"bool": {
"must": [
{"match": {"JobID": jobID}}, {"match": {"Name": ','.join(paramList)}}]}},
"_source": jobParameters}
else:
query = {"query": {"match": {"JobID": jobID}}, "_source": jobParameters}
gLogger.debug("Getting results for %d" % jobID)
result = self.query('jobelasticdb*', query)
if not result['OK']:
return result
sources = result['Value']['hits']['hits']
jobParameters.remove("JobID")
for source in sources:
jobID = source['_source']['JobID']
parametersDict = {}
for parameter in jobParameters:
parametersDict[parameter] = source['_source'][parameter]
resultDict[jobID] = parametersDict
if attribute:
if attribute in jobParameters:
return S_OK(resultDict[jobID][attribute])
else:
return S_ERROR('Attribute %s not found' % attribute)
else:
return S_OK(resultDict)
#############################################################################
def setJobParameter(self, jobID, key, value, **kwargs):
""" Set parameters for the job JobID
:param self: self reference
:param int jobID: Job ID
:param basestring key: Name
:param keyword value: value
:return: S_OK/S_ERROR
"""
attributesDict = {"jobGroup": "00000000", "owner": 'Unknown', "proxy": None, "subTime": None, "runTime": None}
attributesDict.update(kwargs)
paramsStr = "ctx._source.Value = params.value; ctx._source.Name = params.name; "
jobGroupStr = "ctx._source.JobGroup = params.jobGroup; "
attrStr = "ctx._source.Owner = params.owner; ctx._source.Proxy = params.proxy; "
timeStr = "ctx._source.SubmissionTime = params.subTime; ctx._source.RunningTime = params.runTime"
query = {
"query": {
"term": {
"JobID": jobID}},
"script": {
"inline": paramsStr + jobGroupStr + attrStr + timeStr,
"params": {
"value": value,
"name": key,
"jobGroup": attributesDict["jobGroup"],
"owner": attributesDict["owner"],
"proxy": attributesDict["proxy"],
"subTime": attributesDict["subTime"],
"runTime": attributesDict["runTime"]}}}
indexName = self.generateFullIndexName('jobelasticdb')
result = self.exists(indexName)
if not result:
mapping = {
"JobParameters": {
"properties": {
"JobID": {
"type": "long"}, "Name": {
"type": "text"}, "Value": {
"type": "keyword"}, "JobGroup": {
"type": "text"}, "Owner": {
"type": "text"}, "Proxy": {
"type": "text"}, "SubmissionTime": {
"type": "date"}, "RunningTime": {
"type": "date"}}}}
gLogger.debug("Creating index %s" % indexName)
result = self.createIndex('jobelasticdb', mapping)
if not result['OK']:
return result
result = self.update('jobelasticdb*', 'JobParameters', query)
if not result['OK']:
gLogger.error("JobDB.setJobParameter: operation failed.")
return result
if result['Value']['updated'] == 0:
gLogger.debug("Updated values: 0")
query = {
"JobID": jobID,
"Name": key,
"Value": value,
"JobGroup": attributesDict["jobGroup"],
"Owner": attributesDict["owner"],
"Proxy": attributesDict["proxy"],
"SubmissionTime": attributesDict["subTime"],
"RunningTime": attributesDict["runTime"]}
gLogger.debug("Inserting values in index %s" % indexName)
result = self.update(indexName, 'JobParameters', query, updateByQuery=False, id=jobID)
if not result['OK']:
gLogger.error("JobDB.setJobParameter: operation failed.")
return result
return result
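# --- Added illustration (not part of the original class) ---
# A small, hedged sketch of the Elasticsearch query bodies assembled in
# getJobParameters above; it only builds the dictionaries, so no running
# Elasticsearch instance is needed.
if __name__ == "__main__":
    import json

    def buildParametersQuery(jobID, paramList=None):
        if paramList:
            return {"query": {"bool": {"must": [{"match": {"JobID": jobID}},
                                                {"match": {"Name": ','.join(paramList)}}]}},
                    "_source": ["Name", "Value"]}
        return {"query": {"match": {"JobID": jobID}}, "_source": ["Name", "Value"]}

    print(json.dumps(buildParametersQuery(123, ["Status", "Site"]), indent=2))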
|
petricm/DIRAC
|
WorkloadManagementSystem/DB/ElasticJobDB.py
|
Python
|
gpl-3.0
| 7,057
|
[
"DIRAC"
] |
d2f99736e415115da834f62898269b64e46706bf1998bcd374897545bb8cdc7c
|
#!/usr/bin/env python
# coding: utf-8
from datetime import datetime
from HTMLParser import HTMLParser
import argparse
import json
import os
import sys
import urllib2
###############################################################################
# Options
###############################################################################
MAGIC_URL = 'http://magic.gae-init.appspot.com'
PARSER = argparse.ArgumentParser(description='Visit %s for more.' % MAGIC_URL)
PARSER.add_argument(
'-p', '--project', dest='project_id', action='store',
help='project ID of the project that you want to sync',
)
PARSER.add_argument(
'-r', '--remote', dest='remote_url', action='store', default=MAGIC_URL,
help="set the remote URL if it's not http://magic.gae-init.appspot.com",
)
ARGS = PARSER.parse_args()
###############################################################################
# Constants
###############################################################################
DIR_MAIN = 'main'
DIR_CONTROL = os.path.join(DIR_MAIN, 'control')
FILE_CONTROL_INIT = os.path.join(DIR_CONTROL, '__init__.py')
DIR_MODEL = os.path.join(DIR_MAIN, 'model')
FILE_MODEL_INIT = os.path.join(DIR_MODEL, '__init__.py')
DIR_API = os.path.join(DIR_MAIN, 'api', 'v1')
FILE_API_INIT = os.path.join(DIR_API, '__init__.py')
DIR_TEMPLATES = os.path.join(DIR_MAIN, 'templates')
FILE_HEADER = os.path.join(DIR_TEMPLATES, 'bit', 'header.html')
FILE_ADMIN = os.path.join(DIR_TEMPLATES, 'admin', 'admin.html')
###############################################################################
# Helpers
###############################################################################
def print_out(script, filename=''):
timestamp = datetime.now().strftime('%H:%M:%S')
if not filename:
filename = '-' * 46
script = script.rjust(12, '-')
print '[%s] %12s %s' % (timestamp, script, filename)
def make_dirs(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def append_to(project_url, destination):
url = ('%smagic/%s' % (project_url, destination)).replace('\\', '/')
response = urllib2.urlopen(url)
if response.getcode() == 200:
with open(destination, 'r') as dest:
lines = ''.join(dest.readlines())
content = response.read()
if content in lines:
print_out('IGNORED', destination)
return
with open(destination, 'a') as dest:
dest.write(content)
print_out('APPEND', destination)
def insert_to(project_url, destination, find_what, indent=0):
url = ('%smagic/%s' % (project_url, destination)).replace('\\', '/')
response = urllib2.urlopen(url)
if response.getcode() == 200:
with open(destination, 'r') as dest:
dest_contents = dest.readlines()
lines = ''.join(dest_contents)
content = HTMLParser().unescape(response.read())
if content.replace(' ', '') in lines.replace(' ', ''):
print_out('IGNORED', destination)
return
generated = []
for line in dest_contents:
generated.append(line)
if line.lower().find(find_what.lower()) >= 0:
spaces = len(line) - len(line.lstrip())
for l in content.split('\n'):
if l:
generated.append('%s%s\n' % (' ' * (spaces + indent), l))
with open(destination, 'w') as dest:
for line in generated:
dest.write(line)
print_out('INSERT', destination)
def create_file(project_url, destination):
url = ('%smagic/%s' % (project_url, destination)).replace('\\', '/')
response = urllib2.urlopen(url)
if response.getcode() == 200:
with open(destination, 'w') as dest:
dest.write('%s\n' % HTMLParser().unescape(response.read()))
print_out('CREATE', destination)
def get_project_url():
return '%s/api/v1/project/%s/' % (ARGS.remote_url, ARGS.project_id.split('/')[0])
def get_project_db():
response = urllib2.urlopen(get_project_url())
if response.getcode() == 200:
project_body = response.read()
return json.loads(project_body)['result']
return None
def sync_from_magic():
model_dbs = ''
project_url = get_project_url()
model_url = '%smodel/' % project_url
response = urllib2.urlopen(model_url)
if response.getcode() == 200:
models_body = response.read()
model_dbs = json.loads(models_body)['result']
append_to(project_url, FILE_MODEL_INIT)
append_to(project_url, FILE_CONTROL_INIT)
append_to(project_url, FILE_API_INIT)
insert_to(project_url, FILE_HEADER, '<ul class="nav navbar-nav">', 2)
insert_to(project_url, FILE_ADMIN, "url_for('user_list'")
for model_db in model_dbs:
name = model_db['variable_name']
create_file(project_url, os.path.join(DIR_MODEL, '%s.py' % name))
create_file(project_url, os.path.join(DIR_CONTROL, '%s.py' % name))
create_file(project_url, os.path.join(DIR_API, '%s.py' % name))
root = os.path.join(DIR_TEMPLATES, name)
make_dirs(root)
create_file(project_url, os.path.join(root, '%s_update.html' % name))
create_file(project_url, os.path.join(root, '%s_view.html' % name))
create_file(project_url, os.path.join(root, '%s_list.html' % name))
create_file(project_url, os.path.join(root, 'admin_%s_update.html' % name))
create_file(project_url, os.path.join(root, 'admin_%s_list.html' % name))
###############################################################################
# Main
###############################################################################
def magic():
if len(sys.argv) == 1:
PARSER.print_help()
sys.exit(1)
os.chdir(os.path.dirname(os.path.realpath(__file__)))
if ARGS.project_id:
project_db = get_project_db()
answer = raw_input(
'Are you sure you want to sync "%(name)s" with %(model_count)d '
'model(s) that was modified on %(modified)s? (Y/n): '
% {
'name': project_db['name'],
'model_count': project_db['model_count'],
'modified': project_db['modified'][:16].replace('T', ' at '),
}
)
if not answer or answer.lower() == 'y':
sync_from_magic()
else:
print 'Project ID is not provided.'
PARSER.print_help()
if __name__ == '__main__':
magic()
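# Added note (not part of the original script): insert_to() looks for the first
# line containing `find_what`, measures that line's leading whitespace, and
# writes the fetched snippet right below it using that indentation plus
# `indent` extra spaces; a snippet whose content is already present in the
# destination file is skipped and reported as "IGNORED", which makes repeated
# runs of the sync idempotent.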
|
mdxs/gae-init-babel
|
magic.py
|
Python
|
mit
| 6,224
|
[
"VisIt"
] |
f6d038bfa16b63b9eba564e31b8d7bb72b2861c920d811ffb889fe9eaba0966e
|
""" Frontend to FTS3 MySQL DB. Written using sqlalchemy
"""
# We disable the no-member error because
# they are constructed by SQLAlchemy for all
# the objects mapped to a table.
# pylint: disable=no-member
import datetime
import errno
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.sql.expression import and_
from sqlalchemy.orm import relationship, sessionmaker, mapper
from sqlalchemy.sql import update, delete
from sqlalchemy import create_engine, Table, Column, MetaData, ForeignKey, \
Integer, String, DateTime, Enum, BigInteger, SmallInteger, Float, func, text
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.DataManagementSystem.Client.FTS3Operation import FTS3Operation, FTS3TransferOperation, FTS3StagingOperation
from DIRAC.DataManagementSystem.Client.FTS3File import FTS3File
from DIRAC.DataManagementSystem.Client.FTS3Job import FTS3Job
from DIRAC.ConfigurationSystem.Client.Utilities import getDBParameters
__RCSID__ = "$Id$"
metadata = MetaData()
fts3FileTable = Table('Files', metadata,
Column('fileID', Integer, primary_key=True),
Column('operationID', Integer,
ForeignKey('Operations.operationID', ondelete='CASCADE'),
nullable=False),
Column('attempt', Integer, server_default='0'),
Column('lastUpdate', DateTime, onupdate=func.utc_timestamp()),
Column('rmsFileID', Integer, server_default='0'),
Column('lfn', String(1024)),
Column('checksum', String(255)),
Column('size', BigInteger),
Column('targetSE', String(255), nullable=False),
Column('error', String(2048)),
Column('ftsGUID', String(255)),
Column('status', Enum(*FTS3File.ALL_STATES),
server_default=FTS3File.INIT_STATE,
index=True),
mysql_engine='InnoDB',
)
mapper(FTS3File, fts3FileTable)
fts3JobTable = Table('Jobs', metadata,
Column('jobID', Integer, primary_key=True),
Column('operationID', Integer,
ForeignKey('Operations.operationID', ondelete='CASCADE'),
nullable=False),
Column('submitTime', DateTime),
Column('lastUpdate', DateTime, onupdate=func.utc_timestamp()),
Column('lastMonitor', DateTime),
Column('completeness', Float),
# Could be fetched from Operation, but bad for perf
Column('username', String(255)),
# Could be fetched from Operation, but bad for perf
Column('userGroup', String(255)),
Column('ftsGUID', String(255)),
Column('ftsServer', String(255)),
Column('error', String(2048)),
Column('status', Enum(*FTS3Job.ALL_STATES),
server_default=FTS3Job.INIT_STATE,
index=True),
Column('assignment', String(255), server_default=None),
mysql_engine='InnoDB',
)
mapper(FTS3Job, fts3JobTable)
fts3OperationTable = Table('Operations', metadata,
Column('operationID', Integer, primary_key=True),
Column('username', String(255)),
Column('userGroup', String(255)),
# -1 because with 0 we would get any random request
# when performing reqClient.getRequest
Column('rmsReqID', Integer, server_default='-1'),
Column('rmsOpID', Integer, server_default='0', index=True),
Column('sourceSEs', String(255)),
Column('activity', String(255)),
Column('priority', SmallInteger),
Column('creationTime', DateTime),
Column('lastUpdate', DateTime, onupdate=func.utc_timestamp()),
Column('status', Enum(*FTS3Operation.ALL_STATES),
server_default=FTS3Operation.INIT_STATE,
index=True),
Column('error', String(1024)),
Column('type', String(255)),
Column('assignment', String(255), server_default=None),
mysql_engine='InnoDB',
)
fts3Operation_mapper = mapper(FTS3Operation, fts3OperationTable,
properties={'ftsFiles': relationship(
FTS3File,
lazy='joined', # Immediately load the entirety of the object
innerjoin=True, # Use inner join instead of left outer join
cascade='all, delete-orphan', # if a File is removed from the list,
# remove it from the DB
passive_deletes=True, # used together with cascade='all, delete-orphan'
),
'ftsJobs': relationship(
FTS3Job,
lazy='subquery', # Immediately load the entirety of the object,
# but use a subquery to do it
# This is to avoid the cartesian product between the three tables.
# https://docs.sqlalchemy.org/en/latest/orm/loading_relationships.html#subquery-eager-loading
cascade='all, delete-orphan', # if a File is removed from the list,
# remove it from the DB
passive_deletes=True, # used together with cascade='all, delete-orphan'
),
},
polymorphic_on='type',
polymorphic_identity='Abs'
)
mapper(FTS3TransferOperation, fts3OperationTable,
inherits=fts3Operation_mapper,
polymorphic_identity='Transfer'
)
mapper(FTS3StagingOperation, fts3OperationTable,
inherits=fts3Operation_mapper,
polymorphic_identity='Staging'
)
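# Added note (not in the original module): the three mappers above set up
# single-table polymorphic inheritance. Every row of the "Operations" table
# carries a "type" discriminator column; rows whose type is 'Transfer' or
# 'Staging' are loaded as FTS3TransferOperation or FTS3StagingOperation
# respectively, while 'Abs' maps to the base FTS3Operation class.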
########################################################################
class FTS3DB(object):
"""
.. class:: FTS3DB
database holding the FTS3 operations, jobs and files
"""
def __getDBConnectionInfo(self, fullname):
""" Collect from the CS all the info needed to connect to the DB.
This should be in a base class eventually
"""
result = getDBParameters(fullname)
if not result['OK']:
raise Exception('Cannot get database parameters: %s' % result['Message'])
dbParameters = result['Value']
self.dbHost = dbParameters['Host']
self.dbPort = dbParameters['Port']
self.dbUser = dbParameters['User']
self.dbPass = dbParameters['Password']
self.dbName = dbParameters['DBName']
def __init__(self, pool_size=15):
"""c'tor
:param self: self reference
:param pool_size: size of the connection pool to the DB
"""
self.log = gLogger.getSubLogger('FTS3DB')
# Initialize the connection info
self.__getDBConnectionInfo('DataManagement/FTS3DB')
runDebug = (gLogger.getLevel() == 'DEBUG')
self.engine = create_engine(
'mysql://%s:%s@%s:%s/%s' %
(self.dbUser,
self.dbPass,
self.dbHost,
self.dbPort,
self.dbName),
echo=runDebug,
pool_size=pool_size)
metadata.bind = self.engine
self.dbSession = sessionmaker(bind=self.engine)
def createTables(self):
""" create tables """
try:
metadata.create_all(self.engine)
except SQLAlchemyError as e:
return S_ERROR(e)
return S_OK()
def persistOperation(self, operation):
""" update or insert request into db
Also release the assignment tag
:param operation: FTS3Operation instance
"""
session = self.dbSession(expire_on_commit=False)
# set the assignment to NULL
# so that another agent can work on the request
operation.assignment = None
# because of the merge we have to explicitly set lastUpdate
operation.lastUpdate = func.utc_timestamp()
try:
# Merge it in case it already is in the DB
operation = session.merge(operation)
session.add(operation)
session.commit()
session.expunge_all()
return S_OK(operation.operationID)
except SQLAlchemyError as e:
session.rollback()
self.log.exception("persistOperation: unexpected exception", lException=e)
return S_ERROR("persistOperation: unexpected exception %s" % e)
finally:
session.close()
def getOperation(self, operationID):
""" read request
This does not set the assignment flag
:param operationID: ID of the FTS3Operation
"""
# expire_on_commit is set to False so that we can still use the object
# after we close the session
session = self.dbSession(expire_on_commit=False)
try:
operation = session.query(FTS3Operation)\
.filter(getattr(FTS3Operation, 'operationID') == operationID)\
.one()
session.commit()
###################################
session.expunge_all()
return S_OK(operation)
except NoResultFound as e:
# We use the ENOENT error, even if not really a file error :)
return S_ERROR(errno.ENOENT, "No FTS3Operation with id %s" % operationID)
except SQLAlchemyError as e:
return S_ERROR("getOperation: unexpected exception : %s" % e)
finally:
session.close()
def getActiveJobs(self, limit=20, lastMonitor=None, jobAssignmentTag="Assigned"):
""" Get the FTSJobs that are not in a final state, and are not assigned for monitoring
or has its operation being treated
By assigning the job to the DB:
* it cannot be monitored by another agent
* the operation to which it belongs cannot be treated
:param limit: max number of Jobs to retrieve
:param lastMonitor: jobs monitored earlier than the given date
:param jobAssignmentTag: if not None, block the Job for other queries,
and use it as a prefix for the value in the operation table
:returns: list of FTS3Jobs
"""
session = self.dbSession(expire_on_commit=False)
try:
# the tilde sign means "not"
ftsJobsQuery = session.query(FTS3Job)\
.join(FTS3Operation)\
.filter(~FTS3Job.status.in_(FTS3Job.FINAL_STATES))\
.filter(FTS3Job.assignment.is_(None))\
.filter(FTS3Operation.assignment.is_(None))
if lastMonitor:
ftsJobsQuery = ftsJobsQuery.filter(FTS3Job.lastMonitor < lastMonitor)
if jobAssignmentTag:
ftsJobsQuery = ftsJobsQuery.with_for_update()
ftsJobsQuery = ftsJobsQuery.order_by(FTS3Job.lastMonitor.asc())
ftsJobsQuery = ftsJobsQuery.limit(limit)
ftsJobs = ftsJobsQuery.all()
if jobAssignmentTag:
jobAssignmentTag += "_%s" % datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
jobIds = [job.jobID for job in ftsJobs]
if jobIds:
session.execute(update(FTS3Job)
.where(FTS3Job.jobID.in_(jobIds)
)
.values({'assignment': jobAssignmentTag})
)
session.commit()
session.expunge_all()
return S_OK(ftsJobs)
except SQLAlchemyError as e:
session.rollback()
return S_ERROR("getAllActiveJobs: unexpected exception : %s" % e)
finally:
session.close()
def updateFileStatus(self, fileStatusDict, ftsGUID=None):
"""Update the file ftsStatus and error
The update is only done if the file is not in a final state
(To avoid bringing back to life a file by consuming MQ a posteriori)
TODO: maybe it should first query the status and filter the rows we want to update!
:param fileStatusDict: { fileID : { status , error, ftsGUID } }
:param ftsGUID: If specified, only update the rows where the ftsGUID matches this value.
This avoids two jobs that handle the same file one after the other from stepping on each other's toes.
Note that for the moment this parameter is optional, but it may become mandatory soon.
"""
# This is inefficient as we update every file, even if it did not change, and we commit every time.
# It would probably be better to update only the files that changed.
# However, committing every time is the recommendation of MySQL
# (https://dev.mysql.com/doc/refman/5.7/en/innodb-deadlocks-handling.html)
for fileID, valueDict in fileStatusDict.iteritems():
session = self.dbSession()
try:
updateDict = {FTS3File.status: valueDict['status']}
# We only update error if it is specified
if 'error' in valueDict:
newError = valueDict['error']
# Replace empty string with None
if not newError:
newError = None
updateDict[FTS3File.error] = newError
# We only update ftsGUID if it is specified
if 'ftsGUID' in valueDict:
newFtsGUID = valueDict['ftsGUID']
# Replace empty string with None
if not newFtsGUID:
newFtsGUID = None
updateDict[FTS3File.ftsGUID] = newFtsGUID
# We only update the lines matching:
# * the good fileID
# * the status is not Final
whereConditions = [FTS3File.fileID == fileID,
~ FTS3File.status.in_(FTS3File.FINAL_STATES)]
# If an ftsGUID is specified, add it to the `where` condition
if ftsGUID:
whereConditions.append(FTS3File.ftsGUID == ftsGUID)
updateQuery = update(FTS3File)\
.where(and_(*whereConditions)
)\
.values(updateDict)
session.execute(updateQuery)
session.commit()
except SQLAlchemyError as e:
session.rollback()
self.log.exception("updateFileFtsStatus: unexpected exception", lException=e)
return S_ERROR("updateFileFtsStatus: unexpected exception %s" % e)
finally:
session.close()
return S_OK()
def updateJobStatus(self, jobStatusDict):
""" Update the job Status and error
The update is only done if the job is not in a final state
The assignment flag is released
:param jobStatusDict: { jobID : { status , error, completeness } }
"""
session = self.dbSession()
try:
for jobID, valueDict in jobStatusDict.iteritems():
updateDict = {FTS3Job.status: valueDict['status']}
# We only update error if it is specified
if 'error' in valueDict:
newError = valueDict['error']
# Replace empty string with None
if not newError:
newError = None
updateDict[FTS3Job.error] = newError
if 'completeness' in valueDict:
updateDict[FTS3Job.completeness] = valueDict['completeness']
if valueDict.get('lastMonitor'):
updateDict[FTS3Job.lastMonitor] = func.utc_timestamp()
updateDict[FTS3Job.assignment] = None
session.execute(update(FTS3Job)
.where(and_(FTS3Job.jobID == jobID,
~ FTS3Job.status.in_(FTS3Job.FINAL_STATES)
)
)
.values(updateDict)
)
session.commit()
return S_OK()
except SQLAlchemyError as e:
session.rollback()
self.log.exception("updateJobStatus: unexpected exception", lException=e)
return S_ERROR("updateJobStatus: unexpected exception %s" % e)
finally:
session.close()
def getNonFinishedOperations(self, limit=20, operationAssignmentTag="Assigned"):
""" Get all the non assigned FTS3Operations that are not yet finished, so either Active or Processed.
An operation won't be picked if it is already assigned, or if one of its jobs is.
:param limit: max number of operations to retrieve
:param operationAssignmentTag: if not None, block the operations for other queries,
and use it as a prefix for the value in the operation table
:return: list of Operations
"""
session = self.dbSession(expire_on_commit=False)
try:
ftsOperations = []
# We need to do the select in two steps because the join clause makes the limit difficult to apply directly
operationIDsQuery = session.query(FTS3Operation.operationID)\
.outerjoin(FTS3Job)\
.filter(FTS3Operation.status.in_(['Active', 'Processed']))\
.filter(FTS3Operation.assignment.is_(None))\
.filter(FTS3Job.assignment.is_(None))\
.limit(limit)\
.distinct()
# Block the Operations for other requests
if operationAssignmentTag:
operationIDsQuery = operationIDsQuery.with_for_update()
operationIDs = operationIDsQuery.all()
operationIDs = [oidTuple[0] for oidTuple in operationIDs]
if operationIDs:
# Fetch the operation object for these IDs
ftsOperations = session.query(FTS3Operation)\
.filter(FTS3Operation.operationID.in_(operationIDs))\
.all()
if operationAssignmentTag:
operationAssignmentTag += "_%s" % datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
session.execute(update(FTS3Operation)
.where(FTS3Operation.operationID.in_(operationIDs)
)
.values({'assignment': operationAssignmentTag})
)
session.commit()
session.expunge_all()
return S_OK(ftsOperations)
except SQLAlchemyError as e:
session.rollback()
return S_ERROR("getAllProcessedOperations: unexpected exception : %s" % e)
finally:
session.close()
def kickStuckOperations(self, limit=20, kickDelay=2):
"""finds operations that have not been updated for more than a given
time but are still assigned and resets the assignment
:param int limit: number of operations to treat
:param int kickDelay: age of the lastUpdate in hours
:returns: S_OK/S_ERROR with number of kicked operations
"""
session = self.dbSession(expire_on_commit=False)
try:
ftsOps = session.query(FTS3Operation.operationID)\
.filter(FTS3Operation.lastUpdate < (func.date_sub(func.utc_timestamp(),
text('INTERVAL %d HOUR' % kickDelay
))))\
.filter(~FTS3Operation.assignment.is_(None))\
.limit(limit)
opIDs = [opTuple[0] for opTuple in ftsOps]
rowCount = 0
if opIDs:
result = session.execute(
update(FTS3Operation) .where(
FTS3Operation.operationID.in_(opIDs)) .where(
FTS3Operation.lastUpdate < (
func.date_sub(
func.utc_timestamp(), text(
'INTERVAL %d HOUR' %
kickDelay)))) .values(
{
'assignment': None}))
rowCount = result.rowcount
session.commit()
session.expunge_all()
return S_OK(rowCount)
except SQLAlchemyError as e:
session.rollback()
return S_ERROR("kickStuckOperations: unexpected exception : %s" % e)
finally:
session.close()
def kickStuckJobs(self, limit=20, kickDelay=2):
"""finds jobs that have not been updated for more than a given
time but are still assigned and resets the assignment
:param int limit: number of jobs to treat
:param int kickDelay: age of the lastUpdate in hours
:returns: S_OK/S_ERROR with number of kicked jobs
"""
session = self.dbSession(expire_on_commit=False)
try:
ftsJobs = session.query(FTS3Job.jobID)\
.filter(FTS3Job.lastUpdate < (func.date_sub(func.utc_timestamp(),
text('INTERVAL %d HOUR' % kickDelay
))))\
.filter(~FTS3Job.assignment.is_(None))\
.limit(limit)
jobIDs = [jobTuple[0] for jobTuple in ftsJobs]
rowCount = 0
if jobIDs:
result = session.execute(
update(FTS3Job) .where(
FTS3Job.jobID.in_(jobIDs)) .where(
FTS3Job.lastUpdate < (
func.date_sub(
func.utc_timestamp(), text(
'INTERVAL %d HOUR' %
kickDelay)))) .values(
{
'assignment': None}))
rowCount = result.rowcount
session.commit()
session.expunge_all()
return S_OK(rowCount)
except SQLAlchemyError as e:
session.rollback()
return S_ERROR("kickStuckJobs: unexpected exception : %s" % e)
finally:
session.close()
def deleteFinalOperations(self, limit=20, deleteDelay=180):
"""deletes operation in final state that are older than given time
:param int limit: number of operations to treat
:param int deleteDelay: age of the lastUpdate in days
:returns: S_OK/S_ERROR with number of deleted operations
"""
session = self.dbSession(expire_on_commit=False)
try:
ftsOps = session.query(
FTS3Operation.operationID) .filter(
FTS3Operation.lastUpdate < (
func.date_sub(
func.utc_timestamp(),
text(
'INTERVAL %d DAY' %
deleteDelay)))) .filter(
FTS3Operation.status.in_(
FTS3Operation.FINAL_STATES)) .limit(limit)
opIDs = [opTuple[0] for opTuple in ftsOps]
rowCount = 0
if opIDs:
result = session.execute(delete(FTS3Operation)
.where(FTS3Operation.operationID.in_(opIDs)))
rowCount = result.rowcount
session.commit()
session.expunge_all()
return S_OK(rowCount)
except SQLAlchemyError as e:
session.rollback()
return S_ERROR("deleteFinalOperations: unexpected exception : %s" % e)
finally:
session.close()
def getOperationsFromRMSOpID(self, rmsOpID):
""" Returns the FTS3Operations matching a given RMS OperationID
This does not set the assignment flag
:param rmsOpID: ID of the RMS Operation
"""
# expire_on_commit is set to False so that we can still use the object
# after we close the session
session = self.dbSession(expire_on_commit=False)
try:
operations = session.query(FTS3Operation)\
.filter(FTS3Operation.rmsOpID == rmsOpID)\
.all()
session.commit()
###################################
session.expunge_all()
return S_OK(operations)
except NoResultFound as e:
# If there is no such operation, return an empty list
return S_OK([])
except SQLAlchemyError as e:
return S_ERROR("getOperationsFromRMSOpID: unexpected exception : %s" % e)
finally:
session.close()
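# --- Added illustration (not part of FTS3DB) ---
# A self-contained sketch of the two-step "reserve rows with an assignment tag"
# pattern used by getActiveJobs / getNonFinishedOperations, run against an
# in-memory SQLite database. In production the first SELECT is additionally
# issued with FOR UPDATE so that concurrent agents cannot pick the same rows.
if __name__ == "__main__":
    # reuses the sqlalchemy / datetime imports already made at module level
    sketch_md = MetaData()
    sketch_jobs = Table('SketchJobs', sketch_md,
                        Column('jobID', Integer, primary_key=True),
                        Column('status', String(32)),
                        Column('assignment', String(255)))
    sketch_engine = create_engine('sqlite://')
    sketch_md.create_all(sketch_engine)
    sketch_session = sessionmaker(bind=sketch_engine)()

    sketch_session.execute(sketch_jobs.insert(),
                           [{'status': 'Active', 'assignment': None} for _ in range(5)])

    # step 1: pick a limited set of unassigned rows
    rows = sketch_session.execute(
        sketch_jobs.select().where(sketch_jobs.c.assignment.is_(None)).limit(2)).fetchall()
    jobIDs = [row[0] for row in rows]

    # step 2: tag them so that other workers skip them
    tag = "Assigned_%s" % datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
    sketch_session.execute(update(sketch_jobs)
                           .where(sketch_jobs.c.jobID.in_(jobIDs))
                           .values(assignment=tag))
    sketch_session.commit()
    print("reserved jobs %s with tag %s" % (jobIDs, tag))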
|
fstagni/DIRAC
|
DataManagementSystem/DB/FTS3DB.py
|
Python
|
gpl-3.0
| 24,062
|
[
"DIRAC"
] |
1b355fe740eeb06b4aabcf5df00bbefce0e133adaae17661efc6fa63a0abeb7e
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines classes to generate point defect structures
"""
import logging
from abc import ABCMeta, abstractmethod
from monty.json import MSONable
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.analysis.defects.core import Interstitial, Substitution, Vacancy
from pymatgen.analysis.defects.utils import (
StructureMotifInterstitial,
TopographyAnalyzer,
)
from pymatgen.analysis.structure_matcher import PointDefectComparator
from pymatgen.core import PeriodicSite
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
__author__ = "Danny Broberg, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyam Dwaraknath"
__email__ = "shyamd@lbl.gov"
__status__ = "Development"
__date__ = "Mar 15, 2018"
logger = logging.getLogger(__name__)
class DefectGenerator(MSONable, metaclass=ABCMeta):
"""
Abstract class for point defects
Implements generator pattern
"""
def __iter__(self):
"""
Return self as this should be an iterator
"""
return self
@abstractmethod
def __next__(self):
"""
Abstract method to return defects
"""
return
class VacancyGenerator(DefectGenerator):
"""
Simple generator for vacancies based on periodically
equivalent sites
"""
def __init__(self, structure, include_bv_charge=False):
"""
Initializes a Vacancy Generator
Args:
structure(Structure): pymatgen structure object
"""
self.structure = structure
self.include_bv_charge = include_bv_charge
# Find equivalent site list
sga = SpacegroupAnalyzer(self.structure)
self.symm_structure = sga.get_symmetrized_structure()
self.equiv_site_seq = list(self.symm_structure.equivalent_sites)
self.struct_valences = None
if self.include_bv_charge:
bv = BVAnalyzer()
self.struct_valences = bv.get_valences(self.structure)
def __next__(self):
"""
Returns the next vacancy in the sequence or
raises StopIteration
"""
if len(self.equiv_site_seq) > 0:
vac_site = self.equiv_site_seq.pop(0)
charge = 0.0
if self.struct_valences:
site_index = self.structure.get_sites_in_sphere(vac_site[0].coords, 0.1, include_index=True)[0][2]
charge = -1 * self.struct_valences[site_index]
return Vacancy(self.structure, vac_site[0], charge=charge)
raise StopIteration
class SubstitutionGenerator(DefectGenerator):
"""
Simple generator for substitution based on periodically
equivalent sites
"""
def __init__(self, structure, element):
"""
Initializes a Substitution Generator
note: an Antisite is considered a type of substitution
Args:
structure(Structure): pymatgen structure object
element (str or Element or Species): element for the substitution
"""
self.structure = structure
self.element = element
# Find equivalent site list
sga = SpacegroupAnalyzer(self.structure)
self.symm_structure = sga.get_symmetrized_structure()
self.equiv_sub = []
for equiv_site_set in list(self.symm_structure.equivalent_sites):
vac_site = equiv_site_set[0]
if isinstance(element, str): # make sure you compare with specie symbol or Element type
vac_specie = vac_site.specie.symbol
else:
vac_specie = vac_site.specie
if element != vac_specie:
defect_site = PeriodicSite(
element,
vac_site.coords,
structure.lattice,
coords_are_cartesian=True,
)
sub = Substitution(structure, defect_site)
self.equiv_sub.append(sub)
def __next__(self):
"""
Returns the next Substitution in the sequence or
raises StopIteration
"""
if len(self.equiv_sub) > 0:
return self.equiv_sub.pop(0)
raise StopIteration
class InterstitialGenerator(DefectGenerator):
"""
Generator for interstitials at positions
where the interstitialcy is coordinated by nearest neighbors
in a way that resembles basic structure motifs
(e.g., tetrahedra, octahedra). The algorithm is called InFiT
(Interstitialcy Finding Tool); it was introduced by
Nils E. R. Zimmermann, Matthew K. Horton, Anubhav Jain,
and Maciej Haranczyk (Front. Mater., 4, 34, 2017),
and it is used by the Python Charged Defect Toolkit
(PyCDT: D. Broberg et al., Comput. Phys. Commun., in press, 2018).
"""
def __init__(self, structure, element):
"""
Initializes an Interstitial generator using structure motifs
Args:
structure (Structure): pymatgen structure object
element (str or Element or Species): element for the interstitial
"""
self.structure = structure
self.element = element
interstitial_finder = StructureMotifInterstitial(self.structure, self.element)
self.unique_defect_seq = []
# eliminate sublattice equivalent defects which may
# have slipped through interstitial finder
pdc = PointDefectComparator()
for poss_site in interstitial_finder.enumerate_defectsites():
now_defect = Interstitial(self.structure, poss_site)
append_defect = True
for unique_defect in self.unique_defect_seq:
if pdc.are_equal(now_defect, unique_defect):
append_defect = False
if append_defect:
self.unique_defect_seq.append(now_defect)
self.count_def = 0 # for counting the index of the generated defect
def __next__(self):
"""
Returns the next interstitial or
raises StopIteration
"""
if len(self.unique_defect_seq) > 0:
inter_defect = self.unique_defect_seq.pop(0)
inter_site = inter_defect.site
self.count_def += 1
site_name = "InFiT" + str(self.count_def)
return Interstitial(self.structure, inter_site, site_name=site_name)
raise StopIteration
class VoronoiInterstitialGenerator(DefectGenerator):
"""
Generator for interstitials based on a simple Voronoi analysis
"""
def __init__(self, structure, element):
"""
Initializes an Interstitial generator using Voronoi sites
Args:
structure (Structure): pymatgen structure object
element (str or Element or Species): element for the interstitial
"""
self.structure = structure
self.element = element
framework = list(self.structure.symbol_set)
get_voronoi = TopographyAnalyzer(self.structure, framework, [], check_volume=False)
get_voronoi.cluster_nodes()
get_voronoi.remove_collisions()
# trim equivalent nodes with symmetry analysis
struct_to_trim = self.structure.copy()
for poss_inter in get_voronoi.vnodes:
struct_to_trim.append(self.element, poss_inter.frac_coords, coords_are_cartesian=False)
symmetry_finder = SpacegroupAnalyzer(struct_to_trim, symprec=1e-1)
equiv_sites_list = symmetry_finder.get_symmetrized_structure().equivalent_sites
# do additional screening for sublattice equivalent
# defects which may have slipped through
pdc = PointDefectComparator()
self.unique_defect_seq = []
for poss_site_list in equiv_sites_list:
poss_site = poss_site_list[0]
if poss_site not in self.structure:
now_defect = Interstitial(self.structure, poss_site)
append_defect = True
for unique_defect in self.unique_defect_seq:
if pdc.are_equal(now_defect, unique_defect):
append_defect = False
if append_defect:
self.unique_defect_seq.append(now_defect)
self.count_def = 0 # for counting the index of the generated defect
def __next__(self):
"""
Returns the next interstitial or
raises StopIteration
"""
if len(self.unique_defect_seq) > 0:
inter_defect = self.unique_defect_seq.pop(0)
inter_site = inter_defect.site
self.count_def += 1
site_name = "Voronoi" + str(self.count_def)
return Interstitial(self.structure, inter_site, site_name=site_name)
raise StopIteration
class SimpleChargeGenerator(DefectGenerator):
"""
Does an extremely simple/limited charge generation scheme (only one charge generated)
for vacancies: use bond valence method to assign oxidation states and consider
negative of the vacant site's oxidation state as single charge to try
for antisites and subs: use bond valence method to assign oxidation states and consider
negative of the vacant site's oxidation state as single charge to try +
added to likely charge of substitutional site (closest to zero)
for interstitial: charge zero
"""
def __init__(self, defect):
"""
Args:
defect(Defect): pymatgen Defect object
"""
self.defect = defect
try:
bv = BVAnalyzer()
struct_valences = bv.get_valences(self.defect.bulk_structure)
site_index = self.defect.bulk_structure.get_sites_in_sphere(
self.defect.site.coords, 0.1, include_index=True
)[0][2]
def_site_valence = struct_valences[site_index]
except Exception: # sometimes valences cant be assigned
def_site_valence = 0
if isinstance(defect, Vacancy):
self.charges = [-1 * def_site_valence]
elif isinstance(defect, Substitution):
# (minimize difference with host site specie)
probable_chgs = [ox - def_site_valence for ox in self.defect.site.specie.oxidation_states]
self.charges = [min(probable_chgs, key=abs)]
elif isinstance(defect, Interstitial):
self.charges = [0]
else:
raise ValueError("Defect Type not recognized.")
def __next__(self):
"""
Returns the next defect type with the correct charge appended
raises StopIteration
"""
if len(self.charges) > 0:
charge = self.charges.pop(0)
defect = self.defect.copy()
defect.set_charge(charge)
return defect
raise StopIteration
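if __name__ == "__main__":
    # Added usage sketch (not part of the module): build a small rock-salt
    # structure and iterate over the symmetry-inequivalent vacancies produced
    # by VacancyGenerator. The composition and lattice constant are arbitrary
    # example values.
    from pymatgen.core import Lattice, Structure

    example_structure = Structure.from_spacegroup(
        "Fm-3m", Lattice.cubic(4.2), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]]
    )
    for i, vacancy in enumerate(VacancyGenerator(example_structure)):
        print(i, vacancy.site.specie, vacancy.site.frac_coords)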
|
vorwerkc/pymatgen
|
pymatgen/analysis/defects/generators.py
|
Python
|
mit
| 10,953
|
[
"pymatgen"
] |
49379804b95d51f03bd46284561f1711684fa53fe0efd5476a57204d1dd8c713
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to
# submit large numbers of jobs on supercomputers. It provides a python interface to physical input,
# such as crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential
# programs. It is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
from pytest import fixture
@fixture
def root(functional):
from pylada.jobfolder import JobFolder
sizes = [10, 15, 20, 25]
root = JobFolder()
for type, trial, size in [('this', 0, 10), ('this', 1, 15), ('that', 2, 20), ('that', 1, 20)]:
jobfolder = root / type / trial
jobfolder.functional = functional
jobfolder.params['indiv'] = size
if type == 'that':
jobfolder.params['value'] = True
return root
@fixture
def expected_compute():
return {'that/1': 20, 'that/2': 20, 'this/1': 15, 'this/2': 15, 'this/0': 10}
def test_subfolder_creation():
from pylada.jobfolder import JobFolder
root = JobFolder()
jobfolder = root / 'this' / '0'
assert jobfolder.name == "/this/0/"
def test_subfolder_creation_nonstring():
from pylada.jobfolder import JobFolder
root = JobFolder()
jobfolder = root / 'this' / 0
assert jobfolder.name == "/this/0/"
def test_contains(root):
assert 'this/0' in root
assert 'this/1' in root
assert 'that/2' in root
assert 'that/1' in root
assert 'other' not in root
def test_getitem_and_contains(root):
assert '0' in root['this']
assert '1' in root['this']
assert '1' in root['that']
assert '2' in root['that']
def test_values_iteration(root, functional):
for jobfolder in root.values():
assert repr(jobfolder.functional) == repr(functional)
def test_getattr(root):
assert getattr(root['this/0'], 'indiv', 0) == 10
assert getattr(root['this/1'], 'indiv', 0) == 15
assert getattr(root['that/1'], 'indiv', 0) == 20
assert getattr(root['that/2'], 'indiv', 0) == 20
assert not hasattr(root['this/0'], 'value')
assert not hasattr(root['this/1'], 'value')
assert getattr(root['that/1'], 'value', False)
assert getattr(root['that/2'], 'value', False)
def test_key_iteration(root):
for key, test in zip(root, ['that/1', 'that/2', 'this/0', 'this/1']):
assert key == test
for key, test in zip(root['that/1'].root, ['that/1', 'that/2', 'this/0', 'this/1']):
assert key == test
for key, test in zip(root['that'], ['1', '2']):
assert key == test
for key, test in zip(root['this'], ['0', '1']):
assert key == test
def test_delete_subfolder(root):
del root['that/2']
assert 'that/2' not in root
def test_compute(tmpdir, root, expected_compute):
for name, jobfolder in root.items():
result = jobfolder.compute(outdir=str(tmpdir.join(name)))
assert result.success
assert tmpdir.join(name, 'OUTCAR').check()
assert result.indiv == expected_compute[name]
def test_pickling_then_compute(tmpdir, root, expected_compute):
from pickle import loads, dumps
for name, jobfolder in root.items():
result = jobfolder.compute(outdir=str(tmpdir.join(name)))
assert result.success
assert tmpdir.join(name, 'OUTCAR').check()
assert result.indiv == expected_compute[name]
def test_deepcopy_then_compute(tmpdir, root, expected_compute):
from copy import deepcopy
for name, jobfolder in deepcopy(root).items():
result = jobfolder.compute(outdir=str(tmpdir.join(name)))
assert result.success
assert tmpdir.join(name, 'OUTCAR').check()
assert result.indiv == expected_compute[name]
def test_deepcopy(root):
from copy import deepcopy
jobfolder = deepcopy(root)
for subfolder in root.values():
assert subfolder.name in jobfolder
|
pylada/pylada-light
|
tests/jobfolder/test_jobfolder.py
|
Python
|
gpl-3.0
| 4,660
|
[
"CRYSTAL",
"VASP"
] |
ac9fc456f2fdd0e5daa4176566a8429095444f3f98e581d84969b951d2cd3681
|
"""
Service class implements the server side part of the DISET protocol
There are 2 main parts in this class:
- All useful functions for initialization
- All useful functions to handle the requests
"""
# pylint: skip-file
# __searchInitFunctions gives RuntimeError: maximum recursion depth exceeded
import os
import time
import threading
import DIRAC
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.DErrno import ENOAUTH
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.Core.Utilities import Time, MemStat
from DIRAC.Core.DISET.private.LockManager import LockManager
from DIRAC.FrameworkSystem.Client.MonitoringClient import MonitoringClient
from DIRAC.Core.DISET.private.ServiceConfiguration import ServiceConfiguration
from DIRAC.Core.DISET.private.TransportPool import getGlobalTransportPool
from DIRAC.Core.DISET.private.MessageBroker import MessageBroker, MessageSender
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
from DIRAC.Core.Utilities.ReturnValues import isReturnStructure
from DIRAC.Core.DISET.AuthManager import AuthManager
from DIRAC.FrameworkSystem.Client.SecurityLogClient import SecurityLogClient
from DIRAC.ConfigurationSystem.Client import PathFinder
__RCSID__ = "$Id$"
class Service(object):
SVC_VALID_ACTIONS = {'RPC': 'export',
'FileTransfer': 'transfer',
'Message': 'msg',
'Connection': 'Message'}
SVC_SECLOG_CLIENT = SecurityLogClient()
def __init__(self, serviceData):
"""
Init the variables for the service
:param serviceData: dict with modName, standalone, loadName, moduleObj, classObj. e.g.:
{'modName': 'Framework/serviceName',
'standalone': True,
'loadName': 'Framework/serviceName',
'moduleObj': <module 'serviceNameHandler' from '/home/DIRAC/FrameworkSystem/Service/serviceNameHandler.pyo'>,
'classObj': <class 'serviceNameHandler.serviceHandler'>}
Standalone is true if there is only one service started
If it's false, every service is linked to a different MonitoringClient
"""
self._svcData = serviceData
self._name = serviceData['modName']
self._startTime = Time.dateTime()
self._validNames = [serviceData['modName']]
if serviceData['loadName'] not in self._validNames:
self._validNames.append(serviceData['loadName'])
self._cfg = ServiceConfiguration(list(self._validNames))
if serviceData['standalone']:
self._monitor = gMonitor
else:
self._monitor = MonitoringClient()
self.__monitorLastStatsUpdate = time.time()
self._stats = {'queries': 0, 'connections': 0}
self._authMgr = AuthManager("%s/Authorization" % PathFinder.getServiceSection(serviceData['loadName']))
self._transportPool = getGlobalTransportPool()
self.__cloneId = 0
self.__maxFD = 0
def setCloneProcessId(self, cloneId):
self.__cloneId = cloneId
self._monitor.setComponentName("%s-Clone:%s" % (self._name, cloneId))
def _isMetaAction(self, action):
referedAction = Service.SVC_VALID_ACTIONS[action]
if referedAction in Service.SVC_VALID_ACTIONS:
return referedAction
return False
def initialize(self):
# Build the URLs
self._url = self._cfg.getURL()
if not self._url:
return S_ERROR("Could not build service URL for %s" % self._name)
gLogger.verbose("Service URL is %s" % self._url)
# Load handler
result = self._loadHandlerInit()
if not result['OK']:
return result
self._handler = result['Value']
# Initialize lock manager
self._lockManager = LockManager(self._cfg.getMaxWaitingPetitions())
self._initMonitoring()
self._threadPool = ThreadPool(max(1, self._cfg.getMinThreads()),
max(0, self._cfg.getMaxThreads()),
self._cfg.getMaxWaitingPetitions())
self._threadPool.daemonize()
self._msgBroker = MessageBroker("%sMSB" % self._name, threadPool=self._threadPool)
# Create static dict
self._serviceInfoDict = {'serviceName': self._name,
'serviceSectionPath': PathFinder.getServiceSection(self._name),
'URL': self._cfg.getURL(),
'messageSender': MessageSender(self._name, self._msgBroker),
'validNames': self._validNames,
'csPaths': [PathFinder.getServiceSection(svcName) for svcName in self._validNames]
}
# Call static initialization function
try:
self._handler['class']._rh__initializeClass(dict(self._serviceInfoDict),
self._lockManager,
self._msgBroker,
self._monitor)
if self._handler['init']:
for initFunc in self._handler['init']:
gLogger.verbose("Executing initialization function")
try:
result = initFunc(dict(self._serviceInfoDict))
except Exception as excp:
gLogger.exception("Exception while calling initialization function", lException=excp)
return S_ERROR("Exception while calling initialization function: %s" % str(excp))
if not isReturnStructure(result):
return S_ERROR("Service initialization function %s must return S_OK/S_ERROR" % initFunc)
if not result['OK']:
return S_ERROR("Error while initializing %s: %s" % (self._name, result['Message']))
except Exception as e:
errMsg = "Exception while initializing %s" % self._name
gLogger.exception(e)
gLogger.exception(errMsg)
return S_ERROR(errMsg)
# Load actions after the handler has initialized itself
result = self._loadActions()
if not result['OK']:
return result
self._actions = result['Value']
gThreadScheduler.addPeriodicTask(30, self.__reportThreadPoolContents)
return S_OK()
def __searchInitFunctions(self, handlerClass, currentClass=None):
if not currentClass:
currentClass = handlerClass
initFuncs = []
ancestorHasInit = False
for ancestor in currentClass.__bases__:
initFuncs += self.__searchInitFunctions(handlerClass, ancestor)
if 'initializeHandler' in dir(ancestor):
ancestorHasInit = True
if ancestorHasInit:
initFuncs.append(super(currentClass, handlerClass).initializeHandler)
if currentClass == handlerClass and 'initializeHandler' in dir(handlerClass):
initFuncs.append(handlerClass.initializeHandler)
return initFuncs
def _loadHandlerInit(self):
handlerClass = self._svcData['classObj']
handlerName = handlerClass.__name__
handlerInitMethods = self.__searchInitFunctions(handlerClass)
try:
handlerInitMethods.append(getattr(self._svcData['moduleObj'], "initialize%s" % handlerName))
except AttributeError:
gLogger.verbose("Not found global initialization function for service")
if handlerInitMethods:
gLogger.info("Found %s initialization methods" % len(handlerInitMethods))
handlerInfo = {}
handlerInfo["name"] = handlerName
handlerInfo["module"] = self._svcData['moduleObj']
handlerInfo["class"] = handlerClass
handlerInfo["init"] = handlerInitMethods
return S_OK(handlerInfo)
def _loadActions(self):
handlerClass = self._handler['class']
authRules = {}
typeCheck = {}
methodsList = {}
for actionType in Service.SVC_VALID_ACTIONS:
if self._isMetaAction(actionType):
continue
authRules[actionType] = {}
typeCheck[actionType] = {}
methodsList[actionType] = []
handlerAttributeList = dir(handlerClass)
for actionType in Service.SVC_VALID_ACTIONS:
if self._isMetaAction(actionType):
continue
methodPrefix = '%s_' % Service.SVC_VALID_ACTIONS[actionType]
for attribute in handlerAttributeList:
if attribute.find(methodPrefix) != 0:
continue
exportedName = attribute[len(methodPrefix):]
methodsList[actionType].append(exportedName)
gLogger.verbose("+ Found %s method %s" % (actionType, exportedName))
# Create lock for method
self._lockManager.createLock("%s/%s" % (actionType, exportedName),
self._cfg.getMaxThreadsForMethod(actionType, exportedName))
# Look for type and auth rules
if actionType == 'RPC':
typeAttr = "types_%s" % exportedName
authAttr = "auth_%s" % exportedName
else:
typeAttr = "types_%s_%s" % (Service.SVC_VALID_ACTIONS[actionType], exportedName)
authAttr = "auth_%s_%s" % (Service.SVC_VALID_ACTIONS[actionType], exportedName)
if typeAttr in handlerAttributeList:
obj = getattr(handlerClass, typeAttr)
gLogger.verbose("|- Found type definition %s: %s" % (typeAttr, str(obj)))
typeCheck[actionType][exportedName] = obj
if authAttr in handlerAttributeList:
obj = getattr(handlerClass, authAttr)
gLogger.verbose("|- Found auth rules %s: %s" % (authAttr, str(obj)))
authRules[actionType][exportedName] = obj
for actionType in Service.SVC_VALID_ACTIONS:
referedAction = self._isMetaAction(actionType)
if not referedAction:
continue
gLogger.verbose("Action %s is a meta action for %s" % (actionType, referedAction))
authRules[actionType] = []
for method in authRules[referedAction]:
for prop in authRules[referedAction][method]:
if prop not in authRules[actionType]:
authRules[actionType].append(prop)
gLogger.verbose("Meta action %s props are %s" % (actionType, authRules[actionType]))
return S_OK({'methods': methodsList, 'auth': authRules, 'types': typeCheck})
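  # Added illustration (comments only, not part of the original class): a handler
  # picked up by the scan above follows the naming convention, e.g. for an RPC
  # action named "ping" (the auth property below is just an example value):
  #
  #   class PingHandler(RequestHandler):
  #     types_ping = []                   # argument type check, via "types_<name>"
  #     auth_ping = ['authenticated']     # authorization rules, via "auth_<name>"
  #     def export_ping(self):            # "export_" prefix marks an RPC method
  #       return S_OK('pong')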
def _initMonitoring(self):
# Init extra bits of monitoring
self._monitor.setComponentType(MonitoringClient.COMPONENT_SERVICE)
self._monitor.setComponentName(self._name)
self._monitor.setComponentLocation(self._cfg.getURL())
self._monitor.initialize()
self._monitor.registerActivity(
"Connections",
"Connections received",
"Framework",
"connections",
MonitoringClient.OP_RATE)
self._monitor.registerActivity("Queries", "Queries served", "Framework", "queries", MonitoringClient.OP_RATE)
self._monitor.registerActivity('CPU', "CPU Usage", 'Framework', "CPU,%", MonitoringClient.OP_MEAN, 600)
self._monitor.registerActivity('MEM', "Memory Usage", 'Framework', 'Memory,MB', MonitoringClient.OP_MEAN, 600)
self._monitor.registerActivity(
'PendingQueries',
"Pending queries",
'Framework',
'queries',
MonitoringClient.OP_MEAN)
self._monitor.registerActivity('ActiveQueries', "Active queries", 'Framework', 'threads', MonitoringClient.OP_MEAN)
self._monitor.registerActivity(
'RunningThreads',
"Running threads",
'Framework',
'threads',
MonitoringClient.OP_MEAN)
self._monitor.registerActivity('MaxFD', "Max File Descriptors", 'Framework', 'fd', MonitoringClient.OP_MEAN)
self._monitor.setComponentExtraParam('DIRACVersion', DIRAC.version)
self._monitor.setComponentExtraParam('platform', DIRAC.getPlatform())
self._monitor.setComponentExtraParam('startTime', Time.dateTime())
for prop in (("__RCSID__", "version"), ("__doc__", "description")):
try:
value = getattr(self._handler['module'], prop[0])
except Exception as e:
gLogger.exception(e)
gLogger.error("Missing property", prop[0])
value = 'unset'
self._monitor.setComponentExtraParam(prop[1], value)
for secondaryName in self._cfg.registerAlsoAs():
gLogger.info("Registering %s also as %s" % (self._name, secondaryName))
self._validNames.append(secondaryName)
return S_OK()
def __reportThreadPoolContents(self):
self._monitor.addMark('PendingQueries', self._threadPool.pendingJobs())
self._monitor.addMark('ActiveQueries', self._threadPool.numWorkingThreads())
self._monitor.addMark('RunningThreads', threading.activeCount())
self._monitor.addMark('MaxFD', self.__maxFD)
self.__maxFD = 0
def getConfig(self):
return self._cfg
# End of initialization functions
def handleConnection(self, clientTransport):
"""
This method may be called by ServiceReactor.
The method stacks openened connection in a queue, another thread
read this queue and handle connection.
:param clientTransport: Object wich describe opened connection (PlainTransport or SSLTransport)
"""
self._stats['connections'] += 1
self._monitor.setComponentExtraParam('queries', self._stats['connections'])
self._threadPool.generateJobAndQueueIt(self._processInThread,
args=(clientTransport, ))
# Threaded process function
def _processInThread(self, clientTransport):
"""
This method handles a RPC, FileTransfer or Connection.
Connection may be opened via ServiceReactor.__acceptIncomingConnection
- Do the SSL/TLS Handshake (if dips is used) and extract credentials
- Get the action called by the client
- Check if the client is authorized to perform ation
- If not, connection is closed
- Instanciate the RequestHandler (RequestHandler contain all methods callable)
(Following is not directly in this method but it describe what happen at
#Execute the action)
- Notify the client we're ready to execute the action (via _processProposal)
and call RequestHandler._rh_executeAction()
- Receive arguments/file/something else (depending on action) in the RequestHandler
- Executing the action asked by the client
:param clientTransport: Object who describe the opened connection (SSLTransport or PlainTransport)
:return: S_OK with "closeTransport" a boolean to indicate if th connection have to be closed
e.g. after RPC, closeTransport=True
"""
self.__maxFD = max(self.__maxFD, clientTransport.oSocket.fileno())
self._lockManager.lockGlobal()
try:
monReport = self.__startReportToMonitoring()
except Exception:
monReport = False
try:
# Handshake
try:
result = clientTransport.handshake()
if not result['OK']:
clientTransport.close()
return
except BaseException:
return
# Add to the transport pool
trid = self._transportPool.add(clientTransport)
if not trid:
return
# Receive and check proposal
result = self._receiveAndCheckProposal(trid)
if not result['OK']:
self._transportPool.sendAndClose(trid, result)
return
proposalTuple = result['Value']
# Instantiate handler
result = self._instantiateHandler(trid, proposalTuple)
if not result['OK']:
self._transportPool.sendAndClose(trid, result)
return
handlerObj = result['Value']
# Execute the action
result = self._processProposal(trid, proposalTuple, handlerObj)
# Close the connection if required
if result['closeTransport'] or not result['OK']:
if not result['OK']:
gLogger.error("Error processing proposal", result['Message'])
self._transportPool.close(trid)
return result
finally:
self._lockManager.unlockGlobal()
if monReport:
self.__endReportToMonitoring(*monReport)
def _createIdentityString(self, credDict, clientTransport=None):
if 'username' in credDict:
if 'group' in credDict:
identity = "[%s:%s]" % (credDict['username'], credDict['group'])
else:
identity = "[%s:unknown]" % credDict['username']
else:
identity = 'unknown'
if clientTransport:
addr = clientTransport.getRemoteAddress()
if addr:
addr = "{%s:%s}" % (addr[0], addr[1])
if 'DN' in credDict:
identity += "(%s)" % credDict['DN']
return identity
def _receiveAndCheckProposal(self, trid):
clientTransport = self._transportPool.get(trid)
# Get the peer credentials
credDict = clientTransport.getConnectingCredentials()
# Receive the action proposal
retVal = clientTransport.receiveData(1024)
if not retVal['OK']:
gLogger.error("Invalid action proposal", "%s %s" % (self._createIdentityString(credDict,
clientTransport),
retVal['Message']))
return S_ERROR("Invalid action proposal")
proposalTuple = retVal['Value']
gLogger.debug("Received action from client", "/".join(list(proposalTuple[1])))
# Check if there are extra credentials
if proposalTuple[2]:
clientTransport.setExtraCredentials(proposalTuple[2])
# Check if this is the requested service
requestedService = proposalTuple[0][0]
if requestedService not in self._validNames:
return S_ERROR("%s is not up in this server" % requestedService)
# Check if the action is valid
requestedActionType = proposalTuple[1][0]
if requestedActionType not in Service.SVC_VALID_ACTIONS:
return S_ERROR("%s is not a known action type" % requestedActionType)
# Check if it's authorized
result = self._authorizeProposal(proposalTuple[1], trid, credDict)
if not result['OK']:
return result
#Proposal is OK
return S_OK(proposalTuple)
def _authorizeProposal(self, actionTuple, trid, credDict):
# Find CS path for the Auth rules
referedAction = self._isMetaAction(actionTuple[0])
if referedAction:
csAuthPath = "%s/Default" % actionTuple[0]
hardcodedMethodAuth = self._actions['auth'][actionTuple[0]]
else:
if actionTuple[0] == 'RPC':
csAuthPath = actionTuple[1]
else:
csAuthPath = "/".join(actionTuple)
# Find if there are hardcoded auth rules in the code
hardcodedMethodAuth = False
if actionTuple[0] in self._actions['auth']:
hardcodedRulesByType = self._actions['auth'][actionTuple[0]]
if actionTuple[0] == "FileTransfer":
methodName = actionTuple[1][0].lower() + actionTuple[1][1:]
else:
methodName = actionTuple[1]
if methodName in hardcodedRulesByType:
hardcodedMethodAuth = hardcodedRulesByType[methodName]
# Auth time!
if not self._authMgr.authQuery(csAuthPath, credDict, hardcodedMethodAuth):
# Get the identity string
identity = self._createIdentityString(credDict)
fromHost = "unknown host"
tr = self._transportPool.get(trid)
if tr:
fromHost = '/'.join([str(item) for item in tr.getRemoteAddress()])
gLogger.warn("Unauthorized query", "to %s:%s by %s from %s" % (self._name,
"/".join(actionTuple),
identity, fromHost))
result = S_ERROR(ENOAUTH, "Unauthorized query")
else:
result = S_OK()
# Security log
tr = self._transportPool.get(trid)
if not tr:
return S_ERROR("Client disconnected")
sourceAddress = tr.getRemoteAddress()
identity = self._createIdentityString(credDict)
Service.SVC_SECLOG_CLIENT.addMessage(result['OK'], sourceAddress[0], sourceAddress[1], identity,
self._cfg.getHostname(),
self._cfg.getPort(),
self._name, "/".join(actionTuple))
return result
def _instantiateHandler(self, trid, proposalTuple=None):
"""
Generate an instance of the handler for a given service
"""
# Generate the client params
clientParams = {'serviceStartTime': self._startTime}
if proposalTuple:
clientParams['clientSetup'] = proposalTuple[0][1]
if len(proposalTuple[0]) < 3:
clientParams['clientVO'] = gConfig.getValue("/DIRAC/VirtualOrganization", "unknown")
else:
clientParams['clientVO'] = proposalTuple[0][2]
clientTransport = self._transportPool.get(trid)
if clientTransport:
clientParams['clientAddress'] = clientTransport.getRemoteAddress()
# Generate handler dict with per client info
handlerInitDict = dict(self._serviceInfoDict)
for key in clientParams:
handlerInitDict[key] = clientParams[key]
#Instantiate and initialize
try:
handlerInstance = self._handler['class'](handlerInitDict, trid)
handlerInstance.initialize()
except Exception as e:
gLogger.exception("Server error while loading handler: %s" % str(e))
return S_ERROR("Server error while loading handler")
return S_OK(handlerInstance)
def _processProposal(self, trid, proposalTuple, handlerObj):
# Notify the client we're ready to execute the action
retVal = self._transportPool.send(trid, S_OK())
if not retVal['OK']:
return retVal
messageConnection = False
if proposalTuple[1] == ('Connection', 'new'):
messageConnection = True
if messageConnection:
if self._msgBroker.getNumConnections() > self._cfg.getMaxMessagingConnections():
result = S_ERROR("Maximum number of connections reached. Try later")
result['closeTransport'] = True
return result
# This is a stable connection
self._msgBroker.addTransportId(trid, self._name,
receiveMessageCallback=self._mbReceivedMsg,
disconnectCallback=self._mbDisconnect,
listenToConnection=False)
result = self._executeAction(trid, proposalTuple, handlerObj)
if result['OK'] and messageConnection:
self._msgBroker.listenToTransport(trid)
result = self._mbConnect(trid, handlerObj)
if not result['OK']:
self._msgBroker.removeTransport(trid)
result['closeTransport'] = not messageConnection or not result['OK']
return result
def _mbConnect(self, trid, handlerObj=None):
if not handlerObj:
result = self._instantiateHandler(trid)
if not result['OK']:
return result
handlerObj = result['Value']
return handlerObj._rh_executeConnectionCallback('connected')
def _executeAction(self, trid, proposalTuple, handlerObj):
try:
return handlerObj._rh_executeAction(proposalTuple)
except Exception as e:
gLogger.exception("Exception while executing handler action")
return S_ERROR("Server error while executing action: %s" % str(e))
def _mbReceivedMsg(self, trid, msgObj):
result = self._authorizeProposal(('Message', msgObj.getName()),
trid,
self._transportPool.get(trid).getConnectingCredentials())
if not result['OK']:
return result
result = self._instantiateHandler(trid)
if not result['OK']:
return result
handlerObj = result['Value']
return handlerObj._rh_executeMessageCallback(msgObj)
def _mbDisconnect(self, trid):
result = self._instantiateHandler(trid)
if not result['OK']:
return result
handlerObj = result['Value']
return handlerObj._rh_executeConnectionCallback('drop')
def __startReportToMonitoring(self):
self._monitor.addMark("Queries")
now = time.time()
stats = os.times()
cpuTime = stats[0] + stats[2]
if now - self.__monitorLastStatsUpdate < 0:
return (now, cpuTime)
# Send CPU consumption mark
wallClock = now - self.__monitorLastStatsUpdate
self.__monitorLastStatsUpdate = now
# Send Memory consumption mark
membytes = MemStat.VmB('VmRSS:')
if membytes:
mem = membytes / (1024. * 1024.)
self._monitor.addMark('MEM', mem)
return (now, cpuTime)
def __endReportToMonitoring(self, initialWallTime, initialCPUTime):
wallTime = time.time() - initialWallTime
stats = os.times()
cpuTime = stats[0] + stats[2] - initialCPUTime
percentage = cpuTime / wallTime * 100.
if percentage > 0:
self._monitor.addMark('CPU', percentage)
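# --- Illustrative sketch, not part of the original Service.py ---
# The methods above communicate almost exclusively through DIRAC's
# S_OK/S_ERROR result dictionaries.  A minimal, self-contained model of that
# convention could look like the sketch below; the _sketch_* names are
# hypothetical and are not used anywhere in the real module.
def _sketch_s_ok(value=None):
    # Success: callers test result['OK'] and read result['Value'].
    return {'OK': True, 'Value': value}

def _sketch_s_error(message):
    # Failure: callers test result['OK'] and log result['Message'].
    return {'OK': False, 'Message': message}

def _sketch_handle_proposal(proposal_is_valid):
    # Mirrors the early-exit style of _processInThread: stop at the first
    # failed step, otherwise annotate the result and return it.
    result = _sketch_s_ok('proposal') if proposal_is_valid else _sketch_s_error('Invalid action proposal')
    if not result['OK']:
        return result
    result['closeTransport'] = True  # e.g. close after a plain RPC call
    return result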
| fstagni/DIRAC | Core/DISET/private/Service.py | Python | gpl-3.0 | 24,377 | ["DIRAC"] | 7a863144af249c0e8f9e4da90dca3a7a74b0a4925624370051ab1afa9d394234 |
from django.test import TestCase
from django.test import Client
from django.urls import reverse
# Import Some Django models that we use
from django.contrib.auth.models import User, Group
# Import some models
from .models import ActivityGenerator
from .models import ActivitySet
from .models import ActivityType
from .models import Activity
from .models import AssessmentResource
from .models import AssessmentResourceType
from .models import AssessmentStaff
from .models import Category
from .models import Campus
from .models import ExternalExaminer
from .models import Staff
from .models import Task
from .models import Activity
from .models import TaskCompletion
from .models import Module
from .models import ModuleSize
from .models import ModuleStaff
from .models import Programme
from .models import Project
from .models import ProjectStaff
from .models import WorkPackage
class WorkPackageMigrationTestCase(TestCase):
def setUp(self):
# Create a workpackage
package = WorkPackage.objects.create(name="source", startdate="2017-09-01", enddate="2018-08-31")
destination_package = WorkPackage.objects.create(name="destination", startdate="2018-09-01",
enddate="2019-08-31")
# Create a campus
campus = Campus.objects.create(name="campus")
# Create a Module Size
modulesize = ModuleSize.objects.create(text="50", admin_scaling=1.0, assessment_scaling=1.0)
# Create some Users
user_aA = User.objects.create(username="academicA", password="test")
user_aB = User.objects.create(username="academicB", password="test")
user_aC = User.objects.create(username="academicC", password="test")
user_aD = User.objects.create(username="academicD", password="test")
user_aE = User.objects.create(username="academicE", password="test")
user_aF = User.objects.create(username="assessmentstaffA", password="test")
user_eA = User.objects.create(username="externalA", password="test")
user_eB = User.objects.create(username="externalB", password="test")
user_eC = User.objects.create(username="externalC", password="test")
# Create linked Staff and ExternalExaminers
coordinator = Staff.objects.create(user=user_aA)
team_member = Staff.objects.create(user=user_aB)
resource_owner = Staff.objects.create(user=user_aC)
moderator = Staff.objects.create(user=user_aD)
other_staff = Staff.objects.create(user=user_aE)
assessment_staff = Staff.objects.create(user=user_aF)
lead_examiner = Staff.objects.create(user=user_eA, is_external=True, has_workload=False)
associated_examiner = Staff.objects.create(user=user_eB, is_external=True, has_workload=False)
other_examiner = Staff.objects.create(user=user_eC, is_external=True, has_workload=False)
# Add the user to AssessmentStaff
AssessmentStaff.objects.create(staff=assessment_staff, package=package)
# Create some programmes
lead_programme = Programme.objects.create(
programme_code="123",
programme_name="BSc Breaking Things",
package=package)
lead_programme.examiners.add(lead_examiner)
lead_programme.save()
other_programme = Programme.objects.create(
programme_code="456",
programme_name="MSc Breaking Things",
package=package)
other_programme.examiners.add(associated_examiner)
other_programme.save()
# Create a module with staffA as coordinator and staff
module = Module.objects.create(module_code="ABC101",
module_name="Breaking Things",
package=package,
coordinator=coordinator,
lead_programme=lead_programme,
campus=campus,
size=modulesize)
module.moderators.add(moderator)
module.programmes.add(lead_programme)
module.programmes.add(other_programme)
module.save()
# and staffB on teaching team
ModuleStaff.objects.create(
module=module,
staff=team_member,
contact_proportion=50,
admin_proportion=50,
assessment_proportion=50,
package=package)
# Create a module related activity
category = Category.objects.create(name="Learning and Teaching")
activity_type = ActivityType.objects.create(name="Exciting Activity", category=category)
module_activity = Activity.objects.create(name="Module Activity",
activity_type=activity_type,
module=module,
hours=50,
percentage=0,
semester="1",
package=package,
staff=team_member)
# Create a custom activity
custom_activity = Activity.objects.create(name="Grand Wizard",
activity_type=activity_type,
hours=20,
percentage=0,
semester="1,2,3",
package=package,
staff=moderator)
# Create a generator (with no module) and run it
generator_no_module = ActivityGenerator.objects.create(name="Generator No Module",
hours=10,
percentage=0,
semester="1",
activity_type=activity_type,
package=package,
details="Test")
generator_no_module.targets.add(moderator)
generator_no_module.save()
generator_no_module.generate_activities()
# Create a generator (with a module) and run it
generator_module = ActivityGenerator.objects.create(name="Generator Module",
hours=10,
percentage=0,
semester="2",
module=module,
activity_type=activity_type,
package=package,
details="Test")
generator_module.targets.add(coordinator)
generator_module.save()
generator_module.generate_activities()
def test_package_migration_programmes(self):
destination = WorkPackage.objects.get(name="destination")
source = WorkPackage.objects.get(name="source")
# Just copy programmes first
options = dict()
options['copy_programmes'] = True
options['copy_activities_generated'] = False
options['copy_activities_custom'] = False
options['copy_modules'] = False
# Perform the clone
destination.clone_from(source, options)
# Attempt to run a second time to ensure idempotency
destination.clone_from(source, options)
# Get the programmes and check the numbers of each are correct
source_programmes = Programme.objects.all().filter(package=source)
self.assertEqual(len(source_programmes), 2)
destination_programmes = Programme.objects.all().filter(package=destination)
self.assertEqual(len(destination_programmes), 2)
# Now get the individual programmes.
source_programme_123 = source_programmes.get(programme_code="123")
source_programme_456 = source_programmes.get(programme_code="456")
destination_programme_123 = destination_programmes.get(programme_code="123")
destination_programme_456 = destination_programmes.get(programme_code="456")
# Should be trivially true
self.assertNotEqual(source_programme_123.pk, destination_programme_123.pk)
def test_package_migration_programmes_modules(self):
destination = WorkPackage.objects.get(name="destination")
source = WorkPackage.objects.get(name="source")
# Just copy programmes first
options = dict()
options['copy_programmes'] = True
options['copy_activities_generated'] = True
options['copy_activities_custom'] = True
options['copy_modules'] = True
options['copy_activities_modules'] = True
options['copy_modulestaff'] = True
# Perform the clone
destination.clone_from(source, options)
# Attempt to run a second time to ensure idempotency
destination.clone_from(source, options)
# Get the programmes and check the numbers of each are correct
source_programmes = Programme.objects.all().filter(package=source)
self.assertEqual(len(source_programmes), 2)
destination_programmes = Programme.objects.all().filter(package=destination)
self.assertEqual(len(destination_programmes), 2)
# And now the module counts (checks they are mapped correctly against packages)
source_modules = Module.objects.all().filter(package=source)
self.assertEqual(len(source_modules), 1)
destination_modules = Module.objects.all().filter(package=destination)
self.assertEqual(len(destination_modules), 1)
# Now get the individual programmes.
source_programme_123 = source_programmes.get(programme_code="123")
source_programme_456 = source_programmes.get(programme_code="456")
destination_programme_123 = destination_programmes.get(programme_code="123")
destination_programme_456 = destination_programmes.get(programme_code="456")
# Check the programme remappings
self.assertEqual(destination_modules[0].lead_programme, destination_programme_123)
for module_programme in destination_modules[0].programmes.all():
self.assertTrue(module_programme in destination_programmes)
# and modules
source_module_ABC101 = source_modules.get(module_code="ABC101")
destination_module_ABC101 = destination_modules.get(module_code="ABC101")
# Moderators - check it copied
self.assertEqual(len(destination_module_ABC101.moderators.all()), 1)
# And now staff allocations
source_modulestaff = ModuleStaff.objects.all().filter(package=source)
self.assertEqual(len(source_modulestaff), 1)
destination_modulestaff = ModuleStaff.objects.all().filter(package=destination)
self.assertEqual(len(destination_modulestaff), 1)
# Check the module is properly remapped
self.assertEqual(destination_modulestaff[0].module, destination_module_ABC101)
# And now module activities (that are not generated)
source_module_activities = Activity.objects.all().filter(package=source).filter(module__isnull=False). \
filter(activity_set__isnull=True)
self.assertEqual(len(source_module_activities), 1)
destination_module_activities = Activity.objects.all().filter(package=destination). \
filter(activity_set__isnull=True).filter(module__isnull=False)
self.assertEqual(len(destination_module_activities), 1)
# Check the module is properly remapped
self.assertEqual(destination_module_activities[0].module, destination_module_ABC101)
# And now custom activities (not related to a module)
source_custom_activities = Activity.objects.all().filter(package=source).filter(module__isnull=True). \
filter(activity_set__isnull=True)
self.assertEqual(len(source_custom_activities), 1)
destination_custom_activities = Activity.objects.all().filter(package=destination). \
filter(module__isnull=True).filter(activity_set__isnull=True)
self.assertEqual(len(destination_custom_activities), 1)
# Generated activities (no module)
source_generated_activities = Activity.objects.all().filter(package=source).filter(module__isnull=True). \
filter(activity_set__isnull=False)
self.assertEqual(len(source_generated_activities), 1)
destination_generated_activities = Activity.objects.all().filter(package=destination). \
filter(module__isnull=True).filter(activity_set__isnull=False)
self.assertEqual(len(destination_generated_activities), 1)
# Generated activities (no module)
source_generated_module_activities = Activity.objects.all().filter(package=source). \
filter(module__isnull=False).filter(activity_set__isnull=False)
self.assertEqual(len(source_generated_module_activities), 1)
destination_generated_module_activities = Activity.objects.all().filter(package=destination). \
filter(module__isnull=False).filter(activity_set__isnull=False)
self.assertEqual(len(destination_generated_module_activities), 1)
# Check the module is properly remapped
self.assertEqual(destination_generated_module_activities[0].module, destination_module_ABC101)
# Check counts on activity sets in each package to ensure they aren't cross linked
# Originally there were two and the new activities should be cross mapped
activity_sets = ActivitySet.objects.all()
self.assertEqual(len(activity_sets), 4)
# Each activity set should have precisely one activity. Let's check non modules ones first
no_module_activity_sets = activity_sets.filter(generator__module__isnull=True)
for activity_set in no_module_activity_sets:
self.assertEqual(len(Activity.objects.all().filter(activity_set=activity_set)), 1)
# And now module based ones
module_activity_sets = activity_sets.filter(generator__module__isnull=False)
# And there should be exactly one set per slot
for activity_set in module_activity_sets:
self.assertEqual(len(Activity.objects.all().filter(activity_set=activity_set)), 1)
def test_package_migration_orphaned_modules(self):
"""A test for the correct migration of modules that have no parent programme"""
destination = WorkPackage.objects.get(name="destination")
source = WorkPackage.objects.get(name="source")
# Just copy programmes first
options = dict()
options['copy_programmes'] = True
options['copy_activities_generated'] = True
options['copy_activities_custom'] = True
options['copy_modules'] = True
options['copy_activities_modules'] = True
options['copy_modulestaff'] = True
# Orphan a module
module = Module.objects.get(module_code="ABC101")
# Remove the lead programme
module.lead_programme = None
for programme in module.programmes.all():
module.programmes.remove(programme)
# Now perform the clone
destination.clone_from(source, options)
# Attempt to run a second time to ensure idempotency
destination.clone_from(source, options)
# check the numbers of module in each package are correct
source_modules = Module.objects.all().filter(package=source).filter(module_code="ABC101")
self.assertEqual(len(source_modules), 1)
destination_modules = Module.objects.all().filter(package=destination).filter(module_code="ABC101")
self.assertEqual(len(destination_modules), 1)
# and modules
#source_module_ABC101 = source_modules.get(module_code="ABC101")
#destination_module_ABC101 = destination_modules.get(module_code="ABC101")
def test_package_migration_module_from_otherpackage(self):
"""A test for when a module has a programme from outside the sourcepackage"""
destination = WorkPackage.objects.get(name="destination")
source = WorkPackage.objects.get(name="source")
# Create an alternative source package
alternate_source = WorkPackage.objects.create(name="alt_source", startdate="2017-09-01", enddate="2018-08-31")
# And a programme within it
lead_programme = Programme.objects.create(
programme_code="987",
programme_name="BSc Alternative Breaking Things",
package=alternate_source)
# Copy most stuff
options = dict()
options['copy_programmes'] = True
options['copy_activities_generated'] = True
options['copy_activities_custom'] = True
options['copy_modules'] = True
options['copy_activities_modules'] = True
options['copy_modulestaff'] = True
# Change the programme of a module to one in another workpackage
module = Module.objects.get(module_code="ABC101")
# Change the lead programme, remove any others
module.lead_programme = lead_programme
for programme in module.programmes.all():
module.programmes.remove(programme)
module.programmes.add(lead_programme)
# Now perform the clone
destination.clone_from(source, options)
# Attempt to run a second time to ensure idempotency
destination.clone_from(source, options)
# check the numbers of module in each package are correct
source_modules = Module.objects.all().filter(package=source).filter(module_code="ABC101")
self.assertEqual(len(source_modules), 1)
destination_modules = Module.objects.all().filter(package=destination).filter(module_code="ABC101")
self.assertEqual(len(destination_modules), 1)
class AssessmentResourceTestCase(TestCase):
def setUp(self):
# Create a workpackage
package = WorkPackage.objects.create(name="test", startdate="2017-09-01", enddate="2018-08-31")
# Create a campus
campus = Campus.objects.create(name="campus")
# Create a Module Size
modulesize = ModuleSize.objects.create(text="50", admin_scaling=1.0, assessment_scaling=1.0)
# Create some Users
user_aA = User.objects.create(username="academicA", password="test")
user_aB = User.objects.create(username="academicB", password="test")
user_aC = User.objects.create(username="academicC", password="test")
user_aD = User.objects.create(username="academicD", password="test")
user_aE = User.objects.create(username="academicE", password="test")
user_aF = User.objects.create(username="assessmentstaffA", password="test")
user_eA = User.objects.create(username="externalA", password="test")
user_eB = User.objects.create(username="externalB", password="test")
user_eC = User.objects.create(username="externalC", password="test")
# Create linked Staff and ExternalExaminers
coordinator = Staff.objects.create(user=user_aA)
team_member = Staff.objects.create(user=user_aB)
resource_owner = Staff.objects.create(user=user_aC)
moderator = Staff.objects.create(user=user_aD)
other_staff = Staff.objects.create(user=user_aE)
assessment_staff = Staff.objects.create(user=user_aF)
lead_examiner = Staff.objects.create(user=user_eA, is_external=True, has_workload=False)
associated_examiner = Staff.objects.create(user=user_eB, is_external=True, has_workload=False)
other_examiner = Staff.objects.create(user=user_eC, is_external=True, has_workload=False)
# Add the user to AssessmentStaff
AssessmentStaff.objects.create(staff=assessment_staff, package=package)
# Create some programmes
lead_programme = Programme.objects.create(
programme_code="123",
programme_name="BSc Breaking Things",
package=package)
lead_programme.examiners.add(lead_examiner)
lead_programme.save()
other_programme = Programme.objects.create(
programme_code="456",
programme_name="MSc Breaking Things",
package=package)
other_programme.examiners.add(associated_examiner)
other_programme.save()
# Create a module with staffA as coordinator and staff
module = Module.objects.create(module_code="ABC101",
module_name="Breaking Things",
package=package,
coordinator=coordinator,
lead_programme=lead_programme,
campus=campus,
size=modulesize)
module.moderators.add(moderator)
module.programmes.add(lead_programme)
module.programmes.add(other_programme)
module.save()
# and staffB on teaching team
ModuleStaff.objects.create(
module=module,
staff=team_member,
contact_proportion=50,
admin_proportion=50,
assessment_proportion=50,
package=package)
# Create an AssessmentResourceType
resource_type = AssessmentResourceType.objects.create(name="exam")
# Create a resource, with staffC as an owner
resource = AssessmentResource.objects.create(
name="test",
module=module,
owner=resource_owner,
resource_type=resource_type)
def test_resource_coordinator_permissions(self):
user = User.objects.get(username="academicA")
coordinator = Staff.objects.get(user__username="academicA")
resource = AssessmentResource.objects.get(name="test")
# Coordinators should be able to download
self.assertEqual(resource.is_downloadable_by(coordinator), True)
self.assertEqual(resource.is_downloadable_by_staff(coordinator), True)
self.assertEqual(resource.is_downloadable_by_external(coordinator), False)
def test_resource_team_permissions(self):
team_member = Staff.objects.get(user__username="academicB")
resource = AssessmentResource.objects.get(name="test")
# Team members should be able to download
self.assertEqual(resource.is_downloadable_by(team_member), True)
self.assertEqual(resource.is_downloadable_by_staff(team_member), True)
self.assertEqual(resource.is_downloadable_by_external(team_member), False)
def test_resource_moderator_permissions(self):
moderator = Staff.objects.get(user__username="academicD")
resource = AssessmentResource.objects.get(name="test")
# Moderators should be able to download
self.assertEqual(resource.is_downloadable_by(moderator), True)
self.assertEqual(resource.is_downloadable_by_staff(moderator), True)
self.assertEqual(resource.is_downloadable_by_external(moderator), False)
def test_resource_owner_permissions(self):
owner = Staff.objects.get(user__username="academicC")
resource = AssessmentResource.objects.get(name="test")
# Owners should be able to download
self.assertEqual(resource.is_downloadable_by(owner), True)
self.assertEqual(resource.is_downloadable_by_staff(owner), True)
self.assertEqual(resource.is_downloadable_by_external(owner), False)
def test_resource_assessment_staff_permissions(self):
assessment_staff = Staff.objects.get(user__username="assessmentstaffA")
resource = AssessmentResource.objects.get(name="test")
# Assessment staff should be able to download
self.assertEqual(resource.is_downloadable_by(assessment_staff), True)
self.assertEqual(resource.is_downloadable_by_staff(assessment_staff), True)
self.assertEqual(resource.is_downloadable_by_external(assessment_staff), False)
def test_resource_other_staff_permissions(self):
other = Staff.objects.get(user__username="academicE")
resource = AssessmentResource.objects.get(name="test")
# Others should NOT be able to download
self.assertEqual(resource.is_downloadable_by(other), False)
self.assertEqual(resource.is_downloadable_by_staff(other), False)
self.assertEqual(resource.is_downloadable_by_external(other), False)
def test_resource_lead_examiner_permissions(self):
lead_examiner = Staff.objects.get(user__username="externalA")
resource = AssessmentResource.objects.get(name="test")
# Lead Examiners should be able to download
self.assertEqual(resource.is_downloadable_by(lead_examiner), True)
self.assertEqual(resource.is_downloadable_by_staff(lead_examiner), False)
self.assertEqual(resource.is_downloadable_by_external(lead_examiner), True)
def test_resource_associate_examiner_permissions(self):
associate_examiner = Staff.objects.get(user__username="externalB")
resource = AssessmentResource.objects.get(name="test")
# Associate Examiners should be able to download
self.assertEqual(resource.is_downloadable_by(associate_examiner), True)
self.assertEqual(resource.is_downloadable_by_staff(associate_examiner), False)
self.assertEqual(resource.is_downloadable_by_external(associate_examiner), True)
def test_resource_other_examiner_permissions(self):
other_examiner = Staff.objects.get(user__username="externalC")
resource = AssessmentResource.objects.get(name="test")
# Other Examiners should not be able to download
self.assertEqual(resource.is_downloadable_by(other_examiner), False)
self.assertEqual(resource.is_downloadable_by_staff(other_examiner), False)
self.assertEqual(resource.is_downloadable_by_external(other_examiner), False)
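# --- Illustrative sketch, not part of the original test suite ---
# The nine permission tests above each assert the same triple of flags, so the
# expected matrix can also be written down in one table.  The mapping below
# only restates the assertions made in the tests; it is not wired into the
# TestCase and the constant name is hypothetical.
EXPECTED_DOWNLOAD_FLAGS = {
    # username: (is_downloadable_by, is_downloadable_by_staff, is_downloadable_by_external)
    "academicA": (True, True, False),         # module coordinator
    "academicB": (True, True, False),         # teaching team member
    "academicC": (True, True, False),         # resource owner
    "academicD": (True, True, False),         # moderator
    "assessmentstaffA": (True, True, False),  # assessment staff
    "academicE": (False, False, False),       # unrelated staff
    "externalA": (True, False, True),         # lead examiner
    "externalB": (True, False, True),         # associate examiner
    "externalC": (False, False, False),       # unrelated examiner
}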
| profcturner/WAM | loads/tests.py | Python | agpl-3.0 | 26,483 | ["exciting"] | 42a9e1f048f0de47d777cb6b6c296d4f99c2acd06210e838f29e273539e23354 |
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Pipek-Mezey localization
'''
import time
import numpy
import scipy.linalg
from functools import reduce
from pyscf import lib
from pyscf.lib import logger
from pyscf.lo import orth
from pyscf.lo import boys
from pyscf import __config__
def atomic_pops(mol, mo_coeff, method='meta_lowdin'):
'''
Kwargs:
method : string
one of mulliken, lowdin, meta_lowdin
Returns:
A 3-index tensor [A,i,j] indicates the population of any orbital-pair
density |i><j| for each species (atom in this case). This tensor is
used to construct the population and gradients etc.
You can customize the PM localization with respect to other population
metrics, such as the charge of a site or the charge of a fragment (a group
of atoms), by overwriting this tensor. See also the example
pyscf/examples/loc_orb/40-hubbard_model_PM_localization.py for PM
localization with a site-based population for the Hubbard model.
'''
if getattr(mol, 'pbc_intor', None): # whether mol object is a cell
s = mol.pbc_intor('int1e_ovlp_sph', hermi=1)
else:
s = mol.intor_symmetric('int1e_ovlp')
nmo = mo_coeff.shape[1]
proj = numpy.empty((mol.natm,nmo,nmo))
if method.lower() == 'mulliken':
for i, (b0, b1, p0, p1) in enumerate(mol.offset_nr_by_atom()):
csc = reduce(numpy.dot, (mo_coeff[p0:p1].conj().T, s[p0:p1], mo_coeff))
proj[i] = (csc + csc.conj().T) * .5
elif method.lower() in ('lowdin', 'meta_lowdin'):
c = orth.restore_ao_character(mol, 'ANO')
csc = reduce(lib.dot, (mo_coeff.conj().T, s, orth.orth_ao(mol, method, c, s=s)))
for i, (b0, b1, p0, p1) in enumerate(mol.offset_nr_by_atom()):
proj[i] = numpy.dot(csc[:,p0:p1], csc[:,p0:p1].conj().T)
else:
raise KeyError('method = %s' % method)
return proj
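# --- Illustrative sketch, not part of the original module ---
# As the docstring above notes, the PM metric can be redefined over other
# "species" (e.g. fragments) by replacing the population tensor.  Assuming
# `fragments` is a list of lists of atom indices, a fragment-resolved tensor
# can be built simply by summing the per-atom populations; the helper name is
# hypothetical and is not used by the PipekMezey class below.
def _fragment_pops_sketch(mol, mo_coeff, fragments, method='meta_lowdin'):
    pop = atomic_pops(mol, mo_coeff, method)        # shape (natm, nmo, nmo)
    nmo = mo_coeff.shape[1]
    proj = numpy.empty((len(fragments), nmo, nmo))
    for k, atom_indices in enumerate(fragments):
        proj[k] = pop[atom_indices].sum(axis=0)     # sum atoms belonging to fragment k
    return proj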
class PipekMezey(boys.Boys):
'''
The Pipek-Mezey localization optimizer that maximizes the orbital
population
Args:
mol : Mole object
Kwargs:
mo_coeff : size (N,N) np.array
The orbital space to localize for PM localization.
When initializing the localization optimizer with ``bopt = PM(mol, mo_coeff)``,
note that these orbitals ``mo_coeff`` may or may not be used as the initial
guess, depending on the attribute ``.init_guess``. If ``.init_guess``
is set to None, ``mo_coeff`` will be used as the initial guess. If
``.init_guess`` is 'atomic', a few atomic orbitals will be
constructed inside the space of the input orbitals and those atomic
orbitals will be used as the initial guess.
Note that when the .kernel(orb) method is called with a set of orbitals as
its argument, those orbitals will be used as the initial guess regardless of
the values of the attributes .mo_coeff and .init_guess.
Attributes for PM class:
verbose : int
Print level. Default value equals to :class:`Mole.verbose`.
max_memory : float or int
Allowed memory in MB. Default value equals to :class:`Mole.max_memory`.
conv_tol : float
Converge threshold. Default 1e-6
conv_tol_grad : float
Converge threshold for orbital rotation gradients. Default 1e-3
max_cycle : int
The max. number of macro iterations. Default 100
max_iters : int
The max. number of iterations in each macro iteration. Default 20
max_stepsize : float
The step size for orbital rotation. A small step (0.005 - 0.05) is preferred.
Default 0.03.
init_guess : str or None
Initial guess for optimization. If set to None, orbitals defined
by the attribute .mo_coeff will be used as initial guess. If set
to 'atomic', atomic orbitals will be used as initial guess.
Default 'atomic'
pop_method : str
How the orbital population is calculated. By default, meta-lowdin
population (JCTC, 10, 3784) is used. It can be set to 'mulliken',
or 'lowdin' for other population definition
exponent : int
The power to define norm. It can be 2 or 4. Default 2.
Saved results
mo_coeff : ndarray
Localized orbitals
'''
pop_method = getattr(__config__, 'lo_pipek_PM_pop_method', 'meta_lowdin')
conv_tol = getattr(__config__, 'lo_pipek_PM_conv_tol', 1e-6)
exponent = getattr(__config__, 'lo_pipek_PM_exponent', 2) # should be 2 or 4
def __init__(self, mol, mo_coeff=None):
boys.Boys.__init__(self, mol, mo_coeff)
self._keys = self._keys.union(['pop_method', 'exponent'])
def dump_flags(self, verbose=None):
boys.Boys.dump_flags(self, verbose)
logger.info(self, 'pop_method = %s',self.pop_method)
def gen_g_hop(self, u):
mo_coeff = lib.dot(self.mo_coeff, u)
pop = self.atomic_pops(self.mol, mo_coeff, self.pop_method)
if self.exponent == 2:
g0 = numpy.einsum('xii,xip->pi', pop, pop)
g = -self.pack_uniq_var(g0-g0.conj().T) * 2
elif self.exponent == 4:
pop3 = numpy.einsum('xii->xi', pop)**3
g0 = numpy.einsum('xi,xip->pi', pop3, pop)
g = -self.pack_uniq_var(g0-g0.conj().T) * 4
else:
raise NotImplementedError('exponent %s' % self.exponent)
h_diag = numpy.einsum('xii,xpp->pi', pop, pop) * 2
g_diag = g0.diagonal()
h_diag-= g_diag + g_diag.reshape(-1,1)
h_diag+= numpy.einsum('xip,xip->pi', pop, pop) * 2
h_diag+= numpy.einsum('xip,xpi->pi', pop, pop) * 2
h_diag = -self.pack_uniq_var(h_diag) * 2
g0 = g0 + g0.conj().T
if self.exponent == 2:
def h_op(x):
x = self.unpack_uniq_var(x)
norb = x.shape[0]
hx = lib.dot(x.T, g0.T).conj()
hx+= numpy.einsum('xip,xi->pi', pop, numpy.einsum('qi,xiq->xi', x, pop)) * 2
hx-= numpy.einsum('xpp,xip->pi', pop,
lib.dot(pop.reshape(-1,norb), x).reshape(-1,norb,norb)) * 2
hx-= numpy.einsum('xip,xp->pi', pop, numpy.einsum('qp,xpq->xp', x, pop)) * 2
return -self.pack_uniq_var(hx-hx.conj().T)
else:
def h_op(x):
x = self.unpack_uniq_var(x)
norb = x.shape[0]
hx = lib.dot(x.T, g0.T).conj() * 2
pop2 = numpy.einsum('xii->xi', pop)**2
pop3 = numpy.einsum('xii->xi', pop)**3
tmp = numpy.einsum('qi,xiq->xi', x, pop) * pop2
hx+= numpy.einsum('xip,xi->pi', pop, tmp) * 12
hx-= numpy.einsum('xp,xip->pi', pop3,
lib.dot(pop.reshape(-1,norb), x).reshape(-1,norb,norb)) * 4
tmp = numpy.einsum('qp,xpq->xp', x, pop) * pop2
hx-= numpy.einsum('xip,xp->pi', pop, tmp) * 12
return -self.pack_uniq_var(hx-hx.conj().T)
return g, h_op, h_diag
def get_grad(self, u=None):
if u is None: u = numpy.eye(self.mo_coeff.shape[1])
mo_coeff = lib.dot(self.mo_coeff, u)
pop = self.atomic_pops(self.mol, mo_coeff, self.pop_method)
if self.exponent == 2:
g0 = numpy.einsum('xii,xip->pi', pop, pop)
g = -self.pack_uniq_var(g0-g0.conj().T) * 2
else:
pop3 = numpy.einsum('xii->xi', pop)**3
g0 = numpy.einsum('xi,xip->pi', pop3, pop)
g = -self.pack_uniq_var(g0-g0.conj().T) * 4
return g
def cost_function(self, u=None):
if u is None: u = numpy.eye(self.mo_coeff.shape[1])
mo_coeff = lib.dot(self.mo_coeff, u)
pop = self.atomic_pops(self.mol, mo_coeff, self.pop_method)
if self.exponent == 2:
return numpy.einsum('xii,xii->', pop, pop)
else:
pop2 = numpy.einsum('xii->xi', pop)**2
return numpy.einsum('xi,xi', pop2, pop2)
@lib.with_doc(atomic_pops.__doc__)
def atomic_pops(self, mol, mo_coeff, method=None):
if method is None:
method = self.pop_method
return atomic_pops(mol, mo_coeff, method)
PM = Pipek = PipekMezey
if __name__ == '__main__':
from pyscf import gto, scf
mol = gto.Mole()
mol.atom = '''
O 0. 0. 0.2
H 0. -0.5 -0.4
H 0. 0.5 -0.4
'''
mol.basis = 'ccpvdz'
mol.build()
mf = scf.RHF(mol).run()
mo = PM(mol).kernel(mf.mo_coeff[:,5:9], verbose=4)
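    # --- Illustrative sketch, not part of the original example ---
    # The attributes documented in the PipekMezey docstring can be adjusted on
    # the optimizer before calling kernel(); the values below are arbitrary.
    loc = PM(mol, mf.mo_coeff[:, 5:9])
    loc.pop_method = 'mulliken'   # 'meta_lowdin' (default), 'lowdin' or 'mulliken'
    loc.exponent = 4              # 2 (default) or 4
    mo2 = loc.kernel()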
| gkc1000/pyscf | pyscf/lo/pipek.py | Python | apache-2.0 | 9,347 | ["PySCF"] | c32ea7623459fa74d26e11022cc2eb4da24608f0323cc2dc55249677369fa1ed |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2010-2016 Bastian Kleineidam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Setup file for the distutils module.
"""
from __future__ import print_function
import sys
if not hasattr(sys, "version_info") or sys.version_info < (3, 5):
raise SystemExit("This program requires Python 3.5 or later.")
import os
import re
from setuptools import setup
from distutils.core import Distribution
from distutils.command.install_lib import install_lib
from distutils import util
from distutils.file_util import write_file
AppName = "patool"
AppVersion = "1.12"
MyName = "Bastian Kleineidam"
MyEmail = "bastian.kleineidam@web.de"
def normpath (path):
"""Norm a path name to platform specific notation."""
return os.path.normpath(path)
def cnormpath (path):
"""Norm a path name to platform specific notation and make it absolute."""
path = normpath(path)
if os.name == 'nt':
# replace slashes with backslashes
path = path.replace("/", "\\")
if not os.path.isabs(path):
path = normpath(os.path.join(sys.prefix, path))
return path
release_ro = re.compile(r"\(released (.+)\)")
def get_release_date ():
"""Parse and return release date as string from doc/changelog.txt."""
fname = os.path.join("doc", "changelog.txt")
release_date = "unknown"
with open(fname) as fd:
# the release date is on the first line
line = fd.readline()
mo = release_ro.search(line)
if mo:
release_date = mo.group(1)
return release_date
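# --- Illustrative sketch, not part of the original setup script ---
# get_release_date() above only looks at the first changelog line and expects
# it to contain a "(released <date>)" marker; the line used below is a made-up
# example of that format, not a real changelog entry, and the helper name is
# hypothetical.
def _sketch_parse_release_line(line="1.12 (released 2015-01-01)"):
    mo = release_ro.search(line)
    return mo.group(1) if mo else "unknown"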
data_files = []
if os.name == 'nt':
data_files.append(('share', ['doc/patool.txt']))
else:
data_files.append(('share/man/man1', ['doc/patool.1']))
class MyInstallLib (install_lib, object):
"""Custom library installation."""
def install (self):
"""Install the generated config file."""
outs = super(MyInstallLib, self).install()
infile = self.create_conf_file()
outfile = os.path.join(self.install_dir, os.path.basename(infile))
self.copy_file(infile, outfile)
outs.append(outfile)
return outs
def create_conf_file (self):
"""Create configuration file."""
cmd_obj = self.distribution.get_command_obj("install")
cmd_obj.ensure_finalized()
# we have to write a configuration file because we need the
# <install_data> directory (and other stuff like author, url, ...)
# all paths are made absolute by cnormpath()
data = []
for d in ['purelib', 'platlib', 'lib', 'headers', 'scripts', 'data']:
attr = 'install_%s' % d
if cmd_obj.root:
# cut off root path prefix
cutoff = len(cmd_obj.root)
# don't strip the path separator
if cmd_obj.root.endswith(os.sep):
cutoff -= 1
val = getattr(cmd_obj, attr)[cutoff:]
else:
val = getattr(cmd_obj, attr)
if attr == 'install_data':
cdir = os.path.join(val, "share", AppName)
data.append('config_dir = %r' % cnormpath(cdir))
elif attr == 'install_lib':
if cmd_obj.root:
_drive, tail = os.path.splitdrive(val)
if tail.startswith(os.sep):
tail = tail[1:]
self.install_lib = os.path.join(cmd_obj.root, tail)
else:
self.install_lib = val
data.append("%s = %r" % (attr, cnormpath(val)))
self.distribution.create_conf_file(data, directory=self.install_lib)
return self.get_conf_output()
def get_conf_output (self):
"""Get filename for distribution configuration file."""
return self.distribution.get_conf_filename(self.install_lib)
def get_outputs (self):
"""Add the generated config file to the list of outputs."""
outs = super(MyInstallLib, self).get_outputs()
conf_output = self.get_conf_output()
outs.append(conf_output)
if self.compile:
outs.extend(self._bytecode_filenames([conf_output]))
return outs
class MyDistribution (Distribution, object):
"""Custom distribution class generating config file."""
def __init__ (self, attrs):
"""Set console and windows scripts."""
super(MyDistribution, self).__init__(attrs)
self.console = ['patool']
def run_commands (self):
"""Generate config file and run commands."""
cwd = os.getcwd()
data = []
data.append('config_dir = %r' % os.path.join(cwd, "config"))
data.append("install_data = %r" % cwd)
data.append("install_scripts = %r" % cwd)
self.create_conf_file(data)
super(MyDistribution, self).run_commands()
def get_conf_filename (self, directory):
"""Get name for config file."""
return os.path.join(directory, "_%s_configdata.py" % self.get_name())
def create_conf_file (self, data, directory=None):
"""Create local config file from given data (list of lines) in
the directory (or current directory if not given)."""
data.insert(0, "# this file is automatically created by setup.py")
data.insert(0, "# -*- coding: iso-8859-1 -*-")
if directory is None:
directory = os.getcwd()
filename = self.get_conf_filename(directory)
# add metadata
metanames = ("name", "version", "author", "author_email",
"maintainer", "maintainer_email", "url",
"license", "description", "long_description",
"keywords", "platforms", "fullname", "contact",
"contact_email")
for name in metanames:
method = "get_" + name
val = getattr(self.metadata, method)()
data.append("%s = %r" % (name, val))
data.append('release_date = "%s"' % get_release_date())
# write the config file
util.execute(write_file, (filename, data),
"creating %s" % filename, self.verbose >= 1, self.dry_run)
args = dict(
name = AppName,
version = AppVersion,
description = "portable archive file manager",
long_description = """Various archive formats can be created, extracted, tested, listed,
searched, compared and repacked by patool. The advantage of patool
is its simplicity in handling archive files without having to remember
a myriad of programs and options.
The archive format is determined by the file(1) program and as a
fallback by the archive file extension.
patool supports 7z (.7z), ACE (.ace), ADF (.adf), ALZIP (.alz), APE (.ape),
AR (.a), ARC (.arc), ARJ (.arj), BZIP2 (.bz2),
CAB (.cab), COMPRESS (.Z), CPIO (.cpio),
DEB (.deb), DMS (.dms), FLAC (.flac), GZIP (.gz), ISO (.iso), LRZIP (.lrz),
LZH (.lha, .lzh), LZIP (.lz), LZMA (.lzma), LZOP (.lzo), RPM (.rpm),
RAR (.rar), RZIP (.rz), SHN (.shn), TAR (.tar), XZ (.xz), ZIP (.zip, .jar),
ZOO (.zoo) and ZPAQ (.zpaq) formats.
It relies on helper applications to handle those archive formats
(for example bzip2 for BZIP2 archives).
The archive formats TAR, ZIP, BZIP2 and GZIP
are supported natively and do not require helper applications to be
installed.
""",
author = MyName,
author_email = MyEmail,
maintainer = MyName,
maintainer_email = MyEmail,
license = "GPL",
url = "https://wummel.github.io/patool/",
project_urls = {
"Source": "https://github.com/wummel/patool",
},
packages = ['patoolib', 'patoolib.programs'],
data_files = data_files,
scripts = ['patool'],
keywords = "archiver,archive,compression,commandline,manager",
classifiers = [
'Environment :: Console',
'Topic :: System :: Archiving',
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Operating System :: OS Independent',
],
distclass = MyDistribution,
cmdclass = {
'install_lib': MyInstallLib,
},
)
setup(**args)
| wummel/patool | setup.py | Python | gpl-3.0 | 9,029 | ["ADF"] | 8c143a74429223af05abb2a5b56de62314fdaac54f365146ccb4ab7811e72e61 |
# -----------------------------------------------------------------------------
#
# Copyright (C) 2021 CERN & University of Surrey for the benefit of the
# BioDynaMo collaboration. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# See the LICENSE file distributed with this work for details.
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# -----------------------------------------------------------------------------
import argparse
import os
import math
import sys
from paraview.simple import *
from paraview import coprocessing
# Returning an error code using sys.exit does not work for the insitu tests
# because it would exit the whole unit test process.
# Thus we create a valid file to indicate a passing test
def CreateValidFile(sim_name):
with open("output/{0}/valid".format(sim_name), 'w') as fp:
pass
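# --- Illustrative sketch, not part of the original script ---
# On the test-harness side, a passing run is typically detected by checking
# that the marker file written by CreateValidFile exists; the helper name is
# hypothetical.
def _sketch_valid_file_exists(sim_name):
    return os.path.isfile("output/{0}/valid".format(sim_name))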
# Entry point for insitu visualization
def ExtendDefaultPipeline(renderview, coprocessor, datadescription, script_args):
parser = argparse.ArgumentParser(description='Validate Diffusion Grid')
parser.add_argument('--sim_name', action='store', type=str)
parser.add_argument('--num_elements', action='store', type=int)
params, other = parser.parse_known_args(script_args)
substance_source = FindSource('Substance-concentration')
substance = paraview.servermanager.Fetch(substance_source)
data = substance.GetPointData().GetArray('Substance Concentration')
if data.GetNumberOfTuples() != params.num_elements:
print("ERROR number of diffusion grid elements wrong: expected:", params.num_elements,
"actual:", data.GetNumberOfTuples())
return
for i in range(0, data.GetNumberOfTuples()):
if not math.isclose(float(i), data.GetValue(i), abs_tol=1e-5):
print("ERROR diffusion grid element", i, " has wrong value: expected:", i,
"actual:", data.GetValue(i))
return
CreateValidFile(params.sim_name)
# Entry point for export visualization
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Validate Diffusion Grid')
parser.add_argument('--sim_name', action='store', type=str)
parser.add_argument('--use_pvsm', action='store_true', dest="use_pvsm")
params, args = parser.parse_known_args()
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
if params.use_pvsm:
LoadState('output/{0}/{0}.pvsm'.format(params.sim_name))
else:
sys.path.insert(0, "{0}/include/core/visualization/paraview".format(os.environ['BDMSYS']))
from generate_pv_state import BuildDefaultPipeline
BuildDefaultPipeline('output/{0}/simulation_info.json'.format(params.sim_name))
ExtendDefaultPipeline(None, None, None, sys.argv[1:])
| BioDynaMo/biodynamo | test/unit/core/visualization/paraview/validate_diffusion_grid.py | Python | apache-2.0 | 2,985 | ["ParaView"] | 413a6fe9015011833b9c5705118f21be638b7e4bdadaf9be3903f2dd05303264 |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=80 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2014 Raoul Snyman #
# Portions copyright (c) 2008-2014 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Ken Roberts #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
__version__ = '0.0.1'
__v = __version__.split('.')
__version_hex__ = int(__v[0]) << 24 | \
int(__v[1]) << 16 | \
int(__v[2]) << 8
__module = 'projectors'
import logging
log = logging.getLogger(__name__)
from PyQt4 import QtCore, QtGui
class ProjectorItem(QtCore.QObject):
'''
GUI interface to control/monitor a single projector
'''
| alisonken1/openlp-projector-2.0 | openlp/projectors/projectoritem.py | Python | gpl-2.0 | 2,490 | ["Brian"] | d293a53e3965bbe65ff0aaf775f7e69e0cf9966a4b7ba4c412b723d5697bd2e9 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""A Relay implementation of graph packing."""
import tvm
from tvm import relay
from tvm.relay import op, transform
from tvm.relay import ExprMutator
def run_opt_pass(expr, opt_pass):
"""Exectue a relay pass."""
assert isinstance(opt_pass, tvm.transform.Pass)
mod = tvm.IRModule.from_expr(expr)
mod = opt_pass(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def _to_shape(shape):
"""convert shape into tuple."""
return tuple(int(sh) for sh in shape)
def _pack_batch_channel(data, dshape, bfactor, cfactor):
"""Pack the data channel dimension."""
assert int(dshape[0]) % bfactor == 0
assert int(dshape[1]) % cfactor == 0
data = op.reshape(
data,
newshape=(
int(dshape[0]) // bfactor,
bfactor,
int(dshape[1]) // cfactor,
cfactor,
int(dshape[2]),
int(dshape[3]),
),
)
data = op.transpose(data, axes=(0, 2, 4, 5, 1, 3))
return data
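# --- Illustrative sketch, not part of the original pass ---
# The relay reshape/transpose above implements the NCHW -> packed
# NCHW{bfactor}n{cfactor}c layout.  The same index shuffle on a plain numpy
# array (useful for reasoning about the layout) can be written as follows;
# the helper is hypothetical and is not used by the pass.
def _numpy_pack_sketch(x, bfactor, cfactor):
    import numpy as np  # local import so the sketch adds no module-level dependency
    n, c, h, w = x.shape
    assert n % bfactor == 0 and c % cfactor == 0
    packed = x.reshape(n // bfactor, bfactor, c // cfactor, cfactor, h, w)
    return np.ascontiguousarray(packed.transpose(0, 2, 4, 5, 1, 3))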
def _unpack_batch_channel(data, old_shape):
"""Unpack the data channel dimension."""
data = op.transpose(data, axes=(0, 4, 1, 5, 2, 3))
data = op.reshape(data, newshape=old_shape)
return data
def _const_shape_match(data, dshape, cfactor_out):
"""Pad the constant if the shape[0] not divisible by cfactor_out."""
assert len(dshape) == 3
pad_width = int(dshape[0]) % cfactor_out
if pad_width != 0:
pad_width = cfactor_out - pad_width
data = op.nn.pad(data, [[0, pad_width], [0, 0], [0, 0]])
dshape = tuple([dshape[0] + pad_width, dshape[1], dshape[2]])
return data, dshape
def _weight_shape_match(data, dshape, channels, cfactor_out, transpose=False):
"""Pad the weight if the shape[0] not divisible by cfactor_out."""
assert len(dshape) == 4
pad_width = int(dshape[0]) % cfactor_out
channels_pad = int(channels) % cfactor_out
if pad_width != 0:
pad_width = cfactor_out - pad_width
data = op.nn.pad(data, [[0, pad_width], [0, 0], [0, 0], [0, 0]])
dshape = tuple([dshape[0] + pad_width, dshape[1], dshape[2], dshape[3]])
if channels_pad != 0:
channels = channels + (cfactor_out - channels_pad)
return data, dshape, channels
def _weight_shape_match_transpose(data, dshape, channels, cfactor_out):
"""Pad the weight if the shape[1] not divisible by cfactor_out."""
assert len(dshape) == 4
pad_width = int(dshape[1]) % cfactor_out
channels_pad = int(channels) % cfactor_out
if pad_width != 0:
pad_width = cfactor_out - pad_width
data = op.nn.pad(data, [[0, 0], [0, pad_width], [0, 0], [0, 0]])
dshape = tuple([dshape[0], dshape[1] + pad_width, dshape[2], dshape[3]])
if channels_pad != 0:
channels = channels + (cfactor_out - channels_pad)
return data, dshape, channels
def _pack_weight(data, dshape, cfactor):
"""Pack the weight into packed format."""
assert len(dshape) == 4
assert int(dshape[0]) % cfactor == 0
assert int(dshape[1]) % cfactor == 0
data = op.reshape(
data,
newshape=(
int(dshape[0]) // cfactor,
cfactor,
int(dshape[1]) // cfactor,
cfactor,
int(dshape[2]),
int(dshape[3]),
),
)
data = op.transpose(data, axes=(0, 2, 4, 5, 1, 3))
return data
def _pack_weight_conv2d_transpose(data, dshape, cfactor):
"""Pack the weight into packed format."""
dshape = _to_shape(dshape)
assert len(dshape) == 4
assert dshape[0] % cfactor == 0
assert dshape[1] % cfactor == 0
data = op.reshape(
data,
newshape=(
dshape[0] // cfactor,
cfactor,
dshape[1] // cfactor,
cfactor,
dshape[2],
dshape[3],
),
)
data = op.transpose(data, axes=(2, 0, 4, 5, 3, 1))
return data
def _pack_const(data, dshape, dtype, bfactor, cfactor):
"""Pack a constant parameter."""
dshape = _to_shape(dshape)
assert len(dshape) == 3
assert dshape[0] % cfactor == 0
data = op.reshape(data, newshape=(dshape[0] // cfactor, cfactor, dshape[1], dshape[2], 1))
data = op.transpose(data, axes=(0, 2, 3, 4, 1))
# broadcast batch dimension to bfactor
data = op.broadcast_to(
data, shape=(dshape[0] // cfactor, dshape[1], dshape[2], bfactor, cfactor)
)
return data
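# Shape sketch (illustrative): with dshape = (32, 1, 1), bfactor = 2 and
# cfactor = 16, the reshape yields (2, 16, 1, 1, 1), the transpose yields
# (2, 1, 1, 1, 16), and broadcast_to expands the batch axis to give
# (2, 1, 1, 2, 16).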
def _get_tensor_shape(node):
"""Get node shape."""
if isinstance(node.checked_type, relay.ty.TensorType):
return _to_shape(node.checked_type.shape)
return []
def _get_tensor_type(node):
"""Get node type."""
if isinstance(node.checked_type, relay.ty.TensorType):
return node.checked_type.dtype
return "float32"
def _operator_idx_inc(expr, count_meta, operator_current_idx):
"""Increase operator index"""
if isinstance(expr, relay.expr.Constant):
operator_current_idx = operator_current_idx + 1 if count_meta else operator_current_idx
else:
operator_current_idx = operator_current_idx + 1
return operator_current_idx
class ExprPack(ExprMutator):
"""Visitor to perform graph packing on an AST."""
def __init__(self, bfactor, cfactor, weight_bits):
self.bfactor = bfactor
self.cfactor = cfactor
self.weight_bits = weight_bits
self.start_pack = False
# Cache Operator the algorithm matches against.
self.bitpack_start = op.op.get("annotation.bitpack_start")
self.bitpack_end = op.op.get("annotation.bitpack_end")
self.conv2d = op.op.get("nn.conv2d")
self.conv2d_transpose = op.op.get("nn.conv2d_transpose")
self.add = op.op.get("add")
self.multiply = op.op.get("multiply")
self.bias_add = op.op.get("nn.bias_add")
self.pad = op.op.get("nn.pad")
self.upsampling = op.op.get("nn.upsampling")
self.reshape = op.op.get("reshape")
self.number_of_conv2d = 0
super().__init__()
def visit_call(self, call):
""" Visit the children. """
# First visit the children.
oshape = _get_tensor_shape(call)
odtype = _get_tensor_type(call)
input_types = [arg.checked_type for arg in call.args]
args = [self.visit(arg) for arg in call.args]
# Start and stop cases.
if call.op == self.bitpack_start:
assert not self.start_pack
self.start_pack = True
return _pack_batch_channel(args[0], oshape, self.bfactor, self.cfactor)
if call.op == self.bitpack_end:
if self.start_pack:
self.start_pack = False
data = args[0]
data_shape = _get_tensor_shape(call.args[0])
return _unpack_batch_channel(data, data_shape)
if self.start_pack:
# Operator cases
if call.op == self.conv2d and odtype == "int32":
self.number_of_conv2d += 1
assert 8 % self.weight_bits == 0
w_lanes = 8 // self.weight_bits
data_layout = "NCHW%dn%dc" % (self.bfactor, self.cfactor)
kernel_layout = "OIHW%do%di" % (self.cfactor, self.cfactor)
data, weight = args
data_shape = _to_shape(input_types[0].shape)
kernel_shape = _to_shape(input_types[1].shape)
channels = call.attrs.channels
weight, kernel_shape, channels = _weight_shape_match(
weight, kernel_shape, channels, self.cfactor
)
kernel = _pack_weight(weight, kernel_shape, self.cfactor)
# insert bit packing when necessary
if w_lanes != 1:
assert 8 % w_lanes == 0
kernel = op.bitpack(kernel, lanes=w_lanes)
conv2d = op.nn.conv2d(
data,
kernel,
strides=call.attrs.strides,
padding=call.attrs.padding,
dilation=call.attrs.dilation,
groups=call.attrs.groups,
channels=channels,
kernel_size=call.attrs.kernel_size,
data_layout=data_layout,
kernel_layout=kernel_layout,
out_dtype=call.attrs.out_dtype,
)
return conv2d
if call.op == self.conv2d_transpose and odtype == "int32":
self.number_of_conv2d += 1
assert 8 % self.weight_bits == 0
w_lanes = 8 // self.weight_bits
if self.start_pack:
data_layout = "NCHW%dn%dc" % (self.bfactor, self.cfactor)
kernel_layout = "IOHW%di%do" % (self.cfactor, self.cfactor)
data, weight = args
data_shape = _to_shape(input_types[0].shape)
kernel_shape = _to_shape(input_types[1].shape)
channels = call.attrs.channels
weight, kernel_shape, channels = _weight_shape_match_transpose(
weight, kernel_shape, channels, self.cfactor
)
kernel = _pack_weight_conv2d_transpose(weight, kernel_shape, self.cfactor)
conv2d = op.nn.conv2d_transpose(
data,
kernel,
strides=call.attrs.strides,
padding=call.attrs.padding,
dilation=call.attrs.dilation,
groups=call.attrs.groups,
channels=call.attrs.channels,
kernel_size=call.attrs.kernel_size,
data_layout=data_layout,
kernel_layout=kernel_layout,
output_padding=call.attrs.output_padding,
out_dtype=call.attrs.out_dtype,
)
return conv2d
if call.op == self.add and tuple(input_types[0].shape) == tuple(input_types[1].shape):
pass
elif call.op == self.add and len(input_types[1].shape) == 3:
data, const = args
const, input_shape = _const_shape_match(const, input_types[1].shape, self.cfactor)
const = _pack_const(
const, _to_shape(input_shape), input_types[1].dtype, self.bfactor, self.cfactor
)
return relay.Call(self.add, [data, const])
elif call.op == self.multiply and tuple(input_types[0].shape) == tuple(
input_types[1].shape
):
pass
elif call.op == self.multiply and len(input_types[1].shape) == 3:
data, const = args
const = _pack_const(
const,
_to_shape(input_types[1].shape),
input_types[1].dtype,
self.bfactor,
self.cfactor,
)
return relay.Call(self.multiply, [data, const])
elif self.start_pack and call.op == self.bias_add:
data, bias = args
bias = _pack_const(
bias,
_to_shape(input_types[1].shape),
input_types[1].dtype,
self.bfactor,
self.cfactor,
)
return relay.Call(self.add, [data, bias])
elif (
self.start_pack and call.op == op.op.get("cast") and input_types[0].dtype == "int32"
):
cast = relay.Call(op.op.get("cast"), [args[0]], call.attrs)
return relay.Call(op.op.get("copy"), [cast])
elif call.op == self.pad:
pad_width = call.attrs.pad_width
if len(pad_width) == 6:
pass
elif len(pad_width) == 4:
(data,) = args
new_pad_width = []
new_pad_width.extend(pad_width)
for _ in range(2):
new_pad_width.append([0, 0])
return op.nn.pad(data, pad_value=call.attrs.pad_value, pad_width=new_pad_width)
elif call.op == self.upsampling:
(data,) = args
scale_h = call.attrs.scale_h
scale_w = call.attrs.scale_w
data_layout = "NCHW%dn%dc" % (self.bfactor, self.cfactor)
method = call.attrs.method
align_corners = call.attrs.align_corners
return op.nn.upsampling(data, scale_h, scale_w, data_layout, method, align_corners)
elif call.op == self.reshape and len(input_types[0].shape) == 4:
(data,) = args
data = op.transpose(data, axes=(0, 4, 1, 5, 2, 3))
return op.reshape(data, [int(x) for x in input_types[0].shape])
return relay.Call(self.visit(call.op), args, call.attrs)
class BT(Exception):
pass
def get_subgraph(expr, start_name, stop_name, start_name_idx, stop_name_idx, count_meta):
"""We assume stop_name only appears once for simplicity.
This constraint will be lifted in the future.
bitpack_start and bitpack_end are both inclusive.
"""
bitpack_start = op.op.get("annotation.bitpack_start")
bitpack_end = op.op.get("annotation.bitpack_end")
anf = run_opt_pass(expr, transform.ToANormalForm())
operator_current_idx = 0
def _recursion(anf, start_found, stop_found, operator_current_idx):
"""Helper to obtain the subgraph."""
if isinstance(anf, relay.Function):
return relay.Function(
anf.params,
_recursion(anf.body, start_found, stop_found, operator_current_idx),
anf.ret_type,
anf.type_params,
anf.attrs,
)
if isinstance(anf, relay.expr.Let):
value = anf.value
if isinstance(value, relay.expr.Call):
if isinstance(value.op, tvm.ir.Op):
if value.op.name == start_name and not start_found:
if operator_current_idx == start_name_idx or start_name_idx is None:
value = relay.expr.Call(bitpack_start, [value])
start_found = True
elif value.op.name == stop_name:
if operator_current_idx == stop_name_idx or stop_name_idx is None:
raise BT()
operator_current_idx = _operator_idx_inc(value, count_meta, operator_current_idx)
try:
return relay.expr.Let(
anf.var,
value,
_recursion(anf.body, start_found, stop_found, operator_current_idx),
)
except BT:
assert start_found
assert not stop_found
stop_found = True
value = relay.expr.Call(bitpack_end, [value])
# todo: check anf.body has no more stop_name beside that one
return relay.expr.Let(anf.var, value, anf.body)
else:
assert start_found
assert stop_found
return anf
annotated = _recursion(anf, False, False, operator_current_idx)
return run_opt_pass(annotated, transform.ToGraphNormalForm())
def graph_pack(
expr,
bfactor,
cfactor,
weight_bits,
start_name="nn.max_pool2d",
stop_name="nn.global_avg_pool2d",
start_name_idx=None,
stop_name_idx=None,
count_meta=False,
):
"""Pack the graph into batch&channel packed format.
Parameters
----------
expr : relay.Expr
The input program.
bfactor : int
The packing factor in batch
cfactor : int
The packing factor in channel
weight_bits: int
The bit-width of the weights.
    start_name: str, optional
        Start packing from a certain known node when start_name_idx is None.
    stop_name: str, optional
        Stop packing at a certain known node when stop_name_idx is None.
    start_name_idx: int, optional
        When start_name_idx is not None, start packing only when the node name
        equals start_name and the node index equals start_name_idx.
    stop_name_idx: int, optional
        When stop_name_idx is not None, stop packing only when the node name
        equals stop_name and the node index equals stop_name_idx.
    count_meta: boolean, optional
        When count_meta is False, the operator index logic does not count meta
        nodes of type 'relay.expr.Constant'; start_name_idx and stop_name_idx
        then follow the indices from 'expr.astext(show_meta_data=False)'. When
        count_meta is True, meta nodes are counted as well.
Returns
-------
expr : Expr
The transformed expression.
"""
assert isinstance(expr, relay.Function)
assert (start_name != stop_name) or (start_name_idx < stop_name_idx)
expr = get_subgraph(expr, start_name, stop_name, start_name_idx, stop_name_idx, count_meta)
expr = run_opt_pass(expr, transform.InferType())
packer = ExprPack(bfactor, cfactor, weight_bits)
expr = packer.visit(expr)
assert not packer.start_pack
return run_opt_pass(expr, transform.InferType())
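# Usage sketch (illustrative, not part of the original file): assuming `func`
# is a quantized Relay function containing the default anchors
# (nn.max_pool2d and nn.global_avg_pool2d), packing for a VTA-like target with
# batch block 1 and channel block 16 might look like:
#
#   packed_func = graph_pack(
#       func,
#       bfactor=1,       # e.g. env.BATCH on VTA (assumed value)
#       cfactor=16,      # e.g. env.BLOCK_OUT on VTA (assumed value)
#       weight_bits=8,   # e.g. env.WGT_WIDTH on VTA (assumed value)
#   )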
|
sxjscience/tvm
|
vta/python/vta/top/graphpack.py
|
Python
|
apache-2.0
| 18,326
|
[
"VisIt"
] |
2d39d25a80e1d09a90e570cee041a971e0e2c4d78b76d1ab355c1414f26571e9
|
#!/usr/bin/env python
# -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
#
# Copyright (C) 2010 Red Hat, Inc., John (J5) Palmieri <johnp@redhat.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
title = "Combo boxes"
description = """
The ComboBox widget allows the user to select one option out of a list.
The ComboBoxEntry additionally allows the user to enter a value
that is not in the list of options.
How the options are displayed is controlled by cell renderers.
"""
from gi.repository import Gtk, Gdk, GdkPixbuf, GLib, GObject
(PIXBUF_COL,
TEXT_COL) = range(2)
class MaskEntry(Gtk.Entry):
__gtype_name__ = 'MaskEntry'
def __init__(self, mask=None):
self.mask = mask
super(MaskEntry, self).__init__()
self.connect('changed', self.changed_cb)
self.error_color = Gdk.RGBA()
self.error_color.red = 1.0
self.error_color.green = 0.9
        self.error_color.blue = 0.9
self.error_color.alpha = 1.0
# workaround since override_color doesn't accept None yet
style_ctx = self.get_style_context()
self.normal_color = style_ctx.get_color(0)
def set_background(self):
if self.mask:
if not GLib.regex_match_simple(self.mask,
self.get_text(), 0, 0):
self.override_color(0, self.error_color)
return
self.override_color(0, self.normal_color)
def changed_cb(self, entry):
self.set_background()
class ComboboxApp:
def __init__(self, demoapp):
self.demoapp = demoapp
self.window = Gtk.Window()
self.window.set_title('Combo boxes')
self.window.set_border_width(10)
self.window.connect('destroy', lambda w: Gtk.main_quit())
vbox = Gtk.VBox(homogeneous=False, spacing=2)
self.window.add(vbox)
frame = Gtk.Frame(label='Some stock icons')
vbox.pack_start(frame, False, False, 0)
box = Gtk.VBox(homogeneous=False, spacing=0)
box.set_border_width(5)
frame.add(box)
model = self.create_stock_icon_store()
combo = Gtk.ComboBox(model=model)
box.add(combo)
renderer = Gtk.CellRendererPixbuf()
combo.pack_start(renderer, False)
# FIXME: override set_attributes
combo.add_attribute(renderer, 'pixbuf', PIXBUF_COL)
combo.set_cell_data_func(renderer, self.set_sensitive, None)
renderer = Gtk.CellRendererText()
combo.pack_start(renderer, True)
combo.add_attribute(renderer, 'text', TEXT_COL)
combo.set_cell_data_func(renderer, self.set_sensitive, None)
combo.set_row_separator_func(self.is_separator, None)
combo.set_active(0)
# a combobox demonstrating trees
frame = Gtk.Frame(label='Where are we ?')
vbox.pack_start(frame, False, False, 0)
box = Gtk.VBox(homogeneous=False, spacing=0)
box.set_border_width(5)
frame.add(box)
model = self.create_capital_store()
combo = Gtk.ComboBox(model=model)
box.add(combo)
renderer = Gtk.CellRendererText()
combo.pack_start(renderer, True)
combo.add_attribute(renderer, 'text', 0)
        combo.set_cell_data_func(renderer, self.is_capital_sensitive, None)
# FIXME: make new_from_indices work
# make constructor take list or string of indices
path = Gtk.TreePath.new_from_string('0:8')
treeiter = model.get_iter(path)
combo.set_active_iter(treeiter)
# A GtkComboBoxEntry with validation.
frame = Gtk.Frame(label='Editable')
vbox.pack_start(frame, False, False, 0)
box = Gtk.VBox(homogeneous=False, spacing=0)
box.set_border_width(5)
frame.add(box)
combo = Gtk.ComboBoxText.new_with_entry()
self.fill_combo_entry(combo)
box.add(combo)
entry = MaskEntry(mask='^([0-9]*|One|Two|2\302\275|Three)$')
Gtk.Container.remove(combo, combo.get_child())
combo.add(entry)
# A combobox with string IDs
frame = Gtk.Frame(label='String IDs')
vbox.pack_start(frame, False, False, 0)
box = Gtk.VBox(homogeneous=False, spacing=0)
box.set_border_width(5)
frame.add(box)
# FIXME: model is not setup when constructing Gtk.ComboBoxText()
# so we call new() - Gtk should fix this to setup the model
# in __init__, not in the constructor
combo = Gtk.ComboBoxText.new()
combo.append('never', 'Not visible')
combo.append('when-active', 'Visible when active')
combo.append('always', 'Always visible')
box.add(combo)
entry = Gtk.Entry()
combo.bind_property('active-id',
entry, 'text',
GObject.BindingFlags.BIDIRECTIONAL)
box.add(entry)
self.window.show_all()
def strip_underscore(self, s):
return s.replace('_', '')
def create_stock_icon_store(self):
stock_id = (Gtk.STOCK_DIALOG_WARNING,
Gtk.STOCK_STOP,
Gtk.STOCK_NEW,
Gtk.STOCK_CLEAR,
None,
Gtk.STOCK_OPEN)
cellview = Gtk.CellView()
store = Gtk.ListStore(GdkPixbuf.Pixbuf, str)
for id in stock_id:
if id is not None:
pixbuf = cellview.render_icon(id, Gtk.IconSize.BUTTON, None)
item = Gtk.stock_lookup(id)
label = self.strip_underscore(item.label)
store.append((pixbuf, label))
else:
store.append((None, 'separator'))
return store
def set_sensitive(self, cell_layout, cell, tree_model, treeiter, data):
"""
A GtkCellLayoutDataFunc that demonstrates how one can control
sensitivity of rows. This particular function does nothing
useful and just makes the second row insensitive.
"""
path = tree_model.get_path(treeiter)
indices = path.get_indices()
sensitive = not(indices[0] == 1)
cell.set_property('sensitive', sensitive)
def is_separator(self, model, treeiter, data):
"""
A GtkTreeViewRowSeparatorFunc that demonstrates how rows can be
rendered as separators. This particular function does nothing
useful and just turns the fourth row into a separator.
"""
path = model.get_path(treeiter)
indices = path.get_indices()
result = (indices[0] == 4)
return result
def create_capital_store(self):
capitals = (
{'group': 'A - B', 'capital': None},
{'group': None, 'capital': 'Albany'},
{'group': None, 'capital': 'Annapolis'},
{'group': None, 'capital': 'Atlanta'},
{'group': None, 'capital': 'Augusta'},
{'group': None, 'capital': 'Austin'},
{'group': None, 'capital': 'Baton Rouge'},
{'group': None, 'capital': 'Bismarck'},
{'group': None, 'capital': 'Boise'},
{'group': None, 'capital': 'Boston'},
{'group': 'C - D', 'capital': None},
{'group': None, 'capital': 'Carson City'},
{'group': None, 'capital': 'Charleston'},
            {'group': None, 'capital': 'Cheyenne'},
{'group': None, 'capital': 'Columbia'},
{'group': None, 'capital': 'Columbus'},
{'group': None, 'capital': 'Concord'},
{'group': None, 'capital': 'Denver'},
{'group': None, 'capital': 'Des Moines'},
{'group': None, 'capital': 'Dover'},
{'group': 'E - J', 'capital': None},
{'group': None, 'capital': 'Frankfort'},
{'group': None, 'capital': 'Harrisburg'},
{'group': None, 'capital': 'Hartford'},
{'group': None, 'capital': 'Helena'},
{'group': None, 'capital': 'Honolulu'},
{'group': None, 'capital': 'Indianapolis'},
{'group': None, 'capital': 'Jackson'},
{'group': None, 'capital': 'Jefferson City'},
{'group': None, 'capital': 'Juneau'},
{'group': 'K - O', 'capital': None},
{'group': None, 'capital': 'Lansing'},
            {'group': None, 'capital': 'Lincoln'},
{'group': None, 'capital': 'Little Rock'},
{'group': None, 'capital': 'Madison'},
{'group': None, 'capital': 'Montgomery'},
{'group': None, 'capital': 'Montpelier'},
{'group': None, 'capital': 'Nashville'},
{'group': None, 'capital': 'Oklahoma City'},
{'group': None, 'capital': 'Olympia'},
{'group': 'P - S', 'capital': None},
{'group': None, 'capital': 'Phoenix'},
{'group': None, 'capital': 'Pierre'},
{'group': None, 'capital': 'Providence'},
{'group': None, 'capital': 'Raleigh'},
{'group': None, 'capital': 'Richmond'},
{'group': None, 'capital': 'Sacramento'},
{'group': None, 'capital': 'Salem'},
{'group': None, 'capital': 'Salt Lake City'},
{'group': None, 'capital': 'Santa Fe'},
{'group': None, 'capital': 'Springfield'},
{'group': None, 'capital': 'St. Paul'},
{'group': 'T - Z', 'capital': None},
{'group': None, 'capital': 'Tallahassee'},
{'group': None, 'capital': 'Topeka'},
{'group': None, 'capital': 'Trenton'}
)
parent = None
store = Gtk.TreeStore(str)
for item in capitals:
if item['group']:
parent = store.append(None, (item['group'],))
elif item['capital']:
store.append(parent, (item['capital'],))
return store
    def is_capital_sensitive(self, cell_layout, cell, tree_model, treeiter, data):
sensitive = not tree_model.iter_has_child(treeiter)
cell.set_property('sensitive', sensitive)
def fill_combo_entry(self, entry):
entry.append_text('One')
entry.append_text('Two')
entry.append_text('2\302\275')
entry.append_text('Three')
def main(demoapp=None):
ComboboxApp(demoapp)
Gtk.main()
if __name__ == '__main__':
main()
|
davibe/pygobject
|
demos/gtk-demo/demos/combobox.py
|
Python
|
lgpl-2.1
| 11,098
|
[
"COLUMBUS"
] |
37fc134b130a4fc6ed76287b6bc18181b1dfb70cfaff885417e476d9d07bbebe
|
#!/usr/bin/env python
'''
Optimize the geometry of excited states.
Note that when optimizing excited states, states may flip, and this may cause
convergence issues in the geometry optimizer.
'''
from pyscf import gto
from pyscf import scf
from pyscf import ci, tdscf, mcscf
from pyscf import geomopt
mol = gto.Mole()
mol.atom="N; N 1, 1.1"
mol.basis= "6-31g"
mol.build()
mol1 = mol.copy()
mf = scf.RHF(mol).run()
mc = mcscf.CASCI(mf, 4,4)
mc.fcisolver.nstates = 3
excited_grad = mc.nuc_grad_method().as_scanner(state=2)
mol1 = excited_grad.optimizer().kernel()
# or
#mol1 = geomopt.optimize(excited_grad)
td = tdscf.TDHF(mf)
td.nstates = 5
excited_grad = td.nuc_grad_method().as_scanner(state=4)
mol1 = excited_grad.optimizer().kernel()
# or
#mol1 = geomopt.optimize(excited_grad)
myci = ci.CISD(mf)
myci.nstates = 2
excited_grad = myci.nuc_grad_method().as_scanner(state=1)
mol1 = excited_grad.optimizer().kernel()
# or
#geomopt.optimize(excited_grad)
|
gkc1000/pyscf
|
examples/geomopt/12-excited_states.py
|
Python
|
apache-2.0
| 959
|
[
"PySCF"
] |
03f9b4c655301fbf74a8e13b30917677bffe93d9310531cf2ba12064155d2fa9
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Alex Grigorevskiy
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
Main functionality for state-space inference.
"""
import collections # for checking whether a variable is iterable
import types # for checking whether a variable is a function
import numpy as np
import scipy as sp
import scipy.linalg as linalg
import warnings
try:
from . import state_space_setup
setup_available = True
except ImportError as e:
setup_available = False
print_verbose = False
try:
import state_space_cython
cython_code_available = True
if print_verbose:
print("state_space: cython is available")
except ImportError as e:
cython_code_available = False
#cython_code_available = False
# Use cython by default
use_cython = False
if setup_available:
use_cython = state_space_setup.use_cython
if print_verbose:
if use_cython:
print("state_space: cython is used")
else:
print("state_space: cython is NOT used")
# When debugging, an external module can set some value to this variable
# (e.g. 'model') and the variable can then be inspected from within this module.
tmp_buffer = None
class Dynamic_Callables_Python(object):
def f_a(self, k, m, A):
"""
p_a: function (k, x_{k-1}, A_{k}). Dynamic function.
k (iteration number), starts at 0
x_{k-1} State from the previous step
A_{k} Jacobian matrices of f_a. In the linear case it is exactly
A_{k}.
"""
raise NotImplemented("f_a is not implemented!")
def Ak(self, k, m, P): # returns state iteration matrix
"""
function (k, m, P) return Jacobian of dynamic function, it is passed
into p_a.
k (iteration number), starts at 0
m: point where Jacobian is evaluated
P: parameter for Jacobian, usually covariance matrix.
"""
raise NotImplemented("Ak is not implemented!")
def Qk(self, k):
"""
function (k). Returns noise matrix of dynamic model on iteration k.
k (iteration number). starts at 0
"""
raise NotImplemented("Qk is not implemented!")
def Q_srk(self, k):
"""
function (k). Returns the square root of noise matrix of dynamic model
on iteration k.
k (iteration number). starts at 0
This function is implemented to use SVD prediction step.
"""
raise NotImplemented("Q_srk is not implemented!")
def dAk(self, k):
"""
function (k). Returns the derivative of A on iteration k.
k (iteration number). starts at 0
"""
raise NotImplemented("dAk is not implemented!")
def dQk(self, k):
"""
function (k). Returns the derivative of Q on iteration k.
k (iteration number). starts at 0
"""
raise NotImplemented("dQk is not implemented!")
def reset(self, compute_derivatives=False):
"""
Return the state of this object to the beginning of iteration
(to k eq. 0).
"""
raise NotImplemented("reset is not implemented!")
if use_cython:
Dynamic_Callables_Class = state_space_cython.Dynamic_Callables_Cython
else:
Dynamic_Callables_Class = Dynamic_Callables_Python
class Measurement_Callables_Python(object):
def f_h(self, k, m_pred, Hk):
"""
function (k, x_{k}, H_{k}). Measurement function.
k (iteration number), starts at 0
x_{k} state
H_{k} Jacobian matrices of f_h. In the linear case it is exactly
H_{k}.
"""
raise NotImplemented("f_a is not implemented!")
def Hk(self, k, m_pred, P_pred): # returns state iteration matrix
"""
function (k, m, P) return Jacobian of measurement function, it is
passed into p_h.
k (iteration number), starts at 0
m: point where Jacobian is evaluated
P: parameter for Jacobian, usually covariance matrix.
"""
raise NotImplemented("Hk is not implemented!")
def Rk(self, k):
"""
function (k). Returns noise matrix of measurement equation
on iteration k.
k (iteration number). starts at 0
"""
raise NotImplemented("Rk is not implemented!")
def R_isrk(self, k):
"""
function (k). Returns the square root of the noise matrix of
measurement equation on iteration k.
k (iteration number). starts at 0
This function is implemented to use SVD prediction step.
"""
raise NotImplemented("Q_srk is not implemented!")
def dHk(self, k):
"""
function (k). Returns the derivative of H on iteration k.
k (iteration number). starts at 0
"""
raise NotImplemented("dAk is not implemented!")
def dRk(self, k):
"""
function (k). Returns the derivative of R on iteration k.
k (iteration number). starts at 0
"""
raise NotImplemented("dQk is not implemented!")
def reset(self, compute_derivatives=False):
"""
Return the state of this object to the beginning of iteration
(to k eq. 0)
"""
raise NotImplemented("reset is not implemented!")
if use_cython:
Measurement_Callables_Class = state_space_cython.\
Measurement_Callables_Cython
else:
Measurement_Callables_Class = Measurement_Callables_Python
class R_handling_Python(Measurement_Callables_Class):
"""
    The class handles the noise matrix R.
"""
def __init__(self, R, index, R_time_var_index, unique_R_number, dR=None):
"""
Input:
---------------
R - array with noise on various steps. The result of preprocessing
the noise input.
index - for each step of Kalman filter contains the corresponding index
in the array.
R_time_var_index - another index in the array R. Computed earlier and
is passed here.
unique_R_number - number of unique noise matrices below which square
roots are cached and above which they are computed each time.
dR: 3D array[:, :, param_num]
            derivative of R. The derivative is supported only when R does not
            change over time.
Output:
--------------
Object which has two necessary functions:
            Rk(k)
            R_isrk(k)
"""
self.R = R
self.index = np.asarray(index, np.int_)
self.R_time_var_index = int(R_time_var_index)
self.dR = dR
if (len(np.unique(index)) > unique_R_number):
self.svd_each_time = True
else:
self.svd_each_time = False
self.R_square_root = {}
def Rk(self, k):
return self.R[:, :, int(self.index[self.R_time_var_index, k])]
def dRk(self, k):
if self.dR is None:
raise ValueError("dR derivative is None")
        return self.dR # the same derivative on each iteration
def R_isrk(self, k):
"""
Function returns the inverse square root of R matrix on step k.
"""
ind = int(self.index[self.R_time_var_index, k])
R = self.R[:, :, ind]
        if (R.shape[0] == 1):  # 1-D case is handled more simply. No storage
# of the result, just compute it each time.
inv_square_root = np.sqrt(1.0/R)
else:
if self.svd_each_time:
(U, S, Vh) = sp.linalg.svd(R, full_matrices=False,
compute_uv=True, overwrite_a=False,
check_finite=True)
inv_square_root = U * 1.0/np.sqrt(S)
else:
if ind in self.R_square_root:
inv_square_root = self.R_square_root[ind]
else:
(U, S, Vh) = sp.linalg.svd(R, full_matrices=False,
compute_uv=True,
overwrite_a=False,
check_finite=True)
inv_square_root = U * 1.0/np.sqrt(S)
self.R_square_root[ind] = inv_square_root
return inv_square_root
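# Note on R_isrk (illustrative, not part of the original module): for a
# symmetric positive-definite R, the SVD R = U * diag(S) * U.T gives an
# inverse square root L = U * diag(1/sqrt(S)), so that L @ L.T equals inv(R).
# A quick NumPy check of this identity (hypothetical values):
#
#   R = np.array([[2.0, 0.3], [0.3, 1.0]])
#   U, S, _ = np.linalg.svd(R)
#   L = U * (1.0 / np.sqrt(S))
#   assert np.allclose(L @ L.T, np.linalg.inv(R))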
if use_cython:
R_handling_Class = state_space_cython.R_handling_Cython
else:
R_handling_Class = R_handling_Python
class Std_Measurement_Callables_Python(R_handling_Class):
def __init__(self, H, H_time_var_index, R, index, R_time_var_index,
unique_R_number, dH=None, dR=None):
super(Std_Measurement_Callables_Python,
self).__init__(R, index, R_time_var_index, unique_R_number, dR)
self.H = H
self.H_time_var_index = int(H_time_var_index)
self.dH = dH
def f_h(self, k, m, H):
"""
function (k, x_{k}, H_{k}). Measurement function.
k (iteration number), starts at 0
x_{k} state
H_{k} Jacobian matrices of f_h. In the linear case it is exactly
H_{k}.
"""
return np.dot(H, m)
def Hk(self, k, m_pred, P_pred): # returns state iteration matrix
"""
function (k, m, P) return Jacobian of measurement function, it is
passed into p_h.
k (iteration number), starts at 0
m: point where Jacobian is evaluated
P: parameter for Jacobian, usually covariance matrix.
"""
return self.H[:, :, int(self.index[self.H_time_var_index, k])]
def dHk(self, k):
if self.dH is None:
raise ValueError("dH derivative is None")
        return self.dH # the same derivative on each iteration
if use_cython:
Std_Measurement_Callables_Class = state_space_cython.\
Std_Measurement_Callables_Cython
else:
Std_Measurement_Callables_Class = Std_Measurement_Callables_Python
class Q_handling_Python(Dynamic_Callables_Class):
def __init__(self, Q, index, Q_time_var_index, unique_Q_number, dQ=None):
"""
Input:
---------------
        Q - array with noise on various steps. The result of preprocessing
            the noise input.
        index - for each step of Kalman filter contains the corresponding index
            in the array.
        Q_time_var_index - another index in the array Q. Computed earlier and
            passed here.
        unique_Q_number - number of unique noise matrices below which square
            roots are cached and above which they are computed each time.
        dQ: 3D array[:, :, param_num]
            derivative of Q. The derivative is supported only when Q does not
            change over time.
        Output:
        --------------
        Object which provides two necessary functions:
            Qk(k)
            Q_srk(k)
"""
self.Q = Q
self.index = np.asarray(index, np.int_)
self.Q_time_var_index = Q_time_var_index
self.dQ = dQ
if (len(np.unique(index)) > unique_Q_number):
self.svd_each_time = True
else:
self.svd_each_time = False
self.Q_square_root = {}
def Qk(self, k):
"""
function (k). Returns noise matrix of dynamic model on iteration k.
k (iteration number). starts at 0
"""
return self.Q[:, :, self.index[self.Q_time_var_index, k]]
def dQk(self, k):
if self.dQ is None:
raise ValueError("dQ derivative is None")
        return self.dQ # the same derivative on each iteration
def Q_srk(self, k):
"""
function (k). Returns the square root of noise matrix of dynamic model
on iteration k.
k (iteration number). starts at 0
This function is implemented to use SVD prediction step.
"""
ind = self.index[self.Q_time_var_index, k]
Q = self.Q[:, :, ind]
        if (Q.shape[0] == 1):  # 1-D case is handled more simply. No storage
# of the result, just compute it each time.
square_root = np.sqrt(Q)
else:
if self.svd_each_time:
(U, S, Vh) = sp.linalg.svd(Q, full_matrices=False,
compute_uv=True,
overwrite_a=False,
check_finite=True)
square_root = U * np.sqrt(S)
else:
if ind in self.Q_square_root:
square_root = self.Q_square_root[ind]
else:
(U, S, Vh) = sp.linalg.svd(Q, full_matrices=False,
compute_uv=True,
overwrite_a=False,
check_finite=True)
square_root = U * np.sqrt(S)
self.Q_square_root[ind] = square_root
return square_root
if use_cython:
Q_handling_Class = state_space_cython.Q_handling_Cython
else:
Q_handling_Class = Q_handling_Python
class Std_Dynamic_Callables_Python(Q_handling_Class):
def __init__(self, A, A_time_var_index, Q, index, Q_time_var_index,
unique_Q_number, dA=None, dQ=None):
super(Std_Dynamic_Callables_Python,
self).__init__(Q, index, Q_time_var_index, unique_Q_number, dQ)
self.A = A
self.A_time_var_index = np.asarray(A_time_var_index, np.int_)
self.dA = dA
def f_a(self, k, m, A):
"""
f_a: function (k, x_{k-1}, A_{k}). Dynamic function.
k (iteration number), starts at 0
x_{k-1} State from the previous step
A_{k} Jacobian matrices of f_a. In the linear case it is exactly
A_{k}.
"""
return np.dot(A, m)
def Ak(self, k, m_pred, P_pred): # returns state iteration matrix
"""
function (k, m, P) return Jacobian of measurement function, it is
passed into p_h.
k (iteration number), starts at 0
m: point where Jacobian is evaluated
P: parameter for Jacobian, usually covariance matrix.
"""
return self.A[:, :, self.index[self.A_time_var_index, k]]
def dAk(self, k):
if self.dA is None:
raise ValueError("dA derivative is None")
        return self.dA # the same derivative on each iteration
def reset(self, compute_derivatives=False):
"""
Return the state of this object to the beginning of iteration
(to k eq. 0)
"""
return self
if use_cython:
Std_Dynamic_Callables_Class = state_space_cython.\
Std_Dynamic_Callables_Cython
else:
Std_Dynamic_Callables_Class = Std_Dynamic_Callables_Python
class AddMethodToClass(object):
def __init__(self, func=None, tp='staticmethod'):
"""
Input:
--------------
func: function to add
tp: string
Type of the method: normal, staticmethod, classmethod
"""
if func is None:
raise ValueError("Function can not be None")
self.func = func
self.tp = tp
def __get__(self, obj, klass=None, *args, **kwargs):
if self.tp == 'staticmethod':
return self.func
elif self.tp == 'normal':
def newfunc(obj, *args, **kwargs):
return self.func
elif self.tp == 'classmethod':
def newfunc(klass, *args, **kwargs):
return self.func
return newfunc
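# Usage sketch (illustrative): AddMethodToClass is used below by
# DescreteStateSpaceMeta to graft the cython implementations onto the class as
# static methods. A hypothetical standalone use would look like:
#
#   class SomeFilter(object):
#       _prediction_step = AddMethodToClass(some_prediction_function)
#
# Accessing SomeFilter._prediction_step then returns some_prediction_function
# directly, because the default tp is 'staticmethod'.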
class DescreteStateSpaceMeta(type):
"""
Substitute necessary methods from cython.
"""
def __new__(typeclass, name, bases, attributes):
"""
        After this method the class object is created.
"""
if use_cython:
if '_kalman_prediction_step_SVD' in attributes:
attributes['_kalman_prediction_step_SVD'] =\
AddMethodToClass(state_space_cython.
_kalman_prediction_step_SVD_Cython)
if '_kalman_update_step_SVD' in attributes:
attributes['_kalman_update_step_SVD'] =\
AddMethodToClass(state_space_cython.
_kalman_update_step_SVD_Cython)
if '_cont_discr_kalman_filter_raw' in attributes:
attributes['_cont_discr_kalman_filter_raw'] =\
AddMethodToClass(state_space_cython.
_cont_discr_kalman_filter_raw_Cython)
return super(DescreteStateSpaceMeta,
typeclass).__new__(typeclass, name, bases, attributes)
class DescreteStateSpace(object):
"""
    This class implements state-space inference for linear and non-linear
state-space models.
Linear models are:
x_{k} = A_{k} * x_{k-1} + q_{k-1}; q_{k-1} ~ N(0, Q_{k-1})
y_{k} = H_{k} * x_{k} + r_{k}; r_{k-1} ~ N(0, R_{k})
Nonlinear:
x_{k} = f_a(k, x_{k-1}, A_{k}) + q_{k-1}; q_{k-1} ~ N(0, Q_{k-1})
y_{k} = f_h(k, x_{k}, H_{k}) + r_{k}; r_{k-1} ~ N(0, R_{k})
Here f_a and f_h are some functions of k (iteration number), x_{k-1} or
x_{k} (state value on certain iteration), A_{k} and H_{k} - Jacobian
matrices of f_a and f_h respectively. In the linear case they are exactly
A_{k} and H_{k}.
Currently two nonlinear Gaussian filter algorithms are implemented:
Extended Kalman Filter (EKF), Statistically linearized Filter (SLF), which
implementations are very similar.
"""
__metaclass__ = DescreteStateSpaceMeta
@staticmethod
def _reshape_input_data(shape, desired_dim=3):
"""
        Static function that returns the column-wise shape for an input shape.
Input:
--------------
shape: tuple
Shape of an input array, so that it is always a column.
desired_dim: int
desired shape of output. For Y data it should be 3
(sample_no, dimension, ts_no). For X data - 2 (sample_no, 1)
Output:
--------------
new_shape: tuple
New shape of the measurements array. Idea is that samples are
along dimension 0, sample dimension - dimension 1, different
time series - dimension 2.
old_shape: tuple or None
If the shape has been modified, return old shape, otherwise
None.
"""
if (len(shape) > 3):
raise ValueError("""Input array is not supposed to be more
than 3 dimensional.""")
if (len(shape) > desired_dim):
raise ValueError("Input array shape is more than desired shape.")
elif len(shape) == 1:
if (desired_dim == 3):
                return ((shape[0], 1, 1), shape)  # last dimension is the
                # number of time series
elif (desired_dim == 2):
return ((shape[0], 1), shape)
elif len(shape) == 2:
if (desired_dim == 3):
return ((shape[1], 1, 1), shape) if (shape[0] == 1) else\
((shape[0], shape[1], 1), shape) # convert to column
# vector
elif (desired_dim == 2):
return ((shape[1], 1), shape) if (shape[0] == 1) else\
((shape[0], shape[1]), None) # convert to column vector
else: # len(shape) == 3
return (shape, None) # do nothing
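    # Shape sketch (illustrative): with desired_dim=3 the mapping is
    #   (100,)      -> ((100, 1, 1), (100,))
    #   (1, 100)    -> ((100, 1, 1), (1, 100))
    #   (100, 2)    -> ((100, 2, 1), (100, 2))
    #   (100, 2, 3) -> ((100, 2, 3), None)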
@classmethod
def kalman_filter(cls, p_A, p_Q, p_H, p_R, Y, index=None, m_init=None,
P_init=None, p_kalman_filter_type='regular',
calc_log_likelihood=False,
calc_grad_log_likelihood=False, grad_params_no=None,
grad_calc_params=None):
"""
This function implements the basic Kalman Filter algorithm
These notations for the State-Space model are assumed:
x_{k} = A_{k} * x_{k-1} + q_{k-1}; q_{k-1} ~ N(0, Q_{k-1})
y_{k} = H_{k} * x_{k} + r_{k}; r_{k-1} ~ N(0, R_{k})
Returns estimated filter distributions x_{k} ~ N(m_{k}, P(k))
Current Features:
----------------------------------------
        1) The function generally does not modify the passed parameters. If
        it happens then it is an error. There are several exceptions: scalars
        can be modified into a matrix, and in some rare cases the shapes of
        the derivative matrices may be changed; this is ignored for now.
2) Copies of p_A, p_Q, index are created in memory to be used later
in smoother. References to copies are kept in "matrs_for_smoother"
return parameter.
3) Function support "multiple time series mode" which means that exactly
the same State-Space model is used to filter several sets of measurements.
In this case third dimension of Y should include these state-space measurements
Log_likelihood and Grad_log_likelihood have the corresponding dimensions then.
4) Calculation of Grad_log_likelihood is not supported if matrices A,Q,
H, or R changes over time. (later may be changed)
5) Measurement may include missing values. In this case update step is
not done for this measurement. (later may be changed)
Input:
-----------------
p_A: scalar, square matrix, 3D array
A_{k} in the model. If matrix then A_{k} = A - constant.
If it is 3D array then A_{k} = p_A[:,:, index[0,k]]
p_Q: scalar, square symmetric matrix, 3D array
Q_{k-1} in the model. If matrix then Q_{k-1} = Q - constant.
If it is 3D array then Q_{k-1} = p_Q[:,:, index[1,k]]
p_H: scalar, matrix (measurement_dim, state_dim) , 3D array
H_{k} in the model. If matrix then H_{k} = H - constant.
If it is 3D array then H_{k} = p_Q[:,:, index[2,k]]
p_R: scalar, square symmetric matrix, 3D array
R_{k} in the model. If matrix then R_{k} = R - constant.
If it is 3D array then R_{k} = p_R[:,:, index[3,k]]
Y: matrix or vector or 3D array
Data. If Y is matrix then samples are along 0-th dimension and
features along the 1-st. If 3D array then third dimension
correspond to "multiple time series mode".
index: vector
Which indices (on 3-rd dimension) from arrays p_A, p_Q,p_H, p_R to use
on every time step. If this parameter is None then it is assumed
that p_A, p_Q, p_H, p_R do not change over time and indices are not needed.
index[0,:] - correspond to A, index[1,:] - correspond to Q
index[2,:] - correspond to H, index[3,:] - correspond to R.
            If index.shape[0] == 1, it is assumed that indices for all matrices
are the same.
m_init: vector or matrix
Initial distribution mean. If None it is assumed to be zero.
For "multiple time series mode" it is matrix, second dimension of
which correspond to different time series. In regular case ("one
time series mode") it is a vector.
P_init: square symmetric matrix or scalar
Initial covariance of the states. If the parameter is scalar
then it is assumed that initial covariance matrix is unit matrix
multiplied by this scalar. If None the unit matrix is used instead.
"multiple time series mode" does not affect it, since it does not
            affect anything related to state variances.
calc_log_likelihood: boolean
Whether to calculate marginal likelihood of the state-space model.
calc_grad_log_likelihood: boolean
Whether to calculate gradient of the marginal likelihood
of the state-space model. If true then "grad_calc_params" parameter must
provide the extra parameters for gradient calculation.
grad_params_no: int
If previous parameter is true, then this parameters gives the
total number of parameters in the gradient.
grad_calc_params: dictionary
Dictionary with derivatives of model matrices with respect
to parameters "dA", "dQ", "dH", "dR", "dm_init", "dP_init".
They can be None, in this case zero matrices (no dependence on parameters)
is assumed. If there is only one parameter then third dimension is
automatically added.
Output:
--------------
M: (no_steps+1,state_dim) matrix or (no_steps+1,state_dim, time_series_no) 3D array
Filter estimates of the state means. In the extra step the initial
value is included. In the "multiple time series mode" third dimension
correspond to different timeseries.
P: (no_steps+1, state_dim, state_dim) 3D array
Filter estimates of the state covariances. In the extra step the initial
value is included.
log_likelihood: double or (1, time_series_no) 3D array.
If the parameter calc_log_likelihood was set to true, return
logarithm of marginal likelihood of the state-space model. If
the parameter was false, return None. In the "multiple time series mode" it is a vector
providing log_likelihood for each time series.
grad_log_likelihood: column vector or (grad_params_no, time_series_no) matrix
If calc_grad_log_likelihood is true, return gradient of log likelihood
with respect to parameters. It returns it column wise, so in
"multiple time series mode" gradients for each time series is in the
corresponding column.
matrs_for_smoother: dict
Dictionary with model functions for smoother. The intrinsic model
functions are computed in this functions and they are returned to
use in smoother for convenience. They are: 'p_a', 'p_f_A', 'p_f_Q'
The dictionary contains the same fields.
"""
#import pdb; pdb.set_trace()
# Parameters checking ->
# index
p_A = np.atleast_1d(p_A)
p_Q = np.atleast_1d(p_Q)
p_H = np.atleast_1d(p_H)
p_R = np.atleast_1d(p_R)
# Reshape and check measurements:
Y.shape, old_Y_shape = cls._reshape_input_data(Y.shape)
measurement_dim = Y.shape[1]
time_series_no = Y.shape[2] # multiple time series mode
        if ((len(p_A.shape) == 3) and (p_A.shape[2] != 1)) or\
           ((len(p_Q.shape) == 3) and (p_Q.shape[2] != 1)) or\
           ((len(p_H.shape) == 3) and (p_H.shape[2] != 1)) or\
           ((len(p_R.shape) == 3) and (p_R.shape[2] != 1)):
            model_matrices_change_with_time = True
else:
            model_matrices_change_with_time = False
# Check index
old_index_shape = None
if index is None:
if (len(p_A.shape) == 3) or (len(p_Q.shape) == 3) or\
(len(p_H.shape) == 3) or (len(p_R.shape) == 3):
raise ValueError("Parameter index can not be None for time varying matrices (third dimension is present)")
else: # matrices do not change in time, so form dummy zero indices.
index = np.zeros((1,Y.shape[0]))
else:
if len(index.shape) == 1:
index.shape = (1,index.shape[0])
old_index_shape = (index.shape[0],)
if (index.shape[1] != Y.shape[0]):
raise ValueError("Number of measurements must be equal the number of A_{k}, Q_{k}, H_{k}, R_{k}")
if (index.shape[0] == 1):
A_time_var_index = 0; Q_time_var_index = 0
H_time_var_index = 0; R_time_var_index = 0
elif (index.shape[0] == 4):
A_time_var_index = 0; Q_time_var_index = 1
H_time_var_index = 2; R_time_var_index = 3
else:
raise ValueError("First Dimension of index must be either 1 or 4.")
state_dim = p_A.shape[0]
# Check and make right shape for model matrices. On exit they all are 3 dimensional. Last dimension
# correspond to change in time.
(p_A, old_A_shape) = cls._check_SS_matrix(p_A, state_dim, measurement_dim, which='A')
(p_Q, old_Q_shape) = cls._check_SS_matrix(p_Q, state_dim, measurement_dim, which='Q')
(p_H, old_H_shape) = cls._check_SS_matrix(p_H, state_dim, measurement_dim, which='H')
(p_R, old_R_shape) = cls._check_SS_matrix(p_R, state_dim, measurement_dim, which='R')
# m_init
if m_init is None:
m_init = np.zeros((state_dim, time_series_no))
else:
m_init = np.atleast_2d(m_init).T
# P_init
if P_init is None:
P_init = np.eye(state_dim)
elif not isinstance(P_init, collections.Iterable): #scalar
P_init = P_init*np.eye(state_dim)
if p_kalman_filter_type not in ('regular', 'svd'):
raise ValueError("Kalman filer type neither 'regular nor 'svd'.")
# Functions to pass to the kalman_filter algorithm:
# Parameters:
# k - number of Kalman filter iteration
# m - vector for calculating matrices. Required for EKF. Not used here.
c_p_A = p_A.copy() # create a copy because this object is passed to the smoother
c_p_Q = p_Q.copy() # create a copy because this object is passed to the smoother
c_index = index.copy() # create a copy because this object is passed to the smoother
if calc_grad_log_likelihood:
            if model_matrices_change_with_time:
raise ValueError("When computing likelihood gradient A and Q can not change over time.")
dA = cls._check_grad_state_matrices(grad_calc_params.get('dA'), state_dim, grad_params_no, which = 'dA')
dQ = cls._check_grad_state_matrices(grad_calc_params.get('dQ'), state_dim, grad_params_no, which = 'dQ')
dH = cls._check_grad_measurement_matrices(grad_calc_params.get('dH'), state_dim, grad_params_no, measurement_dim, which = 'dH')
dR = cls._check_grad_measurement_matrices(grad_calc_params.get('dR'), state_dim, grad_params_no, measurement_dim, which = 'dR')
dm_init = grad_calc_params.get('dm_init')
if dm_init is None:
# multiple time series mode. Keep grad_params always as a last dimension
dm_init = np.zeros((state_dim, time_series_no, grad_params_no))
dP_init = grad_calc_params.get('dP_init')
if dP_init is None:
dP_init = np.zeros((state_dim,state_dim,grad_params_no))
else:
dA = None
dQ = None
dH = None
dR = None
dm_init = None
dP_init = None
dynamic_callables = Std_Dynamic_Callables_Class(c_p_A, A_time_var_index, c_p_Q, c_index, Q_time_var_index, 20, dA, dQ)
measurement_callables = Std_Measurement_Callables_Class(p_H, H_time_var_index, p_R, index, R_time_var_index, 20, dH, dR)
(M, P,log_likelihood, grad_log_likelihood, dynamic_callables) = \
cls._kalman_algorithm_raw(state_dim, dynamic_callables,
measurement_callables, Y, m_init,
P_init, p_kalman_filter_type = p_kalman_filter_type,
calc_log_likelihood=calc_log_likelihood,
calc_grad_log_likelihood=calc_grad_log_likelihood,
grad_params_no=grad_params_no,
dm_init=dm_init, dP_init=dP_init)
        # restore shapes so that input parameters are unchanged
if old_index_shape is not None:
index.shape = old_index_shape
if old_Y_shape is not None:
Y.shape = old_Y_shape
if old_A_shape is not None:
p_A.shape = old_A_shape
if old_Q_shape is not None:
p_Q.shape = old_Q_shape
if old_H_shape is not None:
p_H.shape = old_H_shape
if old_R_shape is not None:
p_R.shape = old_R_shape
# Return values
return (M, P,log_likelihood, grad_log_likelihood, dynamic_callables)
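    # Usage sketch (illustrative, not part of the original module): for a 1-D
    # random-walk model x_k = x_{k-1} + q_k, y_k = x_k + r_k, the filter could
    # be called roughly as follows (hypothetical values):
    #
    #   Y = np.random.randn(100, 1)
    #   M, P, log_lik, grad_log_lik, _ = DescreteStateSpace.kalman_filter(
    #       p_A=1.0, p_Q=0.1, p_H=1.0, p_R=0.5, Y=Y,
    #       calc_log_likelihood=True)
    #   # M has shape (101, 1, 1): filtered means with the initial value
    #   # included as the first entry.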
@classmethod
def extended_kalman_filter(cls,p_state_dim, p_a, p_f_A, p_f_Q, p_h, p_f_H, p_f_R, Y, m_init=None,
P_init=None,calc_log_likelihood=False):
"""
Extended Kalman Filter
Input:
-----------------
p_state_dim: integer
p_a: if None - the function from the linear model is assumed. No non-
linearity in the dynamic is assumed.
function (k, x_{k-1}, A_{k}). Dynamic function.
k: (iteration number),
x_{k-1}: (previous state)
x_{k}: Jacobian matrices of f_a. In the linear case it is exactly A_{k}.
p_f_A: matrix - in this case function which returns this matrix is assumed.
Look at this parameter description in kalman_filter function.
function (k, m, P) return Jacobian of dynamic function, it is
passed into p_a.
k: (iteration number),
m: point where Jacobian is evaluated
P: parameter for Jacobian, usually covariance matrix.
        p_f_Q: matrix. In this case a function which returns this matrix is assumed.
Look at this parameter description in kalman_filter function.
function (k). Returns noise matrix of dynamic model on iteration k.
k: (iteration number).
p_h: if None - the function from the linear measurement model is assumed.
No nonlinearity in the measurement is assumed.
function (k, x_{k}, H_{k}). Measurement function.
k: (iteration number),
x_{k}: (current state)
H_{k}: Jacobian matrices of f_h. In the linear case it is exactly H_{k}.
p_f_H: matrix - in this case function which returns this matrix is assumed.
function (k, m, P) return Jacobian of dynamic function, it is
passed into p_h.
k: (iteration number),
m: point where Jacobian is evaluated
P: parameter for Jacobian, usually covariance matrix.
        p_f_R: matrix. In this case a function which returns this matrix is assumed.
function (k). Returns noise matrix of measurement equation
on iteration k.
k: (iteration number).
Y: matrix or vector
Data. If Y is matrix then samples are along 0-th dimension and
features along the 1-st. May have missing values.
p_mean: vector
Initial distribution mean. If None it is assumed to be zero
P_init: square symmetric matrix or scalar
Initial covariance of the states. If the parameter is scalar
then it is assumed that initial covariance matrix is unit matrix
multiplied by this scalar. If None the unit matrix is used instead.
calc_log_likelihood: boolean
Whether to calculate marginal likelihood of the state-space model.
"""
# Y
Y.shape, old_Y_shape = cls._reshape_input_data(Y.shape)
# m_init
if m_init is None:
m_init = np.zeros((p_state_dim,1))
else:
m_init = np.atleast_2d(m_init).T
# P_init
if P_init is None:
P_init = np.eye(p_state_dim)
elif not isinstance(P_init, collections.Iterable): #scalar
P_init = P_init*np.eye(p_state_dim)
if p_a is None:
p_a = lambda k,m,A: np.dot(A, m)
old_A_shape = None
if not isinstance(p_f_A, types.FunctionType): # not a function but array
p_f_A = np.atleast_1d(p_f_A)
(p_A, old_A_shape) = cls._check_A_matrix(p_f_A)
p_f_A = lambda k, m, P: p_A[:,:, 0] # make function
else:
if p_f_A(1, m_init, P_init).shape[0] != m_init.shape[0]:
raise ValueError("p_f_A function returns matrix of wrong size")
old_Q_shape = None
if not isinstance(p_f_Q, types.FunctionType): # not a function but array
p_f_Q = np.atleast_1d(p_f_Q)
(p_Q, old_Q_shape) = cls._check_Q_matrix(p_f_Q)
p_f_Q = lambda k: p_Q[:,:, 0] # make function
else:
if p_f_Q(1).shape[0] != m_init.shape[0]:
raise ValueError("p_f_Q function returns matrix of wrong size")
if p_h is None:
            p_h = lambda k, m, H: np.dot(H, m)
old_H_shape = None
if not isinstance(p_f_H, types.FunctionType): # not a function but array
p_f_H = np.atleast_1d(p_f_H)
(p_H, old_H_shape) = cls._check_H_matrix(p_f_H)
p_f_H = lambda k, m, P: p_H # make function
else:
if p_f_H(1, m_init, P_init).shape[0] != Y.shape[1]:
raise ValueError("p_f_H function returns matrix of wrong size")
old_R_shape = None
if not isinstance(p_f_R, types.FunctionType): # not a function but array
p_f_R = np.atleast_1d(p_f_R)
(p_R, old_R_shape) = cls._check_H_matrix(p_f_R)
p_f_R = lambda k: p_R # make function
else:
if p_f_R(1).shape[0] != m_init.shape[0]:
raise ValueError("p_f_R function returns matrix of wrong size")
# class dynamic_callables_class(Dynamic_Model_Callables):
#
# Ak =
# Qk =
class measurement_callables_class(R_handling_Class):
def __init__(self,R, index, R_time_var_index, unique_R_number):
super(measurement_callables_class,self).__init__(R, index, R_time_var_index, unique_R_number)
Hk = AddMethodToClass(f_H)
f_h = AddMethodToClass(f_hl)
(M, P,log_likelihood, grad_log_likelihood) = cls._kalman_algorithm_raw(p_state_dim, p_a, p_f_A, p_f_Q, p_h, p_f_H, p_f_R, Y, m_init,
P_init, calc_log_likelihood,
calc_grad_log_likelihood=False, grad_calc_params=None)
if old_Y_shape is not None:
Y.shape = old_Y_shape
if old_A_shape is not None:
p_A.shape = old_A_shape
if old_Q_shape is not None:
p_Q.shape = old_Q_shape
if old_H_shape is not None:
p_H.shape = old_H_shape
if old_R_shape is not None:
p_R.shape = old_R_shape
return (M, P)
@classmethod
def _kalman_algorithm_raw(cls,state_dim, p_dynamic_callables, p_measurement_callables, Y, m_init,
P_init, p_kalman_filter_type='regular',
calc_log_likelihood=False,
calc_grad_log_likelihood=False, grad_params_no=None,
dm_init=None, dP_init=None):
"""
General nonlinear filtering algorithm for inference in the state-space
model:
x_{k} = f_a(k, x_{k-1}, A_{k}) + q_{k-1}; q_{k-1} ~ N(0, Q_{k-1})
y_{k} = f_h(k, x_{k}, H_{k}) + r_{k}; r_{k-1} ~ N(0, R_{k})
Returns estimated filter distributions x_{k} ~ N(m_{k}, P(k))
Current Features:
----------------------------------------
1) Function support "multiple time series mode" which means that exactly
the same State-Space model is used to filter several sets of measurements.
In this case third dimension of Y should include these state-space measurements
Log_likelihood and Grad_log_likelihood have the corresponding dimensions then.
2) Measurement may include missing values. In this case update step is
not done for this measurement. (later may be changed)
Input:
-----------------
state_dim: int
            Dimensionality of the states.
p_a: function (k, x_{k-1}, A_{k}). Dynamic function.
k (iteration number),
x_{k-1}
A_{k} Jacobian matrices of f_a. In the linear case it is exactly A_{k}.
p_f_A: function (k, m, P) return Jacobian of dynamic function, it is
passed into p_a.
k (iteration number),
m: point where Jacobian is evaluated
P: parameter for Jacobian, usually covariance matrix.
p_f_Q: function (k). Returns noise matrix of dynamic model on iteration k.
k (iteration number).
p_h: function (k, x_{k}, H_{k}). Measurement function.
k (iteration number),
x_{k}
H_{k} Jacobian matrices of f_h. In the linear case it is exactly H_{k}.
p_f_H: function (k, m, P) return Jacobian of dynamic function, it is
passed into p_h.
k (iteration number),
m: point where Jacobian is evaluated
P: parameter for Jacobian, usually covariance matrix.
p_f_R: function (k). Returns noise matrix of measurement equation
on iteration k.
k (iteration number).
Y: matrix or vector or 3D array
Data. If Y is matrix then samples are along 0-th dimension and
features along the 1-st. If 3D array then third dimension
correspond to "multiple time series mode".
m_init: vector or matrix
Initial distribution mean. For "multiple time series mode"
it is matrix, second dimension of which correspond to different
time series. In regular case ("one time series mode") it is a
vector.
P_init: matrix or scalar
Initial covariance of the states. Must be not None
"multiple time series mode" does not affect it, since it does not
            affect anything related to state variances.
p_kalman_filter_type: string
calc_log_likelihood: boolean
Whether to calculate marginal likelihood of the state-space model.
calc_grad_log_likelihood: boolean
Whether to calculate gradient of the marginal likelihood
of the state-space model. If true then the next parameter must
provide the extra parameters for gradient calculation.
grad_calc_params: dictionary
Dictionary with derivatives of model matrices with respect
to parameters "dA", "dQ", "dH", "dR", "dm_init", "dP_init".
Output:
--------------
M: (no_steps+1,state_dim) matrix or (no_steps+1,state_dim, time_series_no) 3D array
Filter estimates of the state means. In the extra step the initial
value is included. In the "multiple time series mode" third dimension
correspond to different timeseries.
P: (no_steps+1, state_dim, state_dim) 3D array
Filter estimates of the state covariances. In the extra step the initial
value is included.
log_likelihood: double or (1, time_series_no) 3D array.
If the parameter calc_log_likelihood was set to true, return
logarithm of marginal likelihood of the state-space model. If
the parameter was false, return None. In the "multiple time series mode" it is a vector
providing log_likelihood for each time series.
grad_log_likelihood: column vector or (grad_params_no, time_series_no) matrix
If calc_grad_log_likelihood is true, return gradient of log likelihood
with respect to parameters. It returns it column wise, so in
"multiple time series mode" gradients for each time series is in the
corresponding column.
"""
steps_no = Y.shape[0] # number of steps in the Kalman Filter
time_series_no = Y.shape[2] # multiple time series mode
# Allocate space for results
# Mean estimations. Initial values will be included
M = np.empty(((steps_no+1),state_dim,time_series_no))
M[0,:,:] = m_init # Initialize mean values
# Variance estimations. Initial values will be included
P = np.empty(((steps_no+1),state_dim,state_dim))
        P_init = 0.5*( P_init + P_init.T) # symmetrize initial covariance. In some unstable cases this is useful
P[0,:,:] = P_init # Initialize initial covariance matrix
if p_kalman_filter_type == 'svd':
(U,S,Vh) = sp.linalg.svd( P_init,full_matrices=False, compute_uv=True,
overwrite_a=False,check_finite=True)
            S[ (S==0) ] = 1e-17 # allows running the algorithm with a singular initial variance
P_upd = (P_init, S,U)
log_likelihood = 0 if calc_log_likelihood else None
grad_log_likelihood = 0 if calc_grad_log_likelihood else None
#setting initial values for derivatives update
dm_upd = dm_init
dP_upd = dP_init
# Main loop of the Kalman filter
for k in range(0,steps_no):
# In this loop index for new estimations is (k+1), old - (k)
# This happened because initial values are stored at 0-th index.
prev_mean = M[k,:,:] # mean from the previous step
if p_kalman_filter_type == 'svd':
m_pred, P_pred, dm_pred, dP_pred = \
cls._kalman_prediction_step_SVD(k, prev_mean ,P_upd, p_dynamic_callables,
calc_grad_log_likelihood=calc_grad_log_likelihood,
p_dm = dm_upd, p_dP = dP_upd)
else:
m_pred, P_pred, dm_pred, dP_pred = \
cls._kalman_prediction_step(k, prev_mean ,P[k,:,:], p_dynamic_callables,
calc_grad_log_likelihood=calc_grad_log_likelihood,
p_dm = dm_upd, p_dP = dP_upd )
k_measurment = Y[k,:,:]
if (np.any(np.isnan(k_measurment)) == False):
if p_kalman_filter_type == 'svd':
m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
cls._kalman_update_step_SVD(k, m_pred , P_pred, p_measurement_callables,
k_measurment, calc_log_likelihood=calc_log_likelihood,
calc_grad_log_likelihood=calc_grad_log_likelihood,
p_dm = dm_pred, p_dP = dP_pred )
# m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
# cls._kalman_update_step(k, m_pred , P_pred[0], f_h, f_H, p_R.f_R, k_measurment,
# calc_log_likelihood=calc_log_likelihood,
# calc_grad_log_likelihood=calc_grad_log_likelihood,
# p_dm = dm_pred, p_dP = dP_pred, grad_calc_params_2 = (dH, dR))
#
# (U,S,Vh) = sp.linalg.svd( P_upd,full_matrices=False, compute_uv=True,
# overwrite_a=False,check_finite=True)
# P_upd = (P_upd, S,U)
else:
m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
cls._kalman_update_step(k, m_pred , P_pred, p_measurement_callables, k_measurment,
calc_log_likelihood=calc_log_likelihood,
calc_grad_log_likelihood=calc_grad_log_likelihood,
p_dm = dm_pred, p_dP = dP_pred )
else:
# if k_measurment.shape != (1,1):
# raise ValueError("Nan measurements are currently not supported for \
# multidimensional output and multiple time series.")
# else:
# m_upd = m_pred; P_upd = P_pred; dm_upd = dm_pred; dP_upd = dP_pred
# log_likelihood_update = 0.0;
# d_log_likelihood_update = 0.0;
if not np.all(np.isnan(k_measurment)):
raise ValueError("""Nan measurements are currently not supported if
they are intermixed with not NaN measurements""")
else:
m_upd = m_pred; P_upd = P_pred; dm_upd = dm_pred; dP_upd = dP_pred
if calc_log_likelihood:
log_likelihood_update = np.zeros((time_series_no,))
if calc_grad_log_likelihood:
d_log_likelihood_update = np.zeros((grad_params_no,time_series_no))
if calc_log_likelihood:
log_likelihood += log_likelihood_update
if calc_grad_log_likelihood:
grad_log_likelihood += d_log_likelihood_update
M[k+1,:,:] = m_upd # separate mean value for each time series
if p_kalman_filter_type == 'svd':
P[k+1,:,:] = P_upd[0]
else:
P[k+1,:,:] = P_upd
# !!!Print statistics! Print sizes of matrices
# !!!Print statistics! Print iteration time base on another boolean variable
return (M, P, log_likelihood, grad_log_likelihood, p_dynamic_callables.reset(False))
@staticmethod
def _kalman_prediction_step(k, p_m , p_P, p_dyn_model_callable, calc_grad_log_likelihood=False,
p_dm = None, p_dP = None):
"""
Discrete prediction function
Input:
k: int
Iteration number. Starts at 0. The total number of iterations equals
the number of measurements.
p_m: matrix of size (state_dim, time_series_no)
Mean value from the previous step. For "multiple time series mode"
it is matrix, second dimension of which correspond to different
time series.
p_P:
Covariance matrix from the previous step.
p_dyn_model_callable: class
calc_grad_log_likelihood: boolean
Whether to calculate gradient of the marginal likelihood
of the state-space model. If true then the next parameter must
provide the extra parameters for gradient calculation.
p_dm: 3D array (state_dim, time_series_no, parameters_no)
Mean derivatives from the previous step. For "multiple time series mode"
it is 3D array, second dimension of which correspond to different
time series.
p_dP: 3D array (state_dim, state_dim, parameters_no)
Covariance derivatives from the previous step.
Output:
----------------------------
m_pred, P_pred, dm_pred, dP_pred: matrices, 3D arrays
Results of the prediction step.
"""
# index corresponds to values from the previous iteration.
A = p_dyn_model_callable.Ak(k,p_m,p_P) # state transition matrix (or Jacobian)
Q = p_dyn_model_callable.Qk(k) # state noise matrix
# Prediction step ->
m_pred = p_dyn_model_callable.f_a(k, p_m, A) # predicted mean
P_pred = A.dot(p_P).dot(A.T) + Q # predicted variance
# Prediction step <-
if calc_grad_log_likelihood:
dA_all_params = p_dyn_model_callable.dAk(k) # derivatives of A wrt parameters
dQ_all_params = p_dyn_model_callable.dQk(k) # derivatives of Q wrt parameters
param_number = p_dP.shape[2]
# p_dm, p_dP - derivatives from the previous step
dm_pred = np.empty(p_dm.shape)
dP_pred = np.empty(p_dP.shape)
for j in range(param_number):
dA = dA_all_params[:,:,j]
dQ = dQ_all_params[:,:,j]
dP = p_dP[:,:,j]
dm = p_dm[:,:,j]
dm_pred[:,:,j] = np.dot(dA, p_m) + np.dot(A, dm)
# prediction step derivatives for current parameter:
dP_pred[:,:,j] = np.dot( dA ,np.dot(p_P, A.T))
dP_pred[:,:,j] += dP_pred[:,:,j].T
dP_pred[:,:,j] += np.dot( A ,np.dot(dP, A.T)) + dQ
dP_pred[:,:,j] = 0.5*(dP_pred[:,:,j] + dP_pred[:,:,j].T) #symmetrize
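# For reference, the loop above is the product rule applied to P_pred = A P A.T + Q:
#   dP_pred = dA P A.T + (dA P A.T).T + A dP A.T + dQ,
# followed by an explicit symmetrization to suppress round-off asymmetry.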
else:
dm_pred = None
dP_pred = None
return m_pred, P_pred, dm_pred, dP_pred
@staticmethod
def _kalman_prediction_step_SVD(k, p_m , p_P, p_dyn_model_callable, calc_grad_log_likelihood=False,
p_dm = None, p_dP = None):
"""
Discrete prediction function (SVD version)
Input:
k: int
Iteration number. Starts at 0. The total number of iterations equals
the number of measurements.
p_m: matrix of size (state_dim, time_series_no)
Mean value from the previous step. For "multiple time series mode"
it is matrix, second dimension of which correspond to different
time series.
p_P: tuple (Prev_cov, S, V)
Covariance matrix from the previous step and its SVD decomposition.
Prev_cov = V * S * V.T The tuple is (Prev_cov, S, V)
p_dyn_model_callable: object
calc_grad_log_likelihood: boolean
Whether to calculate gradient of the marginal likelihood
of the state-space model. If true then the next parameter must
provide the extra parameters for gradient calculation.
p_dm: 3D array (state_dim, time_series_no, parameters_no)
Mean derivatives from the previous step. For "multiple time series mode"
it is 3D array, second dimension of which correspond to different
time series.
p_dP: 3D array (state_dim, state_dim, parameters_no)
Covariance derivatives from the previous step.
Output:
----------------------------
m_pred, P_pred, dm_pred, dP_pred: matrices, 3D arrays
Results of the prediction step.
"""
# covariance from the previous step and its SVD decomposition
# p_prev_cov = v * S * V.T
Prev_cov, S_old, V_old = p_P
#p_prev_cov_tst = np.dot(p_V, (p_S * p_V).T) # reconstructed covariance from the previous step
# index corresponds to values from the previous iteration.
A = p_dyn_model_callable.Ak(k,p_m,Prev_cov) # state transition matrix (or Jacobian)
Q = p_dyn_model_callable.Qk(k) # state noise matrix. This is necessary for the square root calculation (next step)
Q_sr = p_dyn_model_callable.Q_srk(k)
# Prediction step ->
m_pred = p_dyn_model_callable.f_a(k, p_m, A) # predicted mean
# covariance prediction has changed (square-root form):
svd_1_matr = np.vstack( ( (np.sqrt(S_old)* np.dot(A,V_old)).T , Q_sr.T) )
(U,S,Vh) = sp.linalg.svd( svd_1_matr,full_matrices=False, compute_uv=True,
overwrite_a=False,check_finite=True)
# predicted variance computed by the regular method. For testing
#P_pred_tst = A.dot(Prev_cov).dot(A.T) + Q
V_new = Vh.T
S_new = S**2
P_pred = np.dot(V_new * S_new, V_new.T) # prediction covariance
P_pred = (P_pred, S_new, Vh.T)
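# For reference: svd_1_matr.T.dot(svd_1_matr) = A P_prev A.T + Q, so its right
# singular vectors/values give P_pred = V_new diag(S_new) V_new.T without forming
# the (possibly indefinite) sum directly; compare with the commented-out P_pred_tst above.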
# Prediction step <-
# derivatives
if calc_grad_log_likelihood:
dA_all_params = p_dyn_model_callable.dAk(k) # derivatives of A wrt parameters
dQ_all_params = p_dyn_model_callable.dQk(k) # derivatives of Q wrt parameters
param_number = p_dP.shape[2]
# p_dm, p_dP - derivatives from the previous step
dm_pred = np.empty(p_dm.shape)
dP_pred = np.empty(p_dP.shape)
for j in range(param_number):
dA = dA_all_params[:,:,j]
dQ = dQ_all_params[:,:,j]
#dP = p_dP[:,:,j]
#dm = p_dm[:,:,j]
dm_pred[:,:,j] = np.dot(dA, p_m) + np.dot(A, p_dm[:,:,j])
# prediction step derivatives for current parameter:
dP_pred[:,:,j] = np.dot( dA ,np.dot(Prev_cov, A.T))
dP_pred[:,:,j] += dP_pred[:,:,j].T
dP_pred[:,:,j] += np.dot( A ,np.dot(p_dP[:,:,j], A.T)) + dQ
dP_pred[:,:,j] = 0.5*(dP_pred[:,:,j] + dP_pred[:,:,j].T) #symmetrize
else:
dm_pred = None
dP_pred = None
return m_pred, P_pred, dm_pred, dP_pred
@staticmethod
def _kalman_update_step(k, p_m , p_P, p_meas_model_callable, measurement, calc_log_likelihood= False,
calc_grad_log_likelihood=False, p_dm = None, p_dP = None):
"""
Input:
k: int
Iteration number. Starts at 0. The total number of iterations equals
the number of measurements.
p_m: matrix of size (state_dim, time_series_no)
Mean value from the previous step. For "multiple time series mode"
it is matrix, second dimension of which correspond to different
time series.
p_P:
Covariance matrix from the prediction step.
p_meas_model_callable: object
measurement: (measurement_dim, time_series_no) matrix
One measurement used on the current update step. For
"multiple time series mode" it is matrix, second dimension of
which correspond to different time series.
calc_log_likelihood: boolean
Whether to calculate marginal likelihood of the state-space model.
calc_grad_log_likelihood: boolean
Whether to calculate gradient of the marginal likelihood
of the state-space model. If true then the next parameter must
provide the extra parameters for gradient calculation.
p_dm: 3D array (state_dim, time_series_no, parameters_no)
Mean derivatives from the prediction step. For "multiple time series mode"
it is 3D array, second dimension of which correspond to different
time series.
p_dP: array
Covariance derivatives from the prediction step.
Output:
----------------------------
m_upd, P_upd, dm_upd, dP_upd: matrices, 3D arrays
Results of the update step.
log_likelihood_update: double or 1D array
Update to the log_likelihood from this step
d_log_likelihood_update: (grad_params_no, time_series_no) matrix
Update to the gradient of log_likelihood, "multiple time series mode"
adds extra columns to the gradient.
"""
#import pdb; pdb.set_trace()
m_pred = p_m # from prediction step
P_pred = p_P # from prediction step
H = p_meas_model_callable.Hk(k, m_pred, P_pred)
R = p_meas_model_callable.Rk(k)
time_series_no = p_m.shape[1] # number of time series
log_likelihood_update=None; dm_upd=None; dP_upd=None; d_log_likelihood_update=None
# Update step (only if there is data)
#if not np.any(np.isnan(measurement)): # TODO: if some dimensions are missing, do properly computations for other.
v = measurement-p_meas_model_callable.f_h(k, m_pred, H)
S = H.dot(P_pred).dot(H.T) + R
if measurement.shape[0]==1: # measurements are one dimensional
if (S < 0):
raise ValueError("Kalman Filter Update: S is negative step %i" % k )
#import pdb; pdb.set_trace()
K = P_pred.dot(H.T) / S
if calc_log_likelihood:
log_likelihood_update = -0.5 * ( np.log(2*np.pi) + np.log(S) +
v*v / S)
#log_likelihood_update = log_likelihood_update[0,0] # to make int
if np.any(np.isnan(log_likelihood_update)): # some member in P_pred is None.
raise ValueError("Nan values in likelihood update!")
LL = None; islower = None
else:
LL,islower = linalg.cho_factor(S)
K = linalg.cho_solve((LL,islower), H.dot(P_pred.T)).T
if calc_log_likelihood:
log_likelihood_update = -0.5 * ( v.shape[0]*np.log(2*np.pi) +
2*np.sum( np.log(np.diag(LL)) ) +\
np.sum((linalg.cho_solve((LL,islower),v)) * v, axis = 0) ) # diagonal of v.T*S^{-1}*v
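# For reference, both branches evaluate the Gaussian innovation log-density
#   log N(v; 0, S) = -0.5*( d*log(2*pi) + log|S| + v.T S^{-1} v ),
# where in the Cholesky branch log|S| = 2*sum(log(diag(LL))).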
if calc_grad_log_likelihood:
dm_pred_all_params = p_dm # derivatives from the prediction phase
dP_pred_all_params = p_dP
param_number = p_dP.shape[2]
dH_all_params = p_meas_model_callable.dHk(k)
dR_all_params = p_meas_model_callable.dRk(k)
dm_upd = np.empty(dm_pred_all_params.shape)
dP_upd = np.empty(dP_pred_all_params.shape)
# first dimension - parameter_no, second - time series number
d_log_likelihood_update = np.empty((param_number,time_series_no))
for param in range(param_number):
dH = dH_all_params[:,:,param]
dR = dR_all_params[:,:,param]
dm_pred = dm_pred_all_params[:,:,param]
dP_pred = dP_pred_all_params[:,:,param]
# Terms in the likelihood derivatives
dv = - np.dot( dH, m_pred) - np.dot( H, dm_pred)
dS = np.dot(dH, np.dot( P_pred, H.T))
dS += dS.T
dS += np.dot(H, np.dot( dP_pred, H.T)) + dR
# TODO: maybe symmetrize dS
# dm and dP for the next step
if LL is not None: # the state vector is not a scalar
tmp1 = linalg.cho_solve((LL,islower), H).T
tmp2 = linalg.cho_solve((LL,islower), dH).T
tmp3 = linalg.cho_solve((LL,islower), dS).T
else: # the state vector is a scalar
tmp1 = H.T / S
tmp2 = dH.T / S
tmp3 = dS.T / S
dK = np.dot( dP_pred, tmp1) + np.dot( P_pred, tmp2) - \
np.dot( P_pred, np.dot( tmp1, tmp3 ) )
# terms required for the next step, save this for each parameter
dm_upd[:,:,param] = dm_pred + np.dot(dK, v) + np.dot(K, dv)
dP_upd[:,:,param] = -np.dot(dK, np.dot(S, K.T))
dP_upd[:,:,param] += dP_upd[:,:,param].T
dP_upd[:,:,param] += dP_pred - np.dot(K , np.dot( dS, K.T))
dP_upd[:,:,param] = 0.5*(dP_upd[:,:,param] + dP_upd[:,:,param].T) #symmetrize
# computing the likelihood change for each parameter:
if LL is not None: # the state vector is not 1D
#tmp4 = linalg.cho_solve((LL,islower), dv)
tmp5 = linalg.cho_solve((LL,islower), v)
else: # the state vector is a scalar
#tmp4 = dv / S
tmp5 = v / S
d_log_likelihood_update[param,:] = -(0.5*np.sum(np.diag(tmp3)) + \
np.sum(tmp5*dv, axis=0) - 0.5 * np.sum(tmp5 * np.dot(dS, tmp5), axis=0) )
# Before
#d_log_likelihood_update[param,0] = -(0.5*np.sum(np.diag(tmp3)) + \
#np.dot(tmp5.T, dv) - 0.5 * np.dot(tmp5.T ,np.dot(dS, tmp5)) )
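# For reference, the expression above implements, per parameter theta and per time series:
#   d logL / d theta = -0.5*tr(S^{-1} dS) - v.T S^{-1} dv + 0.5 * v.T S^{-1} dS S^{-1} v,
# with tmp3 = (S^{-1} dS).T and tmp5 = S^{-1} v.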
# Compute the actual updates for mean and variance of the states.
m_upd = m_pred + K.dot( v )
# Covariance update and ensure it is symmetric
P_upd = K.dot(S).dot(K.T)
P_upd = 0.5*(P_upd + P_upd.T)
P_upd = P_pred - P_upd# this update matrix is symmetric
return m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update
@staticmethod
def _kalman_update_step_SVD(k, p_m , p_P, p_meas_model_callable, measurement, calc_log_likelihood= False,
calc_grad_log_likelihood=False, p_dm = None, p_dP = None):
"""
Input:
k: int
Iteration number. Starts at 0. The total number of iterations equals
the number of measurements.
p_m: matrix of size (state_dim, time_series_no)
Mean value from the previous step. For "multiple time series mode"
it is matrix, second dimension of which correspond to different
time series.
p_P: tuple (P_pred, S, V)
Covariance matrix from the prediction step and its SVD decomposition.
P_pred = V * S * V.T The tuple is (P_pred, S, V)
p_meas_model_callable: object
Object providing the measurement model: the measurement function,
H_{k}, R_{k}, the inverse square root of R_{k} and, if needed,
their derivatives.
measurement: (measurement_dim, time_series_no) matrix
One measurement used on the current update step. For
"multiple time series mode" it is matrix, second dimension of
which correspond to different time series.
calc_log_likelihood: boolean
Whether to calculate marginal likelihood of the state-space model.
calc_grad_log_likelihood: boolean
Whether to calculate gradient of the marginal likelihood
of the state-space model. If true then the next parameter must
provide the extra parameters for gradient calculation.
p_dm: 3D array (state_dim, time_series_no, parameters_no)
Mean derivatives from the prediction step. For "multiple time series mode"
it is 3D array, second dimension of which correspond to different
time series.
p_dP: array
Covariance derivatives from the prediction step.
Output:
----------------------------
m_upd, P_upd, dm_upd, dP_upd: matrices, 3D arrays
Results of the update step.
log_likelihood_update: double or 1D array
Update to the log_likelihood from this step
d_log_likelihood_update: (grad_params_no, time_series_no) matrix
Update to the gradient of log_likelihood, "multiple time series mode"
adds extra columns to the gradient.
"""
#import pdb; pdb.set_trace()
m_pred = p_m # from prediction step
P_pred,S_pred,V_pred = p_P # from prediction step
H = p_meas_model_callable.Hk(k, m_pred, P_pred)
R = p_meas_model_callable.Rk(k)
R_isr = p_meas_model_callable.R_isrk(k) # square root of the inverse of R matrix
time_series_no = p_m.shape[1] # number of time series
log_likelihood_update=None; dm_upd=None; dP_upd=None; d_log_likelihood_update=None
# Update step (only if there is data)
#if not np.any(np.isnan(measurement)): # TODO: if some dimensions are missing, do properly computations for other.
v = measurement-p_meas_model_callable.f_h(k, m_pred, H)
svd_2_matr = np.vstack( ( np.dot( R_isr.T, np.dot(H, V_pred)) , np.diag( 1.0/np.sqrt(S_pred) ) ) )
(U,S,Vh) = sp.linalg.svd( svd_2_matr,full_matrices=False, compute_uv=True,
overwrite_a=False,check_finite=True)
# P_upd = U_upd S_upd**2 U_upd.T
U_upd = np.dot(V_pred, Vh.T)
S_upd = (1.0/S)**2
P_upd = np.dot(U_upd * S_upd, U_upd.T) # update covariance
P_upd = (P_upd,S_upd,U_upd) # tuple to pass to the next step
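# For reference: svd_2_matr.T.dot(svd_2_matr) = V_pred.T (H.T R^{-1} H + P_pred^{-1}) V_pred,
# so the update above is the information-form result
#   P_upd = (P_pred^{-1} + H.T R^{-1} H)^{-1} = U_upd diag(1/s^2) U_upd.T,
# computed entirely through orthogonal factors for numerical stability.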
# still need to compute S and K for the derivative computation
S = H.dot(P_pred).dot(H.T) + R
if measurement.shape[0]==1: # measurements are one dimensional
if (S < 0):
raise ValueError("Kalman Filter Update: S is negative step %i" % k )
#import pdb; pdb.set_trace()
K = P_pred.dot(H.T) / S
if calc_log_likelihood:
log_likelihood_update = -0.5 * ( np.log(2*np.pi) + np.log(S) +
v*v / S)
#log_likelihood_update = log_likelihood_update[0,0] # to make int
if np.any(np.isnan(log_likelihood_update)): # some member in P_pred is None.
raise ValueError("Nan values in likelihood update!")
LL = None; islower = None
else:
LL,islower = linalg.cho_factor(S)
K = linalg.cho_solve((LL,islower), H.dot(P_pred.T)).T
if calc_log_likelihood:
log_likelihood_update = -0.5 * ( v.shape[0]*np.log(2*np.pi) +
2*np.sum( np.log(np.diag(LL)) ) +\
np.sum((linalg.cho_solve((LL,islower),v)) * v, axis = 0) ) # diagonal of v.T*S^{-1}*v
# Old method of computing updated covariance (for testing) ->
#P_upd_tst = K.dot(S).dot(K.T)
#P_upd_tst = 0.5*(P_upd_tst + P_upd_tst.T)
#P_upd_tst = P_pred - P_upd_tst# this update matrix is symmetric
# Old method of computing updated covariance (for testing) <-
if calc_grad_log_likelihood:
dm_pred_all_params = p_dm # derivatives from the prediction phase
dP_pred_all_params = p_dP
param_number = p_dP.shape[2]
dH_all_params = p_meas_model_callable.dHk(k)
dR_all_params = p_meas_model_callable.dRk(k)
dm_upd = np.empty(dm_pred_all_params.shape)
dP_upd = np.empty(dP_pred_all_params.shape)
# first dimension - parameter_no, second - time series number
d_log_likelihood_update = np.empty((param_number,time_series_no))
for param in range(param_number):
dH = dH_all_params[:,:,param]
dR = dR_all_params[:,:,param]
dm_pred = dm_pred_all_params[:,:,param]
dP_pred = dP_pred_all_params[:,:,param]
# Terms in the likelihood derivatives
dv = - np.dot( dH, m_pred) - np.dot( H, dm_pred)
dS = np.dot(dH, np.dot( P_pred, H.T))
dS += dS.T
dS += np.dot(H, np.dot( dP_pred, H.T)) + dR
# TODO: maybe symmetrize dS
# dm and dP for the next step
if LL is not None: # the state vector is not a scalar
tmp1 = linalg.cho_solve((LL,islower), H).T
tmp2 = linalg.cho_solve((LL,islower), dH).T
tmp3 = linalg.cho_solve((LL,islower), dS).T
else: # the state vector is a scalar
tmp1 = H.T / S
tmp2 = dH.T / S
tmp3 = dS.T / S
dK = np.dot( dP_pred, tmp1) + np.dot( P_pred, tmp2) - \
np.dot( P_pred, np.dot( tmp1, tmp3 ) )
# terms required for the next step, save this for each parameter
dm_upd[:,:,param] = dm_pred + np.dot(dK, v) + np.dot(K, dv)
dP_upd[:,:,param] = -np.dot(dK, np.dot(S, K.T))
dP_upd[:,:,param] += dP_upd[:,:,param].T
dP_upd[:,:,param] += dP_pred - np.dot(K , np.dot( dS, K.T))
dP_upd[:,:,param] = 0.5*(dP_upd[:,:,param] + dP_upd[:,:,param].T) #symmetrize
# computing the likelihood change for each parameter:
if LL is not None: # the state vector is not 1D
tmp5 = linalg.cho_solve((LL,islower), v)
else: # the state vector is a scalar
tmp5 = v / S
d_log_likelihood_update[param,:] = -(0.5*np.sum(np.diag(tmp3)) + \
np.sum(tmp5*dv, axis=0) - 0.5 * np.sum(tmp5 * np.dot(dS, tmp5), axis=0) )
# Before
#d_log_likelihood_update[param,0] = -(0.5*np.sum(np.diag(tmp3)) + \
#np.dot(tmp5.T, dv) - 0.5 * np.dot(tmp5.T ,np.dot(dS, tmp5)) )
# Compute the actual updates for mean of the states. Variance update
# is computed earlier.
m_upd = m_pred + K.dot( v )
return m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update
@staticmethod
def _rts_smoother_update_step(k, p_m , p_P, p_m_pred, p_P_pred, p_m_prev_step,
p_P_prev_step, p_dynamic_callables):
"""
Rauch–Tung–Striebel(RTS) update step
Input:
-----------------------------
k: int
Iteration number. Starts at 0. The total number of iterations equals
the number of measurements.
p_m: matrix of size (state_dim, time_series_no)
Filter mean on step k
p_P: matrix of size (state_dim,state_dim)
Filter Covariance on step k
p_m_pred: matrix of size (state_dim, time_series_no)
Means from the smoother prediction step.
p_P_pred:
Covariance from the smoother prediction step.
p_m_prev_step
Smoother mean from the previous step.
p_P_prev_step:
Smoother covariance from the previous step.
p_dynamic_callables: object
Object providing the discrete dynamic model: A_{k} (or its Jacobian)
and Q_{k}.
"""
A = p_dynamic_callables.Ak(k,p_m,p_P) # state transition matrix (or Jacobian)
tmp = np.dot( A, p_P.T)
if A.shape[0] == 1: # 1D states
G = tmp.T / p_P_pred # P[:,:,k] is symmetric
else:
try:
LL,islower = linalg.cho_factor(p_P_pred)
G = linalg.cho_solve((LL,islower),tmp).T
except:
# It can happen that p_P_pred has several near-zero eigenvalues,
# hence the Cholesky decomposition fails. Fall back to least squares.
res = sp.linalg.lstsq(p_P_pred, tmp)
G = res[0].T
m_upd = p_m + G.dot( p_m_prev_step-p_m_pred )
P_upd = p_P + G.dot( p_P_prev_step-p_P_pred).dot(G.T)
P_upd = 0.5*(P_upd + P_upd.T)
return m_upd, P_upd, G
@classmethod
def rts_smoother(cls,state_dim, p_dynamic_callables, filter_means,
filter_covars):
"""
This function implements Rauch–Tung–Striebel(RTS) smoother algorithm
based on the results of kalman_filter_raw.
The following notation is used:
x_{k} = A_{k} * x_{k-1} + q_{k-1}; q_{k-1} ~ N(0, Q_{k-1})
y_{k} = H_{k} * x_{k} + r_{k}; r_{k} ~ N(0, R_{k})
Returns estimated smoother distributions x_{k} ~ N(m_{k}, P(k))
Input:
--------------
state_dim: int
Dimensionality of the state vector.
p_dynamic_callables: object
Object providing the discrete dynamic model: the dynamic function,
A_{k} (or its Jacobian) and Q_{k}.
filter_means: (no_steps+1,state_dim) matrix or (no_steps+1,state_dim, time_series_no) 3D array
Results of the Kalman Filter means estimation.
filter_covars: (no_steps+1, state_dim, state_dim) 3D array
Results of the Kalman Filter covariance estimation.
Output:
-------------
M: (no_steps+1, state_dim) matrix
Smoothed estimates of the state means
P: (no_steps+1, state_dim, state_dim) 3D array
Smoothed estimates of the state covariances
"""
no_steps = filter_covars.shape[0]-1# number of steps (minus initial covariance)
M = np.empty(filter_means.shape) # smoothed means
P = np.empty(filter_covars.shape) # smoothed covars
#G = np.empty( (no_steps,state_dim,state_dim) ) # G from the update step of the smoother
M[-1,:] = filter_means[-1,:]
P[-1,:,:] = filter_covars[-1,:,:]
for k in range(no_steps-1,-1,-1):
m_pred, P_pred, tmp1, tmp2 = \
cls._kalman_prediction_step(k, filter_means[k,:],
filter_covars[k,:,:], p_dynamic_callables,
calc_grad_log_likelihood=False)
p_m = filter_means[k,:]
if len(p_m.shape)<2:
p_m.shape = (p_m.shape[0],1)
p_m_prev_step = M[k+1,:]
if len(p_m_prev_step.shape)<2:
p_m_prev_step.shape = (p_m_prev_step.shape[0],1)
m_upd, P_upd, G_tmp = cls._rts_smoother_update_step(k,
p_m ,filter_covars[k,:,:],
m_pred, P_pred, p_m_prev_step ,P[k+1,:,:], p_dynamic_callables)
M[k,:] = m_upd#np.squeeze(m_upd)
P[k,:,:] = P_upd
#G[k,:,:] = G_upd.T # store transposed G.
# Return values
return (M, P) #, G)
@staticmethod
def _EM_gradient(A,Q,H,R,m_init,P_init,measurements, M, P, G, dA, dQ, dH, dR, dm_init, dP_init):
"""
Gradient computation with the EM algorithm.
Input:
-----------------
M: Means from the smoother
P: Variances from the smoother
G: Gains? from the smoother
"""
#import pdb; pdb.set_trace()
param_number = dA.shape[-1]
d_log_likelihood_update = np.empty((param_number,1))
sample_no = measurements.shape[0]
P_1 = P[1:,:,:] # remove 0-th step
P_2 = P[0:-1,:,:] # remove 0-th step
M_1 = M[1:,:] # remove 0-th step
M_2 = M[0:-1,:] # remove the last step
Sigma = np.mean(P_1,axis=0) + np.dot(M_1.T, M_1) / sample_no #
Phi = np.mean(P_2,axis=0) + np.dot(M_2.T, M_2) / sample_no #
B = np.dot( measurements.T, M_1 )/ sample_no
C = (sp.einsum( 'ijk,ikl', P_1, G) + np.dot(M_1.T, M_2)) / sample_no #
# C1 = np.zeros( (P_1.shape[1],P_1.shape[1]) )
# for k in range(P_1.shape[0]):
# C1 += np.dot(P_1[k,:,:],G[k,:,:]) + sp.outer( M_1[k,:], M_2[k,:] )
# C1 = C1 / sample_no
D = np.dot( measurements.T, measurements ) / sample_no
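# Sigma, Phi, B, C and D above are (approximate) smoothed sufficient statistics,
# in the spirit of the classical EM derivation for linear-Gaussian state-space models:
# sample averages of E[x_k x_k.T], E[x_{k-1} x_{k-1}.T], E[y_k x_k.T],
# E[x_k x_{k-1}.T] (via the smoother gains G) and E[y_k y_k.T].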
try:
P_init_inv = sp.linalg.inv(P_init)
if np.max( np.abs(P_init_inv)) > 10e13:
compute_P_init_terms = False
else:
compute_P_init_terms = True
except np.linalg.LinAlgError:
compute_P_init_terms = False
try:
Q_inv = sp.linalg.inv(Q)
if np.max( np.abs(Q_inv)) > 10e13:
compute_Q_terms = False
else:
compute_Q_terms = True
except np.linalg.LinAlgError:
compute_Q_terms = False
try:
R_inv = sp.linalg.inv(R)
if np.max( np.abs(R_inv)) > 10e13:
compute_R_terms = False
else:
compute_R_terms = True
except np.linalg.LinAlgError:
compute_R_terms = False
d_log_likelihood_update = np.zeros((param_number,1))
for j in range(param_number):
if compute_P_init_terms:
d_log_likelihood_update[j,:] -= 0.5 * np.sum(P_init_inv* dP_init[:,:,j].T ) #p #m
M0_smoothed = M[0]; M0_smoothed.shape = (M0_smoothed.shape[0],1)
tmp1 = np.dot( dP_init[:,:,j], np.dot( P_init_inv, (P[0,:,:] + sp.outer( (M0_smoothed - m_init), (M0_smoothed - m_init) )) ) ) #p #m
d_log_likelihood_update[j,:] += 0.5 * np.sum(P_init_inv* tmp1.T )
tmp2 = sp.outer( dm_init[:,j], M0_smoothed )
tmp2 += tmp2.T
d_log_likelihood_update[j,:] += 0.5 * np.sum(P_init_inv* tmp2.T )
if compute_Q_terms:
d_log_likelihood_update[j,:] -= sample_no/2.0 * np.sum(Q_inv* dQ[:,:,j].T ) #m
tmp1 = np.dot(C,A.T); tmp1 += tmp1.T; tmp1 = Sigma - tmp1 + np.dot(A, np.dot(Phi,A.T)) #m
tmp1 = np.dot( dQ[:,:,j], np.dot( Q_inv, tmp1) )
d_log_likelihood_update[j,:] += sample_no/2.0 * np.sum(Q_inv * tmp1.T)
tmp2 = np.dot( dA[:,:,j], C.T); tmp2 += tmp2.T;
tmp3 = np.dot(dA[:,:,j], np.dot(Phi,A.T)); tmp3 += tmp3.T
d_log_likelihood_update[j,:] -= sample_no/2.0 * np.sum(Q_inv.T * (tmp3 - tmp2) )
if compute_R_terms:
d_log_likelihood_update[j,:] -= sample_no/2.0 * np.sum(R_inv* dR[:,:,j].T )
tmp1 = np.dot(B,H.T); tmp1 += tmp1.T; tmp1 = D - tmp1 + np.dot(H, np.dot(Sigma,H.T))
tmp1 = np.dot( dR[:,:,j], np.dot( R_inv, tmp1) )
d_log_likelihood_update[j,:] += sample_no/2.0 * np.sum(R_inv * tmp1.T)
tmp2 = np.dot( dH[:,:,j], B.T); tmp2 += tmp2.T;
tmp3 = np.dot(dH[:,:,j], np.dot(Sigma,H.T)); tmp3 += tmp3.T
d_log_likelihood_update[j,:] -= sample_no/2.0 * np.sum(R_inv.T * (tmp3 - tmp2) )
return d_log_likelihood_update
@staticmethod
def _check_SS_matrix(p_M, state_dim, measurement_dim, which='A'):
"""
Verify that the matrix has the appropriate shape for the KF algorithm.
Input:
p_M: matrix
As it is given by the user.
state_dim: int
State dimensionality
measurement_dim: int
Measurement dimensionality
which: string
One of: 'A', 'Q', 'H', 'R'
Output:
---------------
p_M: matrix of the right shape
old_M_shape: tuple
Old Shape
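Example (illustrative sketch only; names and values are hypothetical, and the
helper is assumed to be reached through the enclosing class as elsewhere in this file):
    A = np.eye(2) # a (2,2) transition matrix
    A, old_shape = cls._check_SS_matrix(A, 2, 1, which='A')
    # A.shape is now (2, 2, 1); old_shape == (2, 2) can be used to restore it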
"""
old_M_shape = None
if len(p_M.shape) < 3: # new shape is 3 dimensional
old_M_shape = p_M.shape # save shape to restore it on exit
if len(p_M.shape) == 2: # matrix
p_M.shape = (p_M.shape[0],p_M.shape[1],1)
elif len(p_M.shape) == 1: # scalar but in array already
if (p_M.shape[0] != 1):
raise ValueError("Matrix %s is an 1D array, while it must be a matrix or scalar", which)
else:
p_M.shape = (1,1,1)
if (which == 'A') or (which == 'Q'):
if (p_M.shape[0] != state_dim) or (p_M.shape[1] != state_dim):
raise ValueError("%s must be a square matrix of size (%i,%i)" % (which, state_dim, state_dim))
if (which == 'H'):
if (p_M.shape[0] != measurement_dim) or (p_M.shape[1] != state_dim):
raise ValueError("H must be of shape (measurement_dim, state_dim) (%i,%i)" % (measurement_dim, state_dim))
if (which == 'R'):
if (p_M.shape[0] != measurement_dim) or (p_M.shape[1] != measurement_dim):
raise ValueError("R must be of shape (measurement_dim, measurement_dim) (%i,%i)" % (measurement_dim, measurement_dim))
return (p_M,old_M_shape)
@staticmethod
def _check_grad_state_matrices(dM, state_dim, grad_params_no, which = 'dA'):
"""
Function checks (mostly checks dimensions) the matrices for the marginal likelihood
gradient calculation. It checks the dA and dQ matrices.
Input:
-------------
dM: None, scalar or 3D array
It is supposed to be a (state_dim, state_dim, grad_params_no) array.
If None then zero matrix is assumed. If scalar then the function
checks consistency with "state_dim" and "grad_params_no".
state_dim: int
State dimensionality
grad_params_no: int
Total number of parameters in the likelihood gradient.
which: string
'dA' or 'dQ'
Output:
--------------
dM: 3D array with the checked (and possibly reshaped) derivative matrices.
"""
if dM is None:
dM=np.zeros((state_dim,state_dim,grad_params_no))
elif isinstance(dM, np.ndarray):
if state_dim == 1:
if len(dM.shape) < 3:
dM.shape = (1,1,1)
else:
if len(dM.shape) < 3:
dM.shape = (state_dim,state_dim,1)
elif isinstance(dM, np.int):
if state_dim > 1:
raise ValueError("When computing likelihood gradient wrong %s dimension." % which)
else:
dM = np.ones((1,1,1)) * dM
# if not isinstance(dM, types.FunctionType):
# f_dM = lambda k: dM
# else:
# f_dM = dM
return dM
@staticmethod
def _check_grad_measurement_matrices(dM, state_dim, grad_params_no, measurement_dim, which = 'dH'):
"""
Function checks (mostly checks dimensions) the matrices for the marginal likelihood
gradient calculation. It checks the dH and dR matrices.
Input:
-------------
dM: None, scalar or 3D array
It is supposed to be
(measurement_dim ,state_dim,grad_params_no) for "dH" matrix.
(measurement_dim,measurement_dim,grad_params_no) for "dR"
If None then zero matrix is assumed. If scalar then the function
checks consistency with "state_dim" and "grad_params_no".
state_dim: int
State dimensionality
grad_params_no: int
Total number of parameters in the likelihood gradient.
measurement_dim: int
Dimensionality of measurements.
which: string
'dH' or 'dR'
Output:
--------------
dM: 3D array with the checked (and possibly reshaped) derivative matrices.
"""
if dM is None:
if which == 'dH':
dM=np.zeros((measurement_dim ,state_dim,grad_params_no))
elif which == 'dR':
dM=np.zeros((measurement_dim,measurement_dim,grad_params_no))
elif isinstance(dM, np.ndarray):
if state_dim == 1:
if len(dM.shape) < 3:
dM.shape = (1,1,1)
else:
if len(dM.shape) < 3:
if which == 'dH':
dM.shape = (measurement_dim,state_dim,1)
elif which == 'dR':
dM.shape = (measurement_dim,measurement_dim,1)
elif isinstance(dM, np.int):
if state_dim > 1:
raise ValueError("When computing likelihood gradient wrong dH dimension.")
else:
dM = np.ones((1,1,1)) * dM
# if not isinstance(dM, types.FunctionType):
# f_dM = lambda k: dM
# else:
# f_dM = dM
return dM
class Struct(object):
pass
class ContDescrStateSpace(DescreteStateSpace):
"""
Class for continuous-discrete Kalman filter. State equation is
continuous while measurement equation is discrete.
d x(t)/ dt = F x(t) + L q; where q~ N(0, Qc)
y_{t_k} = H_{k} x_{t_k} + r_{k}; r_{k} ~ N(0, R_{k})
"""
class AQcompute_once(Q_handling_Class):
"""
Class for calculating matrices A, Q, dA, dQ of the discrete Kalman Filter
from the matrices F, L, Qc, P_inf, dF, dQc, dP_inf of the continuous state
equation. dt - time steps.
It has the same interface as AQcompute_batch.
It computes matrices for only one time step. This object is used when
there are many different time steps and storing matrices for each of them
would take too much memory.
"""
def __init__(self, F,L,Qc,dt,compute_derivatives=False, grad_params_no=None, P_inf=None, dP_inf=None, dF = None, dQc=None):
"""
Constructor. All necessary parameters are passed here and stored
in the object.
Input:
-------------------
F, L, Qc, P_inf : matrices
Parameters of corresponding continuous state model
dt: array
All time steps
compute_derivatives: bool
Whether to calculate derivatives
dP_inf, dF, dQc: 3D array
Derivatives if they are required
Output:
-------------------
Nothing
"""
# Copies are done because this object is used later in smoother
# and these parameters must not change.
self.F = F.copy()
self.L = L.copy()
self.Qc = Qc.copy()
self.dt = dt # copy is not taken because dt is internal parameter
# Parameters are used to calculate derivatives but derivatives
# are not used in the smoother. Therefore copies are not taken.
self.P_inf = P_inf
self.dP_inf = dP_inf
self.dF = dF
self.dQc = dQc
self.compute_derivatives = compute_derivatives
self.grad_params_no = grad_params_no
self.last_k = 0
self.last_k_computed = False
self.v_Ak = None
self.v_Qk = None
self.v_dAk = None
self.v_dQk = None
self.Q_square_root_computed = False
self.Q_inverse_computed = False
self.Q_svd_computed = False
# !!!Print statistics! Which object is created
def f_a(self, k,m,A):
"""
Dynamic model
"""
return np.dot(A, m) # default dynamic model
def _recompute_for_new_k(self,k):
"""
Computes the necessary matrices for an index k and store the results.
Input:
----------------------
k: int
Index in the time differences array dt where to compute matrices
Output:
----------------------
Ak,Qk, dAk, dQk: matrices and/or 3D arrays
A, Q, dA dQ on step k
"""
if (self.last_k != k) or (self.last_k_computed == False):
v_Ak,v_Qk, tmp, v_dAk, v_dQk = ContDescrStateSpace.lti_sde_to_descrete(self.F,
self.L,self.Qc,self.dt[k],self.compute_derivatives,
grad_params_no=self.grad_params_no, P_inf=self.P_inf, dP_inf=self.dP_inf, dF=self.dF, dQc=self.dQc)
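# For a linear time-invariant SDE the discretization is, in the standard derivation,
#   A_k = expm(F * dt[k])  and, when the stationary covariance P_inf is available,
#   Q_k = P_inf - A_k P_inf A_k.T;
# see lti_sde_to_descrete for the exact computation (and derivative handling) used here.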
self.last_k = k
self.last_k_computed = True
self.v_Ak = v_Ak
self.v_Qk = v_Qk
self.v_dAk = v_dAk
self.v_dQk = v_dQk
self.Q_square_root_computed = False
self.Q_inverse_computed = False
self.Q_svd_computed = False
else:
v_Ak = self.v_Ak
v_Qk = self.v_Qk
v_dAk = self.v_dAk
v_dQk = self.v_dQk
# !!!Print statistics! Print sizes of matrices
return v_Ak,v_Qk, v_dAk, v_dQk
def reset(self, compute_derivatives):
"""
For reusing this object e.g. in smoother computation. Actually,
this object can not be reused because it computes the matrices on
every iteration. But this method is written for keeping the same
interface with the class AQcompute_batch.
"""
self.last_k = 0
self.last_k_computed = False
self.compute_derivatives = compute_derivatives
self.Q_square_root_computed = False
self.Q_inverse_computed = False
self.Q_svd_computed = False
self.Q_eigen_computed = False
return self
def Ak(self,k,m,P):
v_Ak,v_Qk, v_dAk, v_dQk = self._recompute_for_new_k(k)
return v_Ak
def Qk(self,k):
v_Ak,v_Qk, v_dAk, v_dQk = self._recompute_for_new_k(k)
return v_Qk
def dAk(self, k):
v_Ak,v_Qk, v_dAk, v_dQk = self._recompute_for_new_k(k)
return v_dAk
def dQk(self, k):
v_Ak,v_Qk, v_dAk, v_dQk = self._recompute_for_new_k(k)
return v_dQk
def Q_srk(self,k):
"""
TODO: check the square root; maybe rewriting via spectral decomposition is needed.
Square root of the noise matrix Q
"""
if ((self.last_k == k) and (self.last_k_computed == True)):
if not self.Q_square_root_computed:
if not self.Q_svd_computed:
(U, S, Vh) = sp.linalg.svd( self.v_Qk, full_matrices=False, compute_uv=True, overwrite_a=False, check_finite=False)
self.Q_svd = (U, S, Vh)
self.Q_svd_computed = True
else:
(U, S, Vh) = self.Q_svd
square_root = U * np.sqrt(S)
self.Q_square_root_computed = True
self.Q_square_root = square_root
else:
square_root = self.Q_square_root
else:
raise ValueError("Square root of Q can not be computed")
return square_root
def Q_inverse(self, k, p_largest_cond_num, p_regularization_type):
"""
Function inverts Q matrix and regularizes the inverse.
Regularization is useful when original matrix is badly conditioned.
Function is currently used only in SparseGP code.
Inputs:
------------------------------
k: int
Iteration number.
p_largest_cond_num: float
Largest permitted condition number for the inverted matrix. If the condition
number is smaller than that, no regularization happens.
p_regularization_type: int (1 or 2)
Regularization type:
type 1: 1/(S[k] + regularizer), regularizer is computed
type 2: S[k]/(S^2[k] + regularizer), regularizer is computed
"""
#import pdb; pdb.set_trace()
if ((self.last_k == k) and (self.last_k_computed == True)):
if not self.Q_inverse_computed:
if not self.Q_svd_computed:
(U, S, Vh) = sp.linalg.svd( self.v_Qk, full_matrices=False, compute_uv=True, overwrite_a=False, check_finite=False)
self.Q_svd = (U, S, Vh)
self.Q_svd_computed = True
else:
(U, S, Vh) = self.Q_svd
Q_inverse_r = psd_matrix_inverse(k, 0.5*(self.v_Qk + self.v_Qk.T), U,S, p_largest_cond_num, p_regularization_type)
self.Q_inverse_computed = True
self.Q_inverse_r = Q_inverse_r
else:
Q_inverse_r = self.Q_inverse_r
else:
raise ValueError("""Inverse of Q can not be computed, because Q has not been computed.
This requires some programming""")
return Q_inverse_r
def return_last(self):
"""
Function returns last computed matrices.
"""
if not self.last_k_computed:
raise ValueError("Matrices are not computed.")
else:
k = self.last_k
A = self.v_Ak
Q = self.v_Qk
dA = self.v_dAk
dQ = self.v_dQk
return k, A, Q, dA, dQ
class AQcompute_batch_Python(Q_handling_Class):
"""
Class for calculating matrices A, Q, dA, dQ of the discrete Kalman Filter
from the matrices F, L, Qc, P_inf, dF, dQc, dP_inf of the continuous state
equation. dt - time steps.
It has the same interface as AQcompute_once.
It computes matrices for all time steps. This object is used when
there are not so many (controlled by internal variable)
different time steps and storing all the matrices does not take too much memory.
Since all the matrices are computed all together, this object can be used
in smoother without repeating the computations.
"""
def __init__(self, F,L,Qc,dt,compute_derivatives=False, grad_params_no=None, P_inf=None, dP_inf=None, dF = None, dQc=None):
"""
Constructor. All necessary parameters are passed here and stored
in the object.
Input:
-------------------
F, L, Qc, P_inf : matrices
Parameters of corresponding continuous state model
dt: array
All time steps
compute_derivatives: bool
Whether to calculate derivatives
dP_inf, dF, dQc: 3D array
Derivatives if they are required
Output:
-------------------
Nothing
"""
As, Qs, reconstruct_indices, dAs, dQs = ContDescrStateSpace.lti_sde_to_descrete(F,
L,Qc,dt,compute_derivatives,
grad_params_no=grad_params_no, P_inf=P_inf, dP_inf=dP_inf, dF=dF, dQc=dQc)
self.As = As
self.Qs = Qs
self.dAs = dAs
self.dQs = dQs
self.reconstruct_indices = reconstruct_indices
self.total_size_of_data = self.As.nbytes + self.Qs.nbytes +\
(self.dAs.nbytes if (self.dAs is not None) else 0) +\
(self.dQs.nbytes if (self.dQs is not None) else 0) +\
(self.reconstruct_indices.nbytes if (self.reconstruct_indices is not None) else 0)
self.Q_svd_dict = {}
self.Q_square_root_dict = {}
self.Q_inverse_dict = {}
self.last_k = None
# !!!Print statistics! Which object is created
# !!!Print statistics! Print sizes of matrices
def f_a(self, k,m,A):
"""
Dynamic model
"""
return np.dot(A, m) # default dynamic model
def reset(self, compute_derivatives=False):
"""
For reusing this object e.g. in smoother computation. It makes sense
because the necessary matrices have already been computed for all
time steps.
"""
return self
def Ak(self,k,m,P):
self.last_k = k
return self.As[:,:, self.reconstruct_indices[k]]
def Qk(self,k):
self.last_k = k
return self.Qs[:,:, self.reconstruct_indices[k]]
def dAk(self,k):
self.last_k = k
return self.dAs[:,:, :, self.reconstruct_indices[k]]
def dQk(self,k):
self.last_k = k
return self.dQs[:,:, :, self.reconstruct_indices[k]]
def Q_srk(self,k):
"""
Square root of the noise matrix Q
"""
matrix_index = self.reconstruct_indices[k]
if matrix_index in self.Q_square_root_dict:
square_root = self.Q_square_root_dict[matrix_index]
else:
if matrix_index in self.Q_svd_dict:
(U, S, Vh) = self.Q_svd_dict[matrix_index]
else:
(U, S, Vh) = sp.linalg.svd( self.Qs[:,:, matrix_index],
full_matrices=False, compute_uv=True,
overwrite_a=False, check_finite=False)
self.Q_svd_dict[matrix_index] = (U,S,Vh)
square_root = U * np.sqrt(S)
self.Q_square_root_dict[matrix_index] = square_root
return square_root
def Q_inverse(self, k, p_largest_cond_num, p_regularization_type):
"""
Function inverts Q matrix and regularizes the inverse.
Regularization is useful when original matrix is badly conditioned.
Function is currently used only in SparseGP code.
Inputs:
------------------------------
k: int
Iteration number.
p_largest_cond_num: float
Largest permitted condition number for the inverted matrix. If the condition
number is smaller than that, no regularization happens.
p_regularization_type: int (1 or 2)
Regularization type:
type 1: 1/(S[k] + regularizer), regularizer is computed
type 2: S[k]/(S^2[k] + regularizer), regularizer is computed
"""
#import pdb; pdb.set_trace()
matrix_index = self.reconstruct_indices[k]
if matrix_index in self.Q_inverse_dict:
Q_inverse_r = self.Q_inverse_dict[matrix_index]
else:
if matrix_index in self.Q_svd_dict:
(U, S, Vh) = self.Q_svd_dict[matrix_index]
else:
(U, S, Vh) = sp.linalg.svd( self.Qs[:,:, matrix_index],
full_matrices=False, compute_uv=True,
overwrite_a=False, check_finite=False)
self.Q_svd_dict[matrix_index] = (U,S,Vh)
Q_inverse_r = psd_matrix_inverse(k, 0.5*(self.Qs[:,:, matrix_index] + self.Qs[:,:, matrix_index].T), U,S, p_largest_cond_num, p_regularization_type)
self.Q_inverse_dict[matrix_index] = Q_inverse_r
return Q_inverse_r
def return_last(self):
"""
Function returns last available matrices.
"""
if (self.last_k is None):
raise ValueError("Matrices are not computed.")
else:
ind = self.reconstruct_indices[self.last_k]
A = self.As[:,:, ind]
Q = self.Qs[:,:, ind]
dA = self.dAs[:,:, :, ind]
dQ = self.dQs[:,:, :, ind]
return self.last_k, A, Q, dA, dQ
@classmethod
def cont_discr_kalman_filter(cls, F, L, Qc, p_H, p_R, P_inf, X, Y, index = None,
m_init=None, P_init=None,
p_kalman_filter_type='regular',
calc_log_likelihood=False,
calc_grad_log_likelihood=False,
grad_params_no=0, grad_calc_params=None):
"""
This function implements the continuous-discrete Kalman Filter algorithm
These notations for the State-Space model are assumed:
d/dt x(t) = F * x(t) + L * w(t); w(t) ~ N(0, Qc)
y_{k} = H_{k} * x_{k} + r_{k}; r_{k} ~ N(0, R_{k})
Returns estimated filter distributions x_{k} ~ N(m_{k}, P(k))
Current Features:
----------------------------------------
1) The function generally does not modify the passed parameters. If
it happens then it is an error. There are several exceptions: scalars
can be modified into a matrix, and in some rare cases the shapes of
the derivative matrices may be changed; this is ignored for now.
2) Copies of F, L, Qc are created in memory because they may be used later
in the smoother. References to the copies are kept in the "AQcomp" object
returned by this function.
3) The function supports "multiple time series mode", which means that exactly
the same state-space model is used to filter several sets of measurements.
In this case the third dimension of Y contains these measurement sets, and
log_likelihood and grad_log_likelihood get the corresponding dimensions.
4) Calculation of grad_log_likelihood is not supported if the matrices
H or R change over time (with index k). (This may change later.)
5) Measurements may include missing values. In this case the update step is
not done for those measurements. (This may change later.)
Input:
-----------------
F: (state_dim, state_dim) matrix
F in the model.
L: (state_dim, noise_dim) matrix
L in the model.
Qc: (noise_dim, noise_dim) matrix
Q_c in the model.
p_H: scalar, matrix (measurement_dim, state_dim) , 3D array
H_{k} in the model. If it is a matrix then H_{k} = H is constant.
If it is a 3D array then H_{k} = p_H[:,:, index[0,k]]
p_R: scalar, square symmetric matrix, 3D array
R_{k} in the model. If it is a matrix then R_{k} = R is constant.
If it is a 3D array then R_{k} = p_R[:,:, index[1,k]]
P_inf: (state_dim, state_dim) matrix
State variance matrix at infinity (stationary covariance).
X: 1D array
Time points of measurements. Needed for converting the continuous
problem to the discrete one.
Y: matrix or vector or 3D array
Data. If Y is matrix then samples are along 0-th dimension and
features along the 1-st. If 3D array then third dimension
correspond to "multiple time series mode".
index: vector
Which indices (on 3-rd dimension) from arrays p_H, p_R to use
on every time step. If this parameter is None then it is assumed
that p_H, p_R do not change over time and indices are not needed.
index[0,:] - corresponds to H, index[1,:] - corresponds to R.
If index.shape[0] == 1, it is assumed that the indices for all matrices
are the same.
m_init: vector or matrix
Initial distribution mean. If None it is assumed to be zero.
For "multiple time series mode" it is matrix, second dimension of
which correspond to different time series. In regular case ("one
time series mode") it is a vector.
P_init: square symmetric matrix or scalar
Initial covariance of the states. If the parameter is scalar
then it is assumed that initial covariance matrix is unit matrix
multiplied by this scalar. If None then P_inf is used instead.
"multiple time series mode" does not affect it, since it does not
affect anything related to state variances.
p_kalman_filter_type: string, one of ('regular', 'svd')
Which Kalman Filter is used, regular or SVD. SVD is more numerically
stable; in particular, covariance matrices are guaranteed to be
positive semi-definite. However, 'svd' works slower, especially for
small data, due to the SVD call overhead.
calc_log_likelihood: boolean
Whether to calculate marginal likelihood of the state-space model.
calc_grad_log_likelihood: boolean
Whether to calculate gradient of the marginal likelihood
of the state-space model. If true then "grad_calc_params" parameter must
provide the extra parameters for gradient calculation.
grad_params_no: int
If previous parameter is true, then this parameters gives the
total number of parameters in the gradient.
grad_calc_params: dictionary
Dictionary with derivatives of model matrices with respect
to parameters "dF", "dL", "dQc", "dH", "dR", "dm_init", "dP_init".
They can be None; in this case zero matrices (no dependence on parameters)
are assumed. If there is only one parameter then the third dimension is
added automatically.
Output:
--------------
M: (no_steps+1, state_dim) matrix or (no_steps+1, state_dim, time_series_no) 3D array
Filter estimates of the state means. The initial value is included as
an extra step. In the "multiple time series mode" the third dimension
corresponds to different time series.
P: (no_steps+1, state_dim, state_dim) 3D array
Filter estimates of the state covariances. The initial value is
included as an extra step.
log_likelihood: double or (1, time_series_no) matrix
If the parameter calc_log_likelihood was set to true, the logarithm of the
marginal likelihood of the state-space model is returned, otherwise None.
In the "multiple time series mode" it is a vector with the log likelihood
of each time series.
grad_log_likelihood: column vector or (grad_params_no, time_series_no) matrix
If calc_grad_log_likelihood is true, the gradient of the log likelihood
with respect to the parameters is returned column-wise, so in the
"multiple time series mode" the gradient of each time series is in the
corresponding column.
AQcomp: object
Contains some pre-computed values for converting the continuous model into
a discrete one. It can be used later in the smoothing phase.
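Example:
--------------
Illustrative sketch only (a scalar Ornstein-Uhlenbeck-type model; the names
and values below are hypothetical and not part of any API guarantee):
    F = np.array([[-1.0]]); L = np.array([[1.0]]); Qc = np.array([[2.0]])
    P_inf = np.array([[1.0]]) # solves F P_inf + P_inf F.T + L Qc L.T = 0
    H = np.array([[1.0]]); R = np.array([[0.1]])
    X = np.linspace(0.0, 5.0, 50); Y = np.sin(X)[:, None]
    M, P, lik, grad_lik, AQcomp = ContDescrStateSpace.cont_discr_kalman_filter(
        F, L, Qc, H, R, P_inf, X, Y, calc_log_likelihood=True)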
"""
p_H = np.atleast_1d(p_H)
p_R = np.atleast_1d(p_R)
X.shape, old_X_shape = cls._reshape_input_data(X.shape, 2) # represent as column
if (X.shape[1] != 1):
raise ValueError("Only one dimensional X data is supported.")
Y.shape, old_Y_shape = cls._reshape_input_data(Y.shape) # represent as column
state_dim = F.shape[0]
measurement_dim = Y.shape[1]
time_series_no = Y.shape[2] # multiple time series mode
if ((len(p_H.shape) == 3) and (p_H.shape[2] != 1)) or\
((len(p_R.shape) == 3) and (p_R.shape[2] != 1)):
model_matrices_chage_with_time = True
else:
model_matrices_chage_with_time = False
# Check index
old_index_shape = None
if index is None:
if (len(p_H.shape) == 3) or (len(p_R.shape) == 3):
raise ValueError("Parameter index can not be None for time varying matrices (third dimension is present)")
else: # matrices do not change in time, so form dummy zero indices.
index = np.zeros((1,Y.shape[0]))
else:
if len(index.shape) == 1:
index.shape = (1,index.shape[0])
old_index_shape = (index.shape[0],)
if (index.shape[1] != Y.shape[0]):
raise ValueError("Number of measurements must be equal the number of H_{k}, R_{k}")
if (index.shape[0] == 1):
H_time_var_index = 0; R_time_var_index = 0
elif (index.shape[0] == 2):
H_time_var_index = 0; R_time_var_index = 1
else:
raise ValueError("First Dimension of index must be either 1 or 2.")
(p_H, old_H_shape) = cls._check_SS_matrix(p_H, state_dim, measurement_dim, which='H')
(p_R, old_R_shape) = cls._check_SS_matrix(p_R, state_dim, measurement_dim, which='R')
if m_init is None:
m_init = np.zeros((state_dim, time_series_no))
else:
m_init = np.atleast_2d(m_init).T
if P_init is None:
P_init = P_inf.copy()
if p_kalman_filter_type not in ('regular', 'svd'):
raise ValueError("Kalman filer type neither 'regular nor 'svd'.")
# Functions to pass to the kalman_filter algorithm:
# Parameters:
# k - number of Kalman filter iteration
# m - vector for calculating matrices. Required for EKF. Not used here.
# f_hl = lambda k,m,H: np.dot(H, m)
# f_H = lambda k,m,P: p_H[:,:, index[H_time_var_index, k]]
#f_R = lambda k: p_R[:,:, index[R_time_var_index, k]]
#o_R = R_handling( p_R, index, R_time_var_index, 20)
if calc_grad_log_likelihood:
dF = cls._check_grad_state_matrices(grad_calc_params.get('dF'), state_dim, grad_params_no, which = 'dA')
dQc = cls._check_grad_state_matrices(grad_calc_params.get('dQc'), state_dim, grad_params_no, which = 'dQ')
dP_inf = cls._check_grad_state_matrices(grad_calc_params.get('dP_inf'), state_dim, grad_params_no, which = 'dA')
dH = cls._check_grad_measurement_matrices(grad_calc_params.get('dH'), state_dim, grad_params_no, measurement_dim, which = 'dH')
dR = cls._check_grad_measurement_matrices(grad_calc_params.get('dR'), state_dim, grad_params_no, measurement_dim, which = 'dR')
dm_init = grad_calc_params.get('dm_init') # Initial values for the Kalman Filter
if dm_init is None:
# multiple time series mode. Keep grad_params always as a last dimension
dm_init = np.zeros( (state_dim, time_series_no, grad_params_no) )
dP_init = grad_calc_params.get('dP_init') # Initial values for the Kalman Filter
if dP_init is None:
dP_init = dP_inf.copy() # by default the initial state is the stationary one, so dP_init = dP_inf
else:
dP_inf = None
dF = None
dQc = None
dH = None
dR = None
dm_init = None
dP_init = None
measurement_callables = Std_Measurement_Callables_Class(p_H, H_time_var_index, p_R, index, R_time_var_index, 20, dH, dR)
#import pdb; pdb.set_trace()
dynamic_callables = cls._cont_to_discrete_object(X, F, L, Qc, compute_derivatives=calc_grad_log_likelihood,
grad_params_no=grad_params_no,
P_inf=P_inf, dP_inf=dP_inf, dF = dF, dQc=dQc)
if print_verbose:
print("General: run Continuos-Discrete Kalman Filter")
# Also for dH, dR and probably for all derivatives
(M, P, log_likelihood, grad_log_likelihood, AQcomp) = cls._cont_discr_kalman_filter_raw(state_dim,
dynamic_callables, measurement_callables,
X, Y, m_init=m_init, P_init=P_init,
p_kalman_filter_type=p_kalman_filter_type,
calc_log_likelihood=calc_log_likelihood,
calc_grad_log_likelihood=calc_grad_log_likelihood, grad_params_no=grad_params_no,
dm_init=dm_init, dP_init=dP_init)
if old_index_shape is not None:
index.shape = old_index_shape
if old_X_shape is not None:
X.shape = old_X_shape
if old_Y_shape is not None:
Y.shape = old_Y_shape
if old_H_shape is not None:
p_H.shape = old_H_shape
if old_R_shape is not None:
p_R.shape = old_R_shape
return (M, P, log_likelihood, grad_log_likelihood, AQcomp)
@classmethod
def _cont_discr_kalman_filter_raw(cls,state_dim, p_dynamic_callables, p_measurement_callables, X, Y,
m_init, P_init,
p_kalman_filter_type='regular',
calc_log_likelihood=False,
calc_grad_log_likelihood=False, grad_params_no=None,
dm_init=None, dP_init=None):
"""
General filtering algorithm for inference in the continuous-discrete
state-space model:
d/dt x(t) = F * x(t) + L * w(t); w(t) ~ N(0, Qc)
y_{k} = H_{k} * x_{k} + r_{k}; r_{k} ~ N(0, R_{k})
Returns estimated filter distributions x_{k} ~ N(m_{k}, P(k))
Current Features:
----------------------------------------
1) The function supports "multiple time series mode", which means that exactly
the same state-space model is used to filter several sets of measurements.
In this case the third dimension of Y contains these measurement sets, and
log_likelihood and grad_log_likelihood get the corresponding dimensions.
2) Measurements may include missing values. In this case the update step is
not done for those measurements. (This may change later.)
Input:
-----------------
state_dim: int
Dimensionality of the states
p_dynamic_callables: object
Object providing the discrete-time dynamic model derived from the
continuous one: the dynamic function, A_{k}, Q_{k} and, if needed,
their derivatives.
p_measurement_callables: object
Object providing the measurement model: the measurement function,
H_{k}, R_{k} and, if needed, their derivatives.
X: 1D array
Time points of measurements.
Y: matrix or 3D array
Measurements, as described in cont_discr_kalman_filter.
m_init: vector or matrix
Initial distribution mean. For "multiple time series mode"
it is matrix, second dimension of which correspond to different
time series. In regular case ("one time series mode") it is a
vector.
P_init: matrix or scalar
Initial covariance of the states. Must not be None.
"multiple time series mode" does not affect it, since it does not
affect anything related to state variances.
p_kalman_filter_type: string, one of ('regular', 'svd')
Which Kalman Filter is used, regular or SVD. SVD is more numerically
stable; in particular, covariance matrices are guaranteed to be
positive semi-definite. However, 'svd' works slower, especially for
small data, due to the SVD call overhead.
calc_log_likelihood: boolean
Whether to calculate marginal likelihood of the state-space model.
calc_grad_log_likelihood: boolean
Whether to calculate gradient of the marginal likelihood
of the state-space model. If true then the next parameter must
provide the extra parameters for gradient calculation.
grad_params_no: int
Number of gradient parameters
dP_inf, dF, dQc, dH, dR, dm_init, dP_init: matrices or 3D arrays.
Necessary parameters for derivatives calculation.
"""
#import pdb; pdb.set_trace()
steps_no = Y.shape[0] # number of steps in the Kalman Filter
time_series_no = Y.shape[2] # multiple time series mode
# Allocate space for results
# Mean estimations. Initial values will be included
M = np.empty(((steps_no+1),state_dim,time_series_no))
M[0,:,:] = m_init # Initialize mean values
# Variance estimations. Initial values will be included
P = np.empty(((steps_no+1),state_dim,state_dim))
P_init = 0.5*( P_init + P_init.T) # symmetrize the initial covariance; useful in some numerically unstable cases
P[0,:,:] = P_init # Initialize initial covariance matrix
#import pdb;pdb.set_trace()
if p_kalman_filter_type == 'svd':
(U,S,Vh) = sp.linalg.svd( P_init,full_matrices=False, compute_uv=True,
overwrite_a=False,check_finite=True)
S[ (S==0) ] = 1e-17 # allows the algorithm to run with a singular initial variance
P_upd = (P_init, S,U)
#log_likelihood = 0
#grad_log_likelihood = np.zeros((grad_params_no,1))
log_likelihood = 0 if calc_log_likelihood else None
grad_log_likelihood = 0 if calc_grad_log_likelihood else None
#setting initial values for derivatives update
dm_upd = dm_init
dP_upd = dP_init
# Main loop of the Kalman filter
for k in range(0,steps_no):
# In this loop the index of the new estimate is (k+1) and of the old one is (k),
# because the initial values are stored at index 0.
#import pdb; pdb.set_trace()
prev_mean = M[k,:,:] # mean from the previous step
if p_kalman_filter_type == 'svd':
m_pred, P_pred, dm_pred, dP_pred = \
cls._kalman_prediction_step_SVD(k, prev_mean ,P_upd, p_dynamic_callables,
calc_grad_log_likelihood=calc_grad_log_likelihood,
p_dm = dm_upd, p_dP = dP_upd)
else:
m_pred, P_pred, dm_pred, dP_pred = \
cls._kalman_prediction_step(k, prev_mean ,P[k,:,:], p_dynamic_callables,
calc_grad_log_likelihood=calc_grad_log_likelihood,
p_dm = dm_upd, p_dP = dP_upd )
#import pdb; pdb.set_trace()
k_measurment = Y[k,:,:]
if (np.any(np.isnan(k_measurment)) == False):
if p_kalman_filter_type == 'svd':
m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
cls._kalman_update_step_SVD(k, m_pred , P_pred, p_measurement_callables,
k_measurment, calc_log_likelihood=calc_log_likelihood,
calc_grad_log_likelihood=calc_grad_log_likelihood,
p_dm = dm_pred, p_dP = dP_pred )
# m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
# cls._kalman_update_step(k, m_pred , P_pred[0], f_h, f_H, p_R.f_R, k_measurment,
# calc_log_likelihood=calc_log_likelihood,
# calc_grad_log_likelihood=calc_grad_log_likelihood,
# p_dm = dm_pred, p_dP = dP_pred, grad_calc_params_2 = (dH, dR))
#
# (U,S,Vh) = sp.linalg.svd( P_upd,full_matrices=False, compute_uv=True,
# overwrite_a=False,check_finite=True)
# P_upd = (P_upd, S,U)
else:
m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
cls._kalman_update_step(k, m_pred , P_pred, p_measurement_callables, k_measurment,
calc_log_likelihood=calc_log_likelihood,
calc_grad_log_likelihood=calc_grad_log_likelihood,
p_dm = dm_pred, p_dP = dP_pred )
else:
if k_measurment.shape != (1,1):
raise ValueError("Nan measurements are currently not supported for \
multidimensional output and multiple tiem series.")
else:
m_upd = m_pred; P_upd = P_pred; dm_upd = dm_pred; dP_upd = dP_pred
log_likelihood_update = 0.0;
d_log_likelihood_update = 0.0;
if calc_log_likelihood:
log_likelihood += log_likelihood_update
if calc_grad_log_likelihood:
grad_log_likelihood += d_log_likelihood_update
M[k+1,:,:] = m_upd # separate mean value for each time series
if p_kalman_filter_type == 'svd':
P[k+1,:,:] = P_upd[0]
else:
P[k+1,:,:] = P_upd
#print("kf it: %i" % k)
# !!!Print statistics! Print sizes of matrices
# !!!Print statistics! Print iteration time base on another boolean variable
return (M, P, log_likelihood, grad_log_likelihood, p_dynamic_callables.reset(False))
@classmethod
def cont_discr_rts_smoother(cls,state_dim, filter_means, filter_covars,
p_dynamic_callables=None, X=None, F=None,L=None,Qc=None):
"""
Continuous-discrete Rauch–Tung–Striebel (RTS) smoother.
This function implements the Rauch–Tung–Striebel (RTS) smoother algorithm
based on the results of _cont_discr_kalman_filter_raw.
Model:
d/dt x(t) = F * x(t) + L * w(t); w(t) ~ N(0, Qc)
y_{k} = H_{k} * x_{k} + r_{k}; r_{k} ~ N(0, R_{k})
Returns estimated smoother distributions x_{k} ~ N(m_{k}, P(k))
Input:
--------------
filter_means: (no_steps+1,state_dim) matrix or (no_steps+1,state_dim, time_series_no) 3D array
Results of the Kalman Filter means estimation.
filter_covars: (no_steps+1, state_dim, state_dim) 3D array
Results of the Kalman Filter covariance estimation.
p_dynamic_callables: object or None
Object from the filter phase which provides functions for computing
A, Q, dA, dQ for the discrete model from the continuous model.
X, F, L, Qc: matrices
If p_dynamic_callables is None, these matrices are used to create this object from scratch.
Output:
-------------
M: (no_steps+1,state_dim) matrix
Smoothed estimates of the state means
P: (no_steps+1,state_dim, state_dim) 3D array
Smoothed estimates of the state covariances
"""
f_a = lambda k,m,A: np.dot(A, m) # state dynamic model
if p_dynamic_callables is None: # make this object from scratch
p_dynamic_callables = cls._cont_to_discrete_object(X, F, L, Qc, compute_derivatives=False,
grad_params_no=None, P_inf=None, dP_inf=None, dF = None, dQc=None)
no_steps = filter_covars.shape[0]-1 # number of steps (excluding the entry for the initial covariance)
M = np.empty(filter_means.shape) # smoothed means
P = np.empty(filter_covars.shape) # smoothed covars
if print_verbose:
print("General: run Continuos-Discrete Kalman Smoother")
M[-1,:,:] = filter_means[-1,:,:]
P[-1,:,:] = filter_covars[-1,:,:]
for k in range(no_steps-1,-1,-1):
prev_mean = filter_means[k,:] # mean from the previous step
m_pred, P_pred, tmp1, tmp2 = \
cls._kalman_prediction_step(k, prev_mean,
filter_covars[k,:,:], p_dynamic_callables,
calc_grad_log_likelihood=False)
p_m = filter_means[k,:]
p_m_prev_step = M[(k+1),:]
m_upd, P_upd, tmp_G = cls._rts_smoother_update_step(k,
p_m ,filter_covars[k,:,:],
m_pred, P_pred, p_m_prev_step ,P[(k+1),:,:], p_dynamic_callables)
M[k,:,:] = m_upd
P[k,:,:] = P_upd
# Return values
return (M, P)
@classmethod
def _cont_to_discrete_object(cls, X, F, L, Qc, compute_derivatives=False,
grad_params_no=None,
P_inf=None, dP_inf=None, dF = None, dQc=None,
dt0=None):
"""
Function returns the object which is used in the Kalman filter and/or
smoother to obtain matrices A, Q and their derivatives for the discrete model
from the continuous model.
There are 2 objects, AQcompute_once and AQcompute_batch, and the function
returns the appropriate one based on the number of different time steps.
Input:
----------------------
X, F, L, Qc: matrices
Continuous model matrices
f_a: function
Dynamic Function is attached to the Dynamic_Model_Callables class
compute_derivatives: boolean
Whether to compute derivatives
grad_params_no: int
Number of parameters in the gradient
P_inf, dP_inf, dF, dQ: matrices and 3D objects
Data necessary to compute derivatives.
Output:
--------------------------
AQcomp: object
Its methods return matrices (and optionally derivatives) for the
discrete state-space model.
"""
unique_round_decimals = 10
threshold_number_of_unique_time_steps = 20 # above this, matrices are computed separately at each step instead of being precomputed in batch
dt = np.empty((X.shape[0],))
dt[1:] = np.diff(X[:,0],axis=0)
if dt0 is None:
dt[0] = 0#dt[1]
else:
if isinstance(dt0,str):
dt = dt[1:]
else:
dt[0] = dt0
unique_indices = np.unique(np.round(dt, decimals=unique_round_decimals))
number_unique_indices = len(unique_indices)
#import pdb; pdb.set_trace()
if use_cython:
class AQcompute_batch(state_space_cython.AQcompute_batch_Cython):
def __init__(self, F,L,Qc,dt,compute_derivatives=False, grad_params_no=None, P_inf=None, dP_inf=None, dF = None, dQc=None):
As, Qs, reconstruct_indices, dAs, dQs = ContDescrStateSpace.lti_sde_to_descrete(F,
L,Qc,dt,compute_derivatives,
grad_params_no=grad_params_no, P_inf=P_inf, dP_inf=dP_inf, dF=dF, dQc=dQc)
super(AQcompute_batch,self).__init__(As, Qs, reconstruct_indices, dAs,dQs)
else:
AQcompute_batch = cls.AQcompute_batch_Python
if number_unique_indices > threshold_number_of_unique_time_steps:
AQcomp = cls.AQcompute_once(F,L,Qc, dt,compute_derivatives=compute_derivatives,
grad_params_no=grad_params_no, P_inf=P_inf, dP_inf=dP_inf, dF=dF, dQc=dQc)
if print_verbose:
print("CDO: Continue-to-discrete INSTANTANEOUS object is created.")
print("CDO: Number of different time steps: %i" % (number_unique_indices,) )
else:
AQcomp = AQcompute_batch(F,L,Qc,dt,compute_derivatives=compute_derivatives,
grad_params_no=grad_params_no, P_inf=P_inf, dP_inf=dP_inf, dF=dF, dQc=dQc)
if print_verbose:
print("CDO: Continue-to-discrete BATCH object is created.")
print("CDO: Number of different time steps: %i" % (number_unique_indices,) )
print("CDO: Total size if its data: %i" % (AQcomp.total_size_of_data,) )
return AQcomp
@staticmethod
def lti_sde_to_descrete(F,L,Qc,dt,compute_derivatives=False,
grad_params_no=None, P_inf=None,
dP_inf=None, dF = None, dQc=None):
"""
Linear Time-Invariant Stochastic Differential Equation (LTI SDE):
dx(t) = F x(t) dt + L d \beta ,where
x(t): (vector) stochastic process
\beta: (vector) Brownian motion process
F, L: (time invariant) matrices of corresponding dimensions
Qc: covariance of noise.
This function rewrites it into the corresponding state-space form:
x_{k} = A_{k} * x_{k-1} + q_{k-1}; q_{k-1} ~ N(0, Q_{k-1})
TODO: this function can be redone to "preprocess the dataset", so that
close time points are handled properly (with a rounding parameter) and
values are averaged accordingly.
Input:
--------------
F,L: LTI SDE matrices of corresponding dimensions
Qc: matrix (n,n)
Covariance between different dimensions of noise \beta.
n is the dimensionality of the noise.
dt: double or iterable
Time difference used on this iteration.
If dt is iterable, then A and Q_noise are computed for every
unique dt
compute_derivatives: boolean
Whether derivatives of A and Q are required.
grad_params_no: int
Number of gradient parameters
P_inf: (state_dim, state_dim) matrix
dP_inf: 3D array
Derivatives of P_inf
dF: 3D array
Derivatives of F
dQc: 3D array
Derivatives of Qc
dR: 3D array
Derivatives of R
Output:
--------------
A: matrix
A_{k}. Because the SDE is LTI, only dt affects how the matrix
differs for different k.
Q_noise: matrix
Covariance matrix of (vector) q_{k-1}. Only dt affects how the
matrix differs for different k.
reconstruct_index: array
If dt was iterable, return three-dimensional arrays A and Q_noise.
The third dimension of these arrays corresponds to unique dt's.
This reconstruct_index contains the indices of the original dt's
in the unique dt sequence. A[:,:, reconstruct_index[5]]
is the matrix A of the 6-th (indices start from zero) dt in the original
sequence.
dA: 3D array
Derivatives of A
dQ: 3D array
Derivatives of Q
"""
# Dimensionality
n = F.shape[0]
if not isinstance(dt, collections.Iterable): # not iterable, scalar
#import pdb; pdb.set_trace()
# The dynamical model
A = matrix_exponent(F*dt)
# The covariance matrix Q by matrix fraction decomposition ->
Phi = np.zeros((2*n,2*n))
Phi[:n,:n] = F
Phi[:n,n:] = L.dot(Qc).dot(L.T)
Phi[n:,n:] = -F.T
AB = matrix_exponent(Phi*dt)
AB = np.dot(AB, np.vstack((np.zeros((n,n)),np.eye(n))))
Q_noise_1 = linalg.solve(AB[n:,:].T,AB[:n,:].T)
Q_noise_2 = P_inf - A.dot(P_inf).dot(A.T)
# The covariance matrix Q by matrix fraction decomposition <-
if compute_derivatives:
dA = np.zeros([n, n, grad_params_no])
dQ = np.zeros([n, n, grad_params_no])
#AA = np.zeros([2*n, 2*n, nparam])
FF = np.zeros([2*n, 2*n])
AA = np.zeros([2*n, 2*n, grad_params_no])
for p in range(0, grad_params_no):
FF[:n,:n] = F
FF[n:,:n] = dF[:,:,p]
FF[n:,n:] = F
# Solve the matrix exponential
AA[:,:,p] = matrix_exponent(FF*dt)
# Solve the differential equation
#foo = AA[:,:,p].dot(np.vstack([m, dm[:,p]]))
#mm = foo[:n,:]
#dm[:,p] = foo[n:,:]
# The discrete-time dynamical model*
if p==0:
A = AA[:n,:n,p]
Q_noise_3 = P_inf - A.dot(P_inf).dot(A.T)
Q_noise = Q_noise_3
#PP = A.dot(P).dot(A.T) + Q_noise_2
# The derivatives of A and Q
dA[:,:,p] = AA[n:,:n,p]
tmp = dA[:,:,p].dot(P_inf).dot(A.T)
dQ[:,:,p] = dP_inf[:,:,p] - tmp \
- A.dot(dP_inf[:,:,p]).dot(A.T) - tmp.T
dQ[:,:,p] = 0.5*(dQ[:,:,p] + dQ[:,:,p].T) # Symmetrize
else:
dA = None
dQ = None
Q_noise = Q_noise_2
# Inaccuracies have been observed when Q_noise_1 was used.
#Q_noise = Q_noise_1
Q_noise = 0.5*(Q_noise + Q_noise.T) # Symmetrize
return A, Q_noise,None, dA, dQ
else: # iterable, array
# Time discretizations (round to 8 decimals to avoid numerical noise in dt)
dt_unique, tmp, reconstruct_index = np.unique(np.round(dt,8),
return_index=True,return_inverse=True)
del tmp
# Allocate space for A and Q
A = np.empty((n,n,dt_unique.shape[0]))
Q_noise = np.empty((n,n,dt_unique.shape[0]))
if compute_derivatives:
dA = np.empty((n,n,grad_params_no,dt_unique.shape[0]))
dQ = np.empty((n,n,grad_params_no,dt_unique.shape[0]))
else:
dA = None
dQ = None
# Call this function for each unique dt
for j in range(0,dt_unique.shape[0]):
A[:,:,j], Q_noise[:,:,j], tmp1, dA_t, dQ_t = ContDescrStateSpace.lti_sde_to_descrete(F,L,Qc,dt_unique[j],
compute_derivatives=compute_derivatives, grad_params_no=grad_params_no, P_inf=P_inf, dP_inf=dP_inf, dF = dF, dQc=dQc)
if compute_derivatives:
dA[:,:,:,j] = dA_t
dQ[:,:,:,j] = dQ_t
# Return
return A, Q_noise, reconstruct_index, dA, dQ
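# --- Illustrative sketch (not part of the original GPy file) -----------------
# Discretize a scalar Ornstein-Uhlenbeck process dx = -lam*x dt + d(beta),
# Var[beta(t)] = q*t, using lti_sde_to_descrete defined above.  For this model
# the stationary variance is P_inf = q/(2*lam), and the exact discrete-time
# matrices are A = exp(-lam*dt) and Q = P_inf*(1 - A**2), which the routine
# should reproduce.  All numerical values are arbitrary illustration.
def _ou_discretization_example(lam=0.5, q=1.0, dt=0.1):
    import numpy as np
    F_ou = np.array([[-lam]]); L_ou = np.array([[1.0]]); Qc_ou = np.array([[q]])
    P_inf_ou = np.array([[q / (2.0 * lam)]])
    A_d, Q_d, _, _, _ = ContDescrStateSpace.lti_sde_to_descrete(F_ou, L_ou, Qc_ou, dt,
                                                                P_inf=P_inf_ou)
    assert np.allclose(A_d, np.exp(-lam * dt))
    assert np.allclose(Q_d, P_inf_ou * (1.0 - np.exp(-2.0 * lam * dt)))
    return A_d, Q_d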
def matrix_exponent(M):
"""
The function computes the matrix exponential and handles some special cases
"""
if (M.shape[0] == 1): # 1*1 matrix
Mexp = np.array( ((np.exp(M[0,0]) ,),) )
else: # matrix is larger
method = None
try:
Mexp = linalg.expm(M)
method = 1
except (Exception,) as e:
Mexp = linalg.expm3(M) # Taylor-series fallback; scipy.linalg.expm3 exists only in older SciPy releases (removed in SciPy 1.0)
method = 2
finally:
if np.any(np.isnan(Mexp)):
if method == 2:
raise ValueError("Matrix Exponent is not computed 1")
else:
Mexp = linalg.expm3(M)
method = 2
if np.any(np.isnan(Mexp)):
raise ValueError("Matrix Exponent is not computed 2")
return Mexp
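# Small sanity check (illustrative, not part of the original file): for the
# rotation generator M = [[0, -w], [w, 0]] the exact matrix exponential is the
# 2D rotation matrix [[cos w, -sin w], [sin w, cos w]], which matrix_exponent
# above should reproduce via scipy.linalg.expm.
def _matrix_exponent_example(w=0.3):
    import numpy as np
    M = np.array([[0.0, -w], [w, 0.0]])
    expected = np.array([[np.cos(w), -np.sin(w)], [np.sin(w), np.cos(w)]])
    assert np.allclose(matrix_exponent(M), expected)
    return matrix_exponent(M)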
def balance_matrix(A):
"""
Balances a matrix, i.e. finds a similarity transformation of the original
matrix A: A = T * bA * T^{-1}, such that the norms of the columns of bA and of the rows of bA
are as close as possible. It is usually used as a preprocessing step in
eigenvalue calculation routines. It is also useful for state-space models.
See also:
[1] Beresford N. Parlett and Christian Reinsch (1969). Balancing
a matrix for calculation of eigenvalues and eigenvectors.
Numerische Mathematik, 13(4): 293-304.
Input:
----------------------
A: square matrix
Matrix to be balanced
Output:
----------------
bA: matrix
Balanced matrix
T: matrix
Left part of the similarity transformation
T_inv: matrix
Right part of the similarity transformation.
"""
if len(A.shape) != 2 or (A.shape[0] != A.shape[1]):
raise ValueError('balance_matrix: Expecting square matrix')
N = A.shape[0] # matrix size
gebal = sp.linalg.lapack.get_lapack_funcs('gebal',(A,))
bA, lo, hi, pivscale, info = gebal(A, permute=True, scale=True,overwrite_a=False)
if info < 0:
raise ValueError('balance_matrix: Illegal value in %d-th argument of internal gebal ' % -info)
# calculating the similarity transformation:
def perm_matr(D, c1,c2):
"""
Function creates the permutation matrix which swaps columns c1 and c2.
Input:
--------------
D: int
Size of the permutation matrix
c1: int
Column 1. Numeration starts from 1...D
c2: int
Column 2. Numeration starts from 1...D
"""
i1 = c1-1; i2 = c2-1 # indices
P = np.eye(D);
P[i1,i1] = 0.0; P[i2,i2] = 0.0; # nullify diagonal elements
P[i1,i2] = 1.0; P[i2,i1] = 1.0
return P
P = np.eye(N) # permutation matrix
if (hi != N-1): # there are row permutations
for k in range(N-1,hi,-1):
new_perm = perm_matr(N, k+1, pivscale[k])
P = np.dot(P,new_perm)
if (lo != 0):
for k in range(0,lo,1):
new_perm = perm_matr(N, k+1, pivscale[k])
P = np.dot(P,new_perm)
D = pivscale.copy()
D[0:lo] = 1.0; D[hi+1:N] = 1.0 # these scaling factors must be set to one.
#D = np.diag(D) # make a diagonal matrix
T = np.dot(P,np.diag(D)) # similarity transformation in question
T_inv = np.dot(np.diag(D**(-1)),P.T)
#print( np.max(A - np.dot(T, np.dot(bA, T_inv) )) )
return bA.copy(), T, T_inv
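# Illustrative round-trip check (not part of the original file): balance_matrix
# returns (bA, T, T_inv) with A = T * bA * T_inv, so the original matrix can be
# reconstructed from the balanced one.  The column scaling below is arbitrary
# and only serves to make the input badly scaled.
def _balance_matrix_example():
    import numpy as np
    rng = np.random.RandomState(0)
    A = rng.rand(4, 4) * np.array([1.0, 1e3, 1e-6, 1.0])  # badly scaled columns
    bA, T, T_inv = balance_matrix(A)
    print("balance_matrix reconstruction error:", np.max(np.abs(A - T.dot(bA).dot(T_inv))))
    return bA, T, T_inv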
def balance_ss_model(F,L,Qc,H,Pinf,P0,dF=None,dQc=None,dPinf=None,dP0=None):
"""
Balances a state-space model for better numerical stability.
This is based on the following:
dx/dt = F x + L w
y = H x
Let T z = x, which gives
dz/dt = inv(T) F T z + inv(T) L w
y = H T z
"""
bF,T,T_inv = balance_matrix(F)
bL = np.dot( T_inv, L)
bQc = Qc # not affected
bH = np.dot(H, T)
bPinf = np.dot(T_inv, np.dot(Pinf, T_inv.T))
#import pdb; pdb.set_trace()
# LL,islower = linalg.cho_factor(Pinf)
# inds = np.triu_indices(Pinf.shape[0],k=1)
# LL[inds] = 0.0
# bLL = np.dot(T_inv, LL)
# bPinf = np.dot( bLL, bLL.T)
bP0 = np.dot(T_inv, np.dot(P0, T_inv.T))
if dF is not None:
bdF = np.zeros(dF.shape)
for i in range(dF.shape[2]):
bdF[:,:,i] = np.dot( T_inv, np.dot( dF[:,:,i], T))
else:
bdF = None
if dPinf is not None:
bdPinf = np.zeros(dPinf.shape)
for i in range(dPinf.shape[2]):
bdPinf[:,:,i] = np.dot( T_inv, np.dot( dPinf[:,:,i], T_inv.T))
# LL,islower = linalg.cho_factor(dPinf[:,:,i])
# inds = np.triu_indices(dPinf[:,:,i].shape[0],k=1)
# LL[inds] = 0.0
# bLL = np.dot(T_inv, LL)
# bdPinf[:,:,i] = np.dot( bLL, bLL.T)
else:
bdPinf = None
if dP0 is not None:
bdP0 = np.zeros(dP0.shape)
for i in range(dP0.shape[2]):
bdP0[:,:,i] = np.dot( T_inv, np.dot( dP0[:,:,i], T_inv.T))
else:
bdP0 = None
bdQc = dQc # not affected
# (F,L,Qc,H,Pinf,P0,dF,dQc,dPinf,dP0)
return bF, bL, bQc, bH, bPinf, bP0, bdF, bdQc, bdPinf, bdP0
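# Illustrative sketch (not part of the original file): balance a toy two-state
# model with balance_ss_model defined above.  The matrices below are arbitrary
# companion-form / SPD examples rather than a real covariance model; the check
# only verifies that the similarity transformation preserves the eigenvalues of F.
def _balance_ss_model_example(lam=2.0, q=1.0):
    import numpy as np
    F_m = np.array([[0.0, 1.0], [-lam**2, -2.0*lam]])
    L_m = np.array([[0.0], [1.0]])
    Qc_m = np.array([[q]])
    H_m = np.array([[1.0, 0.0]])
    Pinf_m = np.array([[1.0, 0.0], [0.0, lam**2]])
    bF, bL, bQc, bH, bPinf, bP0, _, _, _, _ = balance_ss_model(F_m, L_m, Qc_m, H_m,
                                                               Pinf_m, Pinf_m.copy())
    print(np.sort(np.linalg.eigvals(F_m)), np.sort(np.linalg.eigvals(bF)))
    return bF, bL, bQc, bH, bPinf, bP0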
|
SheffieldML/GPy
|
GPy/models/state_space_main.py
|
Python
|
bsd-3-clause
| 143,832
|
[
"Gaussian"
] |
9617358dd6529d963e7043acea4de04e185367efcfb2f6960331a6e76dcf86d8
|
"""
@name: Modules/Web/web_computerMenu.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2015-2020 D. Brian Kimmel
@license: MIT License
@note: Created on Jun 21, 2015
@Summary:
"""
# Import system type stuff
import os
from nevow import loaders
from nevow import athena
# Import PyMh files and modules.
from Modules.Core import logging_pyh as Logger
# Handy helper for finding external resources nearby.
webpath = os.path.join(os.path.split(__file__)[0])
templatepath = os.path.join(webpath, 'template')
g_debug = 0
LOG = Logger.getLogger('PyHouse.ComputerMenu')
class ComputerMenuElement(athena.LiveElement):
"""
"""
docFactory = loaders.xmlfile(os.path.join(templatepath, 'computerMenuElement.html'))
jsClass = u'computerMenu.ComputerMenuWidget'
def __init__(self, p_workspace_obj):
self.m_workspace_obj = p_workspace_obj
self.m_pyhouse_obj = p_workspace_obj.m_pyhouse_obj
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/Computer/Web/web_computerMenu.py
|
Python
|
mit
| 1,022
|
[
"Brian"
] |
9b3bff5e4fb68e19c2f069e311434eea47fd6199daeface9749e03fdc61cd2c9
|
import numpy as np
from scipy import optimize
import matplotlib.pylab as plt
import collections, copy, itertools
class TrajectorySource(object):
"""
Class to generate initial trajectories for linkage inference as well as for continued production
of trajectories feeding into a "live" linking process.
"""
def __init__(self,p_nd,n,ran_dis_spec,dim=2,intensity_choices=[1.],ran_int_spec={'paras':{'delta':0.1},'type':'uniform'}):
"""
Input:
p_nd - float, within [0,1] = probability of non-disappearance for an atom of a trajectory from one step to the next
n - int, maximum number of positions per step
ran_dis_spec - dict, defines the generation of spatial displacements.
expected structure: {'paras':{'mu':0,'sig':1.},'type':'gaussian'}
dim - int (optional), defines the spatial dimensionality of the problem
"""
#trajectory generation related
self.implemented_ran_dis = ['gaussian','cubic grid']
assert isinstance(p_nd,(int,float)) and (0. <= p_nd <= 1.), "Assertion failed - p_nd is not within [0,1]. p_nd = {}".format(p_nd)
assert isinstance(n,(int,float)) and n>0, "Assertion failed - expected int or float value (to convert to int) for 'n' greater than 0, got {}".format(n)
assert ('paras' in ran_dis_spec) and ('type' in ran_dis_spec) and isinstance(ran_dis_spec['paras'],dict) and isinstance(ran_dis_spec['type'],str), "Assertion failed - expected structure {'paras':[1.,0.],'type':'gaussian'} for ran_dis_spec with type any of {}, got {} instead.".format(self.implemented_ran_dis,ran_dis_spec)
self.p_nd = p_nd
self.n = n
self.ran_dis_spec = ran_dis_spec
self.dim = dim
self.intensity_choices = intensity_choices
self.ran_int_spec = ran_int_spec
#trajectory related
self.positions = None
self.LM_traj = None
self.intensities = None
def generate_intensity_deviation(self):
ran_int_spec={'paras':{'delta':0.1},'type':'uniform'}
if self.ran_int_spec['type'] == 'uniform':
dx = self.ran_int_spec['paras']['delta']*.5
deviation = np.random.uniform(low=-dx,high=dx,size=self.n)
else:
raise ValueError("Error - got unexpected random generator type for intensity noise {}".format(self.ran_int_spec['type']))
return deviation
def _get_initial_frame(self,bounds):
pos = np.array([np.random.uniform(low=bounds[v][0],high=bounds[v][1],size=self.n) for v in xrange(self.dim)])
to_skip = np.random.random(self.n)
pos[:,np.where(to_skip>self.p_nd)] = np.nan
intensities = np.random.choice(self.intensity_choices,size=self.n) + self.generate_intensity_deviation()
return pos, intensities
def generate_displacements(self):
if self.ran_dis_spec['type'] == 'gaussian':
deviation = np.random.normal(scale=self.ran_dis_spec['paras']['sig'],loc=self.ran_dis_spec['paras']['mu'],size=(self.dim,))
signs = np.random.choice(np.array([-1,1]))
deviation *= signs
elif self.ran_dis_spec['type'] == 'cubic grid':
deviation = np.random.choice(np.array([0,1]),size=(self.dim,))
signs = np.random.choice(np.array([-1,1]))
deviation *= signs
else:
raise ValueError("Error - got unexpected random generator type for displacements {}".format(self.ran_dis_spec['type']))
return deviation
def update_positions(self,bounds):
"""
Expects ndarray of shape (Nt,dim,nmax) and extends along the first dimension of the ndarray.
Input:
pos - ndarray of shape (Nt,dim,nmax) containing particle positions
Returns:
new_pos - ndarray of shape (Nt+1,dim,nmax) containing particle positions
"""
pos = self.positions
intensities = self.intensities
#call generate_displacements using the supplied info in ran_dis_spec
last_steps = [None]*self.n #index to timestep which is the last non nan value for the given trajectory
for i in xrange(self.n):
not_nan = np.where([not v for v in np.isnan(pos[:,0,i])])[0]
#if the trajectory has any non-nan entries, record the last timestep with a non-nan value;
#if the trajectory is all nan, the corresponding last_steps entry stays None
if len(not_nan) > 0:
last_steps[i] = not_nan[-1]
#update all recent positions which are not skipped (i.e. pos val == nan)
new_pos = np.zeros((pos.shape[0]+1,pos.shape[1],pos.shape[2]))
new_pos[:-1] = pos
new_pos[-1,:,:] = np.array([pos[val,:,i] if val!=None else [np.nan,np.nan] for i,val in enumerate(last_steps)]).T
new_int = np.zeros((intensities.shape[0]+1,intensities.shape[1]))
new_int[:-1] = intensities
Nt = pos.shape[0] #new timestep
if None in last_steps:
new_initial, _ = self._get_initial_frame(bounds)
deviations = self.generate_displacements()
intensity_deviations = self.generate_intensity_deviation()
new_int[-1,:] = intensities[-1,:] + intensity_deviations
for i,val in enumerate(last_steps):
if val==None: #case that no initial position exists yet
new_pos[-1,:,i] = new_initial[:,i]
else: #case that a previous position exists but is more than one step ago
if np.random.uniform() > self.p_nd: #roll the dice on whether the position at the next step will be observed
nan_array = np.zeros((self.dim,))
nan_array[:] = np.nan
new_pos[-1,:,i] = nan_array
else:
n_dis = Nt - val #is 1 if position at previous timestep present or larger if steps were skipped
dis = [self.generate_displacements() for v in xrange(n_dis)]
dis = np.array(reduce(lambda x,y: x+y,dis))
new_pos[-1,:,i] += dis
return new_pos, new_int
def generate_initial(self,Nt,bounds=[(0,1),(0,1)]):
"""
Input:
Nt - int, number of steps to generate including the initial
bounds - list of tuples of floats or ints (optional), the bounds for the initial frame where each tuple corresponds to one dimension in space
"""
print("Simulating {} initial steps...".format(Nt))
#initial positions
x0, I0 = self._get_initial_frame(bounds)
self.positions = np.array([x0])
self.intensities = np.array([I0])
for i in xrange(Nt-1):
self.positions, self.intensities = self.update_positions(bounds)
def generate_more_steps(self,Nt,bounds=[(0,1),(0,1)]):
print("Simulating {} more steps...".format(Nt))
for i in xrange(Nt):
self.positions, self.intensities = self.update_positions(bounds)
def generate_LM_traj(self):
LM_traj = np.zeros((self.n,self.positions.shape[0]))
for i in xrange(self.n):
LM_traj[i,:] = i
for t in xrange(self.positions.shape[0]):
if np.isnan(self.positions[t,0,i]):
LM_traj[i,t] = np.nan
self.LM_traj = LM_traj
def get_positions(self,shuffle=False):
"""
Return positions as well as original Linkage Matrix (LM).
"""
self.generate_LM_traj()
if shuffle:
for i in xrange(self.positions.shape[0]):
idx_shuffle = np.arange(self.positions.shape[2])
np.random.shuffle(idx_shuffle)
self.positions[i,:,:] = self.positions[i,:,(idx_shuffle)].T
self.intensities[i] = self.intensities[i,(idx_shuffle)]
self.LM_traj[:,i] = self.LM_traj[(idx_shuffle),i]
new_LM_traj = np.zeros(self.LM_traj.shape)
new_LM_traj[:] = np.nan
for j,config in enumerate(self.LM_traj.T):
for i,ix in enumerate(config):
if ix==ix:
new_LM_traj[ix,j] = i
self.LM_traj = new_LM_traj
return self.positions, self.LM_traj, self.intensities
def coordinates_interpreter(path):
"""
Reads the coordinates file produced by ImagePeakClassifier.
"""
with open(path,'r') as f:
lines = map(lambda x: x.rstrip('\n'),f.readlines())
positions = []
for i,line in enumerate(lines):
if 'frame' in line and i>0:
positions += [pos]
pos = []
elif i==0 and 'frame' in line:
pos = []
else:
pos += [map(int,line.split())]
print("num frames {} num positions each frame {}".format(len(positions),[len(v) for v in positions]))
max_num_pos = max([len(v) for v in positions])
num_t = len(positions)
arr_positions = np.zeros((num_t,2,max_num_pos))
arr_positions[:] = np.nan
for i,pos in enumerate(positions):
for j,particle in enumerate(pos):
arr_positions[i,:,j] = np.array(particle)
intensities = np.ones((num_t,max_num_pos))
return arr_positions, intensities
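# Illustrative usage sketch (not part of the original file).  Note that the
# class above relies on Python 2 built-ins (xrange, reduce), so this snippet
# assumes a Python 2 interpreter; all parameter values are arbitrary.
if __name__ == "__main__":
    source = TrajectorySource(p_nd=0.9, n=5,
                              ran_dis_spec={'paras': {'mu': 0.0, 'sig': 0.05}, 'type': 'gaussian'})
    source.generate_initial(Nt=20, bounds=[(0, 1), (0, 1)])
    source.generate_more_steps(10)
    positions, linkage_matrix, intensities = source.get_positions()
    print("positions shape: {}".format(positions.shape))  # (30, 2, 5): (time, dimension, particle slot)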
|
Hamstard/AtomStalker
|
AtomStalker/TrajectorySimulator.py
|
Python
|
mit
| 9,717
|
[
"Gaussian"
] |
0de368bf6043bdf1bdef0068141a3e0c895586f9ce5e703f62a517fa835c590a
|
"""
Comparison of Dimension Reduction Techniques
--------------------------------------------
A comparison of several different dimension reduction
techniques on a variety of toy datasets. The datasets
are all toy datasets, but should provide a representative
range of the strengths and weaknesses of the different
algorithms.
The time to perform the dimension reduction with each
algorithm and each dataset is recorded in the lower
right of each plot.
Things to note about the datasets:
- Blobs: A set of five gaussian blobs in 10 dimensional
space. This should be a prototypical example
of something that should clearly separate
even in a reduced dimension space.
- Iris: a classic small dataset with one distinct class
and two classes that are not clearly separated.
- Digits: handwritten digits -- ideally different digit
classes should form distinct groups. Due to
the nature of handwriting digits may have several
forms (crossed or uncrossed sevens, capped or
straight line ones, etc.)
- Wine: wine characteristics ideally used for a toy
regression. Ultimately the data is essentially
one dimensional in nature.
- Swiss Roll: data is essentially a rectangle, but
has been "rolled up" like a swiss roll
in three dimensional space. Ideally a
dimension reduction technique should
be able to "unroll" it. The data
has been coloured according to one dimension
of the rectangle, so should form
a rectangle of smooth color variation.
- Sphere: the two dimensional surface of a three
dimensional sphere. This cannot be represented
accurately in two dimensions without tearing.
The sphere has been coloured with hue around
the equator and black to white from the south
to north pole.
"""
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import time
from sklearn import datasets, decomposition, manifold, preprocessing
from colorsys import hsv_to_rgb
import umap
sns.set(context="paper", style="white")
blobs, blob_labels = datasets.make_blobs(
n_samples=500, n_features=10, centers=5, random_state=42
)
iris = datasets.load_iris()
digits = datasets.load_digits(n_class=10)
wine = datasets.load_wine()
swissroll, swissroll_labels = datasets.make_swiss_roll(
n_samples=1000, noise=0.1, random_state=42
)
sphere = np.random.normal(size=(600, 3))
sphere = preprocessing.normalize(sphere)
sphere_hsv = np.array(
[
(
(np.arctan2(c[1], c[0]) + np.pi) / (2 * np.pi),
np.abs(c[2]),
min((c[2] + 1.1), 1.0),
)
for c in sphere
]
)
sphere_colors = np.array([hsv_to_rgb(*c) for c in sphere_hsv])
reducers = [
(manifold.TSNE, {"perplexity": 50}),
# (manifold.LocallyLinearEmbedding, {'n_neighbors':10, 'method':'hessian'}),
(manifold.Isomap, {"n_neighbors": 30}),
(manifold.MDS, {}),
(decomposition.PCA, {}),
(umap.UMAP, {"n_neighbors": 30, "min_dist": 0.3}),
]
test_data = [
(blobs, blob_labels),
(iris.data, iris.target),
(digits.data, digits.target),
(wine.data, wine.target),
(swissroll, swissroll_labels),
(sphere, sphere_colors),
]
dataset_names = ["Blobs", "Iris", "Digits", "Wine", "Swiss Roll", "Sphere"]
n_rows = len(test_data)
n_cols = len(reducers)
ax_index = 1
ax_list = []
# plt.figure(figsize=(9 * 2 + 3, 12.5))
plt.figure(figsize=(10, 8))
plt.subplots_adjust(
left=0.02, right=0.98, bottom=0.001, top=0.96, wspace=0.05, hspace=0.01
)
for data, labels in test_data:
for reducer, args in reducers:
start_time = time.time()
embedding = reducer(n_components=2, **args).fit_transform(data)
elapsed_time = time.time() - start_time
ax = plt.subplot(n_rows, n_cols, ax_index)
if isinstance(labels[0], tuple):
ax.scatter(*embedding.T, s=10, c=labels, alpha=0.5)
else:
ax.scatter(*embedding.T, s=10, c=labels, cmap="Spectral", alpha=0.5)
ax.text(
0.99,
0.01,
"{:.2f} s".format(elapsed_time),
transform=ax.transAxes,
size=14,
horizontalalignment="right",
)
ax_list.append(ax)
ax_index += 1
plt.setp(ax_list, xticks=[], yticks=[])
for i in np.arange(n_rows) * n_cols:
ax_list[i].set_ylabel(dataset_names[i // n_cols], size=16)
for i in range(n_cols):
ax_list[i].set_xlabel(repr(reducers[i][0]()).split("(")[0], size=16)
ax_list[i].xaxis.set_label_position("top")
plt.tight_layout()
plt.show()
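# Standalone illustration (not part of the original example) of the
# (reducer class, kwargs) pattern used in `reducers` above: each entry is
# instantiated as reducer(n_components=2, **args) and applied with
# fit_transform, so any scikit-learn estimator following that interface can be
# added, e.g. a linear TruncatedSVD baseline.
extra_reducer, extra_args = decomposition.TruncatedSVD, {}
extra_embedding = extra_reducer(n_components=2, **extra_args).fit_transform(digits.data)
print("TruncatedSVD embedding shape:", extra_embedding.shape)  # (1797, 2)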
|
lmcinnes/umap
|
examples/plot_algorithm_comparison.py
|
Python
|
bsd-3-clause
| 4,675
|
[
"Gaussian"
] |
e2742d59592e066c8d7485eae1eaf964599663e47fe9d5fe560739c2b70c40fb
|
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from PyQt4 import QtGui
from PyQt4 import QtCore
from camelot.core.utils import ugettext_lazy as _
from camelot.core.utils import ugettext, variant_to_pyobject
from camelot.view.art import Icon
import logging
logger = logging.getLogger('camelot.view.wizard.pages.backup_page')
class LabelLineEdit(QtGui.QLineEdit):
_file_name = ''
def __init__(self, storage, parent=None):
super(LabelLineEdit, self).__init__(parent)
self.textChanged.connect(self._onTextChanged)
self._storage = storage
def _onTextChanged(self, text):
if text == '':
self._file_name = ''
else:
file_name = '%s.db'%text
if self._storage.exists(file_name):
self._file_name = ''
else:
self._file_name = file_name
def filename(self):
return self._file_name
class LabelComboBox(QtGui.QComboBox):
_file_name = ''
def __init__(self, storage, parent=None):
super(LabelComboBox, self).__init__(parent)
self._storage = storage
self._setDefaultLabels()
self.currentIndexChanged[int].connect(self._onCurrentIndexChanged)
def _setDefaultLabels(self):
for i, stored_file in enumerate(self._storage.list()):
if i == 0:
self._file_name = stored_file.name
self.addItem( unicode(stored_file.verbose_name), QtCore.QVariant(stored_file))
def _onCurrentIndexChanged(self, index):
self._file_name = variant_to_pyobject( self.itemData(index) ).name
def filename(self):
return self._file_name
class Page(QtGui.QWizardPage):
title = _('Select backup file')
sub_title = _('Please select a backup file. All data in this file will be overwritten.')
icon = Icon('tango/32x32/actions/document-save.png')
caption = _('Select file')
extension = '.db'
def __init__(self, backup_mechanism=None, parent=None):
self.backup_mechanism = backup_mechanism
super(Page, self).__init__(parent)
self.setTitle( unicode(self.title) )
self.setSubTitle( unicode(self.sub_title) )
self.setPixmap(QtGui.QWizard.LogoPixmap, self.icon.getQPixmap())
self._storage = backup_mechanism.get_default_storage()
self._setupUi()
# final touches - select the default radio button
self._default_radio.setChecked(True)
self._showWidgets(self._default_radio)
def _setupUi(self):
# controls
self._default_radio = QtGui.QRadioButton(ugettext('Default Location'))
self._custom_radio = QtGui.QRadioButton(ugettext('Custom Location'))
self._custom_edit = QtGui.QLineEdit()
self._custom_button = QtGui.QPushButton(ugettext('Browse...'))
button_group = QtGui.QButtonGroup(self)
button_group.addButton(self._default_radio)
button_group.addButton(self._custom_radio)
# layout
layout = QtGui.QVBoxLayout()
layout.addWidget(self._default_radio)
self._hlayout = QtGui.QHBoxLayout()
layout.addLayout(self._hlayout)
layout.addWidget(self._custom_radio)
hlayout2 = QtGui.QHBoxLayout()
hlayout2.addWidget(self._custom_edit)
hlayout2.addWidget(self._custom_button)
layout.addLayout(hlayout2)
self.setLayout(layout)
# connect signals to slots
button_group.buttonClicked[QtGui.QAbstractButton].connect(self._showWidgets)
button_group.buttonClicked[QtGui.QAbstractButton].connect(self.completeChanged)
self._custom_button.clicked.connect(self._customButtonClicked)
self._custom_edit.textChanged.connect(self.completeChanged)
def _showWidgets(self, selection):
default_selected = self._isDefaultSelected(selection)
self._custom_edit.setVisible(not default_selected)
self._custom_button.setVisible(not default_selected)
def _isDefaultSelected(self, selection):
return (selection == self._default_radio)
def _customButtonClicked(self):
settings = QtCore.QSettings()
dir = settings.value('custom_filename').toString()
path = self._setPath(dir)
if path:
self._custom_edit.setText(QtCore.QDir.toNativeSeparators(path))
class SelectRestoreFilePage(Page):
title = _('Select restore file')
sub_title = _( "Please select a backup file from which to restore the database. All data in the database will be overwritten with data from this file" )
icon = Icon('tango/32x32/devices/drive-harddisk.png')
def __init__(self, parent=None):
super(SelectRestoreFilePage, self).__init__(parent)
self.setCommitPage(True)
if self._default_combo.count() == 0:
self._default_radio.setEnabled(False)
self._custom_radio.setChecked(True)
self._showWidgets(self._custom_radio)
def _setupUi(self):
super(SelectRestoreFilePage, self)._setupUi()
self._default_combo = LabelComboBox(self._storage)
self._default_combo.currentIndexChanged[int].connect(self.completeChanged)
self._hlayout.addWidget(self._default_combo)
def _showWidgets(self, selection):
default_selected = self._isDefaultSelected(selection)
self._default_combo.setVisible(default_selected)
super(SelectRestoreFilePage, self)._showWidgets(selection)
def isComplete(self):
default_selected = self._default_radio.isChecked()
if default_selected:
self.wizard().filename = self._default_combo.filename()
self.wizard().storage = self._storage
return self._default_combo.filename() != ''
else:
self.wizard().filename = self._custom_edit.text()
self.wizard().storage = None
return self._custom_edit.text() != ''
def _setPath(self, dir):
path = QtGui.QFileDialog.getOpenFileName(
self, unicode(self.caption), dir, ugettext('Database files (*%s);;All files (*.*)' % self.extension),
)
return path
class SelectBackupFilePage(Page):
def __init__(self, backup_mechanism):
super(SelectBackupFilePage, self).__init__(backup_mechanism)
self.setCommitPage(True)
def _setupUi(self):
from camelot.view.model_thread import post
self._default_label = QtGui.QLabel(ugettext('Label:'))
self._default_edit = LabelLineEdit(self._storage)
self._default_label.setBuddy(self._default_edit)
super(SelectBackupFilePage, self)._setupUi()
self._hlayout.addWidget(self._default_label)
self._hlayout.addWidget(self._default_edit)
self._default_edit.textChanged.connect(self._onDefaultEditChanged)
self._default_edit.textChanged.connect(self.completeChanged)
post(self._get_default_label, self._set_default_label)
def _set_default_label(self, label):
self._default_edit.setText(label)
def _onDefaultEditChanged(self, text):
if self._default_radio.isChecked():
self.wizard().filename = self._default_edit.filename()
def _get_default_label(self):
locale = QtCore.QLocale()
format = locale.dateTimeFormat(locale.ShortFormat)
formatted_date_time = QtCore.QDateTime.currentDateTime().toString(format)
# replace all non-ascii chars with underscores
import string
formatted_date_time_str = unicode(formatted_date_time)
for c in formatted_date_time_str:
if c not in string.ascii_letters and c not in string.digits:
formatted_date_time_str = formatted_date_time_str.replace(c, '_')
filename_prefix = self.backup_mechanism.get_filename_prefix()
formatted_date_time_str = '-'.join([filename_prefix, formatted_date_time_str])
return formatted_date_time_str
def _showWidgets(self, selection):
default_selected = self._isDefaultSelected(selection)
self._default_label.setVisible(default_selected)
self._default_edit.setVisible(default_selected)
super(SelectBackupFilePage, self)._showWidgets(selection)
def isComplete(self):
default_selected = self._default_radio.isChecked()
if default_selected:
self.wizard().storage = self._storage
self.wizard().filename = self._default_edit.filename()
return self._default_edit.filename() != ''
else:
self.wizard().storage = None
self.wizard().filename = self._custom_edit.text()
return self._custom_edit.text() != ''
def _setPath(self, dir):
path = QtGui.QFileDialog.getSaveFileName(
self, unicode(self.caption), dir, ugettext('Database files (*%s);;All files (*.*)' % self.extension),
)
return path
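# Hypothetical usage sketch (not part of the original module).  The pages above
# are ordinary QWizardPage subclasses; `backup_mechanism` stands in for a
# camelot BackupMechanism-like object providing get_default_storage() and
# get_filename_prefix(), which is all these pages rely on.
def build_backup_wizard(backup_mechanism):
    wizard = QtGui.QWizard()
    wizard.setWindowTitle(unicode(_('Backup')))
    wizard.addPage(SelectBackupFilePage(backup_mechanism))
    return wizard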
|
kurtraschke/camelot
|
camelot/view/wizard/pages/backup_page.py
|
Python
|
gpl-2.0
| 9,943
|
[
"VisIt"
] |
3beedee8d29e48027ab538adbbcb7cec854303a6bdd26e53635b1acd2a2e0b40
|
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def pubdev_random_cv():
cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
response_col = "economy"
distribution = "gaussian"
predictors = ["displacement","power","weight","acceleration","year"]
gbm1 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=3, distribution=distribution,
fold_assignment="Random")
gbm2 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=3, distribution=distribution,
fold_assignment="Random")
mse1 = gbm1.mse(xval=True)
mse2 = gbm2.mse(xval=True)
assert mse1 != mse2, "The first model has an MSE of {0} and the second model has an MSE of {1}. Expected the " \
"first to be different from the second.".format(mse1, mse2)
if __name__ == "__main__":
pyunit_utils.standalone_test(pubdev_random_cv)
else:
pubdev_random_cv()
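# Related sketch (not part of the original test), written against the same
# legacy h2o-py API used above: with a deterministic fold assignment such as
# "Modulo", two identical runs are expected to give matching cross-validated
# MSE values.  The results are printed rather than asserted, since that
# guarantee is an assumption of this sketch, not something the original test
# establishes.
def pubdev_modulo_cv():
    cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
    predictors = ["displacement","power","weight","acceleration","year"]
    gbm1 = h2o.gbm(y=cars["economy"], x=cars[predictors], nfolds=3, distribution="gaussian",
                   fold_assignment="Modulo")
    gbm2 = h2o.gbm(y=cars["economy"], x=cars[predictors], nfolds=3, distribution="gaussian",
                   fold_assignment="Modulo")
    print("Modulo CV MSE: {0} vs {1}".format(gbm1.mse(xval=True), gbm2.mse(xval=True)))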
|
madmax983/h2o-3
|
h2o-py/tests/testdir_jira/pyunit_pubdev_1752_random_cv.py
|
Python
|
apache-2.0
| 992
|
[
"Gaussian"
] |
b84fea55749b2a20ec0c2e98373d9e301d213ed64482fcc0405bd6e49182947e
|
#
# Author: Henrique Pereira Coutada Miranda
# Check the convergence of the coulomb cutoff for a BSE calculation using yambo
#
from __future__ import print_function
from yambopy import *
from qepy import *
from schedulerpy import *
from functools import partial
import multiprocessing
import argparse
import sys
prefix = "bn"
yambo = "yambo"
p2y = 'p2y'
pw = 'pw.x'
layer_separations = [10,15,20,25,30,35,40]
scf_kpoints = [ 9, 9,1]
nscf_kpoints = [12,12,1]
nbands = 20
ecutwf = 50
scheduler = Scheduler.factory
# create the quantum espresso input file
def get_inputfile():
""" Define a Quantum espresso input file for boron nitride
"""
qe = PwIn()
qe.atoms = [['N',[ 0.0, 0.0,0.5]],
['B',[1./3,2./3,0.5]]]
qe.atypes = {'B': [10.811, "B.pbe-mt_fhi.UPF"],
'N': [14.0067,"N.pbe-mt_fhi.UPF"]}
qe.control['prefix'] = "'%s'"%prefix
qe.control['wf_collect'] = '.true.'
qe.control['pseudo_dir'] = "'../../../pseudos/'"
qe.system['celldm(1)'] = 4.7
qe.system['celldm(3)'] = 14/qe.system['celldm(1)']
qe.system['ecutwfc'] = ecutwf
qe.system['occupations'] = "'fixed'"
qe.system['nat'] = 2
qe.system['ntyp'] = 2
qe.system['ibrav'] = 4
qe.kpoints = scf_kpoints
qe.electrons['conv_thr'] = 1e-10
return qe
#run the self consistent calculation
def scf(layer_separation,folder='scf'):
if not os.path.isdir(folder):
os.makedirs(folder)
qe = get_inputfile()
qe.system['celldm(3)'] = layer_separation/qe.system['celldm(1)']
qe.control['calculation'] = "'scf'"
qe.write('%s/%s.scf'%(folder,prefix))
#run the non-self consistent calculation
def nscf(layer_separation,folder='nscf'):
if not os.path.isdir(folder):
os.makedirs(folder)
qe = get_inputfile()
qe.control['calculation'] = "'nscf'"
qe.electrons['diago_full_acc'] = ".true."
qe.electrons['conv_thr'] = 1e-8
qe.system['nbnd'] = nbands
qe.system['force_symmorphic'] = ".true."
qe.system['celldm(3)'] = layer_separation/qe.system['celldm(1)']
qe.kpoints = nscf_kpoints
qe.write('%s/%s.nscf'%(folder,prefix))
def database(shell,output_folder,nscf_folder='nscf'):
if not os.path.isdir('%s/SAVE'%output_folder):
print('preparing yambo database...')
shell.add_command('mkdir -p %s'%nscf_folder)
shell.add_command('pushd %s/%s.save; %s; %s'%(nscf_folder,prefix,p2y,yambo))
shell.add_command('popd')
shell.add_command('mv %s/%s.save/SAVE %s'%(nscf_folder,prefix,output_folder))
print('done!')
def run_job(layer_separation,nthreads=1,work_folder='bse_cutoff',cut=False):
"""
Given a layer separation run the calculation
1. scf calculation with QE
2. nscf calculation
3. BSE with yambo
"""
#check if the calculation exists
done_stamp = '%s/%d/done'%(work_folder,layer_separation)
print(done_stamp)
if os.path.isfile(done_stamp):
return
print("layer separation: %d bohr cutoff:"%layer_separation, cut)
root_folder = "%s/%d"%(work_folder,layer_separation)
shell = scheduler()
if not os.path.isdir(root_folder):
shell.add_command( 'mkdir -p %s'%root_folder )
# 1. run the ground state calculation
print("scf cycle")
print("kpoints",scf_kpoints)
scf(layer_separation,folder="%s/scf"%root_folder)
shell.add_command("pushd %s/scf; mpirun -np %d %s < %s.scf > scf.log"%(root_folder,nthreads,pw,prefix))
shell.add_command("popd")
# 2. run the non self consistent calculation
print("nscf cycle")
print("kpoints",nscf_kpoints)
src ='%s/scf/%s.save'%(root_folder,prefix)
dst ='%s/nscf/%s.save'%(root_folder,prefix)
nscf(layer_separation,folder="%s/nscf"%root_folder)
shell.add_command('cp -r %s %s'%(src,dst) )
shell.add_command("pushd %s/nscf; mpirun -np %d %s < %s.nscf > nscf.log"%(root_folder,nthreads,pw,prefix))
shell.add_command('popd')
# generate the database
database(shell,'%s'%root_folder,nscf_folder="%s/nscf"%root_folder)
shell.run()
#wait for execution
# 3. calculate the absorption spectra
y = YamboIn('mpirun -np %d yambo -r -b -o b -k sex -y d -V all'%nthreads,folder=root_folder)
if cut:
y['CUTGeo'] = 'box z'
y['CUTBox'] = [0,0,layer_separation-2]
y['RandQpts'] = 1000000
y['RandGvec'] = [1,'Ry']
y['FFTGvecs'] = [20,'Ry']
y['NGsBlkXs'] = [1,'Ry'] #local field effects
y['BndsRnXs'] = [1,nbands] #number of bands for static screening
y['KfnQP_E'] = [2.91355133,1.0,1.0] #scissor operator
y['BSEBands'] = [4,5] #number of bands in BSE kernel
y['BEnRange'] = [[4.0,8.0],'eV'] #energy range to plot optical absorption
y['BEnSteps'] = 500 #energy steps in the range
y.write('%s/yambo_run.in'%root_folder)
shell = scheduler()
shell.add_command('cd %s; %s -F yambo_run.in -J %d'%(root_folder,yambo,layer_separation))
shell.add_command('touch done')
shell.run()
def run(mpthreads=1,nthreads=1,work_folder='bse_cutoff',cut=True):
if (mpthreads > 1):
p = multiprocessing.Pool(mpthreads) # worker pool size follows the -mp option
run = partial(run_job,nthreads=nthreads,work_folder=work_folder,cut=cut)
try:
#reversed list because of load imbalance
p.map(run, reversed(layer_separations))
except KeyboardInterrupt:
print("Caught KeyboardInterrupt, terminating workers")
p.terminate()
p.join()
else:
for layer_separation in layer_separations:
run_job(layer_separation,nthreads=nthreads,work_folder=work_folder,cut=cut)
def plot(work_folder,filename,cut):
ax = plt.gca()
for layer_separation in layer_separations:
root_folder = "%s/%d"%(work_folder,layer_separation)
#gather the results
pack_files_in_folder(root_folder)
#plot the results
ya = YamboAnalyser(work_folder)
print(ya)
ax = ya.plot_bse('eps',ax=ax)
if cut: title = "with coulomb cutoff"
else: title = "without coulomb cutoff"
plt.title(title)
if filename is None: filename = "%s.pdf"%work_folder
plt.savefig(filename)
plt.show()
if __name__ == "__main__":
#parse options
parser = argparse.ArgumentParser(description='Convergence test of the Coulomb cutoff')
parser.add_argument('-r' ,'--run', action="store_true", help='Run the calculation')
parser.add_argument('-c' ,'--cut', action="store_true", help='Use coulomb cutoff')
parser.add_argument('-p' ,'--plot', action="store_true", help='Run the analysis')
parser.add_argument('-f' ,'--plotfile', help='name of the plot file', default=None)
parser.add_argument('-t' ,'--nthreads', help='threads for yambo', default=1, type=int)
parser.add_argument('-mp' ,'--mpthreads', help='threads using python multiprocessing module', default=1, type=int)
args = parser.parse_args()
print("yambo using %d threads"%args.nthreads)
print("multiprocessing using %d threads"%args.mpthreads)
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
cut = args.cut
#choose work_folder
if cut:
work_folder = "bse_cutoff_cut"
else:
work_folder = "bse_cutoff"
if args.run:
run(args.mpthreads,args.nthreads,work_folder,cut)
if args.plot:
plot(work_folder,args.plotfile,cut)
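# Example invocations (illustrative, based on the argparse options defined above):
#
#   python bse_cutoff.py -r -t 4            # run the study without the Coulomb cutoff, 4 yambo threads
#   python bse_cutoff.py -r -c -mp 2        # run with the Coulomb cutoff, 2 layer separations in parallel
#   python bse_cutoff.py -p -c -f cut.pdf   # gather and plot the cutoff results into cut.pdf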
|
henriquemiranda/yambopy
|
tutorial/bn/bse_cutoff.py
|
Python
|
bsd-3-clause
| 7,445
|
[
"Quantum ESPRESSO",
"Yambo"
] |
b2c8170331403a5a606867a8595e09db6cc402369250a1484a5142c91075f652
|
"""
Test Display Options
"""
__RCSID__ = "$Id$"
import unittest
import thread
from DIRAC.FrameworkSystem.test.testLogging.Test_Logging import Test_Logging, cleaningLog
from DIRAC.FrameworkSystem.test.testLogging.Test_Logging import gLogger, oldgLogger
class Test_DisplayOptions(Test_Logging):
"""
Test the display options (show headers / show thread IDs) of gLogger and its subloggers
"""
def setUp(self):
super(Test_DisplayOptions, self).setUp()
self.filename = '/tmp/logtmp.log'
with open(self.filename, "w"):
pass
def test_00setShowHeaders(self):
"""
Set the headers
"""
gLogger.showHeaders(False)
gLogger.notice('message')
oldgLogger.showHeaders(False)
oldgLogger.notice('message')
self.assertEqual("message \n", self.buffer.getvalue())
self.assertEqual(self.buffer.getvalue().replace(" ", ""), self.oldbuffer.getvalue().replace(" ", ""))
self.buffer.truncate(0)
self.oldbuffer.truncate(0)
gLogger.showHeaders(True)
gLogger.notice('message')
oldgLogger.showHeaders(True)
oldgLogger.notice('message')
logstring1 = cleaningLog(self.buffer.getvalue())
logstring2 = cleaningLog(self.oldbuffer.getvalue())
self.assertEqual("UTCFrameworkNOTICE:message\n", logstring1)
self.assertEqual(logstring1, logstring2)
self.buffer.truncate(0)
self.oldbuffer.truncate(0)
def test_01setShowThreadIDs(self):
"""
Set the thread ID
Differences between the two systems :
- gLogger: threadID [1254868214]
- old gLogger: threadID [GEko]
"""
gLogger.showThreadIDs(False)
gLogger.notice('message')
oldgLogger.showThreadIDs(False)
oldgLogger.notice('message')
logstring1 = cleaningLog(self.buffer.getvalue())
logstring2 = cleaningLog(self.oldbuffer.getvalue())
self.assertEqual("UTCFrameworkNOTICE:message\n", logstring1)
self.assertEqual(logstring1, logstring2)
self.buffer.truncate(0)
self.oldbuffer.truncate(0)
gLogger.showThreadIDs(True)
gLogger.notice('message')
oldgLogger.showThreadIDs(True)
oldgLogger.notice('message')
logstring1 = cleaningLog(self.buffer.getvalue())
logstring2 = cleaningLog(self.oldbuffer.getvalue())
self.assertIn(str(thread.get_ident()), logstring1)
self.assertNotEqual(logstring1, logstring2)
self.buffer.truncate(0)
self.oldbuffer.truncate(0)
def test_02setShowThreadIDsHeaders(self):
"""
Set the Header and ThreadID options together, in all combinations
"""
gLogger.showHeaders(False)
gLogger.showThreadIDs(False)
gLogger.notice('message')
oldgLogger.showHeaders(False)
oldgLogger.showThreadIDs(False)
oldgLogger.notice('message')
self.assertEqual("message \n", self.buffer.getvalue())
self.assertEqual(self.buffer.getvalue().replace(" ", ""), self.oldbuffer.getvalue().replace(" ", ""))
self.buffer.truncate(0)
self.oldbuffer.truncate(0)
gLogger.showHeaders(False)
gLogger.showThreadIDs(True)
gLogger.notice('message')
oldgLogger.showHeaders(False)
oldgLogger.showThreadIDs(True)
oldgLogger.notice('message')
self.assertEqual("message \n", self.buffer.getvalue())
self.assertEqual(self.buffer.getvalue().replace(" ", ""), self.oldbuffer.getvalue().replace(" ", ""))
self.buffer.truncate(0)
self.oldbuffer.truncate(0)
gLogger.showHeaders(True)
gLogger.showThreadIDs(False)
gLogger.notice('message')
oldgLogger.showHeaders(True)
oldgLogger.showThreadIDs(False)
oldgLogger.notice('message')
logstring1 = cleaningLog(self.buffer.getvalue())
logstring2 = cleaningLog(self.oldbuffer.getvalue())
self.assertEqual("UTCFrameworkNOTICE:message\n", logstring1)
self.assertEqual(logstring1, logstring2)
self.buffer.truncate(0)
self.oldbuffer.truncate(0)
gLogger.showHeaders(True)
gLogger.showThreadIDs(True)
gLogger.notice('message')
oldgLogger.showHeaders(True)
oldgLogger.showThreadIDs(True)
oldgLogger.notice('message')
logstring1 = cleaningLog(self.buffer.getvalue())
logstring2 = cleaningLog(self.oldbuffer.getvalue())
self.assertIn(str(thread.get_ident()), logstring1)
self.assertNotEqual(logstring1, logstring2)
self.buffer.truncate(0)
self.oldbuffer.truncate(0)
def test_03setSubLogShowHeaders(self):
"""
Create a sublogger and set it its own Header option.
"""
sublog = gLogger.getSubLogger('sublog')
sublog.setLevel('notice')
sublog.showHeaders(False)
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message \n")
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual(logstring1, "UTCFramework/sublogNOTICE:message\n")
def test_04SubLogShowHeadersChange(self):
"""
Create a sublogger and show that its Header option follow the change of its parent Header option.
"""
sublog = gLogger.getSubLogger('sublog2')
sublog.setLevel('notice')
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
gLogger.showHeaders(False)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message \n")
self.assertEqual(self.buffer.getvalue(), "message \n")
def test_05setSubLoggLoggerShowHeaders(self):
"""
Create a sublogger, set its Header option and the Header option of the gLogger.
Show that its Header option do not follow the change of its parent Header option.
"""
sublog = gLogger.getSubLogger('sublog3')
sublog.setLevel('notice')
sublog.showHeaders(False)
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
gLogger.showHeaders(True)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message \n")
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual(logstring1, "UTCFramework/sublog3NOTICE:message\n")
def test_06setSubLoggLoggerShowHeadersInverse(self):
"""
Create a sublogger, set the Header option of the gLogger and its Header option.
Show that the gLogger Header option do not follow the change of its child Header option.
"""
sublog = gLogger.getSubLogger('sublog4')
sublog.setLevel('notice')
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
gLogger.showHeaders(True)
sublog.showHeaders(False)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message \n")
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual(logstring1, "UTCFramework/sublog4NOTICE:message\n")
def test_07subLogShowHeadersChange(self):
"""
Create a subsublogger and show that its Header option follow the change of its parent Header option.
"""
sublog = gLogger.getSubLogger('sublog5')
sublog.setLevel('notice')
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
subsublog = sublog.getSubLogger('subsublog')
subsublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
gLogger.showHeaders(False)
subsublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message \nmessage \n")
self.assertEqual(self.buffer.getvalue(), "message \n")
def test_07subLogShowHeadersChangeSetSubLogger(self):
"""
Create a subsublogger and show that its Header option follow the change of its parent Header option.
"""
sublog = gLogger.getSubLogger('sublog6')
sublog.setLevel('notice')
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
subsublog = sublog.getSubLogger('subsublog')
subsublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
sublog.showHeaders(False)
subsublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual(message, "message \nmessage \n")
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual(logstring1, "UTCFramework/sublog6/subsublogNOTICE:message\n")
def test_09subLogShowHeadersChangeSetSubLogger(self):
"""
Create a subsublogger and set its Header option and show that
its Header option do not follow the change of its parent Header option.
"""
sublog = gLogger.getSubLogger('sublog7')
sublog.setLevel('notice')
sublog.registerBackend('file', {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
subsublog = sublog.getSubLogger('subsublog')
subsublog.registerBackends(['file'], {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
sublog.showHeaders(False)
subsublog.showHeaders(True)
subsublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertIn("UTC Framework/sublog7/subsublog NOTICE: message \nmessage \n", message)
logstring1 = cleaningLog(self.buffer.getvalue())
self.assertEqual(logstring1, "UTCFramework/sublog7/subsublogNOTICE:message\n")
def test_10gLoggerShowHeadersChange2Times(self):
"""
Create a sublogger with a file backend and change the Header option of gLogger 2 times
in order to verify the propagation.
"""
sublog = gLogger.getSubLogger('sublog8')
sublog.registerBackends(['file'], {'FileName': self.filename})
# Empty the buffer to remove the Object Loader log message "trying to load..."
self.buffer.truncate(0)
gLogger.showHeaders(False)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertEqual("message \n", message)
gLogger.showHeaders(True)
sublog.notice("message")
with open(self.filename) as file:
message = file.read()
self.assertIn("UTC Framework/sublog8 NOTICE: message \n", message)
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test_DisplayOptions)
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
|
hgiemza/DIRAC
|
FrameworkSystem/test/testLogging/Test_DisplayOptions.py
|
Python
|
gpl-3.0
| 11,122
|
[
"DIRAC"
] |
f57f1aaf77f78e96737b4bc1cf5cf71c75a8e266c512dbb179426703e4e2c8cd
|
# The MIT License (MIT)
#
# Copyright (c) 2015 Brian Wray (brian@wrocket.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import subprocess
import json
import unittest
def call_tulip(args):
cmd = ['../../src/tulip']
cmd.extend(args)
out = subprocess.check_output(cmd)
return out.decode('utf-8')
class TestBook(unittest.TestCase):
def setUp(self):
        pass
def get_book_moves(self, bookfile, fen):
result = call_tulip(['-bookmoves', bookfile, fen])
parsed_output = json.loads(result)
return parsed_output['moveList']
def test_no_bookfile(self):
result = self.get_book_moves('this_doesnt_exist.asdf.sqlite', 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1')
self.assertEqual(0, len(result))
if __name__ == '__main__':
unittest.main()
|
wrocket/Tulip-Chess
|
tests/basic-tests/test_book.py
|
Python
|
mit
| 1,837
|
[
"Brian"
] |
af1b88de3972c3c5b28952a30c73406d67e4bff5e78c52d1bdb0c2f58321a8fe
|
#### import the simple module from the paraview
from paraview.simple import *
#from paraview_find_arrayname import get_Para_Array_name
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
# create a new 'PVD Reader'
#import os
#terminal_path=os.getcwd() + "\n"
#cSF_Subdomains_unrefpvd = PVDReader(FileName=terminal_path+'/Results_adaptive/parallel_Subdomains.pvd')
import os
import sys
def make_a_screenshot(path_to_insert):
# create a new 'PVD Reader'
home_dir=os.path.expanduser("~")
cSF_Subdomains_adapted = PVDReader(FileName=[home_dir+path_to_insert+'/Field_solutions/parallel_Subdomains.pvd'])
# get active view
renderView1 = GetActiveViewOrCreate('RenderView')
# uncomment following to set a specific view size
# renderView1.ViewSize = [1164, 808]
# get color transfer function/color map for 'f'
fLUT = GetColorTransferFunction('f')
# show data in view
cSF_Subdomains_unrefpvdDisplay = Show(cSF_Subdomains_adapted, renderView1)
# trace defaults for the display properties.
cSF_Subdomains_unrefpvdDisplay.ColorArrayName = ['CELLS', 'f']
cSF_Subdomains_unrefpvdDisplay.LookupTable = fLUT
cSF_Subdomains_unrefpvdDisplay.GlyphType = 'Arrow'
cSF_Subdomains_unrefpvdDisplay.ScalarOpacityUnitDistance = 0.42132744935143623
# reset view to fit data
renderView1.ResetCamera()
# show color bar/color legend
cSF_Subdomains_unrefpvdDisplay.SetScalarBarVisibility(renderView1, True)
# get opacity transfer function/opacity map for 'f'
fPWF = GetOpacityTransferFunction('f')
# create a new 'Extract Cells By Region'
extractCellsByRegion1 = ExtractCellsByRegion(Input=cSF_Subdomains_adapted)
extractCellsByRegion1.IntersectWith = 'Plane'
# init the 'Plane' selected for 'IntersectWith'
extractCellsByRegion1.IntersectWith.Origin=[100.226087,135.58128000000002,22.247337]
# show data in view
extractCellsByRegion1Display = Show(extractCellsByRegion1, renderView1)
# trace defaults for the display properties.
extractCellsByRegion1Display.ColorArrayName = ['CELLS', 'f']
extractCellsByRegion1Display.LookupTable = fLUT
extractCellsByRegion1Display.GlyphType = 'Arrow'
extractCellsByRegion1Display.ScalarOpacityUnitDistance = 0.5033337160022229
# hide data in view
Hide(cSF_Subdomains_adapted, renderView1)
# show color bar/color legend
extractCellsByRegion1Display.SetScalarBarVisibility(renderView1, True)
# Properties modified on extractCellsByRegion1.IntersectWith
extractCellsByRegion1.IntersectWith.Normal = [-1.0, 0.0, 0.0]
# show color bar/color legend
extractCellsByRegion1Display.SetScalarBarVisibility(renderView1, True)
# update the view to ensure updated data information
renderView1.Update()
# Properties modified on fLUT
fLUT.RGBPoints = [1.0, 0.231373, 0.298039, 0.752941, 1.4484847784042358, 0.36470588235294116, 0.48627450980392156, 0.9019607843137255, 3.0, 0.865003, 0.865003, 0.865003, 5.0, 0.705882, 0.0156863, 0.14902]
# Properties modified on fLUT
fLUT.RGBPoints = [1.0, 0.2823529411764706, 0.6039215686274509, 0.8509803921568627, 1.4484847784042358, 0.36470588235294116, 0.48627450980392156, 0.9019607843137255, 3.0, 0.865003, 0.865003, 0.865003, 5.0, 0.705882, 0.0156863, 0.14902]
# Properties modified on fLUT
fLUT.RGBPoints = [1.0, 0.06274509803921569, 0.6039215686274509, 0.9607843137254902, 1.4484847784042358, 0.36470588235294116, 0.48627450980392156, 0.9019607843137255, 3.0, 0.865003, 0.865003, 0.865003, 5.0, 0.705882, 0.0156863, 0.14902]
# Properties modified on fLUT
fLUT.RGBPoints = [1.0, 0.06274509803921569, 0.6039215686274509, 0.9607843137254902, 3.0, 0.865003, 0.865003, 0.865003, 5.0, 0.705882, 0.0156863, 0.14902]
# Properties modified on fLUT
fLUT.RGBPoints = [1.0, 0.06274509803921569, 0.6039215686274509, 0.9607843137254902, 3.0, 0.4980392156862745, 0.5019607843137255, 0.5725490196078431, 5.0, 0.705882, 0.0156863, 0.14902]
# Properties modified on fLUT
fLUT.RGBPoints = [1.0, 0.06274509803921569, 0.6039215686274509, 0.9607843137254902, 1.860606074333191, 0.4745098039215686, 0.5882352941176471, 0.8156862745098039, 3.0, 0.4980392156862745, 0.5019607843137255, 0.5725490196078431, 5.0, 0.705882, 0.0156863, 0.14902]
# Properties modified on fLUT
fLUT.RGBPoints = [1.0, 0.06274509803921569, 0.6039215686274509, 0.9607843137254902, 2.0, 0.4745098039215686, 0.5882352941176471, 0.8156862745098039, 3.0, 0.4980392156862745, 0.5019607843137255, 0.5725490196078431, 5.0, 0.705882, 0.0156863, 0.14902]
# Properties modified on fLUT
fLUT.RGBPoints = [1.0, 0.06274509803921569, 0.6039215686274509, 0.9607843137254902, 2.0, 1.0, 1.0, 1.0, 3.0, 0.4980392156862745, 0.5019607843137255, 0.5725490196078431, 5.0, 0.705882, 0.0156863, 0.14902]
# Properties modified on fLUT
fLUT.RGBPoints = [1.0, 0.06274509803921569, 0.6039215686274509, 0.9607843137254902, 2.0, 1.0, 1.0, 1.0, 3.0, 0.4980392156862745, 0.5019607843137255, 0.5725490196078431, 4.090909004211426, 0.9098039215686274, 0.8392156862745098, 0.8, 5.0, 0.705882, 0.0156863, 0.14902]
# Properties modified on fLUT
fLUT.RGBPoints = [1.0, 0.06274509803921569, 0.6039215686274509, 0.9607843137254902, 2.0, 1.0, 1.0, 1.0, 3.0, 0.4980392156862745, 0.5019607843137255, 0.5725490196078431, 4.0, 0.9098039215686274, 0.8392156862745098, 0.8, 5.0, 0.705882, 0.0156863, 0.14902]
# Properties modified on fLUT
fLUT.RGBPoints = [1.0, 0.06274509803921569, 0.6039215686274509, 0.9607843137254902, 2.0, 1.0, 1.0, 1.0, 3.0, 0.4980392156862745, 0.5019607843137255, 0.5725490196078431, 4.0, 0.9098039215686274, 0.0, 0.0, 5.0, 0.705882, 0.0156863, 0.14902]
# Properties modified on fLUT
fLUT.RGBPoints = [1.0, 0.06274509803921569, 0.6039215686274509, 0.9607843137254902, 2.0, 1.0, 1.0, 1.0, 3.0, 0.4980392156862745, 0.5019607843137255, 0.5725490196078431, 4.0, 0.9098039215686274, 0.0, 0.0, 5.0, 0.10588235294117647, 0.7058823529411765, 0.0]
# reset view to fit data
renderView1.ResetCamera()
# current camera placement for renderView1
renderView1.CameraPosition = [-35.61998003697748, 1.1129615306854248, 1.0326027870178223]
renderView1.CameraFocalPoint = [1.0243473052978516, 1.1129615306854248, 1.0326027870178223]
renderView1.CameraViewUp = [0.0, 0.0, 1.0]
renderView1.CameraParallelScale = 9.484249811151892
# save screenshot
renderView1.ViewSize = [1600, 1000]
renderView1.ResetCamera()
SaveScreenshot(home_dir+path_to_insert+'/Images/Adapted_mesh.png', magnification=1, quality=100, view=renderView1)
#### saving camera placements for all active views
# current camera placement for renderView1
renderView1.CameraPosition = [-35.61998003697748, 1.1129615306854248, 1.0326027870178223]
renderView1.CameraFocalPoint = [1.0243473052978516, 1.1129615306854248, 1.0326027870178223]
renderView1.CameraViewUp = [0.0, 0.0, 1.0]
renderView1.CameraParallelScale = 9.484249811151892
#### uncomment the following to render all views
# RenderAllViews()
# alternatively, if you want to write images, you can use SaveScreenshot(...).
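# Example invocation (hypothetical path, typically run with ParaView's pvpython):
#   pvpython Paraview_adapted.py /OSS_platform/Results_adaptive
# which reads  ~/OSS_platform/Results_adaptive/Field_solutions/parallel_Subdomains.pvd
# and writes   ~/OSS_platform/Results_adaptive/Images/Adapted_mesh.png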
if __name__ == '__main__':
make_a_screenshot(*sys.argv[1:])
|
andreashorn/lead_dbs
|
ext_libs/OSS-DBS/OSS_platform/Visualization_files/Paraview_adapted.py
|
Python
|
gpl-3.0
| 7,006
|
[
"ParaView"
] |
7580e844a13dbe9248ac0688f3de73421e4629a53b47f9f45a899340c53901e7
|
from util import devlib
from color import yuvlib
texshearlib = devlib(defs=r'''
// Filter directions specified in degrees, using image/texture addressing
// [(0,0) is upper left corner, 90 degrees is down].
__constant__ float2 addressing_patterns[16] = {
{ 1.0f, 0.0f}, { 0.0f, 1.0f}, // 0, 1: 0, 90
{ 1.0f, 1.0f}, {-1.0f, 1.0f}, // 2, 3: 45, 135
{ 1.0f, 0.5f}, {-0.5f, 1.0f}, // 4, 5: 22.5, 112.5
{ 1.0f, -0.5f}, { 0.5f, 1.0f}, // 6, 7: -22.5, 67.5
{ 1.0f, 0.666667f}, {-0.666667f, 1.0f}, // 8, 9: 30, 120
{ 1.0f, -0.666667f}, { 0.666667f, 1.0f}, // 10, 11: -30, 60
{ 1.0f, 0.333333f}, {-0.333333f, 1.0f}, // 12, 13: 15, 105
{ 1.0f, -0.333333f}, { 0.333333f, 1.0f}, // 14, 15: -15, 75
};
// Mon dieu! A C++ feature? Gotta close the "extern C" added by the compiler.
}
template <typename T> __device__ T
tex_shear(texture<T, cudaTextureType2D> ref, int pattern,
float x, float y, float radius) {
float2 scale = addressing_patterns[pattern];
float i = scale.x * radius, j = scale.y * radius;
// Round i and j to the nearest integer, choosing the nearest even when
// equidistant. It's critical that this be done before adding 'x' and 'y',
// so that addressing patterns remain consistent across the grid.
asm("{\n\t"
"cvt.rni.ftz.f32.f32 %0, %0;\n\t"
"cvt.rni.ftz.f32.f32 %1, %1;\n\t"
"}\n" : "+f"(i), "+f"(j));
return tex2D(ref, x + i, y + j);
}
extern "C" {
''')
logscalelib = devlib(defs=r'''
__global__ void
logscale(float4 *outbuf, const float4 *pixbuf, float k1, float k2) {
GET_IDX(i);
float4 pix = pixbuf[i];
float ls = fmaxf(0, k1 * logf(1.0f + pix.w * k2) / pix.w);
pix.x *= ls;
pix.y *= ls;
pix.z *= ls;
pix.w *= ls;
outbuf[i] = pix;
}
''')
fmabuflib = devlib(defs=r'''
// Element-wise computation of ``dst[i]=dst[i]+src[i]*scale``.
__global__ void
fma_buf(float4 *dst, const float4 *src, float scale) {
GET_IDX(i);
float4 d = dst[i], s = src[i];
d.x += s.x * scale;
d.y += s.y * scale;
d.z += s.z * scale;
d.w += s.w * scale;
dst[i] = d;
}
''')
yuvfilterlib = devlib(deps=[yuvlib], defs=r'''
__global__ void
yuv_to_rgb(float4 *dst, const float4 *src) {
GET_IDX(i);
float4 pix = src[i];
yuvo2rgb(pix);
dst[i] = pix;
}
''')
logencodelib = devlib(defs=r'''
__global__ void
logencode(float4 *dst, const float4 *src, float degamma) {
GET_IDX(i);
float4 pix = src[i];
pix.x = log2f(powf(pix.x, degamma)) / 12.0f + 1.0f;
pix.y = log2f(powf(pix.y, degamma)) / 12.0f + 1.0f;
pix.z = log2f(powf(pix.z, degamma)) / 12.0f + 1.0f;
pix.w = log2f(powf(pix.w, degamma)) / 12.0f + 1.0f;
dst[i] = pix;
}
''')
denblurlib = devlib(deps=[texshearlib], decls='''
texture<float4, cudaTextureType2D> chan4_src;
texture<float, cudaTextureType2D> chan1_src;
// Call the Python function set_blur_width() to override these defaults.
__constant__ float gauss_coefs[7] = {
0.00443305f, 0.05400558f, 0.24203623f, 0.39905028f,
0.24203623f, 0.05400558f, 0.00443305f
};
''', defs=r'''
// Apply a Gaussian-esque blur to the density channel of the texture in
// ``chan4_src`` in the horizontal direction, and write it to ``dst``, a
// one-channel buffer.
__global__ void den_blur(float *dst, int pattern, int upsample) {
GET_IDX_2(xi, yi, gi);
float x = xi, y = yi;
float den = 0.0f;
#pragma unroll
for (int i = 0; i < 7; i++)
den += tex_shear(chan4_src, pattern, x, y, (i - 3) << upsample).w
* gauss_coefs[i];
dst[gi] = den;
}
// As den_blur, but with the one-channel texture as source
__global__ void den_blur_1c(float *dst, int pattern, int upsample) {
GET_IDX_2(xi, yi, gi);
float x = xi, y = yi;
float den = 0.0f;
#pragma unroll
for (int i = 0; i < 7; i++)
den += tex_shear(chan1_src, pattern, x, y, (i - 3) << upsample)
* gauss_coefs[i];
dst[gi] = den;
}
''')
fullblurlib = devlib(deps=[denblurlib], defs=r'''
__global__ void full_blur(float4 *dst, int pattern, int upsample) {
GET_IDX_2(xi, yi, gi);
float x = xi, y = yi;
float4 val = make_float4(0, 0, 0, 0);
#pragma unroll
for (int i = 0; i < 7; i++) {
float4 pix = tex_shear(chan4_src, pattern, x, y, (i - 3) << upsample);
val.x += pix.x * gauss_coefs[i];
val.y += pix.y * gauss_coefs[i];
val.z += pix.z * gauss_coefs[i];
val.w += pix.w * gauss_coefs[i];
}
dst[gi] = val;
}
''')
bilaterallib = devlib(deps=[logscalelib, texshearlib, denblurlib], defs=r'''
/* sstd: spatial standard deviation (Gaussian filter)
* cstd: color standard deviation (Gaussian on the range [0, 1], where 1
* represents an "opposite" color).
* dstd: Standard deviation (exp2f) of density filter at density = 1.0.
* dpow: Exponent applied to density values before taking difference.
* At dpow=0.8, difference between 1000 and 1001 is about 0.2.
* Use bigger dstd and bigger dpow to blur low-density areas more
* without clobbering high-density areas.
* gspeed: Speed of (exp2f) Gompertz distribution governing how much to
* tighten gradients. Zero and negative values OK.
*/
__global__ void
bilateral(float4 *dst, int pattern, int radius,
float sstd, float cstd, float dstd, float dpow, float gspeed)
{
GET_IDX_2(xi, yi, gi);
float x = xi, y = yi;
// Precalculate the spatial coeffecients.
__shared__ float spa_coefs[32];
if (threadIdx.y == 0) {
float df = threadIdx.x;
spa_coefs[threadIdx.x] = expf(df * df / (-M_SQRT2 * sstd));
}
// 3.0f compensates for [0,3] range of `cdiff`
float cscale = 1.0f / (-M_SQRT2 * 3.0f * cstd);
float dscale = -0.5f / dstd;
// Gather the center point, and pre-average the color values for faster
// comparison.
float4 cen = tex2D(chan4_src, x, y);
float cdrcp = 1.0f / (cen.w + 1.0e-6f);
cen.x *= cdrcp;
cen.y *= cdrcp;
cen.z *= cdrcp;
float cpowden = powf(cen.w, dpow);
float4 out = make_float4(0, 0, 0, 0);
float weightsum = 0.0f;
// Be extra-sure spatial coeffecients have been written
__syncthreads();
float4 pix = tex_shear(chan4_src, pattern, x, y, -radius - 1.0f);
float4 next = tex_shear(chan4_src, pattern, x, y, -radius);
for (float r = -radius; r <= radius; r++) {
float prev = pix.w;
pix = next;
next = tex_shear(chan4_src, pattern, x, y, r + 1.0f);
// This initial factor is arbitrary, but seems to do a decent job at
// preventing excessive bleed-out from points inside an empty region.
// (It's used when either the center or the current point has no
// sample energy at all.)
float cdiff = 0.5f;
if (pix.w > 0.0f && cen.w > 0.0f) {
// Compute the color difference as the simple magnitude difference
// between the YUV colors at the sampling location, unweighted by
// density. Essentially, this just identifies regions whose average
// color coordinates are similar.
float pdrcp = 1.0f / pix.w;
float yd = pix.x * pdrcp - cen.x;
float ud = pix.y * pdrcp - cen.y;
float vd = pix.z * pdrcp - cen.z;
cdiff = yd * yd + ud * ud + vd * vd;
}
// Density factor
float powden = powf(pix.w, dpow);
float dfact = exp2f(dscale * fabsf(cpowden - powden));
// Gradient energy factor. This favors points whose local energy
// gradient points towards the current point - in essence, it draws
// sampling energy "uphill" into denser regions rather than allowing
// it to be smeared in all directions. The effect is modulated by the
// average energy in the region (as determined from a blurred copy of
// the density map); weak gradients in dense image regions aren't
// affected as strongly. This is all very experimental, with little
// theoretical justification, but it seems to work very well.
//
// Note that both the gradient and the blurred weight are calculated
// in one dimension, along the current sampling vector.
float avg = tex_shear(chan1_src, pattern, x, y, r);
float gradfact = (next.w - prev) / (avg + 1.0e-6f);
if (r < 0) gradfact = -gradfact;
gradfact = exp2f(-exp2f(gspeed * gradfact));
float factor = spa_coefs[(int) fabsf(r)] * expf(cscale * cdiff) * dfact;
if (r != 0) factor *= gradfact;
weightsum += factor;
out.x += factor * pix.x;
out.y += factor * pix.y;
out.z += factor * pix.z;
out.w += factor * pix.w;
}
float weightrcp = 1.0f / (weightsum + 1e-10f);
out.x *= weightrcp;
out.y *= weightrcp;
out.z *= weightrcp;
out.w *= weightrcp;
dst[gi] = out;
}
''')
halocliplib = devlib(deps=[yuvlib, denblurlib], defs=r'''
__global__ void apply_gamma(float *dst, float4 *src, float gamma) {
GET_IDX(i);
float4 pix = src[i];
dst[i] = powf(pix.x, gamma);
}
__global__ void
haloclip(float4 *pixbuf, const float *denbuf, float gamma_m_1) {
GET_IDX(i);
float4 pix = pixbuf[i];
float areaval = denbuf[i];
if (pix.w <= 0) {
pixbuf[i] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
return;
}
float ls = powf(pix.w, gamma_m_1) / fmaxf(1.0f, areaval);
scale_float4(pix, ls);
pixbuf[i] = pix;
}
''')
smearcliplib = devlib(deps=[yuvlib, fullblurlib], defs=r'''
// Apply gamma to all four pixels. Subtract one from the result, and clamp at
// a minimum of 0.
__global__ void apply_gamma_full_hi(float4 *dst, float4 *src, float gamma_m_1) {
GET_IDX(i);
float4 pix = src[i];
float ls = 0.0f;
if (pix.w > 0.0f)
ls = fmaxf(0.0f, pix.w - 1.0f) / pix.w;
scale_float4(pix, ls);
dst[i] = pix;
}
__global__ void
smearclip(float4 *pixbuf, const float4 *smearbuf,
float gamma_m_1, float linrange, float lingam) {
GET_IDX(i);
float4 pix = pixbuf[i];
float4 areaval = smearbuf[i];
pix.x += areaval.x;
pix.y += areaval.y;
pix.z += areaval.z;
pix.w += areaval.w;
if (pix.w <= 0) {
pixbuf[i] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
return;
}
float ls = powf(pix.w, gamma_m_1);
if (pix.w < linrange) {
float frac = pix.w / linrange;
ls = (1.0f - frac) * lingam + frac * ls;
}
scale_float4(pix, ls);
pixbuf[i] = pix;
}
''')
plaincliplib = devlib(deps=[yuvlib], defs=r'''
__global__ void
plainclip(float4 *pixbuf, float gamma_m_1, float linrange, float lingam,
float brightness) {
GET_IDX(i);
float4 pix = pixbuf[i];
if (pix.w <= 0) {
pixbuf[i] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
return;
}
float ls = powf(pix.w, gamma_m_1);
if (pix.w < linrange) {
float frac = pix.w / linrange;
ls = (1.0f - frac) * lingam + frac * ls;
}
scale_float4(pix, ls * brightness);
pixbuf[i] = pix;
}
''')
colorcliplib = devlib(deps=[yuvlib], defs=r'''
__global__ void
colorclip(float4 *pixbuf, float vibrance, float highpow,
float gamma, float linrange, float lingam)
{
GET_IDX(i);
float4 pix = pixbuf[i];
if (pix.w <= 0) {
pixbuf[i] = make_float4(0, 0, 0, 0);
return;
}
float4 opix = pix;
float alpha = powf(pix.w, gamma);
if (pix.w < linrange) {
float frac = pix.w / linrange;
alpha = (1.0f - frac) * pix.w * lingam + frac * alpha;
}
float ls = vibrance * alpha / pix.w;
alpha = fminf(1.0f, fmaxf(0.0f, alpha));
float maxc = fmaxf(pix.x, fmaxf(pix.y, pix.z));
float maxa = maxc * ls;
float newls = 1.0f / maxc;
if (maxa > 1.0f && highpow >= 0.0f) {
float lsratio = powf(newls / ls, highpow);
pix.x *= newls;
pix.y *= newls;
pix.z *= newls;
// Reduce saturation (according to the HSV model) by proportionally
// increasing the values of the other colors.
pix.x = maxc - (maxc - pix.x) * lsratio;
pix.y = maxc - (maxc - pix.y) * lsratio;
pix.z = maxc - (maxc - pix.z) * lsratio;
} else {
float adjhlp = -highpow;
if (adjhlp > 1.0f || maxa <= 1.0f) adjhlp = 1.0f;
if (maxc > 0.0f) {
float adj = ((1.0f - adjhlp) * newls + adjhlp * ls);
pix.x *= adj;
pix.y *= adj;
pix.z *= adj;
}
}
pix.x += (1.0f - vibrance) * powf(opix.x, gamma);
pix.y += (1.0f - vibrance) * powf(opix.y, gamma);
pix.z += (1.0f - vibrance) * powf(opix.z, gamma);
pix.x = fminf(1.0f, pix.x);
pix.y = fminf(1.0f, pix.y);
pix.z = fminf(1.0f, pix.z);
pix.w = alpha;
pixbuf[i] = pix;
}
''')
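# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the device functions
# above blur along one of the `addressing_patterns` directions using the
# 7-tap kernel in `gauss_coefs`. The NumPy fragment below mimics that sampling
# on the CPU for a single pixel, assuming a clamp-to-edge lookup; it is only
# meant to clarify the addressing scheme, not to reproduce tex2D exactly.
if __name__ == '__main__':
    import numpy as np

    gauss_coefs = np.array([0.00443305, 0.05400558, 0.24203623, 0.39905028,
                            0.24203623, 0.05400558, 0.00443305])
    # Pattern 2 corresponds to the 45-degree direction {1, 1}.
    pattern = np.array([1.0, 1.0])

    def blurred_sample(img, x, y, upsample=0):
        h, w = img.shape
        acc = 0.0
        for i in range(7):
            radius = float((i - 3) << upsample)
            # Round offsets to the nearest integer, as the cvt.rni PTX does.
            xi = int(np.clip(round(x + pattern[0] * radius), 0, w - 1))
            yi = int(np.clip(round(y + pattern[1] * radius), 0, h - 1))
            acc += img[yi, xi] * gauss_coefs[i]
        return acc

    demo = np.zeros((16, 16), dtype=np.float32)
    demo[8, 8] = 1.0
    print(blurred_sample(demo, 8, 8))   # center-tap weight only, ~0.399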
|
stevenrobertson/cuburn
|
cuburn/code/filters.py
|
Python
|
gpl-2.0
| 12,956
|
[
"Gaussian"
] |
722818e80524621bfac4732af2cfc7f32e78ef51cc1cd02fc0946ab7f8484453
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import warn_if_not_float
from ..utils.extmath import row_norms
from ..utils.fixes import (combinations_with_replacement as combinations_w_r,
bincount)
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis)
from ..utils.validation import check_is_fitted
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'Normalizer',
'OneHotEncoder',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
if isinstance(std_, np.ndarray):
std_[std_ == 0.0] = 1.0
elif std_ == 0.:
std_ = 1.
else:
std_ = None
return mean_, std_
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
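    Examples
    --------
    A minimal illustration (exact array formatting may differ slightly
    between NumPy versions):
    >>> from sklearn.preprocessing import scale
    >>> scale([[0., 0.], [2., 2.]])
    array([[-1., -1.],
           [ 1.,  1.]])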
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False)
warn_if_not_float(X, estimator='The scale function')
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var[var == 0.0] = 1.0
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
if with_std:
Xr /= std_
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Standardizes features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The standardization is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This standardization is often used as an alternative to zero mean,
unit variance scaling.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
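    Examples
    --------
    A small illustration of the formula above, with the default
    feature_range of (0, 1):
    >>> from sklearn.preprocessing import MinMaxScaler
    >>> MinMaxScaler().fit_transform([[1., 2.], [3., 4.]])
    array([[ 0.,  0.],
           [ 1.,  1.]])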
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False)
warn_if_not_float(X, estimator=self)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
# Do not scale constant features
if isinstance(data_range, np.ndarray):
data_range[data_range == 0.0] = 1.0
elif data_range == 0.:
data_range = 1.
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
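    Examples
    --------
    A brief illustration (array formatting may differ between NumPy versions):
    >>> from sklearn.preprocessing import StandardScaler
    >>> scaler = StandardScaler().fit([[0., 0.], [2., 2.]])
    >>> scaler.mean_
    array([ 1.,  1.])
    >>> scaler.transform([[0., 0.], [2., 2.]])
    array([[-1., -1.],
           [ 1.,  1.]])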
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False)
if warn_if_not_float(X, estimator=self):
X = X.astype(np.float)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_[var == 0.0] = 1.0
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False)
if warn_if_not_float(X, estimator=self):
X = X.astype(np.float)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
    powers_ : array, shape [n_output_features, n_input_features]
powers_[i, j] is the exponent of the jth input in the ith output.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _power_matrix(n_features, degree, interaction_only, include_bias):
"""Compute the matrix of polynomial powers"""
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
combn = chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
powers = np.vstack(bincount(c, minlength=n_features) for c in combn)
return powers
def fit(self, X, y=None):
"""
Compute the polynomial feature combinations
"""
n_samples, n_features = check_array(X).shape
self.powers_ = self._power_matrix(n_features, self.degree,
self.interaction_only,
self.include_bias)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, 'powers_')
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.powers_.shape[1]:
raise ValueError("X shape does not match training shape")
return (X[:, None, :] ** self.powers_).prod(-1)
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1' or 'l2', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
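    Examples
    --------
    With the default l2 norm, the single row ``[3, 4]`` has norm 5 and is
    therefore scaled by 1/5:
    >>> from sklearn.preprocessing import normalize
    >>> normalize([[3., 4.]])
    array([[ 0.6,  0.8]])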
"""
if norm not in ('l1', 'l2'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy)
warn_if_not_float(X, 'The normalize function')
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
norms[norms == 0.0] = 1.0
elif norm == 'l2':
norms = row_norms(X)
norms[norms == 0.0] = 1.0
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Parameters
----------
norm : 'l1' or 'l2', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
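    Examples
    --------
    With a threshold of 1.0, only values strictly above the threshold map to 1:
    >>> from sklearn.preprocessing import binarize
    >>> binarize([[1.5, -0.5, 0.5]], threshold=1.0)
    array([[ 1.,  0.,  0.]])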
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
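    In matrix form, writing ``1_n`` for the ``n_samples x n_samples`` matrix
    whose entries are all ``1 / n_samples``, the centered training kernel is
    ``K_centered = K - 1_n K - K 1_n + 1_n K 1_n``. ``fit`` stores the
    per-sample means (``K_fit_rows_``) and the overall mean (``K_fit_all_``)
    of the training kernel, and ``transform`` applies the same correction, so
    new kernel matrices are centered against the training data.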
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known from fit,
        # i.e. those less than n_values_, using the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
|
mehdidc/scikit-learn
|
sklearn/preprocessing/data.py
|
Python
|
bsd-3-clause
| 39,855
|
[
"Gaussian"
] |
5cdff2f06b44c91842b1363fa4029150bbefcd1c517ca0ae8b8af065595330e4
|
import os, re, sys, glob, logging
from bx.seq.twobit import TwoBitFile
from galaxy.util.json import loads
from galaxy import model, util
from galaxy.util.bunch import Bunch
log = logging.getLogger( __name__ )
# FIXME: copied from tracks.py
# Message strings returned to browser
messages = Bunch(
PENDING = "pending",
NO_DATA = "no data",
NO_CHROMOSOME = "no chromosome",
NO_CONVERTER = "no converter",
NO_TOOL = "no tool",
DATA = "data",
ERROR = "error",
OK = "ok"
)
def decode_dbkey( dbkey ):
""" Decodes dbkey and returns tuple ( username, dbkey )"""
if ':' in dbkey:
return dbkey.split( ':' )
else:
return None, dbkey
class GenomeRegion( object ):
"""
A genomic region on an individual chromosome.
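    For example, GenomeRegion('chr1', 100, 200) prints as 'chr1:100-200', and
    GenomeRegion.from_str('chr1:100-200') parses such a string back into an
    equivalent region (an empty region is returned when the string cannot be
    split into a chromosome and an interval).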
"""
def __init__( self, chrom = None, start = 0, end = 0, sequence=None ):
self.chrom = chrom
self.start = int( start )
self.end = int( end )
self.sequence = sequence
def __str__( self ):
return self.chrom + ":" + str( self.start ) + "-" + str( self.end )
@staticmethod
def from_dict( obj_dict ):
return GenomeRegion( chrom = obj_dict[ 'chrom' ],
start = obj_dict[ 'start' ],
end = obj_dict[ 'end' ] )
@staticmethod
def from_str( obj_str ):
# check for gene region
gene_region = obj_str.split(':')
# split gene region into components
if (len(gene_region) == 2):
gene_interval = gene_region[1].split('-')
# check length
if (len(gene_interval) == 2):
return GenomeRegion(chrom = gene_region[0],
start = gene_interval[0],
end = gene_interval[1])
# return genome region instance
return GenomeRegion()
class Genome( object ):
"""
Encapsulates information about a known genome/dbkey.
"""
def __init__( self, key, description, len_file=None, twobit_file=None ):
self.key = key
self.description = description
self.len_file = len_file
self.twobit_file = twobit_file
def to_dict( self, num=None, chrom=None, low=None ):
"""
Returns representation of self as a dictionary.
"""
def check_int(s):
if s.isdigit():
return int(s)
else:
return s
def split_by_number(s):
return [ check_int(c) for c in re.split('([0-9]+)', s) ]
#
# Parameter check, setting.
#
if num:
num = int( num )
else:
num = sys.maxint
if low:
low = int( low )
if low < 0:
low = 0
else:
low = 0
#
# Get chroms data:
# (a) chrom name, len;
# (b) whether there are previous, next chroms;
# (c) index of start chrom.
#
len_file_enumerate = enumerate( open( self.len_file ) )
chroms = {}
prev_chroms = False
start_index = 0
if chrom:
# Use starting chrom to start list.
found = False
count = 0
for line_num, line in len_file_enumerate:
if line.startswith("#"):
continue
name, len = line.split("\t")
if found:
chroms[ name ] = int( len )
count += 1
elif name == chrom:
# Found starting chrom.
chroms[ name ] = int ( len )
count += 1
found = True
start_index = line_num
if line_num != 0:
prev_chroms = True
if count >= num:
break
else:
# Use low to start list.
high = low + int( num )
prev_chroms = ( low != 0 )
start_index = low
# Read chrom data from len file.
for line_num, line in len_file_enumerate:
if line_num < low:
continue
if line_num >= high:
break
if line.startswith("#"):
continue
# LEN files have format:
# <chrom_name><tab><chrom_length>
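                #   e.g. a (hypothetical) entry:  chr1<tab>248956422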
fields = line.split("\t")
chroms[ fields[0] ] = int( fields[1] )
# Set flag to indicate whether there are more chroms after list.
next_chroms = False
try:
len_file_enumerate.next()
next_chroms = True
except:
# No more chroms to read.
pass
to_sort = [{ 'chrom': chrm, 'len': length } for chrm, length in chroms.iteritems()]
to_sort.sort(lambda a,b: cmp( split_by_number(a['chrom']), split_by_number(b['chrom']) ))
return {
'id': self.key,
'reference': self.twobit_file is not None,
'chrom_info': to_sort,
'prev_chroms' : prev_chroms,
'next_chroms' : next_chroms,
'start_index' : start_index
}
class Genomes( object ):
"""
Provides information about available genome data and methods for manipulating that data.
"""
def __init__( self, app ):
# Create list of genomes from app.genome_builds
self.genomes = {}
for key, description in app.genome_builds.get_genome_build_names():
self.genomes[ key ] = Genome( key, description )
# Add len files to genomes.
len_files = glob.glob( os.path.join( app.config.len_file_path, "*.len" ) )
for f in len_files:
key = os.path.split( f )[1].split( ".len" )[0]
if key in self.genomes:
self.genomes[ key ].len_file = f
# Add genome data (twobit files) to genomes.
try:
for line in open( os.path.join( app.config.tool_data_path, "twobit.loc" ) ):
if line.startswith("#"): continue
val = line.split()
if len( val ) == 2:
key, path = val
if key in self.genomes:
self.genomes[ key ].twobit_file = path
except IOError, e:
# Thrown if twobit.loc does not exist.
log.exception( str( e ) )
def get_build( self, dbkey ):
""" Returns build for the given key. """
rval = None
if dbkey in self.genomes:
rval = self.genomes[ dbkey ]
return rval
def get_dbkeys( self, trans, chrom_info=False, **kwd ):
""" Returns all known dbkeys. If chrom_info is True, only dbkeys with
chromosome lengths are returned. """
dbkeys = []
# Add user's custom keys to dbkeys.
user_keys_dict = {}
user = trans.get_user()
if user:
if 'dbkeys' in user.preferences:
user_keys_dict = loads( user.preferences[ 'dbkeys' ] )
dbkeys.extend( [ (attributes[ 'name' ], key ) for key, attributes in user_keys_dict.items() ] )
# Add app keys to dbkeys.
# If chrom_info is True, only include keys with len files (which contain chromosome info).
filter_fn = lambda b: True
if chrom_info:
filter_fn = lambda b: b.len_file is not None
dbkeys.extend( [ ( genome.description, genome.key ) for key, genome in self.genomes.items() if filter_fn( genome ) ] )
return dbkeys
def chroms( self, trans, dbkey=None, num=None, chrom=None, low=None ):
"""
Returns a naturally sorted list of chroms/contigs for a given dbkey.
Use either chrom or low to specify the starting chrom in the return list.
"""
# If there is no dbkey owner, default to current user.
dbkey_owner, dbkey = decode_dbkey( dbkey )
if dbkey_owner:
dbkey_user = trans.sa_session.query( trans.app.model.User ).filter_by( username=dbkey_owner ).first()
else:
dbkey_user = trans.user
#
# Get/create genome object.
#
genome = None
twobit_file = None
# Look first in user's custom builds.
if dbkey_user and 'dbkeys' in dbkey_user.preferences:
user_keys = loads( dbkey_user.preferences['dbkeys'] )
if dbkey in user_keys:
dbkey_attributes = user_keys[ dbkey ]
dbkey_name = dbkey_attributes[ 'name' ]
# If there's a fasta for genome, convert to 2bit for later use.
if 'fasta' in dbkey_attributes:
build_fasta = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( dbkey_attributes[ 'fasta' ] )
len_file = build_fasta.get_converted_dataset( trans, 'len' ).file_name
build_fasta.get_converted_dataset( trans, 'twobit' )
# HACK: set twobit_file to True rather than a file name because
# get_converted_dataset returns null during conversion even though
# there will eventually be a twobit file available for genome.
twobit_file = True
# Backwards compatibility: look for len file directly.
elif 'len' in dbkey_attributes:
len_file = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( user_keys[ dbkey ][ 'len' ] ).file_name
if len_file:
genome = Genome( dbkey, dbkey_name, len_file=len_file, twobit_file=twobit_file )
# Look in history and system builds.
if not genome:
# Look in history for chromosome len file.
len_ds = trans.db_dataset_for( dbkey )
if len_ds:
genome = Genome( dbkey, dbkey_name, len_file=len_ds.file_name )
# Look in system builds.
elif dbkey in self.genomes:
genome = self.genomes[ dbkey ]
# Set up return value or log exception if genome not found for key.
rval = None
if genome:
rval = genome.to_dict( num=num, chrom=chrom, low=low )
else:
log.exception( 'genome not found for key %s' % dbkey )
return rval
def has_reference_data( self, dbkey, dbkey_owner=None ):
"""
Returns true if there is reference data for the specified dbkey. If dbkey is custom,
dbkey_owner is needed to determine if there is reference data.
"""
# Look for key in built-in builds.
if dbkey in self.genomes and self.genomes[ dbkey ].twobit_file:
# There is built-in reference data.
return True
# Look for key in owner's custom builds.
if dbkey_owner and 'dbkeys' in dbkey_owner.preferences:
user_keys = loads( dbkey_owner.preferences[ 'dbkeys' ] )
if dbkey in user_keys:
dbkey_attributes = user_keys[ dbkey ]
if 'fasta' in dbkey_attributes:
# Fasta + converted datasets can provide reference data.
return True
return False
def reference( self, trans, dbkey, chrom, low, high ):
"""
Return reference data for a build.
"""
# If there is no dbkey owner, default to current user.
dbkey_owner, dbkey = decode_dbkey( dbkey )
if dbkey_owner:
dbkey_user = trans.sa_session.query( trans.app.model.User ).filter_by( username=dbkey_owner ).first()
else:
dbkey_user = trans.user
if not self.has_reference_data( dbkey, dbkey_user ):
return None
#
# Get twobit file with reference data.
#
twobit_file_name = None
if dbkey in self.genomes:
# Built-in twobit.
twobit_file_name = self.genomes[dbkey].twobit_file
else:
user_keys = loads( dbkey_user.preferences['dbkeys'] )
dbkey_attributes = user_keys[ dbkey ]
fasta_dataset = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( dbkey_attributes[ 'fasta' ] )
msg = fasta_dataset.convert_dataset( trans, 'twobit' )
if msg:
return msg
else:
twobit_dataset = fasta_dataset.get_converted_dataset( trans, 'twobit' )
twobit_file_name = twobit_dataset.file_name
# Read and return reference data.
try:
twobit = TwoBitFile( open( twobit_file_name ) )
if chrom in twobit:
seq_data = twobit[chrom].get( int(low), int(high) )
return GenomeRegion( chrom=chrom, start=low, end=high, sequence=seq_data )
except IOError:
return None
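# Illustrative sketch (not part of the original Galaxy module): a standalone
# version of the twobit lookup performed in Genomes.reference() above. The path
# 'my_genome.2bit' is a placeholder, and the TwoBitFile interface is assumed to
# behave exactly as it is used in the method above.
def _example_fetch_reference_slice(twobit_path='my_genome.2bit', chrom='chr1', low=0, high=100):
    try:
        twobit = TwoBitFile(open(twobit_path))
        if chrom in twobit:
            return twobit[chrom].get(int(low), int(high))
    except IOError:
        pass
    return None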
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/visualization/genomes.py
|
Python
|
gpl-3.0
| 12,963
|
[
"Galaxy"
] |
905dce6796eda60e69ffd57a8e06fe0a0236d63a45a941b86a9a17231618b295
|
""" Threaded implementation of service interface
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from DIRAC import gLogger
from DIRAC.ConfigurationSystem.private.ServiceInterfaceBase import ServiceInterfaceBase
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
__RCSID__ = "$Id$"
class ServiceInterface(ServiceInterfaceBase, threading.Thread):
"""
Service interface, manage Slave/Master server for CS
Thread components
"""
def __init__(self, sURL):
threading.Thread.__init__(self)
ServiceInterfaceBase.__init__(self, sURL)
def _launchCheckSlaves(self):
"""
Start loop which check if slaves are alive
"""
gLogger.info("Starting purge slaves thread")
self.setDaemon(1)
self.start()
def run(self):
while True:
iWaitTime = gConfigurationData.getSlavesGraceTime()
time.sleep(iWaitTime)
self._checkSlavesStatus()
def _updateServiceConfiguration(self, urlSet, fromMaster=False):
"""
Update configuration of a set of slave services in parallel
:param set urlSet: a set of service URLs
:param fromMaster: flag to force updating from the master CS
:return: Nothing
"""
if not urlSet:
return
with ThreadPoolExecutor(max_workers=len(urlSet)) as executor:
futureUpdate = {executor.submit(self._forceServiceUpdate, url, fromMaster): url for url in urlSet}
for future in as_completed(futureUpdate):
url = futureUpdate[future]
result = future.result()
if result["OK"]:
gLogger.info("Successfully updated slave configuration", url)
else:
gLogger.error("Failed to update slave configuration", url)
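# Illustrative sketch (not part of DIRAC): the same submit/as_completed fan-out
# used in _updateServiceConfiguration() above, shown with a plain callable so it
# can run outside DIRAC. The URLs and the update function below are placeholders.
def _exampleParallelUpdate(urlSet=None):
  if urlSet is None:
    urlSet = {"dips://host1:9135/Configuration/Server",
              "dips://host2:9135/Configuration/Server"}

  def fakeUpdate(url):
    # stand-in for _forceServiceUpdate; always succeeds
    return {"OK": True, "Value": url}

  results = {}
  with ThreadPoolExecutor(max_workers=len(urlSet)) as executor:
    futures = {executor.submit(fakeUpdate, url): url for url in urlSet}
    for future in as_completed(futures):
      results[futures[future]] = future.result()
  return results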
|
ic-hep/DIRAC
|
src/DIRAC/ConfigurationSystem/private/ServiceInterface.py
|
Python
|
gpl-3.0
| 2,024
|
[
"DIRAC"
] |
e149abeaaa67250ee5eb8de5d27fc100edb0b8e00db659ff190476b19bf2b955
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2011, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 or (at your
# option) any later version as published by the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
from logging import getLogger
log = getLogger('zen.python')
import os.path
from zope.event import notify
from twisted.internet import defer
from Products.Zuul.catalog.events import IndexingEvent
from ZenPacks.zenoss.PythonCollector.datasources.PythonDataSource \
import PythonDataSourcePlugin
def load_data(filename):
path = os.path.join(os.path.dirname(__file__), 'data', filename)
with open(path, 'r') as f:
return f.read()
def add_obj(relationship, obj):
'''
Add obj to relationship, index it, then returns the persistent
object.
'''
relationship._setObject(obj.id, obj)
obj = relationship._getOb(obj.id)
obj.index_object()
notify(IndexingEvent(obj))
return obj
def test_device(dmd, factor=1):
'''
Return an example MySqlMonitorDevice with a full set of example components.
'''
from ZenPacks.zenoss.MySqlMonitor.MySQLDatabase import MySQLDatabase
from ZenPacks.zenoss.MySqlMonitor.MySQLServer import MySQLServer
dc = dmd.Devices.createOrganizer('/Server')
dc.setZenProperty('zPythonClass', 'Products.ZenModel.Device.Device')
device = dc.createInstance('device')
device.setPerformanceMonitor('localhost')
device.index_object()
notify(IndexingEvent(device))
# Server
for server_id in range(factor):
server = add_obj(
device.mysql_servers,
MySQLServer('server%s' % (
server_id)))
# Database
for database_id in range(factor):
database = add_obj(
server.databases,
MySQLDatabase('database%s-%s' % (
server_id, database_id)))
return device
class MysqlFakePlugin(PythonDataSourcePlugin):
"""Fake plugin to test non-blocking.
1. To make it happen open templates for say 'MySQLServer' and add
datasource with any name but type=Python and cycle time=10secs, and
plugin class name ZenPacks.zenoss.MySqlMonitor.tests.util.MysqlFakePlugin
2. You also need rule for iptables to block normal plugins, like:
iptables -A OUTPUT -p tcp -d MYSQL_IP_HERE --dport 3306 -j DROP
3. Run zenpython and watch for log.
"""
@defer.inlineCallbacks
def collect(self, config):
log.info("== Fake plugin called")
_ = yield
log.info("==== After yield in fake plugin")
defer.returnValue({})
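# Illustrative sketch (not part of the ZenPack): the generic shape of an
# inlineCallbacks-based collect() -- yield a Deferred, then hand the result back
# with returnValue. defer.succeed() is used so no reactor or MySQL connection is
# needed; the config argument is deliberately ignored.
@defer.inlineCallbacks
def _example_collect(config=None):
    data = yield defer.succeed({'values': {}, 'events': [], 'maps': []})
    defer.returnValue(data)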
|
krull/docker-zenoss4
|
init_fs/usr/local/zenoss/ZenPacks/ZenPacks.zenoss.MySqlMonitor-3.0.7.egg/ns/tests/util.py
|
Python
|
gpl-3.0
| 3,042
|
[
"VisIt"
] |
a7f207fdee620923c3b08b3d7c915297db4cbbc8bcf631359a02c1c097beca5b
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 15 08:33:32 2016
@author: GDM
"""
##### Importing modules #####
import HTSeq
import cPickle as pickle
import pandas as pd
from collections import Counter
import os
from HaSAPPy.HaSAPPY_time import *
import itertools
############################################################
#1) Defining the Class Library
class Library():
"""...defintion..."""
def __init__(self,exp,input_):
self.name = exp
self.input = input_
self.informations = {'Total':'nd','Aligned':'nd','Unique_reads':'nd','Insertions':'nd','I.I.':'nd'}
self.raw = pd.Series()
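# Illustrative sketch (not part of HaSAPPy): minimal use of the Library
# container above. The experiment name and the SAM path are placeholders.
def _example_library():
    lib = Library('exp1', '/path/to/exp1.sam')
    lib.informations['Total'] = 0
    return lib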
#2) Function library_generation
def library_generation (exp, Info):
#Generation of the Class Library specific for "exp"
library = Library(exp,Info.IIDefinition.input_files[Info.IIDefinition.lib_names.index(exp)])
print library.name
    string ='\n\n***\tIndependent Insertions (I.I.) definition\t***\n\n- Input file: %s\n- Pair ends: %s\n- Alignment cutoff: %s\n- Remove duplicates: %s\n- Insertion cutoff: %i' %(library.input, Info.General.pair_ends,Info.IIDefinition.fidelity_limit,Info.IIDefinition.reads_duplicate,Info.IIDefinition.ins_iv)
Info.print_save(exp,string)
startTime = getCurrTime()
string = '\tSelection of Insertions (I.): %s' %startTime
Info.print_save(exp,string)
aligned_file = HTSeq.SAM_Reader(library.input)
#aligned_file = [seq for seq in itertools.islice(aligned_file,100000)]
insertions_counts = Counter()
count_aligned = 0
count_GoodQualityAlignment = 0
count_total = 0
for algnt in aligned_file:
if algnt.aligned:
if algnt.iv.chrom.startswith('chr'):
chromosome_style = ''
else:
chromosome_style = 'chr'
break
if Info.General.pair_ends: #Pair ends library
for bundle in HTSeq.pair_SAM_alignments(aligned_file, bundle=True):
if len(bundle) != 1:
continue # Skip multiple alignments
first_almnt, second_almnt = bundle[0] # extract pair
if first_almnt.aligned and second_almnt.aligned:
if first_almnt.aQual >= Info.IIDefinition.fidelity_limit:
ins = HTSeq.GenomicPosition('%s%s' %(chromosome_style,str(first_almnt.iv.chrom)),first_almnt.iv.start_d,first_almnt.iv.strand)
insertions_counts[ins] +=1
count_GoodQualityAlignment +=1
count_aligned +=1
count_total +=1
else: #Single ends library
for algnt in aligned_file:
if algnt.aligned:
if algnt.aQual >= Info.IIDefinition.fidelity_limit:
ins = HTSeq.GenomicPosition('%s%s' %(chromosome_style,str(algnt.iv.chrom)),algnt.iv.start_d,algnt.iv.strand)
insertions_counts[ins] +=1
count_GoodQualityAlignment +=1
count_aligned +=1
count_total +=1
del aligned_file
string = '\t-Total reads: %i\n\t-Aligned reads: %i\n\t-Aligned Reads trusted: %i\n\t-Insertions identified: %i' %(count_total,count_aligned,count_GoodQualityAlignment, len(insertions_counts.keys()))
Info.print_save(exp,string)
string = '\tRunTime: %s' % computeRunTime(startTime, getCurrTime())
Info.print_save(exp,string)
### To collapse insertions in insertion array that are in the same interval (4bps)
startTime = getCurrTime()
string = 'Define Independent Insertions\n\tStarted: %s' %startTime
Info.print_save(exp,string)
insertions_series = pd.Series(insertions_counts, index = insertions_counts.keys())
del insertions_counts
insertions_order = insertions_series.copy()
    insertions_order = insertions_order.sort_values(ascending = False)
insertions_genomicarray = HTSeq.GenomicArray("auto",stranded = True)
count_indipendent_insertions = 0
count_indipendent_insertions_aborted = 0
insertions_tuple = zip(insertions_order.index,insertions_order.values)
del insertions_order
del insertions_series
for ins in insertions_tuple:
insertions_genomicarray[ins[0]] = ins[1]
insertions_collapsed = {}
for n in insertions_tuple:
i = n[0]
if insertions_genomicarray[i]>0:
counted = 0
iv_i = HTSeq.GenomicInterval(i.chrom,i.start-2,i.start+2,i.strand)
for i_2 in iv_i.xrange(step=1):
try:
counted += insertions_genomicarray[i_2]
insertions_genomicarray[i_2] = 0
except IndexError:
string = "\t!!!Skipped from analysis: %s" % i_2
Info.print_save(exp,string)
continue
if counted >= Info.IIDefinition.ins_iv:
if insertions_collapsed.has_key(i):
insertions_collapsed[i] += counted
else:
insertions_collapsed[i] = counted
count_indipendent_insertions +=1
else:
count_indipendent_insertions_aborted +=1
string = '\t-Total insertions: %i\n\t-Independent Insertions (I.I.): %i' % ((count_indipendent_insertions + count_indipendent_insertions_aborted), count_indipendent_insertions)
Info.print_save(exp,string)
string = '\tRunTime: %s' % computeRunTime(startTime, getCurrTime())
Info.print_save(exp,string)
###Storing data in library class that will be returned modifed as result of the function
library.informations['Total'] = count_total
library.informations['Aligned'] = count_aligned
library.informations['Insertions'] = count_indipendent_insertions
library.informations['II'] = count_indipendent_insertions
if Info.IIDefinition.reads_duplicate:
library.informations['Unique_reads'] = count_reads
library.raw = pd.Series(insertions_collapsed, index = insertions_collapsed.keys())
#####Store the class!!!!!#####
location = os.path.join(Info.General.storing_loc,exp + '_' +Info.General.date,'raw',exp + '_IIRawdata.pkl')
with open (location,'wb') as saving:
pickle.dump(library,saving)
#####END the program#####
    string = 'Information stored in %s\n***\tEND of Independent Insertions (I.I.) definition\t***' % location
Info.print_save(exp,string)
return library
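# Illustrative sketch (not part of HaSAPPy): the insertion counting in
# library_generation() boils down to tallying HTSeq.GenomicPosition keys in a
# Counter, mirroring the calls above; the chromosomes, coordinates and strands
# below are invented.
def _example_count_insertions():
    counts = Counter()
    for pos in [HTSeq.GenomicPosition('chr1', 1000, '+'),
                HTSeq.GenomicPosition('chr1', 1000, '+'),
                HTSeq.GenomicPosition('chr2', 500, '-')]:
        counts[pos] += 1
    return counts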
def load(Info):
for exp in Info.IIDefinition.lib_names:
library_generation(exp,Info)
|
gdiminin/HaSAPPy
|
HaSAPPy/IIDefinition.py
|
Python
|
mit
| 6,622
|
[
"HTSeq"
] |
9688776358d4658cfb4e69bc94cb583a2eb57deecb932a7e3edb708913bd3ef7
|
#./parallel_manager.sh ./create_skysub_delink_para.sh ${SUBARUDIR}/${cluster}/${filter} SCIENCE ${ending} ".sub" TWOPASS
global tmpdir
import os
#os.system('mkdir -p ' + tmpdir)
astrom = 'solve-field'
import traceback, tempfile
def describe_db_long(c,db=['illumination_db']):
if type(db) != type([]):
db = [db]
keys = []
for d in db:
command = "DESCRIBE " + d
#print command
c.execute(command)
results = c.fetchall()
for line in results:
keys.append([line[0],line[1]])
return keys
def fix_table2():
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
db_keys = describe_db_long(c,['fit_db'])
print db_keys
raw_input()
for key in db_keys:
if key[0][0:4] == 'nosd':
try:
command = 'alter table fit_db change ' + key[0] + ' ' + key[0].replace('None','None') + ' ' + key[1]
print command
c.execute(command)
except: print 'fail'
if key[0][0:4] == 'matc':
command = 'alter table fit_db drop column ' + key[0]
print command
c.execute(command)
def fix_table():
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
db_keys_f = describe_db(c,['fit_db'])
command = 'select * from fit_db group by objname, pprun'
print command
c.execute(command)
results=c.fetchall()
for line in results:
global tmpdir
dtop = {}
for i in range(len(db_keys_f)):
dtop[db_keys_f[i]] = str(line[i])
FILTER = dtop['FILTER']
PPRUN = dtop['PPRUN']
OBJNAME = dtop['OBJNAME']
command = 'select * from fit_db where OBJNAME="' + dtop['OBJNAME'] + '" and PPRUN = "' + dtop['PPRUN'] + '"'
print command
c.execute(command)
results=c.fetchall()
ids = []
for line in results:
global tmpdir
dtop = {}
for i in range(len(db_keys_f)):
dtop[db_keys_f[i]] = str(line[i])
ids.append(dtop['id'])
print ids
ids.sort()
print ids[-1]
if len(ids) > 1:
for i in ids[:-1]:
command = 'delete from fit_db where id =' + i
print command
c.execute(command)
def variance(data,err):
d = 0
w = 0
for i in range(len(data)):
w += 1/err[i]**2.
d += data[i]/err[i]**2.
mean = d/w
w = 0
d = 0
for i in range(len(data)):
w += 1/err[i]**2.
d += 1/err[i]**2.*(data[i] - mean)**2.
weight_variance = d/w
import scipy
variance = scipy.var(data)
n = 0
d = 0
for i in range(len(data)):
n += 1.
d += 1/err[i]*abs(data[i] - mean)
redchi = d/n
return variance, weight_variance, redchi
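# Illustrative sketch (editor's example, invented numbers): a quick check of
# variance() above on toy data -- with equal errors the weighted variance should
# agree with the plain scipy variance.
def _example_variance():
    data = [1.0, 2.0, 3.0, 4.0]
    err = [0.5, 0.5, 0.5, 0.5]
    return variance(data, err)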
def random_cmp(x,y):
import random
a = random.random()
b = random.random()
if a > b: return 1
else: return -1
def starStats(supas):
dict = {}
dict['rot'] = 0
dict['match'] = 0
dict['match_exists'] = 0
for s in supas:
if s['match']: dict['match'] += 1
if s['match_exists']: dict['match_exists'] += 1
s = s['supa files']
rot1 = 0
rot0 = 0
rot2 = 0
for ele in s:
if not dict.has_key(ele['name']):
dict[ele['name']] = 0
dict[ele['name']] += 1
if ele['rotation'] == '1':rot1 = 1
if ele['rotation'] == '0':rot0 = 1
if ele['rotation'] == '2':rot2 = 1
if rot0 + rot1 + rot2 > 1:
dict['rot'] += 1
#print dict['rot'], 'rot'
for key in dict.keys():
print key, dict[key]
return dict
def length_swarp(SUPA,FLAT_TYPE,CHIPS):
import os, re, utilities, bashreader, sys, string
from copy import copy
from glob import glob
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
all_chip_dict = {}
NUMScommas = reduce(lambda x,y: str(x) + ',' + str(y),CHIPS.keys())
all_chip_dict['CHIPS'] = NUMScommas
print sorted(CHIPS.keys())
NUMS = []
start = 1
crpix1s = []
crpix2s = []
for CHIP in CHIPS.keys():
NUMS.append(CHIP)
if len(CHIPS[CHIP]) == 0:
print CHIP
if len(CHIPS[CHIP]) > 0:
crpix = CHIPS[CHIP]
import re
p = re.compile('\_\d+O')
file = p.sub('_' + str(CHIP) + 'O',search_params['file'])
print file, CHIP
naxis = utilities.get_header_kw(file,['NAXIS1','NAXIS2'])
print naxis, CHIP
for kw in ['NAXIS1','NAXIS2']:
crpix[kw] = float(naxis[kw])
print naxis[kw]
print file
if start == 1:
crpixzero = copy(crpix)
crpixhigh = copy(crpix)
start = 0
from copy import copy
print float(crpix['CRPIX1']) < float(crpixzero['CRPIX1']), float(crpix['CRPIX2']) < float(crpixzero['CRPIX2'])
if float(crpix['CRPIX1']) + 0 >= float(crpixzero['CRPIX1']):
crpixzero['CRPIX1'] = copy(crpix['CRPIX1'])
if float(crpix['CRPIX2']) + 0 >= float(crpixzero['CRPIX2']):
crpixzero['CRPIX2'] = copy(crpix['CRPIX2'])
if float(crpix['CRPIX1']) - 0 <= float(crpixhigh['CRPIX1']):
crpixhigh['CRPIX1'] = copy(crpix['CRPIX1'])
if float(crpix['CRPIX2']) - 0 <= float(crpixhigh['CRPIX2']):
crpixhigh['CRPIX2'] = copy(crpix['CRPIX2'])
crpix1s.append(copy(crpix['CRPIX1']))
crpix2s.append(copy(crpix['CRPIX2']))
print crpix['CRPIX1'], crpix['CRPIX2'], crpixzero['CRPIX1'], crpixzero['CRPIX2'], crpixhigh['CRPIX1'], crpixhigh['CRPIX2']#, crpixhigh
print crpix.keys()
for kw in ['CRPIX1','CRPIX2','CRVAL1','CRVAL2','NAXIS1','NAXIS2']:
all_chip_dict[kw+ '_' + str(CHIP)] = crpix[kw]
#plot_chips(crpix1s,crpix2s)
for i in range(len(crpix1s)):
print crpix1s[i],crpix2s[i], NUMS[i]
crpix1s.sort()
crpix2s.sort()
print len(crpix1s), crpix1s, crpix2s, crpix1s[-1] - crpix1s[0] + crpix['NAXIS1'], crpix2s[-1] - crpix2s[0] + crpix['NAXIS2']
print all_chip_dict
LENGTH1 = abs(float(crpixhigh['CRPIX1']) - float(crpixzero['CRPIX1'])) + float(crpix['NAXIS1'])
LENGTH2 = abs(float(crpixhigh['CRPIX2']) - float(crpixzero['CRPIX2'])) + float(crpix['NAXIS2'])
print LENGTH1, LENGTH2, crpixzero['CRPIX1'], crpixzero['CRPIX2'], crpixhigh['CRPIX1'], crpixhigh['CRPIX2']#, crpixhigh
all_chip_dict.update({'crfixednew':'third','LENGTH1':LENGTH1,'LENGTH2':LENGTH2,'CRPIX1ZERO':crpixzero['CRPIX1'],'CRPIX2ZERO':crpixzero['CRPIX2'],'CRVAL1':crpix['CRVAL1'],'CRVAL2':crpix['CRVAL2']})
save_exposure(all_chip_dict,SUPA,FLAT_TYPE)
return all_chip_dict
def fix_radec(SUPA,FLAT_TYPE):
#cats = [{'im_type': 'DOMEFLAT', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.DOMEFLAT.fixwcs.rawconv'}, {'im_type': 'SKYFLAT', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.SKYFLAT.fixwcs.rawconv'}, {'im_type': 'OCIMAGE', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.OCIMAGE.fixwcs.rawconv'}]
#outfile = '' + search_params['TEMPDIR'] + 'stub'
#cats = [{'im_type': 'MAIN', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS..fixwcs.rawconv'}, {'im_type': 'D', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.D.fixwcs.rawconv'}]
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy
from config_bonn import cluster, tag, arc, filters
ppid = str(os.getppid())
#chips = length(SUPA,FLAT_TYPE)
#import time
#time.sleep(2)
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
from copy import copy
chips = {}
NUMS = []
at_least_one = False
print dict['file']
for image in dict['files']:
params = copy(search_params)
ROOT = re.split('\.',re.split('\/',image)[-1])[0]
params['ROOT'] = ROOT
BASE = re.split('O',ROOT)[0]
params['BASE'] = BASE
NUM = re.split('O',re.split('\_',ROOT)[1])[0]
params['NUM'] = NUM
print NUM, BASE, ROOT, image
params['GAIN'] = 2.50 ## WARNING!!!!!!
print ROOT
finalflagim = "%(TEMPDIR)sflag_%(ROOT)s.fits" % params
res = re.split('SCIENCE',image)
res = re.split('/',res[0])
if res[-1]=='':res = res[:-1]
params['path'] = reduce(lambda x,y:x+'/'+y,res[:-1])
params['fil_directory'] = res[-1]
print params['fil_directory']
res = re.split('_',res[-1])
''' if three second exposure, use the headers in the directory '''
if string.find(dict['fil_directory'],'CALIB') != -1:
params['directory'] = params['fil_directory']
else:
params['directory'] = res[0]
print params['directory']
print BASE
SDSS = "/%(path)s/%(directory)s/SCIENCE/headers_scamp_SDSS-R6/%(BASE)s.head" % params # it's not a ZERO!!!
TWOMASS = "/%(path)s/%(directory)s/SCIENCE/headers_scamp_2MASS/%(BASE)s.head" % params
NOMAD = "/%(path)s/%(directory)s/SCIENCE/headers_scamp_NOMAD*/%(BASE)s.head" % params
SDSS = SDSS.replace('I_','_').replace('I.','.')
from glob import glob
print SDSS
print SDSS, TWOMASS, NOMAD
print glob(SDSS), glob(TWOMASS), glob(NOMAD)
head = None
heads = []
if len(glob(TWOMASS)) > 0:
heads.append(glob(TWOMASS)[0])
if len(glob(TWOMASS.replace('.head','O*.head'))) > 0:
heads.append(glob(TWOMASS.replace('.head','O*.head'))[0])
if len(glob(NOMAD)) > 0:
heads.append(glob(NOMAD)[0])
if len(glob(NOMAD.replace('.head','O*.head'))) > 0:
heads.append(glob(NOMAD.replace('.head','O*.head'))[0])
print heads
''' pick out latest SCAMP solution not SDSS '''
if len(heads) > 0:
a = [[os.stat(f).st_mtime,f] for f in heads ]
a.sort()
print a
head = a[-1][1]
print head
''' if SDSS exists, use that '''
if len(glob(SDSS)) > 0:
head = glob(SDSS)[0]
if len(glob(SDSS.replace('.head','O*.head'))) > 0:
head = glob(SDSS.replace('.head','O*.head'))[0]
print head, SDSS, glob(SDSS)
#else:
# raise Exception
print head, SDSS
w = {}
if head is not None:
keys = []
hf = open(head,'r').readlines()
print head
for line in hf:
at_least_one = True
import re
if string.find(line,'=') != -1:
res = re.split('=',line)
name = res[0].replace(' ','')
res = re.split('/',res[1])
value = res[0].replace(' ','')
print name, value
if string.find(name,'CD')!=-1 or string.find(name,'PV')!=-1 or string.find(name,'CR')!=-1 or string.find(name,'NAXIS') != -1:
w[name] = float(value)
print line, w[name]
keys.append(name)
from copy import copy
chips[NUM] = copy(w)
print w
NUMS.append(NUM)
if at_least_one:
chip_dict = length_swarp(SUPA,FLAT_TYPE,chips)
vecs = {}
for key in keys:
vecs[key] = []
vecs['good_scamp'] = []
hdu= pyfits.open(search_params['pasted_cat'])
print search_params['pasted_cat']
table = hdu['OBJECTS'].data
print type(table)
if str(type(table)) == "<type 'NoneType'>":
save_exposure({'fixradecCR':-2},SUPA,FLAT_TYPE)
return -2
else:
CHIP = table.field('CHIP')
print keys
print chips.keys()
for k in chips.keys():
print chips[k].has_key('CRVAL1'), k
print keys
for i in range(len(CHIP)):
NUM = str(int(CHIP[i]))
good = False
for key in keys:
if chips[NUM].has_key(key):
good = True
vecs[key].append(float(chips[NUM][key]))
else:
vecs[key].append(-1.)
if good:
vecs['good_scamp'].append(1)
else:
vecs['good_scamp'].append(0)
print vecs['good_scamp']
print vecs.keys()
import scipy
for key in vecs.keys():
vecs[key] = scipy.array(vecs[key])
print vecs[key][0:20], key
ra_cat = table.field('ALPHA_J2000')
dec_cat = table.field('DELTA_J2000')
x0 = (table.field('Xpos') - vecs['CRPIX1'])
y0 = (table.field('Ypos') - vecs['CRPIX2'])
x0_ABS = (table.field('Xpos') + chip_dict['CRPIX1ZERO'] - vecs['CRPIX1'])
y0_ABS = (table.field('Ypos') + chip_dict['CRPIX2ZERO'] - vecs['CRPIX2'])
x = x0*vecs['CD1_1'] + y0*vecs['CD1_2']
y = x0*vecs['CD2_1'] + y0*vecs['CD2_2']
r = (x**2. + y**2.)**0.5
xi_terms = {'PV1_0':scipy.ones(len(x)),'PV1_1':x,'PV1_2':y,'PV1_3':r,'PV1_4':x**2.,'PV1_5':x*y,'PV1_6':y**2.,'PV1_7':x**3.,'PV1_8':x**2.*y,'PV1_9':x*y**2.,'PV1_10':y**3.}
pv1_keys = filter(lambda x: string.find(x,'PV1') != -1, vecs.keys())
print 'pv1_keys', pv1_keys
xi = reduce(lambda x,y: x + y, [xi_terms[k]*vecs[k] for k in pv1_keys])
eta_terms = {'PV2_0':scipy.ones(len(x)),'PV2_1':y,'PV2_2':x,'PV2_3':r,'PV2_4':y**2.,'PV2_5':y*x,'PV2_6':x**2.,'PV2_7':y**3.,'PV2_8':y**2.*x,'PV2_9':y*x**2.,'PV2_10':x**3.}
pv2_keys = filter(lambda x: string.find(x,'PV2') != -1, vecs.keys())
print 'pv2_keys', pv2_keys
eta = reduce(lambda x,y: x + y, [eta_terms[k]*vecs[k] for k in pv2_keys])
print xi[0:10],eta[0:10], len(eta)
print vecs.keys(), vecs['CD1_1'][0],vecs['CD1_2'][0],vecs['CD2_2'][0],vecs['CD2_1'][0]
import math
ra_out = []
dec_out = []
os.system('mkdir -p ' + tmpdir)
cat = open(tmpdir + '/' + BASE + 'cat','w')
for i in range(len(xi)):
XI = xi[i] / 180.0 * math.pi
ETA = eta[i] / 180.0 * math.pi
CRVAL1 = vecs['CRVAL1'][i]/180.0* math.pi
CRVAL2 = vecs['CRVAL2'][i]/180.0 * math.pi
p = math.sqrt(XI**2. + ETA**2.)
c = math.atan(p)
a = CRVAL1 + math.atan((XI*math.sin(c))/(p*math.cos(CRVAL2)*math.cos(c) - ETA*math.sin(CRVAL2)*math.sin(c)))
d = math.asin(math.cos(c)*math.sin(CRVAL2) + ETA*math.sin(c)*math.cos(CRVAL2)/p)
ra = a*180.0/math.pi
dec = d*180.0/math.pi
if i % 100== 0:
print 'ra_cat','dec_cat',ra,ra_cat[i], dec, dec_cat[i]
print (ra-ra_cat[i])*3600.,(dec-dec_cat[i])*3600.
''' if no solution, give a -999 value '''
if vecs['good_scamp'][i] != 1:
import random
ra = -999 - 200*random.random()
dec = -999 - 200*random.random()
ra_out.append(ra)
dec_out.append(dec)
cat.write(str(ra) + ' ' + str(dec) + '\n')
#cat.write(str(ra[i]) + ' ' + str(dec[i]) + '\n')
cat.close()
import random
index = int(random.random()*4)
colour = ['red','blue','green','yellow'][index]
rad = [1,2,3,4][index]
#os.system(' mkreg.pl -xcol 0 -ycol 1 -c -rad ' + str(rad) + ' -wcs -colour ' + colour + ' ' + BASE + 'cat')
hdu[2].data.field('Xpos_ABS')[:] = scipy.array(x0_ABS)
hdu[2].data.field('Ypos_ABS')[:] = scipy.array(y0_ABS)
hdu[2].data.field('ALPHA_J2000')[:] = scipy.array(ra_out)
hdu[2].data.field('DELTA_J2000')[:] = scipy.array(dec_out)
table = hdu[2].data
print 'BREAK'
print ra_out[0:10], table.field('ALPHA_J2000')[0:10]
print 'BREAK'
print dec_out[0:10], table.field('DELTA_J2000')[0:10]
print SUPA, search_params['pasted_cat']
os.system('rm ' + search_params['pasted_cat'])
hdu.writeto(search_params['pasted_cat'])
save_exposure({'fixradecCR':1},SUPA,FLAT_TYPE)
return 1
else:
save_exposure({'fixradecCR':-1},SUPA,FLAT_TYPE)
return -1
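# Illustrative sketch (editor's example): the (xi, eta) -> (RA, Dec) step inside
# fix_radec() is a standard gnomonic (TAN) deprojection about CRVAL1/CRVAL2.
# Here it is pulled out as a small function with invented test values; all
# angles are in degrees.
def _example_tan_deproject(xi_deg=0.01, eta_deg=-0.02, crval1_deg=150.0, crval2_deg=2.0):
    import math
    XI = math.radians(xi_deg)
    ETA = math.radians(eta_deg)
    CRVAL1 = math.radians(crval1_deg)
    CRVAL2 = math.radians(crval2_deg)
    p = math.sqrt(XI ** 2. + ETA ** 2.)
    c = math.atan(p)
    a = CRVAL1 + math.atan((XI * math.sin(c)) /
                           (p * math.cos(CRVAL2) * math.cos(c) - ETA * math.sin(CRVAL2) * math.sin(c)))
    d = math.asin(math.cos(c) * math.sin(CRVAL2) + ETA * math.sin(c) * math.cos(CRVAL2) / p)
    return math.degrees(a), math.degrees(d)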
def mk_tab(list):
import astropy, astropy.io.fits as pyfits
    from astropy.io.fits import Column
import numarray
cols = []
for ele in list:
array = ele[0]
name = ele[1]
vec = numarray.array(array)
cols.append(Column(name=name,format='1E',array=array))
coldefs = pyfits.ColDefs(cols)
hdu = pyfits.BinTableHDU.from_columns(coldefs)
return hdu
def merge(t1,t2):
import astropy, astropy.io.fits as pyfits
t = t1.columns + t2[1].columns
hdu = pyfits.BinTableHDU.from_columns(t)
return hdu
def cutout(infile,mag,color='red'):
import os, utilities
ppid = str(os.getppid())
print ppid + 'a'
#pylab.show()
outfile = raw_input('name of output file?')
color = raw_input('color of regions?')
limits = ['lower_mag','upper_mag','lower_diff','upper_diff']
lim_dict = {}
for lim in limits:
print lim + '?'
b = raw_input()
lim_dict[lim] = b
utilities.run('ldacfilter -i ' + infile + ' -t PSSC\
-c "(((SEx_' + mag + '>' + str(lim_dict['lower_mag']) + ') AND (SEx_' + mag + '<' + str(lim_dict['upper_mag']) + ')) AND (magdiff>' + str(lim_dict['lower_diff']) + ')) AND (magdiff<' + str(lim_dict['upper_diff']) + ');"\
-o cutout1.' + ppid,['cutout1.' + ppid])
utilities.run('ldactoasc -b -q -i cutout1.' + ppid + ' -t PSSC\
-k Ra Dec > ' + tmpdir + outfile,[outfile])
    utilities.run('mkreg.pl -c -rad 8 -xcol 0 -ycol 1 -wcs -colour ' + color + ' ' + tmpdir + outfile)
def get_median(cat,key):
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy
p = pyfits.open(cat)
magdiff = p[1].data.field(key)
magdiff.sort()
return magdiff[int(len(magdiff)/2)]
def coordinate_limits(cat):
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy
p = pyfits.open(cat)
good_entries = p[2].data
if 1:
mask = abs(good_entries.field('ALPHA_J2000')) > 0.0001
good_entries = good_entries[mask]
mask = abs(good_entries.field('ALPHA_J2000')) < 400
good_entries = good_entries[mask]
mask = abs(good_entries.field('DELTA_J2000')) > 0.0001
good_entries = good_entries[mask]
mask = abs(good_entries.field('DELTA_J2000')) < 300
good_entries = good_entries[mask]
mask = 100000 > abs(good_entries.field('Xpos'))
good_entries = good_entries[mask]
mask = abs(good_entries.field('Xpos')) > 0.00001
good_entries = good_entries[mask]
mask = 100000 > abs(good_entries.field('Ypos'))
good_entries = good_entries[mask]
mask = abs(good_entries.field('Ypos')) > 0.00001
good_entries = good_entries[mask]
ra = good_entries.field('ALPHA_J2000')
ra.sort()
dec = good_entries.field('DELTA_J2000')
dec.sort()
print cat, 'cat'
print ra[:100]
print dec[:100]
print ra[-100:]
print dec[-100:]
return ra[0],ra[-1],dec[0],dec[-1]
def combine_cats(cats,outfile,search_params):
#cats = [{'im_type': 'DOMEFLAT', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.DOMEFLAT.fixwcs.rawconv'}, {'im_type': 'SKYFLAT', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.SKYFLAT.fixwcs.rawconv'}, {'im_type': 'OCIMAGE', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.OCIMAGE.fixwcs.rawconv'}]
#outfile = '' + search_params['TEMPDIR'] + 'stub'
#cats = [{'im_type': 'MAIN', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS..fixwcs.rawconv'}, {'im_type': 'D', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.D.fixwcs.rawconv'}]
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy
from config_bonn import cluster, tag, arc, filters
ppid = str(os.getppid())
tables = {}
colset = 0
cols = []
for catalog in cats:
file = catalog['cat']
os.system('mkdir ' + search_params['TEMPDIR'] )
aper = tempfile.NamedTemporaryFile(dir=search_params['TEMPDIR']).name
os.system('ldactoasc -i ' + catalog['cat'] + ' -b -s -k MAG_APER MAGERR_APER -t OBJECTS > ' + aper)
cat1 = tempfile.NamedTemporaryFile(dir=search_params['TEMPDIR']).name
os.system('asctoldac -i ' + aper + ' -o ' + cat1 + ' -t OBJECTS -c ' + os.environ['bonn'] + '/photconf/MAG_APER.conf')
allconv = tempfile.NamedTemporaryFile(dir=search_params['TEMPDIR']).name
os.system('ldacjoinkey -i ' + catalog['cat'] + ' -p ' + cat1 + ' -o ' + allconv + ' -k MAG_APER1 MAG_APER2 MAGERR_APER1 MAGERR_APER2')
tables[catalog['im_type']] = pyfits.open(allconv)
#if filter == filters[0]:
# tables['notag'] = pyfits.open('' + search_params['TEMPDIR'] + 'all.conv' )
for catalog in cats:
for i in range(len(tables[catalog['im_type']][1].columns)):
print catalog['im_type'], catalog['cat']
if catalog['im_type'] != '':
tables[catalog['im_type']][1].columns[i].name = tables[catalog['im_type']][1].columns[i].name + catalog['im_type']
else:
tables[catalog['im_type']][1].columns[i].name = tables[catalog['im_type']][1].columns[i].name
cols.append(tables[catalog['im_type']][1].columns[i])
print cols
print len(cols)
hdu = pyfits.PrimaryHDU()
hduIMHEAD = pyfits.BinTableHDU.from_columns(tables[catalog['im_type']][2].columns)
hduOBJECTS = pyfits.BinTableHDU.from_columns(cols)
hdulist = pyfits.HDUList([hdu])
hdulist.append(hduIMHEAD)
hdulist.append(hduOBJECTS)
hdulist[1].header['EXTNAME']='FIELDS'
hdulist[2].header['EXTNAME']='OBJECTS'
print file
os.system('rm ' + outfile)
import re
res = re.split('/',outfile)
os.system('mkdir -p ' + reduce(lambda x,y: x + '/' + y,res[:-1]))
hdulist.writeto(outfile)
print outfile , '$#######$'
#print 'done'
def paste_cats(cats,outfile): #cats,outfile,search_params):
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy
from config_bonn import cluster, tag, arc, filters
ppid = str(os.getppid())
tables = {}
colset = 0
cols = []
table = pyfits.open(cats[0])
data = []
nrows = 0
good_cats = []
''' get rid of empty tables '''
for catalog in cats:
cattab = pyfits.open(catalog)
if not str(type(cattab[2].data)) == "<type 'NoneType'>":
good_cats.append(catalog)
cats = good_cats
for catalog in cats:
cattab = pyfits.open(catalog)
nrows += cattab[2].data.shape[0]
hduOBJECTS = pyfits.BinTableHDU.from_columns(table[2].columns, nrows=nrows)
rowstart = 0
rowend = 0
for catalog in cats:
cattab = pyfits.open(catalog)
rowend += cattab[2].data.shape[0]
for i in range(len(cattab[2].columns)):
hduOBJECTS.data.field(i)[rowstart:rowend]=cattab[2].data.field(i)
rowstart = rowend
# update SeqNr
print rowend,len( hduOBJECTS.data.field('SeqNr')), len(range(1,rowend+1))
hduOBJECTS.data.field('SeqNr')[0:rowend]=range(1,rowend+1)
#hdu[0].header['EXTNAME']='FIELDS'
hduIMHEAD = pyfits.BinTableHDU.from_columns(table[1])
print cols
print len(cols)
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
hdulist.append(hduIMHEAD)
hdulist.append(hduOBJECTS)
hdulist[1].header['EXTNAME']='FIELDS'
hdulist[2].header['EXTNAME']='OBJECTS'
print file
os.system('rm ' + outfile)
hdulist.writeto(outfile)
print outfile , '$#######$'
#print 'done'
def imstats(SUPA,FLAT_TYPE):
import os, re, utilities, bashreader, sys, string
from copy import copy
from glob import glob
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
print dict['files']
import commands
tmp_dicts = []
for file in dict['files']:
        op = commands.getoutput('imstats ' + file)
print op
res = re.split('\n',op)
for line in res:
if string.find(line,'filename') != -1:
line = line.replace('$ imstats: ','')
res2 = re.split('\t',line)
res3 = re.split('\s+',res[-1])
tmp_dict = {}
for i in range(len(res3)):
tmp_dict[res2[i]] = res3[i]
tmp_dicts.append(tmp_dict)
print tmp_dicts
median_average = 0
sigma_average = 0
for d in tmp_dicts:
print d.keys()
sigma_average += float(d['sigma'])
median_average += float(d['median'])
dict['sigma_average'] = sigma_average / len(tmp_dicts)
dict['median_average'] = median_average / len(tmp_dicts)
print dict['sigma_average'], dict['median_average']
save_exposure(dict,SUPA,FLAT_TYPE)
def save_fit_WHATISTHIS(fits,im_type,type,SUPA,FLAT_TYPE):
import MySQLdb, sys, os, re, time
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
for fit in fits:
#which_solution += 1
user_name = os.environ['USER']
time_now = time.asctime()
user = user_name #+ str(time.time())
dict = {}
#copy array but exclude lists
for ele in fit['class'].fitvars.keys():
if ele != 'condition' and ele != 'model_name' and ele != 'fixed_name':
dict[ele + '_' + type + '_' + im_type] = fit['class'].fitvars[ele]
save_exposure(dict,SUPA,FLAT_TYPE)
db2.close()
def select_analyze():
import MySQLdb, sys, os, re, time, string
from copy import copy
db2,c = connect_except()
command = "DESCRIBE fit_db"
print command
c.execute(command)
results = c.fetchall()
keys = []
for line in results:
keys.append(line[0])
command = "SELECT * from illumination_db where zp_err_galaxy_D is null and PPRUN='2002-06-04_W-J-V'" # where OBJNAME='HDFN' and filter='W-J-V' and ROTATION=0"
command = "SELECT * from fit_db where color1_star > 0.2 and OBJNAME!='HDFN' limit 2" # where matched_cat_star is null" # where OBJNAME='HDFN' and filter='W-J-V' and ROTATION=0"
first = True
while len(results) > 1 or first:
first = False
command= "SELECT * from illumination_db where (OBJNAME like 'A%' or OBJNAME like 'MACS%') and (pasted_cat is null or pasted_cat like '%None%') and CORRECTED='True' " # and PPRUN='2003-04-04_W-C-IC'"
command= "SELECT * from fit_db where correction_applied!='yes' and (None$good='y' or bootstrap$good='y' or sdss$good='y') ORDER BY RAND()" # and PPRUN='2003-04-04_W-C-IC'"
#command= "SELECT * from illumination_db where SUPA='SUPA0011100'" # and PPRUN='2003-04-04_W-C-IC'"
#command= "SELECT * from illumination_db where (OBJNAME like 'A%' or OBJNAME like 'MACS%') and SUPA='SUPA0021827'" # and PPRUN='2003-04-04_W-C-IC'"
#command = "select * from illumination_db where SUPA='SUPA0028506'"
#command = "select * from illumination_db where (OBJECT like '%0018short%') and (FILTER='W-J-B' or FILTER='W-S-Z+')" # or OBJECT like '%0018short%')" # and pasted_cat is null" # and color1_star_ is null"
print command
c.execute(command)
results = c.fetchall()
print len(results)
#print results
dicts = []
for j in range(1): #len(results)):
dict = {}
for i in range(len(results[j])):
dict[keys[i]] = results[j][i]
if 0:
construct_correction(dict['OBJNAME'],dict['FILTER'],dict['PPRUN'])
#try:
# fix_radec(dict['SUPA'],dict['FLAT_TYPE'])
#except:
# print 'failed'
trial = True
ppid = str(os.getppid())
try:
construct_correction(dict['OBJNAME'],dict['FILTER'],dict['PPRUN'])
print 'finished'
except:
ppid_loc = str(os.getppid())
print traceback.print_exc(file=sys.stdout)
if ppid_loc != ppid: sys.exit(0)
print 'exiting here'
#if trial: raise Exception
print dict['OBJNAME'], dict['PPRUN']
raw_input()
if 0:
#print dict['SUPA'], dict['file'], dict['OBJNAME'], dict['pasted_cat'], dict['matched_cat_star']
d_update = get_files(dict['SUPA'],dict['FLAT_TYPE'])
go = 0
if d_update.has_key('TRIED'):
if d_update['TRIED'] != 'YES':
go = 1
else: go = 1
if string.find(str(dict['TIME']),'N') == -1:
#print dict['TIME']
if time.time() - float(dict['TIME']) > 600:
go = 1
else: go = 0
else: go = 1
if 0: # go:
#print str(time.time())
save_exposure({'ACTIVE':'YES','TIME':str(time.time())},dict['SUPA'],dict['FLAT_TYPE'])
os.system('rm -R ' + tmpdir)
analyze(dict['SUPA'],dict['FLAT_TYPE'],dict)
save_exposure({'ACTIVE':'FINISHED'},dict['SUPA'],dict['FLAT_TYPE'])
def analyze(SUPA,FLAT_TYPE,params={}):
#try:
import sys, os, string
#os.system('rm -rf ' + search_params['TEMPDIR'] + '*')
trial = True
ppid = str(os.getppid())
try:
        construct_correction(params['OBJNAME'],params['FILTER'],params['PPRUN'])
#imstats(SUPA,FLAT_TYPE)
#if string.find(str(params['CRPIX1ZERO']),'None') != -1:
# length(SUPA,FLAT_TYPE)
#if string.find(str(params['fwhm']),'None') != -1 or str(params['fwhm'])=='0.3':
# find_seeing(SUPA,FLAT_TYPE)
#sextract(SUPA,FLAT_TYPE)
print 'finished'
#match_simple(SUPA,FLAT_TYPE)
#phot(SUPA,FLAT_TYPE)
#get_sdss_obj(SUPA,FLAT_TYPE)
#apply_photometric_calibration(SUPA,FLAT_TYPE)
print 'finished'
except:
ppid_loc = str(os.getppid())
print traceback.print_exc(file=sys.stdout)
if ppid_loc != ppid: sys.exit(0)
if trial: raise Exception
#except KeyboardInterrupt:
# raise
#except:
# ppid_loc = str(os.getppid())
# print sys.exc_info()
# print 'something else failed',ppid, ppid_loc
# if ppid_loc != ppid: sys.exit(0)
# # os.system('rm -rf /tmp/' + ppid)
##
# os.system('rm -rf /tmp/' + ppid)
#
def get_files(SUPA,FLAT_TYPE=None):
import MySQLdb, sys, os, re
db2,c = connect_except()
command = "DESCRIBE illumination_db"
#print command
c.execute(command)
results = c.fetchall()
keys = []
for line in results:
keys.append(line[0])
command = "SELECT * from illumination_db where SUPA='" + SUPA + "'" # AND FLAT_TYPE='" + FLAT_TYPE + "'"
#print command
c.execute(command)
results = c.fetchall()
dict = {}
for i in range(len(results[0])):
dict[keys[i]] = results[0][i]
#print dict
file_pat = dict['file']
import re, glob
res = re.split('_\d+O',file_pat)
pattern = res[0] + '_*O' + res[1]
files = glob.glob(pattern)
dict['files'] = files
db2.close()
return dict
def get_a_file(OBJNAME,FILTER,PPRUN):
''' get a single file w/ OBJNAME FILTER PPRUN'''
import MySQLdb, sys, os, re
db2,c = connect_except()
command = "DESCRIBE illumination_db"
#print command
c.execute(command)
results = c.fetchall()
keys = []
for line in results:
keys.append(line[0])
command="SELECT * from illumination_db where FILTER='" + FILTER + "' and OBJNAME='" + OBJNAME + "' and PPRUN='" + PPRUN + "' limit 1"
#print command
c.execute(command)
results = c.fetchall()
dict = {}
for i in range(len(results[0])):
dict[keys[i]] = results[0][i]
#print dict
file_pat = dict['file']
import re, glob
res = re.split('_\d+O',file_pat)
pattern = res[0] + '_*O' + res[1]
files = glob.glob(pattern)
dict['files'] = files
db2.close()
return dict
def get_fits(OBJNAME,FILTER,PPRUN):
import MySQLdb, sys, os, re
db2,c = connect_except()
command="SELECT * from fit_db where FILTER='" + FILTER + "' and OBJNAME='" + OBJNAME + "' and PPRUN='" + PPRUN + "'"
print command
c.execute(command)
results=c.fetchall()
db_keys = describe_db(c,'fit_db')
dtop = {}
for line in results:
for i in range(len(db_keys)):
dtop[db_keys[i]] = str(line[i])
db2.close()
return dtop
def connect_except():
import MySQLdb, sys, os, re
notConnect = True
tried = 0
while notConnect:
tried += 1
try:
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
notConnect = False
except:
print traceback.print_exc(file=sys.stdout)
import random, time
randwait = int(random.random()*30)
if randwait < 10: randwait=10
print 'rand wait!', randwait
time.sleep(randwait)
if tried > 15:
print 'too many failures'
sys.exit(0)
#print 'done'
return db2,c
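# Illustrative sketch (editor's example): the retry-with-random-backoff pattern
# used in connect_except(), shown with a generic callable instead of MySQLdb so
# it needs no database; connect_fn is a placeholder supplied by the caller.
def _example_retry(connect_fn, max_tries=15):
    import random, time
    for attempt in range(max_tries):
        try:
            return connect_fn()
        except KeyboardInterrupt:
            raise
        except:
            wait = int(random.random() * 30)
            if wait < 10: wait = 10
            time.sleep(wait)
    raise RuntimeError('too many failures')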
def save_exposure(dict,SUPA=None,FLAT_TYPE=None):
if SUPA != None and FLAT_TYPE != None:
dict['SUPA'] = SUPA
dict['FLAT_TYPE'] = FLAT_TYPE
db2,c = connect_except()
#command = "CREATE TABLE IF NOT EXISTS illumination_db ( id MEDIUMINT NOT NULL AUTO_INCREMENT, PRIMARY KEY (id))"
#print command
#c.execute("DROP TABLE IF EXISTS illumination_db")
#c.execute(command)
from copy import copy
floatvars = {}
stringvars = {}
#copy array but exclude lists
import string
letters = string.ascii_lowercase + string.ascii_uppercase.replace('E','') + '_' + '-' + ','
for ele in dict.keys():
type = 'float'
for l in letters:
if string.find(str(dict[ele]),l) != -1:
type = 'string'
if type == 'float':
floatvars[ele] = str(float(dict[ele]))
elif type == 'string':
stringvars[ele] = dict[ele]
# make database if it doesn't exist
print 'floatvars', floatvars
print 'stringvars', stringvars
for column in stringvars:
try:
command = 'ALTER TABLE illumination_db ADD ' + column + ' varchar(240)'
c.execute(command)
except: nope = 1
for column in floatvars:
try:
command = 'ALTER TABLE illumination_db ADD ' + column + ' float(30)'
c.execute(command)
except: nope = 1
# insert new observation
SUPA = dict['SUPA']
flat = dict['FLAT_TYPE']
c.execute("SELECT SUPA from illumination_db where SUPA = '" + SUPA + "' and flat_type = '" + flat + "'")
results = c.fetchall()
print results
if len(results) > 0:
print 'already added'
else:
command = "INSERT INTO illumination_db (SUPA,FLAT_TYPE) VALUES ('" + dict['SUPA'] + "','" + dict['FLAT_TYPE'] + "')"
print command
c.execute(command)
import commands
vals = ''
for key in stringvars.keys():
print key, stringvars[key]
vals += ' ' + key + "='" + str(stringvars[key]) + "',"
for key in floatvars.keys():
print key, floatvars[key]
vals += ' ' + key + "='" + floatvars[key] + "',"
vals = vals[:-1]
command = "UPDATE illumination_db set " + vals + " WHERE SUPA='" + dict['SUPA'] + "' AND FLAT_TYPE='" + dict['FLAT_TYPE'] + "'"
print command
c.execute(command)
print vals
#names = reduce(lambda x,y: x + ',' + y, [x for x in floatvars.keys()])
#values = reduce(lambda x,y: str(x) + ',' + str(y), [floatvars[x] for x in floatvars.keys()])
#names += ',' + reduce(lambda x,y: x + ',' + y, [x for x in stringvars.keys()])
#values += ',' + reduce(lambda x,y: x + ',' + y, ["'" + str(stringvars[x]) + "'" for x in stringvars.keys()])
#command = "INSERT INTO illumination_db (" + names + ") VALUES (" + values + ")"
#print command
#os.system(command)
db2.close()
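# Illustrative sketch (editor's example): save_exposure() above splits keyword
# values into float and string columns by scanning for letters; this is a
# compact, database-free equivalent of that classification on an invented
# dictionary.
def _example_classify_columns():
    sample = {'fwhm': '0.65', 'OBJECT': 'MACS0018short', 'AIRMASS': '1.2'}
    floatvars = {}
    stringvars = {}
    for key in sample.keys():
        try:
            floatvars[key] = str(float(sample[key]))
        except ValueError:
            stringvars[key] = sample[key]
    return floatvars, stringvars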
def initialize(filter,OBJNAME):
import os, re, bashreader, sys, string, utilities
from glob import glob
from copy import copy
dict = bashreader.parseFile(os.environ['bonn'] + 'progs.ini')
for key in dict.keys():
os.environ[key] = str(dict[key])
import os
ppid = str(os.getppid())
PHOTCONF = os.environ['bonn'] + '/photconf/'
#TEMPDIR = '/usr/work/pkelly/' + ppid + '/'
TEMPDIR = tmpdir
os.system('mkdir ' + TEMPDIR)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':OBJNAME}
search_params = {'path':path, 'OBJNAME':OBJNAME, 'filter':filter, 'PHOTCONF':PHOTCONF, 'DATACONF':os.environ['DATACONF'], 'TEMPDIR':TEMPDIR}
return search_params
def update_dict(SUPA,FLAT_TYPE):
import utilities
dict = get_files(SUPA,FLAT_TYPE)
kws = utilities.get_header_kw(dict['file'],['ROTATION','OBJECT','GABODSID','CONFIG','EXPTIME','AIRMASS','INSTRUM','PPRUN','BADCCD']) # return KEY/NA if not SUBARU
save_exposure(kws,SUPA,FLAT_TYPE)
def gather_exposures(OBJNAME,filters=None):
Corrected = True
if Corrected: pattern = 'I.fits'
else: pattern = ''
if not filters:
filters = ['B','W-J-B','W-J-V','W-C-RC','W-C-IC','I','W-S-Z+']
for filter_name in filters:
search_params = initialize(filter_name,OBJNAME)
import os, re, bashreader, sys, string, utilities
from glob import glob
from copy import copy
searchstr = "/%(path)s/%(filter)s/SCIENCE/*.fits" % search_params
print searchstr
files = glob(searchstr)
''' filter_name out corrected or not corrected files '''
print files
if Corrected:
files = filter(lambda x:string.find(x,'I.fits')!=-1,files)
elif not Corrected:
files = filter(lambda x:string.find(x,'I.fits')==-1,files)
print files
files.sort()
#print files
exposures = {}
# first 30 files
#print files[0:30]
import MySQLdb, sys, os, re
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
for file in files:
if string.find(file,'wcs') == -1 and string.find(file,'.sub.fits') == -1:
res = re.split('_',re.split('/',file)[-1])
exp_name = res[0]
if not exposures.has_key(exp_name): exposures[exp_name] = {'images':[],'keywords':{}}
exposures[exp_name]['images'].append(file) # exp_name is the root of the image name
if len(exposures[exp_name]['keywords'].keys()) == 0: #not exposures[exp_name]['keywords'].has_key('ROTATION'): #if exposure does not have keywords yet, then get them
exposures[exp_name]['keywords']['filter'] = filter_name
exposures[exp_name]['keywords']['file'] = file
res2 = re.split('/',file)
for r in res2:
if string.find(r,filter_name) != -1:
print r
exposures[exp_name]['keywords']['date'] = r.replace(filter_name + '_','')
exposures[exp_name]['keywords']['fil_directory'] = r
search_params['fil_directory'] = r
kws = utilities.get_header_kw(file,['CRVAL1','CRVAL2','ROTATION','OBJECT','GABODSID','CONFIG','EXPTIME','AIRMASS','INSTRUM','PPRUN','BADCCD']) # return KEY/NA if not SUBARU
''' figure out a way to break into SKYFLAT, DOMEFLAT '''
ppid = str(os.getppid())
command = 'dfits ' + file + ' > ' + search_params['TEMPDIR'] + '/header'
utilities.run(command)
file = open('' + search_params['TEMPDIR'] + 'header','r').read()
import string
if string.find(file,'SKYFLAT') != -1: exposures[exp_name]['keywords']['FLAT_TYPE'] = 'SKYFLAT'
elif string.find(file,'DOMEFLAT') != -1: exposures[exp_name]['keywords']['FLAT_TYPE'] = 'DOMEFLAT'
#print file, exposures[exp_name]['keywords']['FLAT_TYPE']
file = open('' + search_params['TEMPDIR'] + 'header','r').readlines()
import string
for line in file:
print line
if string.find(line,'Flat frame:') != -1 and string.find(line,'illum') != -1:
import re
res = re.split('SET',line)
if len(res) > 1:
res = re.split('_',res[1])
set = res[0]
exposures[exp_name]['keywords']['FLAT_SET'] = set
res = re.split('illum',line)
res = re.split('\.',res[1])
smooth = res[0]
exposures[exp_name]['keywords']['SMOOTH'] = smooth
break
for kw in kws.keys():
exposures[exp_name]['keywords'][kw] = kws[kw]
if Corrected:
exposures[exp_name]['keywords']['SUPA'] = exp_name+'I'
if not Corrected:
exposures[exp_name]['keywords']['SUPA'] = exp_name
exposures[exp_name]['keywords']['OBJNAME'] = OBJNAME
exposures[exp_name]['keywords']['CORRECTED'] = str(Corrected)
print exposures[exp_name]['keywords']
save_exposure(exposures[exp_name]['keywords'])
return exposures
def find_seeing(SUPA,FLAT_TYPE):
import os, re, utilities, sys
from copy import copy
dict = get_files(SUPA,FLAT_TYPE)
print dict['file']
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
print dict['files']
#params PIXSCALE GAIN
''' quick run through for seeing '''
children = []
for image in search_params['files']:
child = os.fork()
if child:
children.append(child)
else:
params = copy(search_params)
ROOT = re.split('\.',re.split('\/',image)[-1])[0]
params['ROOT'] = ROOT
params['ROOT_WEIGHT'] = ROOT.replace('I','')
NUM = re.split('O',re.split('\_',ROOT)[1])[0]
params['NUM'] = NUM
print ROOT
weightim = "/%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits" % params
#flagim = "/%(path)s/%(fil_directory)s/WEIGHTS/globalflag_%(NUM)s.fits" % params
#finalflagim = TEMPDIR + "flag_%(ROOT)s.fits" % params
os.system('mkdir -p ' + params['TEMPDIR'])
params['finalflagim'] = weightim
#os.system('rm ' + finalflagim)
#command = "ic -p 16 '1 %2 %1 0 == ?' " + weightim + " " + flagim + " > " + finalflagim
#utilities.run(command)
command = "nice sex %(file)s -c %(PHOTCONF)s/singleastrom.conf.sex \
-FLAG_IMAGE ''\
-FLAG_TYPE MAX \
-CATALOG_NAME %(TEMPDIR)s/seeing_%(ROOT)s.cat \
-FILTER_NAME %(PHOTCONF)s/default.conv\
-CATALOG_TYPE 'ASCII' \
-DETECT_MINAREA 8 -DETECT_THRESH 8.\
-ANALYSIS_THRESH 8 \
-WEIGHT_IMAGE /%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT_WEIGHT)s.weight.fits\
-WEIGHT_TYPE MAP_WEIGHT\
-PARAMETERS_NAME %(PHOTCONF)s/singleastrom.ascii.flag.sex" % params
print command
os.system(command)
sys.exit(0)
for child in children:
os.waitpid(child,0)
command = 'cat ' + search_params['TEMPDIR'] + 'seeing_' + SUPA.replace('I','*I') + '*cat > ' + search_params['TEMPDIR'] + 'paste_seeing_' + SUPA.replace('I','*I') + '.cat'
utilities.run(command)
file_seeing = search_params['TEMPDIR'] + '/paste_seeing_' + SUPA.replace('I','*I') + '.cat'
PIXSCALE = float(search_params['PIXSCALE'])
reload(utilities)
fwhm = utilities.calc_seeing(file_seeing,10,PIXSCALE)
save_exposure({'fwhm':fwhm},SUPA,FLAT_TYPE)
print file_seeing, SUPA, PIXSCALE
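# Illustrative sketch (editor's example): the fork/waitpid fan-out used in
# find_seeing(), with a harmless child task in place of the SExtractor call.
# Unix-only; the child exits with os._exit so it never falls through into the
# parent's code.
def _example_fork_children(n=2):
    import os
    children = []
    for i in range(n):
        child = os.fork()
        if child:
            children.append(child)
        else:
            # per-image work would go here in the child process
            os._exit(0)
    for child in children:
        os.waitpid(child, 0)
    return children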
def length_DONTUSE(SUPA,FLAT_TYPE):
import os, re, utilities, bashreader, sys, string
from copy import copy
from glob import glob
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
res = re.split('SCIENCE',search_params['files'][0])
res = re.split('/',res[0])
if res[-1]=='':res = res[:-1]
search_params['path'] = reduce(lambda x,y:x+'/'+y,res[:-1])
search_params['fil_directory'] = res[-1]
print search_params['path'], search_params['fil_directory'], 'list'
save_exposure({'path':search_params['path'],'fil_directory':search_params['fil_directory']},SUPA,FLAT_TYPE)
''' get the CRPIX values '''
start = 1
#CRPIXZERO is at the chip at the bottom left and so has the greatest value!!!!
x = []
y = []
chips = {}
NUMS = []
all_chip_dict = {}
for image in search_params['files']:
print image
res = re.split('\_\d+',re.split('\/',image)[-1])
#print res
imroot = "/%(path)s/%(fil_directory)s/SCIENCE/" % search_params
im = imroot + res[0] + '_1' + res[1]
#print im
crpix = utilities.get_header_kw(image,['CRPIX1','CRPIX2','NAXIS1','NAXIS2','CRVAL1','CRVAL2','IMAGEID'])
if start == 1:
crpixzero = copy(crpix)
crpixhigh = copy(crpix)
start = 0
from copy import copy
print float(crpix['CRPIX1']) < float(crpixzero['CRPIX1']), float(crpix['CRPIX2']) < float(crpixzero['CRPIX2'])
if float(crpix['CRPIX1']) + 0 >= float(crpixzero['CRPIX1']):
crpixzero['CRPIX1'] = copy(crpix['CRPIX1'])
if float(crpix['CRPIX2']) + 0 >= float(crpixzero['CRPIX2']):
crpixzero['CRPIX2'] = copy(crpix['CRPIX2'])
if float(crpix['CRPIX1']) - 0 <= float(crpixhigh['CRPIX1']):
crpixhigh['CRPIX1'] = copy(crpix['CRPIX1'])
if float(crpix['CRPIX2']) - 0 <= float(crpixhigh['CRPIX2']):
crpixhigh['CRPIX2'] = copy(crpix['CRPIX2'])
print crpix['CRPIX1'], crpix['CRPIX2'], crpixzero['CRPIX1'], crpixzero['CRPIX2'], crpixhigh['CRPIX1'], crpixhigh['CRPIX2']#, crpixhigh
x.append(float(crpix['CRPIX1']))
y.append(float(crpix['CRPIX2']))
chips[crpix['IMAGEID']] = crpix
NUMS.append(crpix['IMAGEID'])
for kw in ['CRPIX1','CRPIX2','NAXIS1','NAXIS2','CRVAL1','CRVAL2']:
all_chip_dict[kw+ '_' + str(crpix['IMAGEID'])] = crpix[kw]
NUMScommas = reduce(lambda x,y: str(x) + ',' + str(y),NUMS)
all_chip_dict['CHIPS'] = NUMScommas
print all_chip_dict
LENGTH1 = abs(float(crpixhigh['CRPIX1']) - float(crpixzero['CRPIX1'])) + float(crpix['NAXIS1'])
LENGTH2 = abs(float(crpixhigh['CRPIX2']) - float(crpixzero['CRPIX2'])) + float(crpix['NAXIS2'])
chips['CRPIX1ZERO'] = crpixzero['CRPIX1']
chips['CRPIX2ZERO'] = crpixzero['CRPIX2']
chips['NAXIS1'] = crpixzero['NAXIS1']
chips['NAXIS2'] = crpixzero['NAXIS2']
print LENGTH1, LENGTH2, crpixzero['CRPIX1'], crpixzero['CRPIX2'], crpixhigh['CRPIX1'], crpixhigh['CRPIX2']#, crpixhigh
all_chip_dict.update({'crfixed':'third','LENGTH1':LENGTH1,'LENGTH2':LENGTH2,'CRPIX1ZERO':crpixzero['CRPIX1'],'CRPIX2ZERO':crpixzero['CRPIX2'],'CRVAL1':crpix['CRVAL1'],'CRVAL2':crpix['CRVAL2']})
save_exposure(all_chip_dict,SUPA,FLAT_TYPE)
return chips
#return x,y
def apply_correction2(SUPA,FLAT_TYPE):
chips = length(SUPA,FLAT_TYPE)
for chip in [1]:
#retrieve coefficients
d = get_fits(CLUSTER,FILTER,PPRUN)
column_prefix = sample+'$'+sample_size+'$'
position_columns_names = re.split('\,',d[column_prefix + 'positioncolumns'])
fitvars = {}
cheby_terms_dict = {}
for ele in position_columns:
res = re.split('$',ele['name'])
fitvars[ele['name']] = float(d[sample+'$'+sample_size+'$'+ele['name']])
for term in cheby_terms:
if term['n'] == ele['name'][2:]:
cheby_terms_dict[term['n']] = term
cheby_terms_use = [cheby_terms_dict[k] for k in cheby_terms_dict.keys()]
print cheby_terms_use, fitvars
''' make images of illumination corrections '''
for ROT in EXPS.keys():
size_x=LENGTH1
size_y=LENGTH2
bin=100
import numpy, math, pyfits, os
x,y = numpy.meshgrid(numpy.arange(0,size_x,bin),numpy.arange(0,size_y,bin))
F=0.1
print 'calculating'
x = coord_conv_x(x)
y = coord_conv_y(y)
epsilon = 0
index = 0
for term in cheby_terms_use:
index += 1
print index, ROT, term, fitvars[str(ROT)+'$'+term['n']]
epsilon += fitvars[str(ROT)+'$'+term['n']]*term['fx'](x,y)*term['fy'](x,y)
print 'writing'
hdu = pyfits.PrimaryHDU(epsilon)
#os.system('rm ' + tmpdir + 'correction' + ROT + filter + sample_size + '.fits')
#hdu.writeto(tmpdir + '/correction' + ROT + filter + sample_size + '.fits')
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':CLUSTER}
illum_dir = path + 'PHOTOMETRY/ILLUMINATION/' + FILTER + '/' + str(ROT)
os.system('mkdir -p ' + illum_dir)
im = illum_dir + '/correction' + sample + sample_size + '.fits'
save_fit({'PPRUN':PPRUN,'FILTER':FILTER,'CLUSTER':CLUSTER,sample+'$'+sample_size+'$'+str(ROT)+'$im':im})
os.system('rm ' + im)
hdu.writeto(im)
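# Illustrative sketch (not called by the pipeline): evaluating a separable 2-D
# polynomial illumination surface on a coarse grid, mirroring the `epsilon` sum
# in apply_correction2.  The term dicts are assumed to look like
# {'n': '00', 'fx': callable, 'fy': callable} with coefficients keyed by term
# id; these names and shapes are assumptions for illustration only.
def _example_eval_correction(coeffs, terms, size_x, size_y, binning=100):
    import numpy
    x, y = numpy.meshgrid(numpy.arange(0, size_x, binning),
                          numpy.arange(0, size_y, binning))
    surface = numpy.zeros(x.shape)
    for term in terms:
        # each term contributes coefficient * fx(x, y) * fy(x, y), as above
        surface += coeffs[term['n']] * term['fx'](x, y) * term['fy'](x, y)
    return surface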
def sdss_coverage(SUPA,FLAT_TYPE):
import commands, string
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
if 0:
#print 'CRVAL1', search_params['CRVAL1'], search_params['CRVAL1'] == 'None'
#if str(search_params['CRVAL1']) == 'None':
# print search_params['FLAT_TYPE'], 'FLAT_TYPE'
if search_params['CRVAL1'] is None:
length(search_params['SUPA'],search_params['FLAT_TYPE'])
dict = get_files(SUPA,FLAT_TYPE)
search_params.update(dict)
print search_params['CRVAL1']
crval1 = float(search_params['CRVAL1'])
crval2 = float(search_params['CRVAL2'])
query = 'select ra, dec from star where ra between ' + str(crval1-0.1) + ' and ' + str(crval1+0.1) + ' and dec between ' + str(crval2-0.1) + ' and ' + str(crval2+0.1)
print query
import sqlcl
lines = sqlcl.query(query).readlines()
print lines
if len(lines) > 1: sdss_coverage=True
else: sdss_coverage=False
save_exposure({'sdss_coverage':sdss_coverage},SUPA,FLAT_TYPE)
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
command = "select cov from sdss_db where OBJNAME='" + dict['OBJNAME'] + "'"
c.execute(command)
results=c.fetchall()
print results
sdss_coverage = results[0][0]
import string
if string.find(sdss_coverage,'True') != -1:
cov = True
else: cov=False
starcat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/PHOTOMETRY/sdssstar.cat' % {'OBJNAME':search_params['OBJNAME']}
galaxycat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/PHOTOMETRY/sdssgalaxy.cat' % {'OBJNAME':search_params['OBJNAME']}
return cov, galaxycat, starcat
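# Minimal sketch of the SDSS box query used in sdss_coverage(): query a small
# box around the pointing centre and treat more than one returned row as
# coverage.  sqlcl is the same helper imported above; the 0.1 deg half-width
# matches the value used in the function.
def _example_sdss_box_query(crval1, crval2, halfwidth=0.1):
    import sqlcl
    query = ('select ra, dec from star where ra between %f and %f '
             'and dec between %f and %f' % (crval1 - halfwidth, crval1 + halfwidth,
                                            crval2 - halfwidth, crval2 + halfwidth))
    lines = sqlcl.query(query).readlines()
    return len(lines) > 1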
def sextract(SUPA,FLAT_TYPE):
import os, re, utilities, bashreader, sys, string
from copy import copy
from glob import glob
trial = False
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
subpath='/nfs/slac/g/ki/ki05/anja/SUBARU/'
print search_params
print SUPA, FLAT_TYPE, search_params['files']
kws = utilities.get_header_kw(search_params['files'][0],['PPRUN'])
print kws['PPRUN']
pprun = kws['PPRUN']
#fs = glob.glob(subpath+pprun+'/SCIENCE_DOMEFLAT*.tarz')
#if len(fs) > 0:
# os.system('tar xzvf ' + fs[0])
#fs = glob.glob(subpath+pprun+'/SCIENCE_SKYFLAT*.tarz')
#if len(fs) > 0:
# os.system('tar xzvf ' + fs[0])
search_params['files'].sort()
children = []
if 1:
for image in search_params['files']:
ROOT = re.split('\.',re.split('\/',image)[-1])[0]
BASE = re.split('O',ROOT)[0]
NUM = re.split('O',re.split('\_',ROOT)[1])[0]
print image, search_params['CRVAL1ASTROMETRY_'+NUM]
''' copy over ASTROMETRY keywords to Corrected if they exist for the unCorrected frame '''
if search_params['CORRECTED']=='True': # and string.find(str(search_params['CRVAL1ASTROMETRY_2']),'None') != -1:
''' adding correct WCS info '''
dict_uncorrected = get_files(SUPA[:-1],FLAT_TYPE)
d = {}
akeys = filter(lambda x:string.find(x,'ASTROMETRY')!=-1,dict_uncorrected.keys())
for key in akeys:
d[key] = dict_uncorrected[key]
print key, d[key], SUPA[:-1]
save_exposure(d,SUPA,FLAT_TYPE)
os.system('mkdir -p ' + search_params['TEMPDIR'])
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
for key in akeys:
print key, search_params[key]
for image in search_params['files']:
print image
child = False
if not trial:
child = os.fork()
if child:
children.append(child)
params = copy(search_params)
ROOT = re.split('\.',re.split('\/',image)[-1])[0]
params['ROOT'] = ROOT
params['ROOT_WEIGHT'] = ROOT.replace('I','')
BASE = re.split('O',ROOT)[0]
params['BASE'] = BASE
NUM = re.split('O',re.split('\_',ROOT)[1])[0]
params['NUM'] = NUM
print NUM, BASE, ROOT
if not child:
if (search_params['CORRECTED']!='True' or (search_params['CORRECTED']=='True' and string.find(str(search_params['CRVAL1ASTROMETRY_' + NUM]),'None') == -1)):
try:
params['GAIN'] = 2.50 ## WARNING!!!!!!
print ROOT
finalflagim = "%(TEMPDIR)sflag_%(ROOT)s.fits" % params
res = re.split('SCIENCE',image)
res = re.split('/',res[0])
if res[-1]=='':res = res[:-1]
params['path'] = reduce(lambda x,y:x+'/'+y,res[:-1])
params['fil_directory'] = res[-1]
weightim = "/%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits" % params
#flagim = "/%(path)s/%(fil_directory)s/WEIGHTS/globalflag_%(NUM)s.fits" % params
#finalflagim = TEMPDIR + "flag_%(ROOT)s.fits" % params
params['finalflagim'] = weightim
im = "/%(path)s/%(fil_directory)s/SCIENCE/%(ROOT)s.fits" % params
crpix = utilities.get_header_kw(im,['CRPIX1','CRPIX2'])
#if search_params['SDSS_coverage'] == 'yes': catalog = 'SDSS-R6'
#else: catalog = '2MASS'
SDSS1 = "/%(path)s/%(fil_directory)s/SCIENCE/headers_scamp_SDSS-R6/%(BASE)s.head" % params
SDSS2 = "/%(path)s/%(fil_directory)s/SCIENCE/headers_scamp_SDSS-R6/%(BASE)sO*.head" % params
from glob import glob
print glob(SDSS1), glob(SDSS2)
head = None
if len(glob(SDSS1)) > 0:
head = glob(SDSS1)[0]
elif len(glob(SDSS2)) > 0:
head = glob(SDSS2)[0]
''' see if image has been run through astrometry.net. if not, run it. '''
if True:
if not search_params.has_key('ASTROMETRYNET_' + NUM):
save_exposure({'ASTROMETRYNET_' + NUM:'None'},SUPA,FLAT_TYPE)
dict = get_files(dict['SUPA'],dict['FLAT_TYPE'])
search_params.update(dict)
if search_params['CORRECTED']!='True' and string.find(str(search_params['CRVAL1ASTROMETRY_' + NUM]),'None') != -1: #head is None:
save_exposure({'ASTROMETRYNET_' + NUM:'yes'},SUPA,FLAT_TYPE)
imtmp = "%(TEMPDIR)s/%(ROOT)s.tmp.fits" % params
imfix = "%(TEMPDIR)s/%(ROOT)s.fixwcs.fits" % params
imwcs = "%(TEMPDIR)s/%(ROOT)s.wcsfile" % params
command = "cp " + im + " " + imtmp
print command
utilities.run(command)
os.system('rm ' + imfix)
#command = '/nfs/slac/g/ki/ki04/pkelly/astrometry/bin//solve-field --cpulimit 60 --no-verify --no-plots --overwrite --scale-units arcsecperpix --scale-low ' + str(float(params['PIXSCALE'])-0.005) + ' --scale-high ' + str(float(params['PIXSCALE'])+0.005) + ' -N ' + imfix + ' ' + imtmp
command = astrom + ' --temp-dir ' + tmpdir + ' --cpulimit 100 --no-verify --no-plots --overwrite --scale-units arcsecperpix --scale-low ' + str(float(params['PIXSCALE'])-0.005) + ' --scale-high ' + str(float(params['PIXSCALE'])+0.005) + ' -N ' + imfix + ' ' + imtmp
print command
os.system(command)
os.system('rm ' + imtmp)
from glob import glob
if len(glob(imfix)):
command = 'imhead < ' + imfix + ' > ' + imwcs
print command
os.system(command)
hf = open(imwcs,'r').readlines()
hdict = {}
for line in hf:
import re
if string.find(line,'=') != -1:
res = re.split('=',line)
name = res[0].replace(' ','')
res = re.split('/',res[1])
value = res[0].replace(' ','')
print name, value
hdict[name] = value
''' now save the wcs '''
wcsdict = {}
import commands
out = commands.getoutput('gethead ' + imfix + ' CRPIX1 CRPIX2')
import re
res = re.split('\s+',out)
os.system('sethead ' + imfix + ' CRPIX1OLD=' + res[0])
os.system('sethead ' + imfix + ' CRPIX2OLD=' + res[1])
for name in ['CRVAL1','CRVAL2','CD1_1','CD1_2','CD2_1','CD2_2','CRPIX1','CRPIX2']:
print name + 'ASTROMETRY_' + NUM, hdict[name]
wcsdict[name + 'ASTROMETRY_' + NUM] = hdict[name]
save_exposure(wcsdict,SUPA,FLAT_TYPE)
dict = get_files(dict['SUPA'],dict['FLAT_TYPE'])
search_params.update(dict)
hdict = {}
if string.find(str(search_params['CD1_1ASTROMETRY_' + NUM]),'None') == -1: #head is None:
for name in ['CRVAL1','CRVAL2','CD1_1','CD1_2','CD2_1','CD2_2','CRPIX1','CRPIX2']:
print name + 'ASTROMETRY', search_params[name+'ASTROMETRY_' + NUM]
hdict[name] = search_params[name+'ASTROMETRY_' + NUM]
elif head is not None:
''' if no solution from astrometry.net, use the Swarp solution '''
hf = open(head,'r').readlines()
for line in hf:
import re
if string.find(line,'=') != -1:
res = re.split('=',line)
name = res[0].replace(' ','')
res = re.split('/',res[1])
value = res[0].replace(' ','')
print name, value
hdict[name] = value
else: sys.exit(0)
imfix = "%(TEMPDIR)s/%(ROOT)s.fixwcs.fits" % params
print imfix
os.system('mkdir ' + search_params['TEMPDIR'])
command = "cp " + im + " " + imfix
print command
print 'copying file', im
utilities.run(command)
print 'finished copying'
import commands
out = commands.getoutput('gethead ' + imfix + ' CRPIX1 CRPIX2')
import re
res = re.split('\s+',out)
os.system('sethead ' + imfix + ' CRPIX1OLD=' + res[0])
os.system('sethead ' + imfix + ' CRPIX2OLD=' + res[1])
for name in ['CRVAL1','CRVAL2','CD1_1','CD1_2','CD2_1','CD2_2','CRPIX1','CRPIX2']:
command = 'sethead ' + imfix + ' ' + name + '=' + str(hdict[name])
print command
os.system(command)
''' now run sextractor '''
if 1:
main_file = '%(TEMPDIR)s/%(ROOT)s.fixwcs.fits' % params
doubles_raw = [{'file_pattern':main_file,'im_type':''},]
#{'file_pattern':subpath+pprun+'/SCIENCE_DOMEFLAT*/'+BASE+'OC*.fits','im_type':'D'},
#{'file_pattern':subpath+pprun+'/SCIENCE_SKYFLAT*/'+BASE+'OC*.fits','im_type':'S'}]
#{'file_pattern':subpath+pprun+'/SCIENCE/OC_IMAGES/'+BASE+'OC*.fits','im_type':'OC'}
# ]
print doubles_raw
doubles_output = []
print doubles_raw
for double in doubles_raw:
file = glob(double['file_pattern'])
if len(file) > 0:
params.update(double)
params['double_cat'] = '%(TEMPDIR)s/%(ROOT)s.%(im_type)s.fixwcs.cat' % params
params['file_double'] = file[0]
#print params
#for par in ['fwhm','GAIN']:
# print par, type(params[par]), params[par]
command = "nice sex %(TEMPDIR)s%(ROOT)s.fixwcs.fits,%(file_double)s -c %(PHOTCONF)s/phot.conf.sex \
-PARAMETERS_NAME %(PHOTCONF)s/phot.param.sex \
-CATALOG_NAME %(double_cat)s \
-FILTER_NAME %(DATACONF)s/default.conv\
-FILTER Y \
-FLAG_TYPE MAX\
-FLAG_IMAGE ''\
-SEEING_FWHM %(fwhm).3f \
-DETECT_MINAREA 3 -DETECT_THRESH 3 -ANALYSIS_THRESH 3 \
-MAG_ZEROPOINT 27.0 \
-GAIN %(GAIN).3f \
-WEIGHT_IMAGE /%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT_WEIGHT)s.weight.fits\
-WEIGHT_TYPE MAP_WEIGHT" % params
#-CHECKIMAGE_TYPE BACKGROUND,APERTURES,SEGMENTATION\
#-CHECKIMAGE_NAME /%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.background.fits,/%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.apertures.fits,/%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.segmentation.fits\
catname = "%(TEMPDIR)s/%(ROOT)s.cat" % params
print command
utilities.run(command,[catname])
command = 'ldacconv -b 1 -c R -i ' + params['double_cat'] + ' -o ' + params['double_cat'].replace('cat','rawconv')
print command
utilities.run(command)
#command = 'ldactoasc -b -q -i ' + params['double_cat'].replace('cat','rawconv') + ' -t OBJECTS\
# -k ALPHA_J2000 DELTA_J2000 > ' + params['double_cat'].replace('cat','pos')
#print command
#utilities.run(command)
#print 'mkreg.pl -c -rad 8 -xcol 0 -ycol 1 -wcs -colour green ' + params['double_cat'].replace('cat','pos')
#utilities.run(command)
#print params['double_cat'].replace('cat','pos')
# Xpos_ABS is difference of CRPIX and zero CRPIX
doubles_output.append({'cat':params['double_cat'].replace('cat','rawconv'),'im_type':double['im_type']})
print doubles_output
print '***********************************'
outfile = params['TEMPDIR'] + params['ROOT'] + '.conv'
combine_cats(doubles_output,outfile,search_params)
#outfile_field = params['TEMPDIR'] + params['ROOT'] + '.field'
#command = 'ldacdeltab -i ' + outfile + ' -t FIELDS -o ' + outfile_field
#utilities.run(command)
command = 'ldactoasc -b -q -i ' + outfile + ' -t OBJECTS\
-k ALPHA_J2000 DELTA_J2000 > ' + outfile.replace('conv','pos')
print command
utilities.run(command)
command = 'mkreg.pl -c -rad 8 -xcol 0 -ycol 1 -wcs -colour green ' + outfile.replace('conv','pos')
print command
utilities.run(command)
print outfile
command = 'ldaccalc -i ' + outfile + ' -o ' + params['TEMPDIR'] + params['ROOT'] + '.newpos -t OBJECTS -c "(Xpos + ' + str(float(search_params['CRPIX1ZERO']) - float(crpix['CRPIX1'])) + ');" -k FLOAT -n Xpos_ABS "" -c "(Ypos + ' + str(float(search_params['CRPIX2ZERO']) - float(crpix['CRPIX2'])) + ');" -k FLOAT -n Ypos_ABS "" -c "(Ypos*0 + ' + str(params['NUM']) + ');" -k FLOAT -n CHIP "" '
print command
utilities.run(command)
except:
import traceback
traceback.print_exc(file=sys.stdout)
if not trial:
sys.exit(0)
if trial:
raise Exception
if not trial:
sys.exit(0)
print children
for child in children:
print 'waiting for', child
os.waitpid(child,0)
print 'finished waiting'
print path, SUPA, search_params['filter'], search_params['ROTATION']
pasted_cat = path + 'PHOTOMETRY/ILLUMINATION/' + 'pasted_' + SUPA + '_' + search_params['filter'] + '_' + str(search_params['ROTATION']) + '.cat'
print pasted_cat
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/')
from glob import glob
outcat = search_params['TEMPDIR'] + 'tmppaste_' + SUPA + '.cat'
newposlist = glob(search_params['TEMPDIR'] + SUPA.replace('I','*I') + '*newpos')
print search_params['TEMPDIR'] + SUPA.replace('I','*I') + '*newpos'
if len(newposlist) > 1:
#command = 'ldacpaste -i ' + search_params['TEMPDIR'] + SUPA + '*newpos -o ' + pasted_cat
#print command
files = glob(search_params['TEMPDIR'] + SUPA.replace('I','*I') + '*newpos')
print files, search_params['TEMPDIR'] + SUPA.replace('I','*I') + '*newpos'
paste_cats(files,pasted_cat)
else:
command = 'cp ' + newposlist[0] + ' ' + pasted_cat
utilities.run(command)
save_exposure({'pasted_cat':pasted_cat},SUPA,FLAT_TYPE)
command = "rm -rf " + search_params['TEMPDIR']
os.system(command)
#fs = glob.glob(subpath+pprun+'/SCIENCE_DOMEFLAT*.tarz'.replace('.tarz',''))
#if len(fs) > 0:
# os.system('tar xzvf ' + fs[0])
#fs = glob.glob(subpath+pprun+'/SCIENCE_SKYFLAT*.tarz'.replace('.tarz',''))
#fs = glob.glob(subpath+pprun+'/SCIENCE_SKYFLAT*.tarz')
#if len(fs) > 0:
# os.system('tar xzvf ' + fs[0])
#return exposures, LENGTH1, LENGTH2
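# Illustrative sketch (not part of the pipeline): the Xpos_ABS/Ypos_ABS columns
# added by the ldaccalc call in sextract() shift chip pixel positions into a
# common mosaic frame using the offset between this chip's CRPIX and the
# reference CRPIX1ZERO/CRPIX2ZERO stored by length().
def _example_mosaic_position(xpos, ypos, crpix1, crpix2, crpix1zero, crpix2zero):
    x_abs = xpos + (float(crpix1zero) - float(crpix1))
    y_abs = ypos + (float(crpix2zero) - float(crpix2))
    return x_abs, y_abs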
def get_sdss_obj_ext(SUPA, FLAT_TYPE):
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
ROTATION = str(search_params['ROTATION']) #exposures[exposure]['keywords']['ROTATION']
import os
starcat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/PHOTOMETRY/sdssstar%(ROTATION)s.cat' % {'ROTATION':ROTATION,'OBJNAME':search_params['OBJNAME']}
galaxycat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/PHOTOMETRY/sdssgalaxy%(ROTATION)s.cat' % {'ROTATION':ROTATION,'OBJNAME':search_params['OBJNAME']}
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
illum_path = '/nfs/slac/g/ki/ki05/anja/SUBARU/ILLUMINATION/'
#os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/STAR/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/GALAXY/')
from glob import glob
print starcat
for type,cat in [['star',starcat]]: #,['galaxy',galaxycat]]:
catalog = search_params['pasted_cat'] #exposures[exposure]['pasted_cat']
ramin,ramax, decmin, decmax = coordinate_limits(catalog)
limits = {'ramin':ramin-0.2,'ramax':ramax+0.2,'decmin':decmin-0.2,'decmax':decmax+0.2}
print ramin,ramax, decmin, decmax
if len(glob(cat)) == 0:
#os.system('rm ' + cat)
image = search_params['files'][0]
print image
import retrieve_test
retrieve_test.run(image,cat,type,limits)
save_exposure({'starcat':cat},SUPA,FLAT_TYPE)
return cat
def get_sdss_obj(SUPA, FLAT_TYPE):
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
ROTATION = str(search_params['ROTATION']) #exposures[exposure]['keywords']['ROTATION']
import os
starcat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/PHOTOMETRY/sdssstar%(ROTATION)s.cat' % {'ROTATION':ROTATION,'OBJNAME':search_params['OBJNAME']}
galaxycat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/PHOTOMETRY/sdssgalaxy%(ROTATION)s.cat' % {'ROTATION':ROTATION,'OBJNAME':search_params['OBJNAME']}
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
illum_path = '/nfs/slac/g/ki/ki05/anja/SUBARU/ILLUMINATION/'
#os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/STAR/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/GALAXY/')
from glob import glob
print starcat
for type,cat in [['star',starcat]]: #,['galaxy',galaxycat]]:
catalog = search_params['pasted_cat'] #exposures[exposure]['pasted_cat']
ramin,ramax, decmin, decmax = coordinate_limits(catalog)
limits = {'ramin':ramin-0.2,'ramax':ramax+0.2,'decmin':decmin-0.2,'decmax':decmax+0.2}
print ramin,ramax, decmin, decmax
if len(glob(cat)) == 0:
#os.system('rm ' + cat)
image = search_params['files'][0]
print image
import retrieve_test
retrieve_test.run(image,cat,type,limits)
save_exposure({'starcat':cat},SUPA,FLAT_TYPE)
def match_simple(SUPA,FLAT_TYPE):
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
print 'hey'
ROTATION = str(search_params['ROTATION']) #exposures[exposure]['keywords']['ROTATION']
import os
starcat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/PHOTOMETRY/sdssstar%(ROTATION)s.cat' % {'ROTATION':ROTATION,'OBJNAME':search_params['OBJNAME']}
galaxycat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/PHOTOMETRY/sdssgalaxy%(ROTATION)s.cat' % {'ROTATION':ROTATION,'OBJNAME':search_params['OBJNAME']}
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
illum_path = '/nfs/slac/g/ki/ki05/anja/SUBARU/ILLUMINATION/'
#os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/STAR/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/GALAXY/')
from glob import glob
print starcat
for type,cat in [['star',starcat]]: #,['galaxy',galaxycat]]:
catalog = search_params['pasted_cat'] #exposures[exposure]['pasted_cat']
ramin,ramax, decmin, decmax = coordinate_limits(catalog)
limits = {'ramin':ramin-0.2,'ramax':ramax+0.2,'decmin':decmin-0.2,'decmax':decmax+0.2}
print ramin,ramax, decmin, decmax
if len(glob(cat)) == 0:
#os.system('rm ' + cat)
image = search_params['files'][0]
print image
import retrieve_test
retrieve_test.run(image,cat,type,limits)
filter = search_params['filter'] #exposures[exposure]['keywords']['filter']
#GABODSID = exposures[exposure]['keywords']['GABODSID']
OBJECT = search_params['OBJECT'] #exposures[exposure]['keywords']['OBJECT']
print catalog
outcat = path + 'PHOTOMETRY/ILLUMINATION/' + type + '/' + 'matched_' + SUPA + '_' + filter + '_' + ROTATION + '_' + type + '.cat'
outcat_dir = path + 'PHOTOMETRY/ILLUMINATION/' + type + '/' + ROTATION + '/' + OBJECT + '/'
os.system('mkdir -p ' + outcat_dir)
file = 'matched_' + SUPA + '.cat'
linkdir = illum_path + '/' + filter + '/' + ROTATION + '/' + OBJECT + '/'
#outcatlink = linkdir + 'matched_' + exposure + '_' + OBJNAME + '_' + GABODSID + '.cat'
outcatlink = linkdir + 'matched_' + SUPA + '_' + search_params['OBJNAME'] + '_' + type + '.cat'
os.system('mkdir -p ' + linkdir)
os.system('rm ' + outcat)
command = 'match_simple.sh ' + catalog + ' ' + cat + ' ' + outcat
print command
os.system(command)
os.system('rm ' + outcatlink)
command = 'ln -s ' + outcat + ' ' + outcatlink
print command
os.system(command)
save_exposure({'matched_cat_' + type:outcat},SUPA,FLAT_TYPE)
print type, 'TYPE!'
print outcat, type
#exposures[exposure]['matched_cat_' + type] = outcat
#return exposures
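# match_simple.sh is an external tool; the sketch below only illustrates the
# kind of positional matching it performs: pair each object with the nearest
# reference source within a small radius.  Plain numpy, assumes small catalogs
# and ignores spherical geometry (adequate for arcsecond-scale radii).
def _example_positional_match(ra1, dec1, ra2, dec2, radius_deg=1.0 / 3600.0):
    import numpy
    ra1, dec1 = numpy.asarray(ra1, dtype=float), numpy.asarray(dec1, dtype=float)
    ra2, dec2 = numpy.asarray(ra2, dtype=float), numpy.asarray(dec2, dtype=float)
    pairs = []
    for i in range(len(ra1)):
        d2 = (ra2 - ra1[i]) ** 2 + (dec2 - dec1[i]) ** 2
        j = int(numpy.argmin(d2))
        if d2[j] < radius_deg ** 2:
            pairs.append((i, j))
    return pairs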
def phot(SUPA,FLAT_TYPE):
dict = get_files(SUPA,FLAT_TYPE)
print dict.keys()
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
filter = dict['filter']
import utilities
info = {'B':{'filter':'g','color1':'gmr','color2':'umg','EXTCOEFF':-0.2104,'COLCOEFF':0.0},\
'W-J-B':{'filter':'g','color1':'gmr','color2':'umg','EXTCOEFF':-0.2104,'COLCOEFF':0.0},\
'W-J-V':{'filter':'g','color1':'gmr','color2':'rmi','EXTCOEFF':-0.1202,'COLCOEFF':0.0},\
'W-C-RC':{'filter':'r','color1':'rmi','color2':'gmr','EXTCOEFF':-0.0925,'COLCOEFF':0.0},\
'W-C-IC':{'filter':'i','color1':'imz','color2':'rmi','EXTCOEFF':-0.02728,'COLCOEFF':0.0},\
'W-S-I+':{'filter':'i','color1':'imz','color2':'rmi','EXTCOEFF':-0.02728,'COLCOEFF':0.0},\
'W-S-Z+':{'filter':'z','color1':'imz','color2':'rmi','EXTCOEFF':0.0,'COLCOEFF':0.0}}
import mk_saturation_plot,os,re
os.environ['BONN_TARGET'] = search_params['OBJNAME']
os.environ['INSTRUMENT'] = 'SUBARU'
stars_0 = []
stars_90 = []
ROTATION = dict['ROTATION']
print ROTATION
import os
ppid = str(os.getppid())
from glob import glob
for im_type in ['']: #,'D','S']:
for type in ['star']: #,'galaxy']:
file = dict['matched_cat_' + type]
print file
print file
if type == 'galaxy':
mag='MAG_AUTO' + im_type
magerr='MAGERR_AUTO' + im_type
class_star = "<0.9"
if type == 'star':
mag='MAG_APER2' + im_type
magerr='MAGERR_APER2' + im_type
class_star = ">0.9"
print 'filter', filter
os.environ['BONN_FILTER'] = filter
filt = re.split('_',filter)[0]
d = info[filt]
print file
utilities.run('ldacfilter -i ' + file + ' -o ' + search_params['TEMPDIR'] + 'good.stars' + ' -t PSSC\
-c "(Flag!=-99);"',['' + search_params['TEMPDIR'] + 'good.stars'])
utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + 'good.stars -o ' + search_params['TEMPDIR'] + 'good.colors -t PSSC\
-c "((((SEx_' + mag + '!=0 AND ' + d['color1'] + '<900) AND ' + d['color1'] + '!=0) AND ' + d['color1'] + '>-900) AND ' + d['color1'] + '!=0);"',['' + search_params['TEMPDIR'] + 'good.colors'])
print '' + search_params['TEMPDIR'] + 'good.colors'
utilities.run('ldaccalc -i ' + search_params['TEMPDIR'] + 'good.colors -t PSSC -c "(' + d['filter'] + 'mag - SEx_' + mag + ');" -k FLOAT -n magdiff "" -o ' + search_params['TEMPDIR'] + 'all.diffA.cat' ,[search_params['TEMPDIR'] + 'all.diffA.cat'] )
median = get_median('' + search_params['TEMPDIR'] + 'all.diffA.cat','magdiff')
utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + 'all.diffA.cat -o ' + search_params['TEMPDIR'] + 'all.diffB.cat -t PSSC\
-c "((magdiff > ' + str(median -1.25) + ') AND (magdiff < ' + str(median + 1.25) + '));"',['' + search_params['TEMPDIR'] + 'good.colors'])
utilities.run('ldaccalc -i ' + search_params['TEMPDIR'] + 'all.diffB.cat -t PSSC -c "(SEx_MaxVal + SEx_BackGr);" -k FLOAT -n MaxVal "" -o ' + search_params['TEMPDIR'] + 'all.diff.cat' ,['' + search_params['TEMPDIR'] + 'all.diff.cat'] )
command = 'ldactoasc -b -q -i ' + search_params['TEMPDIR'] + 'all.diff.cat -t PSSC -k SEx_' + mag + ' ' + d['filter'] + 'mag SEx_FLUX_RADIUS' + im_type + ' SEx_CLASS_STAR' + im_type + ' ' + d['filter'] + 'err ' + d['color1'] + ' MaxVal > ' + search_params['TEMPDIR'] + 'mk_sat_all'
#print command
utilities.run(command,['' + search_params['TEMPDIR'] + 'mk_sat_all'] )
import commands
length = commands.getoutput('wc -l ' + search_params['TEMPDIR'] + 'mk_sat_all')
print 'TOTAL # of STARS:', length
cuts_to_make = ['MaxVal>27500.0','Clean!=1','SEx_IMAFLAGS_ISO'+im_type + '!=0','SEx_CLASS_STAR'+im_type+ class_star,'SEx_Flag'+im_type+'!=0',]
files = ['' + search_params['TEMPDIR'] + 'mk_sat_all']
titles = ['raw']
for cut in cuts_to_make:
#print 'making cut:', cut
cut_name = cut.replace('>','').replace('<','')
os.system('rm ' + search_params['TEMPDIR'] + cut_name)
command = 'ldacfilter -i ' + search_params['TEMPDIR'] + 'all.diff.cat -o ' + search_params['TEMPDIR'] + '' + cut_name + ' -t PSSC\
-c "(' + cut + ');"'
utilities.run(command,['' + search_params['TEMPDIR'] + '' + cut_name])
import glob
#print len(glob.glob('' + search_params['TEMPDIR'] + '' + cut_name)), glob.glob('' + search_params['TEMPDIR'] + '' + cut_name)
if len(glob.glob('' + search_params['TEMPDIR'] + '' + cut_name)) > 0:
utilities.run('ldactoasc -b -q -i ' + search_params['TEMPDIR'] + '' + cut_name + ' -t PSSC\
-k SEx_' + mag + ' ' + d['filter'] + 'mag SEx_FLUX_RADIUS SEx_CLASS_STAR ' + d['filter'] + 'err ' + d['color1'] + ' > ' + search_params['TEMPDIR'] + '' + cut_name + '.cat',['' + search_params['TEMPDIR'] + '' + cut_name + '.cat'])
length = commands.getoutput('wc -l ' + search_params['TEMPDIR'] + '' + cut_name + '.cat')
print 'TOTAL # of STARS CUT:', length
titles.append(cut_name)
files.append('' + search_params['TEMPDIR'] + '' + cut_name + '.cat')
#run('ldactoasc -b -q -i cutout1.' + ppid + ' -t PSSC\
# -k Ra Dec > ' + search_params['TEMPDIR'] + '' + outfile,['' + search_params['TEMPDIR'] + '' + outfile])
#run('mkreg.pl -c -rad 8 -xcol 0 -ycol 1 -wcs -colour ' + color + ' ' + search_params['TEMPDIR'] + '' + outfile)
utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + 'all.diff.cat -o ' + search_params['TEMPDIR'] + 'good.stars -t PSSC\
-c "(MaxVal<27500 AND SEx_IMAFLAGS_ISO'+im_type+'=0);"',['' + search_params['TEMPDIR'] + 'good.stars'])
#-c "((MaxVal<27500 AND SEx_CLASS_STAR'+im_type+class_star + ') AND SEx_IMAFLAGS_ISO'+im_type+'=0);"',['' + search_params['TEMPDIR'] + 'good.stars'])
#-c "(MaxVal<27500 AND SEx_IMAFLAGS_ISO'+im_type+'=0);"',['' + search_params['TEMPDIR'] + 'good.stars' + ppid])
utilities.run('ldactoasc -b -q -i ' + search_params['TEMPDIR'] + 'good.stars -t PSSC\
-k SEx_' + mag + ' ' + d['filter'] + 'mag SEx_FLUX_RADIUS' + im_type + ' SEx_CLASS_STAR'+im_type+' ' + d['filter'] + 'err ' + d['color1'] + ' > ' + search_params['TEMPDIR'] + 'mk_sat',['' + search_params['TEMPDIR'] + 'mk_sat'])
if len(glob.glob('' + search_params['TEMPDIR'] + 'mk_sat')) > 0:
files.append('' + search_params['TEMPDIR'] + 'mk_sat')
titles.append('filtered')
print files, titles
mk_saturation_plot.mk_saturation_all(files,titles,filter)
#cutout('' + search_params['TEMPDIR'] + 'good.stars' + ppid,mag)
print mag
val = raw_input("Look at the saturation plot?")
if len(val)>0:
if val[0] == 'y' or val[0] == 'Y':
mk_saturation_plot.mk_saturation(search_params['TEMPDIR'] + '/mk_sat',filter)
val = raw_input("Make a box?")
if len(val)>0:
if val[0] == 'y' or val[0] == 'Y':
mk_saturation_plot.use_box(filter)
lower_mag,upper_mag,lower_diff,upper_diff = re.split('\s+',open('box' + filter,'r').readlines()[0])
utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + '/good.stars -t PSSC\
-c "(((SEx_' + mag + '>' + lower_mag + ') AND (SEx_' + mag + '<' + upper_mag + ')) AND (magdiff>' + lower_diff + ')) AND (magdiff<' + upper_diff + ');"\
-o ' + search_params['TEMPDIR'] + '/filt.mag.new.cat',[search_params['TEMPDIR'] + '/filt.mag.new.cat'])
raw_input()
os.system('mv ' + search_params['TEMPDIR'] + '/filt.mag.new.cat ' + search_params['TEMPDIR'] + '/good.stars')
#val = []
#val = raw_input("Look at the saturation plot?")
#if len(val)>0:
# if val[0] == 'y' or val[0] == 'Y':
# mk_saturation_plot.mk_saturation('' + search_params['TEMPDIR'] + 'mk_sat' + ppid,filter)
# make stellar saturation plot
#lower_mag,upper_mag,lower_diff,upper_diff = re.split('\s+',open('box' + filter,'r').readlines()[0])
lower_mag = str(10)
upper_mag = str(14.0)
lower_diff = str(5)
upper_diff = str(9)
if type == 'star':
lower_mag = str(13.2)
utilities.run('ldactoasc -b -q -i ' + search_params['TEMPDIR'] + 'good.stars -t PSSC -k SEx_Xpos_ABS SEx_Ypos_ABS > ' + search_params['TEMPDIR'] + 'positions',[search_params['TEMPDIR'] + 'positions'] )
utilities.run('ldacaddkey -i ' + search_params['TEMPDIR'] + 'good.stars -o ' + search_params['TEMPDIR'] + 'filt.airmass.cat -t PSSC -k AIRMASS 0.0 FLOAT "" ',[search_params['TEMPDIR'] + 'filt.airmass.cat'] )
utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + 'filt.airmass.cat -o ' + search_params['TEMPDIR'] + 'filt.crit.cat -t PSSC\
-c "(((magdiff>-900) AND (magdiff<900)) AND SEx_' + mag + '!=0);"',['' + search_params['TEMPDIR'] + 'filt.crit.cat'])
utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + 'filt.crit.cat -o ' + search_params['TEMPDIR'] + 'all.colors.cat -t PSSC\
-c "(((' + d['color1'] + '<900 AND ' + d['color2'] + '<900) AND ' + d['color1'] + '>-900) AND ' + d['color2'] + '>-900);"',['' + search_params['TEMPDIR'] + 'all.colors.cat'])
utilities.run('ldactoasc -b -q -i ' + search_params['TEMPDIR'] + 'all.colors.cat -t PSSC -k SEx_' + mag + ' ' + d['filter'] + 'mag ' + d['color1'] + ' ' + d['color2'] + ' AIRMASS SEx_' + magerr + ' ' + d['filter'] + 'err SEx_Xpos_ABS SEx_Ypos_ABS > ' + search_params['TEMPDIR'] + 'input.asc' ,['' + search_params['TEMPDIR'] + 'input.asc'] )
import photo_abs_new
good = photo_abs_new.run_through('illumination',infile='' + search_params['TEMPDIR'] + 'input.asc',output='' + search_params['TEMPDIR'] + 'photo_res',extcoeff=d['color1'],sigmareject=6,step='STEP_1',bandcomp=d['filter'],color1which=d['color1'],color2which=d['color2'])
import astropy, astropy.io.fits as pyfits
cols = []
for key in ['corr_data','color1_good','color2_good','magErr_good','X_good','Y_good','airmass_good']:
cols.append(pyfits.Column(name=key, format='E',array=good[key]))
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
print cols
tbhu = pyfits.BinTableHDU.from_columns(cols)
hdulist.append(tbhu)
hdulist[1].header['EXTNAME']='STDTAB'
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
outcat = path + 'PHOTOMETRY/ILLUMINATION/fit_' + im_type + '_' + search_params['SUPA'] + '_' + type + '.cat'
os.system('rm ' + outcat)
hdulist.writeto(outcat)
save_exposure({'fit_cat_' + im_type + '_' + type: outcat,'airmass_add':'yes'},SUPA,FLAT_TYPE)
save_fit(good['fits'],im_type,type,SUPA,FLAT_TYPE)
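# Sketch of the magnitude-difference clipping done with ldacfilter in phot():
# keep stars whose (reference - instrumental) magnitude difference lies within
# +/- 1.25 mag of the sample median, a coarse rejection of mismatched objects.
def _example_clip_magdiff(magdiff, halfwidth=1.25):
    import numpy
    magdiff = numpy.asarray(magdiff, dtype=float)
    med = numpy.median(magdiff)
    keep = (magdiff > med - halfwidth) & (magdiff < med + halfwidth)
    return magdiff[keep]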
def nightrun():
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
keystop = ['PPRUN']
list = reduce(lambda x,y: x + ',' + y, keystop)
command="SELECT " + list + " from illumination_db where zp_star_ is not null and PPRUN!='KEY_N/A' GROUP BY PPRUN"
print command
c.execute(command)
results=c.fetchall()
db_keys = describe_db(c)
h = []
for line in results:
dtop = {}
for i in range(len(keystop)):
dtop[keystop[i]] = line[i]
directory = 'run_' + dtop['PPRUN']
os.system('mkdir ' + os.environ['sne'] + '/plots/' + directory )
os.system('rm ' + os.environ['sne'] + '/plots/' + directory + '/*')
keys = ['OBJNAME','ROTATION']
list = reduce(lambda x,y: x + ',' + y, keys)
command="SELECT " + list + " from illumination_db where zp_star_ is not null and PPRUN='" + dtop['PPRUN'] + "' GROUP BY OBJNAME,ROTATION"
print command
c.execute(command)
results=c.fetchall()
db_keys = describe_db(c)
h = []
for line in results:
d = {}
for i in range(len(keys)):
d[keys[i]] = line[i]
if 1:
#print d
if 1:
crit = reduce(lambda x,y: x + ' AND ' + y,[str(y) + "='" + str(d[y]) + "'" for y in keys])
file = directory + '/' + reduce(lambda x,y: x + 'AND' + y,[str(y)[0:4] + "_" + str(d[y]) for y in keys])
#print crit
command = "SELECT * from illumination_db where zp_star_ is not null and " + crit
#print command
c.execute(command)
results = c.fetchall()
#print results
fit_files = []
for j in range(len(results)):
dict = {}
for i in range(len(results[j])):
dict[db_keys[i]] = results[j][i]
#print dict['SUPA'], dict['OBJNAME'], dict['pasted_cat'], dict['matched_cat_star']
fit_files.append(dict['fit_cat__star'])
#print fit_files
dict = get_files(dict['SUPA'],dict['FLAT_TYPE'])
#print dict.keys()
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
from copy import copy
import photo_abs_new
reload(photo_abs_new)
files = reduce(lambda x,y: x + ' ' + y,fit_files)
#print files
tempfile = '' + search_params['TEMPDIR'] + 'spit'
command = 'ldacpaste -i ' + files + ' -t STDTAB -o ' + tempfile
print command
utilities.run(command)
hdulist = pyfits.open(tempfile)
args = {}
for column in hdulist["STDTAB"].columns:
args[column.name] = hdulist["STDTAB"].data.field(column.name)
photo_abs_new.calcDataIllum(file,search_params['LENGTH1'], search_params['LENGTH2'], 1000, args['corr_data'], args['airmass_good'], args['color1_good'], args['color2_good'], args['magErr_good'], args['X_good'], args['Y_good'],rot=0)
#except: print 'failed'
def auto_print():
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
keys = ['FILTER','ROTATION']
list = reduce(lambda x,y: x + ',' + y, keys)
command="SELECT " + list + " from illumination_db where zp_star_ is not null and PPRUN!='KEY_N/A' and good_stars_star_ > 400 GROUP BY "+list
print command
c.execute(command)
results=c.fetchall()
db_keys = describe_db(c)
h = []
for line in results:
d = {}
for i in range(len(keys)):
d[keys[i]] = line[i]
if 1:
print d
if 1:
crit = reduce(lambda x,y: x + ' AND ' + y,[str(y) + "='" + str(d[y]) + "'" for y in keys])
file = 'filt_' + reduce(lambda x,y: x + 'AND' + y,[str(y)[0:4] + "_" + str(d[y]) for y in keys])
print crit
command = "SELECT * from illumination_db where zp_star_ is not null and " + crit
print command
c.execute(command)
results = c.fetchall()
print results
fit_files = []
for j in range(len(results)):
dict = {}
for i in range(len(results[j])):
dict[db_keys[i]] = results[j][i]
print dict['SUPA'], dict['OBJNAME'], dict['pasted_cat'], dict['matched_cat_star']
fit_files.append(dict['fit_cat__star'])
print fit_files
dict = get_files(dict['SUPA'],dict['FLAT_TYPE'])
print dict.keys()
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
from copy import copy
import photo_abs_new
reload(photo_abs_new)
files = reduce(lambda x,y: x + ' ' + y,fit_files)
print files
tempfile = '' + search_params['TEMPDIR'] + 'spit'
command = 'ldacpaste -i ' + files + ' -t STDTAB -o ' + tempfile
print command
utilities.run(command)
hdulist = pyfits.open(tempfile)
args = {}
for column in hdulist["STDTAB"].columns:
args[column.name] = hdulist["STDTAB"].data.field(column.name)
photo_abs_new.calcDataIllum(file,search_params['LENGTH1'], search_params['LENGTH2'], 1000, args['corr_data'], args['airmass_good'], args['color1_good'], args['color2_good'], args['magErr_good'], args['X_good'], args['Y_good'],rot=0)
#except: print 'failed'
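# The args-building pattern above (also used in nightrun and printer) pulls
# every column of the STDTAB extension into a dict of arrays; a compact
# equivalent, assuming pyfits is importable as in the rest of this module:
def _example_read_stdtab(filename):
    import pyfits
    hdulist = pyfits.open(filename)
    data = hdulist['STDTAB'].data
    args = dict((col.name, data.field(col.name)) for col in hdulist['STDTAB'].columns)
    hdulist.close()
    return args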
def describe_db(c,db=['illumination_db']):
if type(db) != type([]):
db = [db]
keys = []
for d in db:
command = "DESCRIBE " + d
#print command
c.execute(command)
results = c.fetchall()
for line in results:
keys.append(line[0])
return keys
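# describe_db() returns the column names in table order, so a fetched row can
# be turned into a keyword dict with zip; this is the pattern repeated in the
# query loops below.
def _example_row_to_dict(db_keys, row):
    return dict(zip(db_keys, row))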
def printer():
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
if 1: #for set in [{'OBJNAME':'HDFN', 'filters':['W-J-B','W-J-V','W-C-RC','W-C-IC','W-S-Z+']},{'OBJNAME':'MACS2243-09', 'filters':['W-J-V','W-C-RC','W-C-IC','W-S-Z+']},{'OBJNAME':'A2219', 'filters':['W-J-B','W-J-V','W-C-RC']}]:
#OBJNAME = set['OBJNAME']
if 1: #for filter in set['filters']:
if 1: #try:
keys = describe_db(c)
print keys
OBJNAME = 'HDFN'
filter = 'W-C-ICSF'
ROTATION = 1
command = "select * from illumination_db where OBJNAME='" + OBJNAME + "' and filter='" + filter + "' and fit_cat_galaxy is not null and crfixed='third' and good_stars_star is not null and good_stars_star>10 and ROTATION=" + str(ROTATION)
command = "select * from illumination_db where SUPA='SUPA0011022' and zp_err_galaxy_D is not null"
#command = "select * from illumination_db where OBJNAME='" + OBJNAME + "' and filter='" + filter + "' and fit_cat_galaxy is not null and crfixed='third' and ROTATION=" + str(ROTATION) + ' and good_stars_star is not null and good_stars_star>10'
command = "SELECT * from illumination_db where zp_star_ is not null and ROTATION='0'" # where OBJNAME='HDFN' and filter='W-J-V' and ROTATION=0"
print command
c.execute(command)
results = c.fetchall()
fit_files = []
for j in range(len(results)):
dict = {}
for i in range(len(results[j])):
dict[keys[i]] = results[j][i]
print dict['SUPA'], dict['OBJNAME'], dict['pasted_cat'], dict['matched_cat_star']
fit_files.append(dict['fit_cat__star'])
print fit_files
dict = get_files(dict['SUPA'],dict['FLAT_TYPE'])
print dict.keys()
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
from copy import copy
import photo_abs_new
reload(photo_abs_new)
files = reduce(lambda x,y: x + ' ' + y,fit_files)
print files
tempfile = '' + search_params['TEMPDIR'] + 'spit'
command = 'ldacpaste -i ' + files + ' -t STDTAB -o ' + tempfile
print command
utilities.run(command)
hdulist = pyfits.open(tempfile)
args = {}
for column in hdulist["STDTAB"].columns:
args[column.name] = hdulist["STDTAB"].data.field(column.name)
file = OBJNAME + '_' + filter + '_' + str(ROTATION)
file = raw_input('filename?')
photo_abs_new.calcDataIllum(file,search_params['LENGTH1'], search_params['LENGTH2'], 1000, args['corr_data'], args['airmass_good'], args['color1_good'], args['color2_good'], args['magErr_good'], args['X_good'], args['Y_good'],rot=0)
#except: print 'failed'
#filter = 'W-C-IC'
import pickle
#filters = ['W-J-B','W-J-V','W-C-RC','W-C-IC','W-S-Z+']
#for filter in filters:
# exposures_zero = {}
# exposures_one = {}
# print '$$$$$'
# print 'separating into different camera rotations'
# for exposure in exposures.keys():
# print exposure,exposures[exposure]['keywords']['ROTATION']
# if int(exposures[exposure]['keywords']['ROTATION']) == 1:
# exposures_one[exposure] = exposures[exposure]
# if int(exposures[exposure]['keywords']['ROTATION']) == 0:
# exposures_zero[exposure] = exposures[exposure]
if 0:
reopen = 0
save = 0
if reopen:
f = open('' + search_params['TEMPDIR'] + 'tmppickle' + OBJNAME + filter,'r')
m = pickle.Unpickler(f)
exposures, LENGTH1, LENGTH2 = m.load()
print image.latest
if 1: images = gather_exposures(filter,OBJNAME)
print images
''' strip down exposure list '''
for key in exposures.keys():
print exposures[key]['images']
for image in exposures:
if 1: image.find_seeing(exposures) # save seeing info?
if 1: image.sextract(exposures)
if 1: image.match_simple(exposures,OBJNAME)
if 1: image.phot(exposures,filter,type,LENGTH1,LENGTH2)
if save:
f = open('' + search_params['TEMPDIR'] + 'tmppickle' + OBJNAME + filter,'w')
m = pickle.Pickler(f)
pickle.dump([exposures,LENGTH1,LENGTH2],m)
f.close()
def get_sdss(dict):
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
import os
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
starcat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/PHOTOMETRY/sdssstar%(ROTATION)s.cat' % {'ROTATION':search_params['ROTATION'],'OBJNAME':search_params['OBJNAME']}
galaxycat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/PHOTOMETRY/sdssgalaxy%(ROTATION)s.cat' % {'ROTATION':search_params['ROTATION'],'OBJNAME':search_params['OBJNAME']}
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
illum_path = '/nfs/slac/g/ki/ki05/anja/SUBARU/ILLUMINATION/'
#os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/STAR/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/GALAXY/')
from glob import glob
print starcat
for type,cat in [['star',starcat]]: #,['galaxy',galaxycat]]:
catalog = search_params['pasted_cat'] #exposures[exposure]['pasted_cat']
ramin,ramax, decmin, decmax = coordinate_limits(catalog)
limits = {'ramin':ramin-0.2,'ramax':ramax+0.2,'decmin':decmin-0.2,'decmax':decmax+0.2}
print ramin,ramax, decmin, decmax
if len(glob(cat)) == 0:
#os.system('rm ' + cat)
image = search_params['files'][0]
print image
import retrieve_test
retrieve_test.run(image,cat,type,limits)
return starcat
def match_PPRUN(OBJNAME=None,FILTER=None,PPRUN=None):
associate = {'W-S-I+':['W-C-IC','W-C-RC'],'W-S-G+':['W-J-V','W-J-B'],'W-J-U':['W-J-B']}
if OBJNAME is None:
batchmode = True
else: batchmode = False
trial = False
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
db_keys = describe_db(c,['illumination_db','fit_db'])
keystop = ['PPRUN','ROTATION','OBJNAME']
list = reduce(lambda x,y: x + ',' + y, keystop)
if OBJNAME is None:
command="SELECT * from illumination_db where zp_star_ is not null and PPRUN='2002-06-04_W-J-V' and OBJECT='MACSJ1423.8' GROUP BY OBJNAME,ROTATION"
#command="SELECT * from illumination_db where OBJNAME like '%2243%' and filter='W-J-V' GROUP BY OBJNAME,pprun,filter "
#command="SELECT * from illumination_db where file not like '%CALIB%' and OBJECT like '%1423%' GROUP BY OBJNAME,pprun,filter"
#command="SELECT * from illumination_db where file not like '%CALIB%' GROUP BY OBJNAME,pprun,filter"
command="SELECT * from illumination_db where file not like '%CALIB%' GROUP BY OBJNAME,pprun,filter"
command="SELECT * from illumination_db where file not like '%CALIB%' and SUPA not like '%I' and OBJNAME='MACS1423+24' and filter='W-J-V' GROUP BY OBJNAME,pprun,filter"
#command="SELECT * from illumination_db where file not like '%CALIB%' and SUPA not like '%I' and OBJNAME like 'MACS1824%' and filter='W-C-IC' and PPRUN !='KEY_N/A' GROUP BY pprun,filter, OBJNAME" # ORDER BY RAND()" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
command="SELECT * from illumination_db where file not like '%CALIB%' and SUPA not like '%I' and PPRUN !='KEY_N/A' and fixradecCR=1 and PPRUN='2007-07-18_W-J-B' and OBJNAME='MACS2211-03' GROUP BY pprun,filter, OBJNAME ORDER BY RAND()" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
command="SELECT * from illumination_db where file not like '%CALIB%' and SUPA not like '%I' and PPRUN !='KEY_N/A' and fixradecCR=1 and OBJNAME like 'MACS1423%' GROUP BY pprun,filter,OBJNAME" # ORDER BY RAND()" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
#command="SELECT * from illumination_db i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME) where i.file not like '%CALIB%' and i.SUPA not like '%I' and i.PPRUN !='KEY_N/A' and i.fixradecCR=1 and i.OBJNAME like 'MACS1423%'and f.linearfit=0 GROUP BY i.pprun,i.filter,i.OBJNAME" # ORDER BY RAND()" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
command="SELECT * from illumination_db i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME) where i.file not like '%CALIB%' and i.SUPA not like '%I' and i.PPRUN !='KEY_N/A' and i.fixradecCR=1 and i.OBJNAME like 'Zw3146%' and i.filter='W-J-V' GROUP BY i.pprun,i.filter,i.OBJNAME" # ORDER BY RAND()" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
command="SELECT * from illumination_db i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME) where i.file not like '%CALIB%' and i.SUPA not like '%I' and i.PPRUN !='KEY_N/A' and i.OBJNAME like '%MACS0850%' GROUP BY i.OBJNAME limit 1" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
#command="SELECT * from illumination_db i where OBJNAME like '%MACS0850%' GROUP BY i.OBJNAME ORDER BY RAND()" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
#command="SELECT * from (illumination_db i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME)) left join sdss_db s on (s.OBJNAME = i.OBJNAME) where i.file not like '%CALIB%' and i.SUPA not like '%I' and i.PPRUN !='KEY_N/A' and f.linearfit is null GROUP BY i.pprun,i.filter,i.OBJNAME ORDER BY RAND()" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
#command="SELECT * from illumination_db i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME) where i.file not like '%CALIB%' and i.SUPA not like '%I' and i.PPRUN !='KEY_N/A' and i.fixradecCR=1 and f.linearfit is null and i.PPRUN='2002-12-03_W-C-RC' GROUP BY i.pprun,i.filter,i.OBJNAME" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
else:
#command="SELECT * from illumination_db i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME) where i.file not like '%CALIB%' and i.SUPA not like '%I' and i.objname='"+OBJNAME+"' and i.pprun='"+PPRUN+"' and i.filter='" + FILTER + "' GROUP BY i.pprun,i.filter,i.OBJNAME ORDER BY RAND()" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
command="SELECT * from illumination_db where file not like '%CALIB%' and PPRUN !='KEY_N/A' and OBJNAME like '" + OBJNAME + "' and FILTER like '" + FILTER + "' and PPRUN='" + PPRUN + "' GROUP BY pprun,filter,OBJNAME" # ORDER BY RAND()" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
print command
c.execute(command)
results=c.fetchall()
for line in results:
print 'start next'
if 1: #try:
dtop = {}
for i in range(len(db_keys)):
dtop[db_keys[i]] = str(line[i])
res = re.split('\/',dtop['file'])
command="SELECT * from illumination_db i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME) where i.OBJNAME='" + dtop['OBJNAME'] + "' and i.pasted_cat is not NULL"
print command
c.execute(command)
results2=c.fetchall()
rotation_runs = {}
for line in results2:
dict = {}
for i in range(len(db_keys)):
dict[db_keys[i]] = str(line[i])
GID = float(dict['GABODSID'])
# the GABODSID-to-configuration lookup is identical to find_config() below
CONFIG_IM = find_config(GID)
if float(dict['EXPTIME']) > 10.0:
if not dict['PPRUN'] in rotation_runs:
rotation_runs[dict['PPRUN']] = {'ROTATION':{dict['ROTATION']:'yes'},'FILTER':dict['filter'],'CONFIG_IM':CONFIG_IM,'EXPTIME':dict['EXPTIME'],'file':dict['file'],'linearfit':dict['linearfit'],'OBJNAME':dict['OBJNAME'],'catalog':dict['catalog']}
rotation_runs[dict['PPRUN']]['ROTATION'][dict['ROTATION']] = 'yes'
print rotation_runs
help_list = {}
good_list = {}
for y in rotation_runs.keys():
print rotation_runs[y]['CONFIG_IM']
if rotation_runs[y]['CONFIG_IM'] != '8' and rotation_runs[y]['CONFIG_IM'] != '9' and rotation_runs[y]['CONFIG_IM'] != '10_3' and len(rotation_runs[y]['ROTATION'].keys()) > 1:
good_list[y] = rotation_runs[y]
else:
help_list[y] = rotation_runs[y]
raw_input()
orphan_list = {}
matched_list = {}
for y in help_list.keys():
matched = False
for x in good_list.keys():
if help_list[y]['FILTER'] == good_list[x]['FILTER']:
matched_list[y] = help_list[y]
matched = True
break
if matched == False:
orphan_list[y] = help_list[y]
print good_list
print help_list
print 'good'
for key in sorted(good_list.keys()): print key, good_list[key]['EXPTIME'], good_list[key]['file']
print 'help'
for key in sorted(help_list.keys()): print key, help_list[key]['EXPTIME'],help_list[key]['file']
print 'matched'
for key in sorted(matched_list.keys()): print key, matched_list[key]['EXPTIME'],matched_list[key]['file']
print 'orphaned'
for key in sorted(orphan_list.keys()): print key, orphan_list[key]['EXPTIME'],orphan_list[key]['file']
''' first run the good images '''
for run in good_list.keys():
if float(good_list[run]['linearfit']) != 1:
print good_list[run]['linearfit']
match_OBJNAME_specific(good_list[run]['OBJNAME'],good_list[run]['FILTER'],run) # good_list is keyed by PPRUN
''' create a master catalog '''
input = [[good_list[x]['catalog'],good_list[x]['FILTER']] for x in good_list.keys()]
print input
#mk_sdss_like_catalog(input)
match_many_multi_band(input)
''' use the master catalog to fix remaining runs '''
## need to figure out which band/color to use
def match_OBJNAME(OBJNAME=None,FILTER=None,PPRUN=None):
if OBJNAME is None:
batchmode = True
else: batchmode = False
trial = True
if __name__ == '__main__':
trial = False
start = 1
while True:
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
db_keys_f = describe_db(c,['illumination_db'])
keystop = ['PPRUN','ROTATION','OBJNAME']
list = reduce(lambda x,y: x + ',' + y, keystop)
if OBJNAME is None or start == 0:
command="SELECT * from illumination_db where zp_star_ is not null and PPRUN='2002-06-04_W-J-V' and OBJECT='MACSJ1423.8' GROUP BY OBJNAME,ROTATION"
#command="SELECT * from illumination_db where OBJNAME like '%2243%' and filter='W-J-V' GROUP BY OBJNAME,pprun,filter "
#command="SELECT * from illumination_db where file not like '%CALIB%' and OBJECT like '%1423%' GROUP BY OBJNAME,pprun,filter"
#command="SELECT * from illumination_db where file not like '%CALIB%' GROUP BY OBJNAME,pprun,filter"
command="SELECT * from illumination_db where file not like '%CALIB%' GROUP BY OBJNAME,pprun,filter"
command="SELECT * from illumination_db where file not like '%CALIB%' and SUPA not like '%I' and OBJNAME='MACS1423+24' and filter='W-J-V' GROUP BY OBJNAME,pprun,filter"
#command="SELECT * from illumination_db where file not like '%CALIB%' and SUPA not like '%I' and OBJNAME like 'MACS1824%' and filter='W-C-IC' and PPRUN !='KEY_N/A' GROUP BY pprun,filter, OBJNAME" # ORDER BY RAND()" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
command="SELECT * from illumination_db where file not like '%CALIB%' and SUPA not like '%I' and PPRUN !='KEY_N/A' and fixradecCR=1 and PPRUN='2007-07-18_W-J-B' and OBJNAME='MACS2211-03' GROUP BY pprun,filter, OBJNAME ORDER BY RAND()" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
command="SELECT * from illumination_db where file not like '%CALIB%' and SUPA not like '%I' and PPRUN !='KEY_N/A' and fixradecCR=1 and OBJNAME like 'MACS1423%' GROUP BY pprun,filter,OBJNAME" # ORDER BY RAND()" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
#command="SELECT * from illumination_db i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME) where i.file not like '%CALIB%' and i.SUPA not like '%I' and i.PPRUN !='KEY_N/A' and i.fixradecCR=1 and i.OBJNAME like 'MACS1423%'and f.linearfit=0 GROUP BY i.pprun,i.filter,i.OBJNAME" # ORDER BY RAND()" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
command="SELECT * from illumination_db i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME) where i.file not like '%CALIB%' and i.SUPA not like '%I' and i.PPRUN !='KEY_N/A' and i.fixradecCR=1 and i.OBJNAME like 'Zw3146%' and i.filter='W-J-V' GROUP BY i.pprun,i.filter,i.OBJNAME" # ORDER BY RAND()" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
command="SELECT * from illumination_db i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME) where i.file not like '%CALIB%' and i.SUPA not like '%I' and i.PPRUN !='KEY_N/A' and (f.linearfit!=1 or f.linearfit is null) GROUP BY i.pprun,i.filter,i.OBJNAME ORDER BY RAND() " # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
command=" select i.* from temp i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME) where i.PPRUN !='KEY_N/A' and i.file not like '%CALIB%' and i.pprun like '%' and (f.linearfit=1) and f.piggyback is null GROUP BY i.pprun,i.filter,i.OBJNAME ORDER BY RAND() " # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
command=" drop temporary table if exists temp ; create temporary table temp as select * from illumination_db group by objname, pprun; select i.* from temp i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME) where i.PPRUN !='KEY_N/A' and i.file not like '%CALIB%' and i.pprun like '%' and (linearfit!=1 or linearfit is null) and i.OBJNAME not like 'SXDS' and i.OBJNAME not like 'HDFN' and i.pasted_cat is not null GROUP BY i.pprun,i.filter,i.OBJNAME ORDER BY RAND() limit 1 " # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
command=" drop temporary table if exists temp "
c.execute(command)
command = "create temporary table temp as select * from illumination_db group by objname, pprun "
c.execute(command)
command = "select i.* from temp i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME) where i.PPRUN !='KEY_N/A' and i.file not like '%CALIB%' and i.pprun like '%' and i.OBJNAME not like 'SXDS' and i.OBJNAME not like 'HDFN' and i.pasted_cat is not null and f.bootstrap$linearfit is null and (f.None$good!='y' or f.None$good is null) and (f.sdss$good!='y' or f.sdss$good is null) and i.pprun is not null GROUP BY i.pprun,i.filter,i.OBJNAME ORDER BY RAND() limit 1 " # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
#command="SELECT * from fit_db where (linearfit!=1 or linearfit is null) GROUP BY pprun,filter,OBJNAME ORDER BY RAND() limit 1 " # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
#command="SELECT * from (illumination_db i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME)) left join sdss_db s on (s.OBJNAME = i.OBJNAME) where i.file not like '%CALIB%' and i.SUPA not like '%I' and i.PPRUN !='KEY_N/A' and f.linearfit is null GROUP BY i.pprun,i.filter,i.OBJNAME ORDER BY RAND()" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
#command="SELECT * from illumination_db i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME) where i.file not like '%CALIB%' and i.SUPA not like '%I' and i.PPRUN !='KEY_N/A' and i.fixradecCR=1 and f.linearfit is null and i.PPRUN='2002-12-03_W-C-RC' GROUP BY i.pprun,i.filter,i.OBJNAME" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
else:
command=" drop temporary table if exists temp "
c.execute(command)
command = "create temporary table temp as select * from illumination_db group by objname, pprun "
c.execute(command)
command="SELECT * from temp i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME) where i.SUPA not like '%I' and i.objname='"+OBJNAME+"' and i.pprun='"+PPRUN+"' and i.filter='" + FILTER + "' GROUP BY i.pprun,i.filter,i.OBJNAME ORDER BY RAND() " # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
#command="SELECT * from fit_db where (linearfit!=1 or linearfit is null) and objname='"+OBJNAME+"' and pprun='"+PPRUN+"' and filter='" + FILTER + "' GROUP BY pprun,filter,OBJNAME ORDER BY RAND() limit 1 " # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
#command="SELECT * from fit_db where objname='"+OBJNAME+"' and pprun='"+PPRUN+"' and filter='" + FILTER + "' GROUP BY pprun,filter,OBJNAME ORDER BY RAND() limit 1 " # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
# command="SELECT * from illumination_db where file not like '%CALIB%' and PPRUN !='KEY_N/A' and OBJNAME like '" + OBJNAME + "' and FILTER like '" + FILTER + "' and PPRUN='" + PPRUN + "' GROUP BY pprun,filter,OBJNAME" # ORDER BY RAND()" # and PPRUN='2006-12-21_W-J-B' GROUP BY OBJNAME,pprun,filter"
start = 0
print command
c.execute(command)
f_results=c.fetchall()
#for line in results[0]:
print 'len results', len(f_results)
print 'hey'
if len(f_results) == 0:
print 'breaking!'
break
print f_results[0]
if len(f_results) > 0:
print 'start next'
line = f_results[0]
#print 'calc_test_save.linear_fit(' + OBJNAME + ',' + FILTER + ',' + PPRUN + ',' + cov + ',' + CONFIG + ',' + true_sdss + ',primary=' + primary + ',secondary=' + secondary + ')'
if trial: raw_input()
try:
global tmpdir
dtop = {}
for i in range(len(db_keys_f)):
dtop[db_keys_f[i]] = str(line[i])
#res = re.split('\/',dtop['file'])
#for j in range(len(res)):
# if res[j] == 'SUBARU':
# break
FILTER = dtop['filter']
PPRUN = dtop['PPRUN']
OBJNAME = dtop['OBJNAME']
print dtop['filter'], dtop['PPRUN'], dtop['OBJNAME']
save_fit({'PPRUN':PPRUN,'OBJNAME':OBJNAME,'FILTER':FILTER})
keys = ['SUPA','OBJNAME','ROTATION','PPRUN','pasted_cat','filter','ROTATION','files']
list = reduce(lambda x,y: x + ',' + y, keys)
#command="SELECT * from illumination_db where zp_star_ is not null and OBJNAME='"+dtop['OBJNAME'] + "' and PPRUN='" + dtop['PPRUN'] + "'"#+ "' GROUP BY OBJNAME,ROTATION"
db_keys = describe_db(c,['illumination_db','fit_db'])
#command="SELECT * from illumination_db where OBJNAME='" + dtop['OBJNAME'] + "' and PPRUN='" + dtop['PPRUN'] + "' and filter like '" + dtop['filter'] + "' and pasted_cat is not NULL"
command="SELECT * from illumination_db i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME) where i.OBJNAME='" + dtop['OBJNAME'] + "' and i.pasted_cat is not NULL"
print command
c.execute(command)
results=c.fetchall()
print 'sort into good, need help'
all_list = sort_results(results,dtop,db_keys)
primary = all_list[PPRUN]['primary']
secondary = all_list[PPRUN]['secondary']
print primary, secondary
''' now run with PPRUN '''
command="SELECT * from illumination_db i left join fit_db f on (i.pprun=f.pprun and i.OBJNAME=f.OBJNAME) where i.OBJNAME='" + dtop['OBJNAME'] + "' and i.pasted_cat is not NULL and i.PPRUN='" + dtop['PPRUN'] + "'"
print command
c.execute(command)
results=c.fetchall()
field = []
info = []
if len(results) > 0:
for line in results:
d = {}
for i in range(len(db_keys)):
d[db_keys[i]] = str(line[i])
ana = '' #raw_input('analyze ' + d['SUPA'] + '?')
if len(ana) > 0:
if ana[0] == 'y':
analyze(d['SUPA'],d['FLAT_TYPE'])
''' use SCAMP CRVAL, etc. '''
a=1
if int(float(d['fixradecCR'])) != 1: # or str(d['fixradecCR']) == '-1':
a = fix_radec(d['SUPA'],d['FLAT_TYPE'])
if a==1:
key = str(int(float(d['ROTATION']))) + '$' + d['SUPA'] + '$'
field.append({'key':key,'pasted_cat':d['pasted_cat'],'ROT':d['ROTATION'],'file':d['file']})
info.append([d['ROTATION'],d['SUPA'],d['OBJNAME']])
if d['CRVAL1'] == 'None':
length(d['SUPA'],d['FLAT_TYPE'])
print d['SUPA']
print all_list[PPRUN]
if all_list[PPRUN]['status']=='help' and all_list[PPRUN]['primary'] is not None:
match = 'bootstrap'
print all_list
print 'primary', primary, 'secondary', secondary
''' match images '''
finalcat = match_many_multi_band([[all_list[primary]['catalog'],'primary'],[all_list[secondary]['catalog'],'secondary']])
print finalcat
save_fit({'status':all_list[PPRUN]['status'],'PPRUN':PPRUN,'OBJNAME':OBJNAME,'FILTER':FILTER})
else:
''' this means that the image is good '''
save_fit({'status':all_list[PPRUN]['status'],'piggyback':'no','PPRUN':PPRUN,'OBJNAME':OBJNAME,'FILTER':FILTER})
''' now check to see if there is SDSS '''
sdss_cov,galaxycat,starcat = sdss_coverage(d['SUPA'],d['FLAT_TYPE'])
''' get SDSS matched stars, use photometric calibration to remove color term '''
if sdss_cov:
match = 'sdss'
print d['SUPA'], d['FLAT_TYPE'], d['OBJECT'], d['CRVAL1'], d['CRVAL2']
''' retrieve SDSS catalog '''
print d['pasted_cat']
sdssmatch = get_cats_ready(d['SUPA'],d['FLAT_TYPE'],galaxycat,starcat)
print 'calibration done'
else:
match=None
print match
d = get_files(d['SUPA'],d['FLAT_TYPE'])
print field
input = [[x['pasted_cat'],x['key'],x['ROT']] for x in field]
input_files = [[x['pasted_cat']] for x in field]
print input_files
import utilities
input_filt = []
print input
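# Pre-filter each pasted catalog with ldacfilter, keeping only unflagged objects
# with MAGERR_AUTO < 0.05, before the catalogs are matched together.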
for f in input:
Ns = ['MAGERR_AUTO < 0.05)','Flag = 0)']
filt= '(' + reduce(lambda x,y: '(' + x + ' AND (' + y + ')',Ns)
print filt, f
filtered = f[0].replace('.cat','.filt.cat')
print filtered
command = 'ldacfilter -i ' + f[0] + ' -t OBJECTS -o ' + filtered + ' -c "' + filt + ';" '
print command
import utilities
utilities.run(command,[filtered])
input_filt.append([filtered,f[1],f[2]])
if 0: #len(input) > 8:
input_short = []
i = 0
while len(input_short) < 6 and len(input_short)<len(input):
i += 1
rot0 = filter(lambda x:float(x[1][0])==0,input)[0:i]
rot1 = filter(lambda x:float(x[1][0])==1,input)[0:i]
rot2 = filter(lambda x:float(x[1][0])==2,input)[0:i]
rot3 = filter(lambda x:float(x[1][0])==3,input)[0:i]
input_short = rot0 + rot1 + rot2 + rot3
input = input_short
print 'new', input
print input
input = input_filt
if match=='sdss':
input.append([sdssmatch,'SDSS',None])
elif match=='bootstrap':
input.append([finalcat,'SDSS',None])
print input_files
match_many(input)
print 'matched'
CONFIG = find_config(d['GABODSID'])
print OBJNAME,FILTER,PPRUN,match,CONFIG,primary,secondary
linear_fit(OBJNAME,FILTER,PPRUN,match,CONFIG,primary=primary,secondary=secondary)
#construct_correction(d['OBJNAME'],d['FILTER'],d['PPRUN'])
print '\n\nDONE'
if batchmode:
os.system('rm -rf ' + tmpdir)
except KeyboardInterrupt:
raise
except:
print traceback.print_exc(file=sys.stdout)
print 'fail'
if batchmode:
os.system('rm -rf ' + tmpdir)
if trial:
raise Exception
def find_config(GID):
config_list = [[575,691,'8'],[691,871,'9'],[817,1309,'10_1'],[1309,3470,'10_2'],[3470,4000,'10_3']]
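# GABODSID (observing-night id) ranges mapped onto detector configuration labels;
# presumably these correspond to the different Suprime-Cam chip/readout setups.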
CONFIG_IM = None
for config in config_list:
if config[0] < GID < config[1]:
CONFIG_IM = config[2]
break
return CONFIG_IM
def sort_results(results2,dict,db_keys):
import config_bonn
reload(config_bonn)
from config_bonn import wavelength_groups, wavelength_order
rotation_runs = {}
for line in results2:
dict = {}
for i in range(len(db_keys)):
dict[db_keys[i]] = str(line[i])
GID = float(dict['GABODSID'])
CONFIG_IM = find_config(GID)
FILTER_NUM = None
for i in range(len(wavelength_groups)):
for filt in wavelength_groups[i]:
if filt == dict['filter']:
FILTER_NUM = i
if FILTER_NUM is None:
print dict['filter']
raise NoFilterMatch
if float(dict['EXPTIME']) > 10.0:
if not dict['PPRUN'] in rotation_runs:
rotation_runs[dict['PPRUN']] = {'ROTATION':{dict['ROTATION']:'yes'},'FILTER':dict['filter'],'CONFIG_IM':CONFIG_IM,'EXPTIME':dict['EXPTIME'],'file':dict['file'],'linearfit':dict['linearfit'],'OBJNAME':dict['OBJNAME'],'catalog':dict['catalog'],'FILTER_NUM':FILTER_NUM,'sdss$good':dict['sdss$good'],'None$good':dict['None$good']}
rotation_runs[dict['PPRUN']]['ROTATION'][dict['ROTATION']] = 'yes'
print rotation_runs
help_list = {}
good_list = {}
for y in rotation_runs.keys():
if (rotation_runs[y]['sdss$good'] == 'y' or rotation_runs[y]['None$good'] =='y') and rotation_runs[y]['CONFIG_IM'] != '8' and rotation_runs[y]['CONFIG_IM'] != '9' and rotation_runs[y]['CONFIG_IM'] != '10_3' and len(rotation_runs[y]['ROTATION'].keys()) > 1:
good_list[y] = rotation_runs[y]
good_list[y]['status'] = 'good'
good_list[y]['primary'] = None
good_list[y]['secondary'] = None
else:
help_list[y] = rotation_runs[y]
help_list[y]['status'] = 'help'
help_list[y]['primary'] = None
help_list[y]['secondary'] = None
orphan_list = {}
matched_list = {}
for y in help_list.keys():
''' use rules to assign comparison cats'''
''' first determine the closest filter '''
primaries = []
for x in good_list.keys():
primaries.append([abs(help_list[y]['FILTER_NUM'] - good_list[x]['FILTER_NUM']),x])
primaries.sort()
if len(primaries) > 0:
primary = primaries[0][1]
help_list[y]['primary'] = primary
#print 'primary', primaries, primary, y
secondaries = []
for x in good_list.keys():
if x != primary and help_list[y]['FILTER_NUM'] != good_list[x]['FILTER_NUM']:
secondaries.append([abs(help_list[y]['FILTER_NUM'] - good_list[x]['FILTER_NUM']),x])
''' if no calibrated secondary, use the same catalog '''
if len(secondaries) == 0:
for x in help_list.keys():
secondaries.append([abs(help_list[y]['FILTER_NUM'] - help_list[x]['FILTER_NUM']),x])
''' guaranteed to be a secondary '''
secondaries.sort()
secondary = secondaries[0][1]
help_list[y]['secondary'] = secondary
#print 'secondary', secondaries, secondary, y
matched = False
for x in good_list.keys():
if help_list[y]['FILTER'] == good_list[x]['FILTER']:
matched_list[y] = help_list[y]
matched = True
break
if matched == False:
orphan_list[y] = help_list[y]
print good_list
print help_list
print 'good'
for key in sorted(good_list.keys()): print key, good_list[key]['EXPTIME'], good_list[key]['file']
print 'help'
for key in sorted(help_list.keys()): print key, help_list[key]['EXPTIME'],help_list[key]['file']
print 'matched'
for key in sorted(matched_list.keys()): print key, matched_list[key]['EXPTIME'],matched_list[key]['file']
print 'orphaned'
for key in sorted(orphan_list.keys()): print key, orphan_list[key]['EXPTIME'],orphan_list[key]['file']
import copy
all_list = copy.copy(good_list)
all_list.update(help_list)
return all_list
def add_correction_new(cat_list,OBJNAME,FILTER,PPRUN):
import scipy, re, string, os
''' create Chebyshev polynomials '''
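# Chebyshev polynomials of the first kind up to third order:
# T0(x)=1, T1(x)=x, T2(x)=2x^2-1, T3(x)=4x^3-3x.
# The 2-D basis terms below are products T_i(x)*T_j(y); the constant 0x0y term
# (and, for the "no linear" set, the pure linear terms as well) is excluded.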
cheby_x = [{'n':'0x','f':lambda x,y:1.},{'n':'1x','f':lambda x,y:x},{'n':'2x','f':lambda x,y:2*x**2-1},{'n':'3x','f':lambda x,y:4*x**3.-3*x}]
cheby_y = [{'n':'0y','f':lambda x,y:1.},{'n':'1y','f':lambda x,y:y},{'n':'2y','f':lambda x,y:2*y**2-1},{'n':'3y','f':lambda x,y:4*y**3.-3*y}]
cheby_terms = []
cheby_terms_no_linear = []
for tx in cheby_x:
for ty in cheby_y:
if not ((tx['n'] == '0x' and ty['n'] == '0y')): # or (tx['n'] == '0x' and ty['n'] == '1y') or (tx['n'] == '1x' and ty['n'] == '0y')) :
cheby_terms.append({'n':tx['n'] + ty['n'],'fx':tx['f'],'fy':ty['f']})
if not ((tx['n'] == '0x' and ty['n'] == '0y') or (tx['n'] == '0x' and ty['n'] == '1y') or (tx['n'] == '1x' and ty['n'] == '0y')) :
cheby_terms_no_linear.append({'n':tx['n'] + ty['n'],'fx':tx['f'],'fy':ty['f']})
cov = 1
if cov:
samples = [['sdss',cheby_terms,True]] #,['None',cheby_terms_no_linear,False]] #[['None',cheby_terms_no_linear],['sdss',cheby_terms]]
else:
samples = [['None',cheby_terms_no_linear,False]]
sample = 'sdss'
sample_size = 'all'
import re, time
dt = get_a_file(OBJNAME,FILTER,PPRUN)
d = get_fits(OBJNAME,FILTER,PPRUN)
print d.keys()
column_prefix = sample+'$'+sample_size+'$'
position_columns_names = re.split('\,',d[column_prefix + 'positioncolumns'])
print position_columns_names, 'position_columns_names'
fitvars = {}
cheby_terms_dict = {}
print column_prefix, position_columns_names
for ele in position_columns_names:
print ele
if type(ele) != type({}):
ele = {'name':ele}
res = re.split('\$',ele['name'])
if string.find(ele['name'],'zp_image') == -1:
fitvars[ele['name']] = float(d[sample+'$'+sample_size+'$'+ele['name']])
for term in cheby_terms:
if term['n'] == ele['name'][2:]:
cheby_terms_dict[term['n']] = term
zp_images = re.split(',',d[sample+'$'+sample_size+'$zp_images'])
zp_images_names = re.split(',',d[sample+'$'+sample_size+'$zp_images_names'])
for i in range(len(zp_images)):
fitvars[zp_images_names[i]] = float(zp_images[i])
cheby_terms_use = [cheby_terms_dict[k] for k in cheby_terms_dict.keys()]
print cheby_terms_use, fitvars
CHIPS = [int(x) for x in re.split(',',dt['CHIPS'])]
LENGTH1, LENGTH2 = dt['LENGTH1'], dt['LENGTH2']
per_chip = True
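# Map absolute pixel coordinates from [0, LENGTH] onto [-1, 1], the natural
# domain of the Chebyshev polynomials.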
coord_conv_x = lambda x:(2.*x-0-LENGTH1)/(LENGTH1-0)
coord_conv_y = lambda x:(2.*x-0-LENGTH2)/(LENGTH2-0)
''' make images of illumination corrections '''
cat_grads = []
for cat in cat_list:
import astropy, astropy.io.fits as pyfits
p = pyfits.open(cat[0])
tab = p["OBJECTS"].data
print str(type(tab))
if tab is not None:
print tab.field('MAG_AUTO')[0:10]
ROT = str(int(float(cat[2])))
print cat
x = coord_conv_x(scipy.array(tab.field('Xpos_ABS')[:]))
y = coord_conv_y(scipy.array(tab.field('Ypos_ABS')[:]))
CHIPS = tab.field('CHIP')
chip_zps = []
for i in range(len(CHIPS)):
chip_zps.append(float(fitvars['zp_' + str(int(CHIPS[i]))]))
chip_zps = scipy.array(chip_zps)
''' save pattern w/ chip zps '''
trial = False
children = []
''' correct w/ polynomial '''
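# epsilonC is the illumination correction evaluated at each object's position:
# the sum of the fitted Chebyshev terms for this rotation plus the per-chip
# zero points; it is subtracted from MAG_AUTO below.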
epsilonC = 0
index = 0
for term in cheby_terms_use:
index += 1
print index, ROT, term, fitvars[str(ROT)+'$'+term['n']]
epsilonC += fitvars[str(ROT)+'$'+term['n']]*term['fx'](x,y)*term['fy'](x,y)
''' add the zeropoint '''
epsilonC += chip_zps
''' save pattern w/o chip zps '''
print LENGTH1, LENGTH2
print epsilonC[2000:2020]
print x[2000:2020]
print y[2000:2020]
print tab.field('Xpos_ABS')[2000:2020]
print tab.field('Ypos_ABS')[2000:2020]
tab.field('MAG_AUTO')[:] = tab.field('MAG_AUTO')[:] - epsilonC
print tab.field('MAG_AUTO')[0:20]
new_name = cat[0].replace('.cat','.gradient.cat')
os.system('rm ' + new_name)
p.writeto(new_name)
cat_grads.append([new_name,cat[1],ROT])
return cat_grads
def add_gradient(cat_list):
import astropy, astropy.io.fits as pyfits, os
cat_grads = []
for cat in cat_list:
print cat
p = pyfits.open(cat[0])
tab = p["OBJECTS"].data
print tab.field('MAG_AUTO')[0:10]
tab.field('MAG_AUTO')[:] = tab.field('MAG_AUTO') + 5./10000.*tab.field('Xpos_ABS')
new_name = cat[0].replace('.cat','.gradient.cat')
os.system('rm ' + new_name)
p.writeto(new_name)
cat_grads.append([new_name,cat[1]])
return cat_grads
def add_correction(cat_list):
import astropy, astropy.io.fits as pyfits, os
cat_grads = []
EXPS = getTableInfo()
cheby_x = [{'n':'0x','f':lambda x,y:1.},{'n':'1x','f':lambda x,y:x},{'n':'2x','f':lambda x,y:2*x**2-1},{'n':'3x','f':lambda x,y:4*x**3.-3*x}]
cheby_y = [{'n':'0y','f':lambda x,y:1.},{'n':'1y','f':lambda x,y:y},{'n':'2y','f':lambda x,y:2*y**2-1},{'n':'3y','f':lambda x,y:4*y**3.-3*y}]
#func = lambda x,y: [cheby_x_dict[f[0:2]](x,y)*cheby_y_dict[f[2:]](x,y) for f in fitvars]
import scipy
x = scipy.array([-0.5,0,1])
y = scipy.array([-0.5,0,0.5])
for cat in cat_list:
for ROT in EXPS.keys():
for SUPA in EXPS[ROT]:
import re
print SUPA, cat
res = re.split('\$',cat[1])
file = res[1]
print file, cat
if file == SUPA: rotation = ROT
import pickle
f=open(tmpdir + '/fitvars' + rotation,'r')
m=pickle.Unpickler(f)
fitvars=m.load()
cheby_terms = []
for tx in cheby_x:
for ty in cheby_y:
if fitvars.has_key(tx['n']+ty['n']): # not ((tx['n'] == '0x' and ty['n'] == '0y')): # or (tx['n'] == '0x' and ty['n'] == '1y') or (tx['n'] == '1x' and ty['n'] == '0y')) :
cheby_terms.append({'n':tx['n'] + ty['n'],'fx':tx['f'],'fy':ty['f']})
print EXPS
print cat
p = pyfits.open(cat[0])
tab = p["OBJECTS"].data
print tab.field('MAG_AUTO')[0:10]
x = coord_conv_x(tab.field('Xpos_ABS'))
y = coord_conv_y(tab.field('Ypos_ABS'))
epsilon = 0
for term in cheby_terms:
epsilon += fitvars[term['n']]*term['fx'](x,y)*term['fy'](x,y)
print epsilon[0:20]
tab.field('MAG_AUTO')[:] = tab.field('MAG_AUTO')[:] - epsilon
print tab.field('MAG_AUTO')[0:20]
new_name = cat[0].replace('.cat','.gradient.cat')
os.system('rm ' + new_name)
p.writeto(new_name)
cat_grads.append([new_name,cat[1]])
return cat_grads
def make_ssc_config(list):
ofile = tmpdir + '/tmp.cat'
os.system('mkdir ' + tmpdir)
out = open(tmpdir + '/tmp.ssc','w')
import os, string, re
keys = []
i = -1
for file_name,prefix in list:
i += 1
print file_name
os.system('ldacdesc -t OBJECTS -i ' + file_name + ' > ' + ofile)
file = open(ofile,'r').readlines()
for line in file:
if string.find(line,"Key name") != -1:
red = re.split('\.+',line)
key = red[1].replace(' ','').replace('\n','')
out_key = prefix + key
out.write("COL_NAME = " + out_key + '\nCOL_INPUT = ' + key + '\nCOL_MERGE = AVE_REG\nCOL_CHAN = ' + str(i) + "\n#\n")
#print key
keys.append(key)
out.close()
def make_ssc_multi_color(list):
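# Write the make_ssc configuration for a multi-band merge: each input catalog is
# one channel, and its corrected magnitude and error are written out as
# <filter>mag / <filter>err (coordinates and the Clean flag are taken from the
# first channel that provides them).  The returned 'N_0i = 1' clauses are used
# later in ldacfilter to keep only objects detected in every channel.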
ofile = tmpdir + '/tmp.cat'
import os
os.system('mkdir -p ' + tmpdir)
out = open(tmpdir + '/tmp.ssc','w')
import os, string, re
#key_list = ['CHIP','Flag','MAG_AUTO','MAGERR_AUTO','MAG_APER2','MAGERR_APER2','Xpos_ABS','Ypos_ABS','CLASS_STAR','MaxVal','BackGr','stdMag_corr','stdMagErr_corr','stdMagColor_corr','stdMagClean_corr','stdMagStar_corr','Star_corr','ALPHA_J2000','DELTA_J2000']
Ns = []
keys = {}
i = -1
for file_name,filter in list:
key_list = [['ALPHA_J2000','ALPHA_J2000'],['DELTA_J2000','DELTA_J2000'],['stdMag_corr',filter+'mag'],['stdMagErr_corr',filter+'err'],['stdMagClean_corr','Clean'],]
i += 1
Ns.append('N_0' + str(i) + ' = 1)')
print file_name
for key in key_list:
out_key = key[1]
in_key = key[0]
#if reduce(lambda x,y: x+ y, [string.find(out_key,k)!=-1 for k in key_list]):
if not key[1] in keys:
out.write("COL_NAME = " + out_key + '\nCOL_INPUT = ' + in_key + '\nCOL_MERGE = AVE_REG\nCOL_CHAN = ' + str(i) + "\n#\n")
print key
keys[key[1]] = True
out.close()
print out
return Ns
def make_ssc_config_few(list):
ofile = tmpdir + '/tmp.cat'
import os
os.system('mkdir -p ' + tmpdir)
out = open(tmpdir + '/tmp.ssc','w')
import os, string, re
key_list = ['CHIP','Flag','MAG_AUTO','MAGERR_AUTO','MAG_APER2','MAGERR_APER2','Xpos','Ypos','Xpos_ABS','Ypos_ABS','CLASS_STAR','MaxVal','BackGr','stdMag_corr','stdMagErr_corr','stdMagColor_corr','stdMagClean_corr','stdMagStar_corr','Star_corr','ALPHA_J2000','DELTA_J2000']
keys = []
i = -1
for file_name,prefix,rot in list:
i += 1
print file_name
os.system('ldacdesc -t OBJECTS -i ' + file_name + ' > ' + ofile)
file = open(ofile,'r').readlines()
for line in file:
if string.find(line,"Key name") != -1 :
red = re.split('\.+',line)
key = red[1].replace(' ','').replace('\n','')
out_key = prefix + key
if reduce(lambda x,y: x+ y, [string.find(out_key,k)!=-1 for k in key_list]):
out.write("COL_NAME = " + out_key + '\nCOL_INPUT = ' + key + '\nCOL_MERGE = AVE_REG\nCOL_CHAN = ' + str(i) + "\n#\n")
print key
keys.append(key)
out.close()
def make_ssc_config_colors(list):
ofile = tmpdir + '/tmp.cat'
out = open(tmpdir + '/tmp.ssc','w')
import os, string, re
keys = []
i = -1
for file_name,prefix in list:
i += 1
print file_name
os.system('ldacdesc -t OBJECTS -i ' + file_name + ' > ' + ofile)
file = open(ofile,'r').readlines()
for line in file:
if string.find(line,"Key name") != -1:
red = re.split('\.+',line)
key = red[1].replace(' ','').replace('\n','')
out_key = key + '_' + prefix
out.write("COL_NAME = " + out_key + '\nCOL_INPUT = ' + key + '\nCOL_MERGE = AVE_REG\nCOL_CHAN = ' + str(i) + "\n#\n")
#print key
keys.append(key)
out.close()
def threesec():
list = [['/nfs/slac/g/ki/ki05/anja/SUBARU/MACS0417-11/PHOTOMETRY/ILLUMINATION/pasted_SUPA0105807_W-C-RC_2009-01-23_CALIB_0.0.cat','W-C-RC'],['/nfs/slac/g/ki/ki05/anja/SUBARU/MACS0417-11/PHOTOMETRY/ILLUMINATION/pasted_SUPA0105787_W-J-V_2009-01-23_CALIB_0.0.cat','W-J-V'],['/nfs/slac/g/ki/ki05/anja/SUBARU/MACS0417-11/PHOTOMETRY/ILLUMINATION/pasted_SUPA0050786_W-C-IC_2006-12-21_CALIB_0.0.cat','W-C-IC']]
match_many(list,True)
def match_many(list,color=False):
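# Cross-match a list of catalogs: write the ssc configuration, add the
# association keys with ldacaddkey, run 'associate' to match objects by
# position, then merge the matched catalogs into a single table with make_ssc
# (written to tmpdir + '/final.cat').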
if color:
make_ssc_config_colors(list)
print color
else:
make_ssc_config_few(list)
import os
os.system('rm -rf ' + tmpdir + '/assoc/')
os.system('mkdir ' + tmpdir + '/assoc/')
import os
files = []
for file,prefix,rot in list:
print file
#os.system('ldactoasc -i ' + file + ' -t OBJECTS -k ALPHA_J2000 DELTA_J2000 > ' + file[-11:] + '.cat')
#os.system('mkreg.pl -c -rad 3 -xcol 0 -ycol 1 -wcs ' + file[-11:] + '.cat')
command = 'ldacaddkey -i %(inputcat)s -t OBJECTS -o %(outputcat)s -k A_WCS_assoc 0.0003 FLOAT "" \
B_WCS_assoc 0.0003 FLOAT "" \
Theta_assoc 0.0 FLOAT "" \
Flag_assoc 0 SHORT "" ' % {'inputcat':file,'outputcat':file + '.assoc1'}
os.system(command)
#command = 'ldacrenkey -i %(inputcat)s -o %(outputcat)s -k ALPHA_J2000 Ra DELTA_J2000 Dec' % {'inputcat':file + '.assoc1','outputcat':file+'.assoc2'}
#os.system(command)
files.append(file+'.assoc1')
import re
files_input = reduce(lambda x,y:x + ' ' + y,files)
files_output = reduce(lambda x,y:x + ' ' + y,[tmpdir + '/assoc/'+re.split('\/',z)[-1] +'.assd' for z in files])
print files
print files_input, files_output
command = 'associate -i %(inputcats)s -o %(outputcats)s -t OBJECTS -c %(bonn)s/photconf/fullphotom.alpha.associate' % {'inputcats':files_input,'outputcats':files_output, 'bonn':os.environ['bonn']}
print command
os.system(command)
print 'associated'
outputcat = tmpdir + '/final.cat'
command = 'make_ssc -i %(inputcats)s \
-o %(outputcat)s\
-t OBJECTS -c %(tmpdir)s/tmp.ssc ' % {'tmpdir': tmpdir, 'inputcats':files_output,'outputcat':outputcat}
os.system(command)
def match_many_multi_band(list,color=False):
Ns = make_ssc_multi_color(list)
import os
os.system('rm -rf ' + tmpdir + '/assoc/')
os.system('mkdir ' + tmpdir + '/assoc/')
import os
files = []
for file,filter in list:
print file
#os.system('ldactoasc -i ' + file + ' -t OBJECTS -k ALPHA_J2000 DELTA_J2000 > ' + file[-11:] + '.cat')
#os.system('mkreg.pl -c -rad 3 -xcol 0 -ycol 1 -wcs ' + file[-11:] + '.cat')
command = 'ldacaddkey -i %(inputcat)s -t OBJECTS -o %(outputcat)s -k A_WCS_assoc 0.0003 FLOAT "" \
B_WCS_assoc 0.0003 FLOAT "" \
Theta_assoc 0.0 FLOAT "" \
Flag_assoc 0 SHORT "" ' % {'inputcat':file,'outputcat':file + '.assoc1'}
os.system(command)
#command = 'ldacrenkey -i %(inputcat)s -o %(outputcat)s -k ALPHA_J2000 Ra DELTA_J2000 Dec' % {'inputcat':file + '.assoc1','outputcat':file+'.assoc2'}
#os.system(command)
files.append(file+'.assoc1')
import re
files_input = reduce(lambda x,y:x + ' ' + y,files)
files_output = reduce(lambda x,y:x + ' ' + y,[tmpdir + '/assoc/'+re.split('\/',z)[-1] +'.assd' for z in files])
print files
print files_input, files_output
command = 'associate -i %(inputcats)s -o %(outputcats)s -t OBJECTS -c %(bonn)s/photconf/fullphotom.alpha.associate' % {'inputcats':files_input,'outputcats':files_output, 'bonn':os.environ['bonn']}
print command
os.system(command)
print 'associated'
outputcat = tmpdir + '/multiband.cat'
command = 'make_ssc -i %(inputcats)s \
-o %(outputcat)s\
-t OBJECTS -c %(tmpdir)s/tmp.ssc ' % {'tmpdir': tmpdir, 'inputcats':files_output,'outputcat':outputcat}
os.system(command)
print outputcat, 'outputcat'
''' now filter out the ones with incomplete colors '''
filt= '(' + reduce(lambda x,y: '(' + x + ' AND (' + y + ')',Ns)
print filt
intermediatecat = tmpdir + '/multiband_intermediate.cat'
command = 'ldacfilter -i ' + outputcat + ' -t PSSC -o ' + intermediatecat + ' -c "' + filt + ';" '
print command
import utilities
utilities.run(command,[intermediatecat])
finalcat = tmpdir + '/multiband_final.cat'
command = 'ldacrentab -i ' + intermediatecat + ' -t PSSC OBJECTS -o ' + finalcat
print command
import utilities
utilities.run(command,[finalcat])
print finalcat, 'finalcat'
''' now make into SDSS format '''
tmp = {}
import astropy, astropy.io.fits as pyfits, scipy
p = pyfits.open(finalcat)[1].data
cols = []
print 'data start'
import Numeric
cols.append(pyfits.Column(name='stdMag_corr', format='E',array=p.field('primarymag')))
cols.append(pyfits.Column(name='stdMagErr_corr', format='E',array=p.field('primaryerr')))
cols.append(pyfits.Column(name='stdMagColor_corr', format='E',array=(p.field('primarymag')-p.field('secondarymag'))))
cols.append(pyfits.Column(name='stdMagClean_corr', format='E',array=p.field('Clean')))
cols.append(pyfits.Column(name='ALPHA_J2000', format='E',array=p.field('ALPHA_J2000')))
cols.append(pyfits.Column(name='DELTA_J2000', format='E',array=p.field('DELTA_J2000')))
cols.append(pyfits.Column(name='SeqNr', format='E',array=p.field('SeqNr')))
cols.append(pyfits.Column(name='Star_corr', format='E',array=scipy.ones(len(p.field('Clean')))))
path = tmpdir
outcat = path + 'sdssfinalcat.cat'
print cols
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
tbhu = pyfits.BinTableHDU.from_columns(cols)
hdulist.append(tbhu)
hdulist[1].header['EXTNAME']='OBJECTS'
os.system('rm ' + outcat)
hdulist.writeto( outcat )
print 'wrote out new cat'
print outcat
return outcat
def match_inside(SUPA1,SUPA2,FLAT_TYPE):
dict1 = get_files(SUPA1,FLAT_TYPE)
search_params1 = initialize(dict1['filter'],dict1['OBJNAME'])
search_params1.update(dict1)
dict2 = get_files(SUPA2,FLAT_TYPE)
search_params2 = initialize(dict2['filter'],dict2['OBJNAME'])
search_params2.update(dict2)
import os
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params1['OBJNAME']}
illum_path='/nfs/slac/g/ki/ki05/anja/SUBARU/ILLUMINATION/'
#os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/SELF/')
from glob import glob
catalog1 = search_params1['pasted_cat']
catalog2 = search_params2['pasted_cat']
#os.system('ldacrentab -i ' + catalog2 + ' -t OBJECTS STDTAB -o ' + catalog2.replace('cat','std.cat'))
filter = search_params1['filter'] #exposures[exposure]['keywords']['filter']
OBJECT = search_params1['OBJECT'] #exposures[exposure]['keywords']['OBJECT']
outcat = path + 'PHOTOMETRY/ILLUMINATION/SELF/matched_' + SUPA1 + '_' + filter + '_' + '_self.cat'
file = 'matched_' + SUPA1 + '.cat'
os.system('rm ' + outcat)
command = 'match_simple_cats.sh ' + catalog1 + ' ' + catalog2 + ' ' + outcat
print command
os.system(command)
save_exposure({'matched_cat_self':outcat},SUPA1,FLAT_TYPE)
print outcat
def getTableInfo():
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy , string
p = pyfits.open(tmpdir + '/final.cat')
tbdata = p[1].data
types = []
ROTS = {}
KEYS = {}
for column in p[1].columns:
if string.find(column.name,'$') != -1:
print column
res = re.split('\$',column.name)
ROT = res[0]
IMAGE = res[1]
KEY = res[2]
if not ROTS.has_key(ROT):
ROTS[ROT] = []
if not len(filter(lambda x:x==IMAGE,ROTS[ROT])) and IMAGE!='SUPA0011082':
ROTS[ROT].append(IMAGE)
return ROTS
def diffCalcNew():
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy , string
p = pyfits.open(tmpdir + '/final.cat')
tbdata = p[1].data
types = []
ROTS = {}
KEYS = {}
for column in p[1].columns:
if string.find(column.name,'$') != -1:
print column
res = re.split('\$',column.name)
ROT = res[0]
IMAGE = res[1]
KEY = res[2]
if not ROTS.has_key(ROT):
ROTS[ROT] = []
if not len(filter(lambda x:x==IMAGE,ROTS[ROT])):
ROTS[ROT].append(IMAGE)
print ROTS
#good = 0
#for i in range(len(tbdata)):
# array = []
# for y in ROTS[ROT]:
# array += [tbdata.field(ROT+'$'+y+'$CLASS_STAR')[i] for y in ROTS[ROT]]
# array.sort()
# if array[-1]>0.9 and array[-2]>0.9:
# good += 1
#print good, len(tbdata)
def starConstruction(EXPS):
''' compute, for each matched object, the median and scatter of MAG_AUTO across all exposures (nonzero measurements only) and merge them into the table '''
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy , string, scipy
p = pyfits.open(tmpdir + '/final.cat')
table = p[1].data
from copy import copy
w = []
for ROT in EXPS.keys():
for y in EXPS[ROT]:
w.append(copy(table.field(ROT+'$'+y+'$MAG_AUTO')))
medians = []
stds = []
for i in range(len(w[0])):
non_zero = []
for j in range(len(w)):
if w[j][i] != 0:
non_zero.append(w[j][i])
if len(non_zero) != 0:
medians.append(float(scipy.median(non_zero)))
stds.append(float(scipy.std(non_zero)))
else:
medians.append(float(-99))
stds.append(99)
print medians[0:99]
tnew = mk_tab([[medians,'median'],[stds,'std']])
tall = merge(tnew,p)
print 'done merging'
def selectGoodStars(EXPS,match,LENGTH1,LENGTH2):
''' select stars that are unsaturated, unflagged and well measured in every exposure; any exposure left with no usable stars is dropped from EXPS and the selection is repeated '''
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy , string, scipy
p = pyfits.open(tmpdir + '/final.cat')
#print p[1].columns
table = p[1].data
star_good = [] #= scipy.zeros(len(table))
supas = []
from copy import copy
totalstars = 0
''' if there is an image that does not match, throw it out '''
Finished = False
while not Finished:
temp = copy(table)
for ROT in EXPS.keys():
for y in EXPS[ROT]:
mask = temp.field(ROT+'$'+y+'$MAG_AUTO') != 0.0
good_entries = temp[mask]
temp = good_entries
print len(good_entries.field(ROT+'$'+y+'$MAG_AUTO'))
mask = temp.field(ROT+'$'+y+'$MAG_AUTO') < 27
good_entries = temp[mask]
temp = good_entries
print len(good_entries.field(ROT+'$'+y+'$MAG_AUTO'))
mask = 0 < temp.field(ROT+'$'+y+'$MAG_AUTO')
good_entries = temp[mask]
temp = good_entries
print len(good_entries.field(ROT+'$'+y+'$MAG_AUTO'))
print ROT,y, temp.field(ROT+'$'+y+'$MaxVal')[0:10],temp.field(ROT+'$'+y+'$BackGr')[0:10]
mask = (temp.field(ROT+'$'+y+'$MaxVal') + temp.field(ROT+'$'+y+'$BackGr')) < 26000
good_entries = temp[mask]
temp = good_entries
good_number = len(good_entries.field(ROT+'$'+y+'$MAG_AUTO'))
print ROT,y, good_number , EXPS
if good_number == 0:
TEMP = {}
for ROTTEMP in EXPS.keys():
TEMP[ROTTEMP] = []
for yTEMP in EXPS[ROTTEMP]:
if y != yTEMP:
TEMP[ROTTEMP].append(yTEMP)
EXPS = TEMP
break
if good_number != 0:
Finished = True
print len(temp), 'temp'
zps = {}
print EXPS.keys(), EXPS
for ROT in EXPS.keys():
for y in EXPS[ROT]:
s = good_entries.field(ROT+'$'+y+'$MAG_AUTO').sum()
print s
print s/len(good_entries)
zps[y] = s/len(good_entries)
print zps
from copy import copy
tab = {}
for ROT in EXPS.keys():
for y in EXPS[ROT]:
keys = [ROT+'$'+y+'$CHIP',ROT+'$'+y+'$Xpos_ABS',ROT+'$'+y+'$Ypos_ABS',ROT+'$'+y+'$MAG_AUTO',ROT+'$'+y+'$MAGERR_AUTO',ROT+'$'+y+'$MaxVal',ROT+'$'+y+'$BackGr',ROT+'$'+y+'$CLASS_STAR',ROT+'$'+y+'$Flag',ROT+'$'+y+'$ALPHA_J2000',ROT+'$'+y+'$DELTA_J2000']
if match:
keys = [ROT+'$'+y+'$CHIP',ROT+'$'+y+'$Xpos_ABS',ROT+'$'+y+'$Ypos_ABS',ROT+'$'+y+'$MAG_AUTO',ROT+'$'+y+'$MAGERR_AUTO',ROT+'$'+y+'$MaxVal',ROT+'$'+y+'$BackGr',ROT+'$'+y+'$CLASS_STAR',ROT+'$'+y+'$Flag' ,'SDSSstdMag_corr','SDSSstdMagErr_corr','SDSSstdMagColor_corr','SDSSstdMagClean_corr','SDSSStar_corr',ROT+'$'+y+'$ALPHA_J2000',ROT+'$'+y+'$DELTA_J2000']
#print 'SDSS', table.field('SDSSstdMag_corr')[-1000:]
for key in keys:
tab[key] = copy(table.field(key))
print keys
for i in range(len(table)):
mags_ok = False
star_ok = False
class_star_array = []
include_star = []
in_box = []
name = []
mags_diff_array = []
mags_good_array = []
mags_array = []
from copy import copy
for ROT in EXPS.keys():
#for y in EXPS[ROT]:
# if table.field(ROT+'$'+y+'$MAG_AUTO')[i] != 0.0:
mags_array += [tab[ROT+'$'+y+'$MAG_AUTO'][i] for y in EXPS[ROT]]
mags_diff_array += [zps[y] - tab[ROT+'$'+y+'$MAG_AUTO'][i] for y in EXPS[ROT]]
mags_good_array += [tab[ROT+'$'+y+'$MAG_AUTO'][i]!=0.0 for y in EXPS[ROT]]
#in_box += [1000 < tab[ROT+'$'+y+'$Xpos_ABS'][i] < 9000 and 1000 < tab[ROT+'$'+y+'$Ypos_ABS'][i] < 7000 for y in EXPS[ROT]]
if 0: #tab[ROT+'$'+y+'$MAG_AUTO'][i]!=0.0:
print LENGTH1, LENGTH2, (tab[ROT+'$'+y+'$MaxVal'][i] + tab[ROT+'$'+y+'$BackGr'][i]) < 20000 , tab[ROT+'$'+y+'$Flag'][i]==0 , tab[ROT+'$'+y+'$MAG_AUTO'][i] < 30 , tab[ROT+'$'+y+'$MAG_AUTO'][i]!=0.0 , tab[ROT+'$'+y+'$MAGERR_AUTO'][i]<0.05 , ((tab[ROT+'$'+y+'$Xpos_ABS'][i]-LENGTH1/2.)**2.+(tab[ROT+'$'+y+'$Ypos_ABS'][i]-LENGTH2/2.)**2.) < (LENGTH1/2.)**2
if 0: # 0 < tab[ROT+'$'+EXPS[ROT][0]+'$Xpos_ABS'][i] < 200 or 0 < tab[ROT+'$'+EXPS[ROT][0]+'$Ypos_ABS'][i] < 200:
print LENGTH1, LENGTH2, [((tab[ROT+'$'+y+'$Xpos_ABS'][i]-LENGTH1/2.)**2.+(tab[ROT+'$'+y+'$Ypos_ABS'][i]-LENGTH2/2.)**2.) < ((LENGTH1/2.)**2 + (LENGTH2/2.)**2.) for y in EXPS[ROT][0:2]]
print [[tab[ROT+'$'+y+'$Xpos_ABS'][i]-LENGTH1/2., tab[ROT+'$'+y+'$Ypos_ABS'][i]-LENGTH2/2., tab[ROT+'$'+y+'$Xpos_ABS'][i],tab[ROT+'$'+y+'$Ypos_ABS'][i]] for y in EXPS[ROT][0:2]]
print [[tab[ROT+'$'+y+'$Xpos_ABS'][i], tab[ROT+'$'+y+'$Ypos_ABS'][i], tab[ROT+'$'+y+'$Xpos_ABS'][i],tab[ROT+'$'+y+'$Ypos_ABS'][i]] for y in EXPS[ROT][0:2]]
#include_star += [( tab[ROT+'$'+y+'$MAG_AUTO'][i]!=0.0 ) for y in EXPS[ROT]] # and
#print [[tab[ROT+'$'+y+'$MaxVal'][i] , tab[ROT+'$'+y+'$BackGr'][i]] for y in EXPS[ROT]]
#print [((tab[ROT+'$'+y+'$MaxVal'][i] + tab[ROT+'$'+y+'$BackGr'][i]) < 1 and tab[ROT+'$'+y+'$Flag'][i]==0 and tab[ROT+'$'+y+'$MAG_AUTO'][i] < 30 and tab[ROT+'$'+y+'$MAG_AUTO'][i]!=0.0 and tab[ROT+'$'+y+'$MAGERR_AUTO'][i]<0.05 and ((tab[ROT+'$'+y+'$Xpos_ABS'][i]-LENGTH1/2.)**2.+(tab[ROT+'$'+y+'$Ypos_ABS'][i]-LENGTH2/2.)**2.) < (LENGTH1/2.)**2) for y in EXPS[ROT]]
include_star += [( 0 < (tab[ROT+'$'+y+'$MaxVal'][i] + tab[ROT+'$'+y+'$BackGr'][i]) < 25000 and tab[ROT+'$'+y+'$Flag'][i]==0 and tab[ROT+'$'+y+'$MAG_AUTO'][i] < 30 and tab[ROT+'$'+y+'$MAG_AUTO'][i]!=0.0 and tab[ROT+'$'+y+'$MAGERR_AUTO'][i]<0.05 and ((tab[ROT+'$'+y+'$Xpos_ABS'][i]-LENGTH1/2.)**2.+(tab[ROT+'$'+y+'$Ypos_ABS'][i]-LENGTH2/2.)**2.) < (LENGTH1/2.)**2) for y in EXPS[ROT]] # and
#include_star += [( 0 < (tab[ROT+'$'+y+'$MaxVal'][i] + tab[ROT+'$'+y+'$BackGr'][i]) < 25000 and tab[ROT+'$'+y+'$Flag'][i]==0 and tab[ROT+'$'+y+'$MAG_AUTO'][i] < 30 and tab[ROT+'$'+y+'$MAG_AUTO'][i]!=0.0 and tab[ROT+'$'+y+'$MAGERR_AUTO'][i]<0.05) for y in EXPS[ROT]] # and
#include_star += [((tab[ROT+'$'+y+'$MaxVal'][i] + tab[ROT+'$'+y+'$BackGr'][i]) < 25000 and tab[ROT+'$'+y+'$Flag'][i]==0 and tab[ROT+'$'+y+'$MAG_AUTO'][i] < 30 and tab[ROT+'$'+y+'$MAG_AUTO'][i]!=0.0 and tab[ROT+'$'+y+'$MAGERR_AUTO'][i]<0.05) for y in EXPS[ROT]] # and
#in_circ = lambda x,y,r: (x**2.+y**2.)<r**2.
#include_star += [((tab[ROT+'$'+y+'$MaxVal'][i] + tab[ROT+'$'+y+'$BackGr'][i]) < 25000 and tab[ROT+'$'+y+'$Flag'][i]==0 and tab[ROT+'$'+y+'$MAG_AUTO'][i] < 30 and tab[ROT+'$'+y+'$MAG_AUTO'][i]!=0.0 and tab[ROT+'$'+y+'$MAGERR_AUTO'][i]<0.05 and in_circ(tab[ROT+'$'+y+'$Xpos_ABS'][i]-LENGTH1/2.,tab[ROT+'$'+y+'$Ypos_ABS'][i]-LENGTH2/2,LENGTH) for y in EXPS[ROT]]
#include_star += [((tab[ROT+'$'+y+'$MaxVal'][i] + tab[ROT+'$'+y+'$BackGr'][i]) < 25000 and tab[ROT+'$'+y+'$Flag'][i]==0 and tab[ROT+'$'+y+'$MAG_AUTO'][i] < 30 and tab[ROT+'$'+y+'$MAG_AUTO'][i]!=0.0 and tab[ROT+'$'+y+'$MAGERR_AUTO'][i]<0.05) for y in EXPS[ROT]]
#for y in EXPS[ROT]:
# print (tab[ROT+'$'+y+'$MaxVal'][i] + tab[ROT+'$'+y+'$BackGr'][i]) < 27500 , tab[ROT+'$'+y+'$Flag'][i]==0 , tab[ROT+'$'+y+'$MAG_AUTO'][i] < 40 , tab[ROT+'$'+y+'$MAG_AUTO'][i]!=0.0
name += [{'name':EXPS[ROT][z],'rotation':ROT} for z in range(len(EXPS[ROT]))]
class_star_array += [tab[ROT+'$'+y+'$CLASS_STAR'][i] for y in EXPS[ROT]]
class_star_array.sort()
#if len(mags_array) > 1:
# if 1: #abs(mags_array[0] - mags_array[1]) < 0.5:
# mags_ok = True
# if 1: #abs(class_star_array[-1]) > 0.01: # MAIN PARAMETER!
# star_ok = True
list = []
for k in range(len(mags_good_array)):
if mags_good_array[k]:
list.append(mags_diff_array[k])
if len(list) > 1:
median_mag_diff = scipy.median(list)
file_list=[]
for j in range(len(include_star)):
if include_star[j] and abs(mags_diff_array[j] - median_mag_diff) < 1.: # MAIN PARAMETER!
file_list.append(name[j])
mag = mags_diff_array[j]
if match:
''' if match object exists '''
if tab['SDSSstdMag_corr'][i] != 0.0: match_exists = 1
else: match_exists = 0
''' if match object is good -- throw out galaxies for this '''
if (match=='sdss' or abs(class_star_array[-1]) > 0.8) and 40. > tab['SDSSstdMag_corr'][i] > 0.0 and 5 > tab['SDSSstdMagColor_corr'][i] > -5: match_good = 1
else: match_good = 0
else:
match_good = 0
match_exists = 0
if len(file_list) > 1:
totalstars += len(file_list)
''' if using chip dependent color terms, colors for each object are required '''
#if catalog='bootstrap':
# if sdss==1:
# star_good.append(i)
# supas.append({'mag':mag,'table index':i,'supa files':file_list, 'match':match, 'match_exists':match_exists})
#else:
star_good.append(i)
supas.append({'mag':mag,'table index':i,'supa files':file_list, 'match':match_good, 'match_exists':match_exists, 'std':scipy.std(list)})
if i%2000==0: print i
supas.sort(sort_supas)
return EXPS, star_good, supas, totalstars
def sort_supas(x,y):
if x['mag'] > y['mag']:
return 1
else: return -1
def diffCalc(SUPA1,FLAT_TYPE):
dict = get_files(SUPA1,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy
print search_params['matched_cat_self']
p = pyfits.open(search_params['matched_cat_self'])
tbdata = p[1].data
mask = tbdata.field('SEx_MaxVal') + tbdata.field('SEx_BackGr') < 27500
newtbdata = tbdata[mask]
print len(newtbdata)
mask = newtbdata.field('CLASS_STAR') > 0.95
newtbdata = newtbdata[mask]
mask = abs(newtbdata.field('SEx_MAG_APER2') - newtbdata.field('MAG_APER2')) < 0.01
new2tbdata = newtbdata[mask]
print len(new2tbdata)
data = new2tbdata.field('SEx_MAG_APER2') - new2tbdata.field('MAG_APER2')
magErr = new2tbdata.field('SEx_MAGERR_APER2')
X = new2tbdata.field('Xpos_ABS')
Y = new2tbdata.field('Ypos_ABS')
file = 'test'
calcDataIllum(file,search_params['LENGTH1'], search_params['LENGTH2'],data,magErr,X,Y)
data_save = []
magErr_save = []
X_save = []
Y_save = []
for i in range(len(data)):
data_save.append([new2tbdata.field('SEx_MAG_APER2')[i],new2tbdata.field('MAG_APER2')[i]])
magErr_save.append([new2tbdata.field('SEx_MAGERR_APER2')[i],new2tbdata.field('MAGERR_APER2')[i]])
X_save.append([new2tbdata.field('Xpos_ABS')[i],new2tbdata.field('SEx_Xpos_ABS')[i]])
Y_save.append([new2tbdata.field('Ypos_ABS')[i],new2tbdata.field('SEx_Ypos_ABS')[i]])
return data_save, magErr_save, X_save, Y_save
def linear_fit(OBJNAME,FILTER,PPRUN,match=None,CONFIG=None,primary=None,secondary=None):
print match, CONFIG
print OBJNAME,FILTER, PPRUN, tmpdir
maxSigIter=50
solutions = []
quick = False
fit_db = {}
import pickle
''' get data '''
EXPS = getTableInfo()
for ROT in EXPS.keys():
print EXPS[ROT]
save_fit({'PPRUN':PPRUN,'FILTER':FILTER,'OBJNAME':OBJNAME,str(ROT)+'images':len(EXPS[ROT]),str(ROT)+'supas':reduce(lambda x,y:x+','+y,EXPS[ROT])})
print EXPS
''' create Chebyshev polynomials '''
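# Build the Chebyshev basis for the illumination fit; configuration '10_3' gets
# terms up to 4th order in each coordinate, all other configurations up to 3rd
# order.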
if CONFIG == '10_3':
cheby_x = [{'n':'0x','f':lambda x,y:1.,'order':0},{'n':'1x','f':lambda x,y:x,'order':1},{'n':'2x','f':lambda x,y:2*x**2-1,'order':2},{'n':'3x','f':lambda x,y:4*x**3.-3*x,'order':3},{'n':'4x','f':lambda x,y:8*x**4.-8*x**2.+1,'order':4}]#,{'n':'5x','f':lambda x,y:16*x**5.-20*x**3.+5*x,'order':5}]
cheby_y = [{'n':'0y','f':lambda x,y:1.,'order':0},{'n':'1y','f':lambda x,y:y,'order':1},{'n':'2y','f':lambda x,y:2*y**2-1,'order':2},{'n':'3y','f':lambda x,y:4*y**3.-3*y,'order':3},{'n':'4y','f':lambda x,y:8*y**4.-8*y**2.+1,'order':4}] #,{'n':'5y','f':lambda x,y:16*y**5.-20*y**3.+5*y,'order':5}]
else:
cheby_x = [{'n':'0x','f':lambda x,y:1.,'order':0},{'n':'1x','f':lambda x,y:x,'order':1},{'n':'2x','f':lambda x,y:2*x**2-1,'order':2},{'n':'3x','f':lambda x,y:4*x**3.-3*x,'order':3}] #,{'n':'4x','f':lambda x,y:8*x**4.-8*x**2.+1,'order':4},{'n':'5x','f':lambda x,y:16*x**5.-20*x**3.+5*x,'order':5}]
cheby_y = [{'n':'0y','f':lambda x,y:1.,'order':0},{'n':'1y','f':lambda x,y:y,'order':1},{'n':'2y','f':lambda x,y:2*y**2-1,'order':2},{'n':'3y','f':lambda x,y:4*y**3.-3*y,'order':3}] #,{'n':'4y','f':lambda x,y:8*y**4.-8*y**2.+1,'order':4},{'n':'5y','f':lambda x,y:16*y**5.-20*y**3.+5*y,'order':5}]
cheby_terms = []
cheby_terms_no_linear = []
for tx in cheby_x:
for ty in cheby_y:
if 1: #tx['order'] + ty['order'] <=3:
if not ((tx['n'] == '0x' and ty['n'] == '0y')): # or (tx['n'] == '0x' and ty['n'] == '1y') or (tx['n'] == '1x' and ty['n'] == '0y')) :
cheby_terms.append({'n':tx['n'] + ty['n'],'fx':tx['f'],'fy':ty['f']})
if not ((tx['n'] == '0x' and ty['n'] == '0y') or (tx['n'] == '0x' and ty['n'] == '1y') or (tx['n'] == '1x' and ty['n'] == '0y')) :
cheby_terms_no_linear.append({'n':tx['n'] + ty['n'],'fx':tx['f'],'fy':ty['f']})
#ROTS, data, err, X, Y, maxVal, classStar = diffCalcNew()
#save = {'ROTS': ROTS, 'data':data,'err':err,'X':X,'Y':Y,'maxVal':maxVal,'classStar':classStar}
#uu = open(tmpdir + '/store','w')
#import pickle
#pickle.dump(save,uu)
#uu.close()
''' EXPS has all of the image information for different rotations '''
''' make model '''
#fit = make_model(EXPS)
#position_fit = make_position_model(EXPS)
#print fit  # make_model is commented out above, so fit is not defined here
''' see if in sdss, linear or not '''
dt = get_files(EXPS[EXPS.keys()[0]][0])
import re
print dt['CHIPS']
CHIPS = [int(x) for x in re.split(',',dt['CHIPS'])]
LENGTH1, LENGTH2 = dt['LENGTH1'], dt['LENGTH2']
print LENGTH1, LENGTH2
#cov, galaxycat, starcat = sdss_coverage(dt['SUPA'],dt['FLAT_TYPE'])
''' if early chip configuration, use chip color terms '''
if (CONFIG=='8' or CONFIG=='9'):
relative_colors = True
else: relative_colors = False
print relative_colors
if not quick:
EXPS, star_good,supas, totalstars = selectGoodStars(EXPS,match,LENGTH1,LENGTH2)
uu = open(tmpdir + '/selectGoodStars','w')
import pickle
pickle.dump({'EXPS':EXPS,'star_good':star_good,'supas':supas,'totalstars':totalstars},uu)
uu.close()
import pickle
f=open(tmpdir + '/selectGoodStars','r')
m=pickle.Unpickler(f)
d=m.load()
EXPS = d['EXPS']
star_good = d['star_good']
supas = d['supas']
totalstars = d['totalstars']
info = starStats(supas)
print "calc_test_save.linear_fit('" + OBJNAME + "','" + FILTER + "','" + PPRUN + "'," + str(match) + ",'" + CONFIG + str(primary) + "',secondary='" + str(secondary) + "')"
print len(star_good)
#cheby_terms_use = cheby_terms_no_linear
fitvars_fiducial = False
if match:
if info['match'] > 400:
samples = [['match','cheby_terms',True]]
print 'all terms'
else:
samples = [['match','cheby_terms_no_linear',True]]
print 'no linear terms'
else:
samples = [['nomatch','cheby_terms_no_linear',False]]
sample = str(match)
for hold_sample,which_terms,match in samples:
save_fit({'PPRUN':PPRUN,'FILTER':FILTER,'OBJNAME':OBJNAME,sample + '$primary_filt':primary,sample + '$secondary_filt':secondary,'coverage':str(match),sample + '$relative_colors':relative_colors,sample + '$catalog':str(match),'CONFIG':CONFIG})
cheby_terms_use = locals()[which_terms]
import scipy
import astropy, astropy.io.fits as pyfits
p = pyfits.open(tmpdir + '/final.cat')
table = p[1].data
from copy import copy
tab = {}
for ROT in EXPS.keys():
for y in EXPS[ROT]:
keys = [ROT+'$'+y+'$CHIP',ROT+'$'+y+'$Xpos',ROT+'$'+y+'$Ypos',ROT+'$'+y+'$CHIP',ROT+'$'+y+'$Xpos_ABS',ROT+'$'+y+'$Ypos_ABS',ROT+'$'+y+'$MAG_AUTO',ROT+'$'+y+'$MAGERR_AUTO',ROT+'$'+y+'$MaxVal',ROT+'$'+y+'$BackGr',ROT+'$'+y+'$CLASS_STAR',ROT+'$'+y+'$Flag',ROT+'$'+y+'$ALPHA_J2000',ROT+'$'+y+'$DELTA_J2000']
if match:
keys = [ROT+'$'+y+'$CHIP',ROT+'$'+y+'$Xpos',ROT+'$'+y+'$Ypos',ROT+'$'+y+'$CHIP',ROT+'$'+y+'$Xpos_ABS',ROT+'$'+y+'$Ypos_ABS',ROT+'$'+y+'$MAG_AUTO',ROT+'$'+y+'$MAGERR_AUTO',ROT+'$'+y+'$MaxVal',ROT+'$'+y+'$BackGr',ROT+'$'+y+'$CLASS_STAR',ROT+'$'+y+'$Flag' ,'SDSSstdMag_corr','SDSSstdMagErr_corr','SDSSstdMagColor_corr','SDSSstdMagClean_corr','SDSSStar_corr',ROT+'$'+y+'$ALPHA_J2000',ROT+'$'+y+'$DELTA_J2000']
for key in keys:
tab[key] = copy(table.field(key))
coord_conv_x = lambda x:(2.*x-(LENGTH1))/((LENGTH1))
coord_conv_y = lambda x:(2.*x-(LENGTH2))/((LENGTH2))
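# Normalize absolute pixel coordinates onto [-1, 1] for the Chebyshev terms.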
print LENGTH1, LENGTH2
save_fit({'FILTER':FILTER,'OBJNAME':OBJNAME,'PPRUN':PPRUN,'supas':len(supas),sample + '$match_stars':len(filter(lambda x:x['match'],supas))})
supas_copy = copy(supas)
''' find the color term '''
if True:
data = []
magErr = []
color = []
for star in supas:
''' each exp of each star '''
if star['match'] and (sample=='sdss' or sample=='bootstrap'):
for exp in star['supa files']:
if 2 > tab['SDSSstdMagColor_corr'][star['table index']] > -2:
rotation = exp['rotation']
sigma = tab['SDSSstdMagErr_corr'][star['table index']]
data.append(tab[str(rotation)+'$'+exp['name']+'$MAG_AUTO'][star['table index']] - tab['SDSSstdMag_corr'][star['table index']])
magErr.append(tab['SDSSstdMagErr_corr'][star['table index']])
color.append(tab['SDSSstdMagColor_corr'][star['table index']])
color.sort()
if False:
A = scipy.zeros([len(data),2])
B = scipy.zeros(len(data))
for i in range(len(data)):
A[i][0] = color[i]/magErr[i]
A[i][1] = 1./magErr[i]
B[i] = data[i]/magErr[i]
print A
print B
from scipy import linalg
print 'doing linear algebra'
U = linalg.lstsq(A,B)[0]
print U
print A
print B
print scipy.shape(U)
print scipy.shape(A)
print scipy.shape(B)
print scipy.shape(A), len(U),
Bprime = scipy.dot(A,U)
print scipy.shape(Bprime),scipy.shape(B)
Bdiff = (abs(abs(B-Bprime))).sum()/len(B)
print (B-Bprime)[:300]
print U[0:20]
print Bdiff, 'reduced chi-squared'
a = [-100,100]
m = [U[1]-100*U[0],U[1]+U[0]*100]
print a, m
#plot_color(color, data, a, m)
for sample_size in ['all']: #'rand1','rand1']: #,'rand2','rand3','rand4','all']: #,'rand3']:
''' take a random sample of half '''
if sample_size != 'all':
## changing the CLASS_STAR criterion upwards helps as does increasing the sigma on the SDSS stars
print len(supas)
l = range(len(supas_copy))
print l[0:10]
l.sort(random_cmp)
print l[0:10]
''' shorten star_good, supas '''
supas = [supas_copy[i] for i in l[0:len(supas_copy)/2]]
else:
supas = copy(supas_copy)
if totalstars > 30000:
print len(supas)
l = range(len(supas_copy))
print l[0:10]
l.sort(random_cmp)
print l[0:10]
''' shorten star_good, supas '''
print totalstars, len(supas)
#supas = [supas_copy[i] for i in l[0:int(float(30000)/float(totalstars)*len(supas))]]
supas = copy(supas_copy[0:int(float(30000)/float(totalstars)*len(supas))])
print len(supas), 'supas', supas[0], totalstars
print len(supas_copy), len(supas)
print supas[0:10]
columns = []
column_dict = {}
''' position-dependent terms in design matrix '''
position_columns = []
index = -1
for ROT in EXPS.keys():
for term in cheby_terms_use:
index += 1
name = str(ROT) + '$' + term['n'] # + reduce(lambda x,y: x + 'T' + y,term)
position_columns.append({'name':name,'fx':term['fx'],'fy':term['fy'],'rotation':ROT,'index':index})
#print position_columns
columns += position_columns
''' zero point terms in design matrix '''
per_chip = False  # a different zp for each chip on each exposure
same_chips = True  # a different zp for each chip, constant across exposures
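# Zero-point columns in the design matrix: one per exposure (the first entry is
# skipped, apparently to break the degeneracy with the per-star magnitudes),
# plus, when same_chips is set, one per chip shared across exposures (chip
# CHIPS[0] is the reference), and an extra zero point for the SDSS/bootstrap
# catalog when a matched reference catalog is used.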
if not per_chip:
zp_columns = []
for ROT in EXPS.keys():
for exp in EXPS[ROT]:
index += 1
zp_columns.append({'name':'zp_image_'+exp,'image':exp,'im_rotation':ROT,'index':index})
if per_chip:
zp_columns = []
for ROT in EXPS.keys():
for exp in EXPS[ROT]:
for chip in CHIPS:
index += 1
zp_columns.append({'name':'zp_image_'+exp + '_' + chip,'image':exp,'im_rotation':ROT, 'chip':chip,'index':index})
if False: # CONFIG == '10_3':
first_empty = 0
for chip in CHIPS:
for sub_chip in [1,2,3,4]:
if first_empty != 0:
index += 1
zp_columns.append({'name':'zp_'+str(chip)+'_'+str(sub_chip),'image':'chip_zp','chip':str(chip)+'_'+str(sub_chip),'index':index})
else: first_empty = 1
else:
if not per_chip and same_chips:
for chip in CHIPS:
index += 1
zp_columns.append({'name':'zp_'+str(chip),'image':'chip_zp','chip':chip,'index':index})
if match:
index += 1
zp_columns.append({'name':'zp_SDSS','image':'match','index':index})
columns += zp_columns
print columns
import os
os.system('pwd')
import config_bonn
reload(config_bonn)
from config_bonn import chip_groups
color_columns = []
if match:
if relative_colors:
''' add chip dependent color terms'''
for group in chip_groups[str(CONFIG)].keys():
''' this is the relative color term, so leave out the first group '''
if float(group) != 1:
index += 1
color_columns.append({'name':'color_group_'+str(group),'image':'chip_color','chip_group':group,'index':index})
''' add a color term for the catalog '''
index += 1
color_columns+=[{'name':'SDSS_color','image':'match_color_term','index':index, 'chip_group':[]}]
else: color_columns = []
columns += color_columns
print color_columns, match,
mag_columns = []
for star in supas:
mag_columns.append({'name':'mag_' + str(star['table index'])})
columns += mag_columns
print len(columns)
column_names = [x['name'] for x in columns] #reduce(lambda x,y: x+y,columns)]
print column_names[0:100]
''' total number of fit parameters summed over each rotation + total number of images of all rotations + total number of stars to fit '''
tot_exp = 0
for ROT in EXPS.keys():
for ele in EXPS[ROT]:
tot_exp += 1
x_length = len(position_columns) + len(zp_columns) + len(color_columns) + len(mag_columns)
print len(columns), x_length
x_length = len(columns)
y_length = reduce(lambda x,y: x + y,[len(star['supa files'])*2 for star in supas]) # double number of rows for SDSS
print x_length, y_length
print star['supa files']
print
Bstr = ''
row_num = -1
supa_num = -1
''' each star '''
print 'creating matrix....'
sigmas = []
inst = []
data = {}
magErr = {}
whichimage = {}
X = {}
Y = {}
color = {}
chipnums = {}
Star = {}
catalog_values = {}
for ROT in EXPS.keys():
data[ROT] = []
magErr[ROT] = []
X[ROT] = []
Y[ROT] = []
color[ROT] = []
whichimage[ROT] = []
chipnums[ROT] = []
Star[ROT] = []
chip_dict = {}
x_positions = {}
y_positions = {}
for star in supas:
supa_num += 1
''' each exp of each star '''
if 1:
star_A = []
star_B = []
star_B_cat = []
sigmas = []
for exp in star['supa files']:
row_num += 1
col_num = -1
rotation = exp['rotation']
x = tab[str(rotation) + '$' + exp['name'] + '$Xpos_ABS'][star['table index']]
y = tab[str(rotation) + '$' + exp['name'] + '$Ypos_ABS'][star['table index']]
x_rel = tab[str(rotation) + '$' + exp['name'] + '$Xpos'][star['table index']]
y_rel = tab[str(rotation) + '$' + exp['name'] + '$Ypos'][star['table index']]
if False: #CONFIG == '10_3':
from config_bonn import chip_divide_10_3
chip_num = int(tab[str(rotation) + '$' + exp['name'] + '$CHIP'][star['table index']] )
for div in chip_divide_10_3.keys():
if chip_divide_10_3[div][0] < x_rel <= chip_divide_10_3[div][1]:
sub_chip = div
chip = str(chip_num) + '_' + str(sub_chip)
else:
chip = int(tab[str(rotation) + '$' + exp['name'] + '$CHIP'][star['table index']] )
if not chip_dict.has_key(str(chip)):
chip_dict[str(chip)] = ''
print chip_dict.keys(), CHIPS
#print CONFIG, CONFIG == '10_3'
#print chip_div, x_rel, y_rel
#if x < 2000 or y < 2000 or abs(LENGTH1 - x) < 2000 or abs(LENGTH2 - y) < 2000:
# sigma = 1.5 * tab[str(rotation) + '$' + exp['name'] + '$MAGERR_AUTO'][star['table index']]
#else:
sigma = tab[str(rotation) + '$' + exp['name'] + '$MAGERR_AUTO'][star['table index']]
if sigma < 0.001: sigma = 0.001
sigma = sigma # * 1000.
#sigma = 1
for c in position_columns:
col_num += 1
if c['rotation'] == rotation:
n = str(rotation) + '$' + exp['name'] + '$Xpos_ABS'
x = tab[str(rotation) + '$' + exp['name'] + '$Xpos_ABS'][star['table index']]
y = tab[str(rotation) + '$' + exp['name'] + '$Ypos_ABS'][star['table index']]
x_positions[row_num] = x
y_positions[row_num] = y
x = coord_conv_x(x)
y = coord_conv_y(y)
value = c['fx'](x,y)*c['fy'](x,y)/sigma
star_A.append([row_num,col_num,value])
first_exposure = True
for c in zp_columns:
col_num += 1
#if not degeneracy_break[c['im_rotation']] and c['image'] == exp['name']:
if not per_chip:
if (first_exposure is not True and c['image'] == exp['name']):
value = 1./sigma
star_A.append([row_num,col_num,value])
if same_chips and c.has_key('chip'):
if (c['chip'] == chip) and chip != CHIPS[0]:
value = 1./sigma
star_A.append([row_num,col_num,value])
first_exposure = False
if per_chip:
if (first_column is not True and c['image'] == exp['name'] and c['chip'] == chip):
value = 1./sigma
star_A.append([row_num,col_num,value])
''' fit for the color term dependence for SDSS comparison '''
if match:
if relative_colors:
for c in color_columns:
col_num += 1
for chip_num in c['chip_group']:
if float(chip_num) == float(chip):
value = tab['SDSSstdMagColor_corr'][star['table index']]/sigma
star_A.append([row_num,col_num,value])
else:
col_num += 1
''' magnitude column -- include the correct/common magnitude '''
col_num += 1
value = 1./sigma
star_A.append([row_num,col_num+supa_num,value])
ra = tab[str(rotation) + '$' + exp['name'] + '$ALPHA_J2000'][star['table index']]
dec = tab[str(rotation) + '$' + exp['name'] + '$DELTA_J2000'][star['table index']]
value = tab[str(rotation) + '$' + exp['name'] + '$MAG_AUTO'][star['table index']]/sigma
catalog_values[col_num+supa_num] = {'inst_value':value*sigma,'ra':ra,'dec':dec,'sigma':sigma} # write into catalog
#print catalog_values, col_num+supa_num
x_long = tab[str(rotation) + '$' + exp['name'] + '$Xpos_ABS'][star['table index']]
y_long = tab[str(rotation) + '$' + exp['name'] + '$Ypos_ABS'][star['table index']]
x = coord_conv_x(x_long)
y = coord_conv_y(y_long)
if fitvars_fiducial:
value += add_single_correction(x,y,fitvars_fiducial)
star_B.append([row_num,value])
sigmas.append(sigma)
inst.append({'type':'match','A_array':star_A, 'B_array':star_B, 'sigma_array': sigmas})
''' only include one SDSS observation per star '''
#print sample
#raw_input()
if star['match'] and (sample=='sdss' or sample=='bootstrap') and tab['SDSSStar_corr'][star['table index']] == 1:
star_A = []
star_B = []
sigmas = []
''' need to filter out bad colored-stars '''
if 1:
row_num += 1
col_num = -1
exp = star['supa files'][0]
rotation = exp['rotation']
sigma = tab['SDSSstdMagErr_corr'][star['table index']]
if sigma < 0.03: sigma = 0.03
for c in position_columns:
col_num += 1
first_column = True
for c in zp_columns:
col_num += 1
''' remember that the good magnitude does not have any zp dependence!!! '''
if c['image'] == 'match':
value = 1./sigma
star_A.append([row_num,col_num,value])
x_positions[row_num] = x
y_positions[row_num] = y
first_column = False
''' fit for the color term dependence for SDSS comparison -- '''
if relative_colors:
for c in color_columns:
col_num += 1
if c['name'] == 'SDSS_color':
value = tab['SDSSstdMagColor_corr'][star['table index']]/sigma
star_A.append([row_num,col_num,value])
else:
col_num += 1
value = tab['SDSSstdMagColor_corr'][star['table index']]/sigma
star_A.append([row_num,col_num,value])
col_num += 1
''' magnitude column -- include the correct/common magnitude '''
value = 1./sigma
star_A.append([row_num,col_num+supa_num,value])
value = tab['SDSSstdMag_corr'][star['table index']]/sigma
star_B.append([row_num,value])
sigmas.append(sigma)
inst.append({'type':'sdss','A_array':star_A, 'B_array':star_B, 'sigma_array': sigmas})
''' record star information '''
if True:
for exp in star['supa files']:
rotation = str(exp['rotation'])
data[rotation].append(tab[str(rotation)+'$'+exp['name']+'$MAG_AUTO'][star['table index']] - tab['SDSSstdMag_corr'][star['table index']])
Star[rotation].append(tab['SDSSStar_corr'][star['table index']])
magErr[rotation].append(tab['SDSSstdMagErr_corr'][star['table index']])
whichimage[rotation].append(exp['name'])
X[rotation].append(tab[str(rotation) + '$' + exp['name'] + '$Xpos_ABS'][star['table index']])
Y[rotation].append(tab[str(rotation) + '$' + exp['name'] + '$Ypos_ABS'][star['table index']])
color[rotation].append(tab['SDSSstdMagColor_corr'][star['table index']])
chipnums[rotation].append(tab[str(rotation) + '$' + exp['name'] + '$CHIP'][star['table index']])
#if tab[str(rotation) + '$' + exp['name'] + '$CHIP'][star['table index']] == 1:
# print str(rotation) + '$' + exp['name'] + '$CHIP'
#print star_A, star_B, sigmas, sigma
print EXPS
for rot in EXPS.keys():
print data.keys()
print rot, len(data[str(rot)])
''' save the SDSS matches '''
matches = {'data':data,'magErr':magErr,'whichimage':whichimage,'X':X,'Y':Y,'color':color}
uu = open(tmpdir + '/sdss','w')
import pickle
pickle.dump(matches,uu)
uu.close()
''' do fitting '''
#if 1: #not quick:
for attempt in ['first','rejected']:
''' make matrices/vectors '''
Ainst_expand = []
for z in inst:
for y in z['A_array']:
Ainst_expand.append(y)
Binst_expand = []
for z in inst:
for y in z['B_array']:
Binst_expand.append(y)
print len(Binst_expand)
''' this gives the total number of rows added '''
sigmas = []
for z in inst:
for y in z['sigma_array']:
sigmas.append(y)
print len(Binst_expand)
ylength = len(Binst_expand)
print y_length, x_length
print len(Ainst_expand), len(Binst_expand)
print 'lengths'
A = scipy.zeros([y_length,x_length])
B = scipy.zeros(y_length)
import copy
if attempt == 'first': rejectlist = 0*copy.copy(B)
Af = open('A','w')
Bf = open('b','w')
rejected = 0
rejected_x = []
rejected_y = []
all_x = []
all_y = []
all_resids = []
if attempt == 'rejected':
for ele in Ainst_expand:
if rejectlist[ele[0]] == 0:
if x_positions.has_key(ele[0]) and y_positions.has_key(ele[0]):
all_x.append(float(str(x_positions[ele[0]])))
all_y.append(float(str(y_positions[ele[0]])))
all_resids.append(float(str(resids_sign[ele[0]])))
if rejectlist[ele[0]] == 0:
Af.write(str(ele[0]) + ' ' + str(ele[1]) + ' ' + str(ele[2]) + '\n')
#print ele, y_length, x_length
#print ele
A[ele[0],ele[1]] = ele[2]
else:
rejected += 1
if x_positions.has_key(ele[0]) and y_positions.has_key(ele[0]):
rejected_x.append(float(str(x_positions[ele[0]])))
rejected_y.append(float(str(y_positions[ele[0]])))
else:
for ele in Ainst_expand:
Af.write(str(ele[0]) + ' ' + str(ele[1]) + ' ' + str(ele[2]) + '\n')
#print ele, y_length, x_length
#print ele
A[ele[0],ele[1]] = ele[2]
for ele in Binst_expand:
if rejectlist[ele[0]] == 0:
B[ele[0]] = ele[1]
if attempt == 'rejected' and rejected > 0:
print rejected, 'rejected'
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':OBJNAME}
illum_dir = path + 'PHOTOMETRY/ILLUMINATION/' + FILTER + '/' + PPRUN + '/'
import Numeric
print all_resids[0:100]
print all_x[0:100]
print all_y[0:100]
print 'check'
os.system('mkdir -p ' + illum_dir)
calcDataIllum(sample + 'reducedchi'+str(ROT)+FILTER,LENGTH1,LENGTH2,Numeric.array(all_resids),Numeric.ones(len(all_resids)),Numeric.array(all_x),Numeric.array(all_y),pth=illum_dir,rot=0)
dtmp = {}
dtmp[sample+'$'+sample_size+'$rejected']=rejected
dtmp[sample+'$'+sample_size+'$totalmeasurements']=ylength  # total number of measurement rows, not the rejected count
import Numeric
import ppgplot
x_p = Numeric.array(rejected_x)
y_p = Numeric.array(rejected_y)
import copy
x = sorted(copy.copy(x_p))
y = sorted(copy.copy(y_p))
illum_dir = path + 'PHOTOMETRY/ILLUMINATION/' + FILTER + '/' + PPRUN + '/'
import os
os.system('mkdir ' + illum_dir)
reject_plot = illum_dir + sample + 'rejects.ps'
dtmp[sample+'$'+sample_size+'$reject_plot']=reject_plot
dtmp.update({'PPRUN':PPRUN,'FILTER':FILTER,'OBJNAME':OBJNAME,sample + '$linearfit':'1'})
save_fit(dtmp)
import tempfile
t = tempfile.NamedTemporaryFile(dir='/tmp/').name
ppgplot.pgbeg(t + '/cps',1,1)
ppgplot.pgiden()
#print x_p
#print z_p
#print zerr_p
#pgswin(x[0],x[-1],z[0],z[-1])
### plot positions
ppgplot.pgpanl(1,1)
ppgplot.pgswin(x[0],x[-1],y[0],y[-1])
ppgplot.pgbox()
ppgplot.pglab('X','Y','rejected points') # label the plot
#pgsci(3)
#pgerrb(6,x_p,z_p,zerr_p)
print x_p[0:100], y_p[0:100]
print type(x_p), type(y_p)
print 'plotting'
ppgplot.pgpt(x_p,y_p,3)
print 'plotted'
ppgplot.pgend()
print reject_plot
os.system('mv ' + t + ' ' + reject_plot)
Bstr = reduce(lambda x,y:x+' '+y,[str(z[1]) for z in Binst_expand])
Bf.write(Bstr)
Bf.close()
Af.close()
print 'finished matrix....'
print len(position_columns), len(zp_columns)
print A[0,0:30], B[0:10], scipy.shape(A), scipy.shape(B)
print A[1,0:30], B[0:10], scipy.shape(A), scipy.shape(B)
print 'hi!'
Af = open(tmpdir + '/B','w')
for i in range(len(B)):
Af.write(str(B[i]) + '\n')
Af.close()
print 'solving matrix...'
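# The weighted least-squares system was written to the files 'A' and 'b' above;
# it is solved by an external 'sparse' program (assumed to be on the PATH),
# which writes the solution vector to the file 'x'.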
import re, os
os.system('rm x')
os.system('sparse < A')
bout = open('x','r').read()
res = re.split('\s+',bout[:-1].replace('nan','0'))
T = [float(x) for x in res][:x_length]
params = {}
for i in range(len(T)):
if i < len(column_names):
params[column_names[i]] = T[i]
if T[i] == -99:
print column_names[i], T[i]
if catalog_values.has_key(i):
catalog_values[i]['mag'] = T[i]
#raw_input()
U = T  # same truncated solution vector as T above
print 'finished solving...'
#from scipy import linalg
#print 'doing linear algebra'
#U = linalg.lstsq(A,B)
#print U[0][0:30]
''' calculate reduced chi-squared value'''
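# Note: Bdiff below is the mean absolute (sigma-weighted) residual, not a true
# reduced chi-squared, although it is labelled as such.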
print scipy.shape(A), len(U), x_length, len(res)
Bprime = scipy.dot(A,U)
print scipy.shape(Bprime),scipy.shape(B)
Bdiff = (abs(abs(B-Bprime))).sum()/len(B)
resids = abs(B-Bprime)
resids_sign = B-Bprime
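# Flag measurements whose weighted residual exceeds 5 and redo the fit without
# them on the second ('rejected') pass of the attempt loop.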
rejectlist = []
rejectnums = 0
for i in range(len(resids)):
if resids[i] > 5:
rejectlist.append(1)
rejectnums += 1
else: rejectlist.append(0)
print (B-Bprime)[:300]
print len(resids), rejectnums
print U[0:20]
print x[0:20]
reducedchi = Bdiff
print reducedchi, 'reduced chi-squared'
#save_fit({'PPRUN':PPRUN,'FILTER':FILTER,'OBJNAME':OBJNAME,'reducedchi$'+sample+'$'+sample_size:Bdiff})
data_directory = '/nfs/slac/g/ki/ki04/pkelly/illumination/'
position_fit = [['Xpos_ABS','Xpos_ABS'],['Xpos_ABS','Ypos_ABS'],['Ypos_ABS','Ypos_ABS'],['Xpos_ABS'],['Ypos_ABS']]
import re
''' save fit information '''
#print sample+'$'+sample_size+'$' + str(ROT) + '$positioncolumns',reduce(lambda x,y: x+','+y,[z['name'] for z in position_columns])
if match:
save_columns = position_columns + zp_columns + color_columns
else:
save_columns = position_columns + zp_columns
dtmp = {}
o = zp_columns + position_columns
#for ROT in EXPS.keys():
#dtmp['zp_' + ROT] = params['zp_' + ROT]
fitvars = {}
zp_images = ''
zp_images_names = ''
for ele in save_columns:
print ele
res = re.split('\$',ele['name'])
import string
if string.find(ele['name'],'zp_image') == -1:
fitvars[ele['name']] = U[ele['index']]
term_name = sample+'$'+sample_size+'$'+ele['name']
print term_name
dtmp[term_name]=fitvars[ele['name']]
print ele['name'], fitvars[ele['name']]
else:
zp_images += str(U[ele['index']]) + ','
zp_images_names += ele['name'] + ','
zp_images = zp_images[:-1]
zp_images_names = zp_images_names[:-1]
term_name = sample+'$'+sample_size+'$zp_images'
print term_name
dtmp[term_name]=zp_images
print dtmp[term_name]
term_name = sample+'$'+sample_size+'$zp_images_names'
print term_name
dtmp[term_name]=zp_images_names
print dtmp[term_name]
import string
print dtmp.keys()
use_columns = filter(lambda x: string.find(x,'zp_image') == -1,[z['name'] for z in save_columns] ) + ['zp_images','zp_images_names']
positioncolumns = reduce(lambda x,y: x+','+y,use_columns)
print positioncolumns
#print positioncolumns, sample+'$'+sample_size + '$positioncolumns'
#save_fit({'PPRUN':PPRUN,'FILTER':FILTER,'OBJNAME':OBJNAME,sample+'$'+sample_size+'$positioncolumns':positioncolumns})
dtmp[sample+'$'+sample_size+'$positioncolumns'] = positioncolumns
dtmp[sample+'$'+sample_size+'$reducedchi']=reducedchi
#term_name = sample+'$'+sample_size+'$0x1y'
#print term_name, '!!!!!'
#if 0:
#print fitvars['1$0x1y'], '1$0x1y'
#term_name = sample+'$'+sample_size+'$1$0x1y'
#dtmp[term_name] = 1.
#term_name = sample+'$'+sample_size+'$0$1x0y'
#dtmp[term_name] = 1.
#fitvars['1$0x1y'] = 1.
#fitvars['0$1x0y'] = 1.
#print fitvars
#raw_input()
print dtmp.keys()
print 'stop'
print dtmp[sample + '$' + sample_size + '$positioncolumns'], sample + '$' + sample_size + '$positioncolumns', PPRUN, FILTER, OBJNAME
dtmp.update({'PPRUN':PPRUN,'FILTER':FILTER,'OBJNAME':OBJNAME,'linearfit':'1'})
print dtmp
save_fit(dtmp)
if 1:
''' save the corrected catalog '''
tmp = {}
import astropy, astropy.io.fits as pyfits
cols = []
stdMag_corr = []
stdMagErr_corr = []
stdMagColor_corr = []
stdMagClean_corr = []
ALPHA_J2000 = []
DELTA_J2000 = []
SeqNr = []
Star_corr = []
sn = -1
for i in catalog_values.keys():
entr = catalog_values[i]
sn += 1
SeqNr.append(sn)
stdMag_corr.append(entr['mag'])
ALPHA_J2000.append(entr['ra'])
DELTA_J2000.append(entr['dec'])
stdMagErr_corr.append(entr['sigma'])
stdMagColor_corr.append(0)
stdMagClean_corr.append(1)
Star_corr.append(1)
print 'data start'
import Numeric
cols.append(pyfits.Column(name='stdMag_corr', format='E',array=Numeric.array(stdMag_corr)))
cols.append(pyfits.Column(name='stdMagErr_corr', format='E',array=Numeric.array(stdMagErr_corr)))
cols.append(pyfits.Column(name='stdMagColor_corr', format='E',array=Numeric.array(stdMagColor_corr)))
cols.append(pyfits.Column(name='stdMagClean_corr', format='E',array=Numeric.array(stdMagClean_corr)))
cols.append(pyfits.Column(name='ALPHA_J2000', format='E',array=Numeric.array(ALPHA_J2000)))
cols.append(pyfits.Column(name='DELTA_J2000', format='E',array=Numeric.array(DELTA_J2000)))
cols.append(pyfits.Column(name='SeqNr', format='E',array=Numeric.array(SeqNr)))
cols.append(pyfits.Column(name='Star_corr', format='E',array=Numeric.array(Star_corr)))
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':OBJNAME}
outcat = path + 'PHOTOMETRY/ILLUMINATION/' + 'catalog_' + PPRUN + '.cat'
print cols
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
tbhu = pyfits.BinTableHDU.from_columns(cols)
hdulist.append(tbhu)
hdulist[1].header['EXTNAME']='OBJECTS'
os.system('rm ' + outcat)
hdulist.writeto( outcat )
print 'wrote out new cat'
print outcat
save_fit({'FILTER':FILTER,'OBJNAME':OBJNAME,'PPRUN':PPRUN,'catalog':outcat})
#save_exposure({type + 'atch':outcat},SUPA,FLAT_TYPE)
#tmp[type + 'sdssmatch'] = outcat
''' make diagnostic plots '''
if 1:
import re, time
d = get_fits(OBJNAME,FILTER,PPRUN)
print d.keys()
column_prefix = sample+'$'+sample_size+'$'
position_columns_names = re.split('\,',d[column_prefix + 'positioncolumns'])
print position_columns_names, 'position_columns_names'
fitvars = {}
cheby_terms_dict = {}
print column_prefix, position_columns_names
for ele in position_columns_names:
print ele
if type(ele) != type({}):
ele = {'name':ele}
res = re.split('\$',ele['name'])
if string.find(ele['name'],'zp_image') == -1:
fitvars[ele['name']] = float(d[sample+'$'+sample_size+'$'+ele['name']])
for term in cheby_terms:
if term['n'] == ele['name'][2:]:
cheby_terms_dict[term['n']] = term
zp_images = re.split(',',d[sample+'$'+sample_size+'$zp_images'])
zp_images_names = re.split(',',d[sample+'$'+sample_size+'$zp_images_names'])
for i in range(len(zp_images)):
fitvars[zp_images_names[i]] = float(zp_images[i])
print fitvars
cheby_terms_use = [cheby_terms_dict[k] for k in cheby_terms_dict.keys()]
print cheby_terms_use, fitvars
''' make images of illumination corrections '''
for ROT in EXPS.keys():
size_x=LENGTH1
size_y=LENGTH2
bin=100
import numpy, math, pyfits, os
x,y = numpy.meshgrid(numpy.arange(0,size_x,bin),numpy.arange(0,size_y,bin))
F=0.1
print 'calculating'
x = coord_conv_x(x)
y = coord_conv_y(y)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':OBJNAME}
illum_dir = path + 'PHOTOMETRY/ILLUMINATION/' + FILTER + '/' + PPRUN + '/' + str(ROT)
os.system('mkdir -p ' + illum_dir)
epsilon = 0
index = 0
for term in cheby_terms_use:
index += 1
print index, ROT, term, fitvars[str(ROT)+'$'+term['n']]
epsilon += fitvars[str(ROT)+'$'+term['n']]*term['fx'](x,y)*term['fy'](x,y)
''' save pattern w/o chip zps '''
print 'writing'
hdu = pyfits.PrimaryHDU(epsilon)
im = illum_dir + '/nochipzps' + sample + sample_size + '.fits'
save_fit({'PPRUN':PPRUN,'FILTER':FILTER,'OBJNAME':OBJNAME,sample+'$'+sample_size+'$'+str(ROT)+'$im':im})
os.system('rm ' + im)
hdu.writeto(im)
''' save pattern w/ chip zps '''
if per_chip or same_chips:
print CHIPS, 'CHIPS'
for CHIP in CHIPS:
if str(dt['CRPIX1_' + str(CHIP)]) != 'None':
if False: #CONFIG == '10_3':
for sub_chip in ['1','2','3','4']:
from config_bonn import chip_divide_10_3
import re
xmin = float(dt['CRPIX1ZERO']) - float(dt['CRPIX1_' + str(CHIP)]) + chip_divide_10_3[sub_chip][0]
xmax = float(dt['CRPIX1ZERO']) - float(dt['CRPIX1_' + str(CHIP)]) + chip_divide_10_3[sub_chip][1]
ymin = float(dt['CRPIX2ZERO']) - float(dt['CRPIX2_' + str(CHIP)])
ymax = ymin + float(dt['NAXIS2_' + str(CHIP)])
print xmin, xmax, ymin, ymax, CHIP, 'CHIP'
print int(xmin/bin), int(xmax/bin), int(ymin/bin), int(ymax/bin), CHIP, 'CHIP', bin, scipy.shape(epsilon)
print epsilon[int(xmin/bin):int(xmax/bin)][int(ymin/bin):int(ymax/bin)]
print fitvars.keys()
if fitvars.has_key('zp_' + str(CHIP) + '_' + sub_chip):
print 'zp', fitvars['zp_' + str(CHIP) + '_' + sub_chip]
epsilon[int(ymin/bin):int(ymax/bin),int(xmin/bin):int(xmax/bin)] += float(fitvars['zp_' + str(CHIP) + '_' + sub_chip])
#raw_input()
else:
xmin = float(dt['CRPIX1ZERO']) - float(dt['CRPIX1_' + str(CHIP)])
xmax = xmin + float(dt['NAXIS1_' + str(CHIP)])
ymin = float(dt['CRPIX2ZERO']) - float(dt['CRPIX2_' + str(CHIP)])
ymax = ymin + float(dt['NAXIS2_' + str(CHIP)])
print xmin, xmax, ymin, ymax, CHIP, 'CHIP'
print int(xmin/bin), int(xmax/bin), int(ymin/bin), int(ymax/bin), CHIP, 'CHIP', bin, scipy.shape(epsilon)
print epsilon[int(xmin/bin):int(xmax/bin)][int(ymin/bin):int(ymax/bin)]
print fitvars.keys()
print 'zp', fitvars['zp_' + str(CHIP)]
epsilon[int(ymin/bin):int(ymax/bin),int(xmin/bin):int(xmax/bin)] += float(fitvars['zp_' + str(CHIP)])
print 'writing'
hdu = pyfits.PrimaryHDU(epsilon)
im = illum_dir + '/correction' + sample + sample_size + '.fits'
save_fit({'linearplot':1,'PPRUN':PPRUN,'FILTER':FILTER,'OBJNAME':OBJNAME,sample+'$'+sample_size+'$'+str(ROT)+'$im':im})
os.system('rm ' + im)
hdu.writeto(im)
print 'done'
if match:
''' calculate matched plot differences, before and after '''
for ROT in EXPS.keys():
data[ROT] = scipy.array(data[ROT])
print scipy.array(data[ROT]), ROT
print EXPS
color[ROT] = scipy.array(color[ROT])
''' apply the color term measured from the data '''
zp_correction = scipy.array([float(fitvars['zp_image_'+x]) for x in whichimage[ROT]])
#data1 = data[ROT] - fitvars['SDSS_color']*color[ROT] - zp_correction
if 1:
data1 = data[ROT] + fitvars['SDSS_color']*color[ROT] - zp_correction
#else:
# data1 = data[ROT] - zp_correction
#print data1, data[ROT], fitvars['SDSS_color'], color[ROT], zp_correction
#print len(data1)
data2 = data1 - (data1/data1*scipy.median(data1))
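# Subtract the median: data1/data1 is just an array of ones, used to broadcast the scalar
# median over the whole vector.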
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':OBJNAME}
illum_dir = path + 'PHOTOMETRY/ILLUMINATION/' + FILTER + '/' + PPRUN + '/' + str(ROT) + '/'
for kind,keyvalue in [['star',1]]: #['galaxy',0],
calcDataIllum(sample + kind + 'nocorr'+str(ROT)+FILTER,10000,8000,data2,magErr[ROT],X[ROT],Y[ROT],pth=illum_dir,rot=0,good=[Star[ROT],keyvalue])
dtmp = {}
var = variance(data2,magErr[ROT])
print 'var'
print var
dtmp[sample + 'stdnocorr$' + str(ROT)] = var[1]**0.5
dtmp[sample + 'redchinocorr$' + str(ROT)] = var[2]
#plot_color(color[ROT], data2)
import scipy
#print X[ROT]
x = coord_conv_x(scipy.array(X[ROT]))
y = coord_conv_y(scipy.array(Y[ROT]))
#epsilon = 0
#for term in cheby_terms:
# data += fitvars[term[str(ROT)+'$'+'n']]*term['fx'](x,y)*term['fy'](x,y)
epsilon=0
for term in cheby_terms_use:
epsilon += fitvars[str(ROT)+'$'+term['n']]*term['fx'](x,y)*term['fy'](x,y)
chipcorrect = []
#print chipnums
if CONFIG != '10_3':
for chip in chipnums[ROT]:
chipcorrect.append(fitvars['zp_' + str(int(float(chip)))])
chipcorrect = scipy.array(chipcorrect)
epsilon += chipcorrect
calcim = sample+kind+'correction'+str(ROT)+FILTER
calcDataIllum(calcim,10000,8000,epsilon,magErr[ROT],X[ROT],Y[ROT],pth=illum_dir,rot=0,good=[Star[ROT],keyvalue])
data2 -= epsilon
#print whichimage[ROT][0:100]
#data1 = data[ROT] - zp_correction
#data2 = data1 - (data1/data1*scipy.median(data1))
#plot_color(color[ROT], data2)
#print magErr[ROT][0:20]
calcim = sample+kind+'rot'+str(ROT)+FILTER
#print illum_dir
calcDataIllum(calcim,10000,8000,data2,magErr[ROT],X[ROT],Y[ROT],pth=illum_dir,rot=0,good=[Star[ROT],keyvalue])
var = variance(data2,magErr[ROT])
print 'second', var
dtmp[sample + 'stdcorr$' + str(ROT)] = var[1]**0.5
dtmp[sample + 'redchicorr$' + str(ROT)] = var[2]
dtmp.update({'PPRUN':PPRUN,'FILTER':FILTER,'OBJNAME':OBJNAME})
save_fit(dtmp)
print 'calcDataIllum', im, calcim, len(data[ROT])
#print params['SDSS_color'], 'SDSS_color'
print OBJNAME, FILTER, PPRUN, tmpdir
return
def construct_correction(OBJNAME,FILTER,PPRUN):
import scipy, re, string, os
''' create chebychev polynomials '''
cheby_x = [{'n':'0x','f':lambda x,y:1.},{'n':'1x','f':lambda x,y:x},{'n':'2x','f':lambda x,y:2*x**2-1},{'n':'3x','f':lambda x,y:4*x**3.-3*x}]
cheby_y = [{'n':'0y','f':lambda x,y:1.},{'n':'1y','f':lambda x,y:y},{'n':'2y','f':lambda x,y:2*y**2-1},{'n':'3y','f':lambda x,y:4*y**3.-3*y}]
cheby_terms = []
cheby_terms_no_linear = []
for tx in cheby_x:
for ty in cheby_y:
if not ((tx['n'] == '0x' and ty['n'] == '0y')): # or (tx['n'] == '0x' and ty['n'] == '1y') or (tx['n'] == '1x' and ty['n'] == '0y')) :
cheby_terms.append({'n':tx['n'] + ty['n'],'fx':tx['f'],'fy':ty['f']})
if not ((tx['n'] == '0x' and ty['n'] == '0y') or (tx['n'] == '0x' and ty['n'] == '1y') or (tx['n'] == '1x' and ty['n'] == '0y')) :
cheby_terms_no_linear.append({'n':tx['n'] + ty['n'],'fx':tx['f'],'fy':ty['f']})
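# Chebyshev polynomials of the first kind: T0(x)=1, T1(x)=x, T2(x)=2x^2-1, T3(x)=4x^3-3x.
# Each 2-D basis term is the product Ti(x)*Tj(y); the constant 0x0y term is dropped from
# cheby_terms, and cheby_terms_no_linear additionally drops the pure linear 0x1y and 1x0y terms.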
#if cov:
# samples = [['sdss',cheby_terms,True]] #,['None',cheby_terms_no_linear,False]] #[['None',cheby_terms_no_linear],['sdss',cheby_terms]]
#else:
# samples = [['None',cheby_terms_no_linear,False]]
samples = [['sdss',cheby_terms,True],['None',cheby_terms_no_linear,False]] #[['None',cheby_terms_no_linear],['sdss',cheby_terms]]
sample_size = 'all'
import re, time
dt = get_a_file(OBJNAME,FILTER,PPRUN)
d = get_fits(OBJNAME,FILTER,PPRUN)
print d.keys()
if d['sdss$good'] == 'y':
sample = 'sdss'
if d['None$good'] == 'y':
sample = 'None'
if d['bootstrap$good'] == 'y':
sample = 'bootstrap'
column_prefix = sample+'$'+sample_size+'$'
position_columns_names = re.split('\,',d[column_prefix + 'positioncolumns'])
print position_columns_names, 'position_columns_names'
fitvars = {}
cheby_terms_dict = {}
print column_prefix, position_columns_names
ROTS_dict = {}
for ele in position_columns_names:
print ele
if type(ele) != type({}):
ele = {'name':ele}
res = re.split('\$',ele['name'])
if len(res) > 1:
ROTS_dict[res[0]] = ''
print res
if string.find(ele['name'],'zp_image') == -1:
fitvars[ele['name']] = float(d[sample+'$'+sample_size+'$'+ele['name']])
for term in cheby_terms:
if term['n'] == ele['name'][2:]:
cheby_terms_dict[term['n']] = term
ROTS = ROTS_dict.keys()
print ROTS
zp_images = re.split(',',d[sample+'$'+sample_size+'$zp_images'])
zp_images_names = re.split(',',d[sample+'$'+sample_size+'$zp_images_names'])
for i in range(len(zp_images)):
fitvars[zp_images_names[i]] = float(zp_images[i])
cheby_terms_use = [cheby_terms_dict[k] for k in cheby_terms_dict.keys()]
print cheby_terms_use, fitvars
CHIPS = [int(x) for x in re.split(',',dt['CHIPS'])]
LENGTH1, LENGTH2 = float(dt['LENGTH1']), float(dt['LENGTH2'])
per_chip = True
coord_conv_x = lambda x:(2.*x-0-LENGTH1)/(LENGTH1-0)
coord_conv_y = lambda x:(2.*x-0-LENGTH2)/(LENGTH2-0)
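# coord_conv_x/y map pixel coordinates from [0, LENGTH] onto [-1, 1], the natural domain of
# the Chebyshev terms above: x' = (2*x - LENGTH)/LENGTH, so 0 -> -1 and LENGTH -> +1.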
''' make images of illumination corrections '''
for ROT in ROTS: #EXPS.keys():
size_x=LENGTH1
size_y=LENGTH2
bin=100
import numpy, math, pyfits, os
x,y = numpy.meshgrid(numpy.arange(0,size_x,bin),numpy.arange(0,size_y,bin))
F=0.1
print 'calculating'
x = coord_conv_x(x)
y = coord_conv_y(y)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':OBJNAME}
illum_dir = path + 'PHOTOMETRY/ILLUMINATION/' + FILTER + '/' + PPRUN + '/' + str(ROT)
os.system('mkdir -p ' + illum_dir)
epsilon = 0
index = 0
for term in cheby_terms_use:
index += 1
print index, ROT, term, fitvars[str(ROT)+'$'+term['n']]
epsilon += fitvars[str(ROT)+'$'+term['n']]*term['fx'](x,y)*term['fy'](x,y)
''' save pattern w/o chip zps '''
print 'writing'
hdu = pyfits.PrimaryHDU(epsilon)
im = illum_dir + '/nochipzps' + sample + sample_size + '.fits'
print 'before save'
save_fit({'PPRUN':PPRUN,'FILTER':FILTER,'OBJNAME':OBJNAME,sample+'$'+sample_size+'$'+str(ROT)+'$im':im})
print 'after save'
print im
os.system('rm ' + im)
print im
hdu.writeto(im)
''' save pattern w/ chip zps '''
print 'here'
trial = False
Test = False
children = []
if 1: #per_chip or same_chips:
child = False
for CHIP in CHIPS:
if not trial:
print 'forking'
child = os.fork()
if child:
children.append(child)
if not child:
if str(dt['CRPIX1_' + str(CHIP)]) != 'None':
xmin = int(float(dt['CRPIX1ZERO'])) - int(float(dt['CRPIX1_' + str(CHIP)]))
ymin = int(float(dt['CRPIX2ZERO'])) - int(float(dt['CRPIX2_' + str(CHIP)]))
xmax = xmin + int(dt['NAXIS1_' + str(CHIP)])
ymax = ymin + int(dt['NAXIS2_' + str(CHIP)])
print xmin, xmax, xmax - xmin, ymin, ymax, ymax-ymin, CHIP, 'CHIP'
print int(xmin/bin), int(xmax/bin), int(ymin/bin), int(ymax/bin), CHIP, 'CHIP', bin, scipy.shape(epsilon)
print epsilon[int(xmin/bin):int(xmax/bin)][int(ymin/bin):int(ymax/bin)]
print fitvars.keys()
print 'zp', fitvars['zp_' + str(CHIP)]
epsilon[int(ymin/bin):int(ymax/bin),int(xmin/bin):int(xmax/bin)] += float(fitvars['zp_' + str(CHIP)])
x,y = numpy.meshgrid(numpy.arange(xmin,xmax,1),numpy.arange(ymin,ymax,1))
x = coord_conv_x(x)
y = coord_conv_y(y)
''' correct w/ polynomial '''
epsilonC = 0
index = 0
#sum = [lambda u,v: fitvars[str(ROT)+'$'+term['n']]*term['fx'](u,v)*term['fy'](u,v) for term in cheby_terms_use]
#print sum
#p = lambda d,e: reduce(lambda a,b: a(d,e) + b(d,e), sum)
for term in cheby_terms_use:
index += 1
print index, ROT, term, fitvars[str(ROT)+'$'+term['n']]
epsilonC += fitvars[str(ROT)+'$'+term['n']]*term['fx'](x,y)*term['fy'](x,y)
''' add the zeropoint '''
epsilonC += float(fitvars['zp_' + str(CHIP)])
''' save pattern w/o chip zps '''
import math
print 'writing/converting to linear flux units'
hdu = pyfits.PrimaryHDU(10.**(epsilonC/2.5))
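# 10.**(epsilonC/2.5) converts the magnitude-space correction epsilonC into a multiplicative
# flux correction (the sign convention follows how epsilon was fit); the resulting image is
# later multiplied into the science frames and divided into the weights.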
im = tmpdir + str(ROT) + '_' + str(CHIP) + '.fits'
os.system('rm ' + im)
hdu.writeto(im)
import sys
print 'exiting'
#if not trial:
if not trial:
sys.exit(0)
for c in children:
os.waitpid(c,0)
print 'finished'
print 'writing'
''' apply the corrections to the images '''
import MySQLdb, sys, os, re
db2,c = connect_except()
command ="select file from illumination_db where SUPA not like '%I' and OBJNAME='" + OBJNAME + "' and PPRUN='" + PPRUN + "' and ROTATION='" + str(ROT) + "'"
print command
c.execute(command)
results=c.fetchall()
db_keys = describe_db(c,'illumination_db')
files = []
for line in results:
files.append(str(line[0]))
db2.close()
print files
trial = False
for file in files:
children = []
for CHIP in CHIPS:
child = False
if not trial:
child = os.fork()
if child:
children.append(child)
if not child:
RUN = re.split('\_',PPRUN)[0]
p = re.compile('\_\d+O')
file_chip = p.sub('_' + str(CHIP) + 'O',file)#.replace('.fits','.sub.fits')
import commands
info = commands.getoutput('dfits ' + file_chip + ' | fitsort -d ROTATION')
print info, file_chip
CHIP_ROT = str(int(re.split('\s+',info)[1]))
file_short = re.split('\/',file_chip)[-1]
SUPA = re.split('\_',file_short)[0]
print SUPA
if Test:
file_short = file_short.replace(SUPA,SUPA+'I')
file_chip = file_chip.replace(SUPA,SUPA+'I')
if int(CHIP_ROT) == int(ROT):
im = tmpdir + str(CHIP_ROT) + '_' + str(CHIP) + '.fits'
print im
weight_file = file_chip.replace('SCIENCE','WEIGHTS').replace('.fits','.weight.fits')
flag_file = file_chip.replace('SCIENCE','WEIGHTS').replace('.fits','.flag.fits')
print file_chip, weight_file
directory = reduce(lambda x,y: x + '/' + y, re.split('\/',file_chip)[:-1])
print directory, 'directory' ,file
filter_dir = directory.replace(FILTER+'_'+RUN,FILTER)
if Test:
out_directory = os.environ['subdir'] + '/TEST/' + FILTER + '_' + RUN + '/SCIENCE/'
out_filter_dir = os.environ['subdir'] + '/TEST/' + FILTER + '/SCIENCE/'
out_file = os.environ['subdir'] + '/TEST/' + FILTER + '_' + RUN + '/SCIENCE/' + file_short.replace('.fits','I.fits')
out_weight_file = out_file.replace('SCIENCE','WEIGHTS').replace('.fits','.weight.fits')
out_flag_file = out_file.replace('SCIENCE','WEIGHTS').replace('.fits','.flag.fits')
os.system('mkdir -p ' + out_directory)
os.system('mkdir -p ' + out_directory.replace('SCIENCE','WEIGHTS'))
''' make link to the header information '''
from glob import glob
print directory
os.system('mkdir -p ' + out_filter_dir)
print filter_dir, directory, out_filter_dir, out_directory, 'dirs'
print filter_dir+ '/head*'
print glob(filter_dir+ '/head*')
for file_scamp in glob(filter_dir+ '/head*'):
command = 'ln -s ' + file_scamp + ' ' + out_filter_dir
print command
os.system(command)
os.system('rm ' + out_weight_file)
command = 'ln -s ' + weight_file + ' ' + out_weight_file
print command
os.system(command)
os.system('rm ' + out_flag_file)
command = 'ln -s ' + flag_file + ' ' + out_flag_file
print command
os.system(command)
command = 'sethead ' + out_file + ' OBJNAME=TEST'
print command
os.system(command)
else:
out_file = os.environ['subdir'] + '/' + OBJNAME + '/' + FILTER + '/SCIENCE/' + file_short.replace('.fits','I.fits')
out_weight_file = os.environ['subdir'] + '/' + OBJNAME + '/' + FILTER + '/WEIGHTS/' + file_short.replace('.fits','I.weight.fits')
bad_out_weight_file = os.environ['subdir'] + '/' + OBJNAME + '/' + FILTER + '/SCIENCE/' + file_short.replace('.fits','I.weight.fits')
os.system('rm ' + out_file)
command = "ic '%1 %2 *' " + file_chip + " " + im + "> " + out_file
print command
os.system(command)
os.system('rm ' + bad_out_weight_file) # remove this file, which was accidentally put there
os.system('rm ' + out_weight_file)
command = "ic '%1 %2 /' " + weight_file + " " + im + "> " + out_weight_file
print command
os.system(command)
import sys
if not trial:
sys.exit(0)
for child in children:
os.waitpid(child,0)
print 'finished'
save_fit({'PPRUN':PPRUN,'FILTER':FILTER,'OBJNAME':OBJNAME,'correction_applied':'yes'})
def correct_image():
''' make diagnostic plots '''
if 1:
import re
d = get_fits(CLUSTER,FILTER,PPRUN)
column_prefix = sample+'$'+sample_size+'$'
position_columns_names = re.split('\,',d[column_prefix + 'positioncolumns'])
fitvars = {}
cheby_terms_dict = {}
for ele in position_columns:
res = re.split('\$',ele['name'])
fitvars[ele['name']] = float(d[sample+'$'+sample_size+'$'+ele['name']])
for term in cheby_terms:
if term['n'] == ele['name'][2:]:
cheby_terms_dict[term['n']] = term
cheby_terms_use = [cheby_terms_dict[k] for k in cheby_terms_dict.keys()]
print cheby_terms_use, fitvars
''' make images of illumination corrections '''
for ROT in EXPS.keys():
size_x=LENGTH1
size_y=LENGTH2
bin=100
import numpy, math, pyfits, os
x,y = numpy.meshgrid(numpy.arange(0,size_x,bin),numpy.arange(0,size_y,bin))
F=0.1
print 'calculating'
x = coord_conv_x(x)
y = coord_conv_y(y)
epsilon = 0
index = 0
for term in cheby_terms_use:
index += 1
print index, ROT, term, fitvars[str(ROT)+'$'+term['n']]
epsilon += fitvars[str(ROT)+'$'+term['n']]*term['fx'](x,y)*term['fy'](x,y)
print 'writing'
hdu = pyfits.PrimaryHDU(epsilon)
def residual_plots():
for ROT in EXPS.keys():
print 'ROT', ROT
fitvars = {}
for ele in position_columns:
res = re.split('\$',ele['name'])
if res[0] == ROT:
fitvars[ele['name'][2:]] = U[ele['index']]
save_fit({'PPRUN':PPRUN,'FILTER':FILTER,'CLUSTER':CLUSTER,sample+'$'+sample_size+'$'+ele['name'].replace('$','$'):fitvars[ele['name'][2:]]})
print ele['name'], fitvars[ele['name'][2:]]
if 0:
uu = open(tmpdir + '/fitvars' + ROT,'w')
import pickle
pickle.dump(fitvars,uu)
uu.close()
size_x=8000
size_y=10000
bin=100
import numpy, math, pyfits, os
x,y = numpy.meshgrid(numpy.arange(0,size_x,bin),numpy.arange(0,size_y,bin))
F=0.1
print 'calculating'
x = coord_conv_x(x)
y = coord_conv_y(y)
epsilon = 0
for term in cheby_terms_use:
epsilon += fitvars[term['n']]*term['fx'](x,y)*term['fy'](x,y)
print 'writing'
hdu = pyfits.PrimaryHDU(epsilon)
#os.system('rm ' + tmpdir + '/correction' + ROT + filter + sample_size + '.fits')
#hdu.writeto(tmpdir + '/correction' + ROT + filter + sample_size + '.fits')
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':CLUSTER}
illum_dir = path + 'PHOTOMETRY/ILLUMINATION/' + FILTER + '/' + str(ROT)
os.system('mkdir -p ' + illum_dir)
im = illum_dir + '/correction' + sample + sample_size + '.fits'
save_fit({'PPRUN':PPRUN,'FILTER':FILTER,'CLUSTER':CLUSTER,sample+'$'+sample_size+'$'+str(ROT)+'$im':im})
os.system('rm ' + im)
hdu.writeto(im)
#print 'done'
epsilon = 10.**(epsilon/2.5)
#correction = 10.**(epsilon/2.5)
# xaxis is always vertical!!!
#print 'writing'
#hdu = pyfits.PrimaryHDU(epsilon)
#os.system('rm ' + tmpdir + '/fcorrection' + ROT + filter + '.fits')
#hdu.writeto(tmpdir + '/fcorrection' + ROT + filter + '.fits')
#print 'done'
return
def fit():
maxSigIter=50
solutions = []
import pickle
''' get data '''
EXPS = getTableInfo()
print EXPS
#ROTS, data, err, X, Y, maxVal, classStar = diffCalcNew()
#save = {'ROTS': ROTS, 'data':data,'err':err,'X':X,'Y':Y,'maxVal':maxVal,'classStar':classStar}
#uu = open(tmpdir + '/store','w')
#import pickle
#pickle.dump(save,uu)
#uu.close()
''' EXPS has all of the image information for different rotations '''
''' make model '''
fit = make_model(EXPS)
print fit
star_good = selectGoodStars(EXPS)
uu = open(tmpdir + '/store','w')
import pickle
pickle.dump(star_good,uu)
uu.close()
import pickle
f=open(tmpdir + '/store','r')
m=pickle.Unpickler(f)
star_good=m.load()
fit['class'] = phot_funct(fit['model'],fit['fixed'],EXPS,star_good,fit['apply'])
import astropy, astropy.io.fits as pyfits
p = pyfits.open(tmpdir + '/final.cat')
table = p[1].data
import copy
table_save = copy.copy(table)
for i in range(maxSigIter):
fa = {"table": table_save}
func = fit['class'].calc_model
#functkw takes input data arrays
#parinfo takes initial guess and constraints on parameters
#import optimize
#params, covar, info, mesg, ier = optimize.leastsq(func,guess,args = (points,vals,errs), full_output=True)
import mpfit
m = mpfit.mpfit(func, functkw=fa,
parinfo=fit['class'].parstart,
maxiter=1000, quiet=0)
print m.params, m.perror
if (m.status <= 0):
print 'error message = ', m.errmsg
condition = Numeric.zeros(len(data))
break
print m.params,m.perror
#fits = [{'vars':['zp','color1coeff','color1coeff2'],'parinfo':[{'value':p[0],'fixed':0},{'value':p[1],'fixed':0},{'value':p[2],'fixed':0},'function':phot_funct_secondorder,'fit_type':'no_airmass'}]
fit['class'].fitvars = {}
for ele in range(len(fit['class'].smodel)):
print ele, fit['class'].smodel
name = make_name(fit['class'].smodel[ele])
print ele, fit['class'].fitvars, name, m.params[ele]
fit['class'].fitvars[name] = m.params[ele]
fit['class'].fitvars[name + '_err'] = m.perror[ele]
perror = copy.copy(m.perror)
# Compute a 3 sigma rejection criterion
print m.params, data_rec[0], data[0]
#condition, redchisq = SigmaCond(params, data_save, data,
# airmass_save, airmass,
# color1_save, color1, color2_save, color2, err_save, err, sigmareject)
calcIllum(10000, 10000, 100, fit)
if len(data_save) > 1:
(mo_save, reddm) = fit['class'].calc_sigma(m.params, airmass_save, color1_save, color2_save, data_save, err_save, X_save, Y_save)
#reddm = (data-mo)/err
redchisq = Numeric.sqrt(Numeric.sum(Numeric.power(reddm, 2)) / (len(reddm) - 1))
dm = data_save-mo_save
#dm_save = data_save - mo_save
print len(data_save), len(mo_save)
dm_save = data_save - mo_save
mean = Numeric.sum(dm)/len(dm)
sigma = Numeric.sqrt(Numeric.sum(Numeric.power(mean-dm, 2)) / (len(dm) - 1))
# you can pick either
#condition = Numeric.less(Numeric.fabs(dm_save), float(sigmareject) * sigma)
condition = Numeric.less(Numeric.fabs(dm_save), float(sigmareject) * err_save)
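# The commented-out alternative clips at sigmareject * sigma (the global scatter of the
# residuals); the active line clips at sigmareject * err_save, i.e. each point's own error bar.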
else:
condition = Numeric.zeros(len(data_save))
print redchisq
# Keep everything (from the full data set!) that is within
# the 3 sigma criterion
#data_sig = Numeric.compress(condition, data_save)
data = Numeric.compress(condition, data_rec)
err = Numeric.compress(condition, err_save)
X = Numeric.compress(condition, X_save)
Y = Numeric.compress(condition, Y_save)
new_len = len(data)
if float(new_len)/float(save_len) < 0.5:
print "Rejected more than 50% of all measurements."
print "Aborting this fit."
break
# No change
if new_len == old_len:
print "Converged! (%d iterations)" % (i+1, )
print "Kept %d/%d stars." % (new_len, save_len)
break
#print params, perror, condition
meanerr = Numeric.sum(err_save)/len(err_save)
def make_name(name):
if len(name) > 1:
name = reduce(lambda x,y: x + 'T' + y,name)
else:
name = name[0]
return name
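# Example of the naming convention above (hypothetical inputs, for illustration only):
#   make_name(['zp']) -> 'zp'
#   make_name(['zp', 'color1coeff']) -> 'zpTcolor1coeff'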
''' reduce size of SDSS data '''
def convert_SDSS_cat(SUPA,FLAT_TYPE):
from config_bonn import info
import utilities, Numeric, os
reload(utilities)
from utilities import *
dict = get_files(SUPA,FLAT_TYPE)
print dict.keys()
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
print dict['starcat']
import astropy, astropy.io.fits as pyfits
hdulist1 = pyfits.open(dict['starcat'])
#print hdulist1["STDTAB"].columns
table = hdulist1["STDTAB"].data
other_info = info[dict['filter']]
filters_info = make_filters_info([dict['filter']])
compband = filters_info[0][1] ## use the SDSS/other comparison band
color1which = other_info['color1']
print filters_info, compband
print dict['OBJNAME']
for key in dict.keys():
import string
if string.find(key,'color') != -1:
print key
cols = [pyfits.Column(name=column.name, format=column.format,array=Numeric.array(0 + hdulist1["STDTAB"].data.field(column.name))) for column in hdulist1["STDTAB"].columns]
cols.append(pyfits.Column(name='stdMag_corr', format='E',array=Numeric.array(0 + hdulist1["STDTAB"].data.field(compband+'mag'))))
cols.append(pyfits.Column(name='stdMagErr_corr', format='E',array=Numeric.array(0 + hdulist1["STDTAB"].data.field(compband+'err'))))
cols.append(pyfits.Column(name='stdMagColor_corr', format='E',array=Numeric.array(0 + hdulist1["STDTAB"].data.field(color1which))))
cols.append(pyfits.Column(name='stdMagClean_corr', format='E',array=Numeric.array(0 + hdulist1["STDTAB"].data.field('Clean'))))
type = 'star'
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
outcat = path + 'PHOTOMETRY/ILLUMINATION/sdssmatch__' + search_params['SUPA'] + '_' + type + '.cat'
print cols
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
tbhu = pyfits.BinTableHDU.from_columns(cols)
hdulist.append(tbhu)
hdulist[1].header['EXTNAME']='OBJECTS'
os.system('rm ' + outcat)
hdulist.writeto( outcat )
print 'wrote out new cat'
save_exposure({'sdssmatch':outcat},SUPA,FLAT_TYPE)
def apply_photometric_calibration(SUPA,FLAT_TYPE,starcat):
from config_bonn import info
import utilities, Numeric, os
reload(utilities)
dict = get_files(SUPA,FLAT_TYPE)
print dict.keys()
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
#print dict['starcat']
import astropy, astropy.io.fits as pyfits
hdulist1 = pyfits.open(starcat)
#print hdulist1["STDTAB"].columns
table = hdulist1["STDTAB"].data
other_info = info[dict['filter']]
filters_info = utilities.make_filters_info([dict['filter']])
compband = filters_info[0][1] ## use the SDSS/other comparison band
color1which = other_info['color1']
print filters_info, compband
print dict['OBJNAME']
for key in dict.keys():
import string
if string.find(key,'color') != -1:
print key
#calib = get_calibrations_threesecond(dict['OBJNAME'],filters_info)
#print 'calib', calib
#raw_input()
model = utilities.convert_modelname_to_array('zpPcolor1') #dict['model_name%'+dict['filter']])
cols = [] #pyfits.Column(name=column.name, format=column.format,array=Numeric.array(0 + hdulist1["STDTAB"].data.field(column.name))) for column in hdulist1["STDTAB"].columns]
print cols
#print start
print 'data start'
#data = utilities.color_std_correct(model,dict,table,dict['filter'],compband+'mag',color1which) # correct standard magnitude into instrumental system -- at least get rid of the color term
from copy import copy
data = copy(table.field(compband+'mag'))
print 'data done'
cols.append(pyfits.Column(name='stdMag_corr', format='E',array=data))
cols.append(pyfits.Column(name='stdMagErr_corr', format='E',array=Numeric.array(0 + hdulist1["STDTAB"].data.field(compband+'err'))))
cols.append(pyfits.Column(name='stdMagColor_corr', format='E',array=Numeric.array(0 + hdulist1["STDTAB"].data.field(color1which))))
cols.append(pyfits.Column(name='stdMagClean_corr', format='E',array=Numeric.array(0 + hdulist1["STDTAB"].data.field('Clean'))))
type = 'star'
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
outcat = path + 'PHOTOMETRY/ILLUMINATION/sdssmatch__' + search_params['SUPA'] + '_' + type + '.cat'
print cols
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
tbhu = pyfits.BinTableHDU.from_columns(cols)
hdulist.append(tbhu)
hdulist[1].header['EXTNAME']='OBJECTS'
os.system('rm ' + outcat)
print 'writing out new cat'
hdulist.writeto( outcat )
print 'wrote out new cat'
save_exposure({'sdssmatch':outcat},SUPA,FLAT_TYPE)
''' read in the photometric calibration and apply it to the data '''
def get_cats_ready(SUPA,FLAT_TYPE,galaxycat,starcat):
from config_bonn import info, wavelength_order
import utilities, Numeric, os
reload(utilities)
dict = get_files(SUPA,FLAT_TYPE)
print dict.keys()
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
''' figure out the correct color and magnitudes for the filter '''
colors_in_cat = ['W-C-RC','W-S-Z+']
def find_index(color):
index = -99
for i in range(len(wavelength_order)):
if wavelength_order[i] == color:
index = i
if index == -99:
raise ValueError('cannot find filter ' + color + ' in wavelength_order')
return index
colors_indices = [find_index(color) for color in colors_in_cat]
print colors_indices
#raw_input()
#print dict['starcat']
tmp = {}
import astropy, astropy.io.fits as pyfits
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params['OBJNAME']}
for type,cat in [['star',starcat]]: #['galaxy',galaxycat],
hdulist1 = pyfits.open(cat)
#print hdulist1["STDTAB"].columns
table = hdulist1["STDTAB"].data
other_info = info[dict['filter']]
filters_info = utilities.make_filters_info([dict['filter']])
compband = filters_info[0][1] ## use the SDSS/other comparison band
color1which = other_info['color1']
print filters_info, compband
print dict['OBJNAME']
for key in dict.keys():
import string
if string.find(key,'color') != -1:
print key
#calib = get_calibrations_threesecond(dict['OBJNAME'],filters_info)
#print 'calib', calib
#raw_input()
model = utilities.convert_modelname_to_array('zpPcolor1') #dict['model_name%'+dict['filter']])
cols = [] #pyfits.Column(name=column.name, format=column.format,array=Numeric.array(0 + hdulist1["STDTAB"].data.field(column.name))) for column in hdulist1["STDTAB"].columns]
print cols
#print start
print 'data start'
#data = utilities.color_std_correct(model,dict,table,dict['filter'],compband+'mag',color1which) # correct standard magnitude into instrumental system -- at least get rid of the color term
from copy import copy
data = copy(table.field(compband+'mag'))
print 'data done', 'here'
print (data)
cols.append(pyfits.Column(name='stdMag_corr', format='E',array=data))
#print (Numeric.array(0 + hdulist1["STDTAB"].data.field(compband+'err')))
cols.append(pyfits.Column(name='stdMagErr_corr', format='E',array=Numeric.array(0 + hdulist1["STDTAB"].data.field(compband+'err'))))
#print (Numeric.array(0 + hdulist1["STDTAB"].data.field(color1which)))
cols.append(pyfits.Column(name='stdMagColor_corr', format='E',array=Numeric.array(0 + hdulist1["STDTAB"].data.field(color1which))))
#print (Numeric.array(0 + hdulist1["STDTAB"].data.field('Clean')))
cols.append(pyfits.Column(name='stdMagClean_corr', format='E',array=Numeric.array(0 + hdulist1["STDTAB"].data.field('Clean'))))
#print (Numeric.array(0 + hdulist1["STDTAB"].data.field('Ra')))
cols.append(pyfits.Column(name='ALPHA_J2000', format='E',array=Numeric.array(0 + hdulist1["STDTAB"].data.field('Ra'))))
#print (Numeric.array(0 + hdulist1["STDTAB"].data.field('Dec')))
cols.append(pyfits.Column(name='DELTA_J2000', format='E',array=Numeric.array(0 + hdulist1["STDTAB"].data.field('Dec'))))
#print (Numeric.array(0 + hdulist1["STDTAB"].data.field('SeqNr')))
cols.append(pyfits.Column(name='SeqNr', format='E',array=Numeric.array(0 + hdulist1["STDTAB"].data.field('SeqNr'))))
length = len(hdulist1["STDTAB"].data.field('SeqNr'))
if type == 'star':
cols.append(pyfits.Column(name='Star_corr', format='E',array=Numeric.ones(length)))
else:
cols.append(pyfits.Column(name='Star_corr', format='E',array=Numeric.zeros(length)))
outcat = path + 'PHOTOMETRY/ILLUMINATION/' + type + 'sdssmatch__' + search_params['SUPA'] + '_' + type + '.cat'
print cols, 'here'
hdu = pyfits.PrimaryHDU()
print 'hdulist'
hdulist = pyfits.HDUList([hdu])
print 'tbhu'
tbhu = pyfits.BinTableHDU.from_columns(cols)
print 'hdulist'
hdulist.append(tbhu)
print 'headers'
hdulist[1].header['EXTNAME']='OBJECTS'
os.system('rm ' + outcat)
print 'writing out', outcat
hdulist.writeto( outcat )
print 'wrote out new cat'
save_exposure({type + 'sdssmatch':outcat},SUPA,FLAT_TYPE)
tmp[type + 'sdssmatch'] = outcat
import calc_tmpsave
outcat = path + 'PHOTOMETRY/ILLUMINATION/sdssmatch__' + search_params['SUPA'] + '_' + type + '.cat'
#calc_tmpsave.paste_cats([tmp['galaxysdssmatch'],tmp['starsdssmatch']],outcat,index=1)
calc_tmpsave.paste_cats([tmp['starsdssmatch']],outcat,index=1)
#calc_tmpsave.paste_cats([tmp['galaxysdssmatch']],outcat,index=1)
#print tmp['galaxysdssmatch'],tmp['starsdssmatch']
#calc_tmpsave.paste_cats([tmp['galaxysdssmatch']],outcat,index=1)
print 'added', outcat
return outcat
def plot_color(color,data,a=None,m=None):
import numpy, math, pyfits, os
import copy
from ppgplot import *
pgbeg("/XTERM",1,1)
pgiden()
pgpanl(1,1)
from Numeric import *
x = copy.copy(color) #hdulist1["OBJECTS"].data.field(color1which)
y = copy.copy(data) #hdulist1["OBJECTS"].data.field(compband+'mag') - data
plotx = copy.copy(x)
ploty = copy.copy(y)
x.sort()
y.sort()
mediany = y[int(len(y)/2.)]
lowx=-2 #x[2]
highx=2 #x[-2]
lowy=mediany + 1.5
highy=mediany -1.5
pgswin(lowx,highx,lowy,highy)
plotx = array(plotx)
ploty = array(ploty)
if a is not None:
print a, m
pgline(array(a), array(m))
#pylab.scatter(z,x)
pglab('Mag','Mag - Mag(Inst)')
#print plotx, ploty
pgpt(plotx,ploty,3)
pgbox()
pgend()
def hold():
if 0: #star['sdss']:
star_A = []
star_B = []
sigmas = []
for exp in star['supa files']:
row_num += 1
col_num = -1
rotation = exp['rotation']
#sigma = tab[str(rotation) + '$' + exp['name'] + '$MAGERR_AUTO'][star['table index']]
sigma = tab['SDSSstdMagErr_corr'][star['table index']]
for c in position_columns:
col_num += 1
first_column = True
for c in zp_columns:
col_num += 1
''' remember that the good magnitude does not have any zp dependence!!! '''
#if (first_column is not True and c['image'] == exp['name']) or c['image'] == 'sdss':
if c['image'] == 'sdss':
value = 1./sigma
star_A.append([row_num,col_num,value])
first_column = False
''' fit for the color term dependence '''
for c in color_columns:
col_num += 1
value = tab['SDSSstdMagColor_corr'][star['table index']]/sigma
star_A.append([row_num,col_num,value])
col_num += 1
''' magnitude column -- include the correct/common magnitude '''
value = 1./sigma
star_A.append([row_num,col_num+supa_num,value])
#value = (tab[str(rotation)+'$'+exp['name']+'$MAG_AUTO'][star['table index']] - tab['SDSSstdMag_corr'][star['table index']])/sigma
#print tab[str(rotation)+'$'+exp['name']+'$MAG_AUTO'][star['table index']], tab['SDSSstdMag_corr'][star['table index']]
data[rotation].append(tab[str(rotation)+'$'+exp['name']+'$MAG_AUTO'][star['table index']] - tab['SDSSstdMag_corr'][star['table index']])
magErr[rotation].append(tab['SDSSstdMagErr_corr'][star['table index']])
whichimage[rotation].append(exp['name'])
X[rotation].append(tab[str(rotation) + '$' + exp['name'] + '$Xpos_ABS'][star['table index']])
Y[rotation].append(tab[str(rotation) + '$' + exp['name'] + '$Ypos_ABS'][star['table index']])
color[rotation].append(tab['SDSSstdMagColor_corr'][star['table index']])
star_B.append([row_num,value])
sigmas.append(sigma)
inst.append({'type':'sdss','A_array':star_A, 'B_array':star_B, 'sigma_array': sigmas})
def save_fit(dict,OBJNAME=None,FILTER=None,PPRUN=None):
if OBJNAME!= None and FILTER!= None and PPRUN!=None:
dict['OBJNAME'] = OBJNAME
dict['FILTER'] = FILTER
dict['PPRUN'] = PPRUN
db2,c = connect_except()
db = 'fit_db'
#c.execute("DROP TABLE IF EXISTS fit_db")
command = "CREATE TABLE IF NOT EXISTS " + db + " ( id MEDIUMINT NOT NULL AUTO_INCREMENT, PRIMARY KEY (id))"
#print command
c.execute(command)
db_keys = describe_db(c,db)
from copy import copy
floatvars = {}
stringvars = {}
#copy array but exclude lists
import string, traceback, sys
letters = string.ascii_lowercase + string.ascii_uppercase.replace('E','') + '_' + '-' + ','
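# Heuristic type detection: a value is stored as a float only if its string form contains none
# of these characters (all letters except uppercase 'E', plus '_', '-' and ','); anything else,
# including negative numbers because of the '-', goes into stringvars and is written to a
# varchar column instead.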
for ele in dict.keys():
type = 'float'
for l in letters:
if string.find(str(dict[ele]),l) != -1:
type = 'string'
if type == 'float':
floatvars[ele] = str(float(dict[ele]))
elif type == 'string':
stringvars[ele] = dict[ele]
# make database if it doesn't exist
#print 'floatvars', floatvars
#print 'stringvars', stringvars
for column in stringvars:
stop = False
for key in db_keys:
import string
if key.lower() == column.lower(): stop = True
if not stop:
try:
if string.find(column,'reject_plot') != -1 or string.find(column,'im') != -1 or string.find(column,'positioncolumns') != -1:
command = 'ALTER TABLE ' + db + ' ADD ' + column + ' varchar(1000)'
elif string.find(column,'zp_image') != -1:
command = 'ALTER TABLE ' + db + ' ADD ' + column + ' varchar(3000)'
else:
command = 'ALTER TABLE ' + db + ' ADD ' + column + ' varchar(100)'
c.execute(command)
except:
print traceback.print_exc(file=sys.stdout)
for column in floatvars:
stop = False
for key in db_keys:
import string
if key.lower() == column.lower(): stop = True
if not stop:
try:
command = 'ALTER TABLE ' + db + ' ADD ' + column + ' float(15)'
c.execute(command)
except:
print traceback.print_exc(file=sys.stdout)
# insert new observation
#print db_keys
OBJNAME = dict['OBJNAME']
FILTER = dict['FILTER']
PPRUN = dict['PPRUN']
command = "SELECT OBJNAME from " + db + " where OBJNAME = '" + OBJNAME + "' and FILTER = '" + FILTER + "' and PPRUN='" + PPRUN + "'"
#print command
c.execute(command)
#print OBJNAME, FILTER, PPRUN
results = c.fetchall()
#print results
if len(results) > 0:
print 'already added'
else:
command = "INSERT INTO " + db + " (OBJNAME,FILTER,PPRUN) VALUES ('" + dict['OBJNAME'] + "','" + dict['FILTER'] + "','" + dict['PPRUN'] + "')"
#print command
c.execute(command)
import commands
vals = ''
for key in stringvars.keys():
#print key, stringvars[key]
vals += ' ' + key + "='" + str(stringvars[key]) + "',"
for key in floatvars.keys():
#print key, floatvars[key]
vals += ' ' + key + "='" + floatvars[key] + "',"
vals = vals[:-1]
if len(vals) > 1:
command = "UPDATE " + db + " set " + vals + " WHERE OBJNAME='" + dict['OBJNAME'] + "' AND FILTER='" + dict['FILTER'] + "' AND PPRUN='" + dict['PPRUN'] + "'"
print command
c.execute(command)
#print vals
#names = reduce(lambda x,y: x + ',' + y, [x for x in floatvars.keys()])
#values = reduce(lambda x,y: str(x) + ',' + str(y), [floatvars[x] for x in floatvars.keys()])
#names += ',' + reduce(lambda x,y: x + ',' + y, [x for x in stringvars.keys()])
#values += ',' + reduce(lambda x,y: x + ',' + y, ["'" + str(stringvars[x]) + "'" for x in stringvars.keys()])
#command = "INSERT INTO illumination_db (" + names + ") VALUES (" + values + ")"
#print command
#os.system(command)
def gather_exposures_all(filters=None):
#if not filters:
# filters = ['B','W-J-B','W-J-V','W-C-RC','W-C-IC','I','W-S-Z+']
import os, re
from glob import glob
dirs = glob(os.environ['subdir'] + '/*')
print len(dirs)
for i in range(len(dirs)):
dir = dirs[i]
print 'dir',dir
subdirs = glob(dir + '/*')
for subdir in subdirs:
try:
slash = re.split('/',subdir)[-1]
res = re.split('_',slash)
if len(res) > 1:
files = glob(subdir+'/SCIENCE/*fits')
if len(files)>0:
#search_params = initialize(filter,OBJNAME)
import os, re, bashreader, sys, string, utilities
from glob import glob
from copy import copy
#files = glob(searchstr)
files.sort()
exposures = {}
import MySQLdb, sys, os, re
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
for file in files:
if string.find(file,'links') == -1 and string.find(file,'wcs') == -1 and string.find(file,'.sub.fits') == -1:
res = re.split('_',re.split('/',file)[-1])
exp_name = res[0]
if not exposures.has_key(exp_name): exposures[exp_name] = {'images':[],'keywords':{}}
exposures[exp_name]['images'].append(file) # exp_name is the root of the image name
if len(exposures[exp_name]['keywords'].keys()) == 0: #not exposures[exp_name]['keywords'].has_key('ROTATION'): #if exposure does not have keywords yet, then get them -- this makes sure you only record each SUPA file once!!!
#exposures[exp_name]['keywords']['filter'] = filter
exposures[exp_name]['keywords']['file'] = file
res2 = re.split('/',file)
#for r in res2:
# if string.find(r,filter) != -1:
# print r
# exposures[exp_name]['keywords']['date'] = r.replace(filter + '_','')
# exposures[exp_name]['keywords']['fil_directory'] = r
# search_params['fil_directory'] = r
kws = utilities.get_header_kw(file,['CRVAL1','CRVAL2','ROTATION','OBJECT','GABODSID','CONFIG','EXPTIME','AIRMASS','INSTRUM','PPRUN','BADCCD','FILTER']) # return KEY/NA if not SUBARU
''' figure out PPRUN '''
import commands
readlink = commands.getoutput('readlink -f ' + file)
res = re.split('SUBARU/',readlink)
res = re.split('/',res[1])
kws['PPRUN'] = res[0]
''' figure out OBJNAME '''
res = re.split('SUBARU/',file)
res = re.split('/',res[1])
kws['OBJNAME'] = res[0]
print kws['OBJNAME'], 'OBJNAME'
''' figure out a way to break into SKYFLAT, DOMEFLAT '''
ppid = str(os.getppid())
command = 'dfits ' + file
file = commands.getoutput(command)
import string
if string.find(file,'SKYFLAT') != -1: exposures[exp_name]['keywords']['FLAT_TYPE'] = 'SKYFLAT'
elif string.find(file,'DOMEFLAT') != -1: exposures[exp_name]['keywords']['FLAT_TYPE'] = 'DOMEFLAT'
import string
file = re.split('\n',file)
for line in file:
print line
if string.find(line,'Flat frame:') != -1 and string.find(line,'illum') != -1:
import re
res = re.split('SET',line)
if len(res) > 1:
res = re.split('_',res[1])
set = res[0]
exposures[exp_name]['keywords']['FLAT_SET'] = set
res = re.split('illum',line)
res = re.split('\.',res[1])
smooth = res[0]
exposures[exp_name]['keywords']['SMOOTH'] = smooth
break
for kw in kws.keys():
exposures[exp_name]['keywords'][kw] = kws[kw]
exposures[exp_name]['keywords']['SUPA'] = exp_name
#exposures[exp_name]['keywords']['OBJNAME'] = OBJNAME
print exposures[exp_name]['keywords']
save_exposure(exposures[exp_name]['keywords'])
except KeyboardInterrupt:
raise
except:
ppid_loc = str(os.getppid())
print sys.exc_info()
print 'something else failed',ppid, ppid_loc
return exposures
def run_telarchive(ra,dec,objname):
from ephem import *
coord = Equatorial(str(ra/15.),str(dec))
ra = str(coord.get()[0]).replace(':',' ')
dec = str(coord.get()[1]).replace(':',' ')
print 'ra','dec',ra,dec
import commands, re, string
command = 'python dosearch.py --coords="' + ra + ' ' + dec + '" 6.0'
print command
out = commands.getoutput(command)
#i = open('ij','w')
#i.write(out)
#i.close()
#out = open('ij','r').read()
print out
res = re.split('\n',out)
print res
d = {}
for i in res:
res_t = re.split('\t',i)
if len(res_t) > 1:
if res_t[1] != '':
name = re.split('\s+',re.split(':',res_t[1])[0])[0]
d[name + '_info'] = ' '
if string.find(re.split(':',res_t[1])[1],'No data found') != -1:
d[name + '_data'] = 0
elif string.find(re.split(':',res_t[1])[0],'Sloan Digital') != -1:
d[name + '_data'] = 1
else:
print res_t[1]
a = re.split(':',res_t[1])[1]
print a
b = re.split('\(',a)[1]
c = re.split('\s+',b)[0]
d[name + '_data'] = c
else: d[name + '_info'] += res_t[2] + '; '
print objname, d
return d
def get_observations():
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
db_keys = describe_db(c)
command = "CREATE TABLE IF NOT EXISTS telarchive_db ( id MEDIUMINT NOT NULL AUTO_INCREMENT, PRIMARY KEY (id))"
print command
#c.execute("DROP TABLE IF EXISTS telarchive_db")
c.execute(command)
keystop = ['PPRUN','ROTATION','OBJNAME']
list = reduce(lambda x,y: x + ',' + y, keystop)
command="SELECT * from illumination_db LEFT OUTER JOIN telarchive_db on telarchive_db.OBJNAME=illumination_db.OBJNAME where illumination_db.OBJNAME is not null and illumination_db.OBJNAME!='HDFN' and illumination_db.OBJNAME!='COSMOS' and telarchive_db.HST_data is NULL GROUP BY illumination_db.OBJNAME"
print command
c.execute(command)
results=c.fetchall()
for line in results:
dtop = {}
for i in range(len(db_keys)):
dtop[db_keys[i]] = str(line[i])
print dtop['CRVAL1'],dtop['CRVAL2'],dtop['OBJNAME']
dict = run_telarchive(float(dtop['CRVAL1']),dtop['CRVAL2'],dtop['OBJNAME'])
OBJNAME = dtop['OBJNAME']
dict['OBJNAME'] = OBJNAME
floatvars = {}
stringvars = {}
#copy array but exclude lists
import string
letters = string.ascii_lowercase + string.ascii_uppercase.replace('E','') + '_' + '-'
for ele in dict.keys():
print ele, dict[ele]
type = 'float'
for l in letters:
if string.find(str(dict[ele]),l) != -1 or dict[ele] == ' ':
type = 'string'
if type == 'float':
floatvars[ele] = str(float(dict[ele]))
elif type == 'string':
stringvars[ele] = dict[ele]
# make database if it doesn't exist
print 'floatvars', floatvars
print 'stringvars', stringvars
for column in stringvars:
try:
command = 'ALTER TABLE telarchive_db ADD ' + column + ' varchar(240)'
c.execute(command)
except: nope = 1
for column in floatvars:
try:
command = 'ALTER TABLE telarchive_db ADD ' + column + ' float(30)'
c.execute(command)
except: nope = 1
c.execute("SELECT OBJNAME from telarchive_db where OBJNAME = '" + OBJNAME + "'")
results = c.fetchall()
print results
if len(results) > 0:
print 'already added'
else:
command = "INSERT INTO telarchive_db (OBJNAME) VALUES ('" + OBJNAME + "')"
print command
c.execute(command)
import commands
vals = ''
for key in stringvars.keys():
print key, stringvars[key]
vals += ' ' + key + "='" + str(stringvars[key]) + "',"
for key in floatvars.keys():
print key, floatvars[key]
vals += ' ' + key + "='" + floatvars[key] + "',"
vals = vals[:-1]
command = "UPDATE telarchive_db set " + vals + " WHERE OBJNAME='" + OBJNAME + "'"
print command
c.execute(command)
def calcDataIllum(file, LENGTH1, LENGTH2, data,magErr, X, Y, pth='/nfs/slac/g/ki/ki04/pkelly/plots/', rot=0, good=None):
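# Bins the residuals 'data' onto an nbin1 x nbin2 grid using inverse-variance weights, writes
# the weighted-mean map and its error map as FITS images, and produces PGPLOT diagnostic plots
# of the residuals versus X, Y and position (written to <pth><file>*.ps).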
import numpy, math, pyfits, os
from ppgplot import *
#print size_x, size_y, bin, size_x/bin
x = []
y = []
z = []
zerr = []
from copy import copy
X_sort = copy(X)
Y_sort = copy(Y)
X_sort = numpy.sort(X_sort)
Y_sort = numpy.sort(Y_sort)
X_min = X_sort[0]
Y_min = Y_sort[0]
X_max = X_sort[-1]
Y_max = Y_sort[-1]
X_width = abs(X_max - X_min)
Y_width = abs(Y_max - Y_min)
nbin1 =15
nbin2 =15
LENGTH1 = LENGTH1
LENGTH2 = LENGTH2
print LENGTH1, LENGTH2
bin1 = int(LENGTH1/nbin1)
bin2 = int(LENGTH2/nbin2)
diff_weightsum = -9999*numpy.ones([nbin1,nbin2])
diff_invvar = -9999*numpy.ones([nbin1,nbin2])
diff_X = -9999*numpy.ones([nbin1,nbin2])
diff_Y = -9999*numpy.ones([nbin1,nbin2])
X_cen = []
Y_cen = []
data_cen = []
zerr_cen = []
chisq = 0
for i in range(len(data)):
if good is not None:
use = good[0][i] == good[1]
else:
use = True
if use:
if 1: # LENGTH1*0.3 < X[i] < LENGTH1*0.6:
X_cen.append(X[i])
Y_cen.append(Y[i])
data_cen.append(data[i])
zerr_cen.append(magErr[i])
x.append(X[i])
y.append(Y[i])
z.append(data[i])
zerr.append(magErr[i])
chisq += data[i]**2./magErr[i]**2.
x_val = int((X[i])/float(bin1)) # + size_x/(2*bin)
y_val = int((Y[i])/float(bin2)) #+ size_y/(2*bin)
#print LENGTH1, LENGTH2, x_val, y_val, X[i], Y[i]
#print size_x/bin+1,size_y/bin+1, x_val, y_val, X[i], Y[i]
err = magErr[i]
''' lower limit on error '''
if err < 0.04: err = 0.04
weightsum = data[i]/err**2.
weightX = X[i]/err**2.
weightY = Y[i]/err**2.
invvar = 1/err**2.
#if 1: #0 <= x_val and x_val < int(nbin1) and y_val >= 0 and y_val < int(nbin2): #0 < x_val < size_x/bin and 0 < y_val < size_y/bin:
#print x_val, y_val
try:
if diff_weightsum[x_val][y_val] == -9999:
diff_weightsum[x_val][y_val] = weightsum
diff_invvar[x_val][y_val] = invvar
diff_X[x_val][y_val] = weightX
diff_Y[x_val][y_val] = weightY
#print x_val, y_val, weightsum, '!!!!!'
else:
diff_weightsum[x_val][y_val] += weightsum
diff_invvar[x_val][y_val] += invvar
diff_X[x_val][y_val] += weightX
diff_Y[x_val][y_val] += weightY
except: fail = 'fail'
redchisq = chisq**0.5 / len(data)
print 'redchisq', redchisq
import Numeric
x_p = Numeric.array(X_cen)
y_p = Numeric.array(Y_cen)
z_p = Numeric.array(data_cen)
zerr_p = Numeric.array(zerr_cen)
x.sort()
y.sort()
z.sort()
mean = diff_weightsum/diff_invvar
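# Inverse-variance weighted mean per bin: mean = sum(d_i/sigma_i^2) / sum(1/sigma_i^2), with
# err = 1/sqrt(sum(1/sigma_i^2)) below; bins that never received data were initialised to
# -9999 in both numerator and denominator.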
print 'mean'
#print mean
err = 1/diff_invvar**0.5
print 'err'
#print err
print 'writing'
hdu = pyfits.PrimaryHDU(mean)
f = pth + file
os.system('rm ' + f + 'diffmap.fits')
hdu.writeto( f + 'diffmap.fits')
hdu = pyfits.PrimaryHDU(err)
os.system('rm ' + f + 'diffinvar.fits')
hdu.writeto( f + 'diffinvar.fits')
''' now make cuts with binned data '''
mean_flat = Numeric.array(mean.flatten(1))
print mean_flat
err_flat = Numeric.array(err.flatten(1))
print err_flat
mean_X = Numeric.array((diff_X/diff_invvar).flatten(1))
print mean_X
mean_Y = Numeric.array((diff_Y/diff_invvar).flatten(1))
print mean_Y
file = f + 'diffp.ps'
t = tempfile.NamedTemporaryFile(dir='/tmp/').name
### plot residuals
pgbeg(t+"/cps",1,2)
pgiden()
#print x_p
#print z_p
#print zerr_p
#pgswin(x[0],x[-1],z[0],z[-1])
pgpanl(1,1)
pgswin(x[0],x[-1],-0.4,0.4)
pgbox()
pglab('X axis','SDSS-SUBARU',file) # label the plot
#pgsci(3)
#pgerrb(6,x_p,z_p,zerr_p)
pgerrb(6,mean_X,mean_flat,err_flat)
pgpt(mean_X,mean_flat,3)
#pgswin(y[0],y[-1],z[0],z[-1])
pgpanl(1,2)
pgswin(y[0],y[-1],-0.4,0.4)
pgsci(1)
pgbox()
pglab('Y axis','SDSS-SUBARU',file) # label the plot
#pgsci(3)
pgerrb(6,mean_Y,mean_flat,err_flat)
pgpt(mean_Y,mean_flat,3)
pgsci(1)
pgend()
os.system('mv ' + t + ' ' + file)
file = f + 'pos.ps'
t = tempfile.NamedTemporaryFile(dir='/tmp/').name
print file
os.system('rm ' + file)
pgbeg(t + '/cps',1,1)
pgiden()
#print x_p
#print z_p
#print zerr_p
#pgswin(x[0],x[-1],z[0],z[-1])
### plot positions
pgpanl(1,1)
pgswin(x[0],x[-1],y[0],y[-1])
pgbox()
pglab('X','Y',file) # label the plot
#pgsci(3)
#pgerrb(6,x_p,z_p,zerr_p)
pgpt(x_p,y_p,3)
pgend()
os.system('mv ' + t + ' ' + file)
print f + 'pos.ps'+"/cps"
file = f + 'diff.ps'
t = tempfile.NamedTemporaryFile(dir='/tmp/').name
### plot residuals
pgbeg(t+"/cps",1,2)
pgiden()
#print x_p
#print z_p
#print zerr_p
#pgswin(x[0],x[-1],z[0],z[-1])
pgpanl(1,1)
pgswin(x[0],x[-1],-0.4,0.4)
pgbox()
pglab('X axis','SDSS-SUBARU',file) # label the plot
#pgsci(3)
#pgerrb(6,x_p,z_p,zerr_p)
pgpt(x_p,z_p,3)
#pgswin(y[0],y[-1],z[0],z[-1])
pgpanl(1,2)
pgswin(y[0],y[-1],-0.4,0.4)
pgsci(1)
pgbox()
pglab('Y axis','SDSS-SUBARU',file) # label the plot
#pgsci(3)
#pgerrb(6,y_p,z_p,zerr_p)
pgpt(y_p,z_p,3)
pgsci(1)
#print x_p
#print z_p
#print zerr_p
pgend()
os.system('mv ' + t + ' ' + file)
return
if __name__ == '__main__':
import sys, os
tmpdir_root = sys.argv[1] + '/'
os.chdir(tmpdir_root)
tmpdir = tmpdir_root + '/tmp/'
os.system('mkdir -p ' + tmpdir)
astrom = 'solve-field'
if len(sys.argv)>2:
astrom = sys.argv[2]
select_analyze()
#match_OBJNAME()
else:
if not 'loaded' in locals():
import tempfile
tmpdir = tempfile.NamedTemporaryFile(dir='/usr/work/pkelly/').name
os.system('mkdir ' + tmpdir)
loaded = 'yes'
print 'loaded' in locals()
| deapplegate/wtgpipeline | non_essentials/calc_test/calc_test_save.olddb.py | Python | mit | 314,806 | ["Galaxy"] | ab37ad6f100514910432d311911e84a16455176f32c8ccff52c578e377348722 |
#!/usr/bin/env python3
import itertools as it
import logging
import os
from pathlib import Path
import re
from natsort import natsorted
import numpy as np
from pysisyphus.calculators.Gaussian09 import Gaussian09
from pysisyphus.calculators.Gaussian16 import Gaussian16
from pysisyphus.calculators.ORCA import ORCA
from pysisyphus.calculators.Turbomole import Turbomole
from pysisyphus.helpers import (geom_from_xyz_file, index_array_from_overlaps,
np_print
)
class Overlapper:
orca_exts = ("out", "gbw", "cis")
gaussian_exts = ("log", "fchk", "dump_635r")
logger = logging.getLogger("overlapper")
def __init__(self, path, ovlp_with="previous", prev_n=0,
calc_key=None, calc_kwargs=None):
self.path = Path(path)
self.ovlp_with = ovlp_with
assert ovlp_with in ("previous", "first")
self.calc_key = calc_key
self.calc_kwargs = calc_kwargs
self.calc_kwargs["out_dir"] = path
mobj = re.match("previous(\d+)", self.ovlp_with)
self.prev_n = prev_n
assert self.prev_n >= 0
self.setter_dict = {
"gaussian09": self.set_g16_files,
"gaussian16": self.set_g16_files,
"orca": self.set_orca_files,
"turbomole": self.set_turbo_files,
}
self.files_from_dir_dict = {
"orca": self.set_orca_files_from_dir,
"gaussian09": self.set_gaussian16_files_from_dir,
"gaussian16": self.set_gaussian16_files_from_dir,
}
def log(self, message, lvl="info"):
func = getattr(self.logger, lvl)
func(message)
def keyfunc(self, element):
regex = "_(\d+)\.(\d+)\."
mobj = re.search(regex, element)
return tuple([int(num) for num in mobj.groups()])
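# e.g. keyfunc("calculator_003.012.gbw") -> (3, 12): files are sorted and grouped by the
# (calc_number, cycle_number) pair parsed from the filename (hypothetical filename, for illustration).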
def discover_files(self, path):
image_str = "image_"
calc_str = "calculator_"
files = [str(f) for f in path.glob(image_str + "*")]
if len(files) > 1:
base_str = image_str
else:
files = [str(f) for f in path.glob(calc_str + "*")]
base_str = calc_str
print(f"Found {len(files)} files starting with '{base_str}'. "
f"I assume that base string is '{base_str}'.")
files = sorted(files, key=self.keyfunc)
files_dict = dict()
for key, elements in it.groupby(files, self.keyfunc):
files_dict[key] = list(elements)
return files_dict
def discover_files_in_dir(self, path, exts):
files_list = list()
for ext in exts:
glob = f"*{ext}"
fns = [_ for _ in path.glob(glob)]
assert len(fns) == 1, f"Searched for *.{ext} and was expecting " \
f"one file but found {len(fns)} files " \
f"instead: {fns}"
fn = str(fns[0])
files_list.append(fn)
return files_list
def discover_geometries(self, path):
xyz_fns = natsorted(path.glob("*.xyz"))
geoms = [geom_from_xyz_file(xyz_fn) for xyz_fn in xyz_fns]
self.restore_calculators(geoms)
return geoms
def file_by_ext(self, iterable, ext):
matches = [f for f in iterable if f.endswith(ext)]
if len(matches) == 0:
raise Exception(f"Couldn't file with extension '{ext}'!")
assert len(matches) == 1
return matches[0]
def set_files_on_calculator(self, geom, files_dict, calc_class, exts,
calc_number, cycle_number=0):
key = (calc_number, cycle_number)
files = files_dict[key]
calc = calc_class(calc_number=calc_number, **self.calc_kwargs)
geom.set_calculator(calc)
print(f"Setting files on calculator_{calc_number:03d}:")
for ext in exts:
file_ext = self.file_by_ext(files, ext)
setattr(calc, ext, file_ext)
print(f"\t{file_ext}")
def set_files_on_calculators(self, geoms, files_dict, calc_class,
exts):
for i, geom in enumerate(geoms):
calc_number, cycle_number = i, 0
self.set_files_on_calculator(geom, files_dict, calc_class, exts,
calc_number, cycle_number)
def set_files_from_dir(self, geom, path, calc_number):
func = self.files_from_dir_dict[self.calc_key]
func(geom, path, calc_number)
def set_orca_files_from_dir(self, geom, path, calc_number):
exts = self.orca_exts
files_list = self.discover_files_in_dir(path, exts)
files_dict = {
(calc_number, 0): files_list,
}
self.set_files_on_calculator(geom, files_dict, ORCA, exts, calc_number)
geom.calculator.store_overlap_data(geom.atoms, geom.coords)
def set_orca_files(self, geoms, files_dict):
self.set_files_on_calculators(geoms, files_dict, ORCA, self.orca_exts)
for geom in geoms:
geom.calculator.store_overlap_data(geom.atoms, geom.coords)
def set_gaussian16_files_from_dir(self, geom, path, calc_number):
exts = self.gaussian_exts
files_list = self.discover_files_in_dir(path, exts)
log_file, *files_list = files_list
files_dict = {
(calc_number, 0): files_list,
}
exts_without_log = exts[1:]
assert "log" not in exts_without_log
self.set_files_on_calculator(geom, files_dict, Gaussian16,
exts_without_log,
calc_number
)
log_path = Path(log_file)
nmos, roots = geom.calculator.parse_log(log_path)
calc = geom.calculator
calc.nmos = nmos
calc.roots = roots
calc.store_overlap_data(geom.atoms, geom.coords)
def set_g16_files(self, geoms, files_dict):
exts = ("fchk", "dump_635r")
self.set_files_on_calculators(geoms, files_dict, Gaussian16, exts)
first_log = Path(self.file_by_ext(files_dict[(0, 0)], ".log"))
nmos, roots = geoms[0].calculator.parse_log(first_log)
for geom in geoms:
calc = geom.calculator
calc.nmos = nmos
calc.roots = roots
calc.store_overlap_data(geom.atoms, geom.coords)
def set_turbo_files(self, geoms, files_dict):
exts = ("mos", "ciss_a", "out", "control")
self.set_files_on_calculators(geoms, files_dict, Turbomole, exts)
for geom in geoms:
calc = geom.calculator
if hasattr(calc, "ciss_a"):
calc.td_vec_fn = calc.ciss_a
elif hasattr(calc, "ccres"):
calc.td_vec_fn = calc.ccres
calc.store_overlap_data(geom.atoms, geom.coords)
def restore_calculators(self, geoms):
files_dict = self.discover_files(self.path)
unique_calculators = set([calc_num for calc_num, cycle_num in files_dict])
# assert len(unique_calculators) <= len(geoms), ("Number of discovered "
# f"unique calculators ({len(unique_calculators)}) is bigger than the "
# f"number of discovered geometries ({len(geoms)})."
# )
print(f"Found {len(unique_calculators)} unique calculators.")
print(f"Found {len(geoms)} geometries.")
calc_num = min(len(unique_calculators), len(geoms))
setter_func = self.setter_dict[self.calc_key]
setter_func(geoms[:calc_num], files_dict)
print(f"Restored {calc_num} calculators.")
return calc_num
def similar_overlaps(self, overlaps_for_state, ovlp_thresh=.1, diff_thresh=.2):
"""Return True if overlaps for a state are similar."""
# Find overlaps above ovlp_thresh
above_inds = np.where(np.abs(overlaps_for_state) > ovlp_thresh)[0]
# Unambiguous assignment. There is a one to one correspondence between
# the states.
if len(above_inds) == 1:
return False
        # Given a full row containing overlaps, this may evaluate to True if
# something went wrong and the overlaps ARE that small. Given only a subset
# of a full row, e.g. when only the first N states are considered this may
# evaluate to True if the index of the current state lies below N. E.g. if we
# check state 6, but got only the overlaps from state 1 to 5.
elif len(above_inds) == 0:
return False
above_thresh = np.abs(overlaps_for_state[above_inds])
max_ovlp_ind = above_thresh.argmax()
max_ovlp = above_thresh[max_ovlp_ind]
without_max = np.delete(above_thresh, max_ovlp_ind)
# Consider the differences between the maximum overlap and the smaller ones.
diffs = np.abs(max_ovlp - without_max)
# Return True if any difference is below the threshold
return any(diffs < diff_thresh)
def get_ovlp_func(self, ovlp_type, double_mol=False, recursive=False,
consider_first=None):
def wf_ovlp(calc1, calc2, ao_ovlp):
ovlp_mats = calc1.wfow.overlaps_with(calc2.wfow, ao_ovlp=ao_ovlp)
ovlp_mat = ovlp_mats[0]
return ovlp_mat
def tden_ovlp(calc1, calc2, ao_ovlp):
return calc1.tdens_overlap_with_calculator(calc2,
ao_ovlp=ao_ovlp)
ovlp_dict = {
"wf": wf_ovlp,
"tden": tden_ovlp,
}
valid_ovlps = "/".join([str(k) for k in ovlp_dict.keys()])
assert ovlp_type in ovlp_dict.keys(), \
f"Invalid ovlp_type! Valid keys are {valid_ovlps}."
ovlp_func_ = ovlp_dict[ovlp_type]
def ovlp_func(geoms, i, j, depth=2, ao_ovlp=None):
ith_geom = geoms[i]
jth_geom = geoms[j]
ith_calc = geoms[i].calculator
jth_calc = geoms[j].calculator
icn = ith_calc.calc_number
jcn = jth_calc.calc_number
if double_mol:
true_ovlp_mat_fn = f"ao_ovlp_true_{icn:03d}_{jcn:03d}"
try:
ao_ovlp = np.loadtxt(true_ovlp_mat_fn)
self.logger.info(f"Using true AO overlaps from {true_ovlp_mat_fn}.")
except:
self.logger.info("Doing double molecule calculation to get "
"AO overlaps."
)
ao_ovlp = jth_geom.calc_double_ao_overlap(ith_geom)
np.savetxt(f"ao_ovlp_true_{icn:03d}_{jcn:03d}", ao_ovlp)
self.log(f"Calculationg overlaps for steps {icn:03d} and {jcn:03d}.")
ovlp_mat = ovlp_func_(ith_calc, jth_calc, ao_ovlp)
self.log(ovlp_mat)
ovlp_mat_fn = f"{ovlp_type}_ovlp_mat_{icn:03d}_{jcn:03d}"
np.savetxt(self.path / ovlp_mat_fn, ovlp_mat)
similar = any(
[self.similar_overlaps(per_state)
for per_state in ovlp_mat[:,:consider_first]]
)
if similar:
self.log( "Some entries of the overlap matrix between steps "
f"{icn:03d} and {jcn:03d} are very similar!")
if recursive and similar and (i > 0) and depth > 0:
self.log(f"Comparing {icn-1:03d} and {jcn:03d} now, "
f"because steps {icn:03d} and {jcn:03d} were "
"too similar."
)
return ovlp_func(geoms, i-1, j, depth-1)
return ovlp_mat
return ovlp_func
@np_print
def overlaps_for_geoms(self, geoms, ovlp_type="wf", double_mol=False,
recursive=False, consider_first=None, skip=0):
# if skip > 0 and recursive:
# raise Exception("recursive = True and skip > 0 can't be used "
# "together."
# )
ovlp_func = self.get_ovlp_func(ovlp_type, double_mol, recursive,
consider_first)
if double_mol:
assert hasattr(geoms[0].calculator, "run_double_mol_calculation"), \
"Double molecule calculation not implemented for " \
f"{self.calc_key}."
self.log(f"Doing {ovlp_type.upper()}-overlaps.")
inds_list = list()
ovlp_mats = list()
is_similar = lambda ovlp_mat: any([self.similar_overlaps(per_state)
for per_state in ovlp_mat[:,:consider_first]]
)
for i in range(len(geoms)-1):
# We can be sure that i is always a valid index.
j = i+(1+skip)
if self.ovlp_with == "first":
i = 0
elif self.prev_n:
i = max(i - self.prev_n, 0)
if j >= len(geoms):
break
ovlp_mat = ovlp_func(geoms, i, j)
ovlp_mats.append(ovlp_mat)
index_array = index_array_from_overlaps(ovlp_mat)
inds_list.append(index_array)
self.log(index_array)
inds_arr = np.array(inds_list)
ovlp_mats = np.array(ovlp_mats)
max_ovlp_inds_fn = f"{ovlp_type}_max_ovlp_inds"
ovlp_mats_fn = f"{ovlp_type}_ovlp_mats"
np.savetxt(self.path / max_ovlp_inds_fn, inds_arr, fmt="%i")
np.save(ovlp_mats_fn, ovlp_mats)
self.log("")
self.log("Max overlap indices.")
for i, row in enumerate(inds_arr):
self.log(f"Step {i:02d}: {row}")
if __name__ == "__main__":
# path = Path("/scratch/programme/pysisyphus/tests_staging/test_diabatizer/cb3_def2svp")
path = Path("/scratch/programme/pysisyphus/tests_staging/test_diabatizer/cb3_def2svp/first_five")
calc_kwargs = {
"keywords": "CAM-B3LYP def2-SVP RIJCOSX D3BJ TightSCF",
"blocks": "%tddft nroots 5 tda false end %maxcore 1000",
"track": True,
"pal": 4,
}
calc_key = "orca"
    ovl = Overlapper(path, calc_key=calc_key, calc_kwargs=calc_kwargs)
geoms = ovl.discover_geometries(path)
# files_dict = dia.discover_files(path)
# dia.restore_calculators("orca")
    ovl.overlaps_for_geoms(geoms)
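# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the ambiguity test
# implemented in Overlapper.similar_overlaps() above. A row of an overlap
# matrix is flagged when more than one entry exceeds ovlp_thresh and the
# runners-up lie within diff_thresh of the maximum. Values below are made up.
def _example_similar_overlaps():
    row = np.array([0.05, 0.68, 0.64, 0.02])    # hypothetical overlaps
    above = np.abs(row)[np.abs(row) > 0.1]      # ovlp_thresh = 0.1
    runners_up = np.delete(above, above.argmax())
    # True here, because 0.68 and 0.64 differ by less than diff_thresh = 0.2
    return above.size > 1 and np.any(np.abs(above.max() - runners_up) < 0.2)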
|
eljost/pysisyphus
|
deprecated/overlaps/Overlapper.py
|
Python
|
gpl-3.0
| 14,208
|
[
"ORCA",
"TURBOMOLE"
] |
3e4de9bb42d37547e9dee4e8776cf888d5a15d21d7f3a421cee1499c543aa755
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
Module to provide lightweight definitions of functionals and
SuperFunctionals
"""
import re
import os
import math
from psi4 import core
from psi4.driver.qcdb import interface_dftd3 as dftd3
from psi4.driver.p4util.exceptions import *
from . import libxc_xc_funcs
from . import gga_superfuncs
from . import hyb_superfuncs
from . import mgga_superfuncs
from . import double_hyb_superfuncs
## ==> SuperFunctionals <== ##
superfunctionals = {}
superfunctionals.update(libxc_xc_funcs.libxc_xc_functional_list)
superfunctionals.update(gga_superfuncs.gga_superfunc_list)
superfunctionals.update(hyb_superfuncs.hyb_superfunc_list)
superfunctionals.update(mgga_superfuncs.mgga_superfunc_list)
superfunctionals.update(double_hyb_superfuncs.double_hyb_superfunc_list)
## ==> SuperFunctional List <== ##
superfunctional_list = []
for key in superfunctionals.keys():
sup = superfunctionals[key](key, 1, 1, True)[0]
superfunctional_list.append(sup)
## ==> Dispersion SuperFunctional List <== ##
p4_funcs = set([x for x in list(superfunctionals)])
p4_funcs -= set(['b97-d'])
for dashlvl, dashparam_dict in dftd3.dashcoeff.items():
func_list = (set(dashparam_dict) & p4_funcs)
for func in func_list:
sup = superfunctionals[func](func, 1, 1, True)[0]
sup.set_name(sup.name() + '-' + dashlvl.upper())
superfunctional_list.append(sup)
if dashlvl == 'd2p4':
            # -D2 override
sup = superfunctionals[func](func, 1, 1, True)[0]
sup.set_name(sup.name() + '-D2')
superfunctional_list.append(sup)
            # -D override
sup = superfunctionals[func](func, 1, 1, True)[0]
sup.set_name(sup.name() + '-D')
superfunctional_list.append(sup)
if dashlvl == 'd3zero':
sup = superfunctionals[func](func, 1, 1, True)[0]
sup.set_name(sup.name() + '-D3')
superfunctional_list.append(sup)
if dashlvl == 'd3mzero':
sup = superfunctionals[func](func, 1, 1, True)[0]
sup.set_name(sup.name() + '-D3M')
superfunctional_list.append(sup)
# B97D is an odd one
for dashlvl in dftd3.full_dash_keys:
if dashlvl == 'd2p4': continue
sup = superfunctionals['b97-d']('b97-d', 1, 1, True)[0]
sup.set_name('B97-' + dashlvl.upper())
superfunctional_list.append(sup)
# wPBE, grr need a new scheme
for dashlvl in ['d3', 'd3m', 'd3zero', 'd3mzero', 'd3bj', 'd3mbj']:
sup = superfunctionals['wpbe']('wpbe', 1, 1, True)[0]
sup.set_name(sup.name() + '-' + dashlvl.upper())
superfunctional_list.append(sup)
## ==> SuperFunctional Builder <== ##
def build_superfunctional(alias, restricted):
name = alias.lower()
    npoints = core.get_option("SCF", "DFT_BLOCK_MAX_POINTS")
deriv = 1 # Default depth for now
# Grab out superfunctional
if name in ["gen", ""]:
sup = (core.get_option("DFT_CUSTOM_FUNCTIONAL"), False)
if not isinstance(sup[0], core.SuperFunctional):
raise KeyError("SCF: Custom Functional requested, but nothing provided in DFT_CUSTOM_FUNCTIONAL")
elif name in superfunctionals.keys():
sup = superfunctionals[name](name, npoints, deriv, restricted)
elif name.upper() in superfunctionals.keys():
sup = superfunctionals[name.upper()](name, npoints, deriv, restricted)
elif any(name.endswith(al) for al in dftd3.full_dash_keys):
# Odd hack for b97-d
if 'b97-d' in name:
name = name.replace('b97', 'b97-d')
dashparam = [x for x in dftd3.full_dash_keys if name.endswith(x)]
if len(dashparam) > 1:
raise Exception("Dashparam %s is ambiguous.")
else:
dashparam = dashparam[0]
base_name = name.replace('-' + dashparam, '')
if dashparam in ['d2', 'd']:
dashparam = 'd2p4'
if dashparam == 'd3':
dashparam = 'd3zero'
if dashparam == 'd3m':
dashparam = 'd3mzero'
if base_name not in superfunctionals.keys():
raise KeyError("SCF: Functional (%s) with base (%s) not found!" % (alias, base_name))
func = superfunctionals[base_name](base_name, npoints, deriv, restricted)[0]
base_name = base_name.replace('wpbe', 'lcwpbe')
sup = (func, (base_name, dashparam))
else:
raise KeyError("SCF: Functional (%s) not found!" % alias)
if (core.get_global_option('INTEGRAL_PACKAGE') == 'ERD') and (sup[0].is_x_lrc() or sup[0].is_c_lrc()):
raise ValidationError("INTEGRAL_PACKAGE ERD does not play nicely with omega ERI's, so stopping.")
# Set options
if core.has_option_changed("SCF", "DFT_OMEGA") and sup[0].is_x_lrc():
sup[0].set_x_omega(core.get_option("SCF", "DFT_OMEGA"))
if core.has_option_changed("SCF", "DFT_OMEGA_C") and sup[0].is_c_lrc():
sup[0].set_c_omega(core.get_option("SCF", "DFT_OMEGA_C"))
if core.has_option_changed("SCF", "DFT_ALPHA"):
sup[0].set_x_alpha(core.get_option("SCF", "DFT_ALPHA"))
if core.has_option_changed("SCF", "DFT_ALPHA_C"):
sup[0].set_c_alpha(core.get_option("SCF", "DFT_ALPHA_C"))
# Check SCF_TYPE
if sup[0].is_x_lrc() and (core.get_option("SCF", "SCF_TYPE") not in ["DIRECT", "DF", "OUT_OF_CORE", "PK"]):
raise KeyError("SCF: SCF_TYPE (%s) not supported for range-seperated functionals."
% core.get_option("SCF", "SCF_TYPE"))
if (core.get_global_option('INTEGRAL_PACKAGE') == 'ERD') and (sup[0].is_x_lrc()):
raise ValidationError('INTEGRAL_PACKAGE ERD does not play nicely with LRC DFT functionals, so stopping.')
return sup
def test_ccl_functional(functional, ccl_functional):
check = True
if (not os.path.exists('data_pt_%s.html' % (ccl_functional))):
os.system('wget ftp://ftp.dl.ac.uk/qcg/dft_library/data_pt_%s.html' % ccl_functional)
fh = open('data_pt_%s.html' % (ccl_functional))
lines = fh.readlines()
fh.close()
points = []
point = {}
rho_line = re.compile(r'^\s*rhoa=\s*(-?\d+\.\d+E[+-]\d+)\s*rhob=\s*(-?\d+\.\d+E[+-]\d+)\s*sigmaaa=\s*(-?\d+\.\d+E[+-]\d+)\s*sigmaab=\s*(-?\d+\.\d+E[+-]\d+)\s*sigmabb=\s*(-?\d+\.\d+E[+-]\d+)\s*')
val_line = re.compile(r'^\s*(\w*)\s*=\s*(-?\d+\.\d+E[+-]\d+)')
aliases = { 'zk' : 'v',
'vrhoa' : 'v_rho_a',
'vrhob' : 'v_rho_b',
'vsigmaaa' : 'v_gamma_aa',
'vsigmaab' : 'v_gamma_ab',
'vsigmabb' : 'v_gamma_bb',
'v2rhoa2' : 'v_rho_a_rho_a',
'v2rhoab' : 'v_rho_a_rho_b',
'v2rhob2' : 'v_rho_b_rho_b',
'v2rhoasigmaaa' : 'v_rho_a_gamma_aa',
'v2rhoasigmaab' : 'v_rho_a_gamma_ab',
'v2rhoasigmabb' : 'v_rho_a_gamma_bb',
'v2rhobsigmaaa' : 'v_rho_b_gamma_aa',
'v2rhobsigmaab' : 'v_rho_b_gamma_ab',
'v2rhobsigmabb' : 'v_rho_b_gamma_bb',
'v2sigmaaa2' : 'v_gamma_aa_gamma_aa',
'v2sigmaaaab' : 'v_gamma_aa_gamma_ab',
'v2sigmaaabb' : 'v_gamma_aa_gamma_bb',
'v2sigmaab2' : 'v_gamma_ab_gamma_ab',
'v2sigmaabbb' : 'v_gamma_ab_gamma_bb',
'v2sigmabb2' : 'v_gamma_bb_gamma_bb',
}
for line in lines:
mobj = re.match(rho_line, line)
if (mobj):
if len(point):
points.append(point)
point = {}
point['rho_a'] = float(mobj.group(1))
point['rho_b'] = float(mobj.group(2))
point['gamma_aa'] = float(mobj.group(3))
point['gamma_ab'] = float(mobj.group(4))
point['gamma_bb'] = float(mobj.group(5))
continue
mobj = re.match(val_line, line)
if (mobj):
point[aliases[mobj.group(1)]] = float(mobj.group(2))
points.append(point)
N = len(points)
rho_a = core.Vector(N)
rho_b = core.Vector(N)
gamma_aa = core.Vector(N)
gamma_ab = core.Vector(N)
gamma_bb = core.Vector(N)
tau_a = core.Vector(N)
tau_b = core.Vector(N)
index = 0
for point in points:
rho_a[index] = point['rho_a']
rho_b[index] = point['rho_b']
gamma_aa[index] = point['gamma_aa']
gamma_ab[index] = point['gamma_ab']
gamma_bb[index] = point['gamma_bb']
index = index + 1
super = build_superfunctional(functional, N, 1)
super.test_functional(rho_a, rho_b, gamma_aa, gamma_ab, gamma_bb, tau_a, tau_b)
v = super.value('V')
v_rho_a = super.value('V_RHO_A')
v_rho_b = super.value('V_RHO_B')
v_gamma_aa = super.value('V_GAMMA_AA')
v_gamma_ab = super.value('V_GAMMA_AB')
v_gamma_bb = super.value('V_GAMMA_BB')
if not v_gamma_aa:
v_gamma_aa = tau_a
v_gamma_ab = tau_a
v_gamma_bb = tau_a
tasks = ['v', 'v_rho_a', 'v_rho_b', 'v_gamma_aa', 'v_gamma_ab', 'v_gamma_bb']
mapping = {
'v': v,
'v_rho_a': v_rho_a,
'v_rho_b': v_rho_b,
'v_gamma_aa': v_gamma_aa,
'v_gamma_ab': v_gamma_ab,
'v_gamma_bb': v_gamma_bb,
}
super.print_detail(3)
index = 0
for point in points:
core.print_out('rho_a= %11.3E, rho_b= %11.3E, gamma_aa= %11.3E, gamma_ab= %11.3E, gamma_bb= %11.3E\n' % (rho_a[index], rho_b[index], gamma_aa[index], gamma_ab[index], gamma_bb[index]))
for task in tasks:
v_ref = point[task]
v_obs = mapping[task][index]
delta = v_obs - v_ref
if (v_ref == 0.0):
epsilon = 0.0
else:
epsilon = abs(delta / v_ref)
if (epsilon < 1.0E-11):
passed = 'PASSED'
else:
passed = 'FAILED'
check = False
core.print_out('\t%-15s %24.16E %24.16E %24.16E %24.16E %6s\n' % (task, v_ref, v_obs, delta, epsilon, passed))
index = index + 1
core.print_out('\n')
return check
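# ---------------------------------------------------------------------------
# Illustrative note (not part of the original Psi4 module): the suffix handling
# in build_superfunctional() normalizes user-facing dispersion names onto the
# dftd3 "dashlvl" keys before lookup, e.g. (hypothetical inputs)
#     'b3lyp-d'   -> base 'b3lyp', dashparam 'd2p4'
#     'b3lyp-d2'  -> base 'b3lyp', dashparam 'd2p4'
#     'b3lyp-d3'  -> base 'b3lyp', dashparam 'd3zero'
#     'b3lyp-d3m' -> base 'b3lyp', dashparam 'd3mzero'
# while a name like 'b3lyp-d3bj' already ends in a canonical dashlvl key and
# is split as-is.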
|
rmcgibbo/psi4public
|
psi4/driver/procrouting/dft_funcs/superfuncs.py
|
Python
|
lgpl-3.0
| 11,064
|
[
"Psi4"
] |
3fd71ca9880a0dafc101c80fad5db7cd77c0c1f1ec03e7d6e0db07b9645d6509
|
# -*- mode: python; -*-
##
## sashimi_plot
##
## Utility for visualizing RNA-Seq densities along gene models and
## for plotting MISO output
##
import os
import sys
import glob
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
import matplotlib
# Use PDF backend
try: matplotlib.use("pdf")
except Exception: pass
from scipy import *
from numpy import *
import pysam
import shelve
import misopy
import misopy.gff_utils as gff_utils
import misopy.pe_utils as pe_utils
from misopy.parse_csv import csv2dictlist_raw
from misopy.samples_utils import load_samples
from misopy.sashimi_plot.Sashimi import Sashimi
from misopy.sashimi_plot.plot_utils.samples_plotter import SamplesPlotter
from misopy.sashimi_plot.plot_utils.plotting import *
from misopy.sashimi_plot.plot_utils.plot_gene import plot_density_from_file
import matplotlib.pyplot as plt
from matplotlib import rc
def plot_bf_dist(bf_filename, settings_filename, output_dir,
max_bf=1e12):
"""
Plot a Bayes factor distribution from a .miso_bf file.
"""
if not bf_filename.endswith(".miso_bf"):
print "WARNING: %s does not end in .miso_bf, are you sure it is the " \
"output of a MISO samples comparison?" %(bf_filename)
# Load BF data
data, h = csv2dictlist_raw(bf_filename)
plot_name = os.path.basename(bf_filename)
sashimi_obj = Sashimi(plot_name, output_dir,
settings_filename=settings_filename)
settings = sashimi_obj.settings
# Setup the figure
sashimi_obj.setup_figure()
# Matrix of bayes factors and delta psi pairs
bfs_and_deltas = []
for event in data:
bf = event['bayes_factor']
delta_psi = event['diff']
if type(bf) == str and "," in bf:
print "WARNING: %s is a multi-isoform event, skipping..." \
%(event)
continue
else:
# Impose upper limit on Bayes factor
bf = min(1e12, float(bf))
delta_psi = float(delta_psi)
bfs_and_deltas.append([bf, delta_psi])
bfs_and_deltas = array(bfs_and_deltas)
num_events = len(bfs_and_deltas)
print "Loaded %d event comparisons." %(num_events)
output_filename = sashimi_obj.output_filename
print "Plotting Bayes factors distribution"
print " - Output filename: %s" %(output_filename)
bf_thresholds = settings["bf_thresholds"]
bar_color = settings["bar_color"]
min_bf_thresh = min(bf_thresholds)
num_events_used = sum(bfs_and_deltas[:, 0] >= min_bf_thresh)
for thresh in bf_thresholds:
if type(thresh) != int:
print "Error: BF thresholds must be integers."
#sys.exit(1)
print "Using BF thresholds: "
print bf_thresholds
print "Using bar color: %s" %(bar_color)
plot_cumulative_bars(bfs_and_deltas[:, 0],
bf_thresholds,
bar_color=bar_color,
logged=True)
plt.xticks(bf_thresholds)
c = 1
plt.xlim([bf_thresholds[0] - c, bf_thresholds[-1] + c])
plt.title("Bayes factor distributions\n(using %d/%d events)" \
%(num_events_used, num_events))
plt.xlabel("Bayes factor thresh.")
plt.ylabel("No. events")
sashimi_obj.save_plot()
def plot_event(event_name, pickle_dir, settings_filename,
output_dir,
no_posteriors=False,
plot_title=None,
plot_label=None):
"""
Visualize read densities across the exons and junctions
of a given MISO alternative RNA processing event.
Also plots MISO estimates and Psi values.
"""
if not os.path.isfile(settings_filename):
print "Error: settings filename %s not found." %(settings_filename)
#sys.exit(1)
if not os.path.isdir(pickle_dir):
print "Error: event pickle directory %s not found." %(pickle_dir)
#sys.exit(1)
# Retrieve the full pickle filename
genes_filename = os.path.join(pickle_dir,
"genes_to_filenames.shelve")
# Check that file basename exists
if len(glob.glob("%s*" %(genes_filename))) == 0:
raise Exception, "Cannot find file %s. Are you sure the events " \
"were indexed with the latest version of index_gff.py?" \
%(genes_filename)
event_to_filenames = shelve.open(genes_filename)
if event_name not in event_to_filenames:
raise Exception, "Event %s not found in pickled directory %s. " \
"Are you sure this is the right directory for the event?" \
%(event_name, pickle_dir)
pickle_filename = event_to_filenames[event_name]
if pickle_dir not in pickle_filename:
import string
pickle_filename = string.replace(pickle_filename,'\\','/')
if 'sashimi_index' in pickle_filename:
pickle_filename = pickle_dir + string.split(pickle_filename,'sashimi_index')[1]
else:
pickle_filename = pickle_dir + string.split(pickle_filename,'trial_index')[1]
import string
#pickle_filename = string.replace(pickle_filename,' 1','')
if no_posteriors:
print "Asked to not plot MISO posteriors."
plot_density_from_file(settings_filename, pickle_filename, event_name,
output_dir,
no_posteriors=no_posteriors,
plot_title=plot_title,
plot_label=plot_label)
def plot_insert_len(insert_len_filename,
settings_filename,
output_dir):
"""
Plot insert length distribution.
"""
if not os.path.isfile(settings_filename):
print "Error: settings filename %s not found." %(settings_filename)
#sys.exit(1)
plot_name = os.path.basename(insert_len_filename)
sashimi_obj = Sashimi(plot_name, output_dir,
settings_filename=settings_filename)
settings = sashimi_obj.settings
num_bins = settings["insert_len_bins"]
output_filename = sashimi_obj.output_filename
sashimi_obj.setup_figure()
s = plt.subplot(1, 1, 1)
print "Plotting insert length distribution..."
print " - Distribution file: %s" %(insert_len_filename)
print " - Output plot: %s" %(output_filename)
insert_dist, params = pe_utils.load_insert_len(insert_len_filename)
mean, sdev, dispersion, num_pairs \
= pe_utils.compute_insert_len_stats(insert_dist)
print "min insert: %.1f" %(min(insert_dist))
print "max insert: %.1f" %(max(insert_dist))
plt.title("%s (%d read-pairs)" \
%(plot_name,
num_pairs),
fontsize=10)
plt.hist(insert_dist, bins=num_bins, color='k',
edgecolor="#ffffff", align='mid')
axes_square(s)
ymin, ymax = s.get_ylim()
plt.text(0.05, 0.95, "$\mu$: %.1f\n$\sigma$: %.1f\n$d$: %.1f" \
%(round(mean, 2),
round(sdev, 2),
round(dispersion, 2)),
horizontalalignment='left',
verticalalignment='top',
bbox=dict(edgecolor='k', facecolor="#ffffff",
alpha=0.5),
fontsize=10,
transform=s.transAxes)
plt.xlabel("Insert length (nt)")
plt.ylabel("No. read pairs")
sashimi_obj.save_plot()
def greeting():
print "Sashimi plot: Visualize spliced RNA-Seq reads along gene models. " \
"Part of the MISO (Mixture of Isoforms model) framework."
print "See --help for usage.\n"
print "Manual available at: http://genes.mit.edu/burgelab/miso/docs/sashimi.html\n"
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--plot-insert-len", dest="plot_insert_len", nargs=2, default=None,
help="Plot the insert length distribution from a given insert length (*.insert_len) "
"filename. Second argument is a settings file name.")
parser.add_option("--plot-bf-dist", dest="plot_bf_dist", nargs=2, default=None,
help="Plot Bayes factor distributon. Takes the arguments: "
"(1) Bayes factor filename (*.miso_bf) filename, "
"(2) a settings filename.")
parser.add_option("--plot-event", dest="plot_event", nargs=3, default=None,
help="Plot read densities and MISO inferences for a given alternative event. "
"Takes the arguments: (1) event name (i.e. the ID= of the event based on MISO gff3 "
"annotation file, (2) directory where indexed GFF annotation is (output of "
"index_gff.py), (3) path to plotting settings file.")
parser.add_option("--no-posteriors", dest="no_posteriors", default=False, action="store_true",
help="If given this argument, MISO posterior estimates are not plotted.")
parser.add_option("--plot-title", dest="plot_title", default=None, nargs=1,
help="Title of plot: a string that will be displayed at top of plot. Example: " \
"--plot-title \"My favorite gene\".")
parser.add_option("--plot-label", dest="plot_label", default=None, nargs=1,
help="Plot label. If given, plot will be saved in the output directory as " \
"the plot label ending in the relevant extension, e.g. <plot_label>.pdf. " \
"Example: --plot-label my_gene")
parser.add_option("--output-dir", dest="output_dir", nargs=1, default=None,
help="Output directory.")
(options, args) = parser.parse_args()
if options.plot_event is None:
greeting()
#sys.exit(1)
if options.output_dir == None:
print "Error: need --output-dir"
#sys.exit(1)
output_dir = os.path.abspath(os.path.expanduser(options.output_dir))
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
no_posteriors = options.no_posteriors
plot_title = options.plot_title
plot_label = options.plot_label
if options.plot_insert_len != None:
insert_len_filename = os.path.abspath(os.path.expanduser(options.plot_insert_len[0]))
settings_filename = os.path.abspath(os.path.expanduser(options.plot_insert_len[1]))
plot_insert_len(insert_len_filename, settings_filename, output_dir)
if options.plot_bf_dist != None:
bf_filename = os.path.abspath(os.path.expanduser(options.plot_bf_dist[0]))
settings_filename = os.path.abspath(os.path.expanduser(options.plot_bf_dist[1]))
plot_bf_dist(bf_filename, settings_filename, output_dir)
if options.plot_event != None:
event_name = options.plot_event[0]
pickle_dir = os.path.abspath(os.path.expanduser(options.plot_event[1]))
settings_filename = os.path.abspath(os.path.expanduser(options.plot_event[2]))
plot_event(event_name, pickle_dir, settings_filename, output_dir,
no_posteriors=no_posteriors,
plot_title=plot_title,
plot_label=plot_label)
if __name__ == '__main__':
main()
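# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original script; all paths and names
# below are hypothetical):
#
#   python sashimi_plot.py --plot-event "my_event" indexed_gff_dir \
#          sashimi_plot_settings.txt --output-dir plots/
#   python sashimi_plot.py --plot-insert-len sample.insert_len \
#          sashimi_plot_settings.txt --output-dir plots/
#   python sashimi_plot.py --plot-bf-dist comparison.miso_bf \
#          sashimi_plot_settings.txt --output-dir plots/
#
# The event name passed to --plot-event is the ID= field of the event in the
# MISO GFF3 annotation that was indexed with index_gff.py.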
|
nsalomonis/AltAnalyze
|
misopy/sashimi_plot/sashimi_plot.py
|
Python
|
apache-2.0
| 11,370
|
[
"pysam"
] |
177ff41be26f8de615e2231518679a8c7d45ed67b25d3adcd54458358762c5bf
|
"""Utilities to help Computing Element Queues manipulation
"""
from __future__ import absolute_import
from __future__ import division
import six
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.List import fromChar
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getDIRACPlatform
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
__RCSID__ = '$Id$'
def getQueuesResolved(siteDict):
"""
Get the list of queue descriptions merging site/ce/queue parameters and adding some
derived parameters.
:param dict siteDict: dictionary with configuration data as returned by Resources.getQueues() method
:return: S_OK/S_ERROR, Value dictionary per queue with configuration data updated, e.g. for SiteDirector
"""
queueFinalDict = {}
for site in siteDict:
for ce, ceDict in siteDict[site].items():
qDict = ceDict.pop('Queues')
for queue in qDict:
queueName = '%s_%s' % (ce, queue)
queueDict = qDict[queue]
queueDict['Queue'] = queue
queueDict['Site'] = site
# Evaluate the CPU limit of the queue according to the Glue convention
# To Do: should be a utility
if "maxCPUTime" in queueDict and "SI00" in queueDict:
maxCPUTime = float(queueDict['maxCPUTime'])
# For some sites there are crazy values in the CS
maxCPUTime = max(maxCPUTime, 0)
maxCPUTime = min(maxCPUTime, 86400 * 12.5)
si00 = float(queueDict['SI00'])
queueCPUTime = 60 / 250 * maxCPUTime * si00
queueDict['CPUTime'] = int(queueCPUTime)
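          # Illustrative arithmetic (made-up values, not from any real CS):
          # maxCPUTime = 2880 and SI00 = 2500 give
          # CPUTime = int(60 / 250 * 2880 * 2500) = 1728000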
# Tags & RequiredTags defined on the Queue level and on the CE level are concatenated
# This also converts them from a string to a list if required.
for tagFieldName in ('Tag', 'RequiredTag'):
ceTags = ceDict.get(tagFieldName, [])
if isinstance(ceTags, six.string_types):
ceTags = fromChar(ceTags)
queueTags = queueDict.get(tagFieldName, [])
if isinstance(queueTags, six.string_types):
queueTags = fromChar(queueTags)
queueDict[tagFieldName] = list(set(ceTags + queueTags))
# Some parameters can be defined on the CE level and are inherited by all Queues
for parameter in ['MaxRAM', 'NumberOfProcessors', 'WholeNode']:
queueParameter = queueDict.get(parameter, ceDict.get(parameter))
if queueParameter:
queueDict[parameter] = queueParameter
# If we have a multi-core queue add MultiProcessor tag
if queueDict.get('NumberOfProcessors', 1) > 1:
queueDict.setdefault('Tag', []).append('MultiProcessor')
queueDict['CEName'] = ce
queueDict['GridCE'] = ce
queueDict['CEType'] = ceDict['CEType']
queueDict['GridMiddleware'] = ceDict['CEType']
queueDict['QueueName'] = queue
platform = queueDict.get('Platform', ceDict.get('Platform', ''))
if not platform and "OS" in ceDict:
architecture = ceDict.get('architecture', 'x86_64')
platform = '_'.join([architecture, ceDict['OS']])
queueDict['Platform'] = platform
if platform:
result = getDIRACPlatform(platform)
if result['OK']:
queueDict['Platform'] = result['Value'][0]
queueFinalDict[queueName] = queueDict
return S_OK(queueFinalDict)
def matchQueue(jobJDL, queueDict, fullMatch=False):
"""
Match the job description to the queue definition
  :param str jobJDL: JDL job description
:param bool fullMatch: test matching on all the criteria
:param dict queueDict: queue parameters dictionary
:return: S_OK/S_ERROR, Value - result of matching, S_OK if matched or
S_ERROR with the reason for no match
"""
# Check the job description validity
job = ClassAd(jobJDL)
if not job.isOK():
return S_ERROR('Invalid job description')
noMatchReasons = []
# Check job requirements to resource
# 1. CPUTime
cpuTime = job.getAttributeInt('CPUTime')
if not cpuTime:
    cpuTime = 86400
if cpuTime > queueDict.get('CPUTime', 0.):
noMatchReasons.append('Job CPUTime requirement not satisfied')
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 2. Multi-value match requirements
for parameter in ['Site', 'GridCE', 'Platform', 'GridMiddleware',
'PilotType', 'SubmitPool', 'JobType']:
if parameter in queueDict:
valueSet = set(job.getListFromExpression(parameter))
if not valueSet:
valueSet = set(job.getListFromExpression('%ss' % parameter))
queueSet = set(fromChar(queueDict[parameter]))
if valueSet and queueSet and not valueSet.intersection(queueSet):
valueToPrint = ','.join(valueSet)
if len(valueToPrint) > 20:
valueToPrint = "%s..." % valueToPrint[:20]
noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 3. Banned multi-value match requirements
for par in ['Site', 'GridCE', 'Platform', 'GridMiddleware',
'PilotType', 'SubmitPool', 'JobType']:
parameter = "Banned%s" % par
if par in queueDict:
valueSet = set(job.getListFromExpression(parameter))
if not valueSet:
valueSet = set(job.getListFromExpression('%ss' % parameter))
queueSet = set(fromChar(queueDict[par]))
if valueSet and queueSet and valueSet.issubset(queueSet):
valueToPrint = ','.join(valueSet)
if len(valueToPrint) > 20:
valueToPrint = "%s..." % valueToPrint[:20]
noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 4. Tags
tags = set(job.getListFromExpression('Tag'))
nProc = job.getAttributeInt('NumberOfProcessors')
if nProc and nProc > 1:
tags.add('MultiProcessor')
wholeNode = job.getAttributeString('WholeNode')
if wholeNode:
tags.add('WholeNode')
queueTags = set(queueDict.get('Tags', []))
if not tags.issubset(queueTags):
noMatchReasons.append('Job Tag %s not satisfied' % ','.join(tags))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
  # 5. MultiProcessor requirements
if nProc and nProc > int(queueDict.get('NumberOfProcessors', 1)):
noMatchReasons.append('Job NumberOfProcessors %d requirement not satisfied' % nProc)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
  # 6. RAM
ram = job.getAttributeInt('RAM')
# If MaxRAM is not specified in the queue description, assume 2GB
if ram and ram > int(queueDict.get('MaxRAM', 2048)) / 1024:
noMatchReasons.append('Job RAM %d requirement not satisfied' % ram)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# Check resource requirements to job
# 1. OwnerGroup - rare case but still
if "OwnerGroup" in queueDict:
result = getProxyInfo(disableVOMS=True)
if not result['OK']:
return S_ERROR('No valid proxy available')
ownerGroup = result['Value']['group']
if ownerGroup != queueDict['OwnerGroup']:
noMatchReasons.append('Resource OwnerGroup %s requirement not satisfied' % queueDict['OwnerGroup'])
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 2. Required tags
requiredTags = set(queueDict.get('RequiredTags', []))
if not requiredTags.issubset(tags):
noMatchReasons.append('Resource RequiredTags %s not satisfied' % ','.join(requiredTags))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 3. RunningLimit
site = queueDict['Site']
opsHelper = Operations()
result = opsHelper.getSections('JobScheduling/RunningLimit')
if result['OK'] and site in result['Value']:
result = opsHelper.getSections('JobScheduling/RunningLimit/%s' % site)
if result['OK']:
for parameter in result['Value']:
value = job.getAttributeString(parameter)
if value and opsHelper.getValue('JobScheduling/RunningLimit/%s/%s/%s' % (site, parameter, value), 1) == 0:
noMatchReasons.append('Resource operational %s requirement not satisfied' % parameter)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
return S_OK({'Match': not bool(noMatchReasons), 'Reason': noMatchReasons})
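# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original DIRAC module): minimal use of
# getQueuesResolved() with a hypothetical site/CE/queue layout of the shape
# returned by Resources.getQueues(). Names and numbers below are made up.
def _exampleGetQueuesResolved():
  siteDict = {'LCG.Example.org': {
      'ce01.example.org': {
          'CEType': 'HTCondorCE',
          'Queues': {'long': {'maxCPUTime': '2880', 'SI00': '2500'}}}}}
  result = getQueuesResolved(siteDict)
  if result['OK']:
    # Queues are keyed as '<CE>_<queue>'; CPUTime is derived as shown above
    return result['Value']['ce01.example.org_long']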
|
fstagni/DIRAC
|
WorkloadManagementSystem/Utilities/QueueUtilities.py
|
Python
|
gpl-3.0
| 8,673
|
[
"DIRAC"
] |
b3862e5006069c940ebd5da32ee42695cb29df0b6b5809f8cb1af61ad9a95554
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
class Migration(migrations.Migration):
dependencies = [
('visit', '0071_auto_20150826_2053'),
]
operations = [
migrations.AlterField(
model_name='participanttype',
name='key',
field=models.UUIDField(default=uuid.uuid4, unique=True),
),
]
|
koebbe/homeworks
|
visit/migrations/0072_auto_20150826_2057.py
|
Python
|
mit
| 436
|
[
"VisIt"
] |
61171113baadc6bd484f9259d781c7aaddb06d581dab7d884ea0253388ef79f5
|
# -*- coding: utf-8 -*-
###############################################################################
# Name: arpeggio.py
# Purpose: PEG parser interpreter
# Author: Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# Copyright: (c) 2009-2015 Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# License: MIT License
#
# This is an implementation of packrat parser interpreter based on PEG
# grammars. Grammars are defined using Python language constructs or the PEG
# textual notation.
###############################################################################
from __future__ import print_function, unicode_literals
import sys
if sys.version < '3':
text = unicode
else:
text = str
import codecs
import re
import bisect
from arpeggio.utils import isstr
import types
DEFAULT_WS = '\t\n\r '
NOMATCH_MARKER = 0
class ArpeggioError(Exception):
"""
Base class for arpeggio errors.
"""
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class GrammarError(ArpeggioError):
"""
Error raised during parser building phase used to indicate error in the
grammar definition.
"""
class SemanticError(ArpeggioError):
"""
Error raised during the phase of semantic analysis used to indicate
semantic error.
"""
class NoMatch(Exception):
"""
Exception raised by the Match classes during parsing to indicate that the
match is not successful.
Args:
rules (list of ParserExpression): Rules that are tried at the position
of the exception.
position (int): A position in the input stream where exception
occurred.
parser (Parser): An instance of a parser.
"""
def __init__(self, rules, position, parser):
self.rules = rules
self.position = position
self.parser = parser
def __str__(self):
def rule_to_exp_str(rule):
if hasattr(rule, '_exp_str'):
# Rule may override expected report string
return rule._exp_str
elif rule.root:
return rule.rule_name
elif isinstance(rule, Match) and \
not isinstance(rule, EndOfFile):
return "'{}'".format(rule.to_match)
else:
return rule.name
what_is_expected = ["{}".format(rule_to_exp_str(r)) for r in self.rules]
        if len(what_is_expected) == 1:
what_str = "'{}'".format(what_is_expected[0])
else:
what_str = " or ".join(what_is_expected)
if self.parser.file_name:
return "Expected {} at {}:{} => '{}'."\
.format(what_str,
self.parser.file_name,
text(self.parser.pos_to_linecol(self.position)),
self.parser.context(position=self.position))
else:
return "Expected {} at position {} => '{}'."\
.format(what_str,
text(self.parser.pos_to_linecol(self.position)),
self.parser.context(position=self.position))
def __unicode__(self):
return self.__str__()
def flatten(_iterable):
'''Flattening of python iterables.'''
result = []
for e in _iterable:
if hasattr(e, "__iter__") and not type(e) in [text, NonTerminal]:
result.extend(flatten(e))
else:
result.append(e)
return result
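# ---------------------------------------------------------
# Illustrative note (not part of the original source): with the parser model
# classes defined below, a grammar is usually written as plain Python
# functions returning nested parser expressions, e.g. a comma-separated list
# of integers terminated by EOF:
#
#   def number():   return RegExMatch(r'\d+')
#   def numbers():  return number, ZeroOrMore(",", number), EOF
#
# A Parser implementation (defined elsewhere in arpeggio) converts such
# functions into the ParsingExpression graph that is interpreted here.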
class DebugPrinter(object):
"""
Mixin class for adding debug print support.
Attributes:
debug (bool): If true debugging messages will be printed.
        _current_ident(int): Current indentation level for prints.
"""
def __init__(self, **kwargs):
self.debug = kwargs.pop("debug", False)
self._current_ident = 0
super(DebugPrinter, self).__init__(**kwargs)
def dprint(self, message, ident_change=0):
"""
Handle debug message. Current implementation will print to stdout using
        the current indentation level.
"""
if ident_change < 0:
self._current_ident += ident_change
print(("%s%s" % (" " * self._current_ident, message)))
if ident_change > 0:
self._current_ident += ident_change
# ---------------------------------------------------------
# Parser Model (PEG Abstract Semantic Graph) elements
class ParsingExpression(object):
"""
An abstract class for all parsing expressions.
Represents the node of the Parser Model.
Attributes:
elements: A list (or other python object) used as a staging structure
for python based grammar definition. Used in _from_python for
building nodes list of child parser expressions.
rule_name (str): The name of the parser rule if this is the root rule.
root (bool): Does this parser expression represents the
root of the parser rule? The root parser rule will create
non-terminal node of the parse tree during parsing.
nodes (list of ParsingExpression): A list of child parser expressions.
        suppress (bool): If this is set to True then no ParseTreeNode will be
created for this ParsingExpression. Default False.
"""
def __init__(self, *elements, **kwargs):
if len(elements) == 1:
elements = elements[0]
self.elements = elements
self.rule_name = kwargs.get('rule_name', '')
self.root = kwargs.get('root', False)
nodes = kwargs.get('nodes', [])
if not hasattr(nodes, '__iter__'):
nodes = [nodes]
self.nodes = nodes
self.suppress = kwargs.get('suppress', False)
        # Memoization. Every node caches the parsing results for the given input
# positions.
self._result_cache = {} # position -> parse tree at the position
@property
def desc(self):
return "{}{}".format(self.name, "-" if self.suppress else "")
@property
def name(self):
if self.root:
return "%s=%s" % (self.rule_name, self.__class__.__name__)
else:
return self.__class__.__name__
@property
def id(self):
if self.root:
return self.rule_name
else:
return id(self)
def _clear_cache(self, processed=None):
"""
Clears memoization cache. Should be called on input change and end
of parsing.
Args:
processed (set): Set of processed nodes to prevent infinite loops.
"""
self._result_cache = {}
if not processed:
processed = set()
for node in self.nodes:
if node not in processed:
processed.add(node)
node._clear_cache(processed)
def parse(self, parser):
if parser.debug:
name = self.name
if name.startswith('__asgn'):
name = "{}[{}]".format(self.name, self._attr_name)
parser.dprint(">> Matching rule {}{} at position {} => {}"
.format(name,
" in {}".format(parser.in_rule)
if parser.in_rule else "",
parser.position,
parser.context()), 1)
# Current position could change in recursive calls
# so save it.
c_pos = parser.position
# Memoization.
# If this position is already parsed by this parser expression use
# the result
if parser.memoization:
try:
result, new_pos = self._result_cache[c_pos]
parser.position = new_pos
parser.cache_hits += 1
if parser.debug:
parser.dprint(
"** Cache hit for [{}, {}] = '{}' : new_pos={}"
.format(name, c_pos, text(result), text(new_pos)))
parser.dprint(
"<<+ Matched rule {} at position {}"
.format(name, new_pos), -1)
# If NoMatch is recorded at this position raise.
if result is NOMATCH_MARKER:
raise parser.nm
# else return cached result
return result
except KeyError:
parser.cache_misses += 1
# Remember last parsing expression and set this as
# the new last.
last_pexpression = parser.last_pexpression
parser.last_pexpression = self
if self.rule_name:
# If we are entering root rule
# remember previous root rule name and set
# this one on the parser to be available for
# debugging messages
previous_root_rule_name = parser.in_rule
parser.in_rule = self.rule_name
try:
result = self._parse(parser)
if self.suppress or (type(result) is list and
result and result[0] is None):
result = None
except NoMatch:
parser.position = c_pos # Backtracking
# Memoize NoMatch at this position for this rule
if parser.memoization:
self._result_cache[c_pos] = (NOMATCH_MARKER, c_pos)
raise
finally:
# Recover last parsing expression.
parser.last_pexpression = last_pexpression
if parser.debug:
parser.dprint("<<{} rule {}{} at position {} => {}"
.format("- Not matched"
if parser.position is c_pos
else "+ Matched",
name,
" in {}".format(parser.in_rule)
if parser.in_rule else "",
parser.position,
parser.context()), -1)
# If leaving root rule restore previous root rule name.
if self.rule_name:
parser.in_rule = previous_root_rule_name
# For root rules flatten non-terminal/list
if self.root and result and not isinstance(result, Terminal):
if not isinstance(result, NonTerminal):
result = flatten(result)
# Tree reduction will eliminate Non-terminal with single child.
if parser.reduce_tree and len(result) == 1:
result = result[0]
# If the result is not parse tree node it must be a plain list
# so create a new NonTerminal.
if not isinstance(result, ParseTreeNode):
result = NonTerminal(self, result)
# Result caching for use by memoization.
if parser.memoization:
self._result_cache[c_pos] = (result, parser.position)
return result
class Sequence(ParsingExpression):
"""
    Will match a sequence of parser expressions in the exact order they are defined.
"""
def __init__(self, *elements, **kwargs):
super(Sequence, self).__init__(*elements, **kwargs)
self.ws = kwargs.pop('ws', None)
self.skipws = kwargs.pop('skipws', None)
def _parse(self, parser):
results = []
c_pos = parser.position
if self.ws is not None:
old_ws = parser.ws
parser.ws = self.ws
if self.skipws is not None:
old_skipws = parser.skipws
parser.skipws = self.skipws
# Prefetching
append = results.append
try:
for e in self.nodes:
result = e.parse(parser)
if result:
append(result)
except NoMatch:
parser.position = c_pos # Backtracking
raise
finally:
if self.ws is not None:
parser.ws = old_ws
if self.skipws is not None:
parser.skipws = old_skipws
if results:
return results
class OrderedChoice(Sequence):
"""
Will match one of the parser expressions specified. Parser will try to
match expressions in the order they are defined.
"""
def _parse(self, parser):
result = None
match = False
c_pos = parser.position
for e in self.nodes:
try:
result = e.parse(parser)
if result is not None:
match = True
result = [result]
break
except NoMatch:
parser.position = c_pos # Backtracking
if not match:
parser._nm_raise(self, c_pos, parser)
return result
class Repetition(ParsingExpression):
"""
Base class for all repetition-like parser expressions (?,*,+)
Args:
eolterm(bool): Flag that indicates that end of line should
terminate repetition match.
"""
def __init__(self, *elements, **kwargs):
super(Repetition, self).__init__(*elements, **kwargs)
if 'eolterm' in kwargs:
self.eolterm = kwargs['eolterm']
else:
self.eolterm = False
class Optional(Repetition):
"""
Optional will try to match parser expression specified and will not fail
in case match is not successful.
"""
def _parse(self, parser):
result = None
c_pos = parser.position
# Set parser for optional mode
oldin_optional = parser.in_optional
parser.in_optional = True
try:
result = [self.nodes[0].parse(parser)]
except NoMatch:
parser.position = c_pos # Backtracking
# Restore in_optional state
parser.in_optional = oldin_optional
return result
class ZeroOrMore(Repetition):
"""
ZeroOrMore will try to match parser expression specified zero or more
times. It will never fail.
"""
def _parse(self, parser):
results = []
if self.eolterm:
# Remember current eolterm and set eolterm of
# this repetition
old_eolterm = parser.eolterm
parser.eolterm = self.eolterm
# Set parser for optional mode
oldin_optional = parser.in_optional
parser.in_optional = True
# Prefetching
append = results.append
p = self.nodes[0].parse
while True:
try:
c_pos = parser.position
result = p(parser)
if not result:
break
append(result)
except NoMatch:
parser.position = c_pos # Backtracking
break
if self.eolterm:
# Restore previous eolterm
parser.eolterm = old_eolterm
# Restore in_optional state
parser.in_optional = oldin_optional
return results
class OneOrMore(Repetition):
"""
OneOrMore will try to match parser expression specified one or more times.
"""
def _parse(self, parser):
results = []
first = True
if self.eolterm:
# Remember current eolterm and set eolterm of
# this repetition
old_eolterm = parser.eolterm
parser.eolterm = self.eolterm
# Set parser for optional mode
oldin_optional = parser.in_optional
# Prefetching
append = results.append
p = self.nodes[0].parse
try:
while True:
try:
c_pos = parser.position
result = p(parser)
if not result:
break
append(result)
first = False
parser.in_optional = True
except NoMatch:
parser.position = c_pos # Backtracking
if first:
raise
break
finally:
if self.eolterm:
# Restore previous eolterm
parser.eolterm = old_eolterm
# Restore in_optional state
parser.in_optional = oldin_optional
return results
class SyntaxPredicate(ParsingExpression):
"""
Base class for all syntax predicates (and, not, empty).
Predicates are parser expressions that will do the match but will not
consume any input.
"""
class And(SyntaxPredicate):
"""
This predicate will succeed if the specified expression matches current
input.
"""
def _parse(self, parser):
c_pos = parser.position
for e in self.nodes:
try:
e.parse(parser)
except NoMatch:
parser.position = c_pos
raise
parser.position = c_pos
class Not(SyntaxPredicate):
"""
This predicate will succeed if the specified expression doesn't match
current input.
"""
def _parse(self, parser):
c_pos = parser.position
for e in self.nodes:
try:
e.parse(parser)
except NoMatch:
parser.position = c_pos
return
parser.position = c_pos
parser._nm_raise(self, c_pos, parser)
class Empty(SyntaxPredicate):
"""
This predicate will always succeed without consuming input.
"""
def _parse(self, parser):
pass
class Decorator(ParsingExpression):
"""
    Decorators are a special kind of parsing expression used to mark
a containing pexpression and give it some special semantics.
For example, decorators are used to mark pexpression as lexical
rules (see :class:Lex).
"""
class Combine(Decorator):
"""
This decorator defines pexpression that represents a lexeme rule.
    These rules will always return a Terminal parse tree node.
Whitespaces will be preserved. Comments will not be matched.
"""
def _parse(self, parser):
results = []
oldin_lex_rule = parser.in_lex_rule
parser.in_lex_rule = True
c_pos = parser.position
try:
for parser_model_node in self.nodes:
results.append(parser_model_node.parse(parser))
results = flatten(results)
# Create terminal from result
return Terminal(self, c_pos,
"".join([x.flat_str() for x in results]))
except NoMatch:
parser.position = c_pos # Backtracking
raise
finally:
parser.in_lex_rule = oldin_lex_rule
class Match(ParsingExpression):
"""
Base class for all classes that will try to match something from the input.
"""
def __init__(self, rule_name, root=False):
super(Match, self).__init__(rule_name=rule_name, root=root)
@property
def name(self):
if self.root:
return "%s=%s(%s)" % (self.rule_name, self.__class__.__name__,
self.to_match)
else:
return "%s(%s)" % (self.__class__.__name__, self.to_match)
def _parse_comments(self, parser):
"""Parse comments."""
try:
parser.in_parse_comments = True
if parser.comments_model:
try:
while True:
# TODO: Consumed whitespaces and comments should be
# attached to the first match ahead.
parser.comments.append(
parser.comments_model.parse(parser))
if parser.skipws:
parser._skip_ws()
except NoMatch:
# NoMatch in comment matching is perfectly
# legal and no action should be taken.
pass
finally:
parser.in_parse_comments = False
def parse(self, parser):
if parser.skipws and not parser.in_lex_rule:
parser._skip_ws()
if parser.debug:
parser.dprint(
"?? Try match rule {}{} at position {} => {}"
.format(self.name,
" in {}".format(parser.in_rule)
if parser.in_rule else "",
parser.position,
parser.context()))
if parser.position in parser.comment_positions:
# Skip comments if already parsed.
parser.position = parser.comment_positions[parser.position]
else:
if not parser.in_parse_comments and not parser.in_lex_rule:
comment_start = parser.position
self._parse_comments(parser)
parser.comment_positions[comment_start] = parser.position
result = self._parse(parser)
if not self.suppress:
return result
class RegExMatch(Match):
'''
This Match class will perform input matching based on Regular Expressions.
Args:
to_match (regex string): A regular expression string to match.
It will be used to create regular expression using re.compile.
ignore_case(bool): If case insensitive match is needed.
Default is None to support propagation from global parser setting.
str_repr(str): A string that is used to represent this regex.
'''
def __init__(self, to_match, rule_name='', root=False, ignore_case=None,
str_repr=None):
super(RegExMatch, self).__init__(rule_name, root)
self.to_match_regex = to_match
self.ignore_case = ignore_case
self.to_match = str_repr if str_repr is not None else to_match
def compile(self):
flags = re.MULTILINE
if self.ignore_case:
flags |= re.IGNORECASE
self.regex = re.compile(self.to_match_regex, flags)
def __str__(self):
return self.to_match
def __unicode__(self):
return self.__str__()
def _parse(self, parser):
c_pos = parser.position
m = self.regex.match(parser.input, c_pos)
if m:
matched = m.group()
if parser.debug:
parser.dprint(
"++ Match '%s' at %d => '%s'" %
(matched, c_pos, parser.context(len(matched))))
parser.position += len(matched)
if matched:
return Terminal(self, c_pos, matched)
else:
if parser.debug:
parser.dprint("-- NoMatch at {}".format(c_pos))
parser._nm_raise(self, c_pos, parser)
class StrMatch(Match):
"""
This Match class will perform input matching by a string comparison.
Args:
to_match (str): A string to match.
ignore_case(bool): If case insensitive match is needed.
Default is None to support propagation from global parser setting.
"""
def __init__(self, to_match, rule_name='', root=False, ignore_case=None):
super(StrMatch, self).__init__(rule_name, root)
self.to_match = to_match
self.ignore_case = ignore_case
def _parse(self, parser):
c_pos = parser.position
input_frag = parser.input[c_pos:c_pos+len(self.to_match)]
if self.ignore_case:
match = input_frag.lower() == self.to_match.lower()
else:
match = input_frag == self.to_match
if match:
if parser.debug:
parser.dprint(
"++ Match '{}' at {} => '{}'"
.format(self.to_match, c_pos,
parser.context(len(self.to_match))))
parser.position += len(self.to_match)
            # If this match is inside a sequence then mark it for suppression
suppress = type(parser.last_pexpression) is Sequence
return Terminal(self, c_pos, self.to_match, suppress=suppress)
else:
if parser.debug:
parser.dprint(
"-- No match '{}' at {} => '{}'"
.format(self.to_match, c_pos,
parser.context(len(self.to_match))))
parser._nm_raise(self, c_pos, parser)
def __str__(self):
return self.to_match
def __unicode__(self):
return self.__str__()
def __eq__(self, other):
return self.to_match == text(other)
def __hash__(self):
return hash(self.to_match)
# HACK: Kwd class is a bit hackish. Need to find a better way to
# introduce different classes of string tokens.
class Kwd(StrMatch):
"""
A specialization of StrMatch to specify keywords of the language.
"""
def __init__(self, to_match):
super(Kwd, self).__init__(to_match)
self.to_match = to_match
self.root = True
self.rule_name = 'keyword'
class EndOfFile(Match):
"""
The Match class that will succeed in case end of input is reached.
"""
def __init__(self):
super(EndOfFile, self).__init__("EOF")
@property
def name(self):
return "EOF"
def _parse(self, parser):
c_pos = parser.position
if len(parser.input) == c_pos:
return Terminal(EOF(), c_pos, '', suppress=True)
else:
if parser.debug:
parser.dprint("!! EOF not matched.")
parser._nm_raise(self, c_pos, parser)
def EOF():
return EndOfFile()
# ---------------------------------------------------------
# ---------------------------------------------------
# Parse Tree node classes
class ParseTreeNode(object):
"""
Abstract base class representing node of the Parse Tree.
The node can be terminal(the leaf of the parse tree) or non-terminal.
Attributes:
rule (ParsingExpression): The rule that created this node.
rule_name (str): The name of the rule that created this node if
root rule or empty string otherwise.
position (int): A position in the input stream where the match
occurred.
error (bool): Is this a false parse tree node created during error
recovery.
comments : A parse tree of comment(s) attached to this node.
"""
def __init__(self, rule, position, error):
assert rule
assert rule.rule_name is not None
self.rule = rule
self.rule_name = rule.rule_name
self.position = position
self.error = error
self.comments = None
@property
def name(self):
return "%s [%s]" % (self.rule_name, self.position)
def visit(self, visitor):
"""
Visitor pattern implementation.
Args:
visitor(PTNodeVisitor): The visitor object.
"""
if visitor.debug:
visitor.dprint("Visiting {} type:{} str:{}"
.format(self.name, type(self).__name__, text(self)))
children = SemanticActionResults()
if isinstance(self, NonTerminal):
for node in self:
child = node.visit(visitor)
# If visit returns None suppress that child node
if child is not None:
children.append_result(node.rule_name, child)
visit_name = "visit_%s" % self.rule_name
if hasattr(visitor, visit_name):
# Call visit method.
result = getattr(visitor, visit_name)(self, children)
# If there is a method with 'second' prefix save
# the result of visit for post-processing
if hasattr(visitor, "second_%s" % self.rule_name):
visitor.for_second_pass.append((self.rule_name, result))
return result
elif visitor.defaults:
# If default actions are enabled
return visitor.visit__default__(self, children)
class Terminal(ParseTreeNode):
"""
Leaf node of the Parse Tree. Represents matched string.
Attributes:
rule (ParsingExpression): The rule that created this terminal.
position (int): A position in the input stream where match occurred.
value (str): Matched string at the given position or missing token
name in the case of an error node.
suppress(bool): If True this terminal can be ignored in semantic
analysis.
"""
__slots__ = ['rule', 'rule_name', 'position', 'error', 'comments',
'value', 'suppress']
def __init__(self, rule, position, value, error=False, suppress=False):
super(Terminal, self).__init__(rule, position, error)
self.value = value
self.suppress = suppress
@property
def desc(self):
if self.value:
return "%s '%s' [%s]" % (self.rule_name, self.value, self.position)
else:
return "%s [%s]" % (self.rule_name, self.position)
def flat_str(self):
return self.value
def __str__(self):
return self.value
def __unicode__(self):
return self.__str__()
def __repr__(self):
return self.desc
def __eq__(self, other):
return text(self) == text(other)
class NonTerminal(ParseTreeNode, list):
"""
Non-leaf node of the Parse Tree. Represents language syntax construction.
At the same time used in ParseTreeNode navigation expressions.
See test_ptnode_navigation_expressions.py for examples of navigation
expressions.
Attributes:
nodes (list of ParseTreeNode): Children parse tree nodes.
_filtered (bool): Is this NT a dynamically created filtered NT.
This is used internally.
"""
__slots__ = ['rule', 'rule_name', 'position', 'error', 'comments',
'_filtered', '_expr_cache']
def __init__(self, rule, nodes, error=False, _filtered=False):
# Inherit position from the first child node
position = nodes[0].position
super(NonTerminal, self).__init__(rule, position, error)
self.extend(flatten([nodes]))
self._filtered = _filtered
@property
def value(self):
"""Terminal protocol."""
return text(self)
@property
def desc(self):
return self.name
def flat_str(self):
"""
        Return a flattened string representation.
"""
return "".join([x.flat_str() for x in self])
def __str__(self):
return " | ".join([text(x) for x in self])
def __unicode__(self):
return self.__str__()
def __repr__(self):
return "[ %s ]" % ", ".join([repr(x) for x in self])
def __getattr__(self, rule_name):
"""
Find a child (non)terminal by the rule name.
Args:
rule_name(str): The name of the rule that is referenced from
this node rule.
"""
# Prevent infinite recursion
if rule_name in ['_expr_cache', '_filtered', 'rule', 'rule_name',
'position', 'append', 'extend']:
raise AttributeError
try:
# First check the cache
if rule_name in self._expr_cache:
return self._expr_cache[rule_name]
except AttributeError:
# Navigation expression cache. Used for lookup by rule name.
self._expr_cache = {}
# If result is not found in the cache collect all nodes
# with the given rule name and create new NonTerminal
# and cache it for later access.
nodes = []
rule = None
for n in self:
if self._filtered:
# For filtered NT rule_name is a rule on
# each of its children
for m in n:
if m.rule_name == rule_name:
nodes.append(m)
rule = m.rule
else:
if n.rule_name == rule_name:
nodes.append(n)
rule = n.rule
result = NonTerminal(rule=rule, nodes=nodes, _filtered=True)
self._expr_cache[rule_name] = result
return result
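# A minimal sketch of parse tree navigation by rule name, relying on the
# NonTerminal.__getattr__ behaviour above; grammar and input are illustrative.
def _example_nonterminal_navigation():
    def second():
        return RegExMatch(r'\w+')
    def first():
        return 'foo', second
    parser = ParserPython(first)
    tree = parser.parse("foo bar")
    # All children produced by the rule 'second' are collected into a
    # dynamically created (filtered) NonTerminal.
    return tree.second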
# ----------------------------------------------------
# Semantic Actions
#
class PTNodeVisitor(DebugPrinter):
"""
Base class for all parse tree visitors.
"""
def __init__(self, defaults=True, **kwargs):
"""
Args:
defaults(bool): If the default visit method should be applied in
case no method is defined.
"""
self.for_second_pass = []
self.defaults = defaults
super(PTNodeVisitor, self).__init__(**kwargs)
def visit__default__(self, node, children):
"""
Called if no visit method is defined for the node.
Args:
node(ParseTreeNode):
children(processed children ParseTreeNode-s):
"""
if isinstance(node, Terminal):
# Default for Terminal is to convert to string unless suppress flag
# is set in which case it is suppressed by setting to None.
retval = text(node) if not node.suppress else None
else:
retval = node
        # Special case. If only one child exists, return it.
if len(children) == 1:
retval = children[0]
else:
            # If there is only one non-string child, return it
            # by default. This supports e.g. bracket removal.
last_non_str = None
for c in children:
if not isstr(c):
if last_non_str is None:
last_non_str = c
else:
                        # If there are multiple non-string objects,
                        # convert the non-terminal to a string by default
if self.debug:
self.dprint("*** Warning: Multiple "
"non-string objects found in "
"default visit. Converting non-"
"terminal to a string.")
retval = text(node)
break
else:
# Return the only non-string child
retval = last_non_str
return retval
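# A minimal visitor sketch: a PTNodeVisitor subclass with a visit_<rule_name>
# method, matching the dispatch convention in ParseTreeNode.visit() above.
# The rule name 'number' is illustrative.
class _ExampleNumberVisitor(PTNodeVisitor):
    def visit_number(self, node, children):
        # Convert every match of the hypothetical 'number' rule to an int
        # during the first pass.
        return int(node.value)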
def visit_parse_tree(parse_tree, visitor):
"""
Applies visitor to parse_tree and runs the second pass
afterwards.
Args:
parse_tree(ParseTreeNode):
visitor(PTNodeVisitor):
"""
if not parse_tree:
raise Exception(
"Parse tree is empty. You did call parse(), didn't you?")
if visitor.debug:
visitor.dprint("ASG: First pass")
# Visit tree.
result = parse_tree.visit(visitor)
# Second pass
if visitor.debug:
visitor.dprint("ASG: Second pass")
for sa_name, asg_node in visitor.for_second_pass:
getattr(visitor, "second_%s" % sa_name)(asg_node)
return result
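# A minimal end-to-end sketch of the visitor flow: parse the input, then apply
# a visitor through visit_parse_tree(). Grammar, input and visitor are
# illustrative.
def _example_visit_parse_tree():
    class NumberToInt(PTNodeVisitor):
        def visit_number(self, node, children):
            return int(node.value)
    def number():
        return RegExMatch(r'\d+')
    def numbers():
        return OneOrMore(number), EOF
    parser = ParserPython(numbers)
    tree = parser.parse("10 20 30")
    return visit_parse_tree(tree, NumberToInt())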
class SemanticAction(object):
"""
    Semantic actions are executed during semantic analysis. They are in charge
    of producing the Abstract Semantic Graph (ASG) out of the parse tree.
    Every non-terminal and terminal can have a semantic action defined which
    will be triggered during semantic analysis.
    Semantic action triggering is separated into two passes. The first_pass
    method is required; the optional second_pass method, if defined, is called
    after the first pass. The second pass can be used for forward referencing,
    e.g. linking to a declaration registered in the first pass stage.
"""
def first_pass(self, parser, node, nodes):
"""
Called in the first pass of tree walk.
This is the default implementation used if no semantic action is
defined.
"""
if isinstance(node, Terminal):
# Default for Terminal is to convert to string unless suppress flag
# is set in which case it is suppressed by setting to None.
retval = text(node) if not node.suppress else None
else:
retval = node
        # Special case. If only one child exists, return it.
if len(nodes) == 1:
retval = nodes[0]
else:
# If there is only one non-string child return
# that by default. This will support e.g. bracket
# removals.
last_non_str = None
for c in nodes:
if not isstr(c):
if last_non_str is None:
last_non_str = c
else:
                        # If there are multiple non-string objects,
                        # convert the non-terminal to a string by default
if parser.debug:
parser.dprint(
"*** Warning: Multiple non-"
"string objects found in applying "
"default semantic action. Converting "
"non-terminal to string.")
retval = text(node)
break
else:
# Return the only non-string child
retval = last_non_str
return retval
class SemanticActionResults(list):
"""
    Used in visitor method calls to supply the results of semantic analysis
    of child parse tree nodes.
Enables dot access by the name of the rule similar to NonTerminal
tree navigation.
Enables index access as well as iteration.
"""
def __init__(self):
self.results = {}
def append_result(self, name, result):
if name:
if name not in self.results:
self.results[name] = []
self.results[name].append(result)
self.append(result)
def __getattr__(self, attr_name):
if attr_name == 'results':
raise AttributeError
return self.results.get(attr_name, [])
# Common semantic actions
class SemanticActionSingleChild(SemanticAction):
def first_pass(self, parser, node, children):
return children[0]
class SemanticActionBodyWithBraces(SemanticAction):
def first_pass(self, parser, node, children):
return children[1:-1]
class SemanticActionToString(SemanticAction):
def first_pass(self, parser, node, children):
return text(node)
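# A minimal sketch of wiring the common semantic actions above into
# Parser.getASG() (defined below); rule names and input are illustrative.
def _example_semantic_actions():
    def word():
        return RegExMatch(r'\w+')
    def body():
        return '{', OneOrMore(word), '}'
    parser = ParserPython(body)
    parser.parse("{ a b c }")
    sem_actions = {'body': SemanticActionBodyWithBraces(),
                   'word': SemanticActionToString()}
    # Strips the surrounding braces and keeps the words as strings.
    return parser.getASG(sem_actions=sem_actions)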
# ----------------------------------------------------
# Parsers
class Parser(DebugPrinter):
"""
Abstract base class for all parsers.
Attributes:
comments_model: parser model for comments.
comments(list): A list of ParseTreeNode for matched comments.
sem_actions(dict): A dictionary of semantic actions keyed by the
rule name.
parse_tree(NonTerminal): The parse tree consisting of NonTerminal and
Terminal instances.
in_rule (str): Current rule name.
in_parse_comments (bool): True if parsing comments.
        in_optional (bool): True if parsing optionals (Optional, ZeroOrMore, or
            OneOrMore after the first match).
in_lex_rule (bool): True if in lexical rule. Currently used in Combine
decorator to convert match to a single Terminal.
last_pexpression (ParsingExpression): Last parsing expression traversed.
"""
def __init__(self, skipws=True, ws=None, reduce_tree=False,
autokwd=False, ignore_case=False, memoization=False, **kwargs):
"""
Args:
skipws (bool): Should the whitespace skipping be done. Default is
True.
ws (str): A string consisting of whitespace characters.
            reduce_tree (bool): If true, non-terminals with a single child
                will be eliminated from the parse tree. Default is False.
            autokwd(bool): If True, keyword-like StrMatches are matched on
                word boundaries. Default is False.
ignore_case(bool): If case is ignored (default=False)
memoization(bool): If memoization should be used
(a.k.a. packrat parsing)
"""
super(Parser, self).__init__(**kwargs)
# Used to indicate state in which parser should not
# treat newlines as whitespaces.
self._eolterm = False
self.skipws = skipws
if ws is not None:
self.ws = ws
else:
self.ws = DEFAULT_WS
self.reduce_tree = reduce_tree
self.autokwd = autokwd
self.ignore_case = ignore_case
self.memoization = memoization
self.comments_model = None
self.comments = []
self.comment_positions = {}
self.sem_actions = {}
self.parse_tree = None
# Create regex used for autokwd matching
flags = 0
if ignore_case:
flags = re.IGNORECASE
self.keyword_regex = re.compile(r'[^\d\W]\w*', flags)
# Keep track of root rule we are currently in.
# Used for debugging purposes
self.in_rule = ''
self.in_parse_comments = False
        # If under an optional PE (Optional, ZeroOrMore, or OneOrMore after
        # the first occurrence) we do not store NoMatch for error reporting.
self.in_optional = False
# Are we in lexical rule. If so do not
# skip whitespaces.
self.in_lex_rule = False
# Last parsing expression traversed
self.last_pexpression = None
@property
def ws(self):
return self._ws
@ws.setter
def ws(self, new_value):
self._real_ws = new_value
self._ws = new_value
if self.eolterm:
self._ws = self._ws.replace('\n', '').replace('\r', '')
@property
def eolterm(self):
return self._eolterm
@eolterm.setter
def eolterm(self, new_value):
# Toggle newline char in ws on eolterm property set.
        # While in eolterm state the parser should not treat
        # newlines as whitespace.
self._eolterm = new_value
if self._eolterm:
self._ws = self._ws.replace('\n', '').replace('\r', '')
else:
self._ws = self._real_ws
def parse(self, _input, file_name=None):
"""
Parses input and produces parse tree.
Args:
_input(str): An input string to parse.
file_name(str): If input is loaded from file this can be
set to file name. It is used in error messages.
"""
self.position = 0 # Input position
self.nm = None # Last NoMatch exception
self.line_ends = []
self.input = _input
self.file_name = file_name
self.comment_positions = {}
self.cache_hits = 0
self.cache_misses = 0
try:
self.parse_tree = self._parse()
finally:
# At end of parsing clear all memoization caches.
# Do this here to free memory.
if self.memoization:
self._clear_caches()
# In debug mode export parse tree to dot file for
# visualization
if self.debug:
from arpeggio.export import PTDOTExporter
root_rule_name = self.parse_tree.rule_name
PTDOTExporter().exportFile(
self.parse_tree, "{}_parse_tree.dot".format(root_rule_name))
return self.parse_tree
def parse_file(self, file_name):
"""
Parses content from the given file.
Args:
file_name(str): A file name.
"""
with codecs.open(file_name, 'r', 'utf-8') as f:
content = f.read()
return self.parse(content, file_name=file_name)
def getASG(self, sem_actions=None, defaults=True):
"""
Creates Abstract Semantic Graph (ASG) from the parse tree.
Args:
sem_actions (dict): The semantic actions dictionary to use for
semantic analysis. Rule names are the keys and semantic action
objects are values.
defaults (bool): If True a default semantic action will be
applied in case no action is defined for the node.
"""
if not self.parse_tree:
raise Exception(
"Parse tree is empty. You did call parse(), didn't you?")
if sem_actions is None:
if not self.sem_actions:
raise Exception("Semantic actions not defined.")
else:
sem_actions = self.sem_actions
if type(sem_actions) is not dict:
raise Exception("Semantic actions parameter must be a dictionary.")
for_second_pass = []
def tree_walk(node):
"""
            Walk the parse tree, call first_pass for every registered
            semantic action, and build the list of objects that need to be
            processed in the second pass.
"""
if self.debug:
self.dprint(
"Walking down %s type: %s str: %s" %
(node.name, type(node).__name__, text(node)))
children = SemanticActionResults()
if isinstance(node, NonTerminal):
for n in node:
child = tree_walk(n)
if child is not None:
children.append_result(n.rule_name, child)
if self.debug:
self.dprint("Processing %s = '%s' type:%s len:%d" %
(node.name, text(node), type(node).__name__,
len(node) if isinstance(node, list) else 0))
for i, a in enumerate(children):
self.dprint(" %d:%s type:%s" %
(i+1, text(a), type(a).__name__))
if node.rule_name in sem_actions:
sem_action = sem_actions[node.rule_name]
if isinstance(sem_action, types.FunctionType):
retval = sem_action(self, node, children)
else:
retval = sem_action.first_pass(self, node, children)
if hasattr(sem_action, "second_pass"):
for_second_pass.append((node.rule_name, retval))
if self.debug:
action_name = sem_action.__name__ \
if hasattr(sem_action, '__name__') \
else sem_action.__class__.__name__
self.dprint(" Applying semantic action %s" % action_name)
else:
if defaults:
# If no rule is present use some sane defaults
if self.debug:
self.dprint(" Applying default semantic action.")
retval = SemanticAction().first_pass(self, node, children)
else:
retval = node
if self.debug:
if retval is None:
self.dprint(" Suppressed.")
else:
self.dprint(" Resolved to = %s type:%s" %
(text(retval), type(retval).__name__))
return retval
if self.debug:
self.dprint("ASG: First pass")
asg = tree_walk(self.parse_tree)
# Second pass
if self.debug:
self.dprint("ASG: Second pass")
for sa_name, asg_node in for_second_pass:
sem_actions[sa_name].second_pass(self, asg_node)
return asg
def pos_to_linecol(self, pos):
"""
Calculate (line, column) tuple for the given position in the stream.
"""
if not self.line_ends:
try:
# TODO: Check this implementation on Windows.
self.line_ends.append(self.input.index("\n"))
while True:
try:
self.line_ends.append(
self.input.index("\n", self.line_ends[-1] + 1))
except ValueError:
break
except ValueError:
pass
line = bisect.bisect_left(self.line_ends, pos)
col = pos
if line > 0:
col -= self.line_ends[line - 1]
if self.input[self.line_ends[line - 1]] in '\n\r':
col -= 1
return line + 1, col + 1
def context(self, length=None, position=None):
"""
Returns current context substring, i.e. the substring around current
position.
Args:
            length(int): If given, the next `length` characters from the
                current position are marked with asterisks.
position(int): The position in the input stream.
"""
if not position:
position = self.position
if length:
retval = "{}*{}*{}".format(
text(self.input[max(position - 10, 0):position]),
text(self.input[position:position + length]),
text(self.input[position + length:position + 10]))
else:
retval = "{}*{}".format(
text(self.input[max(position - 10, 0):position]),
text(self.input[position:position + 10]))
return retval.replace('\n', ' ').replace('\r', '')
def _skip_ws(self):
"""
        Skip whitespace characters.
"""
pos = self.position
ws = self.ws
while pos < len(self.input) and \
self.input[pos] in ws:
pos += 1
self.position = pos
def _nm_raise(self, *args):
"""
        Register a new NoMatch object if more input has been consumed than at
        the last registered NoMatch, and raise the last NoMatch.
Args:
args: A NoMatch instance or (value, position, parser)
"""
if len(args) == 1:
exc = args[0]
if exc.position > self.nm.position:
self.nm = exc
else:
rule, position, parser = args
if self.nm is None or not parser.in_parse_comments:
if not self.nm or position > self.nm.position:
self.nm = NoMatch([rule], position, parser)
elif position == self.nm.position and isinstance(rule, Match):
self.nm.rules.append(rule)
raise self.nm
def _clear_caches(self):
"""
Clear memoization caches if packrat parser is used.
"""
self.parser_model._clear_cache()
if self.comments_model:
self.comments_model._clear_cache()
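# A minimal error-reporting sketch showing how pos_to_linecol() and context()
# are typically combined when parse() raises NoMatch; the parser and input are
# supplied by the caller.
def _example_report_syntax_error(parser, _input):
    try:
        return parser.parse(_input)
    except NoMatch as e:
        line, col = parser.pos_to_linecol(e.position)
        print("Syntax error at line {}, column {}: {}".format(
            line, col, parser.context(position=e.position)))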
class CrossRef(object):
'''
Used for rule reference resolving.
'''
def __init__(self, target_rule_name, position=-1):
self.target_rule_name = target_rule_name
self.position = position
class ParserPython(Parser):
def __init__(self, language_def, comment_def=None, *args, **kwargs):
"""
Constructs parser from python statements and expressions.
Args:
language_def (python function): A python function that defines
the root rule of the grammar.
comment_def (python function): A python function that defines
the root rule of the comments grammar.
"""
super(ParserPython, self).__init__(*args, **kwargs)
# PEG Abstract Syntax Graph
self.parser_model = self._from_python(language_def)
self.comments_model = None
if comment_def:
self.comments_model = self._from_python(comment_def)
self.comments_model.root = True
self.comments_model.rule_name = comment_def.__name__
# In debug mode export parser model to dot for
# visualization
if self.debug:
from arpeggio.export import PMDOTExporter
root_rule = language_def.__name__
PMDOTExporter().exportFile(self.parser_model,
"{}_parser_model.dot".format(root_rule))
def _parse(self):
return self.parser_model.parse(self)
def _from_python(self, expression):
"""
Create parser model from the definition given in the form of python
functions returning lists, tuples, callables, strings and
ParsingExpression objects.
Returns:
Parser Model (PEG Abstract Semantic Graph)
"""
__rule_cache = {"EndOfFile": EndOfFile()}
        __for_resolving = []  # Expressions that need cross-reference resolving
self.__cross_refs = 0
def inner_from_python(expression):
retval = None
if isinstance(expression, types.FunctionType):
# If this expression is a parser rule
rule_name = expression.__name__
if rule_name in __rule_cache:
c_rule = __rule_cache.get(rule_name)
if self.debug:
self.dprint("Rule {} founded in cache."
.format(rule_name))
if isinstance(c_rule, CrossRef):
self.__cross_refs += 1
if self.debug:
self.dprint("CrossRef usage: {}"
.format(c_rule.target_rule_name))
return c_rule
# Semantic action for the rule
if hasattr(expression, "sem"):
self.sem_actions[rule_name] = expression.sem
# Register rule cross-ref to support recursion
__rule_cache[rule_name] = CrossRef(rule_name)
curr_expr = expression
while isinstance(curr_expr, types.FunctionType):
                    # If the function directly returns another function,
                    # keep calling until a non-function is returned.
curr_expr = curr_expr()
retval = inner_from_python(curr_expr)
retval.rule_name = rule_name
retval.root = True
# Update cache
__rule_cache[rule_name] = retval
if self.debug:
self.dprint("New rule: {} -> {}"
.format(rule_name, retval.__class__.__name__))
elif type(expression) is text or isinstance(expression, StrMatch):
if type(expression) is text:
retval = StrMatch(expression, ignore_case=self.ignore_case)
else:
retval = expression
if expression.ignore_case is None:
expression.ignore_case = self.ignore_case
if self.autokwd:
to_match = retval.to_match
match = self.keyword_regex.match(to_match)
if match and match.span() == (0, len(to_match)):
retval = RegExMatch(r'{}\b'.format(to_match),
ignore_case=self.ignore_case,
str_repr=to_match)
retval.compile()
elif isinstance(expression, RegExMatch):
                # Regular expressions are not compiled yet, to support
                # propagation of global settings from the parser.
if expression.ignore_case is None:
expression.ignore_case = self.ignore_case
expression.compile()
retval = expression
elif isinstance(expression, Match):
retval = expression
elif isinstance(expression, Sequence) or \
isinstance(expression, Repetition) or \
isinstance(expression, SyntaxPredicate) or \
isinstance(expression, Decorator):
retval = expression
retval.nodes.append(inner_from_python(retval.elements))
if any((isinstance(x, CrossRef) for x in retval.nodes)):
__for_resolving.append(retval)
elif type(expression) in [list, tuple]:
if type(expression) is list:
retval = OrderedChoice(expression)
else:
retval = Sequence(expression)
retval.nodes = [inner_from_python(e) for e in expression]
if any((isinstance(x, CrossRef) for x in retval.nodes)):
__for_resolving.append(retval)
else:
raise GrammarError("Unrecognized grammar element '%s'." %
text(expression))
return retval
# Cross-ref resolving
def resolve():
for e in __for_resolving:
for i, node in enumerate(e.nodes):
if isinstance(node, CrossRef):
self.__cross_refs -= 1
e.nodes[i] = __rule_cache[node.target_rule_name]
parser_model = inner_from_python(expression)
resolve()
assert self.__cross_refs == 0, "Not all crossrefs are resolved!"
return parser_model
def errors(self):
pass
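# A minimal end-to-end sketch of ParserPython: the grammar is given as python
# functions, optionally with a comment grammar, as described in the
# constructor docstring. Rule names and input are illustrative.
def _example_parser_python():
    def comment():
        return RegExMatch(r'//.*')
    def number():
        return RegExMatch(r'\d+')
    def calc():
        return OneOrMore(number), EOF
    parser = ParserPython(calc, comment_def=comment)
    return parser.parse("1 2 3  // trailing comment")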
|
renatav/GraphDrawing
|
GraphLayoutDSL/target/classes/modules/arpeggio/__init__.py
|
Python
|
mit
| 57,497
|
[
"VisIt"
] |
5af597d77d3c858c513f6ce3206bb4bbada4e9a14178796e31b691db1d98c399
|
"""
Test atomic coordinates and neighbor lists.
"""
import os
import logging
import numpy as np
import unittest
from deepchem.utils import conformers
from deepchem.feat import AtomicCoordinates
from deepchem.feat import NeighborListAtomicCoordinates
from deepchem.feat import NeighborListComplexAtomicCoordinates
logger = logging.getLogger(__name__)
class TestAtomicCoordinates(unittest.TestCase):
"""
Test AtomicCoordinates.
"""
def setUp(self):
"""
Set up tests.
"""
smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
from rdkit import Chem
mol = Chem.MolFromSmiles(smiles)
engine = conformers.ConformerGenerator(max_conformers=1)
self.mol = engine.generate_conformers(mol)
self.get_angstrom_coords = AtomicCoordinates()._featurize
assert self.mol.GetNumConformers() > 0
def test_atomic_coordinates(self):
"""
Simple test that atomic coordinates returns ndarray of right shape.
"""
N = self.mol.GetNumAtoms()
atomic_coords_featurizer = AtomicCoordinates()
coords = atomic_coords_featurizer._featurize(self.mol)
assert isinstance(coords, np.ndarray)
assert coords.shape == (N, 3)
def test_neighbor_list_shape(self):
"""
Simple test that Neighbor Lists have right shape.
"""
nblist_featurizer = NeighborListAtomicCoordinates()
N = self.mol.GetNumAtoms()
coords = self.get_angstrom_coords(self.mol)
nblist_featurizer = NeighborListAtomicCoordinates()
nblist = nblist_featurizer._featurize(self.mol)[1]
assert isinstance(nblist, dict)
assert len(nblist.keys()) == N
for (atom, neighbors) in nblist.items():
assert isinstance(atom, int)
assert isinstance(neighbors, list)
assert len(neighbors) <= N
    # Do a manual distance computation and make sure the neighbor list agrees.
for i in range(N):
for j in range(N):
dist = np.linalg.norm(coords[i] - coords[j])
logger.info("Distance(%d, %d) = %f" % (i, j, dist))
if dist < nblist_featurizer.neighbor_cutoff and i != j:
assert j in nblist[i]
else:
assert j not in nblist[i]
def test_neighbor_list_extremes(self):
"""
Test Neighbor Lists with large/small boxes.
"""
N = self.mol.GetNumAtoms()
    # Test with a cutoff of 0.1 angstroms. There should be no neighbors.
nblist_featurizer = NeighborListAtomicCoordinates(neighbor_cutoff=.1)
nblist = nblist_featurizer._featurize(self.mol)[1]
for atom in range(N):
assert len(nblist[atom]) == 0
# Test with cutoff 100 angstroms. Everything should be neighbors now.
nblist_featurizer = NeighborListAtomicCoordinates(neighbor_cutoff=100)
nblist = nblist_featurizer._featurize(self.mol)[1]
for atom in range(N):
assert len(nblist[atom]) == N - 1
def test_neighbor_list_max_num_neighbors(self):
"""
Test that neighbor lists return only max_num_neighbors.
"""
N = self.mol.GetNumAtoms()
max_num_neighbors = 1
nblist_featurizer = NeighborListAtomicCoordinates(max_num_neighbors)
nblist = nblist_featurizer._featurize(self.mol)[1]
for atom in range(N):
assert len(nblist[atom]) <= max_num_neighbors
# Do a manual distance computation and ensure that selected neighbor is
# closest since we set max_num_neighbors = 1
coords = self.get_angstrom_coords(self.mol)
for i in range(N):
closest_dist = np.inf
closest_nbr = None
for j in range(N):
if i == j:
continue
dist = np.linalg.norm(coords[i] - coords[j])
logger.info("Distance(%d, %d) = %f" % (i, j, dist))
if dist < closest_dist:
closest_dist = dist
closest_nbr = j
logger.info("Closest neighbor to %d is %d" % (i, closest_nbr))
logger.info("Distance: %f" % closest_dist)
if closest_dist < nblist_featurizer.neighbor_cutoff:
assert nblist[i] == [closest_nbr]
else:
assert nblist[i] == []
def test_neighbor_list_periodic(self):
"""Test building a neighbor list with periodic boundary conditions."""
cutoff = 4.0
box_size = np.array([10.0, 8.0, 9.0])
N = self.mol.GetNumAtoms()
coords = self.get_angstrom_coords(self.mol)
featurizer = NeighborListAtomicCoordinates(
neighbor_cutoff=cutoff, periodic_box_size=box_size)
neighborlist = featurizer._featurize(self.mol)[1]
expected_neighbors = [set() for i in range(N)]
for i in range(N):
for j in range(i):
delta = coords[i] - coords[j]
delta -= np.round(delta / box_size) * box_size
if np.linalg.norm(delta) < cutoff:
expected_neighbors[i].add(j)
expected_neighbors[j].add(i)
for i in range(N):
assert (set(neighborlist[i]) == expected_neighbors[i])
def test_complex_featurization_simple(self):
"""Test Neighbor List computation on protein-ligand complex."""
dir_path = os.path.dirname(os.path.realpath(__file__))
ligand_file = os.path.join(dir_path, "data/3zso_ligand_hyd.pdb")
protein_file = os.path.join(dir_path, "data/3zso_protein.pdb")
max_num_neighbors = 4
complex_featurizer = NeighborListComplexAtomicCoordinates(max_num_neighbors)
system_coords, system_neighbor_list = complex_featurizer._featurize(
ligand_file, protein_file)
N = system_coords.shape[0]
assert len(system_neighbor_list.keys()) == N
for atom in range(N):
assert len(system_neighbor_list[atom]) <= max_num_neighbors
# TODO(rbharath): This test will be uncommented in the next PR up on the docket.
# def test_full_complex_featurization(self):
# """Unit test for ComplexNeighborListFragmentAtomicCoordinates."""
# dir_path = os.path.dirname(os.path.realpath(__file__))
# ligand_file = os.path.join(dir_path, "data/3zso_ligand_hyd.pdb")
# protein_file = os.path.join(dir_path, "data/3zso_protein.pdb")
# # Pulled from PDB files. For larger datasets with more PDBs, would use
# # max num atoms instead of exact.
# frag1_num_atoms = 44 # for ligand atoms
# frag2_num_atoms = 2336 # for protein atoms
# complex_num_atoms = 2380 # in total
# max_num_neighbors = 4
# # Cutoff in angstroms
# neighbor_cutoff = 4
# complex_featurizer = ComplexNeighborListFragmentAtomicCoordinates(
# frag1_num_atoms, frag2_num_atoms, complex_num_atoms, max_num_neighbors,
# neighbor_cutoff)
# (frag1_coords, frag1_neighbor_list, frag1_z, frag2_coords,
# frag2_neighbor_list, frag2_z, complex_coords,
# complex_neighbor_list, complex_z) = complex_featurizer._featurize_complex(
# ligand_file, protein_file)
#
# assert frag1_coords.shape == (frag1_num_atoms, 3)
# self.assertEqual(
# sorted(list(frag1_neighbor_list.keys())), list(range(frag1_num_atoms)))
# self.assertEqual(frag1_z.shape, (frag1_num_atoms,))
#
# self.assertEqual(frag2_coords.shape, (frag2_num_atoms, 3))
# self.assertEqual(
# sorted(list(frag2_neighbor_list.keys())), list(range(frag2_num_atoms)))
# self.assertEqual(frag2_z.shape, (frag2_num_atoms,))
#
# self.assertEqual(complex_coords.shape, (complex_num_atoms, 3))
# self.assertEqual(
# sorted(list(complex_neighbor_list.keys())),
# list(range(complex_num_atoms)))
# self.assertEqual(complex_z.shape, (complex_num_atoms,))
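# A small helper sketch (illustrative only): the minimum-image distance used
# in test_neighbor_list_periodic above, factored out as a standalone function.
def _min_image_distance(coord_i, coord_j, box_size):
  delta = coord_i - coord_j
  # Wrap the displacement vector back into the primary periodic box.
  delta -= np.round(delta / box_size) * box_size
  return np.linalg.norm(delta)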
|
lilleswing/deepchem
|
deepchem/feat/tests/test_atomic_coordinates.py
|
Python
|
mit
| 7,286
|
[
"RDKit"
] |
85b6ccdc6c0f7eb015015d9ebfb1f0adfe192daef0d746df14e33dacb295aef0
|
import cPickle, gzip, numpy as np
#py 3
# u = pickle._Unpickler(f)
# train_set, valid_set, test_set = u.load()
f = gzip.open('mnist.pkl.gz', 'rb')
f2 = open('matplot-text.txt', 'w+')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
print 'train_set length: ', len(train_set[0])
perceptron_layer = []
epochs = 1
learning_rate = 0.1
class Perceptron_Net(object):
def __init__(self, weight, bias):
self.weight = weight
self.bias = bias
self.layer = []
self.ok_rate = 0
self.error_rate = 0
def init_layer(self):
for i in range(10):
# print [numpy.random.uniform(0, 1, size=784) for i in range(10)]
# print len(train_set[0])
# shapes (50000,) and (784,) not aligned: 50000 (dim 0) != 784 (dim 0) => np.random.rand(1, len(train_set[0]))
self.layer.append(Perceptron(i))
# check for initialization
for perceptron in self.layer:
print perceptron, perceptron.digit # , i.weight, i.bias
def train_network(self):
for perceptron in self.layer:
print 'queued train: ', perceptron.digit
perceptron.train()
def test(self):
self.train_network()
ok = 0
clock_counter = 0
for i in range(len(test_set[0])):
maximum = -1
digitmax = -1
for digit in range(10):
z = np.dot(test_set[0][i], self.weight[digit]) + self.bias[digit]
if z > maximum:
maximum = z
digitmax = digit
if digitmax == test_set[1][i]:
ok += 1
self.ok_rate = ok * 1.0 / len(test_set[0]) * 100
self.error_rate = 100 - self.ok_rate
str = "%d, %f" % (clock_counter, self.error_rate)
print "clock, error: ", str
print clock_counter, self.error_rate
f2.write(str + "\n")
clock_counter += 10
#self.ok_rate = ok * 1.0 / len(test_set[0]) * 100
#self.error_rate = 100 - self.ok_rate
self.error_rate = 100 - (ok * 1.0 / len(test_set[0]) * 100)
self.ok_rate = ok * 1.0 / len(test_set[0]) * 100
print "Final result: ", self.ok_rate, "%"
print "Error rate: ", self.error_rate, "%"
        f2.close()
class Perceptron(object):
#error = 1
#errorRate = 100 - perceptron_net.ok_rate
def __init__(self, digit):
self.digit = digit
def description(self):
print "This is a perceptron object"
def activation(self, input):
# Function used for activation of the neuron
if (input > 0): return 1
return 0
def expected(self, value):
if self.digit == value:
return 1
return 0
def train(self):
'''
global epochs
allClassified = False
timeCounter = 0
while not allClassified and epochs > 0:
allClassified = True
# for x, t in train_set:
x = train_set[0]
t = train_set[1]
for i in range(len(train_set[0])):
# compute net input
z = np.sum(np.dot(self.weight[i], x[i]), self.bias[self.digit])
# classify the sample
output = self.activation(z)
# adjust the weights
self.weight[self.digit] = np.sum(self.weight[i], (self.expected(t[i]) - output) * x[i] * learning_rate) #* a[i] * learning_rate, for j in x[i])# adjust the weights
# adjust the bias
self.bias[self.digit] = self.bias[self.digit] + (t[i]-output) * learning_rate
if output != t[i]:
allClassified = False
self.error = self.expected(t[i]) - output
if self.digit == 0:
str = "%d,%d" % (timeCounter, self.error)
print str
#f2.write(str + "\n")
timeCounter += 10
epochs = epochs - 1
#f2.close
'''
print("###Train digit: " + str(self.digit))
for i in range(len(train_set[0])):
z = np.dot(train_set[0][i], perceptron_net.weight[self.digit]) + perceptron_net.bias[self.digit]
output = self.activation(z)
x = np.array(train_set[0][i]).dot((self.expected(train_set[1][i]) - output) * learning_rate)
perceptron_net.weight[self.digit] = np.add(perceptron_net.weight[self.digit], x)
perceptron_net.bias[self.digit] += (self.expected(train_set[1][i]) - output) * learning_rate
print("---Digit trained: " + str(self.digit))
perceptron_net = Perceptron_Net([np.random.uniform(0, 1, size=784) for i in range(10)], np.zeros(10))
perceptron_net.init_layer()
perceptron_net.train_network()
perceptron_net.test()
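# Illustrative sketch only: the per-sample perceptron update applied in
# Perceptron.train() above, written as a standalone function over numpy
# arrays (w <- w + (t - y)*lr*x, b <- b + (t - y)*lr).
def perceptron_update(weight, bias, x, target, output, lr=learning_rate):
    delta = (target - output) * lr
    return weight + delta * x, bias + delta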
|
xR86/ml-stuff
|
labs-neural-networks/hw-lab4/mnist-perceptron.py
|
Python
|
mit
| 4,852
|
[
"NEURON"
] |
831086f11ad059bf73303ed892a554391181ddfbbeb114f24a950859eddc407e
|
"""
gene_server_dump v0.01
gene server dump
Copyright 2011 Brian Monkaba
This file is part of ga-bitbot.
ga-bitbot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ga-bitbot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ga-bitbot. If not, see <http://www.gnu.org/licenses/>.
"""
#
# Dumps the stored gene data from the server to a local file
#
__appversion__ = "0.01a"
print "Genetic Bitcoin Gene Server Dump v%s"%__appversion__
# connect to the xml server
#
import gene_server_config
import xmlrpclib
import json
import time
__server__ = gene_server_config.__server__
__port__ = str(gene_server_config.__port__)
#make sure the port number matches the server.
server = xmlrpclib.Server('http://' + __server__ + ":" + __port__)
print "Connected to",__server__,":",__port__
def ppdict(d):
#pretty print a dict
print "-"*40
try:
for key in d.keys():
print key,':',d[key]
except:
print d
return d
def pwdict(d,filename):
#pretty write a dict
f = open(filename,'w')
try:
for key in d.keys():
f.write(key + " : " + str(d[key]) + "\n")
except:
pass
f.write('\n' + '-'*80 + '\n')
f.write(str(d))
f.close()
return d
for quartile in [1,2,3,4]:
try:
print "-"*80
print "Quartile:",quartile
ag = json.loads(server.get(60*360,quartile))
#ppdict(ag)
print "gene last updated",time.time() - ag['time'],"seconds ago.", "SCORE:",ag['score']
pwdict(ag,'./test_data/gene_high_score_' + str(quartile))
except:
pass
print "-"*80
print "PID STATUS:"
pid_status = json.loads(server.get_pids())
#ppdict(pid_status)
pwdict(pid_status,'./test_data/pid_status')
print "-"*80
print "PID Watchdog Check (90 sec): "
for pid in pid_status.keys():
print pid,server.pid_check(pid,90)
#save all genes
for quartile in [1,2,3,4]:
gd = {'bobs':[],'high_scores':[]}
gd['high_scores'] = json.loads(server.get_all(quartile))
gd['bobs'] = json.loads(server.get_bobs(quartile))
f = open('./config/gene_server_db_backup_quartile' + str(quartile) + '.json','w')
f.write(json.dumps(gd))
f.close()
#print server.shutdown()
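# Illustrative sketch only: reading back one of the per-quartile backup files
# written above; the path mirrors the dump loop.
def load_quartile_backup(quartile):
    f = open('./config/gene_server_db_backup_quartile' + str(quartile) + '.json', 'r')
    gd = json.loads(f.read())
    f.close()
    return gd['high_scores'], gd['bobs']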
|
OndroNR/ga-bitbot
|
gene_server_dump.py
|
Python
|
gpl-3.0
| 2,672
|
[
"Brian"
] |
accaf3426cdc5990975bce0dd6d6bc38d1f6cc8f07791ff902a62f3f941e860f
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 1 18:05:48 2018
@author: davch
"""
#%%
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
#%%
h = .02
names = ["Nearest Neighbors", 'Linear SVM', "RBF SVM", 'Gaussian Process', 'Decision Tree',
'Random Forest', 'Neural Net', 'AdaBoost', 'Naive Bayes', 'QDA']
clfs = [
KNeighborsClassifier(3),
SVC(kernel='linear', C=0.025),
SVC(gamma=2, C=1.0),
GaussianProcessClassifier(1.0*RBF(1.0)),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2, random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2*rng.uniform(size=X.shape)
linearly_separable = (X,y)
datasets = [
make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable]
#%%
figure = plt.figure(figsize=(27,9))
i=1
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(clfs) + 1, i)
if ds_cnt == 0:
ax.set_title("Input data")
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, clfs):
ax = plt.subplot(len(datasets), len(clfs) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
edgecolors='k', alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
plt.savefig("results_classifiers_comparison.jpg")
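#%%
# Illustrative helper sketch: the grid-evaluation step used inside the loop
# above, isolated for a single fitted classifier. Uses decision_function when
# available and predict_proba otherwise, exactly as the plotting code does.
def grid_scores(clf, xx, yy):
    grid = np.c_[xx.ravel(), yy.ravel()]
    if hasattr(clf, "decision_function"):
        Z = clf.decision_function(grid)
    else:
        Z = clf.predict_proba(grid)[:, 1]
    return Z.reshape(xx.shape)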
|
DoWhatILove/turtle
|
programming/python/library/scikit-learn/classifiers_comparison.py
|
Python
|
mit
| 4,273
|
[
"Gaussian"
] |
ff766419b1a7b249f58810eac64f15b0004c1661b21e367b7e0dad1f71c0a50a
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
from collections import defaultdict
def read_tab_file_handle_sorted(tab_file_handle, factor_index=0):
"""
Parse a tab file (sorted by a column) and return a generator
"""
previous_factor_id = ''
factor_tab_list = list()
# Reading tab file
for line in tab_file_handle:
l = line.strip()
if l:
tab = l.split()
current_factor = tab[factor_index]
# Yield the previous factor tab list
if current_factor != previous_factor_id:
if previous_factor_id:
yield factor_tab_list
factor_tab_list = list()
factor_tab_list.append(tab)
previous_factor_id = current_factor
# Yield the last tab list
yield factor_tab_list
# Close tab file
tab_file_handle.close()
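# Illustrative usage sketch for read_tab_file_handle_sorted(): iterate groups
# of rows sharing the same value in the chosen column. The file path and
# helper name are hypothetical.
def count_rows_per_factor(path, factor_index=0):
    counts = dict()
    with open(path) as handle:
        for factor_tab_list in read_tab_file_handle_sorted(handle, factor_index):
            counts[factor_tab_list[0][factor_index]] = len(factor_tab_list)
    return counts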
if __name__ == '__main__':
# Arguments parsing
parser = argparse.ArgumentParser(description='')
# -i / --input_tab
parser.add_argument('-i', '--input_tab',
metavar='INBLAST',
type=argparse.FileType('r'),
default='-',
help='Input blast tab file. '
'Only best matches should be kept')
# -o / --output_tab
parser.add_argument('-o', '--output_tab',
metavar='OUTBLAST',
type=argparse.FileType('w'),
default='-',
                        help='Output filtered blast tab file')
args = parser.parse_args()
# Variables initialization
tab_list_list = list()
# Reading blast tab file
for tab_list in read_tab_file_handle_sorted(args.input_tab, 0):
query_id = tab_list[0][0]
tab_list_list.append(sorted(tab_list, key=lambda x: x[1]))
# Sort by specificity (decreasing match number) and then
# by decreasing alignment length. So the first seen contigs
# are the long specific ones
tab_list_list.sort(key=lambda x: (len(x), -int(x[0][3])))
#
kept_references_ids_set = set()
fillers_tab_list_list = list()
# Count refs
ref_count_dict = defaultdict(int)
for tab_list in tab_list_list:
for tab in tab_list:
ref_count_dict[tab[1]] += 1
# Init a buffer for the output file
output_tab_buffer = list()
# Start iterating
while (len(tab_list_list)):
# Deal with specific contigs
tab_list_buffer = list()
# Get most specific contig so far
selected_tab_list = tab_list_list[0]
#~ print(selected_tab_list)
tab_list_buffer = tab_list_list[1:]
# Get the alignment against the reference with the most alignments
selected_tab_list.sort(key=lambda t: (-ref_count_dict[t[1]], t[1]))
selected_tab = selected_tab_list[0]
kept_references_ids_set.add(selected_tab[1]) # Add the matching ref id to the set
# Write the alignment
output_tab_buffer.append('\t'.join(selected_tab))
tab_list_list = tab_list_buffer
tab_list_buffer = list()
# Uses already known fillers
fillers_tab_list_buffer = list()
for tab_list in fillers_tab_list_list:
#
references_ids_set = frozenset(x[1] for x in tab_list)
references_intersection = references_ids_set & kept_references_ids_set
#
tab_buffer = list()
for tab in tab_list:
reference_id = tab[1]
if reference_id in references_intersection:
output_tab_buffer.append('\t'.join(tab))
else:
# Store the alignments not already used
tab_buffer.append(tab)
if tab_buffer:
fillers_tab_list_buffer.append(tab_buffer)
fillers_tab_list_list = fillers_tab_list_buffer
fillers_tab_list_buffer = list()
# Deal with new fillers
for tab_list in tab_list_list:
#
references_ids_set = frozenset(x[1] for x in tab_list)
references_intersection = references_ids_set & kept_references_ids_set
#
if len(references_intersection): # So fillers
tab_buffer = list()
for tab in tab_list:
reference_id = tab[1]
if reference_id in references_intersection:
output_tab_buffer.append('\t'.join(tab))
else:
# Store the alignments not already used
tab_buffer.append(tab)
if tab_buffer:
fillers_tab_list_list.append(tab_buffer)
else: # Still specific
tab_list_buffer.append(tab_list)
tab_list_list = tab_list_buffer
tab_list_buffer = list()
# Write output file
for line in output_tab_buffer:
args.output_tab.write('{}\n'.format(line))
|
ppericard/matamog
|
scripts/generate_scaffolding_blast.py
|
Python
|
agpl-3.0
| 5,040
|
[
"BLAST"
] |
35a86630e61c65d4b6bccedad8f6a42ee016096f1e4c95cf95c691e20a2792ab
|
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2016
# Copyright by UWA (in the framework of the ICRAR)
# All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
"""
Generate the SLURM script and submit it to the queue based on various
parameters, then parse the log result and produce the plot.
"""
import datetime
import optparse
import os
import pwd
import re
import socket
import string
import subprocess
import sys
import time
from ... import utils
from ...runtime import __git_version__ as git_commit
sub_tpl_str = """#!/bin/bash --login
#SBATCH --nodes=$NUM_NODES
#SBATCH --ntasks-per-node=1
#SBATCH --job-name=DALiuGE-$PIP_NAME
#SBATCH --time=$JOB_DURATION
#SBATCH --account=$ACCOUNT
#SBATCH --error=err-%j.log
module swap PrgEnv-cray PrgEnv-gnu
module load python/2.7.10
module load mpi4py
aprun -b -n $NUM_NODES -N 1 $PY_BIN -m dlg.deploy.pawsey.start_dfms_cluster -l $LOG_DIR $GRAPH_PAR $PROXY_PAR $GRAPH_VIS_PAR $LOGV_PAR $ZERORUN_PAR $MAXTHREADS_PAR $SNC_PAR $NUM_ISLANDS_PAR $ALL_NICS $CHECK_WITH_SESSION
"""
sub_tpl = string.Template(sub_tpl_str)
default_aws_mon_host = 'sdp-dfms.ddns.net'
default_aws_mon_port = 8898
class DefaultConfig(object):
def __init__(self):
self._dict = dict()
l = self.init_list()
self.setpar('acc', l[0])
self.setpar('log_root', l[1])
def init_list(self):
pass
def setpar(self, k, v):
self._dict[k] = v
def getpar(self, k):
return self._dict.get(k)
class GalaxyMWAConfig(DefaultConfig):
def __init__(self):
super(GalaxyMWAConfig, self).__init__()
def init_list(self):
return ['mwaops', '/group/mwaops/cwu/dfms/logs']
class GalaxyASKAPConfig(DefaultConfig):
def __init__(self):
super(GalaxyASKAPConfig, self).__init__()
def init_list(self):
return ['astronomy856', '/group/astronomy856/cwu/dfms/logs']
class MagnusConfig(DefaultConfig):
def __init__(self):
super(MagnusConfig, self).__init__()
def init_list(self):
return ['pawsey0129', '/group/pawsey0129/daliuge_logs']
class TianHe2Config(DefaultConfig):
def __init__(self):
super(TianHe2Config, self).__init__()
def init_list(self): #TODO please fill in
return ['SHAO', '/group/shao/daliuge_logs']
class ConfigFactory():
mapping = {'galaxy_mwa':GalaxyMWAConfig, 'galaxy_askap':GalaxyASKAPConfig,
'magnus':MagnusConfig, 'galaxy':GalaxyASKAPConfig}
@staticmethod
def create_config(facility=None):
facility = facility.lower() if (facility is not None) else facility
return ConfigFactory.mapping.get(facility)()
class PawseyClient(object):
"""
parameters we can control:
1. Pawsey group / account name (Required)
2. whether to submit a graph, and if so provide graph path
3. # of nodes (of Drop Managers)
4. how long to run
5. whether to produce offline graph vis
6. whether to attach proxy for remote monitoring, and if so provide
DLG_MON_HOST
DLG_MON_PORT
7. Root directory of the Log files (Required)
"""
def __init__(self, log_root=None, acc=None,
pg=None, lg=None,
job_dur=30,
num_nodes=5,
run_proxy=False,
mon_host=default_aws_mon_host,
mon_port=default_aws_mon_port,
logv=1,
facility=socket.gethostname().split('-')[0],
zerorun=False,
max_threads=0,
sleepncopy=False,
num_islands=1,
all_nics=False,
check_with_session=False):
self._config = ConfigFactory.create_config(facility=facility)
self._acc = self._config.getpar('acc') if (acc is None) else acc
self._log_root = self._config.getpar('log_root') if (log_root is None) else log_root
self._num_nodes = num_nodes
self._job_dur = job_dur
self._lg = lg
self._pg = pg
self._graph_vis = False
self._run_proxy = run_proxy
self._mon_host = mon_host
self._mon_port = mon_port
self._pip_name = utils.fname_to_pipname(lg or pg) if lg or pg else 'None'
self._logv = logv
self._zerorun = zerorun
self._max_threads = max_threads
self._sleepncopy = sleepncopy
self._num_islands = num_islands
self._all_nics = all_nics
self._check_with_session = check_with_session
@property
def num_daliuge_nodes(self):
if (self._run_proxy):
ret = self._num_nodes - 1 # exclude the proxy node
else:
ret = self._num_nodes - 0 # exclude the data island node?
if (ret <= 0):
raise Exception("Not enough nodes {0} to run DALiuGE.".format(self._num_nodes))
return ret
def get_log_dirname(self):
"""
(pipeline name_)[Nnum_of_daliuge_nodes]_[time_stamp]
"""
dtstr = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") #.%f
return "{0}_N{1}_{2}".format(self._pip_name, self.num_daliuge_nodes, dtstr)
def label_job_dur(self):
"""
e.g. 135 min --> 02:15:00
"""
seconds = self._job_dur * 60
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%02d:%02d:%02d" % (h, m, s)
def submit_job(self):
lgdir = '{0}/{1}'.format(self._log_root, self.get_log_dirname())
if (not os.path.exists(lgdir)):
os.makedirs(lgdir)
pardict = dict()
pardict['NUM_NODES'] = str(self._num_nodes)
pardict['PIP_NAME'] = self._pip_name
pardict['JOB_DURATION'] = self.label_job_dur()
pardict['ACCOUNT'] = self._acc
pardict['PY_BIN'] = sys.executable
pardict['LOG_DIR'] = lgdir
pardict['GRAPH_PAR'] = '-L "{0}"'.format(self._lg) if self._lg else '-P "{0}"'.format(self._pg) if self._pg else ''
pardict['PROXY_PAR'] = '-m %s -o %d' % (self._mon_host, self._mon_port) if self._run_proxy else ''
pardict['GRAPH_VIS_PAR'] = '-d' if self._graph_vis else ''
pardict['LOGV_PAR'] = '-v %d' % self._logv
pardict['ZERORUN_PAR'] = '-z' if self._zerorun else ''
pardict['MAXTHREADS_PAR'] = '-t %d' % (self._max_threads)
pardict['SNC_PAR'] = '--app 1' if self._sleepncopy else '--app 0'
pardict['NUM_ISLANDS_PAR'] = '-s %d' % (self._num_islands)
pardict['ALL_NICS'] = '-u' if self._all_nics else ''
pardict['CHECK_WITH_SESSION'] = '-S' if self._check_with_session else ''
job_desc = sub_tpl.safe_substitute(pardict)
job_file = '{0}/jobsub.sh'.format(lgdir)
with open(job_file, 'w') as jf:
jf.write(job_desc)
with open(os.path.join(lgdir, 'git_commit.txt'), 'w') as gf:
gf.write(git_commit)
os.chdir(lgdir) # so that slurm logs will be dumped here
print(subprocess.check_output(['sbatch', job_file]))
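# Illustrative sketch only: constructing a client and submitting a job, as
# driven by the command-line handling at the bottom of this module. The
# facility name and graph path are hypothetical.
def _example_submit_job():
    client = PawseyClient(facility='galaxy', num_nodes=8, job_dur=45,
                          lg='/path/to/logical_graph.json')
    client.submit_job()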
class LogEntryPair(object):
"""
"""
def __init__(self, name, gstart, gend):
self._name = name
self._gstart = gstart + 2 # group 0 is the whole matching line, group 1 is the catchall
self._gend = gend + 2
self._start_time = None
self._end_time = None
self._other = dict() # hack
def get_timestamp(self, line):
"""
microsecond precision
"""
sp = line.split()
date_time = '{0}T{1}'.format(sp[0], sp[1])
pattern = '%Y-%m-%dT%H:%M:%S,%f'
epoch = time.mktime(time.strptime(date_time, pattern))
return datetime.datetime.strptime(date_time, pattern).microsecond / 1e6 + epoch
def check_start(self, match, line):
if self._start_time is None and match.group(self._gstart):
self._start_time = self.get_timestamp(line)
def check_end(self, match, line):
if self._end_time is None and match.group(self._gend):
self._end_time = self.get_timestamp(line)
if (self._name == 'unroll'):
self._other['num_drops'] = int(line.split()[-1])
elif (self._name == 'node managers'):
self._other['num_node_mgrs'] = int(line.split('Got a node list with')[1].split()[0])
elif (self._name == 'build drop connections'):
self._other['num_edges'] = int(line.split()[-4][1:-1])
def get_duration(self):
if ((self._start_time is None) or (self._end_time is None)):
#print "Cannot calc duration for '{0}': start_time:{1}, end_time:{2}".format(self._name,
#self._start_time, self._end_time)
return None
return (self._end_time - self._start_time)
def reset(self):
self._start_time = None
self._end_time = None
class LogParser(object):
"""
    Given the log dir, analyse all DIM and NM logs, and store the results
    in a CSV which has the following fields:
====================================
0. user name (e.g. cwu)
1. facility (e.g. galaxy)
2. pipeline (e.g. lofar_std)
3. time (e.g. 2016-08-22T11-52-11/)
4. # of nodes
5. # of drops
6. Git commit number
7. unroll_time
8. translation_time
9. pg_spec_gen_time
10. created_session_at_all_nodes_time
11. graph_separation_time
12. push_sub_graphs_to_all_nodes_time
13. created_drops_at_all_nodes_time
14. Num_pyro_connections_at_all_nodes
15. created_pyro_conn_at_all_nodes_time
16. triggered_drops_at_all_nodes_time
17. Total completion time
Detailed description of each field:
https://confluence.icrar.uwa.edu.au/display/DALIUGE/Scalability+test#Scalabilitytest-Datacollection
"""
dim_kl = ['Start to unroll',
'Unroll completed for {0} with # of Drops',
'Start to translate',
'Translation completed for',
'PG spec is calculated',
'Creating Session {0} in all hosts',
'Successfully created session {0} in all hosts',
'Separating graph',
'Removed (and sanitized) {0} inter-dm relationships',
'Adding individual graphSpec of session {0} to each DM',
'Successfully added individual graphSpec of session {0} to each DM',
'Deploying Session {0} in all hosts',
'Successfully deployed session {0} in all hosts',
'Establishing {0} drop relationships',
'Established all drop relationships {0} in',
'Moving Drops to COMPLETED right away',
'Successfully triggered drops',
'Got a node list with {0} node managers']
nm_kl = [
'Starting Pyro4 Daemon for session', # Logged by the old master branch
'Creating DROPs for session', # Drops are being created
'Session {0} is now RUNNING', # All drops created and ready
'Session {0} finished' # All drops executed
]
kwords = dict()
kwords['dim'] = dim_kl
kwords['nm'] = nm_kl
def __init__(self, log_dir):
self._dim_log_f = None
if (not self.check_log_dir(log_dir)):
raise Exception("No DIM log found at: {0}".format(log_dir))
self._log_dir = log_dir
self._dim_catchall_pattern = self.construct_catchall_pattern(node_type='dim')
self._nm_catchall_pattern = self.construct_catchall_pattern(node_type='nm')
def build_dim_log_entry_pairs(self):
return [LogEntryPair(name, g1, g2) for name, g1, g2 in (
('unroll', 0, 1),
('translate', 2, 3),
('gen pg spec', 3, 4),
('create session', 5, 6),
('separate graph', 7, 8),
('add session to all', 9, 10),
('deploy session to all', 11, 12),
('build drop connections', 13, 14),
('trigger drops', 15, 16),
('node managers', 17, 17),
)]
def build_nm_log_entry_pairs(self):
return [LogEntryPair(name, g1, g2) for name, g1, g2 in (
('completion_time_old', 0, 3), # Old master branch
('completion_time', 2, 3),
('node_deploy_time', 1, 2),
)]
def construct_catchall_pattern(self, node_type):
pattern_strs = LogParser.kwords.get(node_type)
patterns = [x.format('.*').replace('(', r'\(').replace(')', r'\)') for x in pattern_strs]
catchall = '|'.join(['(%s)' % (s,) for s in patterns])
catchall = ".*(%s).*" % (catchall,)
return re.compile(catchall)
def parse(self, out_csv=None):
"""
e.g. lofar_std_N4_2016-08-22T11-52-11
"""
logb_name = os.path.basename(self._log_dir)
ss = re.search('_N[0-9]+_', logb_name)
if (ss is None):
raise Exception("Invalid log directory: {0}".format(self._log_dir))
delimit = ss.group(0)
sp = logb_name.split(delimit)
pip_name = sp[0]
do_date = sp[1]
num_nodes = int(delimit.split('_')[1][1:])
user_name = pwd.getpwuid(os.stat(self._dim_log_f[0]).st_uid).pw_name
gitf = os.path.join(self._log_dir, 'git_commit.txt')
if (os.path.exists(gitf)):
with open(gitf, 'r') as gf:
git_commit = gf.readline().strip()
else:
git_commit = 'None'
# parse DIM log
dim_log_pairs = self.build_dim_log_entry_pairs()
for lff in self._dim_log_f:
with open(lff, "r") as dimlog:
for line in dimlog:
m = self._dim_catchall_pattern.match(line)
if not m:
continue
for lep in dim_log_pairs:
lep.check_start(m, line)
lep.check_end(m, line)
num_drops = -1
temp_dim = []
num_node_mgrs = 0
for lep in dim_log_pairs:
add_dur = True
if ('unroll' == lep._name):
num_drops = lep._other.get('num_drops', -1)
elif ('node managers' == lep._name):
num_node_mgrs = lep._other.get('num_node_mgrs', 0)
add_dur = False
elif ('build drop connections' == lep._name):
num_edges = lep._other.get('num_edges', -1)
temp_dim.append(str(num_edges))
if (add_dur):
temp_dim.append(str(lep.get_duration()))
# parse NM logs
nm_logs = []
max_node_deploy_time = 0
num_finished_sess = 0
num_dims = 0
for df in os.listdir(self._log_dir):
# Check this is a dir and contains the NM log
if not os.path.isdir(os.path.join(self._log_dir, df)):
continue
nm_logf = os.path.join(self._log_dir, df, 'dlgNM.log')
nm_dim_logf = os.path.join(self._log_dir, df, 'dlgDIM.log')
nm_mm_logf = os.path.join(self._log_dir, df, 'dlgMM.log')
if not os.path.exists(nm_logf):
if (os.path.exists(nm_dim_logf) or os.path.exists(nm_mm_logf)):
num_dims += 1
continue
# Start anew every time
nm_log_pairs = self.build_nm_log_entry_pairs()
nm_logs.append(nm_log_pairs)
# Read NM log and fill all LogPair objects
with open(nm_logf, "r") as nmlog:
for line in nmlog:
m = self._nm_catchall_pattern.match(line)
if not m:
continue
for lep in nm_log_pairs:
lep.check_start(m, line)
lep.check_end(m, line)
# Looking for the deployment times and counting for finished sessions
for lep in nm_log_pairs:
# Consider only valid durations
dur = lep.get_duration()
if dur is None:
continue
if lep._name in ('completion_time', 'completion_time_old'):
num_finished_sess += 1
elif lep._name == 'node_deploy_time':
if dur > max_node_deploy_time:
max_node_deploy_time = dur
theory_num_nm = num_nodes - num_dims
actual_num_nm = num_node_mgrs or theory_num_nm
if actual_num_nm != num_finished_sess:
print("Pipeline %s is not complete: %d != %d." % (pip_name, actual_num_nm, num_finished_sess))
#return
else:
failed_nodes = theory_num_nm - actual_num_nm
num_nodes -= failed_nodes
if (failed_nodes > 0):
print("Pipeline %s has %d node managers that failed to start!" % (pip_name, failed_nodes))
# The DIM waits for all NMs to setup before triggering the first drops.
# This has the effect that the slowest to setup will make the others
# idle while already in RUNNING state, effectively increasing their
# "exec_time". We subtract the maximum deploy time to account for this
# effect
max_exec_time = 0
for log_entry_pairs in nm_logs:
indexed_leps = {lep._name: lep for lep in log_entry_pairs}
deploy_time = indexed_leps['node_deploy_time'].get_duration()
if (deploy_time is None): # since some node managers failed to start
continue
exec_time = indexed_leps['completion_time'].get_duration() or indexed_leps['completion_time_old'].get_duration()
if (exec_time is None):
continue
real_exec_time = exec_time - (max_node_deploy_time - deploy_time)
if real_exec_time > max_exec_time:
max_exec_time = real_exec_time
temp_nm = [str(max_exec_time)]
ret = [user_name, socket.gethostname().split('-')[0], pip_name, do_date,
num_nodes, num_drops, git_commit]
ret = [str(x) for x in ret]
num_dims = num_dims if num_dims == 1 else num_dims - 1 #exclude master manager
add_line = ','.join(ret + temp_dim + temp_nm + [str(int(num_dims))])
if (out_csv is not None):
with open(out_csv, 'a') as of:
of.write(add_line)
of.write(os.linesep)
else:
print(add_line)
def check_log_dir(self, log_dir):
possible_logs = [
os.path.join(log_dir, '0', 'dlgDIM.log'),
os.path.join(log_dir, '0', 'dlgMM.log')
]
for dim_log_f in possible_logs:
if (os.path.exists(dim_log_f)):
self._dim_log_f = [dim_log_f]
if (dim_log_f == possible_logs[0]):
cluster_log = os.path.join(log_dir, '0', 'start_dlg_cluster.log')
if (os.path.exists(cluster_log)):
self._dim_log_f.append(cluster_log)
return True
return False
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option("-a", "--action", action="store", type="int",
dest="action", help="1 - submit job, 2 - analyse log", default=1)
parser.add_option("-l", "--log-root", action="store",
dest="log_root", help="The root directory of the log file")
parser.add_option("-d", "--log-dir", action="store",
dest="log_dir", help="The directory of the log file for parsing")
parser.add_option("-L", "--logical-graph", action="store", type="string",
dest="logical_graph", help="The filename of the logical graph to deploy", default=None)
parser.add_option("-P", "--physical-graph", action="store", type="string",
dest="physical_graph", help="The filename of the physical graph (template) to deploy", default=None)
parser.add_option("-t", "--job-dur", action="store", type="int",
dest="job_dur", help="job duration in minutes", default=30)
parser.add_option("-n", "--num_nodes", action="store", type="int",
dest="num_nodes", help="number of compute nodes requested", default=5)
parser.add_option('-i', '--graph_vis', action='store_true',
dest='graph_vis', help='Whether to visualise graph (poll status)', default=False)
parser.add_option('-p', '--run_proxy', action='store_true',
dest='run_proxy', help='Whether to attach proxy server for real-time monitoring', default=False)
parser.add_option("-m", "--monitor_host", action="store", type="string",
dest="mon_host", help="Monitor host IP (optional)", default=default_aws_mon_host)
parser.add_option("-o", "--monitor_port", action="store", type="int",
dest="mon_port", help="The port to bind DALiuGE monitor", default=default_aws_mon_port)
parser.add_option("-v", "--verbose-level", action="store", type="int",
dest="verbose_level", help="Verbosity level (1-3) of the DIM/NM logging",
default=1)
parser.add_option("-c", "--csvoutput", action="store",
dest="csv_output", help="CSV output file to keep the log analysis result")
parser.add_option("-z", "--zerorun", action="store_true",
dest="zerorun", help="Generate a physical graph that takes no time to run", default=False)
parser.add_option("-y", "--sleepncopy", action="store_true",
dest="sleepncopy", help="Whether include COPY in the default Component drop", default=False)
parser.add_option("-T", "--max-threads", action="store", type="int",
dest="max_threads", help="Max thread pool size used for executing drops. 0 (default) means no pool.", default=0)
parser.add_option('-s', '--num_islands', action='store', type='int',
dest='num_islands', default=1, help='The number of Data Islands')
parser.add_option("-u", "--all_nics", action="store_true",
dest="all_nics", help="Listen on all NICs for a node manager", default=False)
parser.add_option("-S", "--check_with_session", action="store_true",
dest="check_with_session", help="Check for node managers' availability by creating/destroy a session", default=False)
(opts, args) = parser.parse_args(sys.argv)
if (opts.action == 2):
if (opts.log_dir is None):
# Two modes: analyse every run under a root log directory (this branch)
# or a single log directory (the else branch below)
if (opts.log_root is None):
facility = socket.gethostname().split('-')[0]
config = ConfigFactory.create_config(facility=facility)
log_root = config.getpar('log_root')
else:
log_root = opts.log_root
if (log_root is None or (not os.path.exists(log_root))):
parser.error("Missing or invalid log directory/facility for log analysis")
# Analyse every run directory under the root log directory
else:
for df in os.listdir(log_root):
df = os.path.join(log_root, df)
if (os.path.isdir(df)):
try:
lg = LogParser(df)
lg.parse(out_csv=opts.csv_output)
except Exception as exp:
print("Failed to parse {0}: {1}".format(df, exp))
else:
lg = LogParser(opts.log_dir)
lg.parse(out_csv=opts.csv_output)
elif (opts.action == 1):
if bool(opts.logical_graph) == bool(opts.physical_graph):
parser.error("Either a logical graph or a physical graph filename must be specified (but not both)")
for p in (opts.logical_graph, opts.physical_graph):
if p and not os.path.exists(p):
parser.error("Cannot locate graph file at '{0}'".format(p))
pc = PawseyClient(job_dur=opts.job_dur, num_nodes=opts.num_nodes, logv=opts.verbose_level,
zerorun=opts.zerorun, max_threads=opts.max_threads,
run_proxy=opts.run_proxy, mon_host=opts.mon_host, mon_port=opts.mon_port,
num_islands=opts.num_islands, all_nics=opts.all_nics,
check_with_session=opts.check_with_session,
lg=opts.logical_graph, pg=opts.physical_graph)
pc._graph_vis = opts.graph_vis
pc.submit_job()
else:
parser.error("Invalid action -a")
|
steve-ord/daliuge
|
daliuge-engine/dlg/deploy/pawsey/scale_test.py
|
Python
|
lgpl-2.1
| 25,117
|
[
"Galaxy"
] |
c1a22855f42db281a670e1e1bb18068c5eabd501f78b15e7f768d25bbde44f15
|
#coding:utf-8
import re
TLD_NAME = (
'ac.at',
'ac.cn',
'ac.id',
'ac.jp',
'ac.th',
'adv.br',
'ae',
'ag',
'ah.cn',
'ai',
'am',
'appspot.com',
'ar',
'art.pl',
'as',
'asia',
'at',
'au',
'ba',
'bd',
'be',
'bg',
'biz',
'bj.cn',
'bl.uk',
'blog.br',
'bo',
'br',
'br.com',
'by',
'bz',
'ca',
'cat',
'cc',
'cd',
'ch',
'cl',
'club.tw',
'cn',
'cn.com',
'co',
'co.at',
'co.id',
'co.in',
'co.jp',
'co.kr',
'com',
'com.br',
'com.cn',
'com.co',
'com.do',
'com.ec',
'com.es',
'com.hk',
'com.lb',
'com.mx',
'com.my',
'com.pe',
'com.ph',
'com.pl',
'com.pt',
'com.ru',
'com.sg',
'com.tw',
'com.ua',
'com.vn',
'coop',
'cq.cn',
'cx',
'cz',
'de',
'dk',
'donetsk.ua',
'ec',
'edu',
'edu.br',
'edu.cn',
'edu.hk',
'edu.mx',
'edu.my',
'edu.ph',
'edu.pl',
'edu.sa',
'edu.sg',
'edu.tw',
'ee',
'elk.pl',
'eng.br',
'es',
'etc.br',
'eti.br',
'eu',
'eu.int',
'fed.us',
'fi',
'fj.cn',
'fm',
'fr',
'gc.ca',
'gd',
'gd.cn',
'ge',
'gg',
'go.cr',
'go.jp',
'go.th',
'gob.mx',
'gob.pe',
'gouv.fr',
'gov',
'gov.br',
'gov.cn',
'gov.co',
'gov.hk',
'gov.mo',
'gov.sg',
'gov.tw',
'gov.uk',
'gp',
'gr',
'gr.jp',
'gs',
'gt',
'gx.cn',
'gy',
'gz.cn',
'ha.cn',
'hb.cn',
'he.cn',
'hi.cn',
'hk',
'hk.cn',
'hm',
'hn.cn',
'hr',
'hu',
'i.ph',
'idv.tw',
'ie',
'if.ua',
'iki.fi',
'il',
'im',
'in',
'in.th',
'in.ua',
'info',
'info.pl',
'int',
'int.ru',
'io',
'ir',
'is',
'it',
'je',
'jl.cn',
'jp',
'js.cn',
'jx.cn',
'k12.mi.us',
'kharkov.ua',
'kiev.ua',
'kr',
'ks.us',
'kz',
'la',
'li',
'lib.in.us',
'lk',
'ln.cn',
'local',
'lt',
'lu',
'lukow.pl',
'lv',
'lviv.ua',
'ly',
'ma',
'mat.br',
'md',
'me',
'med.br',
'mil',
'mn',
'mn.us',
'mo.cn',
'mobi',
'mp',
'ms',
'msk.ru',
'mu',
'museum',
'mx',
'my',
'naklo.pl',
'name',
'name.my',
'ne.jp',
'net',
'net.cn',
'net.my',
'net.pl',
'net.ru',
'net.tw',
'nhs.uk',
'nl',
'nm.cn',
'no',
'nom.br',
'np',
'nu',
'nz',
'odessa.ua',
'on.ca',
'or.id',
'or.it',
'or.jp',
'or.kr',
'or.th',
'org',
'org.br',
'org.cn',
'org.es',
'org.hk',
'org.in',
'org.mk',
'org.mx',
'org.pl',
'org.ru',
'org.sg',
'org.tw',
'org.ua',
'oslo.no',
'padova.it',
'pc.pl',
'pe',
'pe.kr',
'ph',
'pisz.pl',
'pk',
'pl',
'pp.ru',
'pri.ee',
'pro',
'ps',
'pt',
'py',
're',
'ro',
'rs',
'ru',
'sc',
'sc.cn',
'sch.id',
'sd.cn',
'se',
'sebastopol.ua',
'sg',
'sh',
'sh.cn',
'si',
'sk',
'sn.cn',
'srv.br',
'st',
'su',
'tc',
'te.ua',
'tk',
'tl',
'tm',
'to',
'tomsk.ru',
'tr',
'tt',
'tv',
'tw',
'tw.cn',
'uk',
'uk.com',
'us',
'us.com',
'uy',
'uz',
'va.us',
'vc',
've',
'vg',
'vic.gov.au',
'vn',
'vn.ua',
'waw.pl',
'web.id',
'wroc.pl',
'ws',
'xxx',
'za',
'za.net',
'za.org',
'zj.cn',
'zm',
)
RE_TLD_NAME = re.compile(
r".*?((?:[^\.]+\.)(%s))$"%(
'|'.join([i.replace('.', '\\.') for i in TLD_NAME])
)
)
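# RE_TLD_NAME captures the registrable "name.tld" suffix of a host name,
# e.g. 'www.work.com' -> 'work.com' and 'xx.www.zsp.edu.cn' -> 'zsp.edu.cn';
# group 1 holds that suffix and group 2 the matched TLD entry.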
def tld_name(domain):
m = RE_TLD_NAME.match(domain)
if m:
return m.groups()[0]
if __name__ == '__main__':
print tld_name('www.work.com')
print tld_name('zsp.com')
print tld_name('www.zsp.com')
print tld_name('www.zsp.edu.cn')
print tld_name('xx.www.zsp.edu.cn')
print tld_name('xx.www.zsp.com.cn')
|
tonghuashuai/42qu-notepad
|
lib/tld_name.py
|
Python
|
mit
| 2,993
|
[
"Elk"
] |
0b60021e783abdb5f3db039b5bd4db03a3d7aa20c0c7631cea146c88ff608f31
|
'''
Created on Nov 27, 2011
@author: cryan
'''
import unittest
from PySim.SystemParams import SystemParams
from PySim.Simulation import simulate_sequence
from PySim.QuantumSystems import SCQubit, Hamiltonian
from PySim.OptimalControl import optimize_pulse, PulseParams
import numpy as np
import matplotlib.pyplot as plt
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testInversion(self):
'''
Try a simple three-level SC qubit system and see if we can prepare the excited state.
'''
#Setup a three level qubit and a 100MHz delta
Q1 = SCQubit(3, 4.987456e9, -100e6, name='Q1')
systemParams = SystemParams()
systemParams.add_sub_system(Q1)
systemParams.add_control_ham(inphase = Hamiltonian(0.5*(Q1.loweringOp + Q1.raisingOp)), quadrature = Hamiltonian(0.5*(-1j*Q1.loweringOp + 1j*Q1.raisingOp)))
systemParams.create_full_Ham()
systemParams.measurement = Q1.levelProjector(1)
#Setup the pulse parameters for the optimization
pulseParams = PulseParams()
pulseParams.timeSteps = 1e-9*np.ones(30)
pulseParams.rhoStart = Q1.levelProjector(0)
pulseParams.rhoGoal = Q1.levelProjector(1)
pulseParams.add_control_line(freq=-Q1.omega)
pulseParams.H_int = Hamiltonian(Q1.omega*np.diag(np.arange(Q1.dim)))
pulseParams.optimType = 'state2state'
#Call the optimization
optimize_pulse(pulseParams, systemParams)
#Now test the optimized pulse and make sure it puts all the population in the excited state
result = simulate_sequence(pulseParams, systemParams, pulseParams.rhoStart, simType='unitary')[0]
assert result > 0.99
def testDRAG(self):
'''
Try a unitary inversion pulse on a three-level SCQubit and see if we get something close to DRAG.
'''
#Setup a three level qubit and a 150MHz delta
Q1 = SCQubit(3, 4.987456e9, -150e6, name='Q1')
systemParams = SystemParams()
systemParams.add_sub_system(Q1)
systemParams.add_control_ham(inphase = Hamiltonian(0.5*(Q1.loweringOp + Q1.raisingOp)), quadrature = Hamiltonian(0.5*(-1j*Q1.loweringOp + 1j*Q1.raisingOp)))
systemParams.add_control_ham(inphase = Hamiltonian(0.5*(Q1.loweringOp + Q1.raisingOp)), quadrature = Hamiltonian(0.5*(-1j*Q1.loweringOp + 1j*Q1.raisingOp)))
systemParams.create_full_Ham()
systemParams.measurement = Q1.levelProjector(1)
#Setup the pulse parameters for the optimization
numPoints = 30
pulseTime = 15e-9
pulseParams = PulseParams()
pulseParams.timeSteps = (pulseTime/numPoints)*np.ones(numPoints)
pulseParams.rhoStart = Q1.levelProjector(0)
pulseParams.rhoGoal = Q1.levelProjector(1)
pulseParams.Ugoal = Q1.pauliX
pulseParams.add_control_line(freq=-Q1.omega, bandwidth=300e6, maxAmp=200e6)
pulseParams.add_control_line(freq=-Q1.omega, phase=-np.pi/2, bandwidth=300e6, maxAmp=200e6)
pulseParams.H_int = Hamiltonian((Q1.omega)*np.diag(np.arange(Q1.dim)))
pulseParams.optimType = 'unitary'
pulseParams.derivType = 'finiteDiff'
#Start with a Gaussian
tmpGauss = np.exp(-np.linspace(-2,2,numPoints)**2)
tmpScale = 0.5/(np.sum(pulseParams.timeSteps*tmpGauss))
pulseParams.startControlAmps = np.vstack((tmpScale*tmpGauss, np.zeros(numPoints)))
#Call the optimization
optimize_pulse(pulseParams, systemParams)
if plotResults:
plt.plot(np.cumsum(pulseParams.timeSteps)*1e9,pulseParams.controlAmps.T/1e6);
plt.ylabel('Pulse Amplitude (MHz)')
plt.xlabel('Time (ns)')
plt.legend(('X Quadrature', 'Y Quadrature'))
plt.title('DRAG Pulse from Optimal Control')
plt.show()
#Now test the optimized pulse and make sure it does give us the desired unitary
result = simulate_sequence(pulseParams, systemParams, pulseParams.rhoStart, simType='unitary')
assert np.abs(np.trace(np.dot(result[1].conj().T, pulseParams.Ugoal)))**2/np.abs(np.trace(np.dot(pulseParams.Ugoal.conj().T, pulseParams.Ugoal)))**2 > 0.99
if __name__ == "__main__":
plotResults = True
#import sys;sys.argv = ['', 'Test.test']
unittest.main()
|
BBN-Q/PySimulator
|
tests/GRAPETests.py
|
Python
|
apache-2.0
| 4,491
|
[
"Gaussian"
] |
8d264086a17c810c82111007246518e5378e13550efb0d00951b78a4b39ea2d0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# fpblockaverager.FPBlockAverager.py
#
# Copyright (C) 2012-2017 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Command-line tool to estimate standard error using the
block-averaging method of Flyvbjerg and Petersen
Flyvbjerg, H., and Petersen, H. G. Error estimates on averages of
correlated data. Journal of Chemical Physics. 1989. 91 (1). 461-466.
.. todo:
- Add a nice __str__ method to look at results without needing to set
verbose flag
"""
################################### MODULES ###################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
if __name__ == "__main__":
__package__ = str("fpblockaverager")
import fpblockaverager
import numpy as np
import pandas as pd
pd.set_option('display.width', 120)
################################### CLASSES ###################################
class FPBlockAverager(object):
"""
Manages estimation of standard error using the block-averaging
method of Flyvbjerg and Petersen.
Flyvbjerg, H., and Petersen, H. G. Error estimates on averages of
correlated data. Journal of Chemical Physics. 1989. 91 (1). 461-466.
.. todo:
- Reimplement plotting
- Test Python 3
- Support omitting blockings outside min_n_blocks and max cut from
fit, but still calculating
- Fit to linear portion after initial fits
- Estimate sample size and correlation time
"""
def __init__(self, dataframe=None, **kwargs):
"""
Arguments:
dataframe (DataFrame): DataFrame for which to calculate error;
index should be time
all_factors (bool): Use all factors by which the dataset is
divisible rather than only powers of two
min_n_blocks (int): Minimum number of blocks after
transformation
max_cut (float): Maximum proportion of the dataset to omit in
a transformation
fit_exp (bool): Fit exponential curve
fit_sig (bool): Fit sigmoid curve
verbose (int): Level of verbose output
kwargs (dict): Additional keyword arguments
"""
# Arguments
verbose = kwargs.get("verbose", 1)
transformations = self.select_transformations(dataframe, **kwargs)
if verbose >= 2:
print(transformations)
blockings = self.calculate_blockings(dataframe, transformations,
**kwargs)
if verbose >= 2:
print(blockings)
blockings, parameters = self.fit_curves(dataframe, blockings, **kwargs)
if verbose >= 3:
print(blockings)
if verbose >= 2:
print(parameters)
self.blockings = blockings
self.parameters = parameters
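# A minimal usage sketch (the DataFrame `df` below is hypothetical and
# would hold correlated timeseries columns indexed by time):
#   fpa = FPBlockAverager(df, fit_exp=True, fit_sig=True, verbose=1)
#   fpa.blockings   # per-blocking means, standard errors and curve fits
#   fpa.parameters  # fitted parameters; the plateau value ("a (se)" /
#                   # "b (se)") estimates the true standard error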
def select_transformations(self, dataframe, all_factors=False,
min_n_blocks=2, max_cut=0.1, **kwargs):
"""
Selects lengths of block transformations
Arguments:
dataframe (DataFrame): Timeseries for which to select
transformations
all_factors (bool): Use all factors by which the dataset is
divisible rather than only powers of two
min_n_blocks (int): Minimum number of blocks after
transformation
max_cut (float): Maximum proportion of the dataset to omit in
a transformation
kwargs (dict): Additional keyword arguments
Returns:
DataFrame: transformations
"""
# Arguments
full_length = dataframe.shape[0]
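# Worked example (defaults, powers of two): a 1000-point series keeps block
# lengths 1, 2, 4, ..., 64; longer blocks would either leave fewer than
# min_n_blocks blocks or discard more than a max_cut fraction of the data.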
# Determine number of blocks, block lengths, total lengths used,
# and number of transforms
if all_factors:
block_lengths = np.array(sorted(
set(range(1, 2 ** int(np.floor(np.log2(full_length)))))),
np.int)
n_blocks = np.array([full_length / n for n in block_lengths],
np.int)
n_blocks = np.array(sorted(set(n_blocks)))[::-1]
n_blocks = n_blocks[n_blocks >= min_n_blocks]
block_lengths = np.array(full_length / n_blocks, np.int)
else:
block_lengths = np.array(
[2 ** i for i in range(int(np.floor(np.log2(full_length))))],
np.int)
n_blocks = np.array([full_length / n for n in block_lengths],
np.int)
n_blocks = n_blocks[n_blocks >= min_n_blocks]
n_transforms = np.log2(block_lengths)
# Cut transformations for which more than a max_cut proportion of the
# dataframe must be omitted
used_lengths = n_blocks * block_lengths
indexes = (used_lengths >= full_length * (1 - max_cut))
n_blocks = n_blocks[indexes]
block_lengths = block_lengths[indexes]
used_lengths = used_lengths[indexes]
n_transforms = n_transforms[indexes]
# Organize and return
transformations = pd.DataFrame(
np.column_stack((n_blocks, block_lengths, used_lengths)),
columns=["n blocks", "block length", "used length"],
index=n_transforms)
transformations.index.name = "n transforms"
return transformations
def calculate_blockings(self, dataframe, transformations, **kwargs):
"""
Calculates standard error of block transforms
The standard deviation of each standard error (stderr_stddev) is
only valid for points whose standard error has leveled off (i.e.
can be assumed Gaussian).
Arguments:
dataframe (DataFrame): Timeseries for which to calculate
blockings
transformations (DataFrame): transformations
kwargs (dict): Additional keyword arguments
Returns:
DataFrame: blockings
"""
def transform(n_blocks, block_length):
"""
Prepares a block-transformed dataset.
Arguments:
n_blocks (int): Number of blocks in transformed dataset
block_length (int): Length of each block in transformed
dataset
"""
reshaped = np.reshape(dataframe.values[:n_blocks * block_length],
(n_blocks, block_length, dataframe.shape[1]))
transformed = pd.DataFrame(np.mean(reshaped, axis=1),
columns=dataframe.columns)
return transformed
# Construct destination for results
columns = [[c + " mean", c + " se", c + " se sd"] for c in
map(str, dataframe.columns)]
columns = [item for sublist in columns for item in sublist]
analysis = pd.DataFrame(
np.zeros((transformations.shape[0], dataframe.shape[1] * 3)),
columns=columns, index=transformations.index.values)
analysis.index.name = "n transforms"
# Calculate mean, stderr, and stddev of stderr for each blocking
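# (as implemented below: se = sd / sqrt(n_blocks - 1) and
#  sd(se) = se / sqrt(2 * (n_blocks - 1)), following Flyvbjerg & Petersen)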
for n_transforms, row in transformations.iterrows():
transformed = transform(row["n blocks"], row["block length"])
mean = np.mean(transformed.values, axis=0)
stddev = np.std(transformed.values, axis=0)
stderr = stddev / np.sqrt(row["n blocks"] - 1)
stderr_stddev = stderr / np.sqrt(2 * (row["n blocks"] - 1))
analysis.loc[n_transforms][0::3] = mean
analysis.loc[n_transforms][1::3] = stderr
analysis.loc[n_transforms][2::3] = stderr_stddev
# Organize and return
blockings = transformations.join(analysis)
return blockings
def fit_curves(self, dataframe, blockings, fit_exp=True, fit_sig=True,
**kwargs):
"""
Fits exponential and sigmoid curves to block-transformed data.
Arguments:
dataframe (DataFrame): Timeseries for which to fit curves
blockings (DataFrame): Blockings
fit_exp (bool): Fit exponential curve
fit_sig (bool): Fit sigmoid curve
verbose (int): Level of verbose output
kwargs (dict): Additional keyword arguments
Returns:
(Dataframe, Dataframe): blockings with fit curves inserted,
fit parameters
"""
import warnings
from scipy.optimize import curve_fit
def exponential(x, a, b, c):
"""
y = a + b * exp(c * x)
Arguments:
x (float): x
a (float): Final y value; y(+∞) = a
b (float): Scale
c (float): Power
Returns:
float: y(x)
"""
return a + b * np.exp(c * x)
def sigmoid(x, a, b, c, d):
"""
y = b + (a - b) / (1 + (x / c)**d)
Arguments:
x (float): x
a (float): Initial y value; y(-∞) = a
b (float): Final y value; y(+∞) = b
c (float): Center of sigmoid; y(c) = (a + b) / 2
d (float): Power
Returns:
float: y(x)
"""
return b + ((a - b) / (1 + (x / c) ** d))
# Process arguments
verbose = kwargs.get("verbose", 1)
# Construct destinations for results
fields = map(str, dataframe.columns.tolist())
columns = ["n blocks", "block length", "used length"]
if fit_exp:
exp_fit = pd.DataFrame(
np.zeros((blockings.shape[0], dataframe.shape[1])) * np.nan,
columns=[f + " exp fit" for f in fields],
index=blockings.index.values)
exp_fit.index.name = "n transforms"
exp_par = pd.DataFrame(np.zeros((3, len(fields))) * np.nan,
index=["a (se)", "b", "c"], columns=fields)
if fit_sig:
sig_fit = pd.DataFrame(
np.zeros((blockings.shape[0], dataframe.shape[1])) * np.nan,
columns=[f + " sig fit" for f in fields],
index=blockings.index.values)
sig_fit.index.name = "n transforms"
sig_par = pd.DataFrame(np.zeros((4, len(fields))) * np.nan,
index=["a", "b (se)", "c", "d"], columns=fields)
# Calculate and store fit and parameters
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for i, field in enumerate(fields):
columns.extend(
[field + " mean", field + " se", field + " se sd"])
if fit_exp:
columns.append(field + " exp fit")
try:
a, b, c = \
curve_fit(exponential, blockings["block length"],
blockings[field + " se"],
p0=(0.01, -1.0, -0.1))[0]
if a >= 0:
exp_fit[field + " exp fit"] = exponential(
blockings["block length"].values, a, b, c)
exp_par[field] = [a, b, c]
elif verbose >= 1:
print("Exponential fit for field "
"'{0}' yielded negative ".format(
field) + "standard error, setting values to "
"NaN")
except RuntimeError:
if verbose >= 1:
print("Could not fit exponential for field "
"'{0}', setting values to NaN".format(field))
if fit_sig:
columns.append(field + " sig fit")
try:
a, b, c, d = curve_fit(sigmoid, blockings.index.values,
blockings[field + " se"], p0=(0.0, 0.1, 10, 1))[0]
if b >= 0:
sig_fit[field + " sig fit"] = sigmoid(
blockings.index.values, a, b, c, d)
sig_par[field] = [a, b, c, d]
elif verbose >= 1:
print("Sigmoidal fit for field "
"'{0}' yielded negative ".format(
field) + "standard error, setting values to "
"NaN")
except RuntimeError:
if verbose >= 1:
print("Could not fit sigmoid for field "
"'{0}', setting values to NaN".format(field))
# Organize and return
if fit_exp and fit_sig:
parameters = pd.concat([exp_par, sig_par], keys=["exp", "sig"])
elif fit_exp:
parameters = pd.concat([exp_par], keys=["exp"])
elif fit_sig:
parameters = pd.concat([sig_par], keys=["sig"])
else:
parameters = None
if fit_exp:
blockings = blockings.join(exp_fit)
if fit_sig:
blockings = blockings.join(sig_fit)
blockings = blockings[columns]
return blockings, parameters
# def plot(self, blockings, parameters, verbose=1, debug=0,
# outfile="test.pdf", **kwargs):
# """
# Plots block average results using matplotlib.
#
# Arguments:
# verbose (int): Level of verbose output
# debug (int): Level of debug output
# kwargs (dict): Additional keyword arguments
# Returns:
#
# """
# import matplotlib
# import matplotlib.pyplot as plt
# from matplotlib.backends.backend_pdf import PdfPages
#
# fields = [n[:-5] for n in blockings.columns.tolist()
# if n.endswith("_mean")]
# n_fields = len(fields)
# fit_exp = True
# fit_sig = True
#
# # Generate and format figure and subplots
# figure, subplots = plt.subplots(n_fields, 2,
# figsize=[6.5, 2+n_fields*1.5],
# subplot_kw=dict(autoscale_on = True))
## if self.n_fields == 1:
## subplots = np.expand_dims(subplots, 0)
# # Must adjust for 1 or two column
# figure.tight_layout(pad=2, h_pad=-1, w_pad=-1)
# figure.subplots_adjust(
# left = 0.10, wspace = 0.1, right = 0.95,
# bottom = 0.06, hspace = 0.1, top = 0.95)
#
# # Title columns for sigmoid and exponential fit
## for i, field in enumerate(fields):
## # Format x
## if i != n_fields - 1:
## subplots[i,0].set_xticklabels([])
## subplots[i,1].set_xticklabels([])
#
## # Format y
## subplots[i,0].set_ylabel("σ", rotation="horizontal")
## subplots[i,1].set_yticklabels([])
##
## # Add y2 label
## subplots[i,1].yaxis.set_label_position("right")
## subplots[i,1].set_ylabel(field.title(), rotation=270,
# labelpad=15)
##
# # set xticks and yticks
## subplots[n_fields-1,0].set_xlabel("Block Length")
## subplots[n_fields-1,1].set_xlabel("Number of Block Transformations")
##
## for i, field in enumerate(fields):
## subplots[i,0].plot(
## blockings["block_length"], blockings[field+"_se"],
## color="blue")
## subplots[i,1].plot(
## blockings["n_transforms"], blockings[field+"_se"],
## color="blue")
## se_sd = blockings[field+"_se_sd"]
## subplots[i,0].fill_between(blockings["block_length"],
## blockings[field+"_se"] - 1.96 * blockings[field+"_se_sd"],
## blockings[field+"_se"] + 1.96 * blockings[field+"_se_sd"],
## lw=0, alpha=0.5, color="blue")
## subplots[i,1].fill_between(blockings["n_transforms"],
## blockings[field+"_se"] - 1.96 * blockings[field+"_se_sd"],
## blockings[field+"_se"] + 1.96 * blockings[field+"_se_sd"],
## lw=0, alpha=0.5, color="blue")
## if fit_exp:
## subplots[i,0].plot(
## blockings["block_length"], blockings[field+"_exp_fit"],
## color="red")
### subplots[i,1].plot(
### blockings["n_transforms"], blockings[field+"_exp_fit"],
### color="red")
## if fit_sig:
### subplots[i,0].plot(
### blockings["block_length"], blockings[field+"_sig_fit"],
### color="green")
## subplots[i,1].plot(
## blockings["n_transforms"], blockings[field+"_sig_fit"],
## color="green")
## # Annotate
### subplots[i,1].legend(loc = 4)
##
## # Also make sure y lower bound is 0 and x upper bound is max x
## # Scale exponential tick labels?
## for i, field in enumerate(fields):
## print(field)
## # Adjust x ticks
## xticks = subplots[i,0].get_xticks()
## xticks = xticks[xticks <= blockings["block_length"].max()]
## subplots[i,0].set_xbound(0, xticks[-1])
## subplots[i,1].set_xbound(0, blockings["n_transforms"].max())
##
## # Adjust y ticks
## yticks = subplots[i,0].get_yticks()
## yticks = yticks[yticks >= 0]
## subplots[i,0].set_ybound(yticks[0], yticks[-1])
## subplots[i,1].set_ybound(yticks[0], yticks[-1])
#
# # Save and return
# with PdfPages(outfile) as pdf_outfile:
# figure.savefig(pdf_outfile, format="pdf")
# return None
# def main(self, parser=None):
# """
# Provides command-line functionality.
#
# Arguments:
# parser (ArgumentParser, optional): argparse argument parser;
# enables sublass to instantiate parser and add arguments;
# feature not well tested
# """
# import argparse
# from inspect import getmodule
#
# if parser is None:
# parser = argparse.ArgumentParser(
# description = getmodule(self.__class__).__doc__,
# formatter_class = argparse.RawDescriptionHelpFormatter)
#
# parser.add_argument(
# "-infile", "-infiles",
# type = str,
# nargs = "+",
# action = "append",
# required = True,
# help = "Input file(s)")
#
# parser.add_argument(
# "-max-cut",
# type = float,
# dest = "max_cut",
# default = 0.1,
# help = "Maximum proportion of dataset to cut when blocking; for "
# "example, a dataset of length 21 might be divided into 2 "
# "blocks of 10 (0.05 cut), 4 blocks of 5 (0.05 cut), 8 "
# "blocks of 2 (0.24 cut), or 16 blocks of 1 (0.24 cut); "
# "the latter two points would be used only if max_cut is "
# "greater than 0.24. Default: %(default)s")
#
# parser.add_argument(
# "-min-n-blocks",
# type = int,
# dest = "min_n_blocks",
# default = 1,
# help = "Only use blockings that include at least this number "
# "of blocks. Default: %(default)s")
#
# parser.add_argument(
# "--all-factors",
# action = "store_true",
# dest = "all_factors",
# help = "Divide dataset into 2,3,4,5,... blocks rather than "
# "2,4,8,16,... blocks; recommended for testing only")
#
## parser.add_argument(
## "-outfile",
## type = str,
## nargs = "+",
## action = "append",
## help = "Output results to file")
##
## parser.add_argument(
## "-outfigure",
## type = str,
## default = "block_average.pdf",
## help = "Output figure file (default: %(default)s)")
#
# parser.add_argument(
# "-s",
# "--seaborn",
# action = "store_const",
# const = 1,
# default = 0,
# help = "Enable seaborn, overriding matplotlib defaults")
#
# verbosity = parser.add_mutually_exclusive_group()
#
# verbosity.add_argument(
# "-v",
# "--verbose",
# action = "count",
# default = 1,
# help = "Enable verbose output, may be specified more than once")
#
# verbosity.add_argument(
# "-q",
# "--quiet",
# action = "store_const",
# const = 0,
# default = 1,
# dest = "verbose",
# help = "Disable verbose output")
#
# parser.add_argument(
# "-d",
# "--debug",
# action = "count",
# default = 0,
# help = "Enable debug output, may be specified more than once")
#
# arguments = vars(parser.parse_args())
#
# if arguments["seaborn"] == 1:
# import seaborn
# seaborn.set_palette("muted")
#
# if arguments["debug"] >= 1:
# from os import environ
# from .debug import db_s, db_kv
#
# db_s("Environment variables")
# for key in sorted(environ):
# db_kv(key, environ[key], 1)
#
# db_s("Command-line arguments")
# for key in sorted(arguments.keys()):
# db_kv(key, arguments[key], 1)
#
# self(**arguments)
#################################### MAIN #####################################
if __name__ == "__main__":
FPBlockAverager().main()
|
KarlTDebiec/FPBlockAverager
|
FPBlockAverager.py
|
Python
|
bsd-3-clause
| 21,910
|
[
"Gaussian"
] |
dacce42dedab6f1e0373c3bccafc184a009f2e8ca6dc78a879be6450e4ce15f3
|
#!/usr/bin/python
'''
The GARNET primary script executes five sub-scripts according to the provided configuration file. We provide vertebrate motif data and gene/xref data for hg19 and mm9.
--------------------------------------------------------------------
Config file:
--------------------------------------------------------------------
Configuration file should provide the following variables.
[chromatinData]
bedfile=[bed file of accessible chromatin regions]
fastafile=[fasta file of same regions, collected via galaxyweb]
genefile=[path to garnet]/examples/ucsc_hg19_knownGenes.txt
xreffile=[path to garnet]/examples/ucsc_hg19_kgXref.txt
windowsize=[distance around transcription start site]
[motifData]
tamo_file=../data/matrix_files/vertebrates_clustered_motifs.tamo
genome=hg19
numthreads=4
doNetwork=False
tfDelimiter=.
[expressionData]
expressionFile=[name of expression file]
pvalThresh=0.01
qvalThresh=
[regression]
savePlot=False
=======================================================================
'''
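# Example invocation (illustrative file names):
#   python garnet.py my_analysis.cfg --outdir=garnet_results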
__author__='Sara JC Gosline'
__email__='sgosline@mit.edu'
##update this to include direct location of chipsequtil package
import sys,os,re
import argparse
from ConfigParser import ConfigParser
progdir=os.path.dirname(sys.argv[0])
def mapGenesToRegions(genefile,xreffile,bedfile,window='2000',outdir=None):
'''
First step of GARNET maps exposed regions in bedfile to the closest gene within the provided window
calls map_peaks_to_known_genes.py
'''
if outdir is None:
outdir=os.path.splitext(os.path.basename(bedfile))[0]+'eventsWithin'+window
outfile=outdir+'/events_to_genes.xls'
res=0
if not os.path.exists(outdir):
os.system('mkdir '+outdir)
#old file naming scheme:
#os.path.splitext(os.path.basename(bedfile))[0]+'eventsWithin'+window+'bp_of_'+os.path.splitext(os.path.basename(genefile))[0]+'.xls'
##Step 1: map chromatin regions to nearby genes/transcription start sites
cmd='python '+os.path.join(progdir,'map_peaks_to_known_genes.py')+' --peaks-format=auto --utilpath='+os.path.join(progdir,'../src/')+' --upstream-window='+window+' --downstream-window='+window+' --tss --map-output='+outfile+' --symbol-xref='+xreffile+' '+genefile+' '+bedfile
if not os.path.exists(outfile):
print '\n-----------------------------Gene-region mapping output------------------------------------------\n'
print 'Running command:\n'+cmd+'\n'
print 'Mapping genes from '+genefile+' to regions within '+window+' bp of events from '+bedfile+' and putting results in '+outfile
res=os.system(cmd)
else:
print 'File '+outfile+' already exists. If you would like to replace it, delete and re-run'
return res,outfile
def motifScanning(tamo_file,fastafile,numthreads,genome,closest_gene_file='',gene_list=''):
'''
Second step of GARNET scans chromatin regions provided in galaxy-produced FASTA for motif matrix
affinity scores
Arguments:
tamo_file: TAMO-formatted list of motifs to scan
fastafile: FASTA-formatted file to scan
numthreads: number of threads to run, this process can take a while
genome: which genome build to use
closest_gene_file: output of map_peaks_to_known_genes.py so that only those fasta sequences that map to genes will be scanned.
gene_list: list of genes to focus on (e.g. differentially expressed genes), based on closest_gene_file mappings.
'''
if closest_gene_file=='':
motif_binding_out=re.sub('.fasta','_with_motifs.txt',fastafile)
else:
motif_binding_out=re.sub('.xls','_with_motifs.txt',closest_gene_file)
if os.path.exists(motif_binding_out):
print '\nIntermediate file '+motif_binding_out+' already exists; if you would like to replace it, delete it and re-run'
return 0,motif_binding_out
scan_cmd='python '+os.path.join(progdir,'motif_fsa_scores.py')+' --motif='+tamo_file+' --genome='+genome+' --outfile='+motif_binding_out+' --genemappingfile='+closest_gene_file+' --scale=10 --threads='+numthreads+' '+fastafile+' --genelist='+gene_list
print '\n-----------------------------Motif Scanning Output------------------------------------------\n'
print 'Running command:\n'+scan_cmd+'\n'
print 'Scanning regions from '+fastafile+' using matrices from '+tamo_file+' and putting results in '+motif_binding_out
res=os.system(scan_cmd)
return res,motif_binding_out
def createBindingMatrix(motif_binding_out,outfile,fastafile,tamo_file,use_uniprot=False):
'''
Third step of GARNET merges motif scores with closest gene information to create motif/gene
scoring matrix with appropriate identifiers
'''
if use_uniprot:
tfs=re.sub('.tamo','_up_tfids.txt',tamo_file)
matfile=re.sub('.txt','.tgm',motif_binding_out)
else:
matfile=re.sub('.txt','.tgm',motif_binding_out)
tfs=re.sub('.tamo','_tfids.txt',tamo_file)
extra_file=re.sub('.tamo','_source_names.txt',os.path.basename(tamo_file))
if os.path.exists(extra_file):
tfs=tfs+','+extra_file
##using regular gene names here
map_cmd='python '+os.path.join(progdir,'get_window_binding_matrix.py')+' '+motif_binding_out+' '+outfile+' '+' '+fastafile+" --distance-to-gene='' --motif-id-list="+tfs+' --outfile='+matfile
pklfile=re.sub('.tgm','.pkl',matfile)
if os.path.exists(pklfile):
print '\nIntermediate file '+pklfile+' already exists; if you would like to replace it, delete it and re-run'
return 0,pklfile
print '\n-----------------------------Binding Matrix Output------------------------------------------\n'
print 'Running command:\n'+map_cmd+'\n'
res=os.system(map_cmd)
return res,pklfile
def getTfsFromRegression(pickle_file,expressionfile,pvalT,qvalT,plot):
'''
Fourth step of GARNET is to perform regression with pickled matrix file and expression data
'''
# print '\nRunning regression using '+expressionfile+' expression data and '+pickle_file+' binding data'
outdir=re.sub('.pkl','regression_results.tsv',pickle_file)
# outdir=os.path.basename(expressionfile).split('.')[-2]+'_'+re.sub('.pkl','',os.path.basename(pickle_file))+'.xls'
print outdir
if not os.path.exists(outdir):
cmd='python '+os.path.join(progdir,'motif_regression.py')+' --outdir='+outdir+' '+pickle_file+' '+expressionfile
if pvalT is None or pvalT=='':
if qvalT is None or qvalT=='':
thresh='0.05'
else:
thresh=qvalT
cmd+=' --use-qval'
else:
thresh=pvalT
cmd+=' --thresh='+thresh
if plot:
cmd+=' --plot'
print '\n-----------------------------Regression Output------------------------------------------\n'
print 'Running command:\n'+cmd+'\n'
res=os.system(cmd)
else:
res=0
return res,outdir
def main():
srcdir=os.path.join(progdir,'../src')
parser=argparse.ArgumentParser()
#uniprot option will be deprecated, SAMNet should be able to map to human gene names
# parser.add_option('--useUniprot',dest='useUniprot',action='store_true',help='Set this flag to use Uniprot identifiers',default=False)
parser.add_argument('configfilename', help='Path to configuration file.')
parser.add_argument('--outdir',dest='outdir',help='Name of directory to place garnet output. DEFAULT: none',default=None)
parser.add_argument('--utilpath',dest='addpath',help='Destination of chipsequtil library, DEFAULT: ../src',default=srcdir)
parser.add_argument('--allGenes',dest='allgenes',help='Use this flag to use all annotated genes, even if they show no evidence of encoding proteins.',action='store_true',default=False)
opts=parser.parse_args()
sys.path.insert(0,opts.addpath)
sys.path.insert(0,opts.addpath+'chipsequtil')
config=ConfigParser()
config.read(opts.configfilename)
##now check for elements of config file. if they are missing, move onto next step
##first step 1 check
genefile=config.get('chromatinData','genefile')
bedfile=config.get('chromatinData','bedfile')
xref=config.get('chromatinData','xreffile')
window=config.get('chromatinData','windowsize')
if window is None:
window='2000'
#This variable tracks the results of all the commands. If it becomes non zero, stop.
keeprunning=0
if genefile is not None and bedfile is not None:
keeprunning,outfile=mapGenesToRegions(genefile,xref,bedfile,window,opts.outdir)
else:
print 'Missing genefile,bedfile or xref file, cannot map genes to regions.'
sys.exit()
if keeprunning!=0:
print 'Error running gene mapping step, check your files and try again'
sys.exit()
tamofile=config.get('motifData','tamo_file')
genome=config.get('motifData','genome')
numthreads=config.get('motifData','numthreads')
if numthreads is None:
numthreads='1'
fastafile=config.get('chromatinData','fastafile')
expr=config.get('expressionData','expressionFile')
##step 2
if tamofile is not None and tamofile!='' and genome is not None and fastafile is not None and fastafile!='':
if os.path.exists(tamofile) and os.path.exists(fastafile):
keeprunning,binding_out=motifScanning(tamofile,fastafile,numthreads,genome,outfile,expr)
else:
binding_out=''
print 'Missing FASTA file or TAMO file - check your config file and try again.'
if keeprunning!=0:
print 'Error running motif-scanning step, check your files and try again'
sys.exit()
##step 3
newfasta=re.sub('.xls','.fsa',outfile)
if outfile is not None and outfile!='' and binding_out is not None and binding_out!='':
keeprunning,binding_matrix=createBindingMatrix(binding_out,outfile,newfasta,tamo_file=tamofile,use_uniprot=False)
else:
binding_matrix=''
if keeprunning!=0:
print 'Error running matrix creation step, check your files and try again'
sys.exit()
# pklfile=config.get('motifData','pkl')
do_network=config.get('motifData','doNetwork')
delim=config.get('motifData','tfDelimiter')
if delim is None:##here we want no delimiter if we do not want to tease out individual tfs
delim=''
if do_network is not None and do_network!='' and do_network.lower()!='false':
cmd='python '+os.path.join(progdir,'zipTgms.py')+' --pkl='+binding_matrix+' --genome '+genome+' --as-network --tf-delimiter='+delim
if opts.allgenes:
cmd=cmd+' --allGenes'
print cmd
os.system(cmd)
pvt=config.get('expressionData','pvalThresh')
qvt=config.get('expressionData','qvalThresh')
plot = False
plot_str=config.get('regression','savePlot')
if plot_str is not None and plot_str != '' and plot_str.lower() != 'false':
plot = True
##step 4: regression
if expr is not None and expr!='':
#print binding_matrix,expr
if binding_matrix!='' and os.path.exists(binding_matrix) and os.path.exists(expr):
keeprunning,tfs=getTfsFromRegression(binding_matrix,expr,pvt,qvt,plot)
else:
print 'Cannot perform regression because binding matrix or expression datasets are missing'
if keeprunning!=0:
print 'Error running regression step, check your files and try again'
sys.exit()
if __name__=='__main__':
main()
|
Mkebede/OmicsIntegrator
|
scripts/garnet.py
|
Python
|
bsd-2-clause
| 11,577
|
[
"Galaxy"
] |
5e4944cd6a236e9417a5fcd21e3a1eb08dc068c790a7ad7bf0d4325b904327a7
|